Initial V1 Backup

Javier
2026-01-22 00:36:01 -06:00
commit 4c5a588197
27 changed files with 7509 additions and 0 deletions

database/init_db.py (new file)

@@ -0,0 +1,222 @@
"""
ScanLook Database Initialization
Creates all tables and indexes for the inventory management system
UPDATED: Reflects post-migration schema (CURRENT baseline is now global)
"""
import sqlite3
import os
from datetime import datetime
from werkzeug.security import generate_password_hash
DB_PATH = os.path.join(os.path.dirname(__file__), 'scanlook.db')
def init_database():
"""Initialize the database with all tables and indexes"""
conn = sqlite3.connect(DB_PATH)
cursor = conn.cursor()
# Users Table
cursor.execute('''
CREATE TABLE IF NOT EXISTS Users (
user_id INTEGER PRIMARY KEY AUTOINCREMENT,
username TEXT UNIQUE NOT NULL,
password TEXT NOT NULL,
full_name TEXT NOT NULL,
email TEXT,
role TEXT NOT NULL CHECK(role IN ('owner', 'admin', 'staff')),
is_active INTEGER DEFAULT 1,
branch TEXT DEFAULT 'Main',
created_timestamp DATETIME DEFAULT CURRENT_TIMESTAMP
)
''')
# CountSessions Table
# NOTE: current_baseline_version removed - CURRENT is now global
cursor.execute('''
CREATE TABLE IF NOT EXISTS CountSessions (
session_id INTEGER PRIMARY KEY AUTOINCREMENT,
session_name TEXT NOT NULL,
session_type TEXT NOT NULL CHECK(session_type IN ('cycle_count', 'full_physical')),
created_by INTEGER NOT NULL,
created_timestamp DATETIME DEFAULT CURRENT_TIMESTAMP,
master_baseline_timestamp DATETIME,
current_baseline_timestamp DATETIME,
status TEXT DEFAULT 'active' CHECK(status IN ('active', 'completed', 'archived')),
branch TEXT DEFAULT 'Main',
FOREIGN KEY (created_by) REFERENCES Users(user_id)
)
''')
# BaselineInventory_Master Table (Session-specific, immutable)
cursor.execute('''
CREATE TABLE IF NOT EXISTS BaselineInventory_Master (
baseline_id INTEGER PRIMARY KEY AUTOINCREMENT,
session_id INTEGER NOT NULL,
lot_number TEXT NOT NULL,
item TEXT NOT NULL,
description TEXT,
system_location TEXT NOT NULL,
system_bin TEXT NOT NULL,
system_quantity REAL NOT NULL,
uploaded_timestamp DATETIME DEFAULT CURRENT_TIMESTAMP,
FOREIGN KEY (session_id) REFERENCES CountSessions(session_id)
)
''')
# BaselineInventory_Current Table (GLOBAL - shared across all sessions)
# MIGRATION CHANGE: No session_id, no baseline_version, no is_deleted
# This table is replaced entirely on each upload
cursor.execute('''
CREATE TABLE IF NOT EXISTS BaselineInventory_Current (
current_id INTEGER PRIMARY KEY AUTOINCREMENT,
lot_number TEXT NOT NULL,
item TEXT NOT NULL,
description TEXT,
system_location TEXT,
system_bin TEXT NOT NULL,
system_quantity REAL NOT NULL,
upload_timestamp DATETIME DEFAULT CURRENT_TIMESTAMP,
UNIQUE(lot_number, system_bin)
)
''')
# LocationCounts Table
cursor.execute('''
CREATE TABLE IF NOT EXISTS LocationCounts (
location_count_id INTEGER PRIMARY KEY AUTOINCREMENT,
session_id INTEGER NOT NULL,
location_name TEXT NOT NULL,
counted_by INTEGER NOT NULL,
start_timestamp DATETIME DEFAULT CURRENT_TIMESTAMP,
end_timestamp DATETIME,
status TEXT DEFAULT 'not_started' CHECK(status IN ('not_started', 'in_progress', 'completed')),
expected_lots_master INTEGER DEFAULT 0,
lots_found INTEGER DEFAULT 0,
lots_missing INTEGER DEFAULT 0,
FOREIGN KEY (session_id) REFERENCES CountSessions(session_id),
FOREIGN KEY (counted_by) REFERENCES Users(user_id)
)
''')
# ScanEntries Table
# MIGRATION CHANGE: Removed current_* columns - now fetched via JOIN
cursor.execute('''
CREATE TABLE IF NOT EXISTS ScanEntries (
entry_id INTEGER PRIMARY KEY AUTOINCREMENT,
session_id INTEGER NOT NULL,
location_count_id INTEGER NOT NULL,
lot_number TEXT NOT NULL,
item TEXT,
description TEXT,
scanned_location TEXT NOT NULL,
actual_weight REAL NOT NULL,
scanned_by INTEGER NOT NULL,
scan_timestamp DATETIME DEFAULT CURRENT_TIMESTAMP,
-- MASTER baseline comparison (immutable, set at scan time)
master_status TEXT CHECK(master_status IN ('match', 'wrong_location', 'ghost_lot', 'missing')),
master_expected_location TEXT,
master_expected_weight REAL,
master_variance_lbs REAL,
master_variance_pct REAL,
-- Duplicate detection
duplicate_status TEXT DEFAULT '00' CHECK(duplicate_status IN ('00', '01', '03', '04')),
duplicate_info TEXT,
-- CURRENT baseline comparison removed - now via JOIN in queries
-- Removed: current_status, current_system_location, current_system_weight,
-- current_variance_lbs, current_variance_pct, current_baseline_version
-- Metadata
comment TEXT,
is_deleted INTEGER DEFAULT 0,
deleted_by INTEGER,
deleted_timestamp DATETIME,
modified_timestamp DATETIME,
FOREIGN KEY (session_id) REFERENCES CountSessions(session_id),
FOREIGN KEY (location_count_id) REFERENCES LocationCounts(location_count_id),
FOREIGN KEY (scanned_by) REFERENCES Users(user_id),
FOREIGN KEY (deleted_by) REFERENCES Users(user_id)
)
''')
# MissingLots Table
cursor.execute('''
CREATE TABLE IF NOT EXISTS MissingLots (
missing_id INTEGER PRIMARY KEY AUTOINCREMENT,
session_id INTEGER NOT NULL,
location_count_id INTEGER,
lot_number TEXT NOT NULL,
item TEXT,
master_expected_location TEXT NOT NULL,
master_expected_quantity REAL NOT NULL,
current_system_location TEXT,
current_system_quantity REAL,
marked_by INTEGER NOT NULL,
marked_timestamp DATETIME DEFAULT CURRENT_TIMESTAMP,
found_later TEXT DEFAULT 'N' CHECK(found_later IN ('Y', 'N')),
found_location TEXT,
FOREIGN KEY (session_id) REFERENCES CountSessions(session_id),
FOREIGN KEY (location_count_id) REFERENCES LocationCounts(location_count_id),
FOREIGN KEY (marked_by) REFERENCES Users(user_id)
)
''')
# Create Indexes
# MASTER baseline indexes
cursor.execute('CREATE INDEX IF NOT EXISTS idx_baseline_master_lot ON BaselineInventory_Master(session_id, lot_number)')
cursor.execute('CREATE INDEX IF NOT EXISTS idx_baseline_master_loc ON BaselineInventory_Master(session_id, system_location)')
# ScanEntries indexes
cursor.execute('CREATE INDEX IF NOT EXISTS idx_scanentries_session ON ScanEntries(session_id)')
cursor.execute('CREATE INDEX IF NOT EXISTS idx_scanentries_location ON ScanEntries(location_count_id)')
cursor.execute('CREATE INDEX IF NOT EXISTS idx_scanentries_lot ON ScanEntries(lot_number)')
cursor.execute('CREATE INDEX IF NOT EXISTS idx_scanentries_deleted ON ScanEntries(is_deleted)')
# LocationCounts indexes
cursor.execute('CREATE INDEX IF NOT EXISTS idx_location_counts ON LocationCounts(session_id, status)')
# Note: No extra indexes on BaselineInventory_Current - the UNIQUE(lot_number, system_bin) constraint already creates an implicit index that covers these lookups
conn.commit()
conn.close()
print(f"✅ Database initialized at: {DB_PATH}")
print("📝 Schema version: Post-migration (CURRENT baseline is global)")
def create_default_users():
"""Create default users for testing"""
conn = sqlite3.connect(DB_PATH)
cursor = conn.cursor()
default_users = [
('owner', generate_password_hash('owner123'), 'System Owner', 'owner'),
('admin', generate_password_hash('admin123'), 'Admin User', 'admin'),
('staff1', generate_password_hash('staff123'), 'John Doe', 'staff'),
('staff2', generate_password_hash('staff123'), 'Jane Smith', 'staff'),
]
try:
cursor.executemany('''
INSERT INTO Users (username, password, full_name, role)
VALUES (?, ?, ?, ?)
''', default_users)
conn.commit()
print("✅ Default users created:")
print(" Owner: owner / owner123")
print(" Admin: admin / admin123")
print(" Staff: staff1 / staff123")
print(" Staff: staff2 / staff123")
except sqlite3.IntegrityError:
print(" Default users already exist")
conn.close()
if __name__ == '__main__':
init_database()
create_default_users()
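
Since the current_* columns were dropped from ScanEntries, any report that needs the CURRENT-baseline view has to join against the global BaselineInventory_Current at query time. The helper below is a minimal sketch of such a lookup, not code from this commit; joining on lot_number alone is an assumption (a lot split across bins would return one row per bin).

# Illustrative sketch only (not part of this commit): fetch scans for a session
# together with matching rows from the global CURRENT baseline via a JOIN.
import sqlite3

def scans_with_current_baseline(db_path, session_id):
    """Return scan entries joined to BaselineInventory_Current; join key is an assumption."""
    conn = sqlite3.connect(db_path)
    conn.row_factory = sqlite3.Row
    rows = conn.execute('''
        SELECT s.entry_id,
               s.lot_number,
               s.scanned_location,
               s.actual_weight,
               c.system_location AS current_system_location,
               c.system_quantity AS current_system_weight,
               s.actual_weight - c.system_quantity AS current_variance_lbs
        FROM ScanEntries s
        LEFT JOIN BaselineInventory_Current c ON c.lot_number = s.lot_number
        WHERE s.session_id = ? AND s.is_deleted = 0
        ORDER BY s.scan_timestamp
    ''', (session_id,)).fetchall()
    conn.close()
    return rows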

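The comment on BaselineInventory_Current above says the table is replaced entirely on each upload. One way that could look, as a hedged sketch rather than the actual upload code in this repository: clear the table and bulk-insert the new snapshot inside a single transaction so readers never see a half-replaced baseline.

# Hypothetical upload helper (not part of this commit): full replace of the
# global CURRENT baseline in one transaction.
import sqlite3

def replace_current_baseline(db_path, rows):
    """rows: iterable of (lot_number, item, description, system_location, system_bin, system_quantity)."""
    conn = sqlite3.connect(db_path)
    try:
        with conn:  # commits on success, rolls back on error
            conn.execute('DELETE FROM BaselineInventory_Current')
            conn.executemany('''
                INSERT OR IGNORE INTO BaselineInventory_Current
                    (lot_number, item, description, system_location, system_bin, system_quantity)
                VALUES (?, ?, ?, ?, ?, ?)
            ''', rows)
    finally:
        conn.close()
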

@@ -0,0 +1,155 @@
-- Migration: Make CURRENT baseline global (not session-specific)
-- Run this ONCE against an existing (pre-migration) database
BEGIN TRANSACTION;
-- Step 1: Create new global CURRENT table
CREATE TABLE IF NOT EXISTS BaselineInventory_Current_New (
current_id INTEGER PRIMARY KEY AUTOINCREMENT,
lot_number TEXT NOT NULL,
item TEXT NOT NULL,
description TEXT,
system_location TEXT,
system_bin TEXT NOT NULL,
system_quantity REAL NOT NULL,
upload_timestamp DATETIME DEFAULT CURRENT_TIMESTAMP,
UNIQUE(lot_number, system_bin)
);
-- Step 2: Copy latest CURRENT data (if any exists)
-- Get the most recent baseline_version
INSERT OR IGNORE INTO BaselineInventory_Current_New
(lot_number, item, description, system_location, system_bin, system_quantity, upload_timestamp)
SELECT
lot_number,
item,
description,
system_location,
system_bin,
system_quantity,
upload_timestamp
FROM BaselineInventory_Current
WHERE is_deleted = 0
AND baseline_version = (SELECT MAX(baseline_version) FROM BaselineInventory_Current WHERE is_deleted = 0)
ORDER BY upload_timestamp DESC;
-- Step 3: Drop old CURRENT table
DROP TABLE IF EXISTS BaselineInventory_Current;
-- Step 4: Rename new table
ALTER TABLE BaselineInventory_Current_New RENAME TO BaselineInventory_Current;
-- Step 5: Remove CURRENT columns from ScanEntries
-- SQLite doesn't support DROP COLUMN directly, so we recreate the table
CREATE TABLE ScanEntries_New (
entry_id INTEGER PRIMARY KEY AUTOINCREMENT,
session_id INTEGER NOT NULL,
location_count_id INTEGER NOT NULL,
lot_number TEXT NOT NULL,
item TEXT,
description TEXT,
scanned_location TEXT NOT NULL,
actual_weight REAL NOT NULL,
scanned_by INTEGER NOT NULL,
scan_timestamp DATETIME DEFAULT CURRENT_TIMESTAMP,
-- MASTER baseline reference (stored for performance)
master_status TEXT,
master_expected_location TEXT,
master_expected_weight REAL,
master_variance_lbs REAL,
master_variance_pct REAL,
-- Duplicate tracking
duplicate_status TEXT DEFAULT '00',
duplicate_info TEXT,
-- Metadata
comment TEXT,
is_deleted INTEGER DEFAULT 0,
deleted_by INTEGER,
deleted_timestamp DATETIME,
modified_timestamp DATETIME,
FOREIGN KEY (session_id) REFERENCES CountSessions(session_id),
FOREIGN KEY (location_count_id) REFERENCES LocationCounts(location_count_id),
FOREIGN KEY (scanned_by) REFERENCES Users(user_id),
FOREIGN KEY (deleted_by) REFERENCES Users(user_id)
);
-- Copy all scan data
INSERT INTO ScanEntries_New
SELECT
entry_id,
session_id,
location_count_id,
lot_number,
item,
description,
scanned_location,
actual_weight,
scanned_by,
scan_timestamp,
master_status,
master_expected_location,
master_expected_weight,
master_variance_lbs,
master_variance_pct,
duplicate_status,
duplicate_info,
comment,
is_deleted,
deleted_by,
deleted_timestamp,
modified_timestamp
FROM ScanEntries;
-- Drop old table and rename
DROP TABLE ScanEntries;
ALTER TABLE ScanEntries_New RENAME TO ScanEntries;
-- Recreate indexes
CREATE INDEX idx_scanentries_session ON ScanEntries(session_id);
CREATE INDEX idx_scanentries_location ON ScanEntries(location_count_id);
CREATE INDEX idx_scanentries_lot ON ScanEntries(lot_number);
CREATE INDEX idx_scanentries_deleted ON ScanEntries(is_deleted);
-- Step 6: Remove current_baseline_version from CountSessions
-- Recreate CountSessions table
CREATE TABLE CountSessions_New (
session_id INTEGER PRIMARY KEY AUTOINCREMENT,
session_name TEXT NOT NULL,
description TEXT,
status TEXT DEFAULT 'active',
branch TEXT DEFAULT 'Main',
created_by INTEGER NOT NULL,
created_timestamp DATETIME DEFAULT CURRENT_TIMESTAMP,
master_baseline_timestamp DATETIME,
current_baseline_timestamp DATETIME,
completed_timestamp DATETIME,
FOREIGN KEY (created_by) REFERENCES Users(user_id)
);
-- Copy session data
INSERT INTO CountSessions_New
SELECT
session_id,
session_name,
description,
status,
branch,
created_by,
created_timestamp,
master_baseline_timestamp,
current_baseline_timestamp,
completed_timestamp
FROM CountSessions;
-- Drop old and rename
DROP TABLE CountSessions;
ALTER TABLE CountSessions_New RENAME TO CountSessions;
COMMIT;
-- Migration complete!
-- CURRENT baseline is now global and always shows latest data
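
The script above is a one-shot migration against an existing pre-migration scanlook.db. A minimal way to apply it from Python, assuming the SQL is saved as a file next to the database (the file name below is an assumption, not something this commit defines):

# Hypothetical one-time runner for the migration script; the .sql file name is assumed.
import os
import sqlite3

DB_PATH = os.path.join(os.path.dirname(__file__), 'scanlook.db')
MIGRATION_PATH = os.path.join(os.path.dirname(__file__), 'migrate_current_global.sql')  # assumed name

def run_migration():
    with open(MIGRATION_PATH, encoding='utf-8') as f:
        script = f.read()
    conn = sqlite3.connect(DB_PATH)
    try:
        conn.executescript(script)  # the script carries its own BEGIN TRANSACTION / COMMIT
    finally:
        conn.close()

if __name__ == '__main__':
    run_migration()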

database/scanlook.db (new binary file, not shown)
database/scanlook.db.backup (new binary file, not shown)