V0.15.0 - Not done yet

This commit is contained in:
Javier
2026-02-01 16:22:59 -06:00
parent 89be88566f
commit 2a649fdbcc
8 changed files with 455 additions and 115 deletions

View File

@@ -8,18 +8,23 @@ cons_sheets_bp = Blueprint('cons_sheets', __name__)
@cons_sheets_bp.route('/admin/consumption-sheets')
@role_required('owner', 'admin')
def admin_processes():
"""List all consumption sheet process types"""
processes = query_db('''
SELECT cp.*, u.full_name as created_by_name,
(SELECT COUNT(*) FROM cons_process_fields
WHERE process_id = cp.id AND is_active = 1) as field_count
FROM cons_processes cp
LEFT JOIN Users u ON cp.created_by = u.user_id
WHERE cp.is_active = 1
ORDER BY cp.process_name
''')
"""List all consumption sheet process types (Active or Archived)"""
show_archived = request.args.get('archived') == '1'
is_active_val = 0 if show_archived else 1
return render_template('cons_sheets/admin_processes.html', processes=processes)
processes = query_db('''
SELECT cp.*,
u.full_name as created_by_name,
(SELECT COUNT(*) FROM cons_process_fields WHERE process_id = cp.id) as field_count
FROM cons_processes cp
LEFT JOIN users u ON cp.created_by = u.user_id
WHERE cp.is_active = ?
ORDER BY cp.process_name ASC
''', [is_active_val])
return render_template('cons_sheets/admin_processes.html',
processes=processes,
showing_archived=show_archived)
@cons_sheets_bp.route('/admin/consumption-sheets/create', methods=['GET', 'POST'])
@@ -144,6 +149,36 @@ def rename_column_in_detail_table(process_key, old_name, new_name):
conn.close()
@cons_sheets_bp.route('/admin/consumption-sheets/<int:process_id>/delete', methods=['POST'])
@role_required('owner', 'admin')
def delete_process(process_id):
    """Soft-delete a process type (Archive it)"""
    # Look the process up first so we can report a friendly error and
    # include its name in the success message.
    target = query_db('SELECT * FROM cons_processes WHERE id = ?', [process_id], one=True)
    if target is None:
        flash('Process not found', 'danger')
        return redirect(url_for('cons_sheets.admin_processes'))

    # Archive rather than remove: flip the is_active flag to 0.
    # The admin_processes listing filters on is_active, so the row
    # simply drops out of the default (active) view.
    execute_db('UPDATE cons_processes SET is_active = 0 WHERE id = ?', [process_id])
    flash(f'Process "{target["process_name"]}" has been deleted.', 'success')
    return redirect(url_for('cons_sheets.admin_processes'))
@cons_sheets_bp.route('/admin/consumption-sheets/<int:process_id>/restore', methods=['POST'])
@role_required('owner', 'admin')
def restore_process(process_id):
    """Restore a soft-deleted process type"""
    # Flip the soft-delete flag back on; an UPDATE against a missing id
    # is a harmless no-op, so no existence check is performed here.
    execute_db('UPDATE cons_processes SET is_active = 1 WHERE id = ?', [process_id])
    flash('Process has been restored.', 'success')
    # Return the admin to the archived listing they restored from.
    return redirect(url_for('cons_sheets.admin_processes', archived=1))
@cons_sheets_bp.route('/admin/consumption-sheets/<int:process_id>')
@role_required('owner', 'admin')
def process_detail(process_id):
@@ -284,28 +319,35 @@ def update_template_settings(process_id):
rows_per_page = request.form.get('rows_per_page', 30)
detail_start_row = request.form.get('detail_start_row', 10)
detail_end_row = request.form.get('detail_end_row') # <--- Get the new value
page_height = request.form.get('page_height')
print_start_col = request.form.get('print_start_col', 'A').strip().upper()
print_end_col = request.form.get('print_end_col', '').strip().upper()
try:
rows_per_page = int(rows_per_page)
detail_start_row = int(detail_start_row)
# Handle empty string for end row (it's optional-ish, but needed for this specific strategy)
detail_end_row = int(detail_end_row) if detail_end_row and detail_end_row.strip() else None
# We enforce page_height is required now
page_height = int(page_height) if page_height and page_height.strip() else None
if not page_height:
flash('Page Height is required for the new strategy', 'danger')
return redirect(url_for('cons_sheets.process_template', process_id=process_id))
except ValueError:
flash('Invalid number values', 'danger')
return redirect(url_for('cons_sheets.process_template', process_id=process_id))
# Update query to include the new column
# Update query - We ignore detail_end_row (leave it as is or null)
execute_db('''
UPDATE cons_processes
SET rows_per_page = ?, detail_start_row = ?, detail_end_row = ?
WHERE id = ?
''', [rows_per_page, detail_start_row, detail_end_row, process_id])
UPDATE cons_processes
SET rows_per_page = ?, detail_start_row = ?, page_height = ?,
print_start_col = ?, print_end_col = ?
WHERE id = ?
''', [rows_per_page, detail_start_row, page_height, print_start_col, print_end_col, process_id])
flash('Settings updated successfully!', 'success')
return redirect(url_for('cons_sheets.process_template', process_id=process_id))
@cons_sheets_bp.route('/admin/consumption-sheets/<int:process_id>/template/download')
@role_required('owner', 'admin')
def download_template(process_id):
@@ -909,21 +951,59 @@ def archive_session(session_id):
return jsonify({'success': True})
# --- BULK IMPORT ROUTES ---
@cons_sheets_bp.route('/cons-sheets/session/<int:session_id>/export')
@cons_sheets_bp.route('/cons-sheets/session/<int:session_id>/template')
@login_required
def export_session(session_id):
"""Export session to Excel using the One Giant Template strategy"""
from flask import Response
def download_import_template(session_id):
"""Generate a blank Excel template for bulk import"""
from flask import Response # <--- ADDED THIS
from io import BytesIO
import openpyxl
from datetime import datetime
# Get session with process info AND the new detail_end_row
# Get Process ID
sess = query_db('SELECT process_id FROM cons_sessions WHERE id = ?', [session_id], one=True)
if not sess: return redirect(url_for('cons_sheets.index'))
# Get Detail Fields
fields = query_db('''
SELECT field_name, field_label
FROM cons_process_fields
WHERE process_id = ? AND table_type = 'detail' AND is_active = 1
ORDER BY sort_order
''', [sess['process_id']])
# Create Workbook
wb = openpyxl.Workbook()
ws = wb.active
ws.title = "Import Data"
# Write Header Row (Field Names)
headers = [f['field_name'] for f in fields]
ws.append(headers)
output = BytesIO()
wb.save(output)
output.seek(0)
return Response(
output.getvalue(),
mimetype='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
headers={'Content-Disposition': 'attachment; filename=import_template.xlsx'}
)
@cons_sheets_bp.route('/cons-sheets/session/<int:session_id>/import', methods=['POST'])
@login_required
def import_session_data(session_id):
"""Bulk import detail rows from Excel"""
# Import EVERYTHING locally to avoid NameErrors
import openpyxl
from datetime import datetime
from flask import request, flash, redirect, url_for, session
# 1. Get Session Info
sess = query_db('''
SELECT cs.*, cp.process_name, cp.process_key, cp.id as process_id,
cp.template_file, cp.template_filename, cp.rows_per_page,
cp.detail_start_row, cp.detail_end_row
SELECT cs.*, cp.process_key
FROM cons_sessions cs
JOIN cons_processes cp ON cs.process_id = cp.id
WHERE cs.id = ?
@@ -932,12 +1012,125 @@ def export_session(session_id):
if not sess:
flash('Session not found', 'danger')
return redirect(url_for('cons_sheets.index'))
if not sess['template_file']:
flash('No template configured for this process', 'danger')
# 2. Check File
if 'file' not in request.files:
flash('No file uploaded', 'danger')
return redirect(url_for('cons_sheets.scan_session', session_id=session_id))
file = request.files['file']
if file.filename == '':
flash('No file selected', 'danger')
return redirect(url_for('cons_sheets.scan_session', session_id=session_id))
try:
# 3. Read Excel
wb = openpyxl.load_workbook(file)
ws = wb.active
# Get headers from first row
headers = [cell.value for cell in ws[1]]
# Get valid field names for this process
valid_fields = query_db('''
SELECT field_name
FROM cons_process_fields
WHERE process_id = ? AND table_type = 'detail' AND is_active = 1
''', [sess['process_id']])
valid_field_names = [f['field_name'] for f in valid_fields]
# Map Excel Columns to DB Fields
col_mapping = {}
for idx, header in enumerate(headers):
if header and header in valid_field_names:
col_mapping[idx] = header
if not col_mapping:
flash('Error: No matching columns found in Excel. Please use the template.', 'danger')
return redirect(url_for('cons_sheets.scan_session', session_id=session_id))
# 4. Process Rows
table_name = f"cons_proc_{sess['process_key']}_details"
rows_inserted = 0
# Get User ID safely from session
user_id = session.get('user_id')
for row in ws.iter_rows(min_row=2, values_only=True):
if not any(row): continue
data = {}
for col_idx, value in enumerate(row):
if col_idx in col_mapping:
data[col_mapping[col_idx]] = value
if not data: continue
# Add Metadata
data['session_id'] = session_id
data['scanned_at'] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
data['scanned_by'] = user_id
# REMOVED: data['is_valid'] = 1 (This column does not exist)
data['is_deleted'] = 0
# Dynamic Insert SQL
columns = ', '.join(data.keys())
placeholders = ', '.join(['?'] * len(data))
values = list(data.values())
sql = f"INSERT INTO {table_name} ({columns}) VALUES ({placeholders})"
execute_db(sql, values)
rows_inserted += 1
flash(f'Successfully imported {rows_inserted} records!', 'success')
except Exception as e:
# This will catch any other errors and show them to you
flash(f'Import Error: {str(e)}', 'danger')
print(f"DEBUG IMPORT ERROR: {str(e)}") # Print to console for good measure
return redirect(url_for('cons_sheets.scan_session', session_id=session_id))
@cons_sheets_bp.route('/cons-sheets/session/<int:session_id>/export')
@login_required
def export_session(session_id):
"""Export session: Hide Rows Strategy + Manual Column Widths"""
from flask import Response
from io import BytesIO
import openpyxl
# Correct imports for newer openpyxl
from openpyxl.utils.cell import coordinate_from_string, get_column_letter
from openpyxl.worksheet.pagebreak import Break
from datetime import datetime
import math
# Get header fields and values
# --- FIX 1: Update SQL to fetch the new columns ---
sess = query_db('''
SELECT cs.*, cp.process_name, cp.process_key, cp.id as process_id,
cp.template_file, cp.template_filename,
cp.rows_per_page, cp.detail_start_row, cp.page_height,
cp.print_start_col, cp.print_end_col
FROM cons_sessions cs
JOIN cons_processes cp ON cs.process_id = cp.id
WHERE cs.id = ?
''', [session_id], one=True)
if not sess or not sess['template_file']:
flash('Session or Template not found', 'danger')
return redirect(url_for('cons_sheets.index'))
# Validation
page_height = sess['page_height']
rows_per_page = sess['rows_per_page'] or 30
detail_start_row = sess['detail_start_row'] or 10
if not page_height:
flash('Configuration Error: Page Height is not set.', 'danger')
return redirect(url_for('cons_sheets.scan_session', session_id=session_id))
# Get Data
header_fields = query_db('''
SELECT cpf.field_name, cpf.excel_cell, cshv.field_value
FROM cons_process_fields cpf
@@ -945,7 +1138,6 @@ def export_session(session_id):
WHERE cpf.process_id = ? AND cpf.table_type = 'header' AND cpf.is_active = 1 AND cpf.excel_cell IS NOT NULL
''', [session_id, sess['process_id']])
# Get detail fields with their column mappings
detail_fields = query_db('''
SELECT field_name, excel_cell, field_type
FROM cons_process_fields
@@ -953,7 +1145,6 @@ def export_session(session_id):
ORDER BY sort_order, id
''', [sess['process_id']])
# Get all scanned details
table_name = f'cons_proc_{sess["process_key"]}_details'
scans = query_db(f'''
SELECT * FROM {table_name}
@@ -961,71 +1152,80 @@ def export_session(session_id):
ORDER BY scanned_at ASC
''', [session_id])
# Load the template
template_bytes = BytesIO(sess['template_file'])
wb = openpyxl.load_workbook(template_bytes)
ws = wb.active # We only work on the first sheet now
# Setup Excel
wb = openpyxl.load_workbook(BytesIO(sess['template_file']))
ws = wb.active
detail_start_row = sess['detail_start_row'] or 11
detail_end_row = sess['detail_end_row'] # This is our new target
# Clear existing breaks
ws.row_breaks.brk = []
ws.col_breaks.brk = []
# --- STEP 1: Fill Header ---
for field in header_fields:
if field['excel_cell'] and field['field_value']:
try:
ws[field['excel_cell']] = field['field_value']
except:
pass
# Calculate Pages Needed
total_items = len(scans)
total_pages = math.ceil(total_items / rows_per_page) if total_items > 0 else 1
# --- STEP 2: Fill ALL Details ---
# We just write them all sequentially, relying on the template being "Giant"
for i, scan in enumerate(scans):
row_num = detail_start_row + i
for field in detail_fields:
if field['excel_cell']:
try:
col_letter = field['excel_cell'].upper().strip()
cell_ref = f"{col_letter}{row_num}"
value = scan[field['field_name']]
# Convert types
if field['field_type'] == 'REAL' and value:
value = float(value)
elif field['field_type'] == 'INTEGER' and value:
value = int(value)
ws[cell_ref] = value
except Exception as e:
print(f"Error filling cell: {e}")
# --- STEP 3: Delete Unused Rows & Fix Print Area ---
if detail_end_row:
first_empty_row = detail_start_row + len(scans)
# --- MAIN LOOP ---
for page_idx in range(total_pages):
# Only delete if we actually have empty rows to remove
if first_empty_row <= detail_end_row:
rows_to_delete = detail_end_row - first_empty_row + 1
ws.delete_rows(first_empty_row, amount=rows_to_delete)
# --- FIX 1: Clear Breaks ---
ws.row_breaks.brk = []
ws.col_breaks.brk = []
# --- FIX 2: Explicitly Set Print Area ---
# The "Total" line (and footer) has now moved UP to 'first_empty_row'.
# We want to print everything from A1 down to that Total line.
# (If your footer is taller than 1 row, increase the +0 below)
footer_height = 0
final_print_row = first_empty_row + footer_height
# Force the print area to cut off the "Zombie Pages"
ws.print_area = f"A1:K{final_print_row}"
# Reset scaling
if ws.sheet_properties.pageSetUpPr:
ws.sheet_properties.pageSetUpPr.fitToPage = False
# --- Save & Export ---
# 1. Fill Header
for field in header_fields:
if field['excel_cell'] and field['field_value']:
try:
col_letter, row_str = coordinate_from_string(field['excel_cell'])
base_row = int(row_str)
target_row = base_row + (page_idx * page_height)
ws[f"{col_letter}{target_row}"] = field['field_value']
except: pass
# 2. Fill Details
start_idx = page_idx * rows_per_page
end_idx = start_idx + rows_per_page
page_scans = scans[start_idx:end_idx]
for i, scan in enumerate(page_scans):
target_row = detail_start_row + (page_idx * page_height) + i
for field in detail_fields:
if field['excel_cell']:
try:
col_letter = field['excel_cell'].upper().strip()
cell_ref = f"{col_letter}{target_row}"
value = scan[field['field_name']]
if field['field_type'] == 'REAL' and value: value = float(value)
elif field['field_type'] == 'INTEGER' and value: value = int(value)
ws[cell_ref] = value
except: pass
# 3. Force Page Break (BEFORE the new header)
if page_idx < total_pages - 1:
next_page_start_row = ((page_idx + 1) * page_height) # No +1 here!
ws.row_breaks.append(Break(id=next_page_start_row))
# --- STEP 3: CLEANUP (Hide Unused Rows) ---
last_used_row = (total_pages * page_height)
SAFE_MAX_ROW = 5000
for row_num in range(last_used_row + 1, SAFE_MAX_ROW):
ws.row_dimensions[row_num].hidden = True
# --- FINAL POLISH (Manual Widths) ---
# --- FIX 2: Use bracket notation (sess['col']) instead of .get() ---
# We use 'or' to provide defaults if the DB value is None
start_col = sess['print_start_col'] or 'A'
if sess['print_end_col']:
end_col = sess['print_end_col']
else:
# Fallback to auto-detection if user left it blank
end_col = get_column_letter(ws.max_column)
# Set Print Area
ws.print_area = f"{start_col}1:{end_col}{last_used_row}"
if ws.sheet_properties.pageSetUpPr:
ws.sheet_properties.pageSetUpPr.fitToPage = False
# Save
output = BytesIO()
wb.save(output)
output.seek(0)