from flask import Flask, send_file, jsonify, request
from flask_cors import CORS
from google.oauth2 import service_account
from googleapiclient.discovery import build, MediaFileUpload
from googleapiclient.http import MediaIoBaseDownload
from configs.databaseConnection import connect_to_database,insertQuerySearchCriteria, insertQuerySearchRequests,updateProgressPercentage, updateRowStatus,updateExecTimeAndQuota, selectStatus,updateStatus, update_filename
from service.VideoCall import get_video_metadata 
import logging
from googleapiclient.errors import HttpError
import pandas as pd
from flask_app import app
import datetime
import os
import threading
import time
import datetime


CORS(app, origins="*")  # This will enable CORS for all routes
# Google Drive API configuration: full-access scope + service-account key file.
SCOPES = ['https://www.googleapis.com/auth/drive']
SERVICE_ACCOUNT_FILE = 'configs/credentials.json'
# Uploads land in TEMP_FOLDER first; once validated they are moved into
# UPLOAD_FOLDER, which the background process_files() thread watches.
UPLOAD_FOLDER = 'uploads'
TEMP_FOLDER = 'tempUploads'
os.makedirs(UPLOAD_FOLDER, exist_ok=True)
os.makedirs(TEMP_FOLDER, exist_ok=True)
# NOTE(review): module-level global overwritten per request in process_excel();
# not safe under concurrent requests — confirm single-user assumption.
USERNAME = None

logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

# Authenticate and build the Drive API client
def get_drive_service():
    """Build and return an authenticated Google Drive v3 API client.

    Credentials come from the service-account key file configured at
    module level (SERVICE_ACCOUNT_FILE) with the module-level SCOPES.
    """
    creds = service_account.Credentials.from_service_account_file(
        SERVICE_ACCOUNT_FILE, scopes=SCOPES)
    return build('drive', 'v3', credentials=creds)

def process_files() -> None:
    """Poll UPLOAD_FOLDER forever and process every .xlsx file found.

    Runs as a daemon thread (started at module bottom). A failure on one
    file is logged and the loop moves on to the next file / next poll.
    """
    while True:
        for filename in os.listdir(UPLOAD_FOLDER):
            if not filename.endswith('.xlsx'):
                continue
            file_path = os.path.join(UPLOAD_FOLDER, filename)
            try:
                get_details_from_youtube(file_path)
            except Exception:
                # Bug fix: the old message printed the literal "(unknown)"
                # instead of the failing file's name; logger.exception also
                # records the traceback, which print() discarded.
                logger.exception("Error processing file %s", filename)
        time.sleep(10)  # Wait for 10 seconds before checking the folder again


def validate_upload_file(df):
    """Validate the rows of an uploaded search-request spreadsheet.

    Args:
        df: DataFrame read from the uploaded .xlsx file. Columns consulted:
            SearchType, ChannelId, MaxResults, MaxResultsPerPlaylists.

    Returns:
        list[str]: human-readable error messages, one per violation;
        empty list when every row is valid.
    """
    errors = []
    for index, row in df.iterrows():
        search_type = row['SearchType'] if pd.notna(row['SearchType']) else None
        channel_id = row['ChannelId'] if pd.notna(row['ChannelId']) else None
        # Defaults mirror get_details_from_youtube's expectations.
        max_videos_per_playlist = row['MaxResultsPerPlaylists'] if pd.notna(row['MaxResultsPerPlaylists']) else 50
        max_results = row['MaxResults'] if pd.notna(row['MaxResults']) else 1000

        # Validate search type
        if search_type not in ['Videos', 'Playlists', 'AllChannelVideos']:
            errors.append(f"Invalid SearchType at row {index+1}. Must be 'Videos', 'Playlists', or 'AllChannelVideos'.")

        # Validate channel_id for AllChannelVideos
        if search_type == 'AllChannelVideos' and not channel_id:
            errors.append(f"ChannelId is required for AllChannelVideos at row {index+1}.")

        # Bug fix: pandas yields numpy scalars (numpy.int64, or numpy.float64
        # when the column contains blanks), which are NOT instances of the
        # built-in int — the old isinstance(..., int) check rejected every
        # numeric cell read from a real spreadsheet. Accept any value that is
        # an integral number greater than zero.
        if not _is_positive_integer(max_results):
            errors.append(f"Invalid MaxResults at row {index+1}. Must be a positive integer.")

        if not _is_positive_integer(max_videos_per_playlist):
            errors.append(f"Invalid MaxResultsPerPlaylists at row {index+1}. Must be a positive integer.")

    return errors


def _is_positive_integer(value):
    """Return True when *value* represents an integral number > 0."""
    try:
        as_float = float(value)
    except (TypeError, ValueError):
        return False
    return as_float > 0 and as_float.is_integer()

# Thread-local storage for quota tracking: each worker thread accumulates
# its own running total of YouTube API quota units consumed.
thread_local = threading.local()

def get_details_from_youtube(file_path: str) -> None:
    """Process one uploaded search spreadsheet end-to-end.

    For every row of the Excel file at *file_path*: build a YouTube search
    query, fetch metadata via get_video_metadata(), and record per-row
    status, progress percentage and quota usage in the database. The
    combined results are written to an .xlsx in 'responseFolder' and
    uploaded to Google Drive via upload_file(). The source file is removed
    when processing ends (success or failure).

    The search id is recovered from the filename, which process_excel()
    writes as "<searchId>_<originalname>.xlsx".

    Raises:
        HttpError: re-raised after recording a quota-exhaustion (403) error.
    """
    connection = connect_to_database()
    df = pd.read_excel(file_path)
    total_records = len(df)
    timestamp = datetime.datetime.now()
    stopStatus=""
    print(timestamp)
    global USERNAME

    # Filename format is "<searchId>_<originalname>"; the leading segment
    # is the database id of this search request.
    parts = os.path.basename(file_path).split('_')
    searchId = int(parts[0])

    updateStatus(connection, searchId, 'In-Progress')

    # Initialize thread-local quota if not already done
    if not hasattr(thread_local, 'total_quota_used'):
        thread_local.total_quota_used = 0
    try:
        all_responses = []
    
        # Iterate over the rows and print each row
        for index, row in df.iterrows():
            # Re-read the status each row so a user-initiated "Stopped"
            # takes effect between requests.
            stopStatus = selectStatus(connection, searchId)
            print("stopstatus", stopStatus)
            if stopStatus != "Stopped":
                search_type = row['SearchType'] if pd.notna(row['SearchType']) else None
                board = row['Board'] if pd.notna(row['Board']) else None
                language = row['Language'] if pd.notna(row['Language']) else None
                standard = row['Standard'] if pd.notna(row['Standard']) else None
                subject = row['Subject'] if pd.notna(row['Subject']) else None
                topic = row['Topic'] if pd.notna(row['Topic']) else None
                subtopic = row['SubTopic'] if pd.notna(row['SubTopic']) else None
                freeTextSearch = row['FreeTextSearch'] if pd.notna(row['FreeTextSearch']) else None
                max_videos_per_playlist = row['MaxResultsPerPlaylists'] if pd.notna(row['MaxResultsPerPlaylists']) else None
                channel_id = row['ChannelId'] if pd.notna(row['ChannelId']) else None
                video_license = row['LicenceType'] if pd.notna(row['LicenceType']) else None
                video_duration = row['VideoDuration'] if pd.notna(row['VideoDuration']) else None
                max_results = row['MaxResults'] if pd.notna(row['MaxResults']) else None
                search_criteria = ""

                if pd.notna(freeTextSearch):
                    # FreeTextSearch overrides everything — use it as-is
                    search_criteria = freeTextSearch
                else:
                    # Build a natural query: Language Board Class <N> Subject Topic SubTopic
                    # e.g. "Telugu NCERT Class 9 Maths Algebra Quadratic Equations"
                    if pd.notna(language):
                        search_criteria += language + " "
                    if pd.notna(board):
                        search_criteria += board + " "
                    if pd.notna(standard):
                        search_criteria += "Class " + str(int(standard)) + " "
                    if pd.notna(subject):
                        search_criteria += subject + " "
                    if pd.notna(topic):
                        search_criteria += topic + " "
                    if pd.notna(subtopic):
                        search_criteria += subtopic
                    search_criteria = search_criteria.strip()

                logger.info(f"Built search_criteria: '{search_criteria}'")

                # Persist the criteria row first so its status can be updated
                # as the request progresses.
                searchRowId = insertQuerySearchCriteria(connection, search_type, language, standard, subject, topic, subtopic, freeTextSearch, max_results, max_videos_per_playlist, channel_id, video_license, video_duration, "Started", searchId)
            
                print("Request starting")
                try:
                    response_df, quotaUsed = get_video_metadata(search_type, search_criteria, max_results, max_videos_per_playlist, channel_id, video_license, video_duration)
                    print("Response data received")
                    updateRowStatus(connection, "Completed", searchRowId)
                except ValueError as ve:
                    logger.exception(f"ValueError: {ve}")
                    updateRowStatus(connection, "Failed", searchRowId)
                    continue
                except HttpError as he:
                    # 403 + quotaExceeded means the daily YouTube API quota
                    # is gone; other HTTP errors are plain row failures.
                    logger.exception(f"HttpError: {he}")
                    if he.resp.status == 403 and 'quotaExceeded' in str(he):
                        updateRowStatus(connection, "Quota Exhausted", searchRowId)
                    else:
                        updateRowStatus(connection, "Failed", searchRowId)
                    continue
                except Exception as e:
                    logger.exception(f"Unexpected error processing row {index+1}: {e}")
                    updateRowStatus(connection, "Failed", searchRowId)
                    continue
                completePercent = ((index + 1) / total_records) * 100
                print(total_records)
                print("searchId:", searchId)
                updateProgressPercentage(connection, completePercent, searchId)
                
                # Update the thread-local quota
                thread_local.total_quota_used += quotaUsed
                print(f"Quota used for this request: {quotaUsed}. Total quota used: {thread_local.total_quota_used}")

                all_responses.append(response_df)

        # Handle case where no videos passed filtering
        if not all_responses:
            logger.warning("No videos passed filtering - creating empty result file")
            final_df = pd.DataFrame()
        else:
            final_df = pd.concat(all_responses, ignore_index=True)
        # Result filename derived from the start timestamp, e.g.
        # "2024-01-01_12-30-00.xlsx" ([:19] trims sub-second precision).
        response_file = str(timestamp).replace(" ", "_").replace(":", "-")[:19] + '.xlsx'
        
        # Define the file name and path
        directory = 'responseFolder'
        file_path_result = os.path.join(directory, response_file)
        
        # Ensure the directory exists
        os.makedirs(directory, exist_ok=True)

        # Save the DataFrame to an Excel file
        final_df.to_excel(file_path_result, index=False)
        upload_file(searchId, file_path_result, searchRowId)
        end_time = datetime.datetime.now()
        # divmod on .seconds gives (minutes, seconds); note .seconds ignores
        # whole days of elapsed time.
        minutes = divmod((end_time - timestamp).seconds, 60) 

        print('Total difference in minutes: ', minutes[0], 'mins', minutes[1], 'sec')
        difference = str(minutes[0]) + ' mins ' + str(minutes[1]) + ' sec'
        if(stopStatus!="Stopped"):
            updateExecTimeAndQuota(connection, difference, thread_local.total_quota_used,"Completed",searchId)
        if(stopStatus=="Stopped"):
            updateExecTimeAndQuota(connection, difference, thread_local.total_quota_used,"Stopped",searchId)   
    except HttpError as he:
        logger.exception(f"HttpError: {he}")
        if he.resp.status == 403 and 'quotaExceeded' in str(he):
            logger.exception(f"Quota exceeded")
            updateStatus(connection,searchId,"Quota Exhausted")
            # NOTE(review): if the failure happens before the first loop
            # iteration assigns searchRowId, this line raises NameError —
            # confirm whether that path can occur in practice.
            updateRowStatus(connection, "Failed", searchRowId)
        raise
    except Exception as e:
        logger.exception(f"Unexpected error: {e}")
        updateStatus(connection,searchId,"Failed")
        # NOTE(review): same potential NameError on searchRowId as above.
        updateRowStatus(connection, "Failed", searchRowId)
    finally:
        # Remove the source spreadsheet so the polling thread does not
        # pick it up again. NOTE(review): `connection` is never closed here.
        if os.path.exists(file_path):  # Check if the file exists
            os.remove(file_path)  # Remove the file
        else:
            logger.exception("File not found, could not remove: %s", file_path)  # Log if not found


def get_folder_id(service, folder_name):
    """Return the Drive file ID of the first non-trashed folder named *folder_name*.

    Args:
        service: authenticated Google Drive v3 API client.
        folder_name: exact folder name to look up.

    Returns:
        The folder's ID string, or None when no matching folder exists.
    """
    # Bug fix: escape backslashes and single quotes so a folder name cannot
    # break out of the quoted query term (Drive search-query syntax requires
    # \' and \\ inside single-quoted values).
    safe_name = folder_name.replace('\\', '\\\\').replace("'", "\\'")
    query = (
        f"name='{safe_name}' and "
        "mimeType='application/vnd.google-apps.folder' and trashed=false"
    )
    results = service.files().list(q=query, fields='files(id, name)').execute()
    items = results.get('files', [])
    if not items:
        return None
    return items[0]['id']

def upload_file(searchId: int, filePath: str, searchRowId: int):
    """Upload the result Excel file at *filePath* to the Drive "RKSS" folder.

    On success, records the uploaded file's Drive ID and name against
    *searchId* in the search_requests table and deletes the local file.
    On any failure, marks both the request (*searchId*) and the criteria
    row (*searchRowId*) as Failed and deletes the local file.

    Args:
        searchId: id of the parent search request row.
        filePath: local path of the .xlsx file to upload (deleted afterwards).
        searchRowId: id of the last search-criteria row, used for status updates.
    """
    print("Uploading file to Google drive")
    print("rowid", searchId)
    print("filePath", filePath)
    connection = connect_to_database()
    if not searchId:
        print('Error: Row ID is required')
        os.remove(filePath)
        updateRowStatus(connection, "Failed", searchRowId)
        return
    if not filePath:
        print('Error: FilePath is required')
        updateRowStatus(connection, "Failed", searchRowId)
        updateStatus(connection,searchId,"Failed")
        return
    try:
        service = get_drive_service()
        # Results always go into the pre-existing shared "RKSS" folder.
        folder_id = get_folder_id(service, "RKSS")
        if not folder_id:
            print('Error: Folder not found')
            os.remove(filePath)
            updateRowStatus(connection, "Failed", searchRowId)
            updateStatus(connection,searchId,"Failed")
            return

        print("folder", folder_id)

        file_metadata = {'name': os.path.basename(filePath), 'parents': [folder_id]}
        print(file_metadata)
        media = MediaFileUpload(
            filePath, mimetype='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
        uploaded_file = service.files().create(
            body=file_metadata, media_body=media, fields='id, name').execute()
        file_id = uploaded_file.get('id')
        file_name = uploaded_file.get('name')
        print("fileId", file_id)
        if not file_id or not file_name:
            print('Error: Failed to upload file to Google Drive')
            os.remove(filePath)
            updateStatus(connection,searchId,"Failed")
            updateRowStatus(connection, "Failed", searchRowId)
            return

        print("File Uploaded successfully")

        # Update file ID and name in MySQL database
        cursor = connection.cursor()
        update_query = "UPDATE search_requests SET OutputFileId = %s, OutputFileName = %s WHERE SearchID = %s"
        cursor.execute(update_query, (file_id, file_name, searchId))
        connection.commit()
        cursor.close()
        connection.close()
        os.remove(filePath)

    except Exception as e:
        # NOTE(review): if the exception fires after os.remove() above has
        # already run, this second os.remove raises FileNotFoundError and
        # masks the original error — confirm intended. Also, `connection`
        # is only closed on the success path.
        updateRowStatus(connection, "Failed", searchRowId)
        updateStatus(connection,searchId,"Failed")
        os.remove(filePath)
        print('Error Processing the file', e)


@app.route('/api/process_excel', methods=['POST'])
def process_excel():
    """Accept an uploaded .xlsx search-request file and queue it for processing.

    The file is saved to TEMP_FOLDER, parsed and validated, registered in the
    database, then moved into UPLOAD_FOLDER where the background
    process_files() thread picks it up.

    Returns:
        JSON response: 200 on success, 400 on client errors, 500 on DB failure.
    """
    connection = connect_to_database()

    if 'file' not in request.files:
        return jsonify({'error': 'No file found'}), 400

    file = request.files['file']

    if file.filename == '':
        return jsonify({'error': 'No file is selected'}), 400
    global USERNAME
    USERNAME = request.form.get('username')

    # Guard clause replaces the original if/else pyramid.
    if not (file and file.filename.endswith('.xlsx')):
        return jsonify({'error': 'Unsupported file format. Please upload an Excel file (.xlsx)'}), 400

    timestamp = datetime.datetime.now()
    searchId = insertQuerySearchRequests(connection, file.filename, 0, str(timestamp), USERNAME)
    if searchId is None:
        return jsonify({'error': 'Failed to insert search request into database'}), 500

    # Generate the new filename with the search_id so the background worker
    # can recover the id from the filename alone.
    new_filename = f"{searchId}_{file.filename}"

    temp_file_path = os.path.join(TEMP_FOLDER, new_filename)
    file.save(temp_file_path)
    try:
        # Bug fix: parse the saved temp file. The original called
        # pd.read_excel(file) on the request stream AFTER file.save() had
        # already consumed it, so the stream was at EOF and every upload
        # failed as "template is not supported".
        df = pd.read_excel(temp_file_path)
    except Exception:
        os.remove(temp_file_path)
        return jsonify({'error': 'File template is not supported'}), 400

    print("Length", str(len(df)))
    if len(df) == 0:
        os.remove(temp_file_path)
        # Bug fix: this error previously returned an implicit HTTP 200.
        return jsonify({'error': 'Empty file uploaded'}), 400

    # Validate the upload file
    errors = validate_upload_file(df)
    if errors:
        os.remove(temp_file_path)
        return jsonify({'error': 'File validation failed', 'details': errors}), 400

    # Record the prefixed filename, then move into the watched folder.
    update_filename(connection, searchId, new_filename)
    final_file_path = os.path.join(UPLOAD_FOLDER, new_filename)
    os.rename(temp_file_path, final_file_path)
    return jsonify({'message': 'File uploaded successfully', 'file_name': file.filename}), 200

# Start the background daemon that watches UPLOAD_FOLDER for new spreadsheets.
threading.Thread(target=process_files, daemon=True).start()