import json
import asyncio
import time
import base64
import os
import uuid
from datetime import datetime, timezone
from fastapi import APIRouter, WebSocket, WebSocketDisconnect
import openai
from elevenlabs import Voice
from elevenlabs.client import AsyncElevenLabs

from sqlalchemy import create_engine, Column, String, Integer, Text, DateTime
from sqlalchemy.orm import declarative_base, sessionmaker
from sqlalchemy.dialects.postgresql import UUID as pg_UUID

from config import settings
from rag_manager import search_knowledge_base

router = APIRouter()

# Module-level singletons shared by every call session handled by this router.
openai_client = openai.AsyncOpenAI(api_key=settings.OPENAI_API_KEY)
eleven_client = AsyncElevenLabs(api_key=settings.ELEVENLABS_API_KEY)

# Synchronous SQLAlchemy engine/session factory; sessions are opened and
# closed per-operation inside the endpoint (no request-scoped dependency).
engine = create_engine(settings.DATABASE_URL)
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
Base = declarative_base()

class ProjectSummary(Base):
    """Minimal read-only mapping of the `projects` table.

    Only the columns this module needs to configure a test call are mapped:
    the ElevenLabs voice to speak with and the target call duration.
    """
    __tablename__ = "projects"
    id = Column(pg_UUID(as_uuid=True), primary_key=True)
    voice_id = Column(String(100))  # ElevenLabs voice id; NULL -> fall back to default
    test_duration_seconds = Column(Integer)  # configured call length; NULL -> default

class TestReportSummary(Base):
    """Mapping of the `test_reports` table: one row per completed test call.

    Written once at the end of `voice_call_endpoint` after the LLM has
    evaluated the transcript.
    """
    __tablename__ = "test_reports"
    id = Column(pg_UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
    user_id = Column(pg_UUID(as_uuid=True))      # agent (human) under test
    project_id = Column(pg_UUID(as_uuid=True))   # project the call belongs to
    test_date_time = Column(DateTime(timezone=True), default=lambda: datetime.now(timezone.utc))
    duration_seconds = Column(Integer)           # measured wall-clock call length
    rating = Column(Integer)                     # 1-10 score produced by the evaluator LLM
    ai_statement_short = Column(String(50))      # short quality tag, truncated to 50 chars
    ai_statement_full = Column(Text)             # problem statement + full transcript
    recording_path = Column(Text)                # local path of the uploaded call audio, if any
    created_at = Column(DateTime(timezone=True), default=lambda: datetime.now(timezone.utc))

# Persona prompt for the AI "customer". The `{knowledge_base}` placeholder is
# substituted via str.replace() (not str.format()) inside the endpoint, so
# literal braces in the retrieved knowledge-base text cannot break formatting.
SYSTEM_PROMPT = """You are a CUSTOMER calling a business.
The person you are speaking to is the CUSTOMER SERVICE AGENT.
STRICT RULES:
1. YOU ARE THE CUSTOMER. Never act like the agent. Do not offer to help.
2. Never output prefixes like "Customer:" or "Agent:". Just say the words you want to speak.
3. Use the Knowledge Base below to ask natural questions. 
4. Be brief (1-2 sentences).

KNOWLEDGE BASE:
{knowledge_base}
"""

# Post-call evaluation prompt. The `{transcript}` placeholder is substituted
# via str.replace(); the model is asked for a strict JSON object whose keys
# map onto TestReportSummary columns (rating / quality_tag / problem_statement).
EVALUATION_PROMPT = """You are an expert QA evaluator for call centers. Review the following call transcript where a human AGENT is speaking to an AI CUSTOMER.
Provide a JSON response with the following keys:
1. "rating": An integer from 1 to 10 evaluating the HUMAN AGENT's performance.
2. "quality_tag": A short 2-3 word tag (e.g., "Rude", "Helpful", "Lacked Knowledge").
3. "problem_statement": A text string explaining what the agent did well and what they did wrong.
Transcript:
{transcript}
"""

# Fallbacks used when the project row is missing or incomplete.
_DEFAULT_VOICE_ID = "EXAVITQu4vr4xnSDxMaL"
_DEFAULT_DURATION_SECONDS = 180
# Client-side placeholder sent when the human agent says nothing; such turns
# are fed to the LLM but excluded from the evaluated transcript.
_SILENCE_MARKER = "(Agent is silent"


def _load_call_settings(project_id: str):
    """Return (voice_id, target_duration_seconds) for *project_id*.

    Falls back to the module defaults when the project row is missing, a
    field is NULL/empty, or the lookup fails entirely (DB down, bad id).
    """
    db = SessionLocal()
    try:
        project = db.query(ProjectSummary).filter(ProjectSummary.id == project_id).first()
        voice_id = project.voice_id if project and project.voice_id else _DEFAULT_VOICE_ID
        duration = (project.test_duration_seconds
                    if project and project.test_duration_seconds
                    else _DEFAULT_DURATION_SECONDS)
        return voice_id, duration
    except Exception:
        return _DEFAULT_VOICE_ID, _DEFAULT_DURATION_SECONDS
    finally:
        db.close()


def _save_recording(audio_b64: str):
    """Decode base64 audio and persist it under uploads/recordings/.

    Returns the saved file path, or None when decoding/writing fails
    (best-effort: a failed upload must not kill the call).
    """
    try:
        file_name = f"{uuid.uuid4()}_recording.webm"
        file_path = os.path.join("uploads", "recordings", file_name)
        os.makedirs(os.path.dirname(file_path), exist_ok=True)
        with open(file_path, "wb") as f:
            f.write(base64.b64decode(audio_b64))
        return file_path
    except Exception as upload_err:
        print(f"WS Audio Save Error: {upload_err}")
        return None


@router.websocket("/ws/call/{project_id}/{user_id}")
async def voice_call_endpoint(websocket: WebSocket, project_id: str, user_id: str):
    """Run one simulated customer call over a WebSocket.

    Client -> server JSON events:
      * ``user_speech_text``: transcribed agent speech; triggers an LLM reply
        streamed back as binary audio chunks, then ``ai_speech_complete``.
      * ``audio_upload``: base64 recording of the call, persisted to disk and
        tied to this session's report.
      * ``end_call``: graceful end; client will receive status/evaluation events.

    After the loop ends (timeout, disconnect, or end_call) the transcript is
    scored by the evaluator LLM and a TestReportSummary row is written.

    NOTE: all best-effort sends use ``except Exception`` rather than a bare
    ``except`` so ``asyncio.CancelledError`` (a BaseException) still
    propagates and task cancellation keeps working.
    """
    await websocket.accept()

    active_voice_id, target_duration = _load_call_settings(project_id)

    chat_history = []      # OpenAI-format message dicts for the running dialogue
    full_transcript = []   # human-readable lines fed to the evaluator
    start_time = time.time()
    max_duration = target_duration + 20  # small grace period past the configured length
    is_generating = False  # drop overlapping speech while a reply is in flight

    recording_path = None
    call_ended_gracefully = False

    try:
        while True:
            if time.time() - start_time >= max_duration:
                try:
                    await websocket.send_json({"event": "call_ended", "reason": "duration_reached"})
                except Exception:
                    pass
                break

            try:
                # Short timeout so the duration cap above is re-checked even
                # when the client sends nothing.
                data = await asyncio.wait_for(websocket.receive_json(), timeout=2.0)
            except asyncio.TimeoutError:
                continue

            event = data.get("event")

            if event == "user_speech_text":
                if is_generating:
                    continue

                is_generating = True
                try:
                    # .get() may return None; normalise so the substring
                    # checks below cannot raise TypeError.
                    user_text = data.get("text") or ""
                    if _SILENCE_MARKER not in user_text:
                        full_transcript.append(f"Agent (Human): {user_text}")

                    chat_history.append({"role": "user", "content": f"Agent says: {user_text}"})

                    try:
                        kb_context = search_knowledge_base(project_id, user_text)
                        # str.replace (not .format) so braces in KB text are safe.
                        current_sys_prompt = SYSTEM_PROMPT.replace("{knowledge_base}", kb_context)
                        messages = [{"role": "system", "content": current_sys_prompt}] + chat_history

                        text_response = await openai_client.chat.completions.create(
                            model="gpt-4o",
                            messages=messages
                        )

                        # content can be None; strip role prefixes the model
                        # sometimes leaks despite the prompt rules.
                        ai_response_text = text_response.choices[0].message.content or ""
                        ai_response_text = ai_response_text.split("Agent:")[0].split("Customer:")[0].strip()

                        if not ai_response_text:
                            ai_response_text = "Could you clarify that?"

                        audio_stream = await eleven_client.generate(
                            text=ai_response_text,
                            voice=Voice(voice_id=active_voice_id),
                            model="eleven_turbo_v2",
                            stream=True
                        )

                        async for audio_chunk in audio_stream:
                            await websocket.send_bytes(audio_chunk)

                        await websocket.send_json({"event": "ai_speech_complete"})

                        chat_history.append({"role": "assistant", "content": ai_response_text})
                        if _SILENCE_MARKER not in user_text:
                            full_transcript.append(f"Customer (AI): {ai_response_text}")

                    except WebSocketDisconnect:
                        break
                    except Exception as ai_err:
                        # Best-effort error report; if even that send fails
                        # the socket is dead, so leave the loop.
                        try:
                            await websocket.send_json({"event": "error", "message": f"AI Error: {str(ai_err)}"})
                        except Exception:
                            break
                finally:
                    is_generating = False

            # Base64 audio arrives over this authenticated socket so the
            # recording is locked to this exact session.
            elif event == "audio_upload":
                audio_b64 = data.get("audio_b64")
                if audio_b64:
                    saved_path = _save_recording(audio_b64)
                    if saved_path:
                        recording_path = saved_path

            elif event == "end_call":
                call_ended_gracefully = True
                break

    except WebSocketDisconnect:
        pass
    except Exception as e:
        print(f"WebSocket Error: {e}")

    # --- SYNCHRONIZED EVALUATION BLOCK ---
    # Runs no matter how the call ended; only talks to the client when the
    # socket is still expected to be open (graceful end_call).
    duration = int(time.time() - start_time)
    transcript_text = "\n".join(full_transcript)
    if not transcript_text.strip():
        transcript_text = "(No conversation took place)"

    try:
        if call_ended_gracefully:
            try:
                await websocket.send_json({"event": "status_update", "step": "evaluating"})
            except Exception:
                pass

        eval_response = await openai_client.chat.completions.create(
            model="gpt-4o",
            response_format={"type": "json_object"},
            messages=[
                {"role": "system", "content": "You are a JSON outputting QA bot."},
                {"role": "user", "content": EVALUATION_PROMPT.replace("{transcript}", transcript_text)}
            ]
        )
        eval_data = json.loads(eval_response.choices[0].message.content)

        # The model occasionally returns a list for problem_statement.
        raw_problem = eval_data.get('problem_statement', 'N/A')
        if isinstance(raw_problem, list):
            raw_problem = " ".join(str(item) for item in raw_problem)
        else:
            raw_problem = str(raw_problem)

        final_statement = f"{raw_problem}\n\n--- TRANSCRIPT ---\n{transcript_text}"

        db = SessionLocal()
        try:
            new_report = TestReportSummary(
                user_id=user_id,
                project_id=project_id,
                test_date_time=datetime.now(timezone.utc),
                duration_seconds=duration,
                rating=int(eval_data.get('rating', 0)),
                ai_statement_short=str(eval_data.get('quality_tag', 'N/A'))[:50],
                ai_statement_full=final_statement,
                recording_path=recording_path
            )
            db.add(new_report)
            db.commit()
        except Exception as db_err:
            db.rollback()  # leave the session clean after a failed flush/commit
            print(f"DB Save Error: {db_err}")
        finally:
            db.close()

        if call_ended_gracefully:
            try:
                await websocket.send_json({"event": "evaluation_complete"})
                await websocket.close()
            except Exception:
                pass

    except Exception as eval_err:
        print(f"Eval Error: {eval_err}")
        if call_ended_gracefully:
            try:
                await websocket.close()
            except Exception:
                pass