import os
from datetime import datetime
from typing import Dict, List, Union
from fastapi import FastAPI, HTTPException
from g4f.client.service import get_last_provider
from pydantic import BaseModel
from g4f.client import Client
from fastapi.middleware.cors import CORSMiddleware
from youtube_transcript_api import YouTubeTranscriptApi
from langdetect import detect, DetectorFactory
from yt_dlp import YoutubeDL
import httpx
import time
import re


FIXME_KEYS = [
    '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',
    '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',
    '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',
    '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',
]

# Application URL; override via the APPLICATION_URL environment variable
APPLICATION_URL = os.environ.get("APPLICATION_URL", "http://localhost:8000")
# OpenRouter configuration; prefer the OPENROUTER_API_KEY environment variable over the hardcoded fallback
OPENROUTER_API_KEY = os.environ.get("OPENROUTER_API_KEY", FIXME_KEYS[1])
OPENROUTER_API_URL = "https://openrouter.ai/api/v1/chat/completions"
OPENROUTER_REFERER = APPLICATION_URL  # Your application's URL, sent in the HTTP-Referer header

from MyUtils import debug
import YoutubeParser

# Set a seed for consistent language detection
DetectorFactory.seed = 0

# Constants
DOWNLOADS_FOLDER = os.path.join(os.path.expanduser("~"), "Downloads")
LANGUAGE_CODES = {
    "english": "en",
    "russian": "ru"
}

# FastAPI setup
app = FastAPI()

# CORS middleware configuration
app.add_middleware(
    CORSMiddleware,
    allow_origins=["app://obsidian.md", "http://127.0.0.1", "http://localhost"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)


# Request Models
class BaseRequest(BaseModel):
    model: str


class TextRequest(BaseModel):
    message: str
    files: List[dict] = []  # List of {name: str, content: str}
    model: str
    text_type: str
    language: str


class ImageRequest(BaseRequest):
    prompt: str


class TranscriptRequest(BaseModel):
    video_url: str
    language: str = "en"


class SummaryRequest(BaseRequest):
    video_url: str
    prompt: str


# YouTube Data Handler
class YouTubeDataHandler:
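    """Fetches video metadata via yt-dlp and transcripts via youtube_transcript_api."""
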
    def __init__(self):
        self.ydl_opts = {
            'quiet': True,
            'no_warnings': True,
            'extract_flat': True,
        }
        self.parser = YoutubeParser.YoutubeParser()

    def extract_video_data(self, video_id: str) -> Dict:
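        """Return basic metadata for the given video ID using a flat (metadata-only) yt-dlp extraction."""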
        debug(f"Starting video data extraction for ID: {video_id}", "NOTIFICATION")
        start_time = time.time()

        try:
            with YoutubeDL(self.ydl_opts) as ydl:
                info = ydl.extract_info(f"https://www.youtube.com/watch?v={video_id}", download=False)

            video_data = {
                'title': info.get('title'),
                'channel': info.get('uploader'),
                'channel_id': info.get('channel_id'),
                'description': info.get('description'),
                'duration': info.get('duration'),
                'view_count': info.get('view_count'),
                'like_count': info.get('like_count'),
                'upload_date': info.get('upload_date'),
                'thumbnail': info.get('thumbnail'),
                'tags': info.get('tags', []),
            }

            debug(f"Video data extracted in {time.time() - start_time:.2f} seconds", "NOTIFICATION")
            return video_data

        except Exception as e:
            debug(f"Error extracting video data: {str(e)}", "ERROR")
            raise HTTPException(status_code=500, detail=f"Video data extraction error: {str(e)}")

    def fetch_transcript(self, video_id: str, language: str, include_timestamps: bool = False) -> Union[str, List[Dict]]:
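        """Fetch the transcript for a video: the raw timestamped entries when include_timestamps is True,
        otherwise the entries joined into a single plain-text string."""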
        debug(f"Fetching transcript for video {video_id} in language {language}", "NOTIFICATION")
        start_time = time.time()

        try:
            lang_code = LANGUAGE_CODES.get(language.lower(), language)
            transcript = YouTubeTranscriptApi.get_transcript(video_id, languages=[lang_code])
            debug(f"Transcript fetched in {time.time() - start_time:.2f} seconds", "NOTIFICATION")

            return transcript if include_timestamps else " ".join(entry['text'] for entry in transcript)

        except Exception as e:
            debug(f"Error fetching transcript: {str(e)}", "ERROR")
            raise HTTPException(status_code=500, detail=f"Transcript error: {str(e)}")

    def save_transcript(self, video_id: str, transcript: str) -> str:
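        """Write the transcript to ~/Downloads/transcript_<video_id>.txt and return the file path."""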
        file_name = f"transcript_{video_id}.txt"
        file_path = os.path.join(DOWNLOADS_FOLDER, file_name)

        with open(file_path, 'w', encoding='utf-8') as file:
            file.write(str(transcript))
            debug(f"Transcript saved to {file_path}", "NOTIFICATION")

        return file_path


# AI Client Handler
class AIClientHandler:
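    """Routes text generation to OpenRouter (HTTP API) or the g4f client, and image generation to g4f."""
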
    def __init__(self):
        self.client = Client()
        # Add async HTTP client for OpenRouter
        self.http_client = httpx.AsyncClient(timeout=60.0)

    async def generate_text(self, message: str, model: str) -> str:
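        """Generate text with the requested model, routing 'openrouter/'-prefixed (and selected hosted)
        models to OpenRouter and all other models to the g4f client."""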
        debug(f"Attempting text generation with model: {model}", "NOTIFICATION")

        # Check if we should use OpenRouter based on model prefix or specific models
        if model.startswith("openrouter/") or model in ["openai/gpt-4", "anthropic/claude-3-opus"]:
            return await self._generate_text_openrouter(message, model)
        else:
            # Get the completions instance
            completions = self.client.chat.completions

            response = completions.create(
                model=model,
                messages=[{"role": "user", "content": message}]
            )

            # Get the actual provider used
            last_provider = get_last_provider(as_dict=True)
            if last_provider:
                debug("Provider details:", "NOTIFICATION")
                debug(f"- Name: {last_provider.get('name')}", "NOTIFICATION")
                debug(f"- URL: {last_provider.get('url')}", "NOTIFICATION")
                debug(f"- Model: {last_provider.get('model')}", "NOTIFICATION")
                debug(f"- Label: {last_provider.get('label')}", "NOTIFICATION")

            # Log response details
            debug(f"Response details:", "NOTIFICATION")
            debug(f"- ID: {response.id}", "NOTIFICATION")
            debug(f"- Created: {response.created}", "NOTIFICATION")
            debug(f"- Model: {response.model}", "NOTIFICATION")
            debug(f"- Provider: {response.provider}", "NOTIFICATION")

            return response.choices[0].message.content

    async def _generate_text_openrouter(self, message: str, model: str) -> str:
        """Generate text using OpenRouter API"""
        start_time = time.time()
        debug(f"Using OpenRouter for model: {model}", "NOTIFICATION")

        # Remove 'openrouter/' prefix if present
        if model.startswith("openrouter/"):
            model = model[len("openrouter/"):]

        # Prepare the request payload
        payload = {
            "model": model,
            "messages": [{"role": "user", "content": message}],
            "temperature": 0.7,  # Adjust as needed
            "max_tokens": 2000  # Adjust as needed
        }

        # Prepare the headers with API key
        headers = {
            "Authorization": f"Bearer {OPENROUTER_API_KEY}",
            "HTTP-Referer": OPENROUTER_REFERER,  # Required by OpenRouter
            "Content-Type": "application/json"
        }

        try:
            # Make the API call
            response = await self.http_client.post(
                OPENROUTER_API_URL,
                json=payload,
                headers=headers
            )
            response.raise_for_status()

            # Process the response
            result = response.json()

            # Log details similar to your existing code
            debug(f"OpenRouter response details:", "NOTIFICATION")
            debug(f"- ID: {result.get('id')}", "NOTIFICATION")
            debug(f"- Model: {result.get('model')}", "NOTIFICATION")

            provider = result.get('provider', '')
            if isinstance(provider, dict) and 'name' in provider:
                debug(f"- Provider: {provider['name']}", "NOTIFICATION")
            else:
                debug(f"- Provider: {provider}", "NOTIFICATION")

            # Extract the generated text
            content = result["choices"][0]["message"]["content"]

            debug(f"Text generation completed in {time.time() - start_time:.2f} seconds", "NOTIFICATION")
            return content

        except Exception as e:
            debug(f"Error in OpenRouter text generation: {str(e)}", "ERROR")
            raise HTTPException(status_code=500, detail=f"OpenRouter text generation error: {str(e)}")

    async def generate_image(self, prompt: str, model: str) -> str:
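        """Generate an image with the g4f client and return the URL of the result."""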
        debug(f"Attempting image generation with model: {model}", "NOTIFICATION")

        response = self.client.images.generate(
            model=model,
            prompt=prompt,
            response_format="url"
        )

        # Get the actual provider used
        last_provider = get_last_provider(as_dict=True)
        if last_provider:
            debug("Provider details:", "NOTIFICATION")
            debug(f"- Name: {last_provider.get('name')}", "NOTIFICATION")
            debug(f"- URL: {last_provider.get('url')}", "NOTIFICATION")
            debug(f"- Model: {last_provider.get('model')}", "NOTIFICATION")
            debug(f"- Label: {last_provider.get('label')}", "NOTIFICATION")

        return response.data[0].url


# Shared handler instances
youtube_handler = YouTubeDataHandler()
ai_handler = AIClientHandler()


# API Endpoints
@app.post("/get-transcript")
async def get_transcript(request: TranscriptRequest):
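    """Return video metadata and the transcript for a YouTube URL.

    Example payload (values are illustrative):
    {"video_url": "https://www.youtube.com/watch?v=...", "language": "english"}
    """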
    debug(f"Received transcript request for video {request.video_url}", "NOTIFICATION")
    start_time = time.time()

    try:
        video_id = youtube_handler.parser.extract_info(request.video_url)['video_id']
        video_data = youtube_handler.extract_video_data(video_id)
        transcript = youtube_handler.fetch_transcript(video_id, request.language)
        youtube_handler.save_transcript(video_id, transcript)

        debug(f"Total request processed in {time.time() - start_time:.2f} seconds", "NOTIFICATION")
        return {"video_data": video_data, "transcript": transcript}

    except Exception as e:
        debug(f"Error processing request: {str(e)}", "ERROR")
        raise HTTPException(status_code=500, detail=str(e))


@app.post("/generate-text")
async def generate_text(request: TextRequest):
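    """Generate text from a message plus optional context files, returning the output and provider metadata.

    Example payload (values are illustrative):
    {"message": "Summarize these notes", "files": [{"name": "note.md", "content": "..."}],
     "model": "openai/gpt-4", "text_type": "note", "language": "english"}
    """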
    debug(f"Received text generation request", "NOTIFICATION")
    start_time = time.time()
    request_time = datetime.now().isoformat()

    try:
        # Process each file's content directly
        files_content = []
        for file_info in request.files:
            if file_info.get("content"):
                files_content.append(f"// {file_info['name']} contents:\n{file_info['content']}\n")
                debug(f"Added file content for: {file_info['name']}", "NOTIFICATION")
            else:
                debug(f"No content provided for file: {file_info['name']}", "WARNING")

        # Combine files content with the message
        if files_content:
            context_content = "\n".join(files_content)
            full_message = f"{request.message}\n\nContext Files:\n{context_content}"
            debug(f"Added {len(files_content)} files to context", "NOTIFICATION")
        else:
            full_message = request.message
            debug("No context files were added", "NOTIFICATION")

        # Ask the model to reply in the requested language
        language_prompt = f"\nAlways answer in {request.language}."
        full_message += language_prompt

        output = await ai_handler.generate_text(full_message, request.model)

        # Check if we used OpenRouter or G4F
        if request.model.startswith("openrouter/") or request.model in ["openai/gpt-4", "anthropic/claude-3-opus"]:
            # For OpenRouter, we can't get the provider details the same way
            # Create metadata directly
            provider_metadata = {
                "provider_name": "OpenRouter",
                "provider_url": "https://openrouter.ai",
                "actual_model": request.model,
                "request_time": request_time,
                "completion_time": datetime.now().isoformat(),
                "elapsed_time": f"{time.time() - start_time:.2f}"
            }
        else:
            # Use the G4F last_provider details (guard against None if no provider was recorded)
            last_provider = get_last_provider(as_dict=True) or {}
            provider_metadata = {
                "provider_name": last_provider.get('name'),
                "provider_url": last_provider.get('url'),
                "actual_model": last_provider.get('model'),
                "request_time": request_time,
                "completion_time": datetime.now().isoformat(),
                "elapsed_time": f"{time.time() - start_time:.2f}"
            }

        elapsed_time = time.time() - start_time
        debug(f"Text generation completed in {elapsed_time:.2f} seconds", "NOTIFICATION")

        return {
            "output": output,
            "metadata": provider_metadata
        }

    except Exception as e:
        debug(f"Error in text generation: {str(e)}", "ERROR")
        raise HTTPException(status_code=500, detail=f"Text generation error: {str(e)}")

@app.post("/generate-image")
async def generate_image(request: ImageRequest):
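    """Generate an image from a prompt and return its URL.

    Example payload (values are illustrative): {"prompt": "a watercolor fox", "model": "flux"}
    """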
    debug("Received image generation request", "NOTIFICATION")
    start_time = time.time()

    try:
        image_url = await ai_handler.generate_image(request.prompt, request.model)
        debug(f"Image generation completed in {time.time() - start_time:.2f} seconds", "NOTIFICATION")
        return {"image_url": image_url}

    except Exception as e:
        debug(f"Error in image generation: {str(e)}", "ERROR")
        raise HTTPException(status_code=500, detail=f"Image generation error: {str(e)}")


@app.post("/generate-summary")
async def generate_summary(request: SummaryRequest):
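    """Summarize a YouTube video's transcript using the given prompt, extracting any leading tags.

    Example payload (values are illustrative):
    {"video_url": "https://www.youtube.com/watch?v=...", "prompt": "Summarize the key points", "model": "openai/gpt-4"}
    """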
    debug("Received summary generation request", "NOTIFICATION")
    start_time = time.time()

    try:
        video_info = youtube_handler.parser.extract_info(request.video_url)
        transcript = youtube_handler.fetch_transcript(video_info['video_id'], 'en')

        summary_text = await ai_handler.generate_text(
            f"{request.prompt}\n\nTranscript:\n{transcript}",
            request.model
        )

        # Extract tags from the summary
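        # Assumes the model was asked to start its answer with a line like "tags: tag1, tag2, tag3;";
        # everything up to the first ';' is stripped from the summary once the tags are captured.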
        tags = []
        if 'tags:' in summary_text.lower():
            tags_match = re.search(r'tags:(.*?);', summary_text.lower())
            if tags_match:
                tags = [tag.strip() for tag in tags_match.group(1).split(',')]
                summary_text = summary_text[summary_text.find(';') + 1:].strip()

        debug(f"Summary generation completed in {time.time() - start_time:.2f} seconds", "NOTIFICATION")
        return {"summary": summary_text, "tags": tags}

    except Exception as e:
        debug(f"Error in summary generation: {str(e)}", "ERROR")
        raise HTTPException(status_code=500, detail=f"Summary generation error: {str(e)}")


if __name__ == "__main__":
    debug("Starting server...", "NOTIFICATION")
    import uvicorn

    uvicorn.run(app, host="127.0.0.1", port=8000)