import os
from datetime import datetime
from typing import Dict, List, Optional
from fastapi import FastAPI, HTTPException
from g4f.client.service import get_last_provider
from pydantic import BaseModel
from g4f.client import Client
from fastapi.middleware.cors import CORSMiddleware
from youtube_transcript_api import YouTubeTranscriptApi
from langdetect import detect, DetectorFactory
from yt_dlp import YoutubeDL
import time
import re



from MyUtils import debug
import YoutubeParser

# Set a seed for consistent language detection
# (langdetect is non-deterministic unless seeded; seed 0 makes detect() repeatable)
DetectorFactory.seed = 0

# Constants
# Transcripts are saved into the current user's Downloads folder.
DOWNLOADS_FOLDER = os.path.join(os.path.expanduser("~"), "Downloads")
# Maps human-readable language names to ISO 639-1 codes accepted by
# YouTubeTranscriptApi; unknown names are passed through unchanged (see
# YouTubeDataHandler.fetch_transcript).
LANGUAGE_CODES = {
    "english": "en",
    "russian": "ru"
}

# FastAPI setup
app = FastAPI()

# CORS middleware configuration
# Allows requests from the Obsidian desktop app and local development hosts.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["app://obsidian.md", "http://127.0.0.1", "http://localhost"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)


# Request Models
class BaseRequest(BaseModel):
    """Base payload for AI endpoints: carries the g4f model identifier."""

    model: str  # name of the AI model to route the request to


class TextRequest(BaseModel):
    """Payload for /generate-text: a prompt plus optional file context."""

    message: str  # the user's prompt
    files: List[dict] = []  # list of {name: str, content: str} — endpoint reads 'content' and 'name' keys
    model: str  # AI model name (duplicates BaseRequest.model — kept for compatibility)
    text_type: str  # NOTE(review): not read by the /generate-text handler in this file
    language: str  # language the model is instructed to answer in


class ImageRequest(BaseRequest):
    """Payload for /generate-image: an image-generation prompt."""

    prompt: str  # text prompt passed to the image model


class TranscriptRequest(BaseModel):
    """Payload for /get-transcript: a video URL and a transcript language."""

    video_url: str  # full YouTube URL; the video id is parsed out server-side
    language: str = "en"  # language name or ISO code (see LANGUAGE_CODES)


class SummaryRequest(BaseRequest):
    """Payload for /generate-summary: a video URL plus a summarization prompt."""

    video_url: str  # full YouTube URL of the video to summarize
    prompt: str  # instruction prepended to the transcript before generation


# YouTube Data Handler
class YouTubeDataHandler:
    """Fetches YouTube metadata and transcripts and saves transcripts to disk."""

    def __init__(self):
        # yt-dlp options: suppress console output; extract_flat skips resolving
        # nested entries (single watch URLs still return full video metadata).
        self.ydl_opts = {
            'quiet': True,
            'no_warnings': True,
            'extract_flat': True,
        }
        # Project-local URL parser; extract_info(url) yields at least 'video_id'.
        self.parser = YoutubeParser.YoutubeParser()

    def extract_video_data(self, video_id: str) -> Dict:
        """Return selected metadata fields for *video_id* via yt-dlp.

        Raises HTTPException(500) if extraction fails (e.g. network error,
        unavailable video).
        """
        debug(f"Starting video data extraction for ID: {video_id}", "NOTIFICATION")
        start_time = time.time()

        try:
            with YoutubeDL(self.ydl_opts) as ydl:
                # download=False: metadata only, no media files are fetched.
                info = ydl.extract_info(f"https://www.youtube.com/watch?v={video_id}", download=False)

            # Project a stable subset of yt-dlp's info dict; missing fields
            # come back as None (or [] for tags).
            video_data = {
                'title': info.get('title'),
                'channel': info.get('uploader'),
                'channel_id': info.get('channel_id'),
                'description': info.get('description'),
                'duration': info.get('duration'),
                'view_count': info.get('view_count'),
                'like_count': info.get('like_count'),
                'upload_date': info.get('upload_date'),
                'thumbnail': info.get('thumbnail'),
                'tags': info.get('tags', []),
            }

            debug(f"Video data extracted in {time.time() - start_time:.2f} seconds", "NOTIFICATION")
            return video_data

        except Exception as e:
            debug(f"Error extracting video data: {str(e)}", "ERROR")
            raise HTTPException(status_code=500, detail=f"Video data extraction error: {str(e)}")

    def fetch_transcript(self, video_id: str, language: str, include_timestamps: bool = False) -> str:
        """Fetch the transcript for *video_id* in *language*.

        *language* may be a name from LANGUAGE_CODES or a raw ISO code
        (unknown names are passed through as-is). Returns the transcript
        joined into a single string.

        NOTE(review): when include_timestamps=True this actually returns the
        raw list of {'text', 'start', 'duration'} entries, not a str — the
        return annotation is only accurate for the default path.

        Raises HTTPException(500) on any transcript-API failure.
        """
        debug(f"Fetching transcript for video {video_id} in language {language}", "NOTIFICATION")
        start_time = time.time()

        try:
            lang_code = LANGUAGE_CODES.get(language.lower(), language)
            transcript = YouTubeTranscriptApi.get_transcript(video_id, languages=[lang_code])
            debug(f"Transcript fetched in {time.time() - start_time:.2f} seconds", "NOTIFICATION")

            return transcript if include_timestamps else " ".join(entry['text'] for entry in transcript)

        except Exception as e:
            debug(f"Error fetching transcript: {str(e)}", "ERROR")
            raise HTTPException(status_code=500, detail=f"Transcript error: {str(e)}")

    def save_transcript(self, video_id: str, transcript: str) -> str:
        """Write *transcript* to Downloads as transcript_<video_id>.txt; return the path."""
        file_name = f"transcript_{video_id}.txt"
        file_path = os.path.join(DOWNLOADS_FOLDER, file_name)

        with open(file_path, 'w', encoding='utf-8') as file:
            # str() tolerates being handed the raw entry list as well as a string.
            file.write(str(transcript))
            debug(f"Transcript saved to {file_path}", "NOTIFICATION")

        return file_path


# AI Client Handler
class AIClientHandler:
    """Thin wrapper around the g4f Client for text and image generation."""

    def __init__(self):
        self.client = Client()

    @staticmethod
    def _log_provider_details() -> None:
        """Log which g4f provider actually served the last request, if recorded.

        get_last_provider can return None when no provider was recorded;
        in that case nothing is logged.
        """
        last_provider = get_last_provider(as_dict=True)
        if last_provider:
            debug("Provider details:", "NOTIFICATION")
            debug(f"- Name: {last_provider.get('name')}", "NOTIFICATION")
            debug(f"- URL: {last_provider.get('url')}", "NOTIFICATION")
            debug(f"- Model: {last_provider.get('model')}", "NOTIFICATION")
            debug(f"- Label: {last_provider.get('label')}", "NOTIFICATION")

    async def generate_text(self, message: str, model: str) -> str:
        """Generate a chat completion for *message* using *model*.

        Returns the content of the first choice in the response.
        NOTE(review): the underlying g4f call is synchronous and will block
        the event loop while the request is in flight.
        """
        debug(f"Attempting text generation with model: {model}", "NOTIFICATION")

        response = self.client.chat.completions.create(
            model=model,
            messages=[{"role": "user", "content": message}]
        )

        self._log_provider_details()

        # Log response details for debugging/traceability.
        debug("Response details:", "NOTIFICATION")
        debug(f"- ID: {response.id}", "NOTIFICATION")
        debug(f"- Created: {response.created}", "NOTIFICATION")
        debug(f"- Model: {response.model}", "NOTIFICATION")
        debug(f"- Provider: {response.provider}", "NOTIFICATION")

        return response.choices[0].message.content

    async def generate_image(self, prompt: str, model: str) -> str:
        """Generate an image for *prompt* using *model*; return the image URL."""
        debug(f"Attempting image generation with model: {model}", "NOTIFICATION")

        response = self.client.images.generate(
            model=model,
            prompt=prompt,
            response_format="url"
        )

        self._log_provider_details()

        return response.data[0].url


# API Endpoints
# Module-level singletons shared by all endpoint handlers below.
youtube_handler = YouTubeDataHandler()
ai_handler = AIClientHandler()


@app.post("/get-transcript")
async def get_transcript(request: TranscriptRequest):
    """Return video metadata plus a plain-text transcript for a YouTube URL.

    Also saves the transcript to the Downloads folder as a side effect.
    Raises HTTPException(500) on any failure.
    """
    debug(f"Received transcript request for video {request.video_url}", "NOTIFICATION")
    start_time = time.time()

    try:
        video_id = youtube_handler.parser.extract_info(request.video_url)['video_id']
        video_data = youtube_handler.extract_video_data(video_id)
        transcript = youtube_handler.fetch_transcript(video_id, request.language)
        youtube_handler.save_transcript(video_id, transcript)

        debug(f"Total request processed in {time.time() - start_time:.2f} seconds", "NOTIFICATION")
        return {"video_data": video_data, "transcript": transcript}

    except HTTPException:
        # The handlers already raise well-formed HTTPExceptions; re-raise them
        # unchanged instead of wrapping them in a second, garbled 500 detail.
        raise
    except Exception as e:
        debug(f"Error processing request: {str(e)}", "ERROR")
        raise HTTPException(status_code=500, detail=str(e))


@app.post("/generate-text")
async def generate_text(request: TextRequest):
    """Generate text from a prompt, optionally prefixed with attached file contents.

    Returns {"output": ..., "metadata": {...}} where metadata records the
    provider actually used plus request/completion timing.
    Raises HTTPException(500) on failure.
    """
    debug("Received text generation request", "NOTIFICATION")
    start_time = time.time()
    request_time = datetime.now().isoformat()

    try:
        # Inline each attached file's content so the model can use it as context.
        files_content = []
        for file_info in request.files:
            if file_info.get("content"):
                files_content.append(f"// {file_info['name']} contents:\n{file_info['content']}\n")
                debug(f"Added file content for: {file_info['name']}", "NOTIFICATION")
            else:
                debug(f"No content provided for file: {file_info['name']}", "WARNING")

        # Combine files content with the message.
        if files_content:
            context_content = "\n".join(files_content)
            full_message = f"{request.message}\n\nContext Files:\n{context_content}"
            debug(f"Added {len(files_content)} files to context", "NOTIFICATION")
        else:
            full_message = request.message
            debug("No context files were added", "NOTIFICATION")

        language_prompt = f" always answer in {request.language}"
        full_message += language_prompt

        output = await ai_handler.generate_text(full_message, request.model)

        # get_last_provider can return None (no provider recorded); default to
        # an empty dict so metadata lookups can't crash a successful generation.
        last_provider = get_last_provider(as_dict=True) or {}
        completion_time = datetime.now().isoformat()
        elapsed_time = time.time() - start_time

        debug(f"Text generation completed in {elapsed_time:.2f} seconds", "NOTIFICATION")

        return {
            "output": output,
            "metadata": {
                "provider_name": last_provider.get('name'),
                "provider_url": last_provider.get('url'),
                "actual_model": last_provider.get('model'),
                "request_time": request_time,
                "completion_time": completion_time,
                "elapsed_time": f"{elapsed_time:.2f}"
            }
        }

    except HTTPException:
        # Preserve already-formed HTTP errors instead of double-wrapping them.
        raise
    except Exception as e:
        debug(f"Error in text generation: {str(e)}", "ERROR")
        raise HTTPException(status_code=500, detail=f"Text generation error: {str(e)}")

@app.post("/generate-image")
async def generate_image(request: ImageRequest):
    """Generate an image from a prompt and return {"image_url": ...}.

    Raises HTTPException(500) on failure.
    """
    debug("Received image generation request", "NOTIFICATION")
    start_time = time.time()

    try:
        image_url = await ai_handler.generate_image(request.prompt, request.model)
        debug(f"Image generation completed in {time.time() - start_time:.2f} seconds", "NOTIFICATION")
        return {"image_url": image_url}

    except HTTPException:
        # Preserve already-formed HTTP errors instead of double-wrapping them.
        raise
    except Exception as e:
        debug(f"Error in image generation: {str(e)}", "ERROR")
        raise HTTPException(status_code=500, detail=f"Image generation error: {str(e)}")


def parse_summary_tags(summary_text: str):
    """Split an AI-generated summary into (summary, tags).

    Looks for a case-insensitive "tags: a, b, c;" marker anywhere in the
    text. If found, returns the text after the marker's ';' as the summary
    and the comma-separated tags (lowercased, stripped) as a list.
    Otherwise returns (summary_text, []).
    """
    lowered = summary_text.lower()
    if 'tags:' in lowered:
        tags_match = re.search(r'tags:(.*?);', lowered)
        if tags_match:
            tags = [tag.strip() for tag in tags_match.group(1).split(',')]
            # Slice using the match's own end offset — str.lower() preserves
            # length, so offsets map 1:1 onto the original text. (Previously
            # this used the FIRST ';' anywhere in the text, which could
            # precede the tag list and strip the wrong span.)
            return summary_text[tags_match.end():].strip(), tags
    return summary_text, []


@app.post("/generate-summary")
async def generate_summary(request: SummaryRequest):
    """Summarize a YouTube video's English transcript with the given prompt.

    Returns {"summary": ..., "tags": [...]}, where tags are parsed from an
    optional "tags: ...;" marker in the model output.
    Raises HTTPException(500) on failure.
    """
    debug("Received summary generation request", "NOTIFICATION")
    start_time = time.time()

    try:
        video_info = youtube_handler.parser.extract_info(request.video_url)
        transcript = youtube_handler.fetch_transcript(video_info['video_id'], 'en')

        summary_text = await ai_handler.generate_text(
            f"{request.prompt}\n\nTranscript:\n{transcript}",
            request.model
        )

        # Extract tags from the summary, if the model emitted a tags marker.
        summary_text, tags = parse_summary_tags(summary_text)

        debug(f"Summary generation completed in {time.time() - start_time:.2f} seconds", "NOTIFICATION")
        return {"summary": summary_text, "tags": tags}

    except HTTPException:
        # Preserve already-formed HTTP errors instead of double-wrapping them.
        raise
    except Exception as e:
        debug(f"Error in summary generation: {str(e)}", "ERROR")
        raise HTTPException(status_code=500, detail=f"Summary generation error: {str(e)}")


if __name__ == "__main__":
    debug("Starting server...", "NOTIFICATION")
    import uvicorn

    # Bind to loopback only — this server is intended for local clients
    # (Obsidian / localhost, per the CORS configuration above).
    uvicorn.run(app, host="127.0.0.1", port=8000)