import json
import pandas as pd
import numpy as np
import librosa
import matplotlib.pyplot as plt
from azure_connection import upload_img_to_azure_with_signed_url
from helpers import file_to_base64
import plotly.graph_objects as go
import logging


logger = logging.getLogger("pm2_logger")
logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)

def plot_gauge(confidence_score, output_file='confidence_gauge.png'):
    """Render a 0-100 confidence gauge and save it to output_file as a PNG."""
    fig = go.Figure(go.Indicator(
        mode="gauge+number",
        value=confidence_score,
        title={'text': "Confidence Level (%)"},
        gauge={
            'axis': {'range': [0, 100]},
            'bar': {'color': "green"},
            'steps': [
                {'range': [0, 40], 'color': "red"},
                {'range': [40, 70], 'color': "yellow"},
                {'range': [70, 100], 'color': "lightgreen"}
            ],
        }
    ))
 
    # Save the figure as a static PNG (write_image requires the kaleido package)
    fig.write_image(output_file)
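
# Illustrative usage:
#   plot_gauge(82.5)  # writes 'confidence_gauge.png' with the needle at 82.5 on the 0-100 axis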
 
 
def analyze_frequency_simple(wav_path):
    """Estimate a speaking-confidence percentage from a WAV file using simple audio heuristics."""
    # Load audio
    y, sr = librosa.load(wav_path)

    # Energy (volume level)
    rms = librosa.feature.rms(y=y)[0]
    avg_energy = np.mean(rms)

    # Pitch variation (via zero crossing rate)
    zcr = librosa.feature.zero_crossing_rate(y)[0]
    pitch_var = np.std(zcr)

    # Speaking speed (approximated with librosa's beat/tempo tracker)
    tempo, _ = librosa.beat.beat_track(y=y, sr=sr)
    tempo = float(np.atleast_1d(tempo)[0])  # newer librosa versions may return tempo as an array

    # Silence percentage
    intervals = librosa.effects.split(y, top_db=30)
    total_speech_samples = sum((end - start) for start, end in intervals)
    silence_ratio = 1 - (total_speech_samples / len(y))

    # Heuristic scoring
    energy_score = min(avg_energy * 200, 1.0)
    pitch_score = 1.0 - min(pitch_var * 10, 1.0)
    tempo_score = min(tempo / 200, 1.0)
    silence_penalty = silence_ratio

    # Final confidence estimate (weighted average)
    confidence = float((0.4 * energy_score + 0.3 * pitch_score + 0.2 * tempo_score) * (1 - silence_penalty))
    confidence_percent = round(confidence * 100, 2)

    output_file = 'confidence_chart.png'
    # The gauge axis runs 0-100, so pass the percentage rather than the 0-1 fraction
    plot_gauge(confidence_percent, output_file)

    base64_string = upload_img_to_azure_with_signed_url(output_file, output_file)

    return confidence_percent, base64_string
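
# Illustrative usage (hedged; 'interview.wav' is a hypothetical local file):
#   score, chart_ref = analyze_frequency_simple("interview.wav")
#   'score' is a percentage in [0, 100]; 'chart_ref' is whatever
#   upload_img_to_azure_with_signed_url returns for the uploaded gauge image.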

# def analyze_frequency_simple(audio_file, min_freq=1000, max_freq=2000, sr=16000, plot_path='frequency_time_plot.png'):
#     try:
#         audio_segment, sr = librosa.load(audio_file, sr=sr)
#     except Exception as e:
#         print(f"Error loading audio file: {e}")
#         return None
    
#     if len(audio_segment) == 0:
#         return None

#     # Calculate the Short-Time Fourier Transform (STFT)
#     stft = np.abs(librosa.stft(audio_segment))

#     # Get the frequencies corresponding to the rows in the STFT
#     freqs = librosa.fft_frequencies(sr=sr)

#     # Calculate the mean frequency over time
#     mean_freqs = np.dot(freqs, stft) / np.sum(stft, axis=0)

#     # Create a time axis
#     times = librosa.frames_to_time(np.arange(stft.shape[1]), sr=sr)

#     # Calculate confidence
#     mean_frequency = np.mean(mean_freqs)
#     confidence = (mean_frequency - min_freq) / (max_freq - min_freq) * 10
#     confidence = np.clip(confidence, 1, 4)

#     # Plot the frequency over time
#     plt.figure(figsize=(10, 6))
#     plt.plot(times, mean_freqs, color='blue')
#     plt.xlabel('Time (s)')
#     plt.ylabel('Confidence level')
#     plt.title('Confidence Over Time')
    
#     # Save the plot as a PNG file
#     plt.savefig(plot_path, format='png')
#     plt.close()
    
#     base64_string = upload_img_to_azure_with_signed_url(plot_path,plot_path)
#     # base64_string = file_to_base64(plot_path)


#     return confidence, base64_string


def change_key_name(data: dict, old_key: str, new_key: str) -> dict:
    """Rename old_key to new_key in data (in place) if it exists, and return the dict."""
    if old_key in data:
        data[new_key] = data.pop(old_key)
    return data
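
# Illustrative usage:
#   change_key_name({"technical_score": 80}, "technical_score", "technicalScore")
#   -> {"technicalScore": 80}; note the rename mutates the dict that is passed in.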


def calculate_radar_scores(data):
    # Initialize default scores
    scores = {
        'technical_score': 0,
        'attitude_score': 0,
        'communication_score': 0,
        'soft_skills_score': 0,
        'alignment_with_values_score': 0,
        'attitude_professionalism_score': 0
    }
    
    try:
        # Calculate the average rating for technical skills
        technical_skills = data.get("technicalSkills", [])
        if technical_skills:
            average_technical_rating = sum(float(skill.get("rating", 0)) for skill in technical_skills) / len(technical_skills)
            scores['technical_score'] = (average_technical_rating / 5) * 100

        # Calculate the score for attitude
        attitude = next((skill for skill in data.get("softSkills", []) if skill.get("skill", "").lower() == "attitude"), None)
        if attitude:
            scores['attitude_score'] = (float(attitude.get("rating", 0)) / 5) * 100

        # Calculate the score for communication
        communication = next((skill for skill in data.get("softSkills", []) if skill.get("skill", "").lower() == "communication"), None)
        if communication:
            scores['communication_score'] = (float(communication.get("rating", 0)) / 5) * 100

        # Calculate the score for soft skills (average of all soft skills)
        soft_skills = data.get("softSkills", [])
        if soft_skills:
            average_soft_skills_rating = sum(float(skill.get("rating", 0)) for skill in soft_skills) / len(soft_skills)
            scores['soft_skills_score'] = (average_soft_skills_rating / 5) * 100

        # Calculate the score for alignment with company values
        alignment_with_values = next((skill for skill in data.get("softSkills", []) if skill.get("skill", "").lower() == "alignment with company values"), None)
        if alignment_with_values:
            scores['alignment_with_values_score'] = (float(alignment_with_values.get("rating", 0)) / 5) * 100

        # Calculate the score for the average of attitude and professionalism
        professionalism = next((skill for skill in data.get("softSkills", []) if skill.get("skill", "").lower() == "professionalism"), None)
        if attitude and professionalism:
            average_attitude_professionalism = (float(attitude.get("rating", 0)) + float(professionalism.get("rating", 0))) / 2
            scores['attitude_professionalism_score'] = (average_attitude_professionalism / 5) * 100

    except Exception as e:
        logger.error(f"An error occurred while calculating radar scores: {e}")

    return scores
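
# Illustrative input (hypothetical, matching the keys calculate_radar_scores reads):
#   calculate_radar_scores({
#       "technicalSkills": [{"skill": "Python", "rating": "4"}],
#       "softSkills": [{"skill": "Attitude", "rating": "3"}, {"skill": "Communication", "rating": "5"}],
#   })
#   -> technical_score 80.0, attitude_score 60.0, communication_score 100.0, soft_skills_score 80.0,
#      and 0 for the scores whose skills are absent.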

    
    
    
def extract_pie_ratings(data):
    """
    Extract ratings from the JSON data.

    Parameters:
    - data (dict): Dictionary containing the technical skill ratings data.

    Returns:
    - dict: Dictionary containing counts of each rating category.
    """
    ratings = {'low': 0, 'medium': 0, 'good': 0, 'best': 0}

    # Ensure that 'technicalSkills' key exists
    if 'technicalSkills' not in data:
        print("Error: 'technicalSkills' key not found in data.")
        return ratings

    for skill in data['technicalSkills']:
        if 'rating' not in skill:
            print(f"Error: 'rating' key not found in skill: {skill}")
            continue
        try:
            # Convert the rating to a float
            rating = float(skill['rating'])
        except ValueError:
            print(f"Error: Invalid rating value '{skill['rating']}' for skill: {skill}")
            continue

        # Categorize the rating
        if rating < 2.0:
            ratings['low'] += 1
        elif rating < 3.0:
            ratings['medium'] += 1
        elif rating < 4.0:
            ratings['good'] += 1
        else:
            ratings['best'] += 1

    return ratings
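
# Illustrative usage: technical ratings of 1.5, 2.5, 3.5 and 4.5 would bucket as
#   {'low': 1, 'medium': 1, 'good': 1, 'best': 1}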

    
    
def extract_line_scores(data):
    """
    Calculate aptitude scores for general, technical, verbal, and overall categories.

    Parameters:
    - data (dict): Dictionary containing skill ratings data.

    Returns:
    - dict: Dictionary with calculated scores.
    """
    # Skills to consider for general aptitude
    general_skills = ["problem solving", "adaptability", "communication"]

    # Calculate General Aptitude
    general_ratings = [
        float(skill['rating']) for skill in data.get('softSkills', [])
        if skill['skill'].lower() in general_skills
    ]
    general_aptitude = sum(general_ratings) / (len(general_ratings) or 1)  # Avoid division by zero

    # Calculate Technical Aptitude
    technical_skills = data.get('technicalSkills', [])
    technical_ratings = [float(skill['rating']) for skill in technical_skills]
    technical_aptitude = sum(technical_ratings) / (len(technical_ratings) or 1)  # Avoid division by zero

    # Calculate Verbal Aptitude
    communication_skills = [
        float(skill['rating']) for skill in data.get('softSkills', [])
        if skill['skill'].lower() == 'communication'
    ]
    verbal_aptitude = communication_skills[0] if communication_skills else 0  # Default to 0 if no communication rating

    # Calculate Overall Aptitude
    overall_aptitude = (general_aptitude + technical_aptitude + verbal_aptitude) / 3

    return {
        'general': general_aptitude * 20,  # Convert to percentage
        'technical': technical_aptitude * 20,  # Convert to percentage
        'verbal': verbal_aptitude * 20,  # Convert to percentage
        'overall': overall_aptitude * 20  # Convert to percentage
    }
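
# Illustrative usage: if every soft-skill and technical rating in the input is 4 out of 5,
# each aptitude averages 4 and every returned score works out to 4 * 20 = 80 (%).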

    
def extract_bar_skill_ratings(data):
    """
    Extract skill ratings from the JSON data and calculate scores for various categories.

    Parameters:
    - data (dict): Dictionary containing skill ratings data.

    Returns:
    - dict: Dictionary with calculated scores.
    """
    try:
        # Extract ratings for specific soft skills ('attitude' doubles as the motivation signal)
        team_player = next((float(skill['rating']) for skill in data.get('softSkills', []) if skill.get('skill', '').lower() == 'teamwork'), 0)
        motivation = next((float(skill['rating']) for skill in data.get('softSkills', []) if skill.get('skill', '').lower() == 'attitude'), 0)

        # Calculate Individual Contributor score (60% Technical, 40% Soft Skills)
        max_rating = 5.0
        technical_skills = data.get('technicalSkills', [])
        technical_average = sum(float(skill['rating']) for skill in technical_skills) / (len(technical_skills) or 1)

        # Relevant soft skills for the score
        relevant_soft_skills = ["problem solving", "adaptability", "professionalism", "communication"]
        soft_ratings = [float(skill['rating']) for skill in data.get('softSkills', []) if skill.get('skill', '').lower() in relevant_soft_skills]
        soft_average = sum(soft_ratings) / (len(soft_ratings) or 1)

        individual_contributor = (0.6 * (technical_average / max_rating) + 0.4 * (soft_average / max_rating)) * 100

        # Calculate Leadership score
        leadership_skills = ["communication", "problem solving", "adaptability"]
        leadership_ratings = [float(skill['rating']) for skill in data.get('softSkills', []) if skill.get('skill', '').lower() in leadership_skills]
        leadership_average = sum(leadership_ratings) / (len(leadership_ratings) or 1)
        leadership = (leadership_average / max_rating) * 100

        return {
            'team_player': team_player * 20,  # Convert rating out of 5 to percentage
            'individual_contributor': individual_contributor,
            'leader': leadership,
            'motivation': motivation * 20  # Convert rating out of 5 to percentage
        }

    except Exception as e:
        raise ValueError(f"Error calculating skill ratings: {e}")
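
# Illustrative usage: with teamwork rated 4/5 and attitude 3/5, the output includes
#   team_player = 80.0 and motivation = 60.0; 'individual_contributor' and 'leader'
#   are 0-100 blends of the technical and soft-skill averages computed above.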


def creating_graphs(data, audio_path):
    """Build the chart payload (radar, pie, line, bar, area and confidence) from an evaluation dict and audio file."""
    try:
        # Perform calculations
        calculate_radar_scores_result = calculate_radar_scores(data)
        extract_pie_ratings_result = extract_pie_ratings(data)
        extract_line_scores_result = extract_line_scores(data)
        extract_bar_skill_ratings_result = extract_bar_skill_ratings(data)

        # Extract weaknesses and strengths
        area_rating = {
            'weakness': data.get('weaknesses', []),
            'strengths': data.get('strengths', [])
        }
        
        confidence, base64_string = analyze_frequency_simple(audio_path)
        confidence = round(confidence, 2)
        logger.info(f"Confidence: {confidence}")

        if pd.isna(confidence):
            confidence = 2  # fallback when no valid confidence score could be computed

        confidence_json = {
            "score": confidence,
            "freqStr": base64_string
        }

        print("Radar Chart: ",calculate_radar_scores_result)
        print("Data type of Radar Chart: ",type(calculate_radar_scores))


        calculate_radar_scores_result = change_key_name(calculate_radar_scores_result, "technical_score", "technicalScore")
        calculate_radar_scores_result = change_key_name(calculate_radar_scores_result, "attitude_score", "attitudeScore")
        calculate_radar_scores_result = change_key_name(calculate_radar_scores_result, "communication_score", "communicationScore")
        calculate_radar_scores_result = change_key_name(calculate_radar_scores_result, "soft_skills_score", "softSkillsScore")
        calculate_radar_scores_result = change_key_name(calculate_radar_scores_result, "alignment_with_values_score", "alignmentWithValuesScore")
        calculate_radar_scores_result = change_key_name(calculate_radar_scores_result, "attitude_professionalism_score", "attitudeProfessionalismScore")

        extract_bar_skill_ratings_result = change_key_name(extract_bar_skill_ratings_result, "individual_contributor", "individualContributor")
        extract_bar_skill_ratings_result = change_key_name(extract_bar_skill_ratings_result, "team_player", "teamPlayer")


        # Construct the output JSON
        output_json = {
            "radarScores": calculate_radar_scores_result,
            "pieRatings": extract_pie_ratings_result,
            "lineScores": extract_line_scores_result,
            "barRatings": extract_bar_skill_ratings_result,
            "areaRating": area_rating,
            "confidence" : confidence_json
        }

        return output_json

    except Exception as e:
        logger.error(f"An error occurred while processing the data: {e}")
        return None
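

if __name__ == "__main__":
    # Minimal local smoke test (a sketch, not part of the service): it assumes a sample
    # evaluation JSON and a WAV recording on disk, plus working Azure credentials for
    # upload_img_to_azure_with_signed_url. Both file paths below are hypothetical.
    with open("sample_evaluation.json") as f:
        sample_data = json.load(f)
    result = creating_graphs(sample_data, "sample_interview.wav")
    logger.info(json.dumps(result, indent=2))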