import ollama

def query_llama3(query):
    """
    Queries the Llama 3 model via Ollama and returns the complete response.

    Args:
        query (str): The query to send to the model.

    Returns:
        str: The complete response accumulated from the streamed chunks.
    """
    stream = ollama.chat(
        model='llama3:8b',
        messages=[{'role': 'user', 'content': query}],
        stream=True,
    )
    
    response = ""
    # Print the response in chunks and accumulate it
    for chunk in stream:
        chunk_content = chunk['message']['content']
        #print(chunk_content, end='', flush=True)
        response += chunk_content
    
    return response


def read_file(file_path):
    """
    Reads the content of a text file and returns it as a string.

    Args:
        file_path (str): The path to the text file.

    Returns:
        str: The content of the file, or an error message if it cannot be read.
    """
    try:
        # Use an explicit encoding so reads behave the same across platforms
        with open(file_path, 'r', encoding='utf-8') as file:
            content = file.read()
        return content
    except FileNotFoundError:
        return f"File '{file_path}' not found."
    except Exception as e:
        return f"An error occurred: {e}"


def get_response(report_file):
    """
    Reads a planet report from a file and queries the Llama model based on it.

    Args:
    report_file (str): The path to the planet report text file.

    Returns:
    str: The response from the Llama model.
    """
    report_data = read_file(report_file)

    user_query = f'''
    {report_data}
    Based on the above planet report, tell me about my personality and other traits that you can infer from your knowledge.
    '''

    # Query the Llama model and return the response along with the report text
    response = query_llama3(user_query)
    return response, report_data
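

# Example usage: a minimal sketch assuming a local Ollama server with the
# llama3:8b model pulled, and a hypothetical report file "planet_report.txt"
# in the working directory (a placeholder name, not part of the original code).
if __name__ == "__main__":
    llm_response, report = get_response("planet_report.txt")
    print(llm_response)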
