# (removed notebook-export artifacts: "Spaces:" / "Runtime error" status lines — not part of the program)
# -*- coding: utf-8 -*-
"""translation practice.ipynb

Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/drive/1KrnodZGBZrUFdaJ9FIn8IhtWtCL7peoE
"""
import requests
import gradio as gr
from dotenv import load_dotenv
import os
from openai import OpenAI
import spacy
import random

# Load environment variables (HUGGING_FACE_TOKEN, OPENAI_API_KEY) from .env.
load_dotenv()

# Hugging Face Inference API token.
HF_TOKEN = os.getenv('HUGGING_FACE_TOKEN')

# OpenAI client used for translation, assessment, and the chat below.
client = OpenAI(
    api_key=os.getenv('OPENAI_API_KEY')
)

# Hugging Face Inference API setup.
# model_name = "mmnga/ELYZA-japanese-Llama-2-7b-instruct-gguf"
# NOTE(review): no model name is appended to this URL, so it is not a usable
# inference endpoint as-is — confirm which model was intended.
API_URL = "https://api-inference.huggingface.co/models/"
# API_URL = f"https://api-inference.huggingface.co/models/{model_name}"
headers = {"Authorization": f"Bearer {HF_TOKEN}"}

# Global switch controlling debug_print() output.
DEBUG_MODE = True

# Load the practice text; keep a placeholder so the app still runs without it.
file_content = "Not yet loaded"
try:
    file_path = "issun-boshi.txt"
    # Explicit UTF-8: the story is Japanese text and would break under a
    # locale-dependent default encoding on some platforms.
    with open(file_path, 'r', encoding='utf-8') as file:
        # Read the entire content of the file into a string.
        file_content = file.read()
except OSError as e:
    # Best-effort load: report and continue with the placeholder content.
    print("Error loading short story file.", str(e))

learning_content = file_content
def debug_print(*args, **kwargs):
    """Forward all arguments to print(), but only while DEBUG_MODE is on."""
    if not DEBUG_MODE:
        return
    print(*args, **kwargs)
def translate_openai(input_text):
    """Return a Japanese translation of *input_text* produced by GPT-3.5."""
    request_text = "Translate the following text into Japanese language: " + input_text
    # temperature=0 keeps the translation deterministic across calls.
    completion = client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": request_text}],
        temperature=0,
    )
    result = completion.choices[0].message.content
    debug_print("GPT translation:", result)
    return result
def _translate_to_english(japanese_text):
    """Return a reference English translation of *japanese_text* via GPT-3.5.

    temperature=0 keeps the reference translation stable across calls.
    """
    response = client.chat.completions.create(
        messages=[
            {
                "role": "user",
                "content": "Translate the following text into English language: " + japanese_text,
            }
        ],
        model="gpt-3.5-turbo",
        temperature=0,
    )
    return response.choices[0].message.content


def assess(original_japanese, student_translation):
    """Evaluate a student's English translation of a Japanese text.

    Returns a (reference_translation, feedback) pair of strings; on failure
    returns an (error_message, exception_text) pair instead, so the two
    Gradio output textboxes always receive values.
    """
    try:
        # BUG FIX: the original called translate_hf(), which is not defined
        # anywhere in this file (the Hugging Face endpoint above is left
        # without a model), so every call failed with a NameError. Use the
        # OpenAI-backed helper for the reference translation instead.
        generated_translation = _translate_to_english(original_japanese)
        debug_print("Generated translation:", generated_translation)
    except Exception as e:
        return "Error in processing translation.", str(e)
    try:
        prompt = (f"Evaluate the student's English translation of Japanese for accuracy and naturalness. "
                  f"Original: {original_japanese}, "
                  f"Reference Translation: {generated_translation}, "
                  f"Student Translation: {student_translation}. "
                  "Highlight errors, suggest improvements, and note any nuances. Provide concise and very simple feedback for an English language learner aimed at improving their translation skills. Where possible, give concrete examples.")
        debug_print(prompt)
        # Second model call: evaluate the student's translation attempt.
        response = client.chat.completions.create(
            messages=[
                {
                    "role": "user",
                    "content": prompt,
                }
            ],
            model="gpt-3.5-turbo",
        )
        debug_print("Full GPT response:", response)
        debug_print("Generated translation:", generated_translation)
        evaluation_feedback = response.choices[0].message.content
        return generated_translation, evaluation_feedback
    except Exception as e:
        return "Error in processing evaluation.", str(e)
# Standalone assessment UI wired to assess(); built here but launched
# elsewhere (only `demo` is launched at the bottom of this file).
_assessor_inputs = [
    gr.Textbox(label="Learning Content", placeholder="Enter content to focus on here", lines=4, value=learning_content),
    gr.Textbox(label="Student Data", placeholder="Enter student data here", lines=4, value="EFL Learner"),
]
_assessor_outputs = [
    gr.Textbox(label="Machine Generated Translation"),
    gr.Textbox(label="Evaluation Feedback"),
]
assessor = gr.Interface(
    fn=assess,
    inputs=_assessor_inputs,
    outputs=_assessor_outputs,
    title="Translation Practice",
    description="Enter a Japanese sentence and your English translation attempt to receive evaluation feedback.",
)
# Shared seed for the chat: the Blocks section below appends one system
# message built from the UI textboxes' initial values.
history_openai_format = []


def predict(message, history):
    """Stream a GPT-3.5 chat reply for gr.ChatInterface.

    *history* is Gradio's list of (user, assistant) turn pairs; *message* is
    the new user message. Yields the assistant reply incrementally.
    """
    # BUG FIX: the original appended every turn to the module-level
    # history_openai_format list and never cleared it, so each call re-sent
    # an ever-growing, duplicated transcript to the API. Build a per-call
    # message list instead; the global now only contributes the system prompt.
    messages = list(history_openai_format)
    for human, assistant in history:
        messages.append({"role": "user", "content": human})
        messages.append({"role": "assistant", "content": assistant})
    messages.append({"role": "user", "content": message})
    debug_print("### History:", messages)
    try:
        response = client.chat.completions.create(
            model='gpt-3.5-turbo',
            messages=messages,
            temperature=0.7,
            # max_tokens=150,
            stream=True
        )
    except Exception as e:
        debug_print("Error in getting LLM response.", str(e))
        # BUG FIX: the original fell through here with `response` unbound and
        # hit a NameError in the streaming loop below; bail out instead.
        return
    try:
        partial_message = ""
        for chunk in response:
            # Some stream events (e.g. the final one) carry no content delta.
            if chunk.choices[0].delta.content is not None:
                partial_message = partial_message + chunk.choices[0].delta.content
                yield partial_message
    except Exception as e:
        debug_print("Error in streaming output", str(e))
# Active-reading strategies interpolated verbatim into the default teacher
# prompt in the Blocks UI below.
strategies = '''
- making connections between the text and their prior knowledge;
- forming and testing hypotheses about texts;
- asking questions about the text;
- creating mental images or visualising;
- inferring meaning from the text;
- identifying the writer’s purpose and point of view;
- identifying the main idea or theme in the text;
- summarising the information or events in the text;
- analysing and synthesising ideas, information, structures, and features in the text;
- evaluating ideas and information'''
# Main chat UI: configuration textboxes in a row above a chat window.
with gr.Blocks() as demo:
    with gr.Row():
        learner_data = gr.Textbox(label="Learner Data", placeholder="Enter learner data here...", lines=4, value="Honoka is a Japanese EFL student. [summary of relevant student data]")
        # NOTE: rebinds the module-level `learning_content` string to a
        # Textbox whose initial value is that same string.
        learning_content = gr.Textbox(label="Learning Content", placeholder="Enter learning content here...", lines=4, value=learning_content)
        teacher_prompt = gr.Textbox(label="Teacher Prompt", placeholder="Enter chat guidance here...", lines=4,
                                    value=f"You are a professional EFL teacher. Help the student actively read the text using these strategies: {strategies}. Use simple vocabulary and short sentences a beginner would understand. Guide the conversation to discuss the Learning Content below.")
    # Pre-prompt the shared history_openai_format list with a system message
    # built from the textboxes' *initial* .value strings — later edits in the
    # UI are not re-read into the system prompt.
    history_openai_format.append({"role": "system", "content": f"{teacher_prompt.value} Learner Data: {learner_data.value}. Learning Content: {learning_content.value}. "})
    try:
        gr.ChatInterface(predict)
    except Exception as e:
        debug_print("Error in gr.ChatInterface(predict)", str(e))

# debug=True keeps the server in the foreground and prints tracebacks.
demo.launch(debug=True)