# NOTE: the "Spaces: / Sleeping / Sleeping" lines here were Hugging Face
# page-scrape residue, not part of this module.
import json
import os

import requests
def get_response(messages, verbose=False):
    """
    Send a chat-completion request to the OpenRouter API.

    Args:
        messages (list): Chat messages to send, as dicts with "role" and
            "content" keys (OpenAI-style chat format).
        verbose (bool): Whether to print the raw JSON response to the console.

    Returns:
        str: The content of the first choice's message.

    Raises:
        KeyError: If OPENROUTER_API_KEY is not set in the environment, or if
            the response JSON lacks the expected "choices" structure.
        requests.HTTPError: If the API returns an error status code.

    Reasoning documentation:
        https://openrouter.ai/docs/use-cases/reasoning-tokens
    """
    # SECURITY: the API key must never be hard-coded in source (the previous
    # revision leaked one); read it from the environment instead.
    api_key = os.environ["OPENROUTER_API_KEY"]
    # Previous models tried here: "deepseek/deepseek-r1-0528:free",
    # "tngtech/deepseek-r1t2-chimera:free", "deepseek/deepseek-chat-v3.1:free",
    # "deepseek/deepseek-chat-v3-0324:free", "arcee-ai/trinity-large-preview:free"
    response = requests.post(
        url="https://openrouter.ai/api/v1/chat/completions",
        headers={
            "Authorization": f"Bearer {api_key}",
            "HTTP-Referer": "huggingface.co",  # Optional. Site URL for rankings on openrouter.ai.
            "X-Title": "RoundTripTranslation",  # Optional. Site title for rankings on openrouter.ai.
        },
        data=json.dumps({
            "model": "openrouter/free",
            "messages": messages,
        }),
        timeout=120,  # don't hang forever on a stalled connection
    )
    # Fail loudly on HTTP errors instead of raising a confusing KeyError
    # when indexing into an error payload below.
    response.raise_for_status()
    payload = response.json()  # parse once, reuse for both print and return
    if verbose:
        print(payload, flush=True)
    return payload["choices"][0]["message"]["content"]
def get_parsed_response(messages, verbose=False):
    """
    Get a response from the OpenRouter API and split out its tagged sections.

    The response text is expected to contain <THINKING>, <RESULT> and
    <FOLLOW_UP> sections. Each section runs from the FIRST opening tag to the
    LAST matching closing tag, so repeated tags inside a section stay intact.

    Args:
        messages (list): Chat messages passed through to get_response.
        verbose (bool): Whether to print the raw response and parsed sections.

    Returns:
        tuple: (thinking, result, follow_up) as strings. A section is the
        empty string when its tags are missing from the response.
    """
    def _extract(text, tag):
        # One place for the slicing logic. The previous revision sliced with
        # str.find's -1 sentinel when a tag was absent, silently returning
        # garbage text; return "" for a missing/malformed section instead.
        open_tag = f'<{tag}>'
        close_tag = f'</{tag}>'
        start = text.find(open_tag)
        end = text.rfind(close_tag)
        if start == -1 or end == -1 or end < start:
            return ''
        return text[start + len(open_tag):end]

    response = get_response(messages, verbose=verbose)
    thinking = _extract(response, 'THINKING')
    result = _extract(response, 'RESULT')
    follow_up = _extract(response, 'FOLLOW_UP')
    if verbose:
        print(f"Thinking: {thinking}", flush=True)
        print(f"Result: {result}", flush=True)
        print(f"Follow-up: {follow_up}", flush=True)
    return thinking, result, follow_up
if __name__ == "__main__":
    from translation import one_shot_translation_prompt, post_edit_prompt
    from translation_examples import translation_examples

    # Demo run: fill the post-edit prompt template with example #4 and send it.
    example = translation_examples[4]
    messages = [
        {
            "role": part["role"],
            "content": part["content"].format(
                source=example["source_language"],
                target=example["target_language"],
                sentence=example["text"],
                context="None",
                generated_questions="None",
                removed_groups="None",
                draft_translation="None",
            ),
        }
        for part in post_edit_prompt  # swap in one_shot_translation_prompt to try the other flow
    ]
    print(messages, flush=True)
    print("Sending request...", flush=True)
    thinking, response_text, follow_up = get_parsed_response(messages, verbose=True)
    print("response:", flush=True)
    print(response_text, flush=True)