Hugging Face Space (status at capture: Runtime error)
import os

import gradio as gr
import spaces
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM


def main():
    # Note: running pip after gradio has already been imported has no effect on
    # the current process, and shelling out to pip at startup is fragile on
    # Spaces. Pin the version in requirements.txt instead (see the sketch after
    # the script).
    # os.system("pip install gradio==3.50.2")

    # Load model and tokenizer
    model_id = "codellama/CodeLlama-7b-Instruct-hf"
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    model = AutoModelForCausalLM.from_pretrained(
        model_id,
        device_map="auto",  # place layers across the available device(s)
        torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
    )
    # CodeLlama ships without a pad token; reuse EOS so padding is well defined.
    tokenizer.pad_token = tokenizer.eos_token

    # The otherwise-unused `import spaces` suggests this is a ZeroGPU Space;
    # the decorator requests a GPU per call there and is a no-op elsewhere.
    @spaces.GPU
    def convert_python_to_r(python_code):
        prompt = f"""### Task:
Convert the following Python code to equivalent R code.

### Python code:
{python_code}

### R code:"""
        input_ids = tokenizer(prompt, return_tensors="pt", truncation=True).input_ids
        if torch.cuda.is_available():
            input_ids = input_ids.to("cuda")
        outputs = model.generate(
            input_ids,
            # Budget new tokens only: the original max_length=1024 counted the
            # prompt, so a long input could leave no room for the R output.
            max_new_tokens=512,
            do_sample=True,
            temperature=0.2,
            pad_token_id=tokenizer.eos_token_id,
            num_return_sequences=1,
        )
        generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
        # The decoded text echoes the prompt; keep only what follows the marker.
        if "### R code:" in generated_text:
            generated_text = generated_text.split("### R code:")[-1].strip()
        return generated_text

    gr.Interface(
        fn=convert_python_to_r,
        inputs=gr.Textbox(lines=10, placeholder="Paste your Python code here..."),
        outputs="text",
        title="Python to R Code Converter using CodeLlama 7B Instruct",
        description="Enter Python code below, and the tool will convert it to R code using the CodeLlama 7B Instruct model.",
    ).launch()


if __name__ == "__main__":
    main()
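
Since the pip-at-runtime workaround above cannot take effect once gradio is imported, the version pin belongs in the Space's requirements.txt. A minimal sketch, assuming only the dependencies this script actually uses (accelerate is needed by transformers for device_map="auto"; unpinned entries are left to the resolver and may need adjusting):

requirements.txt:
    gradio==3.50.2
    transformers
    torch
    accelerate
    spaces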