Spaces: Runtime error
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM

# Define constants
MODEL_NAME = "Ct1tz/Codebert-Base-B2D4G5"
MAX_LENGTH = 512
# Load the tokenizer with error handling
try:
    tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, model_max_length=MAX_LENGTH, trust_remote_code=True)
    # Generation with padding=True requires a pad token; fall back to EOS if none is set
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token
    print(f"Tokenizer vocabulary size: {len(tokenizer)}")
    print(f"Tokenizer type: {tokenizer.__class__.__name__}")
except Exception as e:
    print(f"Error loading tokenizer: {e}")
    raise
# Load the model with error handling
try:
    # AutoModelForCausalLM is used for chat/generation tasks. Note: CodeBERT
    # checkpoints are RoBERTa-based encoders, so this call will raise unless the
    # repo ships a causal-LM config -- a likely cause of the runtime error above.
    model = AutoModelForCausalLM.from_pretrained(
        MODEL_NAME,
        trust_remote_code=True
    )
except Exception as e:
    print(f"Error loading model: {e}")
    raise
# Define a chat function. gr.ChatInterface calls fn(message, history) and expects
# the reply string back; it manages the history itself, so we only return the response.
def chat(message, history):
    inputs = tokenizer(message, return_tensors="pt", padding=True, truncation=True, max_length=MAX_LENGTH)
    outputs = model.generate(**inputs, max_new_tokens=50, do_sample=True)
    # Decode only the newly generated tokens, not the echoed prompt
    response = tokenizer.decode(outputs[0][inputs["input_ids"].shape[-1]:], skip_special_tokens=True)
    return response
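# Quick smoke test with a hypothetical prompt, handy for debugging the model call
# before wiring up the UI; left commented out so the Space only runs the Gradio app:
# print(chat("def add(a, b):", []))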
# Create Gradio chat interface
interface = gr.ChatInterface(
    fn=chat,
    title="CodeBERT Chat",
    description="Chat with the CodeBERT model (Ct1tz/Codebert-Base-B2D4G5) for code-related tasks.",
    theme="soft"
)

# Launch the interface
if __name__ == "__main__":
    interface.launch()
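For the Space to boot, the model's dependencies must be installed. A minimal requirements.txt sketch, assuming the standard Gradio Space setup (where gradio itself is provided by the SDK) and a PyTorch backend; pin versions as needed:

transformers
torch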