Spaces:
Runtime error (the app crashes at startup: GPT-2's tokenizer defines no chat template, so `tokenizer.apply_chat_template` raises an error)
from threading import Thread

import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
# --- Model setup -------------------------------------------------------
# GPT-2 is a small causal LM; both artifacts are downloaded from the
# Hugging Face hub on first run and cached locally afterwards.
MODEL_NAME = "gpt2"
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
# --- Domain knowledge --------------------------------------------------
# Condensed research-paper summary that serves as the retrieval corpus
# for the chatbot's grounding (see get_relevant_summary below).
research_summary = """
Hybrid additive manufacturing processes combining FDM and SLA have been developed to improve resolution and mechanical strength.
Key insights include:
1. Optimal layer height to minimize delamination in multi-material prints.
2. Enhanced print quality with in-situ monitoring using thermal imaging.
3. Applications in medical implants, aerospace components, and lightweight structures.
4. Innovations in material blending techniques for improved flexibility and durability.
5. Cost analysis and efficiency improvements in production workflows.
"""
def get_relevant_summary(query, summary):
    """Return every line of *summary* containing *query*, case-insensitively.

    The entire query string must appear verbatim (ignoring case) inside a
    line for that line to be kept.  Matching lines are re-joined with
    newlines; when nothing matches, a fixed fallback message is returned.
    """
    needle = query.lower()
    matches = []
    for line in summary.split("\n"):
        if needle in line.lower():
            matches.append(line)
    if matches:
        return "\n".join(matches)
    return "No relevant information found in the research summary."
# Function to generate chatbot responses (streams partial text).
def generate_response(user_input, history, temperature, max_tokens):
    """Stream a chatbot reply grounded in the research summary.

    Args:
        user_input: The user's current question.
        history: List of (user, assistant) message pairs from earlier turns.
        temperature: Sampling temperature; 0 selects greedy decoding.
        max_tokens: Maximum number of new tokens to generate.

    Yields:
        The accumulated response text, growing as tokens stream in.
    """
    # Retrieve the summary lines relevant to this query.
    relevant_summary = get_relevant_summary(user_input, research_summary)

    # Build the domain-specific system instruction.
    domain_instruction = (
        "You are a chatbot specialized in additive manufacturing (3D printing). "
        "Below is a relevant section of the research paper summary:\n\n"
        + relevant_summary +
        "\n\nAnswer queries related to this research or general 3D printing topics. "
        "Provide accurate, concise, and actionable responses."
    )

    # BUG FIX: GPT-2's tokenizer defines no chat template, so the original
    # tokenizer.apply_chat_template(...) call raises ValueError at runtime
    # (this is the "Runtime error" shown on the Space).  Build a plain-text
    # prompt instead and tokenize it directly.
    prompt_lines = [domain_instruction]
    for user, assistant in history:
        prompt_lines.append(f"User: {user}")
        prompt_lines.append(f"Assistant: {assistant}")
    prompt_lines.append(f"User: {user_input}")
    prompt_lines.append("Assistant:")
    prompt = "\n".join(prompt_lines)
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(model.device)

    streamer = TextIteratorStreamer(
        tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True
    )
    generate_kwargs = dict(
        input_ids=input_ids,
        streamer=streamer,
        max_new_tokens=max_tokens,
        do_sample=temperature > 0,  # greedy decoding when temperature == 0
        # GPT-2 has no pad token; fall back to EOS so generate() does not
        # warn and padding behavior is well-defined.
        pad_token_id=tokenizer.eos_token_id,
    )
    if temperature > 0:
        # temperature is only meaningful when sampling; omitting it for
        # greedy decoding avoids transformers' invalid-temperature warnings.
        generate_kwargs["temperature"] = temperature

    # Run generation on a worker thread so this generator can consume the
    # streamer concurrently and yield partial output to the UI.
    worker = Thread(target=model.generate, kwargs=generate_kwargs)
    worker.start()

    chunks = []
    for text in streamer:
        chunks.append(text)
        yield "".join(chunks)
# --- Streamlit UI ------------------------------------------------------
st.title("Additive Manufacturing Chatbot")
st.markdown("### Ask questions about 3D printing or the research paper summary!")

# Sidebar controls for the generation parameters.
st.sidebar.header("Model Parameters")
temperature = st.sidebar.slider("Temperature", min_value=0.0, max_value=1.0, value=0.7, step=0.1)
max_tokens = st.sidebar.slider("Max Tokens", min_value=128, max_value=1024, value=512, step=32)

# Conversation history lives in session state so it survives reruns.
if "history" not in st.session_state:
    st.session_state.history = []

user_input = st.text_input("Your question:", placeholder="Ask me about 3D printing or hybrid manufacturing...")
if user_input:
    with st.spinner("Generating response..."):
        # BUG FIX: the original called st.empty() INSIDE the streaming
        # loop, which creates a brand-new empty element on every
        # iteration and never shows the partial text.  Create one
        # placeholder up front and update it in place so the response
        # visibly streams.
        placeholder = st.empty()
        response = ""
        for partial_response in generate_response(
            user_input, st.session_state.history, temperature, max_tokens
        ):
            response = partial_response
            placeholder.markdown(f"**Assistant:** {response}")
    # Record the completed turn so later reruns can rebuild the chat.
    st.session_state.history.append((user_input, response))
# Replay the full conversation recorded so far.
for question, answer in st.session_state.history:
    st.markdown(f"**You:** {question}")
    st.markdown(f"**Assistant:** {answer}")
    st.markdown("---")