Update src/streamlit_app.py

src/streamlit_app.py  CHANGED  (+17, -34)

@@ -1,40 +1,23 @@
-import altair as alt
-import numpy as np
-import pandas as pd
 import streamlit as st
-
-"""
-# Welcome to Streamlit!
-
-Edit `/streamlit_app.py` to customize this app to your heart's desire :heart:
-If you have any questions, checkout our [documentation](https://docs.streamlit.io) and [community
-forums](https://discuss.streamlit.io).
-
-In the meantime, below is an example of what you can do with just a few lines of code:
-"""
-
-num_points = st.slider("Number of points in spiral", 1, 10000, 1100)
-num_turns = st.slider("Number of turns in spiral", 1, 300, 31)
-
-indices = np.linspace(0, 1, num_points)
-theta = 2 * np.pi * num_turns * indices
-radius = indices
-
-x = radius * np.cos(theta)
-y = radius * np.sin(theta)
-
-df = pd.DataFrame({
-    "x": x,
-    "y": y,
-    "idx": indices,
-    "rand": np.random.randn(num_points),
-})
-
-st.altair_chart(alt.Chart(df, height=700, width=700)
-    .mark_point(filled=True)
-    .encode(
-        x=alt.X("x", axis=None),
-        y=alt.Y("y", axis=None),
-        color=alt.Color("idx", legend=None, scale=alt.Scale()),
-        size=alt.Size("rand", legend=None, scale=alt.Scale(range=[1, 150])),
-    ))
+from transformers import AutoTokenizer, AutoModelForCausalLM
+import torch
+
+# Load model and tokenizer
+MODEL_NAME = "Johannes/code-generation-model-fine-tuned-to-produce-good-code-snippets"
+tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
+model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
+
+st.title("Code Generation Model")
+
+prompt = st.text_area("Enter a code prompt:")
+
+max_length = st.slider("Maximum generated tokens", min_value=50, max_value=500, value=150)
+
+if st.button("Generate Code"):
+    if prompt.strip() != "":
+        inputs = tokenizer(prompt, return_tensors="pt")
+        outputs = model.generate(**inputs, max_length=max_length, do_sample=True, temperature=0.7)
+        generated_code = tokenizer.decode(outputs[0], skip_special_tokens=True)
+        st.code(generated_code, language="python")
+    else:
+        st.warning("Please enter a prompt.")
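
A minimal sketch of one possible refinement, not part of the commit: the added code loads the tokenizer and model at module level, and Streamlit re-executes the whole script on every interaction, so the load is repeated on each rerun. The sketch below assumes the same Streamlit + transformers stack, wraps the load in st.cache_resource (available in Streamlit 1.18 and later), and uses max_new_tokens so the slider bounds only the generated continuation rather than prompt plus continuation. The model name is taken from the commit and is assumed to resolve on the Hugging Face Hub.

import streamlit as st
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Name from the commit above; assumed to be a valid Hub repo.
MODEL_NAME = "Johannes/code-generation-model-fine-tuned-to-produce-good-code-snippets"

@st.cache_resource
def load_model(name: str):
    # Runs once per Streamlit server process; later reruns reuse the cached objects.
    tokenizer = AutoTokenizer.from_pretrained(name)
    model = AutoModelForCausalLM.from_pretrained(name)
    model.eval()
    return tokenizer, model

tokenizer, model = load_model(MODEL_NAME)

st.title("Code Generation Model")
prompt = st.text_area("Enter a code prompt:")
max_new_tokens = st.slider("Maximum generated tokens", min_value=50, max_value=500, value=150)

if st.button("Generate Code"):
    if prompt.strip():
        inputs = tokenizer(prompt, return_tensors="pt")
        with torch.no_grad():  # inference only, no gradients needed
            outputs = model.generate(
                **inputs,
                max_new_tokens=max_new_tokens,  # counts only newly generated tokens
                do_sample=True,
                temperature=0.7,
            )
        generated_code = tokenizer.decode(outputs[0], skip_special_tokens=True)
        st.code(generated_code, language="python")
    else:
        st.warning("Please enter a prompt.")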