Error Lover committed on
Commit
51c594b
·
1 Parent(s): badc803

fix startup when torch missing

Browse files
Files changed (3) hide show
  1. README.md +21 -6
  2. app.py +42 -19
  3. requirements.txt +4 -0
README.md CHANGED
@@ -1,12 +1,27 @@
1
  ---
2
- title: Kimi Coder Demo
3
- emoji: 🦀
4
- colorFrom: red
5
- colorTo: purple
6
  sdk: gradio
7
- sdk_version: 6.11.0
 
8
  app_file: app.py
9
  pinned: false
10
  ---
11
 
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  ---
2
+ title: kimi-coder-135m
3
+ emoji: 🤖
4
+ colorFrom: blue
5
+ colorTo: cyan
6
  sdk: gradio
7
+ sdk_version: "4.44.0"
8
+ python_version: "3.10"
9
  app_file: app.py
10
  pinned: false
11
  ---
12
 
13
+ # kimi-coder-135m Demo
14
+
15
+ **SmolLM2-135M** fine-tuned on ~15k coding samples distilled from [KIMI-K2.5-700000x](https://huggingface.co/datasets/ianncity/KIMI-K2.5-700000x).
16
+
17
+ ## Model
18
+
19
+ → [yava-code/kimi-coder-135m](https://huggingface.co/yava-code/kimi-coder-135m)
20
+
21
+ ## Training details
22
+
23
+ - Base: `HuggingFaceTB/SmolLM2-135M-Instruct`
24
+ - Data: 15k coding Q&A pairs (Python, JS, C++, etc.)
25
+ - CoT stripped from answers — model learns direct responses
26
+ - 1 epoch, lr=2e-4, cosine schedule, bf16
27
+ - Hardware: Colab T4 (~2h)
app.py CHANGED
@@ -1,19 +1,34 @@
1
  import gradio as gr
2
- import torch
3
- from transformers import AutoTokenizer, AutoModelForCausalLM, TextIteratorStreamer
4
  from threading import Thread
5
 
6
- MODEL = "yava-code/kimi-coder-135m"
 
 
 
 
 
7
 
8
- tok = AutoTokenizer.from_pretrained(MODEL)
9
- model = AutoModelForCausalLM.from_pretrained(
10
- MODEL,
11
- torch_dtype=torch.bfloat16,
12
- device_map="auto",
13
- )
14
- model.eval()
 
 
 
 
 
15
 
16
  def respond(msg, history, max_new, temp):
 
 
 
 
 
 
 
17
  chat = []
18
  for u, a in history:
19
  chat += [{"role": "user", "content": u}, {"role": "assistant", "content": a}]
@@ -50,14 +65,22 @@ SmolLM2-135M fine-tuned on 15k coding samples distilled from KIMI-K2.5.
50
  Model: [yava-code/kimi-coder-135m](https://huggingface.co/yava-code/kimi-coder-135m)
51
  """
52
  )
53
- chatbot = gr.ChatInterface(
54
- respond,
55
- additional_inputs=[
56
- gr.Slider(64, 1024, value=512, label="Max new tokens"),
57
- gr.Slider(0, 1, value=0.3, step=0.05, label="Temperature"),
58
- ],
59
- examples=EXAMPLES,
60
- title="",
61
- )
 
 
 
 
 
 
 
 
62
 
63
  demo.launch()
 
1
  import gradio as gr
 
 
2
  from threading import Thread
3
 
4
+ err = None
5
+ try:
6
+ import torch
7
+ from transformers import AutoTokenizer, AutoModelForCausalLM, TextIteratorStreamer
8
+ except ModuleNotFoundError as e:
9
+ err = e
10
 
11
+ MODEL = "yava-code/kimi-coder-135m"
12
+ tok = None
13
+ model = None
14
+
15
+ if err is None:
16
+ tok = AutoTokenizer.from_pretrained(MODEL)
17
+ model = AutoModelForCausalLM.from_pretrained(
18
+ MODEL,
19
+ torch_dtype=torch.bfloat16,
20
+ device_map="auto",
21
+ )
22
+ model.eval()
23
 
24
  def respond(msg, history, max_new, temp):
25
+ if err is not None:
26
+ yield (
27
+ f"missing dependency: `{err.name}`\n\n"
28
+ "add it to requirements.txt and rebuild the space."
29
+ )
30
+ return
31
+
32
  chat = []
33
  for u, a in history:
34
  chat += [{"role": "user", "content": u}, {"role": "assistant", "content": a}]
 
65
  Model: [yava-code/kimi-coder-135m](https://huggingface.co/yava-code/kimi-coder-135m)
66
  """
67
  )
68
+ if err is not None:
69
+ gr.Markdown(
70
+ f"### startup warning\n"
71
+ f"missing dependency: `{err.name}`\n\n"
72
+ f"current requirements include `torch`, so this usually means the build failed.\n"
73
+ f"try restarting/rebuilding the space."
74
+ )
75
+ else:
76
+ chatbot = gr.ChatInterface(
77
+ respond,
78
+ additional_inputs=[
79
+ gr.Slider(64, 1024, value=512, label="Max new tokens"),
80
+ gr.Slider(0, 1, value=0.3, step=0.05, label="Temperature"),
81
+ ],
82
+ examples=EXAMPLES,
83
+ title="",
84
+ )
85
 
86
  demo.launch()
requirements.txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ transformers>=4.40
2
+ accelerate
3
+ gradio
4
+ torch