Commit History

Update app.py
ef89148
verified

VIATEUR-AI committed on

Update app.py
4159988
verified

VIATEUR-AI committed on

Update app.py
ff175bb
verified

VIATEUR-AI committed on

Update app.py
5359afb
verified

VIATEUR-AI committed on

Update app.py
96be3ae
verified

VIATEUR-AI committed on

Create requirements . text
dd02363
verified

VIATEUR-AI committed on

Delete requirements . text
53eb188
verified

VIATEUR-AI committed on

Update requirements . text
7b6b8bc
verified

VIATEUR-AI committed on

Update app.py
81a67a0
verified

VIATEUR-AI committed on

Update app.py
b915e38
verified

VIATEUR-AI committed on

Update app.py
1085ded
verified

VIATEUR-AI committed on

gradio fpdf2
a7d4547
verified

VIATEUR-AI committed on

gradio fpdf
f5c19f8
verified

VIATEUR-AI committed on

Update requirements . text
4935344
verified

VIATEUR-AI committed on

Update app.py
58f39bc
verified

VIATEUR-AI committed on

Update requirements . text
2314f50
verified

VIATEUR-AI committed on

Update requirements . text
9a0a815
verified

VIATEUR-AI committed on

Update app.py
ba6d85f
verified

VIATEUR-AI committed on

Update app.py
504a377
verified

VIATEUR-AI committed on

Update app.py
b244d8e
verified

VIATEUR-AI committed on

gradio pyttsx3 SpeechRecognition fpdf
3a78f6e
verified

VIATEUR-AI committed on

# app.py — Vocational training assistant (Kinyarwanda) built on Gradio.
#
# Flow: detect the trade and skill level from the user's question, answer
# with a lesson plus safety tips, speak the answer aloud (best effort) and
# generate a PDF completion certificate.
import gradio as gr
import pyttsx3
import speech_recognition as sr  # imported by the original app; not used below
from fpdf import FPDF
import datetime
import re

# --------------------------
# VOCATIONAL KNOWLEDGE BASE
# --------------------------
# Trade -> level -> lesson text (mixed Kinyarwanda/English).
knowledge = {
    "gusudira": {
        "beginner": "Ibikoresho: machine, gloves, goggles. Tangira wiga safety.",
        "intermediate": "Wiga guhuza ibyuma n'uburyo bwo gukomeza umuriro.",
        "advanced": "Professional welding techniques & finishing."
    },
    "ububaji": {
        "beginner": "Saw, hammer, tape measure. Tangira upime.",
        "intermediate": "Gukora furniture n'imisusire.",
        "advanced": "Finishing, design, na business."
    },
    "amashanyarazi": {
        "beginner": "Wires, gloves. Tangira wiga safety.",
        "intermediate": "Guhuza circuits zoroheje.",
        "advanced": "Professional wiring & maintenance."
    },
    "gukanika": {
        "beginner": "Check engine basics, tools.",
        "intermediate": "Engine maintenance, basic repair.",
        "advanced": "Advanced troubleshooting and repair."
    },
    "plumbing": {
        "beginner": "Pipes, wrench, basic tools. Learn safety.",
        "intermediate": "Fix leaks, connect pipes.",
        "advanced": "Install complex plumbing systems."
    },
    "kudoda": {
        "beginner": "Needle, fabric, threads. Learn basic stitching.",
        "intermediate": "Make simple clothes.",
        "advanced": "Design, tailoring, finishing."
    },
    "masonry": {
        "beginner": "Bricks, cement, trowel. Learn basic safety.",
        "intermediate": "Build walls, small structures.",
        "advanced": "Advanced masonry & finishing."
    },
    "electronics": {
        "beginner": "TV, radio basics, components.",
        "intermediate": "Assemble simple circuits.",
        "advanced": "Advanced electronics troubleshooting."
    },
    "ict": {
        "beginner": "Computer basics, typing, file management.",
        "intermediate": "Office software, internet basics.",
        "advanced": "Networking, software troubleshooting."
    }
}

# Trade -> safety advice (Kinyarwanda).
safety = {
    "gusudira": "Ambara gloves na goggles. Irinde umuriro.",
    "ububaji": "Ambara gloves, irinde gukata ibiti nabi.",
    "amashanyarazi": "Zimya umuriro mbere yo gukora.",
    "gukanika": "Irinde moteri ishyushye, ambara gloves.",
    "plumbing": "Ambara gloves, ibikoresho byizewe.",
    "kudoda": "Irinde kudoda nabi, ambara gloves.",
    "masonry": "Ambara gloves na helmet, witondere ibikoresho.",
    "electronics": "Irinde amashanyarazi, ambara gloves.",
    "ict": "Irinde kwandura virus, backup data."
}


# --------------------------
# UTILITY FUNCTIONS
# --------------------------
def detect_skill(text):
    """Return the first known trade mentioned in *text*, or "unknown".

    Matching is a simple case-insensitive substring test against the
    knowledge-base keys.
    """
    lowered = text.lower()  # lowercase once instead of once per trade
    for skill in knowledge:
        if skill in lowered:
            return skill
    return "unknown"


def detect_level(text):
    """Infer the learner's level from keywords; defaults to beginner."""
    lowered = text.lower()
    if "ntangiriro" in lowered or "ntafite ubumenyi" in lowered:
        return "beginner"
    if "maze igihe" in lowered or "nzi bike" in lowered:
        return "intermediate"
    return "beginner"


def safety_advice(skill):
    """Return trade-specific safety tips, with a generic fallback."""
    return safety.get(skill, "Kurikiza amabwiriza rusange y'umutekano.")


def speak_text(text):
    """Speak *text* aloud via pyttsx3; best effort only.

    Fix: pyttsx3.init() raises on headless servers (no TTS driver
    installed), which previously crashed the whole request. The spoken
    output is a nice-to-have, so failures are logged and swallowed.
    """
    try:
        engine = pyttsx3.init()
        engine.say(text)
        engine.runAndWait()
    except Exception as exc:  # no audio device/driver — keep serving text
        print(f"TTS unavailable: {exc}")


# --------------------------
# CERTIFICATE GENERATOR
# --------------------------
def generate_certificate(name, skill, level):
    """Create a one-page PDF certificate and return its file path.

    Fix: *name* is raw user input; it is sanitized before being used in
    the output file name so it cannot contain path separators or other
    unsafe characters.
    """
    pdf = FPDF()
    pdf.add_page()
    pdf.set_font("Arial", "B", 24)
    pdf.cell(0, 40, "Certificate of Completion", ln=True, align="C")
    pdf.set_font("Arial", "", 16)
    pdf.ln(10)
    pdf.multi_cell(
        0, 10,
        f"This certifies that {name} has completed the {level} level of {skill} vocational training.",
        align="C")
    pdf.ln(20)
    pdf.cell(0, 10, f"Date: {datetime.datetime.now().strftime('%Y-%m-%d')}",
             ln=True, align="C")
    safe_name = re.sub(r"[^A-Za-z0-9_-]+", "_", name) or "Student"
    filename = f"{safe_name}_{skill}_{level}.pdf"
    pdf.output(filename)
    return filename


# --------------------------
# MAIN FUNCTION
# --------------------------
def vocational_ai(query, user_name="Student"):
    """Answer *query* and, when a trade is recognized, emit a certificate.

    Returns (response_text, certificate_path_or_None).
    """
    skill = detect_skill(query)
    level = detect_level(query)
    if skill == "unknown":
        response = "Sinamenye umwuga. Sobanura neza icyo ushaka kwiga."
        cert_file = None
    else:
        lesson = knowledge.get(skill, {}).get(level, "Nta masomo ahari kuri iyi level.")
        advice = safety_advice(skill)
        response = f"📚 Isomo:\n{lesson}\n\n⚠️ Safety:\n{advice}\n\n➡️ Intambwe ikurikira: Komeza wimenyereze buri munsi."
        cert_file = generate_certificate(user_name, skill, level)
    speak_text(response)  # AI ivuga igisubizo (speaks the answer, best effort)
    return response, cert_file


# --------------------------
# GRADIO INTERFACE
# --------------------------
iface = gr.Interface(
    fn=vocational_ai,
    inputs=[
        gr.Textbox(label="Andika ikibazo cyawe"),
        gr.Textbox(label="Izina ryawe (kuri certificate)", placeholder="John Doe")
    ],
    outputs=[
        "text",
        gr.File(label="Download Certificate")
    ],
    title="Vocational AI - Full Prototype",
    description="Andika ikibazo cyawe ku mwuga, AI izagusubiza hamwe na safety tips kandi itange certificate."
)

iface.launch()
3bc7a21
verified

VIATEUR-AI committed on

# config.py — environment detection, chatbot UI configs, suggestions and
# model options for the Qwen3 chat demo.
import os

from modelscope_studio.components.pro.chatbot import (
    ChatbotActionConfig, ChatbotBotConfig, ChatbotUserConfig,
    ChatbotWelcomeConfig)

# Env
# "studio" means the app runs on ModelScope (CN); toggles localized text/links.
is_cn = os.getenv('MODELSCOPE_ENVIRONMENT') == 'studio'
api_key = os.getenv('API_KEY')


def get_text(text: str, cn_text: str):
    """Return *cn_text* in the CN (ModelScope studio) environment, else *text*."""
    return cn_text if is_cn else text


# Save history in browser
save_history = True


# Chatbot Config
def user_config(disabled_actions=None):
    """Config for user-authored messages: copy/edit plus confirmed delete."""
    return ChatbotUserConfig(
        class_names=dict(content="user-message-content"),
        actions=[
            "copy", "edit",
            ChatbotActionConfig(
                action="delete",
                popconfirm=dict(
                    title=get_text("Delete the message", "删除消息"),
                    description=get_text("Are you sure to delete this message?",
                                         "确认删除该消息?"),
                    okButtonProps=dict(danger=True)))
        ],
        disabled_actions=disabled_actions)


def bot_config(disabled_actions=None):
    """Config for assistant messages: copy/edit, confirmed retry/delete, avatar."""
    return ChatbotBotConfig(
        actions=[
            "copy", "edit",
            ChatbotActionConfig(
                action="retry",
                popconfirm=dict(
                    title=get_text("Regenerate the message", "重新生成消息"),
                    description=get_text(
                        "Regenerate the message will also delete all subsequent messages.",
                        "重新生成消息会删除所有后续消息。"),
                    okButtonProps=dict(danger=True))),
            ChatbotActionConfig(
                action="delete",
                popconfirm=dict(
                    title=get_text("Delete the message", "删除消息"),
                    description=get_text("Are you sure to delete this message?",
                                         "确认删除该消息?"),
                    okButtonProps=dict(danger=True)))
        ],
        avatar="./assets/qwen.png",
        disabled_actions=disabled_actions)


def welcome_config():
    """Welcome screen shown before the first message (prompt shortcuts)."""
    return ChatbotWelcomeConfig(
        variant="borderless",
        icon="./assets/qwen.png",
        title=get_text("Hello, I'm Qwen3", "你好,我是 Qwen3"),
        description=get_text("Select a model and enter text to get started.",
                             "选择模型并输入文本,开始对话吧。"),
        prompts=dict(
            title=get_text("How can I help you today?", "有什么我能帮助你的吗?"),
            styles={
                "list": {
                    "width": "100%",
                },
                "item": {
                    "flex": 1,
                },
            },
            items=[{
                "label": get_text("📅 Make a plan", "📅 制定计划"),
                "children": [{
                    "description": get_text("Help me with a plan to start a business",
                                            "帮助我制定一个创业计划")
                }, {
                    "description": get_text("Help me with a plan to achieve my goals",
                                            "帮助我制定一个实现目标的计划")
                }, {
                    "description": get_text("Help me with a plan for a successful interview",
                                            "帮助我制定一个成功的面试计划")
                }]
            }, {
                "label": get_text("🖋 Help me write", "🖋 帮我写"),
                "children": [{
                    "description": get_text("Help me write a story with a twist ending",
                                            "帮助我写一个带有意外结局的故事")
                }, {
                    "description": get_text("Help me write a blog post on mental health",
                                            "帮助我写一篇关于心理健康的博客文章")
                }, {
                    "description": get_text("Help me write a letter to my future self",
                                            "帮助我写一封给未来自己的信")
                }]
            }]),
    )


# Sender "/" suggestions; values mirror the welcome-screen prompts.
DEFAULT_SUGGESTIONS = [{
    "label": get_text("Make a plan", "制定计划"),
    "value": get_text("Make a plan", "制定计划"),
    "children": [{
        "label": get_text("Start a business", "开始创业"),
        "value": get_text("Help me with a plan to start a business",
                          "帮助我制定一个创业计划")
    }, {
        "label": get_text("Achieve my goals", "实现我的目标"),
        "value": get_text("Help me with a plan to achieve my goals",
                          "帮助我制定一个实现目标的计划")
    }, {
        "label": get_text("Successful interview", "成功的面试"),
        "value": get_text("Help me with a plan for a successful interview",
                          "帮助我制定一个成功的面试计划")
    }]
}, {
    "label": get_text("Help me write", "帮我写"),
    "value": get_text("Help me write", "帮我写"),
    "children": [{
        "label": get_text("Story with a twist ending", "带有意外结局的故事"),
        "value": get_text("Help me write a story with a twist ending",
                          "帮助我写一个带有意外结局的故事")
    }, {
        "label": get_text("Blog post on mental health", "关于心理健康的博客文章"),
        "value": get_text("Help me write a blog post on mental health",
                          "帮助我写一篇关于心理健康的博客文章")
    }, {
        "label": get_text("Letter to my future self", "给未来自己的信"),
        "value": get_text("Help me write a letter to my future self",
                          "帮助我写一封给未来自己的信")
    }]
}]

DEFAULT_SYS_PROMPT = "You are a helpful and harmless assistant."

# Thinking-budget slider bounds; the app multiplies the chosen value by 1024
# when calling the model — presumably units of 1K tokens (verify in app.py).
MIN_THINKING_BUDGET = 1
MAX_THINKING_BUDGET = 38
DEFAULT_THINKING_BUDGET = 38

DEFAULT_MODEL = "qwen3-235b-a22b"

MODEL_OPTIONS = [
    {
        "label": get_text("Qwen3-235B-A22B", "通义千问3-235B-A22B"),
        "modelId": "Qwen/Qwen3-235B-A22B",
        "value": "qwen3-235b-a22b"
    },
    {
        "label": get_text("Qwen3-32B", "通义千问3-32B"),
        "modelId": "Qwen/Qwen3-32B",
        "value": "qwen3-32b"
    },
    {
        "label": get_text("Qwen3-30B-A3B", "通义千问3-30B-A3B"),
        "modelId": "Qwen/Qwen3-30B-A3B",
        "value": "qwen3-30b-a3b"
    },
    {
        "label": get_text("Qwen3-14B", "通义千问3-14B"),
        "modelId": "Qwen/Qwen3-14B",
        "value": "qwen3-14b"
    },
    {
        "label": get_text("Qwen3-8B", "通义千问3-8B"),
        "modelId": "Qwen/Qwen3-8B",
        "value": "qwen3-8b"
    },
    {
        "label": get_text("Qwen3-4B", "通义千问3-4B"),
        "modelId": "Qwen/Qwen3-4B",
        "value": "qwen3-4b"
    },
    {
        "label": get_text("Qwen3-1.7B", "通义千问3-1.7B"),
        "modelId": "Qwen/Qwen3-1.7B",
        "value": "qwen3-1.7b"
    },
    {
        "label": get_text("Qwen3-0.6B", "通义千问3-0.6B"),
        "modelId": "Qwen/Qwen3-0.6B",
        "value": "qwen3-0.6b"
    },
]

# Fix: replaced the fragile `cond and a or b` idiom with a conditional
# expression (the and/or form silently picks the wrong branch if the first
# value is falsy).
for model in MODEL_OPTIONS:
    model["link"] = (f"https://modelscope.cn/models/{model['modelId']}"
                     if is_cn else
                     f"https://huggingface.co/{model['modelId']}")

MODEL_OPTIONS_MAP = {model["value"]: model for model in MODEL_OPTIONS}

DEFAULT_LOCALE = 'zh_CN' if is_cn else 'en_US'

DEFAULT_THEME = {
    "token": {
        "colorPrimary": "#6A57FF",
    }
}

DEFAULT_SETTINGS = {
    "model": DEFAULT_MODEL,
    "sys_prompt": DEFAULT_SYS_PROMPT,
    "thinking_budget": DEFAULT_THINKING_BUDGET
}
1ddfa33
verified

VIATEUR-AI committed on

gradio modelscope_studio dashscope
b2ab057
verified

VIATEUR-AI committed on

# app.py — Qwen3 chat demo: Gradio + modelscope_studio UI, DashScope backend.
import uuid
import time
import json

import gradio as gr
import modelscope_studio.components.antd as antd
import modelscope_studio.components.antdx as antdx
import modelscope_studio.components.base as ms
import modelscope_studio.components.pro as pro
import dashscope
from config import DEFAULT_LOCALE, DEFAULT_SETTINGS, DEFAULT_THEME, DEFAULT_SUGGESTIONS, save_history, get_text, user_config, bot_config, welcome_config, api_key, MODEL_OPTIONS_MAP
from ui_components.logo import Logo
from ui_components.settings_header import SettingsHeader
from ui_components.thinking_button import ThinkingButton
from dashscope import Generation

dashscope.api_key = api_key


def format_history(history, sys_prompt):
    """Convert the chatbot history into DashScope message dicts.

    NOTE(review): sys_prompt is currently unused — the system-message block
    below is commented out in the original.
    """
    # messages = [{
    #     "role": "system",
    #     "content": sys_prompt,
    # }]
    messages = []
    for item in history:
        if item["role"] == "user":
            messages.append({"role": "user", "content": item["content"]})
        elif item["role"] == "assistant":
            # Keep only the plain-text parts; drop "tool" (thinking) entries.
            contents = [{
                "type": "text",
                "text": content["content"]
            } for content in item["content"] if content["type"] == "text"]
            messages.append({
                "role": "assistant",
                "content": contents[0]["text"] if len(contents) > 0 else ""
            })
    return messages


class Gradio_Events:
    """Static event handlers wired to the Gradio components below."""

    @staticmethod
    def submit(state_value):
        """Stream a model answer for the active conversation.

        Yields chatbot/state updates as thinking and answer tokens arrive.
        """
        context = state_value["conversation_contexts"][state_value["conversation_id"]]
        history = context["history"]
        settings = context["settings"]
        enable_thinking = context["enable_thinking"]
        model = settings.get("model")
        messages = format_history(history,
                                  sys_prompt=settings.get("sys_prompt", ""))

        # Placeholder assistant message shown while streaming.
        history.append({
            "role": "assistant",
            "content": [],
            "key": str(uuid.uuid4()),
            "header": MODEL_OPTIONS_MAP.get(model, {}).get("label", None),
            "loading": True,
            "status": "pending"
        })
        yield {
            chatbot: gr.update(value=history),
            state: gr.update(value=state_value),
        }

        try:
            response = Generation.call(
                model=model,
                messages=messages,
                stream=True,
                result_format='message',
                incremental_output=True,
                enable_thinking=enable_thinking,
                thinking_budget=settings.get("thinking_budget", 1) * 1024)

            start_time = time.time()
            reasoning_content = ""
            answer_content = ""
            is_thinking = False
            is_answering = False
            # contents[0] holds the "thinking" tool entry, contents[1] the answer.
            contents = [None, None]

            for chunk in response:
                message = chunk.output.choices[0].message
                if (not message.get("content")
                        and not message.get("reasoning_content")):
                    continue
                delta = message

                # Reasoning stream: open the "Thinking..." panel on first token.
                if hasattr(delta, 'reasoning_content') and delta.reasoning_content:
                    if not is_thinking:
                        contents[0] = {
                            "type": "tool",
                            "content": "",
                            "options": {
                                "title": get_text("Thinking...", "思考中..."),
                                "status": "pending"
                            },
                            "copyable": False,
                            "editable": False
                        }
                        is_thinking = True
                    reasoning_content += delta.reasoning_content

                # Answer stream: on first token, close the thinking panel.
                if hasattr(delta, 'content') and delta.content:
                    if not is_answering:
                        thought_cost_time = "{:.2f}".format(time.time() - start_time)
                        if contents[0]:
                            contents[0]["options"]["title"] = get_text(
                                f"End of Thought ({thought_cost_time}s)",
                                f"已深度思考 (用时{thought_cost_time}s)")
                            contents[0]["options"]["status"] = "done"
                        contents[1] = {
                            "type": "text",
                            "content": "",
                        }
                        is_answering = True
                    answer_content += delta.content

                if contents[0]:
                    contents[0]["content"] = reasoning_content
                if contents[1]:
                    contents[1]["content"] = answer_content
                history[-1]["content"] = [entry for entry in contents if entry]
                history[-1]["loading"] = False
                yield {
                    chatbot: gr.update(value=history),
                    state: gr.update(value=state_value)
                }

            print("model: ", model, "-", "reasoning_content: ",
                  reasoning_content, "\n", "content: ", answer_content)
            history[-1]["status"] = "done"
            cost_time = "{:.2f}".format(time.time() - start_time)
            history[-1]["footer"] = get_text(f"{cost_time}s", f"用时{cost_time}s")
            yield {
                chatbot: gr.update(value=history),
                state: gr.update(value=state_value),
            }
        except Exception as e:
            # Surface the error inline in the chat, then re-raise.
            print("model: ", model, "-", "Error: ", e)
            history[-1]["loading"] = False
            history[-1]["status"] = "done"
            history[-1]["content"] += [{
                "type": "text",
                "content": f'<span style="color: var(--color-red-500)">{str(e)}</span>'
            }]
            yield {
                chatbot: gr.update(value=history),
                state: gr.update(value=state_value)
            }
            raise e

    @staticmethod
    def add_message(input_value, settings_form_value, thinking_btn_state_value,
                    state_value):
        """Append the user message (creating a conversation if needed), then stream."""
        if not state_value["conversation_id"]:
            new_id = str(uuid.uuid4())
            state_value["conversation_id"] = new_id
            state_value["conversation_contexts"][new_id] = {"history": []}
            state_value["conversations"].append({
                "label": input_value,
                "key": new_id
            })

        history = state_value["conversation_contexts"][
            state_value["conversation_id"]]["history"]
        # Refresh settings/thinking flag for this conversation on every send.
        state_value["conversation_contexts"][state_value["conversation_id"]] = {
            "history": history,
            "settings": settings_form_value,
            "enable_thinking": thinking_btn_state_value["enable_thinking"]
        }
        history.append({
            "role": "user",
            "content": input_value,
            "key": str(uuid.uuid4())
        })

        yield Gradio_Events.preprocess_submit(clear_input=True)(state_value)
        try:
            for chunk in Gradio_Events.submit(state_value):
                yield chunk
        except Exception as e:
            raise e
        finally:
            yield Gradio_Events.postprocess_submit(state_value)

    @staticmethod
    def preprocess_submit(clear_input=True):
        """Build a handler that locks the UI while a generation is running."""

        def preprocess_submit_handler(state_value):
            history = state_value["conversation_contexts"][
                state_value["conversation_id"]]["history"]
            return {
                **({
                    input:
                    gr.update(value=None, loading=True)
                    if clear_input else gr.update(loading=True),
                } if clear_input else {}),
                conversations:
                gr.update(active_key=state_value["conversation_id"],
                          items=list(
                              map(
                                  lambda item: {
                                      **item,
                                      "disabled":
                                      True if item["key"] != state_value[
                                          "conversation_id"] else False,
                                  }, state_value["conversations"]))),
                add_conversation_btn:
                gr.update(disabled=True),
                clear_btn:
                gr.update(disabled=True),
                conversation_delete_menu_item:
                gr.update(disabled=True),
                chatbot:
                gr.update(value=history,
                          bot_config=bot_config(
                              disabled_actions=['edit', 'retry', 'delete']),
                          user_config=user_config(
                              disabled_actions=['edit', 'delete'])),
                state:
                gr.update(value=state_value),
            }

        return preprocess_submit_handler

    @staticmethod
    def postprocess_submit(state_value):
        """Unlock the UI after a generation finishes or is cancelled."""
        history = state_value["conversation_contexts"][
            state_value["conversation_id"]]["history"]
        return {
            input: gr.update(loading=False),
            conversation_delete_menu_item: gr.update(disabled=False),
            clear_btn: gr.update(disabled=False),
            conversations: gr.update(items=state_value["conversations"]),
            add_conversation_btn: gr.update(disabled=False),
            chatbot: gr.update(value=history,
                               bot_config=bot_config(),
                               user_config=user_config()),
            state: gr.update(value=state_value),
        }

    @staticmethod
    def cancel(state_value):
        """Mark the in-flight assistant message as paused and unlock the UI."""
        history = state_value["conversation_contexts"][
            state_value["conversation_id"]]["history"]
        history[-1]["loading"] = False
        history[-1]["status"] = "done"
        history[-1]["footer"] = get_text("Chat completion paused", "对话已暂停")
        return Gradio_Events.postprocess_submit(state_value)

    @staticmethod
    def delete_message(state_value, e: gr.EventData):
        """Remove one message (by index) from the active conversation."""
        index = e._data["payload"][0]["index"]
        history = state_value["conversation_contexts"][
            state_value["conversation_id"]]["history"]
        state_value["conversation_contexts"][
            state_value["conversation_id"]]["history"] = (
                history[:index] + history[index + 1:])
        return gr.update(value=state_value)

    @staticmethod
    def edit_message(state_value, chatbot_value, e: gr.EventData):
        """Copy the edited content from the chatbot widget back into state."""
        index = e._data["payload"][0]["index"]
        history = state_value["conversation_contexts"][
            state_value["conversation_id"]]["history"]
        history[index]["content"] = chatbot_value[index]["content"]
        return gr.update(value=state_value)

    @staticmethod
    def regenerate_message(settings_form_value, thinking_btn_state_value,
                           state_value, e: gr.EventData):
        """Truncate history at the retried message and re-run generation."""
        index = e._data["payload"][0]["index"]
        history = state_value["conversation_contexts"][
            state_value["conversation_id"]]["history"]
        history = history[:index]
        state_value["conversation_contexts"][state_value["conversation_id"]] = {
            "history": history,
            "settings": settings_form_value,
            "enable_thinking": thinking_btn_state_value["enable_thinking"]
        }
        yield Gradio_Events.preprocess_submit()(state_value)
        try:
            for chunk in Gradio_Events.submit(state_value):
                yield chunk
        except Exception as e:
            raise e
        finally:
            yield Gradio_Events.postprocess_submit(state_value)

    @staticmethod
    def select_suggestion(input_value, e: gr.EventData):
        """Replace the trailing "/" trigger with the chosen suggestion."""
        input_value = input_value[:-1] + e._data["payload"][0]
        return gr.update(value=input_value)

    @staticmethod
    def apply_prompt(e: gr.EventData):
        """Fill the input with a welcome-screen prompt."""
        return gr.update(value=e._data["payload"][0]["value"]["description"])

    @staticmethod
    def new_chat(thinking_btn_state, state_value):
        """Reset to an empty conversation with default settings."""
        if not state_value["conversation_id"]:
            return gr.skip()
        state_value["conversation_id"] = ""
        thinking_btn_state["enable_thinking"] = True
        return gr.update(active_key=state_value["conversation_id"]), gr.update(
            value=None), gr.update(value=DEFAULT_SETTINGS), gr.update(
                value=thinking_btn_state), gr.update(value=state_value)

    @staticmethod
    def select_conversation(thinking_btn_state_value, state_value,
                            e: gr.EventData):
        """Switch the UI to the conversation picked in the sidebar."""
        active_key = e._data["payload"][0]
        if state_value["conversation_id"] == active_key or (
                active_key not in state_value["conversation_contexts"]):
            return gr.skip()
        state_value["conversation_id"] = active_key
        thinking_btn_state_value["enable_thinking"] = state_value[
            "conversation_contexts"][active_key]["enable_thinking"]
        return gr.update(active_key=active_key), gr.update(
            value=state_value["conversation_contexts"][active_key]["history"]
        ), gr.update(value=state_value["conversation_contexts"][active_key]
                     ["settings"]), gr.update(
                         value=thinking_btn_state_value), gr.update(
                             value=state_value)

    @staticmethod
    def click_conversation_menu(state_value, e: gr.EventData):
        """Handle sidebar context-menu actions (currently only delete)."""
        conversation_id = e._data["payload"][0]["key"]
        operation = e._data["payload"][1]["key"]
        if operation == "delete":
            del state_value["conversation_contexts"][conversation_id]
            state_value["conversations"] = [
                item for item in state_value["conversations"]
                if item["key"] != conversation_id
            ]
            if state_value["conversation_id"] == conversation_id:
                # Deleted the active conversation: clear the chat pane too.
                state_value["conversation_id"] = ""
                return gr.update(
                    items=state_value["conversations"],
                    active_key=state_value["conversation_id"]), gr.update(
                        value=None), gr.update(value=state_value)
            else:
                return gr.update(
                    items=state_value["conversations"]), gr.skip(), gr.update(
                        value=state_value)
        return gr.skip()

    @staticmethod
    def toggle_settings_header(settings_header_state_value):
        """Show/hide the settings panel above the input."""
        settings_header_state_value["open"] = not settings_header_state_value["open"]
        return gr.update(value=settings_header_state_value)

    @staticmethod
    def clear_conversation_history(state_value):
        """Empty the active conversation's message list."""
        if not state_value["conversation_id"]:
            return gr.skip()
        state_value["conversation_contexts"][
            state_value["conversation_id"]]["history"] = []
        return gr.update(value=None), gr.update(value=state_value)

    @staticmethod
    def update_browser_state(state_value):
        """Persist conversations into the browser's local storage."""
        return gr.update(value=dict(
            conversations=state_value["conversations"],
            conversation_contexts=state_value["conversation_contexts"]))

    @staticmethod
    def apply_browser_state(browser_state_value, state_value):
        """Restore conversations from the browser's local storage on load."""
        state_value["conversations"] = browser_state_value["conversations"]
        state_value["conversation_contexts"] = browser_state_value[
            "conversation_contexts"]
        return gr.update(
            items=browser_state_value["conversations"]), gr.update(
                value=state_value)


css = """
.gradio-container {
  padding: 0 !important;
}

.gradio-container > main.fillable {
  padding: 0 !important;
}

#chatbot {
  height: calc(100vh - 21px - 16px);
  max-height: 1500px;
}

#chatbot .chatbot-conversations {
  height: 100vh;
  background-color: var(--ms-gr-ant-color-bg-layout);
  padding-left: 4px;
  padding-right: 4px;
}

#chatbot .chatbot-conversations .chatbot-conversations-list {
  padding-left: 0;
  padding-right: 0;
}

#chatbot .chatbot-chat {
  padding: 32px;
  padding-bottom: 0;
  height: 100%;
}

@media (max-width: 768px) {
  #chatbot .chatbot-chat {
    padding: 0;
  }
}

#chatbot .chatbot-chat .chatbot-chat-messages {
  flex: 1;
}

#chatbot .setting-form-thinking-budget .ms-gr-ant-form-item-control-input-content {
  display: flex;
  flex-wrap: wrap;
}
"""

# Expose the model map to the browser for client-side rendering.
model_options_map_json = json.dumps(MODEL_OPTIONS_MAP)
js = "function init() { window.MODEL_OPTIONS_MAP=" + model_options_map_json + "}"

with gr.Blocks(css=css, js=js, fill_width=True) as demo:
    # Per-session state: every conversation plus the active conversation id.
    state = gr.State({
        "conversation_contexts": {},
        "conversations": [],
        "conversation_id": "",
    })

    with ms.Application(), antdx.XProvider(
            theme=DEFAULT_THEME, locale=DEFAULT_LOCALE), ms.AutoLoading():
        with antd.Row(gutter=[20, 20], wrap=False, elem_id="chatbot"):
            # Left Column
            with antd.Col(md=dict(flex="0 0 260px", span=24, order=0),
                          span=0,
                          elem_style=dict(width=0),
                          order=1):
                with ms.Div(elem_classes="chatbot-conversations"):
                    with antd.Flex(vertical=True,
                                   gap="small",
                                   elem_style=dict(height="100%")):
                        # Logo
                        Logo()

                        # New Conversation Button
                        with antd.Button(value=None,
                                         color="primary",
                                         variant="filled",
                                         block=True) as add_conversation_btn:
                            ms.Text(get_text("New Conversation", "新建对话"))
                            with ms.Slot("icon"):
                                antd.Icon("PlusOutlined")

                        # Conversations List
                        with antdx.Conversations(
                                elem_classes="chatbot-conversations-list",
                        ) as conversations:
                            with ms.Slot('menu.items'):
                                with antd.Menu.Item(
                                        label="Delete", key="delete",
                                        danger=True
                                ) as conversation_delete_menu_item:
                                    with ms.Slot("icon"):
                                        antd.Icon("DeleteOutlined")

            # Right Column
            with antd.Col(flex=1, elem_style=dict(height="100%")):
                with antd.Flex(vertical=True,
                               gap="small",
                               elem_classes="chatbot-chat"):
                    # Chatbot
                    chatbot = pro.Chatbot(
                        elem_classes="chatbot-chat-messages",
                        height=0,
                        welcome_config=welcome_config(),
                        user_config=user_config(),
                        bot_config=bot_config())

                    # Input
                    with antdx.Suggestion(
                            items=DEFAULT_SUGGESTIONS,
                            # onKeyDown Handler in Javascript
                            should_trigger="""(e, { onTrigger, onKeyDown }) => {
  switch(e.key) {
    case '/':
      onTrigger()
      break
    case 'ArrowRight':
    case 'ArrowLeft':
    case 'ArrowUp':
    case 'ArrowDown':
      break;
    default:
      onTrigger(false)
  }
  onKeyDown(e)
}""") as suggestion:
                        with ms.Slot("children"):
                            with antdx.Sender(placeholder=get_text(
                                    "Enter \"/\" to get suggestions",
                                    "输入 \"/\" 获取提示"), ) as input:
                                with ms.Slot("header"):
                                    settings_header_state, settings_form = SettingsHeader(
                                    )
                                with ms.Slot("prefix"):
                                    with antd.Flex(
                                            gap=4,
                                            wrap=True,
                                            elem_style=dict(maxWidth='40vw')):
                                        with antd.Button(
                                                value=None,
                                                type="text") as setting_btn:
                                            with ms.Slot("icon"):
                                                antd.Icon("SettingOutlined")
                                        with antd.Button(
                                                value=None,
                                                type="text") as clear_btn:
                                            with ms.Slot("icon"):
                                                antd.Icon("ClearOutlined")
                                        thinking_btn_state = ThinkingButton()

    # Events Handler
    # Browser State Handler
    if save_history:
        browser_state = gr.BrowserState(
            {
                "conversation_contexts": {},
                "conversations": [],
            },
            storage_key="qwen3_chat_demo_storage")
        state.change(fn=Gradio_Events.update_browser_state,
                     inputs=[state],
                     outputs=[browser_state])
        demo.load(fn=Gradio_Events.apply_browser_state,
                  inputs=[browser_state, state],
                  outputs=[conversations, state])

    # Conversations Handler
    add_conversation_btn.click(fn=Gradio_Events.new_chat,
                               inputs=[thinking_btn_state, state],
                               outputs=[
                                   conversations, chatbot, settings_form,
                                   thinking_btn_state, state
                               ])
    conversations.active_change(fn=Gradio_Events.select_conversation,
                                inputs=[thinking_btn_state, state],
                                outputs=[
                                    conversations, chatbot, settings_form,
                                    thinking_btn_state, state
                                ])
    conversations.menu_click(fn=Gradio_Events.click_conversation_menu,
                             inputs=[state],
                             outputs=[conversations, chatbot, state])

    # Chatbot Handler
    chatbot.welcome_prompt_select(fn=Gradio_Events.apply_prompt,
                                  outputs=[input])
    chatbot.delete(fn=Gradio_Events.delete_message,
                   inputs=[state],
                   outputs=[state])
    chatbot.edit(fn=Gradio_Events.edit_message,
                 inputs=[state, chatbot],
                 outputs=[state])
    regenerating_event = chatbot.retry(
        fn=Gradio_Events.regenerate_message,
        inputs=[settings_form, thinking_btn_state, state],
        outputs=[
            input, clear_btn, conversation_delete_menu_item,
            add_conversation_btn, conversations, chatbot, state
        ])

    # Input Handler
    submit_event = input.submit(
        fn=Gradio_Events.add_message,
        inputs=[input, settings_form, thinking_btn_state, state],
        outputs=[
            input, clear_btn, conversation_delete_menu_item,
            add_conversation_btn, conversations, chatbot, state
        ])
    input.cancel(fn=Gradio_Events.cancel,
                 inputs=[state],
                 outputs=[
                     input, conversation_delete_menu_item, clear_btn,
                     conversations, add_conversation_btn, chatbot, state
                 ],
                 cancels=[submit_event, regenerating_event],
                 queue=False)

    # Input Actions Handler
    setting_btn.click(fn=Gradio_Events.toggle_settings_header,
                      inputs=[settings_header_state],
                      outputs=[settings_header_state])
    clear_btn.click(fn=Gradio_Events.clear_conversation_history,
                    inputs=[state],
                    outputs=[chatbot, state])
    suggestion.select(fn=Gradio_Events.select_suggestion,
                      inputs=[input],
                      outputs=[input])

if __name__ == "__main__":
    demo.queue(default_concurrency_limit=100,
               max_size=100).launch(ssr_mode=False, max_threads=100)
b217224
verified

VIATEUR-AI committed on

streamlit==1.30.0
transformers==5.5.0  # NOTE(review): 5.5.0 does not look like a published transformers release (4.x series) — verify the intended pin
torch==2.1.0
0c8a943
verified

VIATEUR-AI committed on

# app_options.py — option-based Kinyarwanda healthcare chatbot (Streamlit).
#
# The user picks a predefined question; an extractive-QA model answers it
# from a small Kinyarwanda knowledge base plus recent chat turns.
import streamlit as st
from transformers import AutoTokenizer, AutoModelForQuestionAnswering, pipeline

# 1️⃣ Load QA model
model_name = "distilbert-base-uncased-distilled-squad"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForQuestionAnswering.from_pretrained(model_name)
qa_pipeline = pipeline("question-answering", model=model, tokenizer=tokenizer)

# 2️⃣ Knowledge base in Kinyarwanda
healthcare_kb = """
Malariya iterwa n'udukoko twitwa Plasmodium twoherezwa n'udukoko tw'inkende (mosquitoes).
Ibimenyetso bya malariya ni umuriro, gucika intege, gucika umutsi, no kuribwa umutwe.
Kuvurwa bikorwa hakoreshejwe imiti ya malariya itangwa na muganga.
Gufata amazi menshi no kuruhuka ni ingenzi.

Ikirwara cy'inkorora n'ibicurane iterwa na virusi.
Ibimenyetso ni inkorora, kuribwa mu muhogo, no kuba inuma.
Kuruhuka, gufata amazi no gufata imiti yoroheje ni ingenzi.

Ibyo kurya neza n'isuku bifasha mu kurwanya indwara.
Kwitwararika ibiribwa no gukaraba intoki kenshi ni ingenzi.
"""

# 3️⃣ Initialize chat history (survives Streamlit reruns)
if 'chat_history' not in st.session_state:
    st.session_state.chat_history = []

st.title("AI Healthcare Chatbot (Kinyarwanda) - Option-based")
st.write("Hitamo ikibazo wifuza kubaza, maze AI igusubize. Ushobora guhitamo 'Exit' kugirango usohoke.")

# 4️⃣ Predefined options
options = [
    "Ibimenyetso bya malariya",
    "Uburyo bwo kuvura malariya",
    "Ibimenyetso by'inkorora n'ibicurane",
    "Uburyo bwo kwirinda indwara",
    "Exit"
]

user_choice = st.selectbox("Hitamo ikibazo:", options)

if st.button("Ohereza") and user_choice:
    if user_choice == "Exit":
        st.write("Chatbot: Murabeho! Mukomeze kwiyitaho.")
        st.session_state.chat_history = []
    else:
        question = user_choice
        st.session_state.chat_history.append(f"U: {question}")
        # Combine last 5 turns + knowledge base
        context = healthcare_kb + "\n" + "\n".join(st.session_state.chat_history[-5:])
        try:
            result = qa_pipeline(question=question, context=context)
            answer = result['answer']
            score = result['score']
        except Exception:
            # Fix: the original bare `except:` also swallowed SystemExit and
            # KeyboardInterrupt; catch only ordinary errors and fall back.
            answer = "Ndabona ntashoboye gusubiza neza iki kibazo."
            score = 0.0
        st.session_state.chat_history.append(f"AI: {answer}")
        st.write(f"Chatbot ({score*100:.1f}% confidence): {answer}")

# Display chat history
st.write("### Chat History")
for message in st.session_state.chat_history:
    st.write(message)
a9306de
verified

VIATEUR-AI committed on

pygame==2.3.0
transformers==5.5.0  # NOTE(review): 5.5.0 does not look like a published transformers release (4.x series) — verify the intended pin
torch==2.1.0
e46f41d
verified

VIATEUR-AI committed on

Update app.py
04be250
verified

VIATEUR-AI committed on

gradio==3.45.0        # GUI for the app
torch>=2.0.0          # Backend for Whisper and MarianMT
transformers==4.34.0  # For MarianMT translation model
whisper==20230314     # Speech-to-Text transcription — NOTE(review): the PyPI package is "openai-whisper"; plain "whisper" is a different project — verify
numpy                 # Whisper dependency
soundfile             # Audio processing dependency
sentencepiece         # MarianMT tokenizer dependency
d639a0e
verified

VIATEUR-AI committed on

import gradio as gr import whisper from transformers import MarianMTModel, MarianTokenizer # ====================== # LOAD MODELS # ====================== # Whisper model (offline speech-to-text) whisper_model = whisper.load_model("base") # ushobora guhitamo tiny, small, medium, large # MarianMT model (English -> Kinyarwanda) mt_model_name = "Helsinki-NLP/opus-mt-en-rw" tokenizer = MarianTokenizer.from_pretrained(mt_model_name) translation_model = MarianMTModel.from_pretrained(mt_model_name) # ====================== # FUNCTION: TRANSCRIBE & TRANSLATE # ====================== def audio_translate(audio_file): # 1. Transcribe audio to text result = whisper_model.transcribe(audio_file) text = result['text'] # 2. Translate text to Kinyarwanda translated = translation_model.generate(**tokenizer(text, return_tensors="pt", padding=True)) translated_text = tokenizer.decode(translated[0], skip_special_tokens=True) return text, translated_text # ====================== # GRADIO APP # ====================== with gr.Blocks(title="Audio Translator") as app: gr.Markdown("## 🎙️ Audio Translator (English → Kinyarwanda)") gr.Markdown("Upload audio file (.wav or .mp3) → AI izayandika → hanyuma ihindure mu Kinyarwanda") audio_input = gr.Audio(label="Upload Audio", type="filepath") transcribed_output = gr.Textbox(label="Transcribed Text (English)") translated_output = gr.Textbox(label="Translated Text (Kinyarwanda)") gr.Button("Translate").click(audio_translate, inputs=audio_input, outputs=[transcribed_output, translated_output]) app.launch()
a9dace1
verified

VIATEUR-AI commited on

requirements . text
c5b5c19
verified

VIATEUR-AI commited on

Rename requirement.text to requirement . text
c7ba489
verified

VIATEUR-AI commited on

transformers==4.46.2 gradio==4.44.0 torch Pillow datasets accelerate sentencepiece protobuf huggingface_hub
408f7f6
verified

VIATEUR-AI commited on

# Full AI for Road Traffic Laws (Amategeko y'umuhanda) # Includes: # 1. Chatbot # 2. Photo recognition for road signs # 3. Mock exam system # 4. Dataset templates # 5. Fine‑tuning template (LoRA) # 6. Full Gradio UI import gradio as gr from transformers import pipeline from transformers import AutoModelForCausalLM, AutoTokenizer from PIL import Image import random import json # -------------------------- # 1. TEXT MODEL (CHATBOT) # -------------------------- text_model = pipeline( "text-generation", model="mistralai/Mistral-7B-Instruct", device_map="auto", max_new_tokens=300 ) def ask_ai(question): prompt = f"Sobanura neza amategeko y'umuhanda: {question}" answer = text_model(prompt)[0]["generated_text"] return answer # -------------------------- # 2. VISION MODEL (PHOTO RECOGNITION) # -------------------------- vision_model = pipeline( "image-classification", model="google/vit-base-patch16-224" ) def analyze_sign(image): results = vision_model(image) top = results[0] return f"Icyapa kirasa niki: {top['label']} (confidence: {top['score']:.2f})" # -------------------------- # 3. MOCK EXAM SYSTEM # -------------------------- mock_questions = [ {"q": "Iki cyapa gisobanura iki?", "a": "STOP"}, {"q": "Ni ryari wemerewe gutanga priorité?", "a": "Iyo uri mu muhanda nyamukuru."}, {"q": "Ukoresha gute indangururamajwi ('clignotant')?", "a": "Ugaragaza icyerekezo ugiye gufata mbere yo kwavuga."} ] def generate_exam(): item = random.choice(mock_questions) return item["q"] def check_answer(user_answer): correct = None for item in mock_questions: if user_answer.strip().lower() in item["a"].lower(): correct = item["a"] break return "✔️ Igisubizo ni cyo!" if correct else "❌ Siko. Ongera ugerageze." # -------------------------- # 4. DATASET TEMPLATES # -------------------------- example_json_dataset = { "instruction": "Sobanura iki cyapa.", "input": "<IMAGE001>", "output": "Iki cyapa gisobanura STOP." 
} example_csv_dataset = """question,answer Iki cyapa gisobanura iki?,STOP """ # -------------------------- # 5. LoRA FINE-TUNING TEMPLATE FINE-TUNING TEMPLATE # -------------------------- fine_tune_code = """ from datasets import load_dataset from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments, Trainer # Load dataset dataset = load_dataset('json', data_files='amategeko.json') # Load base model model = AutoModelForCausalLM.from_pretrained('mistralai/Mistral-7B-Instruct') tokenizer = AutoTokenizer.from_pretrained('mistralai/Mistral-7B-Instruct') training_args = TrainingArguments( output_dir='model_out', num_train_epochs=3, per_device_train_batch_size=1, save_steps=200, warmup_steps=20, ) trainer = Trainer( model=model, args=training_args, train_dataset=dataset, ) trainer.train() """ # -------------------------- # 6. FULL GRADIO UI # -------------------------- def build_ui(): with gr.Blocks(title="AI Yigisha Amategeko y'Umuhanda") as demo: gr.Markdown(""" # 🚦 AI Yigisha Amategeko y'Umuhanda - Chatbot: Sobanura amategeko yose - Photo analysis: Gusoma ibyapa by'umuhanda - Mock Exam: Ibizamini byo kwitoza """) with gr.Tab("Chatbot"): q = gr.Textbox(label="Andika ikibazo cyawe") a = gr.Textbox(label="Igisubizo") q.submit(ask_ai, q, a) gr.Button("Saba AI").click(ask_ai, q, a) with gr.Tab("Gusoma Icyapa (Photo)"): img_in = gr.Image(label="Shyiramo icyapa") img_out = gr.Textbox(label="Ibisobanuro") gr.Button("Soma icyapa").click(analyze_sign, img_in, img_out) with gr.Tab("Mock Exam"): exam_q = gr.Textbox(label="Ikibazo", value=generate_exam) user_a = gr.Textbox(label="Igisubizo cyawe") exam_result = gr.Textbox(label="Igisubizo") gr.Button("Ohereza igisubizo").click(check_answer, user_a, exam_result) gr.Button("Ikindi Kibazo").click(lambda: generate_exam(), None, exam_q) with gr.Tab("Dataset Templates"): gr.JSON(example_json_dataset, label="JSON Dataset Example") gr.Textbox(example_csv_dataset, label="CSV Dataset Example") with gr.Tab("Fine-tuning 
Code"): gr.Code(fine_tune_code, language="python", label="Fine-tuning (LoRA)") return demo # Run the full app app = build_ui() if __name__ == "__main__": app.launch()
1d77725
verified

VIATEUR-AI commited on

# Full AI for Road Traffic Laws (Amategeko y'umuhanda) # Includes: # 1. Chatbot # 2. Photo recognition for road signs # 3. Mock exam system # 4. Dataset templates # 5. Fine‑tuning template (LoRA) # 6. Full Gradio UI import gradio as gr from transformers import pipeline from transformers import AutoModelForCausalLM, AutoTokenizer from PIL import Image import random import json # -------------------------- # 1. TEXT MODEL (CHATBOT) # -------------------------- text_model = pipeline( "text-generation", model="mistralai/Mistral-7B-Instruct", device_map="auto", max_new_tokens=300 ) def ask_ai(question): prompt = f"Sobanura neza amategeko y'umuhanda: {question}" answer = text_model(prompt)[0]["generated_text"] return answer # -------------------------- # 2. VISION MODEL (PHOTO RECOGNITION) # -------------------------- vision_model = pipeline( "image-classification", model="google/vit-base-patch16-224" ) def analyze_sign(image): results = vision_model(image) top = results[0] return f"Icyapa kirasa niki: {top['label']} (confidence: {top['score']:.2f})" # -------------------------- # 3. MOCK EXAM SYSTEM # -------------------------- mock_questions = [ {"q": "Iki cyapa gisobanura iki?", "a": "STOP"}, {"q": "Ni ryari wemerewe gutanga priorité?", "a": "Iyo uri mu muhanda nyamukuru."}, {"q": "Ukoresha gute indangururamajwi ('clignotant')?", "a": "Ugaragaza icyerekezo ugiye gufata mbere yo kwavuga."} ] def generate_exam(): item = random.choice(mock_questions) return item["q"] def check_answer(user_answer): correct = None for item in mock_questions: if user_answer.strip().lower() in item["a"].lower(): correct = item["a"] break return "✔️ Igisubizo ni cyo!" if correct else "❌ Siko. Ongera ugerageze." # -------------------------- # 4. DATASET TEMPLATES # -------------------------- example_json_dataset = { "instruction": "Sobanura iki cyapa.", "input": "<IMAGE001>", "output": "Iki cyapa gisobanura STOP." 
} example_csv_dataset = """question,answer Iki cyapa gisobanura iki?,STOP """question,answer Iki cyapa gisobanura iki?,STOP """question,answer Iki cyapa gisobanura iki?,STOP """ Iki cyapa gisobanura iki?,STOP" # -------------------------- # 5. LoRA FINE-TUNING TEMPLATE # -------------------------- fine_tune_code = """ from datasets import load_dataset from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments, Trainer # Load dataset dataset = load_dataset('json', data_files='amategeko.json') # Load base model model = AutoModelForCausalLM.from_pretrained('mistralai/Mistral-7B-Instruct') tokenizer = AutoTokenizer.from_pretrained('mistralai/Mistral-7B-Instruct') training_args = TrainingArguments( output_dir='model_out', num_train_epochs=3, per_device_train_batch_size=1, save_steps=200, warmup_steps=20, ) trainer = Trainer( model=model, args=training_args, train_dataset=dataset, ) trainer.train() """ # -------------------------- # 6. FULL GRADIO UI # -------------------------- def build_ui(): with gr.Blocks(title="AI Yigisha Amategeko y'Umuhanda") as demo: gr.Markdown(""" # 🚦 AI Yigisha Amategeko y'Umuhanda - Chatbot: Sobanura amategeko yose - Photo analysis: Gusoma ibyapa by'umuhanda - Mock Exam: Ibizamini byo kwitoza """) with gr.Tab("Chatbot"): q = gr.Textbox(label="Andika ikibazo cyawe") a = gr.Textbox(label="Igisubizo") q.submit(ask_ai, q, a) gr.Button("Saba AI").click(ask_ai, q, a) with gr.Tab("Gusoma Icyapa (Photo)"): img_in = gr.Image(label="Shyiramo icyapa") img_out = gr.Textbox(label="Ibisobanuro") gr.Button("Soma icyapa").click(analyze_sign, img_in, img_out) with gr.Tab("Mock Exam"): exam_q = gr.Textbox(label="Ikibazo", value=generate_exam) user_a = gr.Textbox(label="Igisubizo cyawe") exam_result = gr.Textbox(label="Igisubizo") gr.Button("Ohereza igisubizo").click(check_answer, user_a, exam_result) gr.Button("Ikindi Kibazo").click(lambda: generate_exam(), None, exam_q) with gr.Tab("Dataset Templates"): gr.JSON(example_json_dataset, 
label="JSON Dataset Example") gr.Textbox(example_csv_dataset, label="CSV Dataset Example") with gr.Tab("Fine-tuning Code"): gr.Code(fine_tune_code, language="python", label="Fine-tuning (LoRA)") return demo # Run the full app app = build_ui() if __name__ == "__main__": app.launch()
c063def
verified

VIATEUR-AI commited on

gradio==4.16.0 transformers==4.36.2 torch Pillow accelerate safetensors numpy
d049a52
verified

VIATEUR-AI commited on

gradio==4.36.1 transformers==4.41.2 torch accelerate Pillow datasets sentencepiece protobuf huggingface_hub numpy
16672d1
verified

VIATEUR-AI commited on

# Full AI for Road Traffic Laws (Amategeko y'umuhanda) # Includes: # 1. Chatbot # 2. Photo recognition for road signs # 3. Mock exam system # 4. Dataset templates # 5. Fine‑tuning template (LoRA) # 6. Full Gradio UI import gradio as gr from transformers import pipeline from transformers import AutoModelForCausalLM, AutoTokenizer from PIL import Image import random import json # -------------------------- # 1. TEXT MODEL (CHATBOT) # -------------------------- text_model = pipeline( "text-generation", model="mistralai/Mistral-7B-Instruct", device_map="auto", max_new_tokens=300 ) def ask_ai(question): prompt = f"Sobanura neza amategeko y'umuhanda: {question}" answer = text_model(prompt)[0]["generated_text"] return answer # -------------------------- # 2. VISION MODEL (PHOTO RECOGNITION) # -------------------------- vision_model = pipeline( "image-classification", model="google/vit-base-patch16-224" ) def analyze_sign(image): results = vision_model(image) top = results[0] return f"Icyapa kirasa niki: {top['label']} (confidence: {top['score']:.2f})" # -------------------------- # 3. MOCK EXAM SYSTEM # -------------------------- mock_questions = [ {"q": "Iki cyapa gisobanura iki?", "a": "STOP"}, {"q": "Ni ryari wemerewe gutanga priorité?", "a": "Iyo uri mu muhanda nyamukuru."}, {"q": "Ukoresha gute indangururamajwi ('clignotant')?", "a": "Ugaragaza icyerekezo ugiye gufata mbere yo kwavuga."} ] def generate_exam(): item = random.choice(mock_questions) return item["q"] def check_answer(user_answer): correct = None for item in mock_questions: if user_answer.strip().lower() in item["a"].lower(): correct = item["a"] break return "✔️ Igisubizo ni cyo!" if correct else "❌ Siko. Ongera ugerageze." # -------------------------- # 4. DATASET TEMPLATES # -------------------------- example_json_dataset = { "instruction": "Sobanura iki cyapa.", "input": "<IMAGE001>", "output": "Iki cyapa gisobanura STOP." 
} example_csv_dataset = """question,answer Iki cyapa gisobanura iki?,STOP """question,answer Iki cyapa gisobanura iki?,STOP """ Iki cyapa gisobanura iki?,STOP" # -------------------------- # 5. LoRA FINE-TUNING TEMPLATE # -------------------------- fine_tune_code = """ from datasets import load_dataset from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments, Trainer # Load dataset dataset = load_dataset('json', data_files='amategeko.json') # Load base model model = AutoModelForCausalLM.from_pretrained('mistralai/Mistral-7B-Instruct') tokenizer = AutoTokenizer.from_pretrained('mistralai/Mistral-7B-Instruct') training_args = TrainingArguments( output_dir='model_out', num_train_epochs=3, per_device_train_batch_size=1, save_steps=200, warmup_steps=20, ) trainer = Trainer( model=model, args=training_args, train_dataset=dataset, ) trainer.train() """ # -------------------------- # 6. FULL GRADIO UI # -------------------------- def build_ui(): with gr.Blocks(title="AI Yigisha Amategeko y'Umuhanda") as demo: gr.Markdown(""" # 🚦 AI Yigisha Amategeko y'Umuhanda - Chatbot: Sobanura amategeko yose - Photo analysis: Gusoma ibyapa by'umuhanda - Mock Exam: Ibizamini byo kwitoza """) with gr.Tab("Chatbot"): q = gr.Textbox(label="Andika ikibazo cyawe") a = gr.Textbox(label="Igisubizo") q.submit(ask_ai, q, a) gr.Button("Saba AI").click(ask_ai, q, a) with gr.Tab("Gusoma Icyapa (Photo)"): img_in = gr.Image(label="Shyiramo icyapa") img_out = gr.Textbox(label="Ibisobanuro") gr.Button("Soma icyapa").click(analyze_sign, img_in, img_out) with gr.Tab("Mock Exam"): exam_q = gr.Textbox(label="Ikibazo", value=generate_exam) user_a = gr.Textbox(label="Igisubizo cyawe") exam_result = gr.Textbox(label="Igisubizo") gr.Button("Ohereza igisubizo").click(check_answer, user_a, exam_result) gr.Button("Ikindi Kibazo").click(lambda: generate_exam(), None, exam_q) with gr.Tab("Dataset Templates"): gr.JSON(example_json_dataset, label="JSON Dataset Example") 
gr.Textbox(example_csv_dataset, label="CSV Dataset Example") with gr.Tab("Fine-tuning Code"): gr.Code(fine_tune_code, language="python", label="Fine-tuning (LoRA)") return demo # Run the full app app = build_ui() if __name__ == "__main__": app.launch()
63ea0e9
verified

VIATEUR-AI commited on

# Full AI for Road Traffic Laws (Amategeko y'umuhanda) # Includes: # 1. Chatbot # 2. Photo recognition for road signs # 3. Mock exam system # 4. Dataset templates # 5. Fine‑tuning template (LoRA) # 6. Full Gradio UI import gradio as gr from transformers import pipeline from transformers import AutoModelForCausalLM, AutoTokenizer from PIL import Image import random import json # -------------------------- # 1. TEXT MODEL (CHATBOT) # -------------------------- text_model = pipeline( "text-generation", model="mistralai/Mistral-7B-Instruct", device_map="auto", max_new_tokens=300 ) def ask_ai(question): prompt = f"Sobanura neza amategeko y'umuhanda: {question}" answer = text_model(prompt)[0]["generated_text"] return answer # -------------------------- # 2. VISION MODEL (PHOTO RECOGNITION) # -------------------------- vision_model = pipeline( "image-classification", model="google/vit-base-patch16-224" ) def analyze_sign(image): results = vision_model(image) top = results[0] return f"Icyapa kirasa niki: {top['label']} (confidence: {top['score']:.2f})" # -------------------------- # 3. MOCK EXAM SYSTEM # -------------------------- mock_questions = [ {"q": "Iki cyapa gisobanura iki?", "a": "STOP"}, {"q": "Ni ryari wemerewe gutanga priorité?", "a": "Iyo uri mu muhanda nyamukuru."}, {"q": "Ukoresha gute indangururamajwi ('clignotant')?", "a": "Ugaragaza icyerekezo ugiye gufata mbere yo kwavuga."} ] def generate_exam(): item = random.choice(mock_questions) return item["q"] def check_answer(user_answer): correct = None for item in mock_questions: if user_answer.strip().lower() in item["a"].lower(): correct = item["a"] break return "✔️ Igisubizo ni cyo!" if correct else "❌ Siko. Ongera ugerageze." # -------------------------- # 4. DATASET TEMPLATES # -------------------------- example_json_dataset = { "instruction": "Sobanura iki cyapa.", "input": "<IMAGE001>", "output": "Iki cyapa gisobanura STOP." 
} example_csv_dataset = """question,answer Iki cyapa gisobanura iki?,STOP """ Iki cyapa gisobanura iki?,STOP" # -------------------------- # 5. LoRA FINE-TUNING TEMPLATE # -------------------------- fine_tune_code = """ from datasets import load_dataset from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments, Trainer # Load dataset dataset = load_dataset('json', data_files='amategeko.json') # Load base model model = AutoModelForCausalLM.from_pretrained('mistralai/Mistral-7B-Instruct') tokenizer = AutoTokenizer.from_pretrained('mistralai/Mistral-7B-Instruct') training_args = TrainingArguments( output_dir='model_out', num_train_epochs=3, per_device_train_batch_size=1, save_steps=200, warmup_steps=20, ) trainer = Trainer( model=model, args=training_args, train_dataset=dataset, ) trainer.train() """ # -------------------------- # 6. FULL GRADIO UI # -------------------------- def build_ui(): with gr.Blocks(title="AI Yigisha Amategeko y'Umuhanda") as demo: gr.Markdown(""" # 🚦 AI Yigisha Amategeko y'Umuhanda - Chatbot: Sobanura amategeko yose - Photo analysis: Gusoma ibyapa by'umuhanda - Mock Exam: Ibizamini byo kwitoza """) with gr.Tab("Chatbot"): q = gr.Textbox(label="Andika ikibazo cyawe") a = gr.Textbox(label="Igisubizo") q.submit(ask_ai, q, a) gr.Button("Saba AI").click(ask_ai, q, a) with gr.Tab("Gusoma Icyapa (Photo)"): img_in = gr.Image(label="Shyiramo icyapa") img_out = gr.Textbox(label="Ibisobanuro") gr.Button("Soma icyapa").click(analyze_sign, img_in, img_out) with gr.Tab("Mock Exam"): exam_q = gr.Textbox(label="Ikibazo", value=generate_exam) user_a = gr.Textbox(label="Igisubizo cyawe") exam_result = gr.Textbox(label="Igisubizo") gr.Button("Ohereza igisubizo").click(check_answer, user_a, exam_result) gr.Button("Ikindi Kibazo").click(lambda: generate_exam(), None, exam_q) with gr.Tab("Dataset Templates"): gr.JSON(example_json_dataset, label="JSON Dataset Example") gr.Textbox(example_csv_dataset, label="CSV Dataset Example") with 
gr.Tab("Fine-tuning Code"): gr.Code(fine_tune_code, language="python", label="Fine-tuning (LoRA)") return demo # Run the full app app = build_ui() if __name__ == "__main__": app.launch()
1eb5c99
verified

VIATEUR-AI commited on

gradio transformers torch accelerate Pillow datasets sentencepiece protobuf huggingface_hub numpy
528fef8
verified

VIATEUR-AI commited on

# Full AI for Road Traffic Laws (Amategeko y'umuhanda) # Includes: # 1. Chatbot # 2. Photo recognition for road signs # 3. Mock exam system # 4. Dataset templates # 5. Fine‑tuning template (LoRA) # 6. Full Gradio UI import gradio as gr from transformers import pipeline from transformers import AutoModelForCausalLM, AutoTokenizer from PIL import Image import random import json # -------------------------- # 1. TEXT MODEL (CHATBOT) # -------------------------- text_model = pipeline( "text-generation", model="mistralai/Mistral-7B-Instruct", device_map="auto", max_new_tokens=300 ) def ask_ai(question): prompt = f"Sobanura neza amategeko y'umuhanda: {question}" answer = text_model(prompt)[0]["generated_text"] return answer # -------------------------- # 2. VISION MODEL (PHOTO RECOGNITION) # -------------------------- vision_model = pipeline( "image-classification", model="google/vit-base-patch16-224" ) def analyze_sign(image): results = vision_model(image) top = results[0] return f"Icyapa kirasa niki: {top['label']} (confidence: {top['score']:.2f})" # -------------------------- # 3. MOCK EXAM SYSTEM # -------------------------- mock_questions = [ {"q": "Iki cyapa gisobanura iki?", "a": "STOP"}, {"q": "Ni ryari wemerewe gutanga priorité?", "a": "Iyo uri mu muhanda nyamukuru."}, {"q": "Ukoresha gute indangururamajwi ('clignotant')?", "a": "Ugaragaza icyerekezo ugiye gufata mbere yo kwavuga."} ] def generate_exam(): item = random.choice(mock_questions) return item["q"] def check_answer(user_answer): correct = None for item in mock_questions: if user_answer.strip().lower() in item["a"].lower(): correct = item["a"] break return "✔️ Igisubizo ni cyo!" if correct else "❌ Siko. Ongera ugerageze." # -------------------------- # 4. DATASET TEMPLATES # -------------------------- example_json_dataset = { "instruction": "Sobanura iki cyapa.", "input": "<IMAGE001>", "output": "Iki cyapa gisobanura STOP." 
} example_csv_dataset = "question,answer Iki cyapa gisobanura iki?,STOP" # -------------------------- # 5. LoRA FINE-TUNING TEMPLATE # -------------------------- fine_tune_code = """ from datasets import load_dataset from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments, Trainer # Load dataset dataset = load_dataset('json', data_files='amategeko.json') # Load base model model = AutoModelForCausalLM.from_pretrained('mistralai/Mistral-7B-Instruct') tokenizer = AutoTokenizer.from_pretrained('mistralai/Mistral-7B-Instruct') training_args = TrainingArguments( output_dir='model_out', num_train_epochs=3, per_device_train_batch_size=1, save_steps=200, warmup_steps=20, ) trainer = Trainer( model=model, args=training_args, train_dataset=dataset, ) trainer.train() """ # -------------------------- # 6. FULL GRADIO UI # -------------------------- def build_ui(): with gr.Blocks(title="AI Yigisha Amategeko y'Umuhanda") as demo: gr.Markdown(""" # 🚦 AI Yigisha Amategeko y'Umuhanda - Chatbot: Sobanura amategeko yose - Photo analysis: Gusoma ibyapa by'umuhanda - Mock Exam: Ibizamini byo kwitoza """) with gr.Tab("Chatbot"): q = gr.Textbox(label="Andika ikibazo cyawe") a = gr.Textbox(label="Igisubizo") q.submit(ask_ai, q, a) gr.Button("Saba AI").click(ask_ai, q, a) with gr.Tab("Gusoma Icyapa (Photo)"): img_in = gr.Image(label="Shyiramo icyapa") img_out = gr.Textbox(label="Ibisobanuro") gr.Button("Soma icyapa").click(analyze_sign, img_in, img_out) with gr.Tab("Mock Exam"): exam_q = gr.Textbox(label="Ikibazo", value=generate_exam) user_a = gr.Textbox(label="Igisubizo cyawe") exam_result = gr.Textbox(label="Igisubizo") gr.Button("Ohereza igisubizo").click(check_answer, user_a, exam_result) gr.Button("Ikindi Kibazo").click(lambda: generate_exam(), None, exam_q) with gr.Tab("Dataset Templates"): gr.JSON(example_json_dataset, label="JSON Dataset Example") gr.Textbox(example_csv_dataset, label="CSV Dataset Example") with gr.Tab("Fine-tuning Code"): 
gr.Code(fine_tune_code, language="python", label="Fine-tuning (LoRA)") return demo # Run the full app app = build_ui() if __name__ == "__main__": app.launch()
54df764
verified

VIATEUR-AI commited on

initial commit
1b8305d
verified

VIATEUR-AI commited on

Duplicate from gradio-templates/chatbot
65f3134
verified

VIATEUR-AI pngwn HF Staff commited on