import random
import gradio as gr
import openai
import os
import re
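# Note: the OpenAI API key is read from the environment (e.g. a Space secret named "open_ai_key").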
openai.api_key = os.environ.get("open_ai_key")
prompt = ['''
You are a ''',
'''
machine learning developer, trying to debug this code:
StackTrace:
Traceback (most recent call last):
File “/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/multiprocessing/process.py”, line 258, in _bootstrap
self.run()
File “/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/multiprocessing/process.py”, line 93, in run
self._target(*self._args, **self._kwargs)
File “/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/site-packages/torch/utils/data/dataloader.py”, line 61, in _worker_loop
data_queue.put((idx, samples))
File “/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/multiprocessing/queues.py”, line 341, in put
File “/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/multiprocessing/reduction.py”, line 51, in dumps
File “/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/site-packages/torch/multiprocessing/reductions.py”, line 121, in reduce_storage
RuntimeError: unable to open shared memory object </torch_54163_3383444026> in read-write mode at /opt/conda/conda-bld/pytorch_1525909934016/work/aten/src/TH/THAllocator.c:342
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File “/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/multiprocessing/util.py”, line 262, in _run_finalizers
File “/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/multiprocessing/util.py”, line 186, in call
File “/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/shutil.py”, line 476, in rmtree
File “/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/shutil.py”, line 474, in rmtree
OSError: [Errno 24] Too many open files: ‘/tmp/pymp-sgew4xdn’
Process Process-1:
Traceback (most recent call last):
File “/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/multiprocessing/process.py”, line 258, in _bootstrap
self.run()
File “/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/multiprocessing/process.py”, line 93, in run
self._target(*self._args, **self._kwargs)
File “/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/site-packages/torch/utils/data/dataloader.py”, line 61, in _worker_loop
data_queue.put((idx, samples))
File “/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/multiprocessing/queues.py”, line 341, in put
File “/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/multiprocessing/reduction.py”, line 51, in dumps
File “/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/site-packages/torch/multiprocessing/reductions.py”, line 121, in reduce_storage
RuntimeError: unable to open shared memory object </torch_54163_3383444026> in read-write mode at /opt/conda/conda-bld/pytorch_1525909934016/work/aten/src/TH/THAllocator.c:342
Traceback (most recent call last):
File “/home/nlpgpu3/LinoHong/FakeNewsByTitle/main.py”, line 25, in
for mini_batch in trainloader :
File “/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/site-packages/torch/utils/data/dataloader.py”, line 280, in next
idx, batch = self._get_batch()
File “/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/site-packages/torch/utils/data/dataloader.py”, line 259, in _get_batch
return self.data_queue.get()
File “/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/multiprocessing/queues.py”, line 335, in get
res = self._reader.recv_bytes()
File “/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/multiprocessing/connection.py”, line 216, in recv_bytes
buf = self._recv_bytes(maxlength)
File “/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/multiprocessing/connection.py”, line 407, in _recv_bytes
buf = self._recv(4)
File “/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/multiprocessing/connection.py”, line 379, in _recv
chunk = read(handle, remaining)
File “/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/site-packages/torch/utils/data/dataloader.py”, line 178, in handler
_error_if_any_worker_fails()
RuntimeError: DataLoader worker (pid 54163) exited unexpectedly with exit code 1.
Process finished with exit code 1
Question: Any idea how I can solve this problem?
Are follow up questions needed here: Yes
Follow up: Does your code run with less num_workers or num_workers=0?
Intermediate Answer: It worked when I set num_workers equals to 0, but doesn’t work greater or equal to 1
Follow up: Could you try to increase the shared memory and try setting num_workers>0 again?
Intermediate Answer: It worked! Can you explain what happened here?
So the final answer is: The error usually means that your system doesn’t provide enough shared memory for multiple workers (used via num_workers>0). Check the shared memory limitation of your system and try to increase it.
StackTrace:
Traceback (most recent call last):
File "main.py", line 39, in <module>
request = create_request(page)
File "main.py", line 15, in create_request
url = base_url + data
TypeError: can only concatenate str (not "bytes") to str
Question: How do I fix this?
Are follow up questions needed here: Yes
Follow up: Could you try to decode the data before passing it to the url?
Intermediate Answer: Yes, it made the data a string and worked!
So the final answer is: You can try to decode the data before passing it to the url like this:
data = urllib.parse.unquote(data)
StackTrace: ''',
'''
Question: ''',
'''
Are follow up questions needed here:''',]
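
# The prompt is stored as four segments that are interleaved with user input at query time:
#   prompt[0] + <language> + prompt[1] + <stack trace> + prompt[2] + <question> + prompt[3]
# Segment prompt[1] carries two worked, self-ask-style examples (stack trace -> follow-up
# question -> intermediate answer -> final answer) that the model is expected to imitate.

# The three parsing helpers below operate on output in that format; they are not referenced
# elsewhere in this app.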
def extract_answer(generated):
    if '\n' not in generated:
        last_line = generated
    else:
        last_line = generated.split('\n')[-1]
    if ':' not in last_line:
        after_colon = last_line
    else:
        after_colon = generated.split(':')[-1]
    if ' ' == after_colon[0]:
        after_colon = after_colon[1:]
    if '.' == after_colon[-1]:
        after_colon = after_colon[:-1]
    return after_colon
def extract_question(generated):
    if '\n' not in generated:
        last_line = generated
    else:
        last_line = generated.split('\n')[-1]
    if 'Follow up:' not in last_line:
        print('we probably should never get here...' + generated)
    if ':' not in last_line:
        after_colon = last_line
    else:
        after_colon = generated.split(':')[-1]
    if ' ' == after_colon[0]:
        after_colon = after_colon[1:]
    if '?' != after_colon[-1]:
        print('we probably should never get here...' + generated)
    return after_colon
def get_last_line(generated):
    if '\n' not in generated:
        last_line = generated
    else:
        last_line = generated.split('\n')[-1]
    return last_line
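
# ANSI colour helpers used only for console logging: 102 is a bright-green background;
# 106 is a bright-cyan background (despite the name "yellowfy").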
def greenify(input):
    return "\x1b[102m" + input + "\x1b[0m"

def yellowfy(input):
    return "\x1b[106m" + input + "\x1b[0m"
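
# Single completion call against the legacy OpenAI Completions API (text-davinci-002).
# `stop` is set to the "\nIntermediate Answer:" marker so generation halts where the
# user's next reply will be spliced in.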
def call_gpt(cur_prompt, stop):
    ans = openai.Completion.create(
        model="text-davinci-002",
        max_tokens=256,
        stop=stop,
        prompt=cur_prompt,
        temperature=0.7,
        top_p=1,
        frequency_penalty=0,
        presence_penalty=0
    )
    returned = ans['choices'][0]['text']
    print(greenify(returned), end='')
    return returned
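
# Builds the first prompt (language + stack trace + hard-coded question) and queries the model,
# retrying up to 3 times until the reply contains either a "Follow up:" question or a final
# answer. If a final answer is present, the prompt is re-issued with a "Let's think
# step-by-step." suffix to elicit a fuller explanation.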
def initial_query_builder(language, code, question, intermediate="\nIntermediate Answer:", followup="\nFollow up:", finalans='\nSo the final answer is:'):
    cur_prompt = prompt[0] + language + prompt[1] + code + prompt[2] + question + prompt[3]
    # print("prompt: ", cur_prompt, end='')
    ## check whether the response contains a follow up; if not, make sure it contains the final
    ## answer. Otherwise re-run until at least one of the two is in the response; break after 3 attempts.
    attempts = 0
    ret_text = ''
    while followup not in ret_text and finalans not in ret_text:
        attempts += 1
        ret_text = call_gpt(cur_prompt, intermediate)
        print(str(attempts) + " ret_text:", ret_text)
        if attempts == 3:
            break
    if "final answer is" in ret_text:
        updated_prompt = cur_prompt + re.findall(r".*?(?=is:)", ret_text)[0] + " is: Let's think step-by-step. "
        ret_text = call_gpt(updated_prompt, intermediate)
    return ret_text
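
# Appends the user's latest reply as the next "Intermediate Answer:" and re-queries the model,
# with the same retry and step-by-step logic as the initial query.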
def subsequent_query_builder(curr_prompt, external_answer, intermediate="\nIntermediate Answer:", followup="\nFollow up:", finalans='\nSo the final answer is:'):
    print("curr_prompt: ", curr_prompt)
    curr_prompt += intermediate + ' ' + external_answer + '.'
    ## check whether the response contains a follow up; if not, make sure it contains the final
    ## answer. Otherwise re-run until at least one of the two is in the response; break after 3 attempts.
    attempts = 0
    ret_text = ''
    while followup not in ret_text and finalans not in ret_text:
        attempts += 1
        ret_text = call_gpt(curr_prompt, intermediate)
        print("subsequent query " + str(attempts) + " ret_text:", ret_text)
        if attempts == 3:
            break
    print("ret_text: ", ret_text)
    if "final answer is" in ret_text:
        updated_prompt = curr_prompt + re.findall(r".*?(?=is:)", ret_text)[0] + " is: Let's think step-by-step. "
        # print("updated_prompt: ", updated_prompt)
        ret_text = call_gpt(updated_prompt, intermediate)
    return ret_text

"""subsequent query builder:
the way to rebuild the prompt for each subsequent call:
1. every user response is an 'intermediate answer'
2. until you hit 'so the final answer is: ' you're good
3.
"""
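
# Illustrative (hypothetical) shape of `history` assumed by prompt_builder(), based on chat() below:
#   history[0]  = (<stack trace>, "which language is this in? ...")
#   history[1]  = (<language>, <first model response>)
#   history[2:] = (<user reply>, <model response>) pairs
# The full prompt is then rebuilt as:
#   prompt[0] + <language> + prompt[1] + <stack trace> + prompt[2] + <question> + prompt[3]
#   + <first model response> + "\nIntermediate Answer: <user reply>." + <model response> + ...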
def prompt_builder(history, intermediate="\nIntermediate Answer:", followup="\nFollow up:", finalans='\nSo the final answer is:'):
    # set language
    language = history[1][0]
    # set stack trace
    stacktrace = history[0][0]
    # set question (hardcoded)
    question = "Any idea how I can solve this problem?"
    # initial prompt
    curr_prompt = prompt[0] + language + prompt[1] + stacktrace + prompt[2] + question + prompt[3]
    # set subsequent conversation thread
    if len(history) >= 2:  # subsequent conversations have occurred
        curr_prompt += history[1][1]  ## get the first response to the stacktrace prompt
        for conversation in history[2:]:
            # grab intermediate answer
            curr_prompt += intermediate + ' ' + conversation[0] + '.'
            # grab the follow up
            curr_prompt += conversation[1]
    return curr_prompt
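
# Chat driver. The flow is keyed off len(history):
#   0  -> the incoming message is the stack trace; ask which language it is in
#   1  -> the incoming message is the language; build and send the initial prompt
#   2+ -> the incoming message is an intermediate answer; rebuild the prompt from history
#         and continue the exchange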
def chat(message, history):
    history = history or []
    print(len(history))
    if len(history) == 0:  ## just the stacktrace
        response = "which language is this in? (python, java, c++, kotlin, etc.)"
    elif len(history) == 1:  ## stacktrace + just entered the language
        # get stacktrace
        stacktrace = history[0][0]
        # get language
        language = message
        # set question (hardcoded for v1)
        question = "Any idea how I can solve this problem?"
        response = initial_query_builder(language, stacktrace, question)
    else:  # subsequent prompts
        # get stacktrace
        stacktrace = history[0][0]
        # get language
        language = history[1][0]
        # set question (hardcoded for v1)
        question = "Any idea how I can solve this problem?"
        curr_prompt = prompt_builder(history)
        response = subsequent_query_builder(curr_prompt, message)
    # response = query_builder(language, stacktrace, question)
    print("response: ", response)
    history.append((message, response))
    return history, history
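
# Resets the input textbox after each submission. Note this sets the hint text as the
# textbox value rather than as its placeholder.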
def clear(arg):
    return "Enter your response - feel free to elaborate further, ask questions, etc."
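
# Gradio UI: a textbox plus button feed chat(); the running conversation is kept in gr.State
# and rendered in a Chatbot component. Both pressing Enter and clicking the button trigger
# the same handlers.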
with gr.Blocks() as demo:
    user_state = gr.State([])
    gr.Markdown("""# StackTrace QA Bot""")
    with gr.Row():
        with gr.Column():
            inp = gr.Textbox(placeholder="enter your stacktrace here")
            btn = gr.Button("Enter message")
        output = gr.Chatbot().style(color_map=("green", "pink"))
        # allow_flagging="never",
    inp.submit(chat, [inp, user_state], [output, user_state])
    inp.submit(clear, inp, inp)
    btn.click(chat, [inp, user_state], [output, user_state])
    btn.click(clear, inp, inp)
    gr.Markdown("""### need help? got feedback? have thoughts? etc. ➜ Join the [Discord](https://discord.gg/KvG3azf39U)""")
    gr.Examples(examples=['''PYTORCH: ---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
/var/folders/49/9g9lxm9d3f3br8zlg2l2fmz80000gn/T/ipykernel_1349/2634282627.py in <module>
----> 1 torch.onnx.export(model, x, "output.onnx")
/opt/anaconda3/lib/python3.9/site-packages/torch/onnx/utils.py in export(model, args, f, export_params, verbose, training, input_names, output_names, operator_export_type, opset_version, do_constant_folding, dynamic_axes, keep_initializers_as_inputs, custom_opsets, export_modules_as_functions)
502 """
503
--> 504 _export(
505 model,
506 args,
/opt/anaconda3/lib/python3.9/site-packages/torch/onnx/utils.py in _export(model, args, f, export_params, verbose, training, input_names, output_names, operator_export_type, export_type, opset_version, do_constant_folding, dynamic_axes, keep_initializers_as_inputs, fixed_batch_size, custom_opsets, add_node_names, onnx_shape_inference, export_modules_as_functions)
1527 _validate_dynamic_axes(dynamic_axes, model, input_names, output_names)
1528
-> 1529 graph, params_dict, torch_out = _model_to_graph(
1530 model,
1531 args,
/opt/anaconda3/lib/python3.9/site-packages/torch/onnx/utils.py in _model_to_graph(model, args, verbose, input_names, output_names, operator_export_type, do_constant_folding, _disable_torch_constant_prop, fixed_batch_size, training, dynamic_axes)
1113
1114 try:
-> 1115 graph = _optimize_graph(
1116 graph,
1117 operator_export_type,
/opt/anaconda3/lib/python3.9/site-packages/torch/onnx/utils.py in _optimize_graph(graph, operator_export_type, _disable_torch_constant_prop, fixed_batch_size, params_dict, dynamic_axes, input_names, module)
580 _C._jit_pass_lint(graph)
581 _C._jit_pass_onnx_autograd_function_process(graph)
--> 582 C._jit_pass_lower_all_tuples(graph)
583
584 # we now record some ops like ones/zeros
RuntimeError: outerNode->outputs().size() == node->inputs().size() INTERNAL ASSERT FAILED at "/Users/runner/work/pytorch/pytorch/pytorch/torch/csrc/jit/passes/dead_code_elimination.cpp":140, please report a bug to PyTorch.''', '''RUST: error[E0382]: use of moved value: `primes`
--> src/main.rs:9:31
|
9 | if vectorIsPrime(num, primes) {
| ^^^^^^ value moved here, in previous iteration of loop
|
= note: move occurs because `primes` has type `std::vec::Vec<u64>`, which does not implement the `Copy` trait
''', "REACT: Uncaught Error: Invariant Violation: Element type is invalid: expected a string (for built-in components) or a class/function (for composite components) but got: object."], inputs=inp, cache_examples=False)
if __name__ == "__main__":
    demo.launch(debug=True)