diff --git "a/1564.jsonl" "b/1564.jsonl" new file mode 100644--- /dev/null +++ "b/1564.jsonl" @@ -0,0 +1,670 @@ +{"seq_id":"367216138","text":"import uuid\n\nfrom musicscore.musictree.midi import G, D, A, E, C, MidiNote, Midi, B, F, midi_to_frequency, frequency_to_midi\nfrom musicscore.musictree.treeclef import TreeClef, TREBLE_CLEF, ALTO_CLEF, BASS_CLEF\nfrom musicscore.musicxml.types.complextypes.midiinstrument import ComplexTypeMidiInstrument\nfrom musicscore.musicxml.types.complextypes.scorepart import PartName, PartAbbreviation\n\n\nclass TreeInstrument(ComplexTypeMidiInstrument):\n _TAG = 'midi-instrument'\n\n def __init__(self, name, number_of_staves=None, abbreviation=None, number=None, *args, **kwargs):\n super().__init__(tag=self._TAG, id_='inst' + str(uuid.uuid4()), *args, **kwargs)\n self._part_name = PartName(name=name)\n self._part_abbreviation = PartAbbreviation()\n self._number_of_staves = None\n self._standard_clefs = None\n self._number = None\n\n self.number_of_staves = number_of_staves\n self.abbreviation = abbreviation\n self.number = number\n\n # public properties\n @property\n def abbreviation(self):\n return self._part_abbreviation.value\n\n @abbreviation.setter\n def abbreviation(self, val):\n self._part_abbreviation.value = val\n\n @property\n def name(self):\n return self._part_name.name\n\n @name.setter\n def name(self, val):\n self._part_name.name = val\n\n @property\n def number(self):\n return self._number\n\n @number.setter\n def number(self, val):\n if self._number is not None:\n raise AttributeError('number can only be set once')\n\n if val is not None and not isinstance(val, int):\n raise TypeError('number.value must be of type int not{}'.format(type(val)))\n self._number = val\n\n if self._number is not None:\n self.name += ' ' + str(self._number)\n self.abbreviation += ' ' + str(self._number)\n\n @property\n def part_name(self):\n return self._part_name\n\n @property\n def part_abbreviation(self):\n return self._part_abbreviation\n\n @property\n def standard_clefs(self):\n return self._standard_clefs\n\n @standard_clefs.setter\n def standard_clefs(self, vals):\n if not hasattr(vals, '__iter__'):\n vals = [vals]\n for index, val in enumerate(vals):\n if not isinstance(val, TreeClef):\n raise TypeError('standard_clef.value must be of type TreeClef not{}'.format(type(val)))\n vals[index] = val.__deepcopy__()\n\n if len(vals) > 1:\n for index, val in enumerate(vals):\n val.number = index + 1\n self._standard_clefs = vals\n\n @property\n def number_of_staves(self):\n return self._number_of_staves\n\n @number_of_staves.setter\n def number_of_staves(self, val):\n if val is not None and not isinstance(val, int):\n raise TypeError('number_of_staves.value must be of type int not{}'.format(type(val)))\n self._number_of_staves = val\n\n\n# strings\nclass String(object):\n def __init__(self, number, tuning, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._tuning = None\n self.number = number\n self.tuning = tuning\n\n @property\n def tuning(self):\n return self._tuning\n\n @tuning.setter\n def tuning(self, val):\n if not isinstance(val, Midi):\n raise TypeError('tuning.value must be of type Midi not{}'.format(type(val)))\n self._tuning = val\n\n def get_step(self, number):\n step = self.tuning.__deepcopy__()\n step.transpose(number)\n return step\n\n\nclass StringInstrument(TreeInstrument):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.strings = {}\n\n\nclass Violin(StringInstrument):\n def __init__(self, number=None, 
*args, **kwargs):\n super().__init__(name='Violin', abbreviation='vln.', number=number, *args, **kwargs)\n self.strings = {4: String(4, G(3)),\n 3: String(3, D(4)),\n 2: String(2, A(4)),\n 1: String(1, E(5))\n }\n self.standard_clefs = TREBLE_CLEF\n\n\nclass Viola(StringInstrument):\n def __init__(self, number=None, *args, **kwargs):\n super().__init__(name='Viola', abbreviation='vla.', number=number, *args, **kwargs)\n self.strings = {4: String(4, C(3)),\n 3: String(3, G(3)),\n 2: String(2, D(4)),\n 1: String(1, A(4))\n }\n self.standard_clefs = ALTO_CLEF\n\n\nclass Cello(StringInstrument):\n def __init__(self, number=None, *args, **kwargs):\n super().__init__(name='Cello', abbreviation='vc.', number=number, *args, **kwargs)\n self.strings = {4: String(4, C(2)),\n 3: String(3, G(2)),\n 2: String(2, D(3)),\n 1: String(1, A(3))\n }\n self.standard_clefs = BASS_CLEF\n\n\nclass ViolaDamore(StringInstrument):\n def __init__(self, number=None, *args, **kwargs):\n super().__init__(name='Viola d\\'amore\\n430', abbreviation='vla.', number=number, *args, **kwargs)\n # skordatura\n self.strings = {1: String(1, B(4)),\n 2: String(2, B(4)),\n 3: String(3, F(4, '#')),\n 4: String(4, C(4)),\n 5: String(5, G(3)),\n 6: String(6, D(3)),\n 7: String(7, A(2))\n }\n\n\n# keyboards\nclass KeyboardInstrument(TreeInstrument):\n def __init__(self, number_of_staves=2, *args, **kwargs):\n super().__init__(number_of_staves=number_of_staves, *args, **kwargs)\n self.standard_clefs = [TREBLE_CLEF, BASS_CLEF]\n\n\nclass Accordion(KeyboardInstrument):\n def __init__(self, number=None, *args, **kwargs):\n super().__init__(name='Accordion', abbreviation='acc.', number=number, *args, **kwargs)\n\n\nclass Piano(KeyboardInstrument):\n def __init__(self, *args, **kwargs):\n super().__init__(name='Piano', abbreviation='pno.', *args, **kwargs)\n\n\n# brass\nclass NaturalInstrument(TreeInstrument):\n\n def __init__(self, key, a4=440, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._a4 = None\n self._key = None\n self._transposition = None\n self.a4 = a4\n self.key = key\n\n @property\n def a4(self):\n return self._a4\n\n @a4.setter\n def a4(self, val):\n try:\n float(val)\n except AttributeError:\n raise TypeError()\n self._a4 = val\n\n @property\n def key(self):\n return self._key\n\n @key.setter\n def key(self, val):\n if not isinstance(val, MidiNote):\n raise TypeError('key.value must be of type MidiNote not{}'.format(type(val)))\n self._key = val\n\n @property\n def transposition(self):\n return self._transposition\n\n @transposition.setter\n def transposition(self, val):\n self._transposition = val\n\n def get_fundamental_frequency(self):\n return midi_to_frequency(self.key, self.a4)\n\n def get_partial_midi_value(self, number):\n if not isinstance(number, int):\n return TypeError()\n if number <= 0:\n return ValueError()\n\n return frequency_to_midi(self.get_fundamental_frequency() * number, self.a4)\n\n\nclass Horn(TreeInstrument):\n def __init__(self, number=None, *args, **kwargs):\n super().__init__(name='Horn', abbreviation='hrn.', number=number, *args, **kwargs)\n\n\nclass NaturalHorn(NaturalInstrument):\n def __init__(self, key=E(1, 'b'), a4=430, *args, **kwargs):\n super().__init__(name='Horn in E♭\\n430', abbreviation='hrn.', key=key, a4=a4, *args, **kwargs)\n self.transposition = 9\n\n\n# percussion\nclass Percussion(TreeInstrument):\n def __init__(self, number=None, *args, **kwargs):\n super().__init__(name='Percussion', abbreviation='perc.', number=number, *args, **kwargs)\n self.tamtam = TamTam()\n 
self.cymbal_1 = Cymbal(1)\n self.cymbal_2 = Cymbal(2)\n self.cymbal_3 = Cymbal(3)\n self.cymbal_4 = Cymbal(4)\n self.cymbal_5 = Cymbal(5)\n\n\nclass TamTam(TreeInstrument):\n def __init__(self, number=None, *args, **kwargs):\n super().__init__(name='Tam-tam', abbreviation='Tam-t.', number=number, *args, **kwargs)\n self.midi = B(3)\n self.midi.notehead = 'x'\n\n\nclass Cymbal(TreeInstrument):\n midis = {1: E(4), 2: G(4), 3: B(4), 4: D(5), 5: F(5)}\n\n def __init__(self, number=1, *args, **kwargs):\n super().__init__(name='cymbal-' + str(number), abbreviation='cym-' + str(number), number=number, *args,\n **kwargs)\n\n self.midi = self.midis[self.number]\n self.midi.notehead = 'x'\n\n\n# voice\nclass Voice(TreeInstrument):\n def __init__(self, *args, **kwargs):\n super().__init__(name='voice', abbreviation='v.', *args, **kwargs)\n","sub_path":"musicscore/musictree/treeinstruments.py","file_name":"treeinstruments.py","file_ext":"py","file_size_in_byte":9009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"102190419","text":"import math\n\ndef smallest_subarr(s, arr):\n window_sum = 0\n min_length = math.inf\n window_start = 0\n\n for window_end in range (0, len(arr)):\n window_sum += arr[window_end]\n while window_sum >= s:\n min_length = min(min_length, window_end - window_start + 1)\n window_sum -= arr[window_start]\n window_start += 1\n if min_length == math.inf:\n return 0\n return min_length\n\n\ndef main():\n print(\"smallest subarray length: \" + str(smallest_subarr(7,[2,3,5,1,6,7])))\n print(\"smallest subarray length: \" + str(smallest_subarr(2,[2,3,5,6,4,7])))\n\n\nmain()\n\n\n\n","sub_path":"solutions/smallest_subarr.py","file_name":"smallest_subarr.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"148680434","text":"\"\"\"\nUsage:\npython test-anomaly-detection-embedding.py \\\n --scope tongtai \\\n --name test-ad-tongtai-embedding \\\n --step-size 6 \\\n --hidden-size 16 \\\n --embedding-size 100 \\\n --symbol-size 132 \\\n --batch-size 128 \\\n --layer-depth 1 \\\n --dropout-rate 0.1 \\\n --use-column 9 # level_x \\\n --src ../build/models/phm2012/ad-phm-embedding/model \\\n --test-src ../build/data/tongtai/labeled/2017-08-21-0.5mm-working.csv \\\n --batch-step 250\n\"\"\"\nimport sys\nimport os\nimport time\nimport math\nimport numpy as np\nimport pandas as pd\nimport matplotlib as mpl\nmpl.use('Agg')\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nfrom utils.utils import log, get_args, prepare_directory, get_batch_count\nfrom utils.preprocess import unison_shuffled_copies\nfrom models.EncDecEmbedding import Model\n\nmpl.rcParams['agg.path.chunksize'] = 10000\n\n# to be caculated\ndataset = {\n 'ordered': [],\n}\n\nargs = get_args()\n\ndef read_dataset():\n global dataset\n\n table = np.genfromtxt(\n args.test_src,\n delimiter=',',\n skip_header=1,\n usecols=(args.use_column,)\n )\n for sample_from_idx in range(0, table.shape[0] - args.step_size + 1):\n table_sample = table[sample_from_idx: sample_from_idx + args.step_size]\n dataset['ordered'].append(table_sample)\n if sample_from_idx % 100000 == 0:\n log('%2.0f%% loaded' % ((float(sample_from_idx) / (table.shape[0] - args.step_size + 1)) * 100))\n\n dataset['ordered'] = np.array(dataset['ordered'])\n\n # cut last batch\n for name in ['ordered']:\n d = dataset[name]\n length = len(d)\n r = length % args.batch_size\n dataset[name] = dataset[name][0:-r]\n\ndef 
visualize(xs, ys):\n dest_dir = prepare_directory(os.path.join(\n '../build/plots',\n args.scope,\n args.name,\n os.path.basename(args.test_src).rsplit('.', 1)[0]\n ))\n\n plt.ylim(args.ylim)\n plt.ylabel('Accuracy')\n plt.xlabel('Index')\n title = 'Test Accuracy'\n\n if args.batch_step < 200:\n plt.scatter(xs, ys, color='purple', s=0.1)\n else:\n plt.plot(xs, ys, color='purple', linestyle='--', linewidth=1)\n\n plt.title(title)\n plt.savefig(\n os.path.join(\n dest_dir,\n 'test-accuracy-batch_step-{0}.png'.format(args.batch_step)\n ),\n dpi=400,\n format='png'\n )\n plt.clf()\n\nif __name__ == '__main__':\n read_dataset()\n model = Model(\n args.step_size,\n args.hidden_size,\n args.embedding_size,\n args.symbol_size,\n args.layer_depth,\n args.batch_size,\n args.dropout_rate\n )\n\n # start session\n sess = tf.InteractiveSession(\n # config=tf.ConfigProto(intra_op_parallelism_threads=N_THREADS)\n )\n\n # prepare model import or export\n importSaver = tf.train.Saver()\n importSaver.restore(sess, args.src)\n\n batch_count, data_size = get_batch_count(dataset['ordered'], args.batch_size)\n plot_xs = []\n plot_ys = []\n\n start_time = time.time()\n for batch_idx in range(0, batch_count, args.batch_step):\n begin_idx = batch_idx * args.batch_size\n end_idx = min(begin_idx + args.batch_size, data_size)\n ground_truth = dataset['ordered'][begin_idx: end_idx]\n predictions = model.prediction.eval(\n session=sess,\n feed_dict={\n model.xs: ground_truth,\n model.ys: ground_truth,\n model.feed_previous: True,\n }\n )\n errors = np.equal(predictions, ground_truth)\n accuracy = np.mean(errors.astype(int))\n plot_xs.append(end_idx)\n plot_ys.append(accuracy)\n\n if (batch_idx + 1) % 1000 == 0:\n elapsed_time = time.time() - start_time\n print('Batch\\t%d, Elapsed time\\t%.1fs' % (\n batch_idx + 1, elapsed_time\n ))\n\n visualize(plot_xs, plot_ys)\n","sub_path":"src/test-anomaly-detection-embedding.py","file_name":"test-anomaly-detection-embedding.py","file_ext":"py","file_size_in_byte":3919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"247053602","text":"# -*- coding: utf-8 -*-\nimport logging\nfrom datetime import datetime\nfrom enum import Enum\nfrom typing import Optional\n\nfrom pydantic import BaseModel, Field, HttpUrl\n\n__author__ = 'lundberg'\n\nlogger = logging.getLogger(__name__)\n\n\nclass MessageType(str, Enum):\n SUBSCRIPTION_CONFIRMATION = 'SubscriptionConfirmation'\n UNSUBSCRIBE_CONFIRMATION = 'UnsubscribeConfirmation'\n NOTIFICATION = 'Notification'\n\n\nclass SNSMessage(BaseModel):\n class Config:\n arbitrary_types_allowed = True\n\n type: MessageType = Field(alias='Type')\n timestamp: datetime = Field(alias='Timestamp')\n topicArn: str = Field(alias='TopicArn')\n message_id: str = Field(alias='MessageId')\n subject: Optional[str] = Field(alias='Subject')\n message: str = Field(alias='Message')\n signature: str = Field(alias='Signature')\n signature_version: str = Field(alias='SignatureVersion')\n signing_cert_url: HttpUrl = Field(alias='SigningCertURL')\n # For type SUBSCRIPTION_CONFIRMATION\n subscribe_url: Optional[HttpUrl] = Field(alias='SubscribeURL')\n token: Optional[str] = Field(alias='Token')\n # For type NOTIFICATION\n unsubscribe_url: Optional[HttpUrl] = Field(alias='UnsubscribeURL')\n","sub_path":"src/sns_monitor/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} 
+{"seq_id":"355506322","text":"import os\nfrom werkzeug.utils import secure_filename\nfrom app_logger import logger\n\nos.putenv('LANG', 'en_US.UTF-8')\nos.putenv('LC_ALL', 'en_US.UTF-8')\n\nclass UploadFile:\n \"\"\"\n This class shall be used to upload the file provided for Prediction or Re-Training.\n \"\"\"\n\n def __init__(self,file):\n self.file_object = open(\"logs/UploadFile_Log.txt\", 'a+')\n self.logger_object = logger.App_Logger()\n\n def upload_file(self,file):\n\n try:\n\n if file.filename == '':\n self.logger_object.log(self.file_object, 'Upload File - No file selected for uploading')\n return \"No file selected for uploading\"\n if file:\n filename = secure_filename(file.filename)\n file.save(os.path.join(\"Input_Files\", 'Uploaded_file.csv'))\n \n self.logger_object.log(self.file_object, 'Upload File - File uploaded at the desired location')\n return \"File successfully uploaded at the desired location\"\n else:\n self.logger_object.log(self.file_object, 'Upload File - File not found!')\n return \"File not found\"\n\n except Exception as e:\n return \"Error during file upload!Please check logs for details.\"\n","sub_path":"upload_file/uploadFile.py","file_name":"uploadFile.py","file_ext":"py","file_size_in_byte":1285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"531435453","text":"#!/usr/bin/env python3\n\nimport math\nimport shutil\nimport tempfile\nimport unittest\nfrom os import path\n\nfrom pytorch_translate.research.unsupervised_morphology.unsupervised_bilingual_morphology import (\n BilingualMorphologyHMMParams,\n BilingualMorphologySegmentor,\n UnsupervisedBilingualMorphology,\n)\n\n\ndef get_two_tmp_files():\n src_txt_content = [\n \"123 124 234 345\",\n \"112 122 123 345\",\n \"123456789\",\n \"123456 456789\",\n ]\n dst_txt_content = [\n \"123 124 234 345\",\n \"112 122 123 345\",\n \"123456789\",\n \"123456 456789\",\n ]\n content1, content2 = \"\\n\".join(src_txt_content), \"\\n\".join(dst_txt_content)\n tmp_dir = tempfile.mkdtemp()\n file1, file2 = path.join(tmp_dir, \"test1.txt\"), path.join(tmp_dir, \"test2.txt\")\n with open(file1, \"w\") as f1:\n f1.write(content1)\n with open(file2, \"w\") as f2:\n f2.write(content2)\n\n return tmp_dir, file1, file2\n\n\nclass TestUnsupervisedBilingualMorphology(unittest.TestCase):\n def test_morph_init(self):\n morph_hmm_model = BilingualMorphologyHMMParams()\n\n tmp_dir, f1, f2 = get_two_tmp_files()\n morph_hmm_model.init_params_from_data(f1, f2)\n assert len(morph_hmm_model.morph_emit_probs) == 200\n assert round(morph_hmm_model.morph_emit_probs[\"1234\"], 3) == round(\n 0.0062799043062200955, 3\n )\n v = 1.0 / 201\n assert morph_hmm_model.translation_probs[\"1234\"][\"1234\"] == v\n assert morph_hmm_model.translation_prob(\"1234\", \"1234\") == v\n assert morph_hmm_model.translation_log_prob(\"1234\", \"1234\") == math.log(v)\n shutil.rmtree(tmp_dir)\n\n def test_zero_out_params(self):\n morph_hmm_model = BilingualMorphologyHMMParams()\n\n tmp_dir, f1, f2 = get_two_tmp_files()\n morph_hmm_model.init_params_from_data(f1, f2)\n for morph in morph_hmm_model.translation_probs.keys():\n assert morph_hmm_model.morph_emit_probs[morph] > 0\n for target_morph in morph_hmm_model.translation_probs.keys():\n assert morph_hmm_model.translation_probs[morph][target_morph] > 0\n\n morph_hmm_model.zero_out_params()\n for morph in morph_hmm_model.morph_emit_probs.keys():\n assert morph_hmm_model.morph_emit_probs[morph] == 0\n for target_morph in 
morph_hmm_model.translation_probs.keys():\n assert morph_hmm_model.translation_probs[morph][target_morph] == 0\n shutil.rmtree(tmp_dir)\n\n def test_bilingual_segmentation(self):\n morph_hmm_model = BilingualMorphologyHMMParams()\n\n tmp_dir, f1, f2 = get_two_tmp_files()\n morph_hmm_model.init_params_from_data(f1, f2)\n segmentor = BilingualMorphologySegmentor(morph_hmm_model)\n assert segmentor.segment_viterbi(\"1234 1234\") == [0, 2, 4, 7, 9]\n assert segmentor.segment_blingual_viterbi(\"1234 1234\", \"1234 1234\") == [\n 0,\n 4,\n 7,\n 9,\n ]\n shutil.rmtree(tmp_dir)\n\n def test_save_load(self):\n morph_hmm_model = BilingualMorphologyHMMParams()\n\n tmp_dir, f1, f2 = get_two_tmp_files()\n morph_hmm_model.init_params_from_data(f1, f2)\n morph_hmm_model.save(path.join(tmp_dir, \"test.pickle\"))\n loaded_params = BilingualMorphologyHMMParams.load(\n path.join(tmp_dir, \"test.pickle\")\n )\n\n assert morph_hmm_model.morph_emit_probs == loaded_params.morph_emit_probs\n assert morph_hmm_model.smoothing_const == loaded_params.smoothing_const\n assert morph_hmm_model.SMALL_CONST == loaded_params.SMALL_CONST\n assert morph_hmm_model.len_cost_pow == loaded_params.len_cost_pow\n assert morph_hmm_model.max_morph_len == loaded_params.max_morph_len\n assert morph_hmm_model.translation_probs == loaded_params.translation_probs\n shutil.rmtree(tmp_dir)\n\n def test_forward_backward(self):\n tmp_dir, f1, f2 = get_two_tmp_files()\n unsupervised_model = UnsupervisedBilingualMorphology(\n src_file=f1, dst_file=f2, smoothing_const=0.0\n )\n print(unsupervised_model.params.smoothing_const)\n # todo will add stuff here later.\n shutil.rmtree(tmp_dir)\n\n def test_get_morpheme_counts(self):\n tmp_dir, f1, f2 = get_two_tmp_files()\n unsupervised_model = UnsupervisedBilingualMorphology(\n src_file=f1, dst_file=f2, smoothing_const=0.0\n )\n morph_counts_with_null = unsupervised_model.params.get_morpheme_counts(\n \"1234\", take_log=False, include_null=True\n )\n assert len(morph_counts_with_null) == 11\n morph_counts_without_null = unsupervised_model.params.get_morpheme_counts(\n \"1234\", take_log=True, include_null=False\n )\n assert len(morph_counts_without_null) == 10\n assert morph_counts_without_null[\"12\"] == math.log(morph_counts_with_null[\"12\"])\n\n morph_counts_with_repeats = unsupervised_model.params.get_morpheme_counts(\n \"12312\", take_log=False, include_null=True\n )\n assert len(morph_counts_with_repeats) == 13\n assert morph_counts_with_repeats[\"12\"] == 2\n shutil.rmtree(tmp_dir)\n","sub_path":"pytorch_translate/research/test/test_unsupervised_bilingual_morphology.py","file_name":"test_unsupervised_bilingual_morphology.py","file_ext":"py","file_size_in_byte":5209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"353356530","text":"import itertools as itrls\nfrom collections import Counter\nimport more_itertools as mi\n\nfrom aoc import aoc_utils as au\n\ninp = au.obtain_input_for_day('10')\ndebug_path = au.get_debug_file_path('10')\n# %%\njolt_counter = Counter(inp)\nspread = 3\n# au.write_to_debug_file_path(str(sorted(inp)), '10')\n\n# %%\ndevice_joltage = max(inp) + 3\n\n\ndef get_jolt_jump_dict(inp):\n jolt_jump = {}\n running_jolts = 0\n sorted_jolts = sorted(jolt_counter.items(), key=lambda item: item[0])\n for jolt_val, num in sorted_jolts:\n if num != 1:\n return False, {}\n # print(running_jolts, jolt_val)\n jump = jolt_val - running_jolts\n if jump > 3:\n return False, {}\n if jump not in jolt_jump.keys():\n jolt_jump[jump] = 
0\n jolt_jump[jump] += 1\n running_jolts = jolt_val\n return True, jolt_jump\n\n\nb, res = get_jolt_jump_dict(inp)\nres[3] += 1 # Account for jolt jump into device\nprint(res[1] * res[3])\n\n#%%\ntest_cases = [\n ('foobar', 1),\n]\nfor in_data, correct in test_cases:\n out_data = RuntimeError(in_data)\n assert out_data == correct, f'{out_data} != {correct}'\n\n#%%\n","sub_path":"aoc/2020_aoc/dec_10.py","file_name":"dec_10.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"271409835","text":"#!/usr/bin/python\n\nimport fnmatch\nimport os\nimport sys\nimport subprocess\nimport datetime\nimport mysql.connector\n\nprint('WinCOR price and promotions processing started at ' + str(datetime.datetime.now()))\n\ntry:\n\tcnx = mysql.connector.connect(user='pimloader', password='lGClNEeyIU5Lss', database='pim_loader')\nexcept Exception as e:\n\tprint('An error ocurred while trying to connect to pim_loader database: ' + str(e))\n\nnew_prices = False;\nnew_promos = False;\n\ntry:\n\tfor fn in os.listdir(os.curdir):\n\t\tif fnmatch.fnmatch(fn, 'RDES*') or fnmatch.fnmatch(fn, 'RDPL*'): # Prices file\n\t\t\tincoming_file = open(fn, 'r')\n\t\t\tcursor = cnx.cursor()\n\t\t\tprice_add = (\"INSERT INTO tmp_wincor_price (op_type, country, branch, backoffice, shop, cod_wincor, promo_allowed, df_price, dp_price, eff_date_df, eff_date_dp) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\")\n\n\t\t\tstore_code = (fn.split('.')[1])\n\t\t\tcountry = store_code.split('_')[0]\n\t\t\tbranch = store_code.split('_')[1]\n\t\t\tbackoffice = store_code.split('_')[2]\n\t\t\tshop = store_code.split('_')[3]\n\n\t\t\tfor line in incoming_file: \n\t\t\t\tline = line.rstrip('\\n')\n\t\t\t\tfields = line.split('|')\n\t\t\t\tif fields[0] != \"\":\n\t\t\t\t\tif fields[6] == '\\r':\n\t\t\t\t\t\tfields[6] = ''\n\t\t\t\t\tprice_data = (fields[0], country, branch, backoffice, shop, fields[1], fields[2], fields[3], fields[4], fields[5], fields[6])\n\t\t\t\t\tcursor.execute(price_add, price_data)\n\t\t\t\n\t\t\tcnx.commit()\n\t\t\tincoming_file.close()\n\n\t\t\tos.rename(fn, './processed/' + fn)\n\t\t\tnew_prices = True\n\t\t\tprint('Maintaining WinCOR articles prices...')\n\t\t\tcursor.callproc('sp_wincor_price_maintainer')\t\t\t\n\t\t\tprint('Price file ' + fn + ' dumped into pim_loader database and moved to processed folder.')\n\n\t\tif fnmatch.fnmatch(fn, 'PDES*') or fnmatch.fnmatch(fn, 'PDPL*'): # Promotions file\n\t\t\tincoming_file = open(fn, 'r')\n\t\t\tcursor = cnx.cursor()\n\t\t\tpromo_add = (\"INSERT INTO tmp_wincor_promo (op_type, id_promo, id_category, id_promo_type, de_promo_type, de_promo, promo_start_date, promo_end_date, \\\n\t\t\t\t\t\t\t\t\t\tid_currency, id_company, id_hfm_shop_code, id_l_country, id_l_branch, id_l_backoffice, id_l_shop, de_shop, id_l_item, \\\n\t\t\t\t\t\t\t\t\t\tdf_dp_type, disc_type, condition_type, condition_value, discount_unit, discount_value, reward_item, disc_msg, \\\n\t\t\t\t\t\t\t\t\t\t multibuy_flag, promo_status, layer, export_timestamp) \\\n\t\t\t\t\t\t\tVALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\")\n\n\n\t\t\tfor line in incoming_file: \n\t\t\t\tline = line.rstrip('\\n')\n\t\t\t\tfields = line.split('|')\n\t\t\t\tif fields[0] == \"M\":\n\t\t\t\t\tpromo_data = (fields[0], fields[1], fields[2], fields[3], fields[4], fields[5], fields[6], fields[7], fields[8],\\\n\t\t\t\t\t\t\t\t fields[9],\tfields[10], 
fields[11],\tfields[12], fields[13], fields[14], fields[15], fields[16], fields[17], \\\n\t\t\t\t\t\t\t\t fields[18],fields[19], fields[20], fields[21], fields[22], fields[23], fields[24], fields[25], fields[26], \\\n\t\t\t\t\t\t\t\t fields[27], fields[28])\n\t\t\t\t\tcursor.execute(promo_add, promo_data)\n\n\t\t\t\t# Deletions come with 27 fields, while other records come with 29. \n\t\t\t\t# Add the logic here, or delete this comment in case they treat this issue in their side.\n\n\t\t\tcnx.commit()\n\t\t\tincoming_file.close()\n\n\t\t\tos.rename(fn, './processed/' + fn)\n\t\t\tnew_promos = True\n\t\t\tprint('Promotion file ' + fn + ' dumped into pim_loader database and moved to processed folder.')\n\n\t\tif fnmatch.fnmatch(fn, 'BDES*') or fnmatch.fnmatch(fn, 'BDPL*'): # Barcode file\n\t\t\tincoming_file = open(fn, 'r')\n\t\t\tcursor = cnx.cursor()\n\t\t\tbarcode_add = (\"INSERT INTO tmp_wincor_barcodes (op_type, cod_wincor, cod_ean, active) \\\n\t\t\t\t\t\t\tVALUES (%s, %s, %s, %s)\")\n\n\t\t\tfor line in incoming_file: \n\t\t\t\tline = line.rstrip('\\n')\n\t\t\t\tfields = line.split('|')\n\t\t\t\tif fields[0] == \"M\":\n\t\t\t\t\tbarcode_data = (fields[0], fields[1], fields[2], fields[3])\n\t\t\t\t\tcursor.execute(barcode_add, barcode_data)\n\t\t\tcnx.commit()\n\t\t\tincoming_file.close()\n\n\t\t\tprint('Barcode file ' + fn + ' dumped into pim_loader database. ')\n\t\t\tprint('Maintaining barcode list using informacion from file ' + fn + '. Depending on the number of records, this could take few minutes... Started at: ' + str(datetime.datetime.now()))\n\t\t\tcursor.callproc('sp_wincor_barcode_maintainer')\n\n\t\t\tos.rename(fn, './processed/' + fn)\n\t\t\tprint('Barcode file ' + fn + ' moved to processed folder.')\n\n\tif (new_promos or new_prices):\n\t\tprint(\"Price and promotions interface creation started at \" + str(datetime.datetime.now()))\n\t\tcursor.callproc('sp_wincor_price_and_promotion_IF')\n\t\tprint('Price and promotions interface created for incoming files.')\n\n\t\t# Export to Magento\n\t\tcmd_call = ['/bin/exportfromysqltable.sh', '/var/sftp/share/sftp/akeneo/export/channels/ecomerce/export/export_dufry_price_and_promo', 'pim_loader', 'et_price_promo_IF_I4']\n\t\tsubprocess.check_call(cmd_call)\n\t\t# Export to Mercaux\n\t\tcmd_call = ['/bin/exportfromysqltable.sh', '/var/sftp/share/sftp/akeneo/export/channels/selltablets/export/export_dufry_price_and_promo', 'pim_loader', 'et_price_promo_IF_mercaux']\n\t\tsubprocess.check_call(cmd_call)\n\t\tprint('Price and promotions tables exported to e-commerce and selltablets.')\n\n\t\tcursor.callproc('sp_wincor_price_to_hist')\n\t\tcursor.callproc('sp_wincor_promo_to_hist')\n\t\tprint('Information moved to historical tables at ' + str(datetime.datetime.now()))\n\nexcept Exception as e:\n\tprint('An error has ocurred while processing WinCOR prices and promotions: ' + str(e))\n\nfinally:\n\tcursor.callproc('sp_wincor_tmp_tables_truncation')\n\tcursor.close()\n\tcnx.close()\n\tprint('WinCOR price and promotions processing ended at ' + str(datetime.datetime.now()))\n","sub_path":"wincor/WinCOR_promotions_processing(deprecated).py","file_name":"WinCOR_promotions_processing(deprecated).py","file_ext":"py","file_size_in_byte":5566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"62074342","text":"# Built using Python 3.7.4\n\n#%%\n# Load libraries\nimport pandas as pd\nimport pandas_datareader.data as web\nimport numpy as np\nimport matplotlib.pyplot as plt\n#from OSM_blog import 
OSM_Portfolio_Simulations\n#%matplotlib inline\n\nfrom OSM_Portfolio_Simulations import Port_sim # for class and methods see prior po sts\n\nplt.style.use('ggplot')\n\n# Load data\ndf = pd.read_pickle('port_const.pkl')\ndat = pd.read_pickle('data_port_const.pkl')\n\n# Calculate returns and risk for longer period\nhist_mu = dat['1971':'1991'].mean(axis=0)\nhist_sigma = dat['1971':'1991'].std(axis=0)\n\n# Run simulation based on historical figures\nnp.random.seed(123)\nsim1 = []\n\nfor i in range(1000):\n #np.random.normal(mu, sigma, obs)\n a = np.random.normal(hist_mu[0], hist_sigma[0], 60) + np.random.normal(0, hist_sigma[0], 60)\n b = np.random.normal(hist_mu[1], hist_sigma[1], 60) + np.random.normal(0, hist_sigma[1], 60)\n c = np.random.normal(hist_mu[2], hist_sigma[2], 60) + np.random.normal(0, hist_sigma[2], 60)\n d = np.random.normal(hist_mu[3], hist_sigma[3], 60) + np.random.normal(0, hist_sigma[3], 60)\n \n df1 = pd.DataFrame(np.array([a, b, c, d]).T)\n \n cov_df1 = df1.cov()\n \n sim1.append([df1, cov_df1])\n \n\n# Create portfolio simulation\nnp.random.seed(123)\nport_sim_1, wts_1, _, sharpe_1, _ = Port_sim.calc_sim(df.iloc[1:60,0:4],1000,4)\n\n# Create efficient frontier function\nfrom scipy.optimize import minimize\n\ndef eff_frontier(df_returns, min_ret, max_ret):\n \n n = len(df_returns.columns)\n \n def get_data(weights):\n weights = np.array(weights)\n returns = np.sum(df_returns.mean() * weights)\n risk = np.sqrt(np.dot(weights.T, np.dot(df_returns.cov(), weights)))\n sharpe = returns/risk\n return np.array([returns,risk,sharpe])\n\n # Contraints\n def check_sum(weights):\n return np.sum(weights) - 1\n\n # Rante of returns\n mus = np.linspace(min_ret,max_ret,21) \n\n # Function to minimize\n def minimize_volatility(weights):\n return get_data(weights)[1] \n\n # Inputs\n init_guess = np.repeat(1/n,n)\n bounds = ((0.0,1.0),) * n\n\n eff_risk = []\n port_weights = []\n\n for mu in mus:\n # function for return\n cons = ({'type':'eq','fun': check_sum},\n {'type':'eq','fun': lambda w: get_data(w)[0] - mu})\n\n result = minimize(minimize_volatility,init_guess,method='SLSQP',bounds=bounds,constraints=cons)\n\n eff_risk.append(result['fun'])\n port_weights.append(result.x)\n \n eff_risk = np.array(eff_risk)\n \n return mus, eff_risk, port_weights\n\n# Create returns and min/max ranges\ndf_returns = df.iloc[1:60, 0:4]\nmin_ret = min(port_sim_1[:,0])\nmax_ret = max(port_sim_1[:,0])\n\n# Find efficient portfolio\neff_ret, eff_risk, eff_weights = eff_frontier(df_returns, min_ret, max_ret)\neff_sharpe = eff_ret/eff_risk\n\n\n### Test results of different weighting schemes on simulated returns\n## Create weight schemes\nsatisfice_wts = np.array([0.32, 0.4, 0.08, 0.2]) # Calculated in previous post using port_select_func\nsimple_wts = np.repeat(0.25, 4)\neff_sharp_wts = eff_weights[np.argmax(eff_sharpe)]\neff_max_wts = eff_weights[np.argmax(eff_ret)]\n\n## Create portfolio metric function to iterate\ndef port_func(df, wts):\n mean_ret = df.mean()\n returns = np.sum(mean_ret * wts)\n risk = np.sqrt(np.dot(wts, np.dot(df.cov(), wts)))\n return returns, risk\n \n# Run portfolio returns for return simulations\nfrom datetime import datetime\nstart_time = datetime.now()\n\nlist_df = [np.zeros((1000,2)) for _ in range(4)]\nwt_list = [satisfice_wts, simple_wts, eff_sharp_wts, eff_max_wts]\n\nfor i in range(4):\n arr = list_df[i]\n for j in range(1000):\n arr[j] = port_func(sim1[j][0], wt_list[i])\n \n sharpe_calc = arr[:,0]/arr[:,1]\n list_df[i] = np.c_[arr, sharpe_calc]\n\nsatis_df = 
list_df[0]\nsimple_df = list_df[1]\neff_sharp_df = list_df[2]\neff_max_df = list_df[3]\n\nend_time = datetime.now()\nprint('Duration: {}'.format(end_time - start_time))\n\n# Note python produces this much faster than R. Duration: 0:00:03.226398. Our R code must not be optimized.\n\n# Create portfolio means and names for graphing\n\nport_means = []\n\nfor df in list_df:\n port_means.append(df[:][:,0].mean()*1200)\n \nport_names = ['Satisfactory','Naive', 'Sharpe', 'Max']\n\n# Create graphing function\n\ndef pf_graf(names, values, rnd, nudge, ylabs, graf_title):\n df = pd.DataFrame(zip(names, values), columns = ['key', 'value'])\n sorted = df.sort_values(by = 'value')\n plt.figure(figsize = (12,6))\n plt.bar('key', 'value', data = sorted, color='darkblue')\n\n for i in range(len(names)):\n plt.annotate(str(round(sorted['value'][i], rnd)), xy = (sorted['key'][i], sorted['value'][i]+nudge))\n \n plt.ylabel(ylabs)\n plt.title('{} performance by portfolio'.format(graf_title))\n plt.show()\n\n# Graph return performance by portfolio\npf_graf(port_names, port_means, 2, 0.5, 'Returns (%)', 'Return')\n\n# Build names for comparison chart\ncomp_names= []\nfor i in range(4):\n for j in range(i+1,4):\n comp_names.append('{} vs. {}'.format(port_names[i], port_names[j]))\n\n# Calculate comparison values\ncomp_values = []\n\nfor i in range(4):\n for j in range(i+1, 4):\n comps =np.mean(list_df[i][:][:,0] > list_df[j][:][:,0])\n comp_values.append(comps)\n \n\n# Graph comparisons\npf_graf(comp_names[:-1], comp_values[:-1], 2, 0.025, 'Frequency (%)', 'Frequency of')\n\n# Build Sharpe portfolio comparisons \n\nsharp_means = []\nfor df in list_df:\n sharp_means.append(df[:][:,2].mean()*np.sqrt(12))\n \nsharp_comp = []\nfor i in range(4):\n for j in range(i+1, 4):\n comp = np.mean(list_df[i][:][:,2] > list_df[j][:][:,2])\n sharp_comp.append(comp)\n \n# Graph mean return comparsions for sharpe porfolio \npf_graf(port_names, sharp_means, 2, 0.005, \"Sharpe ratio\", \"Sharpe ratio\")\n\n# Graph sharpe results for sharpe portoflio\npf_graf(comp_names[:-1], sharp_comp[:-1], 2, 0.005, \"Frequency(%)\", \"Frequency\")\n\n# Bring in port simulation to compare results across million portfolios\nport_1m = pd.read_pickle(\"port_3m.pkl\")\nsharpe_1m = port_1m[:,0]/port_1m[:,1]\n\n# Create mean and sharpe outperformance results lists\nsim_mean = []\nsim_sharp = []\n\nfor i in range(4):\n mean = np.mean(np.mean(list_df[i][:,0]) > port_1m[:,0])\n sim_mean.append(mean)\n sharp = np.mean(np.mean(list_df[i][:,2]) > sharpe_1m[:])\n sim_sharp.append(sharp)\n \n# Graph return outperformance\npf_graf(port_names, sim_mean, 2, 0.005, \"Frequency(%)\", \"Frequency\")\n\n# Graph sharpe outperformance\npf_graf(port_names, sim_sharp, 2, 0.005, 'Frequency (%)', 'Frequency')\n# %%\n","sub_path":"OSM_blog/OSM_Satisficing_Optimizing.py","file_name":"OSM_Satisficing_Optimizing.py","file_ext":"py","file_size_in_byte":6627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"345262546","text":"#!/usr/bin/ python\nimport sys, string\n\n# GENERAL NOTES\n\"\"\"\nNote: let's assume row-major order, so for 2-d array below\n\nA:\n 11 12 13\n 21 22 23\n\n A[0][0] = 11\n A[0][1] = 12\n A[1][0] = 21\n \n And so on\n\"\"\"\n\n# EXAMPLE 1: Edit distance table sample\n# x: ABC\n# y: BBC\n\n# table:\n\n\"\"\"\ne = epislon = empty string\nX = BBC\nY = ABC\n\n\t\te A B C\n\t\t________\n\te | 0 1 2 3\n\tB | 1 1 1 2\n\tB | 2 2 1 2\n\tC | 3 3 2 1\n\n\"\"\"\n\n# following method given by lecture\ndef 
editDistRecursive(x, y):\n # This implementation is very slow\n if len(x) == 0:\n return len(y)\n elif len(y) == 0:\n return len(x)\n else: # following two lines adds 1 assuming current characters in x and y do not match (hence the min function below)\n distHor = editDistRecursive(x[:-1], y) + 1\n distVer = editDistRecursive(x, y[:-1]) + 1\n\n # compares last character of x and y, i.e. current characters being compared\n if x[-1] == y[-1]: # last characters match\n distDiag = editDistRecursive(x[:-1], y[:-1]) # compares prefix (-1 char) of x and y\n else: # last characters do not match\n distDiag = editDistRecursive(x[:-1], y[:-1]) + 1\n return min(distHor, distVer, distDiag)\n\n\ndef editDistance(x, y):\n\t# D represents 2-d array of x against y (with added empty string value in 0,0 cell)\n\tD = []\n\t\n\t# initializes D as array of arrays (each starting with 1 element: 0)\n\tfor i in range(len(x) + 1):\n\t\tD.append([0] * (len(y) + 1))\n\n\t# initializes first \"column\" with i value, i === length of some prefix of x\n\t# any prefix of x would have edit distance of len(prefix) to empty string\n\tfor i in range(len(x) + 1):\n\t\tD[i][0] = i\n\n\t# initializes first \"row\" with i value, similar to above block\n\tfor i in range(len(y) + 1):\n\t\tD[0][i] = i\n\n\t# not starting outer loop at index 0 because first column and first rows were handled above\n\tfor i in range(1, len(x) + 1):\n\t\tfor j in range(1, len(y) + 1):\n\t\t\tdistHor = D[i][j - 1] + 1\n\t\t\tdistVer = D[i - 1][j] + 1\n\n\t\t\t# subtract 1 from i and j to get correct index for current character\n\t\t\t# i and j started at 1 to account for the pre-filled first column/row above\n\t\t\t# but obviously, python string indexing starts at 0\n\t\t\tif x[i - 1] == y[j - 1]:\n\t\t\t\tdistDiag = D[i - 1][j - 1]\n\t\t\telse:\n\t\t\t\tdistDiag = D[i - 1][j - 1] + 1\n\n\t\t\tD[i][j] = min(distHor, distVer, distDiag)\n\n\treturn D[-1][-1] # returns last element\n\n\ndef main():\n\tx = 'shake spea'\n\ty = 'Shakespear'\n\n\tprint(editDistance(x, y))\n\nif __name__ == \"__main__\":\n main()","sub_path":"edit_distance.py","file_name":"edit_distance.py","file_ext":"py","file_size_in_byte":2486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"433119914","text":"import logging\nimport base64\nimport simplejson as json\nimport urllib2\nimport threading\n\n# TODO create a util method for getting a URL and return named element or None\n# TODO implement a \"sanitize\" method for handling return data (e.g. 
B64 decoding)\n\nclass PowerView:\n\n def __init__(self):\n self.logger = logging.getLogger('Plugin.PowerView')\n self.hub_lock = threading.Lock()\n\n def userdata(self, hubHostname):\n userdataUrl = 'http://%s/api/userdata/' % (hubHostname)\n\n data = self.__GET(userdataUrl)\n if data is None: return None\n\n return data.get('userData')\n\n def activateScene(self, hubHostname, sceneId):\n activateSceneUrl = \\\n 'http://%s/api/scenes?sceneId=%s' % (hubHostname, sceneId)\n\n self.__GET(activateSceneUrl)\n\n def activateSceneCollection(self, hubHostname, sceneCollectionId):\n activateSceneCollectionUrl = \\\n 'http://%s/api/scenecollections?scenecollectionid=%s' % (hubHostname, sceneCollectionId)\n\n self.__GET(activateSceneCollectionUrl)\n\n def calibrateShade(self, hubHostname, shadeId):\n shadeUrl = 'http://%s/api/shades/%s' % (hubHostname, shadeId)\n\n body = {\n 'shade': {\n 'motion': 'calibrate'\n }\n }\n\n self.__PUT(shadeUrl, body)\n\n def jogShade(self, hubHostname, shadeId):\n shadeUrl = 'http://%s/api/shades/%s' % (hubHostname, shadeId)\n\n body = {\n 'shade': {\n 'motion': 'jog'\n }\n }\n\n self.__PUT(shadeUrl, body)\n\n def room(self, hubHostname, roomId):\n roomUrl = 'http://%s/api/rooms/%s' % (hubHostname, roomId)\n\n data = self.__GET(roomUrl)\n if data is None: return\n data = data.pop('room')\n\n encName = data.pop('name')\n data['name'] = base64.b64decode(encName)\n\n return data\n\n def setShadePosition(self, hubHostname, shadeId, top, bottom):\n shadeUrl = 'http://%s/api/shades/%s' % (hubHostname, shadeId)\n\n body = {\n 'shade': {\n 'positions': {\n 'position1': bottom,\n 'posKind1': 1,\n 'position2': top,\n 'posKind2': 2\n }\n }\n }\n\n self.__PUT(shadeUrl, body)\n\n def scenes(self, hubHostname):\n scenesURL = 'http://%s/api/scenes/' % (hubHostname)\n\n data = self.__GET(scenesURL)\n if data is None: return\n data = data.pop('sceneData')\n\n for scene in data:\n encName = scene.pop('name')\n name = base64.b64decode(encName)\n\n room = self.room(hubHostname, scene['roomId'])\n\n scene['name'] = '%s - %s' % (room['name'], name)\n\n return data\n\n def sceneCollections(self, hubHostname):\n sceneCollectionsUrl = \\\n 'http://%s/api/scenecollections/' % (hubHostname)\n\n data = self.__GET(sceneCollectionsUrl)\n if data is None: return\n data = data.pop('sceneCollectionData')\n\n for sceneCollection in data:\n encName = sceneCollection.pop('name')\n sceneCollection['name'] = base64.b64decode(encName)\n\n return data\n\n def shade(self, hubHostname, shadeId):\n shadeUrl = 'http://%s/api/shades/%s' % (hubHostname, shadeId)\n\n data = self.__GET(shadeUrl)\n if data is None: return\n data = data.pop('shade')\n\n encName = data.pop('name')\n data['name'] = base64.b64decode(encName)\n data['batteryLevel'] = data.pop('batteryStrength')\n\n if 'positions' in data:\n shadePositions = data.pop('positions')\n data.update(shadePositions)\n\n return data\n\n def shades(self, hubHostname):\n shadesUrl = 'http://%s/api/shades/' % hubHostname\n\n data = self.__GET(shadesUrl)\n if data is None: return\n data = data.pop('shadeData')\n\n for shade in data:\n encName = shade.pop('name')\n shade['name'] = base64.b64decode(encName)\n\n return data\n\n def __GET(self, url):\n self.logger.debug('GET %s', url)\n\n response = None\n\n with self.hub_lock:\n try:\n f = urllib2.urlopen(url)\n response = json.load(f)\n f.close()\n except urllib2.URLError as err:\n self.logger.error('Error connecting to %s: %s', url, err.reason)\n\n return response\n\n def __PUT(self, url, data):\n 
self.logger.debug('PUT %s', url)\n body = json.dumps(data)\n\n request = urllib2.Request(url, data=body)\n request.add_header('Content-Type', 'application/json')\n request.get_method = lambda: \"PUT\"\n\n response = None\n\n opener = urllib2.build_opener(urllib2.HTTPHandler)\n\n with self.hub_lock:\n try:\n f = opener.open(request)\n response = json.load(f)\n f.close()\n except urllib2.URLError as err:\n self.logger.error('Error connecting to %s: %s', url, err.reason)\n\n return response\n\n","sub_path":"PowerView.indigoPlugin/Contents/Server Plugin/powerview.py","file_name":"powerview.py","file_ext":"py","file_size_in_byte":5151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"329154513","text":"import cv2\nimport torch\nfrom PIL import Image\nimport torchvision.transforms.functional as TF\n\n\ndef transform(img_array, input_size):\n \"\"\"\n\n :param img_array:\n :param input_size:\n :return:\n \"\"\"\n img_array = cv2.cvtColor(img_array, cv2.COLOR_BGR2RGB)\n img = Image.fromarray(img_array)\n\n width, height = img.size\n img = TF.resize(img, int(height / width * input_size)) # the smaller edge will be matched to input_size\n img = TF.pad(img, (0, int((img.size[0] - img.size[1]) / 2)))\n\n tensor = TF.to_tensor(img)\n # tensor = TF.normalize(tensor, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n return tensor\n\n\ndef stack_tensors(tensors):\n stacked = torch.stack(tensors)\n return stacked\n\n\ndef preds_postprocess(preds, stream_names, frame_shape, img_size, classes):\n # The amount of padding that was added\n pad_x = max(frame_shape[0] - frame_shape[1], 0) * (img_size / max(frame_shape))\n pad_y = max(frame_shape[1] - frame_shape[0], 0) * (img_size / max(frame_shape))\n # Image height and width after padding is removed\n unpad_h = img_size - pad_y\n unpad_w = img_size - pad_x\n\n preds_dict = {}\n\n for i, pred in enumerate(preds):\n if pred is None:\n preds_dict[stream_names[i]] = None\n else:\n person_bboxes = []\n pred = pred.cpu()\n for *xyxy, conf, cls_conf, cls_pred in pred:\n if classes[int(cls_pred)] == 'person': # 只检测人\n # Rescale coordinates to original dimensions\n box_h = ((xyxy[3] - xyxy[1]) / unpad_h) * frame_shape[0]\n box_w = ((xyxy[2] - xyxy[0]) / unpad_w) * frame_shape[1]\n y1 = ((xyxy[1] - pad_y // 2) / unpad_h) * frame_shape[0]\n x1 = ((xyxy[0] - pad_x // 2) / unpad_w) * frame_shape[1]\n\n person_bbox = (int(x1), int(y1), int(x1 + box_w), int(y1 + box_h)) # convert tensor to int\n person_bboxes.append(person_bbox)\n if len(person_bboxes) != 0:\n preds_dict[stream_names[i]] = person_bboxes\n else:\n preds_dict[stream_names[i]] = None\n\n return preds_dict\n\n\n","sub_path":"model/transform.py","file_name":"transform.py","file_ext":"py","file_size_in_byte":2206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"576879298","text":"import unittest\n\nfrom mock import patch, MagicMock # noqa: F401\nfrom pyhocon import ConfigFactory\n\nfrom databuilder.extractor.table_column_usage_aggregate_extractor import TblColUsgAggExtractor, RAW_EXTRACTOR\nfrom databuilder.models.table_column_usage import TableColumnUsage, ColumnReader\nfrom databuilder.transformer.regex_str_replace_transformer import RegexStrReplaceTransformer\nfrom databuilder.transformer.sql_to_table_col_usage_transformer import SqlToTblColUsageTransformer\n\n\nclass TestTblColUsgAggExtractor(unittest.TestCase):\n\n def test_aggregate(self):\n # type: () -> None\n with 
patch.object(RegexStrReplaceTransformer, 'init'),\\\n patch.object(SqlToTblColUsageTransformer, 'init'),\\\n patch.object(RegexStrReplaceTransformer, 'transform'),\\\n patch.object(SqlToTblColUsageTransformer, 'transform') as mock_sql_transform:\n\n raw_extractor = MagicMock()\n mock_raw_extractor = MagicMock()\n raw_extractor.extract = mock_raw_extractor\n raw_extractor.get_scope.return_value = 'foo'\n\n # Just to iterate 5 times\n mock_raw_extractor.side_effect = ['foo', 'bar', 'foo', 'bar', None]\n\n conf = ConfigFactory.from_dict(\n {RAW_EXTRACTOR: raw_extractor}\n )\n\n mock_sql_transform.side_effect = [\n TableColumnUsage(col_readers=[ColumnReader(database='database', cluster='gold', schema='test_schema1',\n table='test_table1', column='*',\n user_email='john@example.com')]),\n TableColumnUsage(col_readers=[ColumnReader(database='database', cluster='gold', schema='test_schema1',\n table='test_table1', column='*',\n user_email='john@example.com', read_count=2)]),\n TableColumnUsage(col_readers=[ColumnReader(database='database', cluster='gold', schema='test_schema1',\n table='test_table2', column='*',\n user_email='john@example.com', read_count=5)]),\n None]\n\n extractor = TblColUsgAggExtractor()\n extractor.init(conf)\n actual = extractor.extract()\n expected = TableColumnUsage(\n col_readers=[\n ColumnReader(database='database', cluster='gold', schema='test_schema1', table='test_table1',\n column='*', user_email='john@example.com', read_count=3),\n ColumnReader(database='database', cluster='gold', schema='test_schema1', table='test_table2',\n column='*', user_email='john@example.com', read_count=5)])\n\n self.assertEqual(expected.__repr__(), actual.__repr__())\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/unit/extractor/test_table_column_usage_aggregate_extractor.py","file_name":"test_table_column_usage_aggregate_extractor.py","file_ext":"py","file_size_in_byte":3050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"357072049","text":"from flask import Flask, render_template, request, url_for, redirect\nimport sqlite3\nimport Globals\n\n\napp = Flask(__name__)\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef index():\n return render_template(\"index.html\")\n\n\n@app.route('/record_marks', methods=['GET', 'POST'])\ndef record_marks():\n rows = Globals.get_practical_marks()\n return render_template(\"practical_mark_sheet.html\", rows=rows)\n\n\n@app.route('/student', methods=['GET', 'POST'])\ndef student():\n return render_template('student.html')\n\n\n@app.route('/config_course', methods=['GET', 'POST'])\ndef config_course():\n if request.method == \"GET\":\n options = Globals.course_type()\n rows = Globals.component()\n return render_template('config_course.html', options=options, rows=rows)\n else:\n nm = request.form['name']\n abv = request.form['abv']\n c_type = request.form.get('c_type')\n cos_id = 1\n marks = request.form['marks']\n con = sqlite3.connect(\"recSheet.db\")\n cur = con.cursor()\n cur.execute(\n 'INSERT INTO component (Name, Abreviation, ComponetType, CourseId, Marks) VALUES(?,?,?,?,?)',\n (nm, abv, c_type, cos_id, marks))\n con.commit()\n con.close()\n return render_template(\"config_course.html\")\n\n\n@app.route('/load_students', methods=['GET', 'POST'])\ndef load_students():\n if request.method == \"GET\":\n return render_template(\"config_course.html\")\n else:\n course_id = request.form['course_id']\n rows = Globals.get_students(course_id)\n conn = Globals.get_connection()\n 
cursor = conn.cursor()\n for row in rows:\n cursor.execute(\n 'INSERT INTO practical_mark_sheet (CourseId, StudentId, LevelOfStudy, DeptId, T1, T2, A1, A2, Lab1, Lab2, PCW, TCW, OCW, Exam, FinalMark) VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)',\n (row[0], row[1], row[2], row[3], '', '', '', '', '', '', '', '', '', '', ''))\n conn.commit()\n conn.close()\n return render_template(\"config_course.html\")\n\n\nif __name__ == '__main__':\n app.run()\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"640594993","text":"import re\n\n\ndef productNameAnalysis(productName: str):\n data = productName.lower()\n if data.find(\"ip\") > -1 and data.find(\"iphone\") == -1:\n temp = data.replace(\"ip\", \"iphone\")\n return temp\n elif data.find(\"ss\") > -1 and data.find(\"galaxy\") == -1:\n temp = data.replace(\"ss\", \"samsung galaxy\")\n return temp\n elif data.find(\"samsung\") > -1 and data.find(\"galaxy\") == -1:\n temp = data.replace(\"samsung\", \"samsung galaxy\")\n return temp\n elif data.find(\"ss\") > -1 and data.find(\"galaxy\") > -1:\n temp = data.replace(\"ss\", \"samsung\")\n return temp\n\n return data\n\n\ndef romramAnalysis(rom: str):\n data = rom.lower()\n if data.find(\"gb\") > -1 and data.find(\" \") == -1:\n temp = data.replace(\"gb\", \" gb\")\n return temp\n\n if data.find(\"gb\") == -1 and data.find(\" \") == -1 and data.find(\"g\") > -1:\n temp = data.replace(\"g\", \" gb\")\n return temp\n return data\n\n\ndef priceAnalysis(price: str):\n data = price.lower().strip(\" \").replace(\"mươi\", \"\").replace(\"lăm\", \"5\").replace(\n \"mốt\", \"1\").replace(\"tư\", \"4\").replace(\" \", \"\").replace(\".\", \",\").replace(\"rưởi\",\"5\")\n word = ['một', 'hai', 'ba', 'bốn', 'năm', 'sáu', 'bảy', 'tám', 'chín']\n for i in range(0, 9):\n data = data.replace(word[i], str(i+1))\n # specialword = [\"m\",\"tr\",\"trịu\",\"trieu\"]\n # for item in specialword:\n # data = data.replace(item,\"triệu\")\n if re.match(\"[1-9]*triệu[1-9]*\", data) or re.match(\"[1-9]*m[1-9]*\", data) or re.match(\"[1-9]*tr[1-9]*\", data):\n temp = data.replace(\"triệu\", \".\").replace(\"tr\",\".\").replace(\"m\",\".\")\n # print(temp)\n num = temp.split(\".\")\n # print(num)\n if len(num[1]) == 1:\n result = num[0]+\".\"+num[1]+\"00.000\"\n if len(num[1]) == 2:\n result = num[0]+\".\"+num[1]+\"0.000\"\n if len(num[1]) == 3:\n result = num[0]+\".\"+num[1]+\".000\"\n return result\n elif re.match(\"[1-9]*,[1-9]*triệu\", data) or re.match(\"[1-9]*,[1-9]*m\", data) or re.match(\"[1-9]*,[1-9]*tr\", data):\n temp = data.replace(\"triệu\", \"\").replace(\"tr\",\".\").replace(\"m\",\".\")\n num = temp.split(\".\")\n # print(len(num[1]))\n if len(num[1]) == 1:\n result = num[0]+\".\"+num[1]+\"00.000\"\n if len(num[1]) == 2:\n result = num[0]+\".\"+num[1]+\"0.000\"\n if len(num[1]) == 3:\n result = num[0]+\".\"+num[1]+\".000\"\n return result\n elif re.match(\"[1-9]*triệu\", data) or re.match(\"[1-9]*m\", data) or re.match(\"[1-9]*tr\", data):\n temp = data.replace(\"triệu\", \"\").replace(\"tr\",\".\").replace(\"m\",\".\")\n # num = temp.split(\".\")\n # # print(len(num[1]))\n # if len(num[1])==1:\n # result = num[0]+\".\"+num[1]+\"00.000\"\n # if len(num[1])==2:\n # result = num[0]+\".\"+num[1]+\"0.000\"\n # if len(num[1])==3:\n result = temp+\".000.000\"\n return result\n\n return 
data\n\n\nprint(priceAnalysis(\"3.500\"))\n","sub_path":"inputAnalysis.py","file_name":"inputAnalysis.py","file_ext":"py","file_size_in_byte":3017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"518004537","text":"from bl_ui.properties_render_layer import RenderLayerButtonsPanel\nfrom bpy.types import Panel\nfrom ..ui import icons\n\n\nclass LUXCORE_RENDERLAYER_PT_aovs(RenderLayerButtonsPanel, Panel):\n bl_label = \"LuxCore Arbitrary Output Variables (AOVs)\"\n COMPAT_ENGINES = {\"LUXCORE\"}\n bl_options = {\"DEFAULT_CLOSED\"}\n\n def draw(self, context):\n layout = self.layout\n active_layer = context.scene.render.layers.active\n aovs = active_layer.luxcore.aovs\n engine_is_path = context.scene.luxcore.config.engine == \"PATH\"\n\n if not engine_is_path:\n layout.label(\"The Bidir engine only supports a few AOVs\", icon=icons.INFO)\n\n split = layout.split()\n col = split.column(align=True)\n\n # Supported by BIDIR\n col.label(\"Basic Information\")\n col.prop(aovs, \"rgb\")\n col.prop(aovs, \"rgba\")\n col.prop(aovs, \"alpha\")\n col.prop(aovs, \"depth\")\n col.prop(aovs, \"albedo\")\n\n # Supported by BIDIR\n col.label(\"Material/Object Information\")\n col.prop(aovs, \"material_id\")\n col.prop(aovs, \"material_id_color\")\n col.prop(aovs, \"object_id\")\n # Not supported by BIDIR\n sub = col.column(align=True)\n sub.active = engine_is_path\n sub.prop(aovs, \"emission\")\n\n # Not supported by BIDIR\n sub.label(\"Direct Light Information\")\n sub.prop(aovs, \"direct_diffuse\")\n sub.prop(aovs, \"direct_glossy\")\n\n # Not supported by BIDIR\n sub.label(\"Indirect Light Information\")\n sub.prop(aovs, \"indirect_diffuse\")\n sub.prop(aovs, \"indirect_glossy\")\n sub.prop(aovs, \"indirect_specular\")\n\n col = split.column(align=True)\n\n # Supported by BIDIR\n col.label(\"Geometry Information\")\n col.prop(aovs, \"position\")\n col.prop(aovs, \"shading_normal\")\n col.prop(aovs, \"avg_shading_normal\")\n col.prop(aovs, \"geometry_normal\")\n col.prop(aovs, \"uv\")\n\n # Not supported by BIDIR\n sub = col.column(align=True)\n sub.active = engine_is_path\n sub.label(\"Shadow Information\")\n sub.prop(aovs, \"direct_shadow_mask\")\n sub.prop(aovs, \"indirect_shadow_mask\")\n\n # Not supported by BIDIR\n sub.label(\"Render Information\")\n sub.prop(aovs, \"irradiance\")\n sub.prop(aovs, \"raycount\")\n\n # Supported by BIDIR\n col.prop(aovs, \"convergence\")\n col.prop(aovs, \"noise\")\n col.prop(aovs, \"samplecount\")\n","sub_path":"All_In_One/addons/BlendLuxCore/ui/aovs.py","file_name":"aovs.py","file_ext":"py","file_size_in_byte":2497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"235955933","text":"import requests\nimport json\nimport base64\nimport pandas as pd\nfrom requests.auth import HTTPBasicAuth\napi_key = '8d93fc61-b99d-4652-89f0-094427d12a20'\n\n\nkeywords = ['data+analyst']\n\n\t\t\t# 'audit+manager',\n\t\t\t# 'senior+auditor',\n\t\t\t# 'marketing+manager',\n\t\t\t# 'business+development+manager',\n\t\t\t# 'risk+manager',\n\t\t\t# 'tax+lawyer','business']\n\ndef job_details(job_id):\n\turl = 'http://www.reed.co.uk/api/1.0/jobs/'+str(job_id)\n\tr = requests.get(url, auth=HTTPBasicAuth(api_key, ''))\n\tresults_string = r.content\n\tjson_response = json.loads(results_string)\n\tprint(type(json_response), json_response)\n\treturn json_response\n\ndef search_jobs(keywords):\n\tcount = 0\n\tdf = None\n\tfor keyword in keywords:\n\t\turl = 
'http://www.reed.co.uk/api/1.0/search?keywords='+keyword\n\t\tr = requests.get(url, auth=HTTPBasicAuth(api_key, ''))\n\t\tresults_string = r.content\n\t\tjson_response = json.loads(results_string)\n\t\tprint(type(json_response), json_response)\n\t\tif count ==0:\n\n\t\t\tdf = pd.DataFrame(json_response['results'])\n\t\t\tdf['keyword'] = keyword\n\t\t\tprint(df.head())\n\t\tif count > 0:\n\t\t\tdf_new = pd.DataFrame(json_response['results'])\n\t\t\tdf_new['keyword'] = keyword\n\t\t\tprint(df_new.head())\n\t\t\tdf = pd.concat([df,df_new])\n\n\t\tcount = count+1\n\tdf['job_details'] = df['jobId'].apply(job_details)\n\tdf['job_details'] = df['jobId'].apply(job_details)\n\tjob_detail = df['job_details'].apply(pd.Series)\n\tfinal = pd.merge(df,job_detail, on='jobId')\n\tprint(final.head())\n\tfinal.to_csv('scraped_data/20170220_B.csv',encoding='utf-8')\n\n\n\n# search_jobs(keywords)\n\ndf1 = pd.read_csv('scraped_data/20170220_B.csv')\ndf2 = pd.read_csv('scraped_data/20170220.csv')\n\n\ndf1['job_type'] = 'data analyst'\n\ndf2['job_type'] = 'data scientist' \t\t \nprint(df1.head())\nprint(df2.head())\n\ndf3 = pd.concat([df1,df2])\n\nprint(len(df3.index))\n\ndf3.to_csv('scraped_data/combined.csv')","sub_path":"jobs/jobs/jobs/reeds.py","file_name":"reeds.py","file_ext":"py","file_size_in_byte":1824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"389617178","text":"from app.models import OrderModel, OrderServicesModel, ServicesModel, PetModel\n\nfrom app.exc import InvalidKeysError, NotFoundError, MissingKeysError\nfrom app.services.helpers import (\n add_commit,\n delete_commit,\n check_valid_keys,\n check_missed_keys,\n)\n\n\nclass OrderServices:\n @staticmethod\n def create_order(data: dict) -> OrderModel:\n valid_keys = [\"date\", \"finished_date\", \"pet_delivery\", \"pet_id\"]\n\n if check_valid_keys(data, valid_keys):\n raise InvalidKeysError(data, valid_keys)\n\n required_fields = [\"pet_id\"]\n missed_fields: list[str] = check_missed_keys(data, required_fields)\n if missed_fields:\n raise MissingKeysError(required_fields, missed_fields)\n\n pet: PetModel = PetModel.query.get(data.get(\"pet_id\"))\n if not pet:\n raise NotFoundError(\"Pet not found\")\n\n order: OrderModel = OrderModel(**data)\n\n add_commit(order)\n\n return order\n\n @staticmethod\n def update_order_by_id(id: int, data: dict) -> dict:\n valid_keys = [\"finished_date\", \"pet_delivery\"]\n\n if check_valid_keys(data, valid_keys):\n raise InvalidKeysError(data, valid_keys)\n\n order: OrderModel = OrderModel.query.get(id)\n if not order:\n raise NotFoundError(\"Order not found\")\n\n for key, value in data.items():\n setattr(order, key, value)\n\n add_commit(order)\n\n return order.serialize\n\n @staticmethod\n def delete_order_by_id(id: int) -> None:\n\n order: OrderModel = OrderModel.query.get(id)\n if not order:\n raise NotFoundError(\"Order not found\")\n\n delete_commit(order)\n\n @staticmethod\n def get_all_orders() -> list[dict]:\n orders_json: list[dict] = []\n\n orders: OrderModel = OrderModel.query.all()\n\n for order in orders:\n\n query = (\n OrderModel.query.from_self(\n ServicesModel.id,\n ServicesModel.name,\n ServicesModel.description,\n ServicesModel.price,\n )\n .join(OrderServicesModel)\n .join(ServicesModel)\n .filter(OrderModel.id == order.id)\n .all()\n )\n\n order_json = order.serialize\n order_json[\"services\"] = [\n {\n \"id\": item[0],\n \"name\": item[1],\n \"description\": item[2],\n \"price\": item[3],\n }\n for item in query\n ]\n\n 
orders_json.append(order_json)\n\n return orders_json\n\n @staticmethod\n def get_order_by_id(id: int) -> dict:\n order: OrderModel = OrderModel.query.get(id)\n if not order:\n raise NotFoundError(\"Order not found\")\n\n query = (\n OrderModel.query.from_self(\n ServicesModel.id,\n ServicesModel.name,\n ServicesModel.description,\n ServicesModel.price,\n )\n .join(OrderServicesModel)\n .join(ServicesModel)\n .filter(OrderModel.id == order.id)\n .all()\n )\n\n order_json = order.serialize\n order_json[\"services\"] = [\n {\"id\": item[0], \"name\": item[1], \"description\": item[2], \"price\": item[3]}\n for item in query\n ]\n\n return order_json\n","sub_path":"app/services/order_service.py","file_name":"order_service.py","file_ext":"py","file_size_in_byte":3458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"571824055","text":"from unittest import TestCase\nfrom fanart.core import Request\nfrom fanart.errors import RequestFanartError, ResponseFanartError\nfrom httpretty import httprettified, HTTPretty\n\n\nclass RequestTestCase(TestCase):\n def test_valitate_error(self):\n self.assertRaises(RequestFanartError, Request, 'key', 'id', 'sport')\n\n @httprettified\n def test_response_error(self):\n request = Request('apikey', 'objid', 'tv')\n HTTPretty.register_uri(\n HTTPretty.GET,\n 'http://webservice.fanart.tv/v3/tv/objid?api_key=apikey',\n body='Please specify a valid API key',\n )\n try:\n request.response()\n except ResponseFanartError as e:\n self.assertEqual(repr(e), \"ResponseFanartError('Expecting value: \"\n \"line 1 column 1 (char 0)',)\")\n self.assertEqual(str(e), \"Expecting value: \"\n \"line 1 column 1 (char 0)\")\n","sub_path":"fanart/tests/test_core.py","file_name":"test_core.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"476534526","text":"#!/usr/bin/env python3\nimport os\nimport judge\nfrom time import process_time\n\n\ndef read_input():\n inputs = list() # a list containing inputs from all files\n for file in input_files:\n input = list() # input from a single file\n with open(file) as fh:\n input = [int(i) for i in fh.readline().split()]\n # remove first integer because it's only for Java's Scanner\n input.remove(input[0])\n inputs.append(input)\n return inputs\n\n\ndef maximum_subarrays_sum(A):\n \"\"\"returns the maximum subarray sum\"\"\"\n total_max = 0\n temp_max = 0\n for i in A:\n temp_max += i\n if temp_max < 0:\n temp_max = 0\n if temp_max > total_max:\n total_max = int(temp_max)\n return [total_max]\n\n\n# path to the local test files\npath = \"testdata/DnA-maximum-subarray-sum/\"\ninput_files = [os.path.join(path, file) for file in os.listdir(path) if file.endswith(\".input\")]\noutput_files = [os.path.join(path, file) for file in os.listdir(path) if file.endswith(\".output\")]\ninput_files.sort()\noutput_files.sort()\n# inputs from the test files\ninputs = read_input()\n# outputs generated by the algorithm\nstart_time = process_time()\noutputs = list()\nfor input in inputs:\n outputs.append(maximum_subarrays_sum(input))\nend_time = process_time()\n\njudge.run(output_files, outputs, end_time - start_time)\n","sub_path":"maximum_subarray_sum.py","file_name":"maximum_subarray_sum.py","file_ext":"py","file_size_in_byte":1378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"121990782","text":"import sys\n\nassert sys.version_info >= (2, 6)\n\nfrom 
setuptools import setup, find_packages\n\nkwargs = dict(\n name=\"bd2k-python-lib\",\n version=\"1.14a1\",\n\n author='Hannes Schmidt',\n author_email='hannes@ucsc.edu',\n url='https://github.com/BD2KGenomics/bd2k-python-lib',\n description='The BD2K Python module kitchen sink',\n\n package_dir={ '': 'src' },\n packages=find_packages( 'src' ),\n install_requires=[ 'future' ],\n tests_require=[\n 'pytest==2.7.2',\n 'mock==1.0.1',\n 'lockfile==0.11.0',\n 'boto==2.38.0'],\n namespace_packages=[ 'bd2k' ] )\n\nfrom setuptools.command.test import test as TestCommand\n\n\nclass PyTest( TestCommand ):\n user_options = [ ('pytest-args=', 'a', \"Arguments to pass to py.test\") ]\n\n def initialize_options( self ):\n TestCommand.initialize_options( self )\n self.pytest_args = [ ]\n\n def finalize_options( self ):\n TestCommand.finalize_options( self )\n self.test_args = [ ]\n self.test_suite = True\n\n def run_tests( self ):\n import pytest\n # Sanitize command line arguments to avoid confusing Toil code attempting to parse them\n sys.argv[ 1: ] = [ ]\n errno = pytest.main( self.pytest_args )\n sys.exit( errno )\n\n\nkwargs[ 'cmdclass' ] = { 'test': PyTest }\n\nsetup( **kwargs )\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"468996498","text":"lista = []\n\ni = 0\nwhile i < 5:\n\tnumero = int(input())\n\tlista.append(numero)\n\ti = i +1\n\ni = 0\npares = 0\nimpares = 0\npositivos = 0\nnegativos = 0\nwhile(i < len(lista)):\n\tif(lista[i] %2 == 0):\n\t\tpares = pares + 1\n\telif(lista[i] %2 != 0):\n\t\timpares = impares + 1\n\n\tif(lista[i] > 0):\n\t\tpositivos = positivos + 1\n\telif(lista[i] < 0):\n\t\tnegativos = negativos + 1\n\n\ti = i +1\n\nprint ('{} valor(es) par(es)'.format(pares))\nprint ('{} valor(es) impar(es)'.format(impares))\nprint ('{} valor(es) positivo(s)'.format(positivos))\nprint ('{} valor(es) negativo(s)'.format(negativos))\n","sub_path":"Python/Comando de repetição/1066.py","file_name":"1066.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"603326296","text":"import bottle\r\nfrom bottle import route, get, post, mako_view as view, request, abort, HTTPResponse\r\nimport globals\r\nimport json, string\r\nfrom ml import log\r\n\r\nfrom onyx import account\r\nfrom onyx import game\r\nfrom onyx import logging, api, markup\r\n\r\n@logging.trap\r\n@post('/api/:key/start_user_session', name=\"api.start_user_session\")\r\ndef start_user_session(key):\r\n '''## start_user_session\r\n\r\nTo start a user session make a __HTTP__ request to http://mil-api.muzzylane.com/api/__{api key}__/__start_user_session__/, the request method should be POST and the body of the request should be a __JSON__ Object. A __URL__, where the given user can play the requested game, will be returned.\r\n\r\n__ Properties of POSTed JSON Object __\r\n\r\n__ game_id __ - (*String*) The identifier for what game is to be played. __Valid Values : __ %(game_id_valid_values)s\r\n\r\n__ user_id __ - (*String*) A unique identifier for the user playing the game. If no user exists for the passed user_id the API will create the user using the passed user_id.\r\n\r\n__ restart __ - (*optional*) (*Boolean*) (__ default __:*false*) If set a new session will be created replacing the existing one. 
All data will be lost.\r\n\r\n__ group_id __ - (*optional*) (*String*) A string identifier of the group the user belongs to. Users with their group_id set won't be able to interact with users outside their group. If there isn't a group for the passed group_id the API will created the group and add the user to it. If no group_id is passed the user will be able to interact with any user not in a group. Group functionality is currently not implemented, but will be used as part of adding multiplayer support in the future.\r\n\r\nexample JSON: {\"game_id\":\"es\", \"user_id\":\"248861\", \"group_id\":\"187\"}\r\n\r\n\r\n__ Returned __\r\n\r\nhttp://mil-api-test.muzzylane.com:80/game/9463c3ac-50cb-41a3-9f42-0dfd9ca96de2\r\n\r\n '''\r\n db = globals.db.connect()\r\n \r\n developer = account.Developer.get_by_api_key(db, key)\r\n if not developer:\r\n log.error(['Key not set', request.urlparts.path, key])\r\n raise HTTPResponse(\"Key not set\", status=400)\r\n \r\n input = json.loads(request.body.read())\r\n \r\n if 'game_id' not in input or 'user_id' not in input or 'group_id' not in input:\r\n log.error(['Values not set', request.urlparts.path, key])\r\n raise HTTPResponse(\"values not set\", status=400)\r\n \r\n slice_id = None\r\n au_token = None\r\n restart = False\r\n group_id = None\r\n \r\n if 'restart' in input:\r\n if string.lower(input['restart']) == 'true':\r\n restart = True\r\n elif string.lower(input['restart']) == 'false':\r\n restart = False\r\n else:\r\n log.error(['restart not of type bool', request.urlparts.path, key])\r\n raise HTTPResponse(\"restart not of type bool. Valid values: true, false\", status=500)\r\n \r\n if 'group_id' in input:\r\n try:\r\n slice_id = input['group_id']\r\n except ValueError:\r\n log.error(['group_id not of type int', request.urlparts.path, key])\r\n raise HTTPResponse(\"group_id not of type int\", status=500)\r\n \r\n user = account.APIUser.get_by_token(db, user_id)\r\n if not user:\r\n user = account.APIUser(token=user_id, dev_id=developer.id)\r\n \r\n url = api.start_user_session(db, slice_id, user.id, restart, group_id)\r\n db.commit()\r\n \r\n return url\r\n \r\n \r\n@get('/doc/:key/start_user_session')\r\ndef doc_start_user_session(key):\r\n db = globals.db.connect()\r\n \r\n if not account.Developer.get_by_api_key(db, key):\r\n abort(400, \"invalide values\")\r\n \r\n slices = game.Slice.all(db)\r\n \r\n return markup.render(start_user_session.__doc__ % dict(game_id_valid_values = ', '.join([slice.token for slice in slices])), False)\r\n\r\n\r\n@post('/api/:key/remove_user_sessions')\r\n@logging.trap\r\ndef remove_user_sessions(key):\r\n '''## remove_user_sessions\r\n\r\nTo remove user sessions make a __HTTP__ request to %(http)s/api/__{api key}__/__remove_user_sessions__/, the request method should be POST and the body of the request should be a __JSON__ Object.\r\n\r\nRemoving user sessions will delete all sessions created for a users. This will delete the user's progress in all games as well as delete any stored date. You currently can __not__ specify a specific user session to be deleted, ie: you can't say, delete the french(fr) session but keep the spanish(es) session. 
This functionality can be useful when testing and you need to start with a clean slate.\r\n\r\n__ Properties of POSTed JSON Object __\r\n\r\n__ user_id __ - (*String*) The unique identifier of the user whose sessions you want to delete.\r\n\r\nexample JSON: {\"user_id\":\"216789\"}\r\n\r\n__ Returned __\r\n\r\nNothing\r\n\r\n '''\r\n \r\n db = globals.db.connect()\r\n \r\n if not account.Developer.get_by_api_key(db, key):\r\n log.error([\"Key not set\", request.urlparts.path, key])\r\n raise HTTPResponse(\"Key not set\", status=400)\r\n \r\n input = json.loads(request.body.read())\r\n \r\n if 'user_id' not in input:\r\n log.error([\"Values not set\", request.urlparts.path, key])\r\n raise HTTPResponse(\"values not set\", status=400)\r\n \r\n user_id = input['user_id']\r\n \r\n user = account.APIUser.get_by_token(db, user_id)\r\n \r\n if not user:\r\n log.error([\"Invalid user_id\", request.urlparts.path, key])\r\n raise HTTPResponse(\"Invalid user_id\", status=400)\r\n \r\n api.remove_user_sessions(db, user.id)\r\n \r\n db.commit()\r\n\r\n@get('/doc/:key/remove_user_sessions')\r\ndef doc_remove_user_sessions(key):\r\n if not account.Developer.get_by_api_key(db, key):\r\n abort(400, \"invalide values\")\r\n return markup.render(remove_user_sessions.__doc__ % dict(http=globals.config.ml.http), False)","sub_path":"web/onyx/game/controllers/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":5783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"492626637","text":"import sys\nimport os\nimport subprocess\nimport math\nfrom ROOT import TCanvas, TText, TGraphAsymmErrors, TFile, TEfficiency, TLegend\nimport settings\n\nimport ROOT\nROOT.gROOT.SetBatch(True)\nROOT.gStyle.SetOptTitle(1)\n\ndef get_layer_name(layer):\n if layer<5: return 'TIB L'+str(layer)\n if layer>=5 and layer<11: return 'TOB L'+str(layer-4)\n if layer>=11 and layer<14: return 'TID- D'+str(layer-10)\n if layer>=14 and layer<17: return 'TID+ D'+str(layer-13)\n if layer>=17 and layer<26: return 'TEC- W'+str(layer-16)\n if layer>=26 and layer<35: return 'TEC+ W'+str(layer-25)\n return ''\n\ndef get_short_layer_name(layer):\n if layer<5: return 'L'+str(layer)\n if layer>=5 and layer<11: return 'L'+str(layer-4)\n if layer>=11 and layer<14: return 'D'+str(layer-10)\n if layer>=14 and layer<17: return 'D'+str(layer-13)\n if layer>=17 and layer<26: return 'W'+str(layer-16)\n if layer>=26 and layer<35: return 'W'+str(layer-25)\n return ''\n\n\n\ndef add_points(graph, directory, subdir, layer):\n\n ipt=graph.GetN()\n labels = []\n \n # List runs\n for root, directories, files in os.walk(directory):\n for rundir in sorted(directories):\n if \"run_\" in rundir:\n # start to process run\n run = rundir[4:]\n #print \"processing run \", run\n\t\t\n # for efficiency\n frun = TFile(directory+\"/\"+rundir+\"/\"+subdir+\"/rootfile/SiStripHitEffHistos_run\"+run+\".root\")\n fdir = frun.GetDirectory(\"SiStripHitEff\")\n hfound = fdir.Get(\"found\")\n htotal = fdir.Get(\"all\")\n\n if htotal == None: \n print(' Missing histogram in file '+frun.GetName())\n continue\n\n # efficiency for a given layer\n found = hfound.GetBinContent(int(layer))\n total = htotal.GetBinContent(int(layer))\n if total>0: eff = found/total\n else: eff = 0\n #print run, eff\n\n graph.SetPoint(ipt, ipt+1, eff)\n labels.append(run)\n low = TEfficiency.Bayesian(total, found, .683, 1, 1, False)\n up = TEfficiency.Bayesian(total, found, .683, 1, 1, True)\n #eff_vs_run.SetPointError(ipt, 0, 0, eff-low, up-eff)\n 
ipt+=1\n frun.Close()\n\n axis = graph.GetXaxis()\n for i in range(graph.GetN()) : \n axis.SetBinLabel(axis.FindBin(i+1), labels[i])\n #print i, axis.FindBin(i+1), labels[i]\n return labels\n\n\n\ndef draw_subdet(graphs, subdet):\n\n l_min=0\n l_max=0\n subdet_str=''\n \n if subdet==1:\n l_min=1\n l_max=4\n subdet_str='TIB'\n\n if subdet==2:\n l_min=5\n l_max=9\n subdet_str='TOB'\n\n if subdet==3:\n l_min=11\n l_max=13\n subdet_str='TIDm'\n\n if subdet==4:\n l_min=14\n l_max=16\n subdet_str='TIDp'\n\n if subdet==5:\n l_min=17\n l_max=24\n subdet_str='TECm'\n\n if subdet==6:\n l_min=26\n l_max=33\n subdet_str='TECp'\n\n leg = TLegend(.92, .3, .99, .7)\n leg.SetHeader('')\n leg.SetBorderSize(0)\n\n min_y=1.\n for layer in range(l_min,l_max+1):\n if layer==l_min: graphs[layer-1].Draw('AP')\n else: graphs[layer-1].Draw('P')\n graphs[layer-1].SetMarkerColor(1+layer-l_min)\n min_y = graphs[layer-1].GetMinimum() if graphs[layer-1].GetMinimum()= self._min_rating and create_date <= latest_create_date:\n new_data[guid] = json_data[guid]\n\n return new_data\n\n def load(self, jdata):\n date = datetime.date.today().strftime(\"%Y%m%d\")\n store_json_to_s3(json.dumps(jdata),\n FILTERED_AMO_BASE_FILENAME,\n date,\n AMO_DUMP_PREFIX,\n AMO_DUMP_BUCKET)\n\n\n@click.command()\n@click.option(\"--s3-prefix\", default=AMO_DUMP_PREFIX)\n@click.option(\"--s3-bucket\", default=AMO_DUMP_BUCKET)\n@click.option(\"--input_filename\", default=AMO_DUMP_FILENAME)\n@click.option(\"--min_rating\", default=MIN_RATING)\n@click.option(\"--min_age\", default=MIN_AGE)\ndef main(s3_prefix, s3_bucket, input_filename, min_rating, min_age):\n etl = AMOTransformer(s3_bucket,\n s3_prefix,\n input_filename,\n float(min_rating),\n int(min_age))\n jdata = etl.extract()\n final_jdata = etl.transform(jdata)\n etl.load(final_jdata)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"mozetl/taar/taar_amowhitelist.py","file_name":"taar_amowhitelist.py","file_ext":"py","file_size_in_byte":3837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"239977716","text":"from fastapi import FastAPI\nfrom pydantic import BaseModel\nfrom ml import model_predict \n\n\napp = FastAPI()\n\n\nclass Usuario(BaseModel):\n pais: str\n genero: str\n edad: int\n antiguedad: int\n facturacion: float\n puntuacion_crediticia: int\n cantidad_productos: int\n posee_tarjeta: str\n miembro_activo: str\n salario_estimado: float\n velocidad_servicio: str\n\n\n@app.post('/abandono/')\ndef analize_article(usuario: Usuario):\n pred = model_predict(usuario) \n return {\"result\": pred}","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"30678469","text":"\n \n#class Solution(object):\n# def maxSubArray(self, nums):\n# \"\"\"\n# :type nums: List[int]\n# :rtype: int\n# \"\"\"\n# # WRONG when all elements are negative(maximum subarray equals to find the maximum negative element)\n# if not nums:\n# return \n# temp = 0 \n# maxSum = nums[0]\n# for num in nums:\n# temp = max(0, num+temp) # reset temp when temp< 0 (no need to keep neg value), correct it with temp = max(num, num+temp)\n# maxSum = max(maxSum, temp)\n# return maxSum\n\n\n# http://www.tangjikai.com/algorithms/leetcode-53-maximum-subarray\n#class Solution(object):\n# def maxSubArray(self, nums):\n# \"\"\"\n# :type nums: List[int]\n# :rtype: int\n# \"\"\" \n# # Divide and Conquer \n# def findSub(left,right,maxsum):\n# if 
left>right:\n# return float(\"-inf\")\n# mid=left+(right-left)//2\n# leftMax=findSub(left,mid-1,maxsum) # 不包括nums[mid]\n# rightMax=findSub(mid+1,right,maxsum)\n# \n# tempmax,left_midMax=0,0\n# for i in range(mid-1,left-1,-1):\n# tempmax+=nums[i]\n# left_midMax=max(left_midMax,tempmax)\n# \n# tempmax,right_midMax=0,0\n# for i in range(mid+1,right+1):\n# tempmax+=nums[i]\n# right_midMax=max(right_midMax,tempmax)\n# \n# maxsum=max(max(leftMax,rightMax),left_midMax+nums[mid]+right_midMax)\n# return maxsum\n# \n# if nums is None or len(nums) == 0:\n# return 0\n# maxsum=float(\"-inf\")\n# return findSub(0,len(nums)-1,maxsum)\n \n\nclass Solution(object):\n def maxSubArray(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n # DP with global optimum and local optimum\n n=len(nums) # contain at least 1\n lm=gm=nums[0]\n for i in range(1,n):\n lm=max(nums[i],lm+nums[i]) # if lm<=0, then ignore previous subarray\n gm=max(gm,lm)\n return gm\n \n\nif __name__==\"__main__\":\n print(Solution().maxSubArray([-2,1,-3,4,-1,2,1,-5,4]))\n print(Solution().maxSubArray([1,2,-1,-2,2,1,-2,1]))\n print(Solution().maxSubArray([-4,-1,-2,-3,-1]))\n \n","sub_path":"53. Maximum Subarray.py","file_name":"53. Maximum Subarray.py","file_ext":"py","file_size_in_byte":2348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"177699584","text":"#!/usr/bin/env python\n#=========================================================================\n# This is OPEN SOURCE SOFTWARE governed by the Gnu General Public\n# License (GPL) version 3, as described at www.opensource.org.\n# Copyright (C)2017 William H. Majoros (martiandna@gmail.com).\n#=========================================================================\nfrom __future__ import (absolute_import, division, print_function, \n unicode_literals, generators, nested_scopes, with_statement)\nfrom builtins import (bytes, dict, int, list, object, range, str, ascii,\n chr, hex, input, next, oct, open, pow, round, super, filter, map, zip)\n# The above imports should allow this program to run in both Python 2 and\n# Python 3. 
You might need to update your version of module \"future\".\nimport os\nimport sys\nimport ProgramName\nfrom Rex import Rex\nrex=Rex()\n\n#=========================================================================\n# main()\n#=========================================================================\nif(len(sys.argv)!=3):\n exit(ProgramName.get()+\" \\n\")\n(stem,outfile)=sys.argv[1:]\n#numJobs=int(numJobs)\n\nfiles=os.listdir(\"runs/\"+stem)\nnumJobs=0\nfor file in files:\n if(rex.find(\"predictions.*\\.txt\",file)): numJobs+=1\ncmd=\"cat \"\nfor i in range(numJobs):\n cmd+=\"runs/\"+stem+\"/predictions\"+str(i+1)+\".txt \"\ncmd+=\"> \"+outfile\n#print(cmd)\nos.system(cmd)\n\n\n","sub_path":"collect-Z.py","file_name":"collect-Z.py","file_ext":"py","file_size_in_byte":1396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"390114536","text":"import nox\n\nnox.options.sessions = ['lint', 'typing']\nlocations = ['pdcheck.py', 'PagerDutyChecker/']\n\nlint_common_args = ['--max-line-length', '120']\nmypy_args = ['--ignore-missing-imports']\n\n\n@nox.session()\ndef lint(session):\n args = session.posargs or locations\n\n session.install('pycodestyle', 'flake8', 'flake8-import-order')\n session.run('pycodestyle', *(lint_common_args + args))\n session.run('flake8', *(lint_common_args + args))\n\n\n@nox.session()\ndef typing(session):\n args = session.posargs or locations\n session.install('mypy')\n session.run('mypy', *(mypy_args + args))\n","sub_path":"noxfile.py","file_name":"noxfile.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"278219687","text":"\n\n\n'''\nIntersect a vector with a raster\nand add up resistance\n'''\n\n# add to our path\nfrom sys import path\n#path.append('/home/shancoc2/src/OOSA-code-public/week5/rasters')\npath.append('/Users/dill/teaching/oosa/2019-20/OOSA-code-public/week5/rasters')\nfrom resistSquirrel import rasterMaths\nimport argparse\nimport pandas as pd\nimport numpy as np\nfrom pyproj import Proj, transform\nfrom math import sqrt\n\n\n#######################\n\nclass vectorSquirrel():\n\n def __init__(self,filename,epsg=27700):\n '''Initialiser'''\n # read the file in to pandas dataframe\n df=pd.read_csv(filename)\n\n # sort by the time column\n self.sortedData=df.sort_values('time').reset_index(drop=True)\n\n # reproject x and y\n inProj=Proj(init=\"epsg:4326\")\n outProj=Proj(init=\"epsg:\"+str(epsg))\n # reproject data\n self.x,self.y=transform(inProj, outProj, np.array(df.x), np.array(df.y))\n self.time=np.array(df.time)\n\n\n def interpolateLine(self,xRes,yRes):\n '''Function to interpolate lines through a raster'''\n\n # create empty lists\n x=[]\n y=[]\n t=[]\n\n # loop over recorded points\n for i in range(0,self.x.shape[0]-1):\n # determine distance between two points\n dx=self.x[i+1]-self.x[i]\n dy=self.y[i+1]-self.y[i]\n dt=self.time[i+1]-self.time[i]\n dist=sqrt(dx**2+dy**2)\n\n # determine direction, to help us increment\n if(dx<0):\n dirX=-1\n else:\n dirX=1\n if(dy<0):\n dirY=-1\n else:\n dirY=1\n\n # line equation parameters\n m=dy/dx\n c=self.y[i]-m*self.x[i]\n\n # how many pixels does this cross in x and y?\n nXint=int(abs(dx)/xRes)\n nYint=int(abs(dy)/yRes)\n if(nXint<0): # do at least one point per segment\n nXint=1\n if(nYint<0):\n nYint=1\n\n # x pixel crossings\n thisX=np.arange(0,dx,dirX*xRes)+self.x[i]\n thisY=m*thisX+c\n thisT=self.time[i]+dt*(thisX-self.x[i])/dx\n x.extend(thisX)\n 
y.extend(thisY)\n t.extend(thisT)\n\n # y pixel crossings\n thisY=np.arange(0,dy,dirY*yRes)+self.y[i]\n thisX=(thisY-c)/m\n thisT=self.time[i]+dt*(thisY-self.y[i])/dy\n x.extend(thisX)\n y.extend(thisY)\n t.extend(thisT)\n\n # copy over interpolated arrays\n self.x=np.array(x)\n self.y=np.array(y)\n self.time=np.array(t)\n\n\n def pathResist(self,tiff):\n '''Calculate the resistance for a squirrel's path'''\n\n # interpolate the line to get all pixels between nodes\n self.interpolateLine(tiff.pixelWidth,abs(tiff.pixelHeight))\n\n # determine indices\n xInd=np.array((self.x-tiff.xOrigin)//tiff.pixelWidth,dtype=int)\n yInd=np.array((self.y-tiff.yOrigin)//tiff.pixelHeight,dtype=int)\n useInd=np.where((xInd>=0)&(xInd=0)&(yInd0):\n useInd=useInd[0]\n #tempArr=np.ndarray.flatten(tiff.resist[xInd[useInd]][yInd[useInd]])\n #tempArr[tempArr<0]=0.0\n self.trackResist=np.sum(tiff.resist[xInd[useInd]][yInd[useInd]])/len(xInd)\n\n\n\n#######################\n\nif __name__==\"__main__\":\n '''Main block'''\n\n # set input names\n #tiffName='/geos/netdata/avtrain/data/3d/oosa/week5/raster/roughClass.LT.tif'\n #trackName='/home/shancoc2/src/OOSA-code-public/week5/data/squirrel.csv'\n tiffName='/Users/dill/data/bess/maps/roughClass.LT.tif'\n trackName='/Users/dill/teaching/oosa/2019-20/OOSA-code-public/week5/data/squirrel.csv'\n\n # read the tiff\n tiff=rasterMaths(tiffName)\n\n # calculate resistance raster layer\n tiff.convertResist()\n\n # read the track and reproject\n track=vectorSquirrel(trackName,epsg=tiff.epsg)\n\n # calculate resistance of the path\n track.pathResist(tiff)\n\n # write out\n print(\"Resistance is\",track.trackResist)\n\n","sub_path":"week5/vector_raster/intersectVector.py","file_name":"intersectVector.py","file_ext":"py","file_size_in_byte":3747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"264615592","text":"import os\nimport pandas as pd\nimport sys\n\n\nseparator = ','\n\nif __name__ == \"__main__\":\n input_dir = sys.argv[1]\n output_dir = sys.argv[2]\n \n for input_dir_path, _, files in os.walk(input_dir):\n output_dir_path = os.path.join(output_dir, *(input_dir_path.split(\"/\")[1:]))\n if not os.path.isdir(output_dir_path):\n os.mkdir(output_dir_path)\n for file in files:\n if '.csv' in file:\n df = pd.read_csv(os.path.join(input_dir_path, file), sep=separator)\n df.head(100).to_csv(os.path.join(output_dir_path, file), index=False, sep=separator)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"436637595","text":"import ROOT\n\nclass dataCardMaker:\n\n def __init__(self, path, signal, background, histos, lumi, outpath, othersys):\n self.path = path\n self.signal = signal\n self.background = background\n self.histos = histos\n self.outpath = outpath\n self.othersys = othersys\n self.lumi = lumi\n self.fillBinValues()\n self.writeCards()\n\n def calcBinValues(self, tfile):\n binValues = []\n for hist in self.histos.keys():\n h = tfile.Get(self.histos[hist][\"name\"])\n if isinstance(h, ROOT.TH2D) or isinstance(h, ROOT.TH2F):\n if self.histos[hist][\"end\"][0] == \"last\":\n lastbinx = h.GetNbinsX()\n else:\n lastbinx = self.histos[hist][\"end\"][0]\n \n histbinsx = lastbinx - self.histos[hist][\"start\"][0]\n skipx = histbinsx/self.histos[hist][\"nbins\"][0]\n if self.histos[hist][\"end\"][1] == \"last\":\n lastbiny = h.GetNbinsY()\n else:\n lastbiny = 
self.histos[hist][\"end\"][1]\n \n histbinsy = lastbiny - self.histos[hist][\"start\"][1]\n skipy = histbinsy/self.histos[hist][\"nbins\"][1]\n for binx in range(self.histos[hist][\"start\"][0], lastbinx, skipx):\n for biny in range(self.histos[hist][\"start\"][1], lastbiny, skipy):\n val = round(h.Integral(binx, binx + skipx, biny, biny + skipy), 1)\n if val < 0.1: val = 0.1\n binValues.append(val)\n else:\n if self.histos[hist][\"end\"] == \"last\":\n lastbin = h.GetNbinsX()\n else:\n lastbin = self.histos[hist][\"end\"]\n \n histbins = lastbin - self.histos[hist][\"start\"]\n skip = histbins/self.histos[hist][\"nbins\"]\n for bin in range(self.histos[hist][\"start\"], lastbin, skip):\n val = round(h.Integral(bin, bin + skip), 1)\n if val < 0.1: val = 0.1\n binValues.append(val)\n return binValues\n \n\n def fillBinValues(self):\n self.nbins = 0\n for sg in self.signal.keys():\n tfile = ROOT.TFile.Open(self.path+self.signal[sg][\"path\"]) \n self.signal[sg][\"binValues\"] = self.calcBinValues(tfile)\n for bg in self.background.keys():\n tfile = ROOT.TFile.Open(self.path+self.background[bg][\"path\"])\n self.background[bg][\"binValues\"] = self.calcBinValues(tfile)\n self.nbins = 0\n for h in self.histos.keys():\n if isinstance(self.histos[h][\"nbins\"], list) and len(self.histos[h][\"nbins\"]) == 2:\n self.nbins += self.histos[h][\"nbins\"][0]*self.histos[h][\"nbins\"][1]\n else:\n self.nbins += self.histos[h][\"nbins\"]\n self.observedPerBin = [] \n for n in range(self.nbins):\n obs = 0\n for sg in self.signal.keys():\n obs += self.signal[sg][\"binValues\"][n]\n for bg in self.background.keys():\n obs += self.background[bg][\"binValues\"][n]\n self.observedPerBin.append(obs)\n \n\n def writeCards(self):\n\n with open(self.outpath, \"w\") as file:\n file.write(\"imax {} \\n\".format(self.nbins))\n file.write(\"jmax {} \\n\".format(len(self.background.keys())))\n file.write(\"kmax * \\n\")\n file.write(\"\\n------------------------\")\n bin_str = \"{0:<15}\".format(\"\\nbin\")\n for bin in range(self.nbins):\n temp_str = \"D{}\".format(bin)\n bin_str += \"{0:<12}\".format(temp_str)\n file.write(bin_str)\n obs_str = \"{0:<15}\".format(\"\\nobservation\")\n for obs in range(self.nbins):\n obs_str += \"{0:<12}\".format(self.observedPerBin[obs])\n file.write(obs_str)\n file.write(\"\\n--------------------------\")\n pbin_str = \"{0:<15}\".format(\"\\nbin\")\n process1_str = \"{0:<15}\".format(\"\\nprocess\")\n process2_str = \"{0:<15}\".format(\"\\nprocess\")\n rate_str = \"{0:<15}\".format(\"\\nrate\")\n for bin in range(self.nbins):\n for proc in self.signal.keys():\n temp_str = \"D{}\".format(bin)\n pbin_str += \"{0:<12}\".format(temp_str)\n process1_str += \"{0:<12}\".format(proc)\n process2_str += \"{0:<12}\".format(0)\n rate_str += \"{0:<12}\".format(self.signal[proc][\"binValues\"][bin])\n cnt = 0\n for proc in self.background.keys():\n cnt += 1\n temp_str = \"D{}\".format(bin)\n pbin_str += \"{0:<12}\".format(temp_str)\n process1_str += \"{0:<12}\".format(proc)\n process2_str += \"{0:<12}\".format(cnt)\n rate_str += \"{0:<12}\".format(self.background[proc][\"binValues\"][bin])\n file.write(pbin_str)\n file.write(process1_str)\n file.write(process2_str)\n file.write(rate_str)\n process2_str = \"{0:<15}\".format(\"\\nprocess\")\n file.write(\"\\n--------------------------\")\n lumi_str = \"{0:<8}\".format(\"\\nlumi\")\n lumi_str += \"{0:<7}\".format(\"lnN\")\n for bin in range(self.nbins):\n for proc in range(len(self.signal.keys()) + len(self.background.keys())):\n lumi_str += 
\"{0:<12}\".format(1.02)\n file.write(lumi_str)\n for signal1 in self.signal.keys():\n sig_str = \"{0:<8}\".format(\"\\n\"+signal1)\n sig_str += \"{0:<7}\".format(\"lnN\")\n for bin in range(self.nbins):\n for signal2 in self.signal.keys():\n if signal1 == signal2:\n sig_str += \"{0:<12}\".format(self.signal[signal1][\"sys\"])\n else:\n sig_str += \"{0:<12}\".format(\"--\")\n sig_str += \"{0:<12}\".format(\"--\")*len(self.background.keys())\n file.write(sig_str)\n for background1 in self.background.keys():\n bg_str = \"{0:<8}\".format(\"\\n\"+background1)\n bg_str += \"{0:<7}\".format(\"lnN\")\n for bin in range(self.nbins):\n bg_str += \"{0:<12}\".format(\"--\")*len(self.signal.keys()) \n for background2 in self.background.keys():\n if background1 == background2:\n bg_str += \"{0:<12}\".format(self.background[background1][\"sys\"])\n else:\n bg_str += \"{0:<12}\".format(\"--\")\n file.write(bg_str)\n if self.othersys:\n for sys in self.othersys.keys():\n sys_str = \"{0:<8}\".format(\"\\n\"+sys)\n sys_str += \"{0:<7}\".format(self.othersys[sys][\"distr\"])\n for bin in range(self.nbins):\n for sg in self.signal.keys():\n if sg in self.othersys[sys][\"apply\"]:\n sys_str += \"{0:<12}\".format(self.othersys[sys][\"sys\"])\n else:\n sys_str += \"{0:<12}\".format(\"--\")\n for bg in self.background.keys():\n if bg in self.othersys[sys][\"apply\"]:\n sys_str += \"{0:<12}\".format(self.othersys[sys][\"sys\"])\n else:\n sys_str += \"{0:<12}\".format(\"--\")\n file.write(sys_str)\n \n\n","sub_path":"DataCardProducer/dataCardProducer.py","file_name":"dataCardProducer.py","file_ext":"py","file_size_in_byte":7894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"76190831","text":"from flask import Flask, jsonify, request, render_template, session\n\napp = Flask(__name__)\napp.secret_key = \"anks\"\ncontacts_list = [\n {\n \"id\": 1,\n \"name\": \"Aayisha\",\n \"city\": \"Kollam\",\n \"contact\": \"9123456789\"\n },\n {\n \"id\": 2,\n \"name\": \"Athira\",\n \"city\": \"Calicut\",\n \"contact\": \"9123459789\"\n },\n {\n \"id\": 3,\n \"name\": \"Jiss Theresa\",\n \"city\": \"Alapuzha\",\n \"contact\": \"9123457789\"\n }\n]\n\n\n@app.route('/')\ndef home():\n return render_template('home.html')\n\n\n@app.route('/login')\ndef login():\n return render_template(\"login.html\")\n\n\n@app.route(\"/enter\", methods=[\"POST\"])\ndef enter():\n if request.method == \"POST\":\n session[\"email\"] = request.form.get(\"email\")\n return render_template(\"enter.html\")\n\n\n'''\n@app.route(\"/contacts_list\")\ndef contacts_list():\n if \"email\" in session:\n return render_template(\"contacts_list.html\", contacts=enumerate(get_contacts()))\n else:\n return \"
Please Login first
\"\n'''\n\n\n@app.route('/logout')\ndef logout():\n if 'email' in session:\n session.pop('email', None)\n return render_template('logout.html')\n else:\n return '
User already logged out
'\n\n\n'''\ndef get_contacts():\n return contacts_list\n\n\n@app.route(\"/new\")\ndef new():\n return render_template(\"new.html\")\n\n\n@app.route(\"/add\", methods=[\"POST\"])\ndef add_contact():\n id = request.form.get(\"id\")\n name = request.form.get(\"name\")\n city = request.form.get(\"city\")\n contact = request.form.get(\"contact\")\n details = {\n \"id\": id,\n \"name\": name,\n \"city\": city,\n \"contact\": contact\n }\n contacts_list.append(details)\n return render_template(\"contacts_list.html\", contacts=enumerate(get_contacts()))\n\n'''\n\n\n@app.route('/profile')\ndef profile():\n if 'email' in session:\n email = session['email']\n return render_template('profile.html', name=email)\n else:\n return '
Please login first
'\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"37778776","text":"'''\nCreated on 2 oct. 2017\n\n@author: juamar\n'''\ndef item_order (orden):\n ensaladas = orden.count(\"ensalada\") \n hamburguesas = orden.count(\"hamburguesa\") \n agua = orden.count(\"agua\") \n\n print(\"ensaladas:\", ensaladas, \"hamburguesas:\", hamburguesas, \"agua:\", agua)\n \nitem_order(\"ensalada hamburguesa agua ensalada hamburguesa\")\nitem_order(\"hamburguesa hamburguesa agua\") ","sub_path":"PrimerosProgramas2/Programa4.py","file_name":"Programa4.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"406745749","text":"from django.urls import path\r\nfrom . import views\r\nfrom django.contrib.auth import views as auth_views\r\n\r\napp_name = 'app'\r\nurlpatterns = [\r\n # トップ\r\n path('', views.index, name='index'),\r\n # マイページ\r\n path('users/', views.users_detail, name='users_detail'),\r\n # タイムライン\r\n path('users/timeline', views.users_timeline, name='users_timeline'),\r\n # プロジェクト作成\r\n path('projects/create/', views.projects_create, name='projects_create'),\r\n # プロジェクト詳細\r\n path('projects//', views.projects_detail, name='projects_detail'),\r\n # プロジェクト更新\r\n path('projects//update', views.projects_update, name='projects_update'),\r\n # プロジェクト削除\r\n path('projects//delete', views.projects_delete, name='projects_delete'),\r\n # スタイル変換\r\n path('test_ajax', views.test_ajax, name='test_ajax'),\r\n # コメント\r\n path('comment_ajax/', views.comment_ajax, name='comment_ajax'),\r\n # いいね\r\n path('like/', views.like, name='like'),\r\n \r\n]\r\n","sub_path":"app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"305803744","text":"\"\"\"\n В прокате коньков есть разные размеры. Известно, что желающий покататься\n может надеть коньки любого размера, который не меньше размеров его ноги.\n\n Напишите программу, которая принимает список доступных размеров коньков и\n список размеров ног желающих.\n\n И выводит наибольшее количество людей,\n которые смогут покататься одновременно.\n\n\n Например:\n [in]\n [39, 38, 41, 37]\n [40, 39, 41]\n\n [out]\n 2\n\"\"\"\n\nsizes = [int(s) for s in input('Available sizes: ').split()]\nlegs = [int(s) for s in input('Legs sizes: ').split()]\nresult = list(set(sizes) & set(legs))\nprint(len(result))\n","sub_path":"hw9/skates.py","file_name":"skates.py","file_ext":"py","file_size_in_byte":903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"486642173","text":"import webapp2\nimport cgi\n\ntextarea = '''\n
\nwhat word would you like to encrypt?\n\n\n\n
\n'''\n\nalphabet = {\"a\":'n',\"b\":'o',\"c\":'p','d':'q','e':'r','f':'s','g':'t','h':'u','i':'v','j':'w','k':'x','l':'y','m':'z','n':'a','o':'b','p':'c','q':'d','r':'e','s':'f','t':'g','u':'h','v':'i','w':'j','x':'k','y':'l','z':'m'}\n#word = raw_input('what word?\\n').lower()\nrot = []\n\ndef escape_html(s):\n return cgi.escape(s, quote = True)\n return cgi.escape(s, greater = True)\n return cgi.escape(s, less = True)\n return cgi.escape(s, ampersand = True)\n\nclass rotten(webapp2.RequestHandler):\n\n\tdef get(self):\n\t\tself.word = self.request.get(\"word\")\n\t\t#self.write_form()\n\t\t#self.response.out.write(textarea)\n\n\tdef rotted(self,word):\n\t\tword = self.word\n\t#for key in alphabet:\n\t\tfor x in (word):\n\t\t\tif x in alphabet:\n\t\t\t\t#print value\n\t\t\t\tvalue = alphabet[x]\n\t\t\t\trot.append(value)\n\n\t\t#if len(rot) > 1:\n\t\t#\trot[0],rot[1] = rot[1],rot[0]\n\t\thashed = ''.join(rot)\n\t\treturn str(hashed)\n\n\tdef write_form(self):\n\t\t#self.response.out.write(self.rotted(self.word))\n\t\tself.response.out.write(form % {\"hashed\": hashed})\n\n\tdef post(self):\n\t\tself.response.out.write(form)\n\nclass MainPage(webapp2.RequestHandler):\n\tdef get(self):\n\t\t#self.response.headers['Content-Type'] = 'text/plain'\n\t\tself.response.out.write(textarea)\n\napp = webapp2.WSGIApplication([\n\t('/', MainPage),('/rot13', rotten)\n], debug=True)","sub_path":"rot13/rot13.py","file_name":"rot13.py","file_ext":"py","file_size_in_byte":1486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"64529235","text":"#! python3\n# multidownloadXkcd.py - Downloads XKCD comics using multiple threads\n\nimport requests, os, bs4, threading\n\nsaveFolder = 'C:\\\\Users\\\\jonat\\\\Documents\\\\Python\\\\Sandbox\\\\xkcd'\nos.chdir(saveFolder)\n\ndef downloadXKCD(startComic, endComic):\n for urlNumber in range(startComic, endComic):\n if urlNumber == 0: continue\n # Download the page.\n statusMessage = f'Downloading page http://xkcd.com/{urlNumber} --> '\n try:\n thePage = requests.get(f'http://xkcd.com/{urlNumber}')\n thePage.raise_for_status() \n except requests.exceptions.HTTPError:\n statusMessage += 'HTTPError'\n continue\n except requests.exceptions.InvalidURL:\n statusMessage += 'Invalid URL'\n continue\n\n soup = bs4.BeautifulSoup(thePage.text, 'html.parser')\n\n # Find the URL of the comic image.\n comicElem = soup.select('#comic')\n if comicElem == []:\n statusMessage += 'No image found'\n else:\n try:\n imageURL = 'http:' + comicElem[0].contents[1].attrs['src']\n theImage = requests.get(imageURL)\n fileNameSplit = imageURL.split('/')\n fileName = fileNameSplit[-1]\n \n file = open(fileName, 'wb')\n file.write(theImage.content)\n file.close()\n statusMessage = ''\n except KeyError:\n statusMessage += 'KeyError'\n\n if len(statusMessage) > 0:\n print(statusMessage)\n\ndownloadThreads = []\nfor i in range(0, 1400, 100):\n downloadThread = threading.Thread(target=downloadXKCD, args=(i, i + 99))\n downloadThreads.append(downloadThread)\n downloadThread.start()\n\n# Wait for all threads to end.\nfor downloadThread in downloadThreads:\n downloadThread.join()\nprint('Done')","sub_path":"v1/multidownloadXkcd.py","file_name":"multidownloadXkcd.py","file_ext":"py","file_size_in_byte":1886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"294721215","text":"import os\nimport argparse\nfrom nltk.stem import WordNetLemmatizer as WNL\nwnl = WNL()\nwith open(\"nouncount.raw\") as 
f:\n nouns = [x.split(\"\\t\")[0] for x in f.readlines()]\nprint(len(nouns))\nnouns = [wnl.lemmatize(n,pos='n') for n in nouns]\nprint(len(set(nouns)))\nexit()\n\ndef parseParams():\n parser = argparse.ArgumentParser(description='none')\n parser.add_argument('-hn', type=int, default=5000)\n parser.add_argument('-taken', type=int, default=500)\n parser.add_argument('-hv', type=int, default=10000)\n parser.add_argument('-takev', type=int, default=250)\n parser.add_argument('-nouns', type=str, default=\"nouncount.raw\")\n parser.add_argument('-verbs', type=str, default=\"verbcount.raw\")\n parser.add_argument('-out', type=str, default=\"train.raw\")\n args= parser.parse_args()\n return args\n\ndef getvocab(args):\n nouns = []\n with open(args.nouns) as f:\n i = 0\n while i\"]\n data.append(\" \".join(tmp))\n with open(args.out,'w') as f:\n f.write(\"\\n\".join(data))\n\nif __name__==\"__main__\":\n main()\n","sub_path":"12.5.redux/data/mkInterRaw.py","file_name":"mkInterRaw.py","file_ext":"py","file_size_in_byte":1943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"81294389","text":"import boto3\n\n\ndef save_account_db(name_account,email_account):\n '''\n Insert the name account create to make the index\n '''\n dynamodb = boto3.resource('dynamodb')\n table = dynamodb.Table(\"octopus_account\")\n\n item = {\"NameAccount\": name_account,\n \"EmailAccount\": email_account}\n\n table.put_item( Item=item )\n\n\ndef insert_account_id_db(name_account, account_id_generated):\n '''\n After create the index of the name account, update to insert the aws_account_id generated\n '''\n dynamodb = boto3.resource('dynamodb')\n table = dynamodb.Table(\"octopus_account\")\n table.update_item(\n Key={\"NameAccount\":name_account},\n UpdateExpression=\"set AccountId=:ai\",\n ExpressionAttributeValues={\":ai\":account_id_generated}\n )","sub_path":"src/model/dynamodb.py","file_name":"dynamodb.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"169933874","text":"import os\nimport cv2\nfrom PIL import Image\n\nclass TestEqual:\n # def _init_(self):\n\n def getEqual(self):\n dataroot = os.getcwd() + \"/data/annotated/\"\n saveroot = os.getcwd() + \"/data/TestEqual/\"\n for f in os.listdir(dataroot):\n if f.endswith(\".png\"):\n ins = f.split('.')[0].split('_')\n if len(ins) <= 3: # exclude the equation png only individual symbol\n im = Image.open(dataroot + f)\n im.save(saveroot + f)\n\ndef main():\n x = TestEqual()\n x.getEqual()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"testEqual.py","file_name":"testEqual.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"103973386","text":"import json\r\nfrom pyquery import PyQuery as _\r\n\r\n\r\ndef pq_iter(pq):\r\n for e in pq:\r\n yield _(e)\r\n\r\n\r\ndef float_or(val, default=None):\r\n try:\r\n return float(val)\r\n except (ValueError, TypeError):\r\n return default\r\n\r\n\r\ndef parse_album_similar_albums(pq):\r\n albums = list()\r\n for album_item in pq_iter(pq('#similar-albums .album a')):\r\n album = {\r\n 'url': album_item.attr('href'),\r\n 'title': album_item('img').attr('alt')\r\n }\r\n title = album_item.attr('title')\r\n title_suffix = u' - ' + album['title']\r\n assert title.endswith(title_suffix)\r\n album['artists'] = title[0:-len(title_suffix)]\r\n\r\n albums.append(album)\r\n return 
albums\r\n\r\n\r\ndef parse_album(d):\r\n data = dict()\r\n pq = d.pq\r\n\r\n data['artist'] = {\r\n 'name': pq('.album-artist a').text(),\r\n 'url': pq('.album-artist a').attr('href')\r\n }\r\n data['title'] = pq('.album-title').text()\r\n data['review'] = pq('.review-body .editorial-text').html()\r\n data['rating'] = float_or(pq('.allmusic.rating').attr('data-stars'))\r\n data['release_date'] = pq('.details .release-date').text()\r\n data['duration'] = pq('.details .duration').text()\r\n data['album_art'] = json.loads(pq('div.album-art .image-container').attr('data-large'))\r\n data['similar_albums'] = parse_album_similar_albums(pq)\r\n data['genres'] = [\r\n {\r\n 'name': _(e).text(),\r\n 'url': _(e).attr('href')\r\n } for e in pq('.details .genres a')\r\n ]\r\n data['styles'] = [\r\n {\r\n 'name': _(e).text(),\r\n 'url': _(e).attr('href')\r\n } for e in pq('.details .styles a')\r\n ]\r\n data['moods'] = [\r\n {\r\n 'name': _(e).text(),\r\n 'url': _(e).attr('href')\r\n } for e in pq('.sidebar-module.moods a')\r\n ]\r\n data['themes'] = [\r\n {\r\n 'name': _(e).text(),\r\n 'url': _(e).attr('href')\r\n } for e in pq('.sidebar-module.themes a')\r\n ]\r\n\r\n data['medias'] = list()\r\n for media_title in pq_iter(pq('#tracks h2')):\r\n media = dict()\r\n media['name'] = media_title('.disc-num').text()\r\n data['medias'].append(media)\r\n media['tracks'] = list()\r\n for track_row in pq_iter(media_title.next()('tbody tr')):\r\n track = dict()\r\n track['position'] = track_row('td.tracknum').text()\r\n track['title'] = track_row('td.title div.title a').text()\r\n track['url'] = track_row('td.title div.title a').attr('href')\r\n media['tracks'].append(track)\r\n track['composers'] = list()\r\n track['duration'] = track_row('td.time').text()\r\n for composer in pq_iter(track_row('td.title div.artist a')):\r\n track['composers'].append({\r\n 'name': composer.text(),\r\n 'url': composer.attr('href'),\r\n })\r\n track['performers'] = list()\r\n for performer in pq_iter(track_row('td.performer div.primary a')):\r\n track['performers'].append({\r\n 'name': performer.text(),\r\n 'url': performer.attr('href')\r\n })\r\n return data\r\n\r\n\r\ndef parse_album_releases(d):\r\n data = dict()\r\n pq = d.pq\r\n data['releases'] = list()\r\n for e in pq_iter(pq('#album-releases tbody tr')):\r\n data['releases'].append({\r\n 'format': e('.format').text(),\r\n 'year': e('.year').text(),\r\n 'title': e('.label strong').text(),\r\n 'label': e('.label').text().replace(e('.label strong').text(), '').strip(),\r\n })\r\n return data\r\n\r\n\r\ndef parse_album_release(d):\r\n data = dict()\r\n pq = d.pq\r\n data['release_date'] = pq('#sidebar .details .release-date').text()\r\n data['label'] = pq('#sidebar .details .label').text()\r\n data['format'] = pq('#sidebar .details .format').text()\r\n data['flags'] = [f.text() for f in pq_iter(pq('#sidebar .details .flags li'))]\r\n data['catalog_number'] = pq('#sidebar .details .catalog').text()\r\n return data\r\n\r\n\r\ndef parse_artist(d):\r\n data = dict()\r\n pq = d.pq\r\n data['picture'] = pq('#sidebar .artist-image img').attr('src')\r\n data['review'] = pq('.review-body .editorial-text').html()\r\n data['genres'] = [\r\n {\r\n 'name': _(e).text(),\r\n 'url': _(e).attr('href')\r\n } for e in pq('.details .genres a')\r\n ]\r\n data['styles'] = [\r\n {\r\n 'name': _(e).text(),\r\n 'url': _(e).attr('href')\r\n } for e in pq('.details .styles a')\r\n ]\r\n data['active'] = pq('#sidebar dd.active').text()\r\n data['formed'] = pq('#sidebar dd.birth').text()\r\n # 
data['members'] = [\r\n # {\r\n # 'name': m.text(),\r\n # 'url': m.attr('href')\r\n # } for m in pq_iter(pq('#sidebar .group-members li a'))\r\n # ]\r\n data['moods'] = [\r\n {\r\n 'name': _(e).text(),\r\n 'url': _(e).attr('href')\r\n } for e in pq('.sidebar-module.moods a')\r\n ]\r\n data['themes'] = [\r\n {\r\n 'name': _(e).text(),\r\n 'url': _(e).attr('href')\r\n } for e in pq('.sidebar-module.themes a')\r\n ]\r\n data['discography'] = [\r\n {\r\n 'year': e('.year').text(),\r\n 'thumbnail': e('.thumbnail-img img').attr('src'),\r\n 'title': e('.title a:first-child').text(),\r\n 'url': e('.title a:first-child').attr('href'),\r\n 'label': e('td.label .full-title').text(),\r\n 'rating': float_or(e('td.ed-rating .allmusic.rating').attr('data-stars')),\r\n } for e in pq_iter(pq('#discography .album-table tbody tr'))\r\n ]\r\n data['photo_gallery'] = [\r\n json.loads(\r\n e.attr('data-large')\r\n ) for e in pq_iter(pq('#sidebar .media-gallery div.media-gallery-image.thumbnail'))\r\n ]\r\n return data\r\n\r\n\r\ndef parse_artist_related(d):\r\n pq = d.pq\r\n return {\r\n e('ul > h2').text(): [\r\n {\r\n 'name': e2('li.secondary_link a').text(),\r\n 'url': e2('li.secondary_link a').attr('href'),\r\n } for e2 in pq_iter(e('li.secondary_link'))\r\n ] for e in pq_iter(pq('#content .related-list li.related-item'))\r\n }\r\n\r\n\r\ndef parse_search(text):\r\n pq = _(text)\r\n return {\r\n 'results': [\r\n {\r\n 'thumbnail': r('div.image .thumbnail img').attr('src'),\r\n 'title': r('div.title a').text(),\r\n 'artist': {\r\n 'name': r('div.artist').text(),\r\n 'url': r('div.artist a').attr('href'),\r\n },\r\n 'url': r('div.title a').attr('href'),\r\n } for r in pq_iter(pq('table.search-results tr'))\r\n ]\r\n }","sub_path":"allmusic/am_parser.py","file_name":"am_parser.py","file_ext":"py","file_size_in_byte":6831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"619351518","text":"\r\nimport numpy as np\r\n# import curses\r\n\r\nstudent = []\r\ncourse = []\r\nmark = []\r\n# gpa = []\r\n\r\nprint(\"--------------------------------\")\r\nprint()\r\n\r\n# stdscr = curses.initscr()\r\n# stdscr.addstr(0, 0, \"Current mode: Typing mode\",\r\n# curses.A_REVERSE)\r\n# stdscr.refresh()\r\n\r\n# --------------------------------------------------STUDENT INFORMATION\r\n# STUDENT INFORMATION -------------------------------------------------\r\n# ---------------------------------------------------------------------\r\nclass Std_Info(): \r\n def __init__(self, s_ID, s_Name, s_DoB):\r\n self.s_ID = s_ID\r\n self.s_Name = s_Name\r\n self.s_DoB = s_DoB\r\n \r\n # ID\r\n def get_ID(self):\r\n return self.s_ID\r\n def set_s_ID(self, s_ID):\r\n self.s_ID = s_ID\r\n \r\n # Name\r\n def get_Name(self):\r\n return self.s_Name\r\n def set_s_Name(self, s_Name):\r\n self.s_Name = s_Name\r\n\r\n # DoB\r\n def get_DoB(self):\r\n return self.s_DoB\r\n def set_s_DoB(self, s_DoB):\r\n self.s_DoB = s_DoB\r\n\r\n\r\n\r\n\r\n# ---------------------------------------------------COURSE INFORMATION\r\n# COURSE INFORMATION --------------------------------------------------\r\n# ---------------------------------------------------------------------\r\nclass Course_Info():\r\n def __init__(self, c_ID, c_Name, c_Credits):\r\n self.c_ID = c_ID\r\n self.c_Name = c_Name\r\n self.c_Credits = c_Credits\r\n\r\n # Course ID\r\n def get_c_ID(self):\r\n return self.c_ID\r\n def set_c_ID(self, c_ID):\r\n self.c_ID = c_ID\r\n\r\n # Course Name\r\n def get_c_Name(self):\r\n return self.c_Name\r\n def 
set_c_Name(self, c_Name):\r\n self.c_Name = c_Name\r\n\r\n # Course Credits\r\n def get_c_Credits(self):\r\n return self.c_Credits\r\n def set_c_Credits(self, c_Credits):\r\n self.c_Credits = c_Credits\r\n\r\n\r\n\r\n\r\n# -------------------------------------------------------------ADD MARK\r\n# ADD MARK ------------------------------------------------------------\r\n# ---------------------------------------------------------------------\r\nclass mark_Course():\r\n def __init__(self, s_ID, c_ID, markk):\r\n self.s_ID = s_ID\r\n self.c_ID = c_ID\r\n self.markk = markk\r\n\r\n # s_ID\r\n def set_s_ID(self, s_ID):\r\n self.s_ID = s_ID\r\n def get_s_ID(self):\r\n return self.s_ID\r\n \r\n # c_ID\r\n def get_c_ID(self):\r\n return self.c_ID\r\n def set_c_ID(self, c_ID):\r\n self.c_ID = c_ID\r\n\r\n # Mark\r\n def get_mark(self):\r\n return self.markk\r\n def set_mark(self, markk):\r\n self.markk = markk\r\n\r\n\r\n\r\n# ------------------------------------------------------------------GPA\r\n# GPA -----------------------------------------------------------------\r\n# ---------------------------------------------------------------------\r\ndef GPA():\r\n m_Gpa = np.array([mark_Course.get_mark])\r\n credits = np.array([Course_Info.get_c_Credits])\r\n x = 0\r\n for s in student:\r\n for c in course:\r\n # # cal_gpa = (m_Gpa*Course_Info.c_Credits)/credits\r\n x = np.dot(m_Gpa, credits)\r\n sum = np.sum(credits)\r\n cal_Gpa = x/sum\r\n print(cal_Gpa)\r\n# (mark*credits)/total credit\r\n\r\ndef sort_By_GPA():\r\n \r\n sort_Student = sorted(student, key = lambda s_Std: s_Std.gpa, reverse = True)\r\n # show_S = Std_Info.show_Std()\r\n for s_Std in sort_Student:\r\n s_Std.show_S\r\n\r\n\r\n\r\n# -------------------------------------------------------INPUT FUNCTION\r\n# INPUT FUNCTION ------------------------------------------------------\r\n# ---------------------------------------------------------------------\r\ndef add_Info():\r\n # Input Student information ##############################\r\n # Contain:\r\n # - ID\r\n # - Name\r\n # - Date of Birth\r\n s_Num = int(input(\"How many students?\\n -> There are: \"))\r\n for i in range(s_Num):\r\n Std_Info.s_ID = input(\" - Student \" + str(i + 1) + \" ID: \")\r\n Std_Info.s_Name = input(\" - Student \" + str(i + 1) + \" Name: \")\r\n Std_Info.s_DoB = input(\" - Student \" + str(i + 1) + \" DoB: \")\r\n student.append({\"Student #\" + str\r\n (i + 1) + \": Id: \" + Std_Info.s_ID + \"; Name: \" + Std_Info.s_Name + \"; DoB: \" + Std_Info.s_DoB})\r\n \r\n\r\n # student.append({\"Student \" + str(i + 1) + \"})\r\n print()\r\n print(\"-----------------------------------------------------------------\")\r\n\r\n # Input Course information ###############################\r\n # Contain:\r\n # - ID\r\n # - Name\r\n c_Num = int(input(\"How many courses?\\n -> There are: \"))\r\n for i in range(c_Num):\r\n print(\" * Enter information about course \" + str(i + 1) + \": \")\r\n Course_Info.c_ID = input(\" - Course \" + str(i + 1) +\" ID: \")\r\n Course_Info.c_Name = input(\" - Course \" + str(i + 1) +\" Name: \")\r\n Course_Info.c_Credits = int(input(\" - Course \" + str(i + 1) + \" Credits: \"))\r\n course.append({\"Course #\" + str(i + 1) + \": Id: \" + Course_Info.c_ID + \"; Name: \" + Course_Info.c_Name + \"; Credits: \" + str(Course_Info.c_Credits)})\r\n # print(\"Course #\" + str(i + 1))\r\n # print(\"ID: \" + Course_Info.c_ID)\r\n # print(\"Name: \" + Course_Info.c_Name)\r\n # print(\"Credits: \" + Course_Info.c_Credits)\r\n print()\r\n 
print(\"-----------------------------------------------------------------\")\r\n\r\n\r\n\r\n# ------------------------------------------------------------SHOW INFO\r\n# SHOW INFO -----------------------------------------------------------\r\n# ---------------------------------------------------------------------\r\ndef show_Student():\r\n print(\"Student list: \")\r\n print(student)\r\n \r\n\r\ndef show_Course():\r\n print(\"Course list: \")\r\n print(course)\r\n\r\ndef show_Mark():\r\n print(\"Mark list: \")\r\n print(mark)\r\n\r\n\r\n# ----------------------------------------------------------------------------\r\n# ADD MARK -------------------------------------------------------------------\r\n# ----------------------------------------------------------------------------\r\ndef marking():\r\n print(\"-------------------------------\")\r\n print()\r\n\r\n # Input for choosing:\r\n # - Student: ###########################\r\n show_Student()\r\n print(\" => Select student by ID:\")\r\n s_ID = input(\" +> Option: \")\r\n print(\"--------------------------------------------------------\")\r\n\r\n # - Course: ###########################\r\n show_Course()\r\n print(\" => Select course by ID:\")\r\n c_ID = input(\" +> Option: \")\r\n print(\"--------------------------------------------------------\")\r\n\r\n # Mark #######################################\r\n print()\r\n m = float(input(\" => Enter the mark: \"))\r\n mark.append({\"Student ID\": s_ID, \"Course ID\": c_ID, \"Mark\": m})\r\n print()\r\n\r\n\r\n\r\n# MAIN FUNCTION -------------- MAIN FUNCTION -------------- MAIN FUNCTION\r\n# MAIN FUNCTION -------------- MAIN FUNCTION -------------- MAIN FUNCTION\r\n# MAIN FUNCTION -------------- MAIN FUNCTION -------------- MAIN FUNCTION\r\n\r\ndef option():\r\n while (True):\r\n print()\r\n print(\"Type '?' for list of option\")\r\n choose = input(\" => Your option: \")\r\n print()\r\n if (choose == \"?\"):\r\n print(\"Select an option below: \")\r\n print(\" +> 1. Input information about student and course\")\r\n print(\" +> 2. Input mark of student and course\")\r\n print(\" +> 3. Show information about student\")\r\n print(\" +> 4. Show information about course\")\r\n print(\" +> 5. Show mark of students in courses\")\r\n print(\" +> 6. Show GPA of students\")\r\n print(\" +> 7. Show students after being sorted by GPA\")\r\n print(\"\"\"\r\n If you find that this program is to bad, just follow the step below.\r\n Have fun!!\r\n :)\"\"\")\r\n print(\" +> 0. 
Type '0' ('zero') to quit\")\r\n\r\n if (choose == \"1\"):\r\n add_Info()\r\n if (choose == \"2\"):\r\n marking()\r\n if (choose == \"3\"):\r\n show_Student()\r\n if (choose == \"4\"):\r\n show_Course()\r\n if (choose == \"5\"):\r\n show_Mark()\r\n if (choose == \"6\"):\r\n GPA()\r\n if (choose == \"7\"):\r\n print(\"You\")\r\n if (choose == \"0\"):\r\n break\r\n print(\"\"\" :)\r\n Thanks for using this.\r\n See you later.\r\n Have fun!\r\n :)\"\"\")\r\n\r\n# Information()\r\n# # show_Student()\r\n# # show_Course()\r\n# mark_Course()\r\n# show_Marks()\r\noption()\r\n# Std_Info.show_Std(self)\r\n","sub_path":"3.student.mark.oop.math.py","file_name":"3.student.mark.oop.math.py","file_ext":"py","file_size_in_byte":8651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"363963537","text":"from rest_framework.exceptions import ParseError\nfrom rest_framework.request import Request\nfrom rest_framework.response import Response\n\nfrom sentry import features\nfrom sentry.api.base import region_silo_endpoint\nfrom sentry.api.bases.organization import OrganizationEndpoint\nfrom sentry.api.exceptions import ResourceDoesNotExist\nfrom sentry.api.paginator import GenericOffsetPaginator\nfrom sentry.api.utils import InvalidParams\nfrom sentry.sentry_metrics.configuration import UseCaseKey\nfrom sentry.snuba.metrics import (\n QueryDefinition,\n get_metrics,\n get_series,\n get_single_metric_info,\n get_tag_values,\n get_tags,\n)\nfrom sentry.snuba.metrics.utils import DerivedMetricException, DerivedMetricParseException\nfrom sentry.snuba.sessions_v2 import InvalidField\nfrom sentry.utils.cursors import Cursor, CursorResult\n\n\ndef get_use_case_id(use_case: str) -> UseCaseKey:\n \"\"\"\n Get use_case from str and validate it against UseCaseKey enum type\n if use_case parameter has wrong value just raise an ParseError.\n \"\"\"\n try:\n if use_case == \"releaseHealth\":\n use_case = \"release-health\"\n\n return UseCaseKey(use_case)\n except ValueError:\n raise ParseError(\n detail=f\"Invalid useCase parameter. 
Please use one of: {', '.join(use_case.value for use_case in UseCaseKey)}\"\n )\n\n\n@region_silo_endpoint\nclass OrganizationMetricsEndpoint(OrganizationEndpoint):\n \"\"\"Get metric name, available operations and the metric unit\"\"\"\n\n def get(self, request: Request, organization) -> Response:\n if not features.has(\"organizations:metrics\", organization, actor=request.user):\n return Response(status=404)\n\n projects = self.get_projects(request, organization)\n metrics = get_metrics(\n projects, use_case_id=get_use_case_id(request.GET.get(\"useCase\", \"release-health\"))\n )\n # TODO: replace this with a serializer so that if the structure of MetricMeta changes the response of this\n # endpoint does not\n for metric in metrics:\n del metric[\"metric_id\"]\n del metric[\"mri_string\"]\n return Response(metrics, status=200)\n\n\n@region_silo_endpoint\nclass OrganizationMetricDetailsEndpoint(OrganizationEndpoint):\n \"\"\"Get metric name, available operations, metric unit and available tags\"\"\"\n\n def get(self, request: Request, organization, metric_name) -> Response:\n if not features.has(\"organizations:metrics\", organization, actor=request.user):\n return Response(status=404)\n\n projects = self.get_projects(request, organization)\n try:\n metric = get_single_metric_info(\n projects,\n metric_name,\n use_case_id=get_use_case_id(request.GET.get(\"useCase\", \"release-health\")),\n )\n except InvalidParams as e:\n raise ResourceDoesNotExist(e)\n except (InvalidField, DerivedMetricParseException) as exc:\n raise ParseError(detail=str(exc))\n\n return Response(metric, status=200)\n\n\n@region_silo_endpoint\nclass OrganizationMetricsTagsEndpoint(OrganizationEndpoint):\n \"\"\"Get list of tag names for this project\n\n If the ``metric`` query param is provided, only tags for a certain metric\n are provided.\n\n If the ``metric`` query param is provided more than once, the *intersection*\n of available tags is used.\n\n \"\"\"\n\n def get(self, request: Request, organization) -> Response:\n\n if not features.has(\"organizations:metrics\", organization, actor=request.user):\n return Response(status=404)\n\n metric_names = request.GET.getlist(\"metric\") or None\n projects = self.get_projects(request, organization)\n try:\n tags = get_tags(\n projects,\n metric_names,\n use_case_id=get_use_case_id(request.GET.get(\"useCase\", \"release-health\")),\n )\n except (InvalidParams, DerivedMetricParseException) as exc:\n raise (ParseError(detail=str(exc)))\n\n return Response(tags, status=200)\n\n\n@region_silo_endpoint\nclass OrganizationMetricsTagDetailsEndpoint(OrganizationEndpoint):\n \"\"\"Get all existing tag values for a metric\"\"\"\n\n def get(self, request: Request, organization, tag_name) -> Response:\n\n if not features.has(\"organizations:metrics\", organization, actor=request.user):\n return Response(status=404)\n\n metric_names = request.GET.getlist(\"metric\") or None\n\n projects = self.get_projects(request, organization)\n try:\n tag_values = get_tag_values(\n projects,\n tag_name,\n metric_names,\n use_case_id=get_use_case_id(request.GET.get(\"useCase\", \"release-health\")),\n )\n except (InvalidParams, DerivedMetricParseException) as exc:\n msg = str(exc)\n # TODO: Use separate error type once we have real data\n if \"Unknown tag\" in msg:\n raise ResourceDoesNotExist(f\"tag '{tag_name}'\")\n else:\n raise ParseError(msg)\n\n return Response(tag_values, status=200)\n\n\n@region_silo_endpoint\nclass OrganizationMetricsDataEndpoint(OrganizationEndpoint):\n \"\"\"Get the time 
series data for one or more metrics.\n\n The data can be filtered and grouped by tags.\n Based on `OrganizationSessionsEndpoint`.\n \"\"\"\n\n default_per_page = 50\n\n def get(self, request: Request, organization) -> Response:\n projects = self.get_projects(request, organization)\n\n def data_fn(offset: int, limit: int):\n try:\n query = QueryDefinition(\n projects, request.GET, paginator_kwargs={\"limit\": limit, \"offset\": offset}\n )\n data = get_series(\n projects,\n query.to_metrics_query(),\n use_case_id=get_use_case_id(request.GET.get(\"useCase\", \"release-health\")),\n )\n data[\"query\"] = query.query\n except (\n InvalidParams,\n DerivedMetricException,\n ) as exc:\n raise (ParseError(detail=str(exc)))\n return data\n\n return self.paginate(\n request,\n paginator=MetricsDataSeriesPaginator(data_fn=data_fn),\n default_per_page=self.default_per_page,\n max_per_page=100,\n )\n\n\nclass MetricsDataSeriesPaginator(GenericOffsetPaginator):\n def get_result(self, limit, cursor=None):\n assert limit > 0\n offset = cursor.offset if cursor is not None else 0\n data = self.data_fn(offset=offset, limit=limit + 1)\n\n if isinstance(data.get(\"groups\"), list):\n has_more = len(data[\"groups\"]) == limit + 1\n if has_more:\n data[\"groups\"].pop()\n else:\n raise NotImplementedError\n\n return CursorResult(\n data,\n prev=Cursor(0, max(0, offset - limit), True, offset > 0),\n next=Cursor(0, max(0, offset + limit), False, has_more),\n )\n","sub_path":"src/sentry/api/endpoints/organization_metrics.py","file_name":"organization_metrics.py","file_ext":"py","file_size_in_byte":7127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"559391532","text":"import unittest\n\n\ndef check_unique_string(my_string):\n \"\"\"\n Is Unique\n @chapter 1\n @exercise 1.1\n @page 90\n Implement an algorithm to determine if a string has all unique characters.\n What if you cannot yse additional data structures?\n \"\"\"\n if len(my_string) > 128:\n return True\n\n char_set = [False] * 128\n for elem in my_string:\n val = ord(elem)\n if char_set[val]:\n return False\n char_set[val] = True\n return True\n\n\nclass TestSolution(unittest.TestCase):\n\n def test_solution(self):\n self.assertFalse(check_unique_string('this is a test'))\n self.assertTrue(check_unique_string('thisagodr'))\n\n\nif __name__ == '__main__':\n unittest.main()","sub_path":"interview/arrays_and_strings/is_unique.py","file_name":"is_unique.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"47727368","text":"'''\nDescription: henggao_learning\nversion: v1.0.0\nAuthor: henggao\nDate: 2020-10-23 21:53:21\nLastEditors: henggao\nLastEditTime: 2020-12-15 11:08:55\n'''\nfrom os import name\nfrom .views import DrillMetaViewSet\nfrom django.urls import include, path\n\nfrom . 
import views\nfrom .views import *\nfrom rest_framework import routers\nrouter = routers.DefaultRouter()\n# router.register(r'drillmeta', DrillMetaViewSet)\n\nurlpatterns = [\n path('', include(router.urls)),\n path('uploadfile/', views.uploadfile, name=\"uploadefile\"),\n path('fileinfo/', FileInfoView.as_view(), name=\"fileinfo\"),\n path('fileshow/', views.FileShow, name=\"fileshow\"),\n path('filedownload/', views.filedownload, name=\"filedownload\"),\n path('showdata/', views.ShowData, name=\"showdata\"),\n path('uploadcsv/', UploadCSV.as_view(), name=\"uploadcsv\"),\n path('uploadexcel/', UploadExcel.as_view(), name=\"uploadexcel\"),\n path('editdata/', views.EditData, name=\"editdata\"),\n path('deletedata/', views.DeleteData, name=\"deletedata\"),\n path('add_data/', views.AddData, name=\"add_data\"),\n path('querydata/', views.QueryData, name=\"add_data\"),\n path('showdatabase/', views.ShowDataBase, name=\"showdatabase\"),\n path('adddatabase/', views.AddDataBase, name=\"adddatabase\"),\n path('deletedatabase/', views.DeleteDataBase, name=\"deletedatabase\"),\n path('addcollection/', views.AddCollection, name=\"addcollection\"),\n path('deletecollection/', views.DeleteCollection, name=\"deletecollection\"),\n path('editdatabasename/', views.EditDataBase, name=\"editdatabasename\"),\n # mongeostore\n path('showcommondata/', views.ShowCommonData,\n name=\"showcommondata\"), # Collection展示数据\n path('commoneditdata/', views.CommonEditData,\n name=\"commoneditdata\"), # Collection编辑数据\n path('commondeletedata/', views.CommonDeleteData,\n name=\"commondeletedata\"), # Collection删除表格数据\n path('commonadd_data/', views.CommonAddData,\n name=\"commonadd_data\"), # Collection添加数据\n path('commonquerydata/', views.CommonQueryData,\n name=\"commonquerydata\"), # Collection数据搜索\n path('showdrillclination/', DrillInclinationPageView.as_view(),\n name=\"showdrillclination\"), # 钻孔测斜表\n path('commonuploadexcel/', CommonUploadExcel.as_view(),\n name=\"commonuploadexcel\"), # 上传Excel\n path('commonuploadcsv/', CommonUploadCSV.as_view(),\n name=\"commonuploadcsv\"), # 上传CSV\n path('commonuploadmeta/', CommonUploadMeta.as_view(),\n name=\"commonuploadmeta\"), # 上传元数据到GridFS\n path('commonmetashow/', views.CommonMetaShow,\n name=\"commonmetashow\"), # GridFS 源数据表格展示\n path('commonfiledownload/', views.CommonFileDownload,\n name=\"commonfiledownload\"), # 下载元数据\n path('drillinclination/', DrillInclinationPageView.as_view(),\n name='drillinclination'), # 钻孔数据分页\n path('inclinationsearch/', InclinationSearchView.as_view(),\n name='inclinationsearch'), # 钻孔数据搜索\n path('deleteinclination/', views.DeleteInclination,\n name=\"deleteinclination\"), # 钻孔Inclination删除表格数据\n path('editinclination/', views.EditInclination,\n name=\"editinclination\"), # Collection编辑数据\n path('listallimg/', views.listAllImgFromDB,\n name=\"listallimg\"), # 图片数据流读取\n path('drillmeta/', DrillMetaViewSet.as_view(),\n name='drillmeta'), # 钻孔数据分页\n\n path('drillhistogram/', DrillHistogramView.as_view(),\n name='drillhistogram'), # 钻孔元数据展示\n path('drillhistogramsearch/', DrillHistogramSearchView.as_view(),\n name='drillhistogramsearch'), # 钻孔元数据查询\n path('deletedrillhistogram/', views.DeleteDrillHistogram,\n name=\"deletedrillhistogram\"), # 钻孔元数据删除\n path('editdrillhistogram/', views.EditDrillHistogram,\n name=\"editdrillhistogram\"), # 钻孔元数据编辑\n path('drilllocationview/', DrillLocationView.as_view(),\n name=\"drilllocationview\"), # 钻孔定位表,用于天地图数据\n\n path('showdrilllocation/', ShowDrillLocationView.as_view(),\n 
name=\"showdrilllocation\"), # 钻孔定位表数据展示\n path('drilllocationsearch/', DrillLocationSearchView.as_view(),\n name='drilllocationsearch'), # 钻孔定位表数据查询\n path('deletedrilllocation/', views.DeleteDrillLocation,\n name=\"deletedrilllocation\"), # 钻孔定位数据删除\n path('editdrilllocation/', views.EditDrillLocation,\n name=\"editdrilllocation\"), # 钻孔定位数据删除\n path('adddrilllocation/', views.AddDrillLaction,\n name=\"adddrilllocation\"), # 添加单条定位表数据\n]\n","sub_path":"mongeostore_load/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":4791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"520049494","text":"'''\nCreated on 27 sep 2014\n\n@author: Peter\n'''\n# These sets let the configparser know when to convert a key's value.\nint_values = set([\n \n# controls.ini\n \"north\",\n \"crouch\",\n \"jump\",\n \"use\",\n \n# joystick.ini\n \"up_down\",\n \"left_right\",\n \"forward_backward\",\n \"strafe\",\n\t'look_x_invert',\n\t'look_y_invert',\n\n])\nfloat_values = set([\n \"float\",\n \n# joystick.ini\n \"up_down_scalar\",\n \"left_right_scalar\",\n \"forward_backward_scalar\",\n \"strafe_scalar\",\n \"axis_left\",\n \"axis_right\",\n \"axis_up\",\n \"axis_down\",\n])\nboolean_values = set([\n \"boolean\",\n])","sub_path":"config/config_map.py","file_name":"config_map.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"488472539","text":"#!/usr/bin/env python3\nimport sys\nfrom In_out.network.Client import Client\nfrom In_out.network.messages.get.Get_env_infos import Get_env_infos\n\ndef main():\n if len(sys.argv) < 2:\n print(\"Usage : env_name\")\n return \n name = sys.argv[1]\n client = Client()\n client.start()\n print(client.send(Get_env_infos(name)))\n\nif __name__ == \"__main__\":\n main()\n\n","sub_path":"get_infos.py","file_name":"get_infos.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"292864842","text":"# -*- coding: cp1251 \r\nimport pylab as py\r\nN=1000\r\n\r\nprops = dict( alpha=0.5, edgecolors='none' )\r\n\r\nhandles = []\r\n#colours = ['red', 'green', 'blue', 'magenta', 'cyan', 'yellow']\r\ncolours = ['red', 'green', 'blue']\r\nfor colour in colours:\r\n x, y = py.rand(2,N)\r\n s = 400.0 * py.rand(N)\r\n handles.append(py.scatter(x, y, c=colour, s=s, **props))\r\n\r\npy.legend(handles, colours)\r\npy.grid(True)\r\n\r\npy.show()\r\n","sub_path":"source/unit0/many_tasks/04_circles.py","file_name":"04_circles.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"522890122","text":"import numpy as np\nfrom MIoUData import MIoU_dataloader\nclass Evaluator(object):\n def __init__(self, num_class):\n self.num_class = num_class\n self.confusion_matrix = np.zeros((self.num_class,)*2)\n\n def Pixel_Accuracy(self):\n Acc = np.diag(self.confusion_matrix).sum() / self.confusion_matrix.sum()\n return Acc\n\n # 计算每个类的正确预测的比例,求所有类的平均\n def Pixel_Accuracy_Class(self):\n Acc = np.diag(self.confusion_matrix) / self.confusion_matrix.sum(axis=1)\n #Acc = np.nanmean(Acc)\n return Acc\n\n def Mean_Intersection_over_Union(self):\n MIoU = np.diag(self.confusion_matrix) / (\n np.sum(self.confusion_matrix, axis=1) + np.sum(self.confusion_matrix, axis=0) -\n np.diag(self.confusion_matrix))\n MIoU = np.nanmean(MIoU)\n return MIoU\n\n def 
Frequency_Weighted_Intersection_over_Union(self):\n freq = np.sum(self.confusion_matrix, axis=1) / np.sum(self.confusion_matrix)\n iu = np.diag(self.confusion_matrix) / (\n np.sum(self.confusion_matrix, axis=1) + np.sum(self.confusion_matrix, axis=0) -\n np.diag(self.confusion_matrix))\n\n FWIoU = (freq[freq > 0] * iu[freq > 0]).sum()\n return FWIoU\n \n def kappa(self):\n pe_rows = np.sum(self.confusion_matrix, axis=0)\n pe_cols = np.sum(self.confusion_matrix, axis=1)\n sum_total = sum(pe_cols)\n pe = np.dot(pe_rows, pe_cols) / float(sum_total ** 2)\n # np.trace求矩阵的迹,即对角线的和\n po = np.trace(self.confusion_matrix) / float(sum_total)\n return (po - pe) / (1 - pe)\n\n def _generate_matrix(self, gt_image, pre_image):\n mask = (gt_image >= 0) & (gt_image < self.num_class)\n label = self.num_class * gt_image[mask].astype('int') + pre_image[mask]\n count = np.bincount(label, minlength=self.num_class**2)\n confusion_matrix = count.reshape(self.num_class, self.num_class)\n return confusion_matrix\n\n def add_batch(self, gt_image, pre_image):\n assert gt_image.shape == pre_image.shape\n self.confusion_matrix += self._generate_matrix(gt_image, pre_image)\n\n def reset(self):\n self.confusion_matrix = np.zeros((self.num_class,) * 2)\n\n\nif __name__ == \"__main__\":\n miou = Evaluator(4)\n #y_true = np.array([2, 0, 2, 2, 0, 1])\n #y_pred = np.array([0, 0, 2, 2, 0, 2])\n #cm = miou._fast_hist(y_pred, y_true)\n # batch=4,每4个算一个miou,最后求平均miou\n #kappaVal = 0\n #miouVal = 0\n #accVal = 0 \n acc_0 =[]\n acc_1 =[]\n acc_2 =[]\n acc_3 =[]\n for index, (label, predict) in enumerate(MIoU_dataloader):\n label = label.cpu().numpy()\n predict = predict.cpu().numpy()\n miou.add_batch(label,predict)\n #以下为计算pacc和miou\n #kappaVal += miou.kappa()\n # 输出acc_class\n #print(miou.Pixel_Accuracy_Class())\n #accVal += miou.Pixel_Accuracy()\n #miouVal += miou.Mean_Intersection_over_Union()\n #print('acc and miou are {},{}'.format(miou.Pixel_Accuracy(),miou.Mean_Intersection_over_Union()))\n acc_0.append(miou.Pixel_Accuracy_Class()[0])\n acc_1.append(miou.Pixel_Accuracy_Class()[1])\n acc_2.append(miou.Pixel_Accuracy_Class()[2])\n acc_3.append(miou.Pixel_Accuracy_Class()[3])\n print('kappaVal is:{}'.format(miou.kappa()))\n print('all acc and miou are {},{}'.format(miou.Pixel_Accuracy(),miou.Mean_Intersection_over_Union()))\n print('acc_0:{}'.format(np.nanmean(acc_0)))\n print('acc_1:{}'.format(np.nanmean(acc_1)))\n print('acc_2:{}'.format(np.nanmean(acc_2)))\n print('acc_3:{}'.format(np.nanmean(acc_3)))\n\n \n\n","sub_path":"RGBD语义分割/ACNet_v0923/MIouv0217.py","file_name":"MIouv0217.py","file_ext":"py","file_size_in_byte":3746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"351331457","text":"# FRANCISCO AGUILERA MORENO\n# 26/02/2020\n# Parser.py ---> Takes a whatsapp txt conversation, and converts it into a csv dataset\n\nimport datetime\nimport sys\nimport pandas as pd\nimport os\nimport emoji\n\nclass Message:\n def __init__(self, time, username, line, content, isMedia):\n self.time = time\n self.line = line\n self.username = username\n self.content = content\n self.isMedia = isMedia\n\n def toDict(self):\n return{\n \"time\": self.time,\n \"username\": self.username,\n \"content\": self.content,\n \"isMedia\": self.isMedia\n }\n\ndef remove_emoji(text):\n return emoji.get_emoji_regexp().sub(u'', text)\n\ndef parseMsg(input):\n line = input.rstrip()\n day = int(line[0:2])\n month = int(line[3:5])\n year = int(line[6:10])\n hour = int(line[12:14])\n minute = 
int(line[15:17])\n content = line[20:].split(':')[1].rstrip().lstrip()\n content = remove_emoji(content)\n if content == '':\n content = \"EMOJI\"\n user = remove_emoji(line[20:].split(':')[0]).rstrip()\n time = datetime.datetime(year, month, day, hour, minute).isoformat()\n isMedia = \"FALSE\"\n if content == \"\":\n isMedia = \"TRUE\"\n return Message(time, user, line, content, isMedia)\n\n\n# read conversation file and create a list with its parsed msgs\ndef readFromFile(filepath):\n msgList = []\n with open(filepath, encoding=\"utf-8\") as fp:\n line = fp.readline()\n while line:\n if len(line) > 0 and line.find(\"created group\") == -1 and line.find(\"changed the subject\") == -1 and line.find(\"security code changed\") == -1 and line.find(\"You created group\") == -1 and line.find(\"Messages to this group are now secured with\") == -1 and line.find(\"You changed this group's icon\") == -1 and line != \"\\n\" and line.find(\"Messages to this chat and calls\") == -1 and (line[0:2].isnumeric() and line[2] == \"/\") and line.find(\"added\") == -1 and line.find(\"removed\") == -1 and line.find(\"left\") == -1 and line.find(\"changed the group description\") == -1:\n # parse line to message\n msg = parseMsg(line)\n msgList.append(msg)\n line = fp.readline()\n fp.close()\n return msgList\n\nmsgList = readFromFile(sys.argv[1])\ndf = pd.DataFrame.from_records([msg.toDict() for msg in msgList])\nprint(df)\ndf.to_csv(os.path.splitext(sys.argv[1])[0]+\".csv\", index=False)","sub_path":"parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":2430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"283249802","text":"import collections\nimport json\nimport logging\nfrom datetime import datetime\nfrom dateutil.relativedelta import relativedelta\n\n# Smashvods Libraries\nimport utility\n\nlogger = logging.getLogger(utility.logger_name)\n\n\ndef create_tier_stat(vid_count):\n output_data = dict()\n\n tier_list = collections.OrderedDict()\n tier_list[\"S\"] = collections.OrderedDict()\n tier_list[\"A\"] = collections.OrderedDict()\n tier_list[\"B\"] = collections.OrderedDict()\n tier_list[\"C\"] = collections.OrderedDict()\n tier_list[\"D\"] = collections.OrderedDict()\n tier_list[\"E\"] = collections.OrderedDict()\n\n ord_char_list = sorted(vid_count, key=vid_count.get, reverse=True)\n output_data[\"tier_count_list\"] = ord_char_list\n\n tier_names = [\"S\", \"A\", \"B\", \"C\", \"D\", \"E\"]\n total_count = 0\n for character in vid_count:\n total_count += vid_count[character]\n bin_size = total_count / 6.0\n\n total_vid_count = 0\n tier_index = 0\n for character in ord_char_list:\n if vid_count[character] == 0:\n tier_list[tier_names[-1]][character] = vid_count[character]\n continue\n total_vid_count += vid_count[character]\n tier_list[tier_names[tier_index]][character] = vid_count[character]\n if total_vid_count >= int((tier_index+1)*bin_size):\n tier_index += 1\n\n output_data['tier_list'] = tier_list\n return output_data\n\n\ndef update_total_tiers(game_mode):\n logger.info(\"Updating total tier list for game_mode=%s\", game_mode)\n\n query = \"SELECT name,count FROM characters WHERE game = '\" + game_mode + \"'\"\n char_list = utility.mysql_query(query)\n\n vid_count = dict()\n for char in char_list:\n vid_count[char['name']] = char['count']\n\n tier_stat = create_tier_stat(vid_count)\n total_string = json.dumps(tier_stat)\n\n query = \"UPDATE tiers SET \" + game_mode + \" = '\" + total_string + \\\n \"' WHERE date = 'total'\"\n 
utility.mysql_query(query)\n\n\ndef update_quarter_tiers():\n logger.info(\"Updating quarterly tier list\")\n\n quarter_now = datetime.now()\n quarter_titles = {1: \"Winter\", 4: \"Spring\", 7: \"Summer\", 10: \"Fall\"}\n\n start_date = dict()\n for game_mode in utility.game_list:\n query = \"SELECT \" + game_mode + \" FROM stats where title = 'Start Date'\"\n result = utility.mysql_query(query)\n\n start_date[game_mode] = datetime.strptime(result[0][game_mode], \"%B %Y\")\n\n if start_date[game_mode].month < 4:\n start_date[game_mode] = datetime(start_date[game_mode].year, 1, 1)\n elif start_date[game_mode].month < 7:\n start_date[game_mode] = datetime(start_date[game_mode].year, 4, 1)\n elif start_date[game_mode].month < 10:\n start_date[game_mode] = datetime(start_date[game_mode].year, 7, 1)\n else:\n start_date[game_mode] = datetime(start_date[game_mode].year, 10, 1)\n\n start_date_str = start_date[game_mode].strftime(\"%B %Y\")\n logger.info(\"start_date[game_mode=%s]=%s\", game_mode, start_date_str)\n\n for game_mode in utility.game_list:\n output_data = collections.OrderedDict()\n args = list()\n\n query = \"SELECT videos_pre.date,videos_post.c1,videos_post.c2 \"\n query += \"FROM videos_post JOIN videos_pre ON \"\n query += \"videos_post.videoPreId = videos_pre.id \"\n query += \"WHERE videos_post.game = '\" + game_mode + \"'\"\n videos = utility.mysql_many_query(query)\n\n date_list = list()\n for video in videos:\n date_list.append(video['date'])\n\n vid_count = dict()\n tier_count_list = list()\n\n quart_start = start_date[game_mode]\n quart_end = start_date[game_mode] + relativedelta(months=+3)\n\n char_names = utility.get_char_list(game_mode)\n\n while quart_end < quarter_now:\n quart_count = 0\n for tp in date_list:\n date_check = datetime(int(tp[:4]), int(tp[5:7]), int(tp[8:10]), 0, 0, 0)\n if quart_start <= date_check < quart_end:\n quart_count += 1\n\n for main in char_names:\n vid_count[main] = 0\n for video in videos:\n if main in video['c1'].split(',') or main in video['c2'].split(','):\n i = video['date']\n date_check = datetime(int(i[:4]), int(i[5:7]), int(i[8:10]), 0, 0, 0)\n if quart_start <= date_check < quart_end:\n vid_count[main] += 1\n\n date_title = quarter_titles[quart_start.month] + \" \" + str(quart_start.year)\n\n tier_stat = create_tier_stat(vid_count)\n tier_count_list.append(tier_stat[\"tier_count_list\"])\n output_data[date_title] = tier_stat\n\n logger.info(\"Successfully created %s tier list for %s\", game_mode, date_title)\n\n quart_start += relativedelta(months=+3)\n quart_end += relativedelta(months=+3)\n\n main_position = dict()\n for main in char_names:\n main_position[main] = list()\n for t_list in tier_count_list:\n main_position[main].append(t_list.index(main))\n\n position_change = dict()\n for main in char_names:\n position_change[main] = [0]\n for index in range(1, len(main_position[main])):\n position_change[main].append(main_position[main][index - 1] - \\\n main_position[main][index])\n\n date_index = 0\n for date in output_data:\n output_data[date][\"position_change\"] = dict()\n for main in char_names:\n output_data[date][\"position_change\"][main] = position_change[main][date_index]\n args.append((date, json.dumps(output_data[date])))\n date_index += 1\n\n query = \"INSERT into tiers(date, \" + game_mode + \") \" + \\\n \"VALUES (%s, %s) ON DUPLICATE KEY UPDATE \" + \\\n game_mode + \" = VALUES(\" + game_mode + \")\"\n utility.mysql_query(query, 
args)\n","sub_path":"tier_tools.py","file_name":"tier_tools.py","file_ext":"py","file_size_in_byte":6018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"114648069","text":"# %%\nimport pygrib\n\n# for ncep files, just use one to get variable names\ncmc_file = '/Users/rsteinhart/DATA/test_data/NAEFS/cdo_test/merged_files/cmc_merged.t00z.pgrb2f384' #for example\n\n# open the grib file\ngrbs = pygrib.open(cmc_file)\n# grbindex = pygrib.index(cmc_file)\n\n# create a new text file (\"x\" creates the file)\n# switch to \"a\" to append text file or \"w\" to overwrite the content of the file\ncmc_var_index = open(\"cmc_var_index.txt\", \"x\")\n\ngrbindex = pygrib.index(cmc_file,'shortName','typeOfLevel','level')\n# print the inventory of the file to a text file\n#grbindex.seek(0)\nfor grb in grbs:\n # grbindex = pygrib.index(cmc_file,'grb')\n selected_grbs = grbindex.select(shortName=grb)\n grbindex = str(grbindex)\n cmc_var_index.write(grbindex)\n cmc_var_index.write(\"\\n\")\n\n# %% \n# Variables not included in ncep\n\n\n\n\n","sub_path":"Scripts/ensmean_scripts/var_index.py","file_name":"var_index.py","file_ext":"py","file_size_in_byte":836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"611642868","text":"# Copyright 2016 The Chromium Authors. All rights reserved.\n# Use of this source code is governed by a BSD-style\n# license that can be found in the LICENSE file or at\n# https://developers.google.com/open-source/licenses/bsd\n\n\"\"\"A page for site admins to create a new user group.\"\"\"\nfrom __future__ import print_function\nfrom __future__ import division\nfrom __future__ import absolute_import\n\nimport logging\nimport re\n\nfrom framework import exceptions\nfrom framework import framework_helpers\nfrom framework import permissions\nfrom framework import servlet\nfrom proto import usergroup_pb2\nfrom sitewide import group_helpers\n\n\nclass GroupCreate(servlet.Servlet):\n \"\"\"Shows a page with a simple form to create a user group.\"\"\"\n\n _PAGE_TEMPLATE = 'sitewide/group-create-page.ezt'\n\n def AssertBasePermission(self, mr):\n \"\"\"Assert that the user has the permissions needed to view this page.\"\"\"\n super(GroupCreate, self).AssertBasePermission(mr)\n\n if not permissions.CanCreateGroup(mr.perms):\n raise permissions.PermissionException(\n 'User is not allowed to create a user group')\n\n def GatherPageData(self, _mr):\n \"\"\"Build up a dictionary of data values to use when rendering the page.\"\"\"\n visibility_levels = group_helpers.BuildUserGroupVisibilityOptions()\n initial_visibility = group_helpers.GroupVisibilityView(\n usergroup_pb2.MemberVisibility.ANYONE)\n group_types = group_helpers.BuildUserGroupTypeOptions()\n\n return {\n 'groupadmin': '',\n 'group_types': group_types,\n 'import_group': '',\n 'initial_friendprojects': '',\n 'initial_group_type': '',\n 'initial_name': '',\n 'initial_visibility': initial_visibility,\n 'visibility_levels': visibility_levels,\n }\n\n def ProcessFormData(self, mr, post_data):\n \"\"\"Process the posted form.\"\"\"\n # 1. 
Gather data from the request.\n group_name = post_data.get('groupname')\n try:\n existing_group_id = self.services.user.LookupUserID(mr.cnxn, group_name)\n existing_settings = self.services.usergroup.GetGroupSettings(\n mr.cnxn, existing_group_id)\n if existing_settings:\n mr.errors.groupname = 'That user group already exists'\n except exceptions.NoSuchUserException:\n pass\n\n if post_data.get('import_group'):\n vis = usergroup_pb2.MemberVisibility.OWNERS\n ext_group_type = post_data.get('group_type')\n friend_projects = ''\n if not ext_group_type:\n mr.errors.groupimport = 'Please provide external group type'\n else:\n ext_group_type = str(\n usergroup_pb2.GroupType(int(ext_group_type))).lower()\n\n if (ext_group_type == 'computed' and\n not group_name.startswith('everyone@')):\n mr.errors.groupimport = 'Computed groups must be named everyone@'\n\n else:\n vis = usergroup_pb2.MemberVisibility(int(post_data['visibility']))\n ext_group_type = None\n friend_projects = post_data.get('friendprojects', '')\n who_can_view_members = str(vis).lower()\n\n if not mr.errors.AnyErrors():\n project_ids, error = self.services.usergroup.ValidateFriendProjects(\n mr.cnxn, self.services, friend_projects)\n if error:\n mr.errors.friendprojects = error\n\n # 2. Call services layer to save changes.\n if not mr.errors.AnyErrors():\n group_id = self.services.usergroup.CreateGroup(\n mr.cnxn, self.services, group_name, who_can_view_members,\n ext_group_type, project_ids)\n\n # 3. Determine the next page in the UI flow.\n if mr.errors.AnyErrors():\n self.PleaseCorrect(mr, initial_name=group_name)\n else:\n # Go to the new user group's detail page.\n return framework_helpers.FormatAbsoluteURL(\n mr, '/g/%s/' % group_id, include_project=False)\n","sub_path":"appengine/monorail/sitewide/groupcreate.py","file_name":"groupcreate.py","file_ext":"py","file_size_in_byte":3782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"629422315","text":"from sklearn.svm import LinearSVC #\nfrom sklearn.preprocessing import StandardScaler #\nfrom sklearn.model_selection import train_test_split #\nimport glob #\nimport time #\nimport matplotlib.image as mpimg #\nimport numpy as np\nimport cv2\nimport pickle\n\nfrom lesson_functions import *\nfrom hyperparameters import *\n\n\ndef read_test_images():\n test_images = glob.glob('data/vehicle_or_not/**/*.jpeg')\n test_cars = []\n test_notcars = []\n for image in test_images:\n if 'image' in image or 'extra' in image:\n test_notcars.append(image)\n else:\n test_cars.append(image)\n return test_cars, test_notcars\n\ndef read_images():\n cars = glob.glob('data/vehicles/**/*.png')\n notcars = glob.glob('data/non-vehicles/**/*.png')\n return cars, notcars\n\ncars, notcars = read_images()\nprint(\"Cars:\", len(cars))\nprint(\"Not cars:\", len(notcars))\n\ncar_features = extract_features(cars, color_space=color_space,\n spatial_size=spatial_size, hist_bins=hist_bins,\n orient=orient, pix_per_cell=pix_per_cell,\n cell_per_block=cell_per_block,\n hog_channel=hog_channel, spatial_feat=spatial_feat,\n hist_feat=hist_feat, hog_feat=hog_feat)\nnotcar_features = extract_features(notcars, color_space=color_space,\n spatial_size=spatial_size, hist_bins=hist_bins,\n orient=orient, pix_per_cell=pix_per_cell,\n cell_per_block=cell_per_block,\n hog_channel=hog_channel, spatial_feat=spatial_feat,\n hist_feat=hist_feat, hog_feat=hog_feat)\n\n\n# In[6]:\n\nX = np.vstack((car_features, notcar_features)).astype(np.float64)\n# Fit a per-column 
scaler\nX_scaler = StandardScaler().fit(X)\n# Apply the scaler to X\nscaled_X = X_scaler.transform(X)\n\n# Define the labels vector\ny = np.hstack((np.ones(len(car_features)), np.zeros(len(notcar_features))))\nprint(\"y: \", y.shape)\nprint(\"X: \", scaled_X.shape)\n# Split up data into randomized training and test sets\nrand_state = np.random.randint(0, 100)\nX_train, X_test, y_train, y_test = train_test_split(\n scaled_X, y, test_size=0.2, random_state=rand_state)\n\n\n# In[7]:\n\nprint('Using:',orient,'orientations',pix_per_cell,\n 'pixels per cell and', cell_per_block,'cells per block')\nprint('Feature vector length:', len(X_train[0]))\n# Use a linear SVC\nsvc = LinearSVC()\n# Check the training time for the SVC\nt=time.time()\nsvc.fit(X_train, y_train)\nt2 = time.time()\nprint(round(t2-t, 2), 'Seconds to train SVC...')\n# Check the score of the SVC\nprint('Test Accuracy of SVC = ', round(svc.score(X_test, y_test), 4))\n# Check the prediction time for a single sample\nt=time.time()\n\nwith open(\"classified.p\", \"wb\") as cf:\n pickle.dump({\n \"svc\": svc,\n \"X_scaler\": X_scaler\n }, cf)","sub_path":"classifier.py","file_name":"classifier.py","file_ext":"py","file_size_in_byte":2864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"337909772","text":"###\n## Exploring why 4x4 grids have a spike of patterns with stroke length 11\n## Will display all 4x4 grids of stroke length 11 in 4x4_data.csv\n##\n## D.E.Budzitowski 150876\n###\n\nimport Image, csv\n\nwith open(\"./analysis/3x3/3x3_data.csv\") as csvfile:\n reader = csv.DictReader(csvfile)\n savepath = \"./analysis/3x3_stroke8/\"\n for row in reader:\n if(float(row['stroke']) > 7 and float(row['stroke']) < 9):\n imgpath = row['image']\n im = Image.open(imgpath)\n im.show()\n imgname = imgpath.split('/')[5]\n im.save(savepath+imgname)\n","sub_path":"analysis_scripts/3x3_stroke8.py","file_name":"3x3_stroke8.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"551022701","text":"import boto3\nimport sure # noqa # pylint: disable=unused-import\nfrom unittest import TestCase\nfrom datetime import timedelta, datetime\n\nfrom moto import mock_logs\nfrom moto.core.utils import unix_time_millis\n\nTEST_REGION = \"eu-west-1\"\n\n\nclass TestLogFilter(TestCase):\n def setUp(self) -> None:\n self.conn = boto3.client(\"logs\", TEST_REGION)\n self.log_group_name = \"dummy\"\n self.log_stream_name = \"stream\"\n self.conn.create_log_group(logGroupName=self.log_group_name)\n self.conn.create_log_stream(\n logGroupName=self.log_group_name, logStreamName=self.log_stream_name\n )\n\n\n@mock_logs\nclass TestLogFilterParameters(TestLogFilter):\n def setUp(self) -> None:\n super().setUp()\n\n def test_filter_logs_interleaved(self):\n messages = [\n {\"timestamp\": 0, \"message\": \"hello\"},\n {\"timestamp\": 0, \"message\": \"world\"},\n ]\n self.conn.put_log_events(\n logGroupName=self.log_group_name,\n logStreamName=self.log_stream_name,\n logEvents=messages,\n )\n res = self.conn.filter_log_events(\n logGroupName=self.log_group_name,\n logStreamNames=[self.log_stream_name],\n interleaved=True,\n )\n events = res[\"events\"]\n for original_message, resulting_event in zip(messages, events):\n resulting_event[\"eventId\"].should.equal(str(resulting_event[\"eventId\"]))\n resulting_event[\"timestamp\"].should.equal(original_message[\"timestamp\"])\n 
resulting_event[\"message\"].should.equal(original_message[\"message\"])\n\n def test_put_log_events_now(self):\n ts_1 = int(unix_time_millis())\n ts_2 = int(unix_time_millis(datetime.utcnow() + timedelta(minutes=5)))\n ts_3 = int(unix_time_millis(datetime.utcnow() + timedelta(days=1)))\n\n messages = [\n {\"message\": f\"Message {idx}\", \"timestamp\": ts}\n for idx, ts in enumerate([ts_1, ts_2, ts_3])\n ]\n\n resp = self.conn.put_log_events(\n logGroupName=self.log_group_name,\n logStreamName=self.log_stream_name,\n logEvents=messages,\n sequenceToken=\"49599396607703531511419593985621160512859251095480828066\",\n )\n\n # Message 2 was too new\n resp.should.have.key(\"rejectedLogEventsInfo\").should.equal(\n {\"tooNewLogEventStartIndex\": 2}\n )\n # Message 0 and 1 were persisted though\n events = self.conn.filter_log_events(\n logGroupName=self.log_group_name,\n logStreamNames=[self.log_stream_name],\n limit=20,\n )[\"events\"]\n messages = [e[\"message\"] for e in events]\n messages.should.contain(\"Message 0\")\n messages.should.contain(\"Message 1\")\n messages.shouldnt.contain(\"Message 2\")\n\n def test_filter_logs_paging(self):\n timestamp = int(unix_time_millis(datetime.utcnow()))\n messages = []\n for i in range(25):\n messages.append({\"message\": f\"Message number {i}\", \"timestamp\": timestamp})\n timestamp += 100\n\n self.conn.put_log_events(\n logGroupName=self.log_group_name,\n logStreamName=self.log_stream_name,\n logEvents=messages,\n )\n res = self.conn.filter_log_events(\n logGroupName=self.log_group_name,\n logStreamNames=[self.log_stream_name],\n limit=20,\n )\n events = res[\"events\"]\n events.should.have.length_of(20)\n res[\"nextToken\"].should.equal(\"dummy@stream@\" + events[-1][\"eventId\"])\n\n res = self.conn.filter_log_events(\n logGroupName=self.log_group_name,\n logStreamNames=[self.log_stream_name],\n limit=20,\n nextToken=res[\"nextToken\"],\n )\n events += res[\"events\"]\n events.should.have.length_of(25)\n res.should_not.have.key(\"nextToken\")\n\n for original_message, resulting_event in zip(messages, events):\n resulting_event[\"eventId\"].should.equal(str(resulting_event[\"eventId\"]))\n resulting_event[\"timestamp\"].should.equal(original_message[\"timestamp\"])\n resulting_event[\"message\"].should.equal(original_message[\"message\"])\n\n res = self.conn.filter_log_events(\n logGroupName=self.log_group_name,\n logStreamNames=[self.log_stream_name],\n limit=20,\n nextToken=\"wrong-group@stream@999\",\n )\n res[\"events\"].should.have.length_of(0)\n res.should_not.have.key(\"nextToken\")\n\n def test_filter_logs_paging__unknown_token(self):\n res = self.conn.filter_log_events(\n logGroupName=self.log_group_name,\n logStreamNames=[self.log_stream_name],\n limit=20,\n nextToken=\"invalid-token\",\n )\n res[\"events\"].should.have.length_of(0)\n res.should_not.have.key(\"nextToken\")\n\n\n@mock_logs\nclass TestLogsFilterPattern(TestLogFilter):\n def setUp(self) -> None:\n super().setUp()\n now = int(unix_time_millis(datetime.utcnow()))\n messages = [\n {\"timestamp\": now, \"message\": \"hello\"},\n {\"timestamp\": now, \"message\": \"world\"},\n {\"timestamp\": now, \"message\": \"hello world\"},\n {\"timestamp\": now, \"message\": \"goodbye world\"},\n {\"timestamp\": now, \"message\": \"hello cruela\"},\n {\"timestamp\": now, \"message\": \"goodbye cruel world\"},\n ]\n self.conn.put_log_events(\n logGroupName=self.log_group_name,\n logStreamName=self.log_stream_name,\n logEvents=messages,\n )\n\n def test_unknown_pattern(self):\n events = 
self.conn.filter_log_events(\n logGroupName=self.log_group_name,\n logStreamNames=[self.log_stream_name],\n filterPattern='{$.message = \"hello\"}',\n )[\"events\"]\n events.should.have.length_of(6)\n\n def test_simple_word_pattern(self):\n events = self.conn.filter_log_events(\n logGroupName=self.log_group_name,\n logStreamNames=[self.log_stream_name],\n filterPattern=\"hello\",\n )[\"events\"]\n messages = [e[\"message\"] for e in events]\n set(messages).should.equal({\"hello\", \"hello cruela\", \"hello world\"})\n\n def test_multiple_words_pattern(self):\n events = self.conn.filter_log_events(\n logGroupName=self.log_group_name,\n logStreamNames=[self.log_stream_name],\n filterPattern=\"goodbye world\",\n )[\"events\"]\n messages = [e[\"message\"] for e in events]\n set(messages).should.equal({\"goodbye world\", \"goodbye cruel world\"})\n\n def test_quoted_pattern(self):\n events = self.conn.filter_log_events(\n logGroupName=self.log_group_name,\n logStreamNames=[self.log_stream_name],\n filterPattern='\"hello cruel\"',\n )[\"events\"]\n messages = [e[\"message\"] for e in events]\n set(messages).should.equal({\"hello cruela\"})\n","sub_path":"tests/test_logs/test_logs_filter.py","file_name":"test_logs_filter.py","file_ext":"py","file_size_in_byte":7020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"129283922","text":"from django.conf.urls import patterns, include, url\nfrom django.conf.urls.static import static\nfrom portal.core.url_finder import UrlFinder\nfrom django.conf import settings\nfrom django.contrib import admin\n\nadmin.autodiscover()\n\ndef get_media_url():\n urlpatterns = patterns('',\n (r'media/(?P.*)$', 'django.views.static.serve', {'document_root': settings.MEDIA_ROOT}))\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n return urlpatterns\n\ndef get_app_urls():\n url_finder = UrlFinder()\n url_finder.find_apps()\n url_finder.save_apps_in_db()\n return url_finder.get_urls()\n\nurlpatterns = [url(r'^admin/', include(admin.site.urls))] + static(settings.STATIC_URL, document_root=settings.STATIC_URL)\nurlpatterns+= get_app_urls()\n\nif settings.DEBUG:\n urlpatterns+= get_media_url()\n \n\n\n","sub_path":"wsgi/portal/portal/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"221348876","text":"import Multi_GAN as model\nimport lbann.contrib.launcher\nimport lbann.contrib.args\nimport argparse\nimport os\nfrom lbann.core.util import get_parallel_strategy_args\nimport lbann\nimport lbann.modules as lm\n\nfrom lbann.contrib.modules.fftshift import FFTShift\nfrom lbann.contrib.modules.radial_profile import RadialProfile\ndef list2str(l):\n return ' '.join([str(i) for i in l])\n\ndef f_invtransform(y,scale=4.0): ### Transform to original space\n '''\n The inverse of the transformation function that scales the data before training\n '''\n inv_transform = lbann.WeightedSum(\n lbann.SafeDivide(\n lbann.Add(lbann.Constant(value=1.0, hint_layer=y),lbann.Identity(y)),\n lbann.Subtract(lbann.Constant(value=1.0, hint_layer=y),lbann.Identity(y))),\n scaling_factors=str(scale))\n\n return inv_transform\n\n\ndef construct_lc_launcher_args():\n parser = argparse.ArgumentParser()\n lbann.contrib.args.add_scheduler_arguments(parser)\n\n # General arguments\n parser.add_argument(\n '--job-name', action='store', default='lbann_cosmo3DGAN', type=str,\n help='scheduler job name 
(default: lbann_cosmoae)')\n parser.add_argument(\"--scheduler\", type=str, default=\"lsf\")\n parser.add_argument(\n '--mini-batch-size', action='store', default=1, type=int,\n help='mini-batch size (default: 1)', metavar='NUM')\n parser.add_argument(\n '--num-epochs', action='store', default=2, type=int,\n help='number of epochs (default: 2)', metavar='NUM')\n\n # Model specific arguments\n parser.add_argument(\n '--input-width', action='store', default=64, type=int,\n help='the input spatial width (default: 64)')\n\n parser.add_argument(\n '--input-channel', action='store', default=1, type=int,\n help='the input channel (default: 1)')\n\n parser.add_argument(\n '--data-dir', action='store', type=str,\n default = '/p/vast1/lbann/datasets/exagan/portal.nersc.gov/project/m3363/transfer_data_livermore/64cube_dataset/norm_1_train_val.npy',\n help='dataset directory')\n\n parser.add_argument(\n '--num-samples', action='store', default=100000, type=int,\n help='the numbe of training/val samples (default: 100000)')\n\n parser.add_argument(\n '--num-discblocks', action='store', default=1, type=int,\n help='number of discriminator blocks (default: 1)', metavar='NUM')\n # Parallelism arguments\n parser.add_argument(\n '--depth-groups', action='store', type=int, default=2,\n help='the number of processes for the depth dimension (default: 2)')\n parser.add_argument(\n '--dynamically-reclaim-error-signals', action='store_true',\n help='Allow LBANN to reclaim error signals buffers (default: False)')\n\n parser.add_argument(\n '--use-distconv', action='store_true',\n help='Use distconv')\n\n parser.add_argument(\n '--compute-mse', action='store_true',\n help='Compute MSE')\n\n parser.add_argument(\n '--spectral-loss', action='store_true',\n help='Use spectral loss')\n\n parser.add_argument(\n '--use-bn', action='store_true',\n help='Use batch norm layer')\n\n parser.add_argument(\n '--dump-outputs', action='store_true',\n help='Dump outputs')\n\n parser.add_argument(\n '--enable-subgraph', action='store_true',\n help='Enable subgraph parallelism')\n\n return parser.parse_args()\n\n\ndef construct_model(args):\n \"\"\"Construct LBANN for CosmoGAN 3D model.\n\n \"\"\"\n obj = []\n metrics = []\n callbacks = []\n\n w = [args.input_width]*3 \n w.insert(0,args.input_channel)\n _sample_dims = w\n\n ps = None\n #have model and input ps\n if(args.use_distconv):\n ps = get_parallel_strategy_args(\n sample_groups=args.mini_batch_size,\n height_groups=args.depth_groups)\n\n g_device = 'GPU'\n input = lbann.Input(data_field='samples',name='input',device=g_device)\n input_ = lbann.Reshape(input, dims=_sample_dims,name='in_reshape', device=g_device),\n x1 = lbann.Identity(input_, parallel_strategy=None, name='x1')\n x2 = lbann.Identity(input_, name='x2') if args.compute_mse or args.spectral_loss else None\n\n zero = lbann.Constant(value=0.0,num_neurons=1,name='zero',device=g_device)\n one = lbann.Constant(value=1.0,num_neurons=1,name='one', device=g_device)\n\n z = lbann.Reshape(lbann.Gaussian(mean=0.0,stdev=1.0, neuron_dims=64, name='noise_vec', device=g_device),\n dims=[1,64], name='noise_vec_reshape',device=g_device)\n print(\"RUN ARGS \", args) \n\n losses = model.Exa3DMultiGAN(args.input_width,args.input_channel,\n g_device,ps,use_bn=args.use_bn,num_discblocks=args.num_discblocks,\n enable_subgraph=args.enable_subgraph)(x1,z)\n print(\"LEN losses \", len(losses))\n \n layers=list(lbann.traverse_layer_graph([input,z,zero,one]))\n # Setup objective function\n weights = set()\n src_layers = []\n dst_layers = []\n 
for l in layers:\n if(l.weights and \"disc1\" in l.name and \"instance1\" in l.name):\n src_layers.append(l.name)\n #freeze weights in disc2, analogous to discrim.trainable=False in Keras\n if(l.weights and \"disc2\" in l.name):\n dst_layers.append(l.name)\n for idx in range(len(l.weights)):\n l.weights[idx].optimizer = lbann.NoOptimizer()\n weights.update(l.weights)\n\n block_ids = list(range(0, len(losses)-1, 3))\n for l, i in enumerate(block_ids):\n obj.append(lbann.SigmoidBinaryCrossEntropy([losses[i],one],name='d1_real_bce'+str(i)))\n obj.append(lbann.SigmoidBinaryCrossEntropy([losses[i+1],zero],name='d1_fake_bce'+str(i+1)))\n obj.append(lbann.SigmoidBinaryCrossEntropy([losses[i+2],one],name='d_adv_bce'+str(i+2)))\n \n gen_img = losses[-1] \n mse = lbann.MeanSquaredError([gen_img, x2], name='MSE') if args.compute_mse else None\n\n if args.spectral_loss:\n dft_gen_img = lbann.DFTAbs(f_invtransform(gen_img))\n dft_img = lbann.StopGradient(lbann.DFTAbs(f_invtransform(x2)))\n \n ## Adding full spectral loss\n print(\"SAMPLE DIMS \", _sample_dims)\n gen_fft=FFTShift()(dft_gen_img,_sample_dims)\n gen_spec_prof=RadialProfile()(gen_fft,_sample_dims,63)\n \n img_fft=FFTShift()(dft_img,_sample_dims)\n img_spec_prof=RadialProfile()(img_fft,_sample_dims,63)\n spec_loss = lbann.Log(lbann.MeanSquaredError(gen_spec_prof, img_spec_prof))\n obj.append(lbann.LayerTerm(spec_loss, scale=args.spectral_loss))\n metrics.append(lbann.Metric(spec_loss,name='spec_loss'))\n\n\n if (mse is not None):\n obj.append(mse)\n metrics.append(lbann.Metric(mse, name='MSE'))\n\n\n callbacks.append(lbann.CallbackPrint())\n callbacks.append(lbann.CallbackTimer())\n callbacks.append(lbann.CallbackStepLearningRate(step=10, amt=0.5))\n callbacks.append(lbann.CallbackGPUMemoryUsage())\n callbacks.append(lbann.CallbackReplaceWeights(source_layers=list2str(src_layers),\n destination_layers=list2str(dst_layers),\n batch_interval=2))\n if args.dump_outputs:\n callbacks.append(lbann.CallbackDumpOutputs(batch_interval=600,\n execution_modes='validation',\n directory='outputs',\n format='npy',\n layers=f'{x1.name} {gen_img.name}'))\n\n # ------------------------------------------\n # Construct model\n # ------------------------------------------\n\n return lbann.Model(args.num_epochs,\n weights=weights,\n layers=layers,\n objective_function=obj,\n metrics=metrics,\n callbacks=callbacks)\n\n\nif __name__ == '__main__':\n import lbann\n\n args = construct_lc_launcher_args()\n os.environ['INPUT_WIDTH'] = str(args.input_width)\n os.environ['DATA_DIR'] = args.data_dir\n os.environ['NUM_SAMPLES'] = str(args.num_samples)\n\n\n trainer = lbann.Trainer(args.mini_batch_size)\n model = construct_model(args)\n # Setup optimizer\n opt = lbann.Adam(learn_rate=0.001,beta1=0.5,beta2=0.99,eps=1e-8)\n\n # Runtime parameters/arguments\n environment = lbann.contrib.args.get_distconv_environment(\n num_io_partitions=args.depth_groups)\n\n if args.dynamically_reclaim_error_signals:\n environment['LBANN_KEEP_ERROR_SIGNALS'] = 0\n else:\n environment['LBANN_KEEP_ERROR_SIGNALS'] = 1\n\n\n import construct_data_reader as cdr\n print(\"Using Python Data READER!!!!\")\n data_reader = cdr.construct_python_data_reader()\n #Remove cosmoflow/hdf5 stuff\n environment.pop('LBANN_DISTCONV_COSMOFLOW_PARALLEL_IO')\n environment.pop('LBANN_DISTCONV_NUM_IO_PARTITIONS')\n lbann_args = ['--num_io_threads=1']\n\n environment['INPUT_WIDTH'] = str(args.input_width)\n environment['DATA_DIR'] = args.data_dir\n environment['NUM_SAMPLES'] = str(args.num_samples)\n\n #Corona stuff\n 
environment['MIOPEN_DEBUG_DISABLE_FIND_DB'] = 1\n environment['MIOPEN_DISABLE_CACHE']= 1\n\n print('LBANN args ', lbann_args)\n print(\"LBANN ENV VAR \", environment)\n\n status = lbann.contrib.launcher.run(trainer,model, data_reader, opt,\n scheduler=args.scheduler,\n account='exalearn',\n partition='pbatch',\n nodes=args.nodes,\n procs_per_node=args.procs_per_node,\n time_limit=720,\n environment=environment,\n lbann_args=lbann_args,\n setup_only=False,\n batch_job=False,\n job_name=args.job_name)\n print(status)\n","sub_path":"applications/physics/cosmology/ExaGAN/train_multigan.py","file_name":"train_multigan.py","file_ext":"py","file_size_in_byte":9870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"553563909","text":"from datetime import datetime as dt\nfrom collections import defaultdict\nimport time\nfrom typing import Optional\n\nfrom titus_optimize.compute_v3 import IPSolverParameters, IP_SOLUTION_TIME_BOUND, PlacementSolver\n\nfrom titus_isolate import log\nfrom titus_isolate.allocate.allocate_request import AllocateRequest\nfrom titus_isolate.allocate.allocate_response import AllocateResponse, get_workload_allocations\nfrom titus_isolate.allocate.allocate_threads_request import AllocateThreadsRequest\nfrom titus_isolate.allocate.constants import FREE_THREAD_IDS\nfrom titus_isolate.allocate.cpu_allocator import CpuAllocator\nfrom titus_isolate.config.config_manager import ConfigManager\nfrom titus_isolate.config.constants import ALPHA_NU, DEFAULT_ALPHA_NU, ALPHA_LLC, DEFAULT_ALPHA_LLC, ALPHA_L12, \\\n DEFAULT_ALPHA_L12, ALPHA_PREV, DEFAULT_ALPHA_PREV, \\\n MAX_SOLVER_RUNTIME, DEFAULT_MAX_SOLVER_RUNTIME, \\\n RELATIVE_MIP_GAP_STOP, DEFAULT_RELATIVE_MIP_GAP_STOP, MIP_SOLVER, DEFAULT_MIP_SOLVER\nfrom titus_isolate.metrics.constants import IP_ALLOCATOR_TIMEBOUND_COUNT, FORECAST_REBALANCE_FAILURE_COUNT\nfrom titus_isolate.model.processor.cpu import Cpu\nfrom titus_isolate.model.utils import get_burst_workloads, release_all_threads\nfrom titus_isolate.model.utils import get_sorted_workloads\nfrom titus_isolate.monitor.free_thread_provider import FreeThreadProvider\nfrom titus_isolate.predict.cpu_usage_predictor import PredEnvironment, CpuUsagePredictor\nfrom titus_isolate.predict.cpu_usage_predictor_manager import CpuUsagePredictorManager\nfrom titus_isolate.predict.simple_cpu_predictor import SimpleCpuPredictor\n\n\nclass CUVector:\n def __init__(\n self,\n requested_cus,\n curr_placement_vectors,\n ordered_workload_ids):\n self.requested_cus = requested_cus\n self.curr_placement_vectors_static = curr_placement_vectors\n self.ordered_workload_ids = ordered_workload_ids\n\n def __str__(self):\n return str(vars(self))\n\n\nclass ForecastIPCpuAllocator(CpuAllocator):\n\n def __init__(self,\n cpu_usage_predictor_manager: CpuUsagePredictorManager,\n config_manager: ConfigManager,\n free_thread_provider: FreeThreadProvider):\n self.__reg = None\n self.__time_bound_call_count = 0\n self.__rebalance_failure_count = 0\n self.__ip_solver_params = IPSolverParameters(\n alpha_nu=config_manager.get_float(ALPHA_NU, DEFAULT_ALPHA_NU),\n alpha_llc=config_manager.get_float(ALPHA_LLC, DEFAULT_ALPHA_LLC),\n alpha_l12=config_manager.get_float(ALPHA_L12, DEFAULT_ALPHA_L12),\n alpha_prev=config_manager.get_float(ALPHA_PREV, DEFAULT_ALPHA_PREV))\n\n self.__solver_max_runtime_secs = config_manager.get_float(MAX_SOLVER_RUNTIME, DEFAULT_MAX_SOLVER_RUNTIME)\n self.__solver_name = config_manager.get_str(MIP_SOLVER, DEFAULT_MIP_SOLVER)\n 
self.__solver_mip_gap = config_manager.get_float(RELATIVE_MIP_GAP_STOP, DEFAULT_RELATIVE_MIP_GAP_STOP)\n self.__cpu_usage_predictor_manager = cpu_usage_predictor_manager\n self.__config_manager = config_manager\n self.__free_thread_provider = free_thread_provider\n self.__cnt_rebalance_calls = 0\n self.__call_meta = None # track things __place_threads call\n\n def assign_threads(self, request: AllocateThreadsRequest) -> AllocateResponse:\n self.__call_meta = {}\n cpu = request.get_cpu()\n cpu_usage = request.get_cpu_usage()\n workloads = request.get_workloads()\n workload_id = request.get_workload_id()\n curr_ids_per_workload = cpu.get_workload_ids_to_thread_ids()\n\n return AllocateResponse(\n self.__compute_allocation(cpu, workload_id, workloads, curr_ids_per_workload, cpu_usage, True),\n get_workload_allocations(cpu, list(workloads.values())),\n self.get_name(),\n self.__call_meta)\n\n def free_threads(self, request: AllocateThreadsRequest) -> AllocateResponse:\n self.__call_meta = {}\n cpu = request.get_cpu()\n cpu_usage = request.get_cpu_usage()\n workloads = request.get_workloads()\n workload_id = request.get_workload_id()\n curr_ids_per_workload = cpu.get_workload_ids_to_thread_ids()\n\n if workload_id not in curr_ids_per_workload:\n raise Exception(\"workload_id=`%s` is not placed on the instance. Cannot free it.\" % (workload_id,))\n\n return AllocateResponse(\n self.__compute_allocation(cpu, workload_id, workloads, curr_ids_per_workload, cpu_usage, False),\n get_workload_allocations(cpu, list(workloads.values())),\n self.get_name(),\n self.__call_meta)\n\n def rebalance(self, request: AllocateRequest) -> AllocateResponse:\n self.__call_meta = {}\n cpu = request.get_cpu()\n cpu_usage = request.get_cpu_usage()\n workloads = request.get_workloads()\n self.__cnt_rebalance_calls += 1\n\n if len(workloads) == 0:\n log.warning(\"Ignoring rebalance of empty CPU.\")\n self.__call_meta['rebalance_empty'] = 1\n return AllocateResponse(\n cpu,\n get_workload_allocations(cpu, list(workloads.values())),\n self.get_name(),\n self.__call_meta)\n\n log.info(\"Rebalancing with predictions...\")\n curr_ids_per_workload = cpu.get_workload_ids_to_thread_ids()\n\n return AllocateResponse(\n self.__compute_allocation(cpu, None, workloads, curr_ids_per_workload, cpu_usage, None),\n get_workload_allocations(cpu, list(workloads.values())),\n self.get_name(),\n self.__call_meta)\n\n def __compute_allocation(self, cpu, workload_id, workloads, curr_ids_per_workload, cpu_usage, is_add):\n predicted_usage = self.__predict_usage(workloads, cpu_usage)\n cpu = self.__place_threads(cpu, workload_id, workloads, curr_ids_per_workload, predicted_usage, is_add)\n\n # Burst workload computation\n burst_workloads = get_burst_workloads(workloads.values())\n if not is_add:\n burst_workloads = [workload for workload in burst_workloads if workload.get_id() != workload_id]\n\n # Claim all free threads for burst workloads\n burst_workload_ids = [w.get_id() for w in burst_workloads]\n free_threads = self.__free_thread_provider.get_free_threads(cpu, workloads, predicted_usage)\n for t in free_threads:\n for w_id in burst_workload_ids:\n t.claim(w_id)\n\n self.__call_meta[FREE_THREAD_IDS] = [t.get_id() for t in free_threads]\n return cpu\n\n def get_name(self) -> str:\n return self.__class__.__name__\n\n def __get_cpu_usage_predictor(self) -> Optional[SimpleCpuPredictor]:\n return self.__cpu_usage_predictor_manager.get_cpu_predictor()\n\n def __predict_usage(self, workloads, cpu_usage):\n res = {}\n cpu_usage_predictor = 
self.__get_cpu_usage_predictor()\n\n cm = self.__config_manager\n pred_env = PredEnvironment(cm.get_region(), cm.get_environment(), dt.utcnow().hour)\n\n start_time = time.time()\n for w in workloads.values(): # TODO: batch the call\n # TODO: Integrate new prediction service\n pred = w.get_thread_count()\n if type(cpu_usage_predictor) is CpuUsagePredictor:\n pred = cpu_usage_predictor.predict(w, cpu_usage.get(w.get_id(), None), pred_env)\n log.info(\"Predicted cpu usage: %s for workload: %s\", pred, w.get_id())\n else:\n log.info(\"Not predicting cpu usage for workload: %s\", w.get_id())\n res[w.get_id()] = pred\n stop_time = time.time()\n self.__call_meta['pred_cpu_usage_dur_secs'] = stop_time - start_time\n try:\n self.__call_meta['pred_cpu_usage_model_id'] = cpu_usage_predictor.get_model().meta_data['model_training_titus_task_id']\n except:\n self.__call_meta['pred_cpu_usage_model_id'] = 'unknown'\n\n log.debug(\"Usage prediction per workload: \" + str(res))\n if len(res) > 0:\n self.__call_meta['pred_cpu_usage'] = dict(res)\n return res\n\n def __place_threads(self, cpu, workload_id, workloads, curr_ids_per_workload, predicted_cpu_usage, is_add) -> Cpu:\n # this will predict against the new or deleted workload too if it's static\n cu_vector = self.__get_requested_cu_vector(cpu, workload_id, workloads, curr_ids_per_workload, is_add)\n\n cpu = self.__compute_apply_placement(\n cpu,\n cu_vector.requested_cus,\n cu_vector.curr_placement_vectors_static,\n predicted_cpu_usage,\n workloads,\n cu_vector.ordered_workload_ids)\n\n return cpu\n\n @staticmethod\n def __get_requested_cu_vector(cpu, workload_id, workloads, curr_ids_per_workload, is_add) -> CUVector:\n n_compute_units = len(cpu.get_threads())\n tid_2order = cpu.get_natural_indexing_2_original_indexing()\n\n ordered_workload_ids = [w.get_id() for w in get_sorted_workloads(workloads.values())]\n\n changed_workload = workloads.get(workload_id, None)\n\n curr_placement_vectors_static = []\n for wid in ordered_workload_ids:\n if (changed_workload is not None) and (wid == changed_workload.get_id()) and is_add:\n continue\n cids = curr_ids_per_workload[wid]\n v = [1 if tid_2order[i] in cids else 0 for i in range(n_compute_units)]\n curr_placement_vectors_static.append(v)\n\n is_remove = (not is_add) and workload_id in ordered_workload_ids\n\n if is_remove:\n requested_cus = [\n workloads[wid].get_thread_count()\n if wid != changed_workload.get_id() else 0\n for wid in ordered_workload_ids\n ]\n else:\n requested_cus = [\n workloads[wid].get_thread_count()\n for wid in ordered_workload_ids\n ]\n\n return CUVector(\n requested_cus,\n curr_placement_vectors_static if len(curr_placement_vectors_static) > 0 else None,\n ordered_workload_ids)\n\n @staticmethod\n def __assign_new_mapping(cpu, thread_id2workload_ids):\n cpu.clear()\n for t in cpu.get_threads():\n wids = thread_id2workload_ids.get(t.get_id(), None)\n if wids is not None:\n for wi in wids:\n t.claim(wi)\n\n def __compute_apply_placement(\n self,\n cpu,\n requested_cus,\n curr_placement_vectors_static,\n predicted_usage_static,\n workloads,\n ordered_workload_ids_static):\n\n predicted_usage_static_vector = None\n if len(predicted_usage_static) > 0:\n predicted_usage_static_vector = [predicted_usage_static[w_id] for w_id in ordered_workload_ids_static]\n\n new_placement_vectors = self.__compute_new_placement(\n cpu,\n requested_cus,\n curr_placement_vectors_static,\n predicted_usage_static_vector)\n\n tid_2order = cpu.get_natural_indexing_2_original_indexing()\n thread_id2workload_ids 
= defaultdict(list)\n\n for w_ind, v in enumerate(new_placement_vectors):\n for i, e in enumerate(v):\n if e == 1:\n thread_id2workload_ids[tid_2order[i]].append(ordered_workload_ids_static[w_ind])\n\n release_all_threads(cpu, workloads.values())\n self.__assign_new_mapping(cpu, thread_id2workload_ids)\n\n # TODO: log what's in print_statistics of compute_v2\n return cpu\n\n def __compute_new_placement(\n self,\n cpu,\n requested_units,\n current_placement,\n predicted_usage):\n\n num_threads = len(cpu.get_threads())\n num_packages = len(cpu.get_packages())\n\n sparse_prev_alloc = None\n if current_placement is not None:\n sparse_prev_alloc = [[i for i, e in enumerate(v) if e == 1] for v in current_placement]\n\n use_per_workload = None\n if predicted_usage is not None:\n use_per_workload = predicted_usage\n\n self.__call_meta['ip_solver_call_args'] = {\n \"req_units\": [int(e) for e in requested_units],\n \"num_threads\": num_threads,\n \"num_packages\": num_packages\n }\n\n if sparse_prev_alloc is not None:\n self.__call_meta['ip_solver_call_args']['previous_allocation'] = sparse_prev_alloc\n if use_per_workload is not None:\n self.__call_meta['ip_solver_call_args']['use_per_workload'] = use_per_workload\n\n try:\n placement_solver = PlacementSolver(\n total_available_cus=num_threads,\n num_sockets=num_packages,\n solver_params=self.__ip_solver_params,\n backend=self.__solver_name)\n\n start_time = time.time()\n\n placement, status, prob, _ = placement_solver.optimize(\n requested_cus=requested_units,\n previous_allocation=current_placement,\n use_per_workload=predicted_usage,\n verbose=False,\n max_runtime_secs=self.__solver_max_runtime_secs,\n mip_gap=self.__solver_mip_gap)\n\n stop_time = time.time()\n\n self.__call_meta['ip_solver_call_dur_secs'] = stop_time - start_time\n self.__call_meta['ip_success'] = 1\n internal_solver_time = prob.solution.attr.get('solve_time', None)\n if internal_solver_time is not None:\n self.__call_meta['ip_internal_solver_call_dur_secs'] = internal_solver_time\n\n if status == IP_SOLUTION_TIME_BOUND:\n self.__time_bound_call_count += 1\n\n except Exception as e:\n self.__call_meta['ip_success'] = 0\n raise e\n\n return placement\n\n def set_solver_max_runtime_secs(self, val):\n self.__solver_max_runtime_secs = val\n\n def set_registry(self, registry, tags):\n self.__reg = registry\n\n def report_metrics(self, tags):\n self.__reg.gauge(IP_ALLOCATOR_TIMEBOUND_COUNT, tags).set(self.__time_bound_call_count)\n self.__reg.gauge(FORECAST_REBALANCE_FAILURE_COUNT, tags).set(self.__rebalance_failure_count)\n","sub_path":"titus_isolate/allocate/forecast_ip_cpu_allocator.py","file_name":"forecast_ip_cpu_allocator.py","file_ext":"py","file_size_in_byte":14384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"75969662","text":"from os.path import join\nimport json\nimport pickle\nimport difflib\n\n\ndef load_maps(path, method, tag_schema):\n with open(join(path, '{}2id.json'.format(method))) as f:\n word2id = json.loads(f.read())\n\n with open(join(path, '{}_tag2id.json'.format(tag_schema))) as f:\n tag2id = json.loads(f.read())\n\n return word2id, tag2id\n\n\ndef save_model(model, file_name):\n \"\"\"用于保存模型\"\"\"\n with open(file_name, \"wb\") as f:\n pickle.dump(model, f)\n\n\ndef load_model(file_name):\n \"\"\"用于加载模型\"\"\"\n with open(file_name, \"rb\") as f:\n model = pickle.load(f)\n return model\n\n\ndef flatten_lists(lists):\n flatten_list = []\n for l in lists:\n if type(l) == list:\n flatten_list += l\n 
else:\n flatten_list.append(l)\n return flatten_list\n\n\ndef get_meta(configs):\n \"\"\"获取模型信息\"\"\"\n meta = {}\n for config_dict in configs:\n for key, value in config_dict.items():\n if not key.startswith(\"__\"):\n meta[key] = value\n return meta\n\n\ndef back_map(ids_, lengths, map_, drop_last=False):\n \"\"\"反映射,将数字id表示的tag或者word转化为源文本\"\"\"\n if drop_last:\n lengths = [length-1 for length in lengths]\n\n bmap = dict((v, k) for k, v in map_.items())\n if type(ids_) != list:\n ids_ = ids_.tolist()\n\n results = []\n for id_list, length in zip(ids_, lengths):\n result = [bmap[id_] for id_ in id_list][:length]\n results.append(result)\n\n return results\n\n\ndef convert_loader(labeled_loader):\n \"\"\"labeled_loader --> unlabeled_loader for model.test function\"\"\"\n\n unlabeled_loader = []\n all_golden_tags = []\n none_id = [None]\n all_lengths = []\n for batch_articles, batch_tags, batch_lengths in labeled_loader:\n unlabeled_loader.append(\n (batch_articles, none_id * len(batch_lengths), batch_lengths))\n all_golden_tags += batch_tags.tolist()\n all_lengths += batch_lengths\n\n return unlabeled_loader, all_golden_tags, all_lengths\n\n\ndef get_edit_distance(str1, str2):\n \"\"\"衡量两个字符串之间的编辑距离\n 参考:https://www.jianshu.com/p/466cf6624e26\n \"\"\"\n leven_cost = 0\n s = difflib.SequenceMatcher(None, str1, str2)\n for tag, i1, i2, j1, j2 in s.get_opcodes():\n if tag == 'replace':\n leven_cost += max(i2-i1, j2-j1)\n elif tag == 'insert':\n leven_cost += (j2-j1)\n elif tag == 'delete':\n leven_cost += (i2-i1)\n return leven_cost\n","sub_path":"utils/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":2544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"551312193","text":"from flask import Flask, request, render_template\nimport re\nimport telegram\nfrom telebot import user_register\nfrom telebot import messagies_processing as msg_proc\nimport os\n\n\nbot_user_name = \"QuestLabyrinthBot\"\nURL = \"https://quest-labyrinth.herokuapp.com/\"\nbot_token = os.environ['bot_token']\n\nglobal bot\nglobal TOKEN\nTOKEN = bot_token\nbot = telegram.Bot(token=TOKEN)\n\napp = Flask(__name__)\n\n#for routes read here: https://flask.palletsprojects.com/en/1.0.x/quickstart/\n@app.route('/{}'.format(TOKEN), methods=['POST'])\ndef respond():\n # retrieve the message in JSON and then transform it to Telegram object\n update = telegram.Update.de_json(request.get_json(force=True), bot)\n chat_id:int = update.message.chat.id\n msg_id:int = update.message.message_id\n\n # Telegram understands UTF-8, so encode text for unicode compatibility\n text:str = update.message.text.encode('utf-8').decode()\n # for debugging purposes only\n print(\"got text message :\", text)\n # the first time you chat with the bot AKA the welcoming message\n\n if text.find(\"start\") != -1 or text == \"Начать сначала\":\n bot_welcome = \"\"\"\n Этот бот является текстовым квестом. Ваша задача выбраться из лабиринта, не попавшись монстрам в лапы. \n \"\"\"\n # send the welcoming message\n bot.sendMessage(chat_id=chat_id, text=bot_welcome)\n\n # registration:\n user_id:int = update.message.from_user.id\n\n is_registered: bool\n answer_text: str\n possible_actions: list\n is_registered, answer_text, possible_actions = user_register.registration(user_id) \n if is_registered:\n print(\"User was successfull registered and his state set to init values\")\n answer_text = \"\"\"Вы просыпаетесь от того, что истошный резкий крик врывается в ваше сознание. 
Вокруг вас лишь одна пугающая темнота. И вдруг этот ужасный крик перходит в булькающие звуки. Кровь стынет у вас в жилах, а в голове царит паника... Вы осознаете, что находитесь в тесном и узком коридоре, ведущем в темноту и таящем в себе неизведанные опасности. Надо спасаться! Но каждый свой шаг вы должны тщательно обдумать, ведь в загадочной темноте вас поджидает монстр, от одного крика которого волосы встают дыбом...\"\"\"\n possible_actions.append(['Начать сначала'])\n key_board = telegram.ReplyKeyboardMarkup(possible_actions)\n bot.sendMessage(chat_id=chat_id, text=answer_text, reply_markup = key_board)\n else:\n answer_text = \"Во время регистрации что-то пошло не так.\"\n bot.sendMessage(chat_id=chat_id, text=answer_text, reply_to_message_id=msg_id)\n \n else: \n try:\n user_id:int = update.message.from_user.id\n answer_text, possible_actions = msg_proc.prepare_answer(text, user_id)\n possible_actions.append(['Начать сначала'])\n print(possible_actions)\n if type(possible_actions[0])==list and len(possible_actions[0])>2:\n new_actions = []\n new_actions.append(possible_actions[0][:2])\n new_actions.append(possible_actions[0][2:])\n new_actions.append(possible_actions[1])\n print(new_actions)\n possible_actions = new_actions\n\n key_board = telegram.ReplyKeyboardMarkup(possible_actions)\n bot.sendMessage(chat_id=chat_id, text=answer_text, reply_markup = key_board)\n except Exception:\n # if things went wrong\n bot.sendMessage(chat_id=chat_id, text=\"Упс, что-то пошло не так. Попробуйте перезапустить бота.\")\n\n return 'ok'\n\n# for messagies arriving:\n@app.route('/setwebhook', methods=['GET', 'POST'])\ndef set_webhook():\n bot.deleteWebhook()\n s = bot.setWebhook('{URL}{HOOK}'.format(URL=URL, HOOK=TOKEN))\n # something to let us know things work\n if s:\n return \"webhook setup ok\"\n else:\n return \"webhook setup failed\"\n \n@app.route('/')\ndef index():\n return render_template('index.html')\nif __name__ == '__main__':\n app.run(threaded=True)\n set_webhook()\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"163237945","text":"# =================================================================\n#\n# Authors: Tom Kralidis \n#\n# Copyright (c) 2014 Tom Kralidis\n#\n# Permission is hereby granted, free of charge, to any person\n# obtaining a copy of this software and associated documentation\n# files (the \"Software\"), to deal in the Software without\n# restriction, including without limitation the rights to use,\n# copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following\n# conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES\n# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n# NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,\n# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR\n# OTHER DEALINGS IN THE SOFTWARE.\n#\n# =================================================================\n\nDEBUG = False\nSQLALCHEMY_ECHO = False\nSQLALCHEMY_DATABASE_URI = 'sqlite:///data.db'\n\n# Replace None with 'your secret key string' in quotes\nSECRET_KEY = None\n\nGHC_RETENTION_DAYS = 30\nGHC_RUN_FREQUENCY = 'hourly'\nGHC_SELF_REGISTER = False\nGHC_NOTIFICATIONS = False\nGHC_ADMIN_EMAIL = 'you@example.com'\nGHC_SITE_TITLE = 'GeoHealthCheck Demonstration'\nGHC_SITE_URL = 'http://host'\n\nGHC_SMTP = {\n 'server': None,\n 'port': None,\n 'tls': False,\n 'ssl': False,\n 'username': None,\n 'password': None\n}\n\nGHC_RELIABILITY_MATRIX = {\n 'red': {\n 'min': 0,\n 'max': 49\n },\n 'orange': {\n 'min': 50,\n 'max': 79\n },\n 'green': {\n 'min': 80,\n 'max': 100\n }\n}\n\nGHC_MAP = {\n 'url': 'http://otile{s}.mqcdn.com/tiles/1.0.0/osm/{z}/{x}/{y}.png',\n 'centre_lat': 42.3626,\n 'centre_long': -71.0843,\n 'maxzoom': 18,\n 'subdomains': 1234,\n}\n","sub_path":"GeoHealthCheck/config_main.py","file_name":"config_main.py","file_ext":"py","file_size_in_byte":2189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"13323165","text":"import serial\nimport csv\n\nser = serial.Serial(port='COM7') # open serial port\nprint(ser.name) \n\nadresse_mac=[]\nadresse_mac_triee=[]\nliste_remplie=False\nen_cours = True\n\nwhile en_cours:\n\tligne = ser.readline().decode(\"utf-8\").replace('\\r\\n', \"\")\n\tprint(ligne) \n\t\n\twhile ligne != \" ---- \" and not liste_remplie:\n\t\tadresse_mac.append(ligne.split(\" & \")[1])\n\t\tligne = ser.readline().decode(\"utf-8\").replace('\\r\\n', \"\")\n\t\tif ligne == \" ---- \":\n\t\t\tprint(adresse_mac)\n\t\t\tliste_remplie=True\n\n\tif ligne != \" ---- \" and not ligne.split(\" & \")[1] in adresse_mac_triee:\n\t\tif float(ligne.split(\" & \")[2]) >= 35 and float(ligne.split(\" & \")[2]) < 70:\n\t\t\tadresse_mac_triee.append(ligne.split(\" & \")[1])\n\t\t\tif len(adresse_mac) == len(adresse_mac_triee):\n\t\t\t\ten_cours = False\n\tprint(adresse_mac_triee)\n\nser.close() \n\nf = open(\"Bus de test.csv\", \"w\")\nc = csv.writer(f)\nc.writerow([\"index\", \"Adresse Mac\"])\ncpt = 0\nfor el in adresse_mac_triee:\n\tc.writerow([cpt, el])\n\tcpt += 1\nf.close()\n\n \n\n","sub_path":"Capteurs.py","file_name":"Capteurs.py","file_ext":"py","file_size_in_byte":975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"116777041","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport argparse\n\n# Parse labels (dataset names).\nwith open('seq.txt', 'r') as f:\n labels = list(map(lambda x: x.strip(), f.readlines()))\n\n# Parse numbers of failures for each tracker.\nn_failures1 = np.loadtxt(\"./n_failures_ms.txt\").astype(int)\nn_failures2 = np.loadtxt(\"./n_failures_ncc.txt\").astype(int)\n\n# Plot results as a bar plot.\nfig, ax = plt.subplots()\nwidth=0.35\nrects1 = ax.bar(np.arange(len(labels)) - width/2, n_failures1, width, label='mean-shift tracker')\nrects2 = ax.bar(np.arange(len(labels)) + width/2, n_failures2, width, label='normalized cross-correlation tracker')\nax.set_xticks(np.arange(len(labels)))\nax.set_xticklabels(labels)\nax.legend()\nplt.xticks(rotation=45)\nplt.ylabel('number of 
failures')\nplt.show()\n\n","sub_path":"assignments/2/src/comp_stats.py","file_name":"comp_stats.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"523917743","text":"# -*- coding: utf-8 -*- \n\nfrom distutils.core import setup\n\nlong_description = open('README.rst').read()\n\nsetup(\n name='django-robots',\n version='1.6.2alt',\n description='Django robots.txt generator',\n long_description=long_description,\n url='https://github.com/valeriansaliou/django-robots',\n author='Valérian Saliou',\n author_email='valerian@valeriansaliou.name',\n license='Python Software Foundation License',\n packages=['robots', 'robots.tests'],\n package_data={'robots': ['templates/*.*']},\n platforms=['any'],\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: Python Software Foundation License',\n 'Natural Language :: English',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Environment :: Web Environment',\n 'Framework :: Django',\n ],\n)","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"155924213","text":"#!/usr/bin/env python\n\nimport os\nimport subprocess\nimport tempfile\nfrom cloudify import ctx\nfrom cloudify.state import ctx_parameters as inputs\n\nMOUNT = ('#!/bin/bash\\n' +\n 'echo $@ >> /var/log/mount-calls.log\\n' +\n '{0} kubernetes $1 $2 $3 -deployment \"{1}\" -instance \"{2}\" ' +\n '-tenant \"{3}\" -password \"{4}\" -user \"{5}\" -host \"{6}\"')\n\n\ndef execute_command(_command, extra_args=None):\n\n ctx.logger.debug('_command {0}.'.format(_command))\n\n subprocess_args = {\n 'args': _command.split(),\n 'stdout': subprocess.PIPE,\n 'stderr': subprocess.PIPE\n }\n if extra_args is not None and isinstance(extra_args, dict):\n subprocess_args.update(extra_args)\n\n ctx.logger.debug('subprocess_args {0}.'.format(subprocess_args))\n\n process = subprocess.Popen(**subprocess_args)\n output, error = process.communicate()\n\n ctx.logger.debug('command: {0} '.format(_command))\n ctx.logger.debug('output: {0} '.format(output))\n ctx.logger.debug('error: {0} '.format(error))\n ctx.logger.debug('process.returncode: {0} '.format(process.returncode))\n\n if process.returncode:\n ctx.logger.error('Running `{0}` returns error.'.format(_command))\n return False\n\n return output\n\n\nif __name__ == '__main__':\n\n ctx.logger.info('Configuring Kubelet Volume Plugin.')\n\n cfy_go_binary_path = \\\n inputs.get('cfy_go_binary_path', '/usr/bin/cfy-go')\n\n plugin_directory = \\\n inputs.get(\n 'plugin_directory',\n '/usr/libexec/kubernetes/'\n 'kubelet-plugins/volume/exec/cloudify~mount/')\n\n cfy_deployment = \\\n inputs.get('cfy_deployment', ctx.deployment.id)\n\n cfy_instance = \\\n inputs.get('cfy_instance', ctx.instance.id)\n\n cfy_user = \\\n inputs.get('cfy_user', 'admin')\n\n cfy_pass = \\\n inputs.get('cfy_password', 'admin')\n\n cfy_tenant = \\\n inputs.get('cfy_tenant', 'default_tenant')\n\n cfy_host = \\\n inputs.get('cfy_host', 'localhost')\n\n cfy_ssl = \\\n inputs.get('cfy_ssl', False)\n\n if os.path.exists('/usr/bin/cfy-go'):\n ctx.logger.debug(\n 'Cfy Go Binary already at {0}'.format(cfy_go_binary_path))\n else:\n ctx.logger.debug(\n 'Copying Cfy Go Binary to {0}'.format(cfy_go_binary_path))\n execute_command(\n 'sudo cp /opt/bin/cfy-go 
{0}'.format(cfy_go_binary_path))\n\n execute_command('sudo chmod 555 {0}'.format(cfy_go_binary_path))\n execute_command('sudo chown root:root {0}'.format(cfy_go_binary_path))\n\n ctx.logger.info(\"Update create cfy-mount\")\n _, temp_mount_file = tempfile.mkstemp()\n\n with open(temp_mount_file, 'w') as outfile:\n outfile.write(MOUNT.format(\n cfy_go_binary_path,\n cfy_deployment,\n cfy_instance,\n cfy_tenant,\n cfy_pass,\n cfy_user,\n cfy_host if not cfy_ssl else \"https://\" + cfy_host))\n\n execute_command('sudo mkdir -p {0}'.format(\n plugin_directory))\n execute_command('sudo cp {0} {1}'.format(\n temp_mount_file,\n os.path.join(plugin_directory, 'mount')))\n execute_command('sudo chmod 555 {0}'.format(\n os.path.join(plugin_directory, 'mount')))\n execute_command('sudo chown root:root {0}'.format(\n os.path.join(plugin_directory, 'mount')))\n","sub_path":"examples/cluster_blueprint/scripts/kubelet_volume/install.py","file_name":"install.py","file_ext":"py","file_size_in_byte":3344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"648399634","text":"# 6)\n# Dany jest slownik:\nmiesiace = {'luty': 28, 'maj': 31, 'lipiec': 31,\n 'wrzesien': 55, 'listopad': 20}\n# jakie sa wartosci nastepujacych wyrazen:\n# a)\n# ----------------\n[k for k in miesiace]\n\n\" and \".join([\"%s in %s\" % (v, k) for k, v in miesiace.items()])\n\nreduce(lambda x, y: x+y, miesiace.values())\n\n# ----------------\n# b)\n# Napisz instrukcje, ktora:\n# - wypisze liczby wystepujace w slowniku miesiace\n# - zmieni liczbe dni lutego na 29\n# - usunie dane lipca lacznie z lipcem\n# - przypisze miesiacom kolejne liczby calkowice zaczynajac od 1\n","sub_path":"Kolokwium/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"269280725","text":"#对每个样本特征做相关性分析\nimport pandas as pd\nimport numpy as np\nimport scipy.stats as stats\n\nDeleteIndex = np.array([])\n\ndef data2mat(filename): #将Gname恐怖组织序列标签化\n dataMat = pd.read_excel(filename)\n dataset = dataMat.drop(['gname'],axis=1).values\n gnameMat = dataMat['gname']\n gnamearray = gnameMat.values\n gnamelist = gnamearray.tolist()\n gnamesort = gnameMat.drop_duplicates().values #去重过后的标签\n gnamesort_list = gnamesort.tolist()\n gname_num = {gnamesort_list[1]:0} #初始化字典\n gnamesort = np.delete(gnamesort, 1)#所有类别列\n gnamenumMat = []\n #建立字典 组织-序号\n for i in range(gnamesort.shape[0]+1):\n if(gnamesort_list[i] == 'Unknown'):\n pass\n else:\n gname_num[gnamesort_list[i]] = i\n gname_num['Hutu extremists'] = 1\n for j in range(gnamearray.shape[0]):\n gnamenumMat.append(gname_num[gnamelist[j]])\n gnamenumMat = np.array(gnamenumMat)\n\n return np.c_[dataset,gnamenumMat]\n\ndef frequency(dataset,col): #dataset:array 将对应列的特征按照出现的频率标签化\n year_num = dataset.shape[0] #获取样本的个数\n year = pd.DataFrame(dataset[:,col]).drop_duplicates() #获取一共有多少年份属性\n length = len(year)\n year = year.values.reshape(1,length)[0]\n yearsum = dataset[:,col]\n year_frequency = {}\n for i in year: #求取每一年的占比\n num = len(np.nonzero(yearsum == i)[0])\n year_frequency[i] = round(num/year_num,5)\n for i in range(year_num):\n dataset[i,col] = year_frequency[dataset[i,col]]\n if (col == 27):\n zeroindex = np.nonzero(yearsum == 0.42864)[0]\n dataset[zeroindex,col] = 0\n return dataset\n\ndef test(dataset,col): #将-9的未知量转化为其他样本对应特征的平均值\n data = dataset[:,col]\n index = np.nonzero(data != -9)[0] #获取不是-9的列索引\n index_reverse = np.nonzero(data == -9)[0] 
#获取不是-9的列索引\n dataverage = data[index]\n average = sum(dataverage)/len(dataverage)\n dataset[index_reverse,col] = average\n return dataset\n\ndef average(dataset,col): #对应列特征数据归一化\n data = dataset[:, col] #获取数据集的总和\n nonanindex = np.nonzero(data == data)[0]\n datamax = max(data[nonanindex])\n datamin = min(data[nonanindex])\n rang = datamax - datamin\n dataset[:, col] = (data - datamin)/rang\n return dataset\n\ndef exchange(dataset): #替换数值函数\n victiny = dataset[:,3]\n zeroindex = np.nonzero(victiny == 0)[0]\n oneindex = np.nonzero(victiny == 1)[0]\n dataset[zeroindex,3] = 1\n dataset[oneindex,3] = 0\n propextent = dataset[:,20]\n P4 = np.nonzero(propextent == 4)[0]\n P3 = np.nonzero(propextent == 3)[0]\n P2 = np.nonzero(propextent == 2)[0]\n P1 = np.nonzero(propextent == 1)[0]\n dataset[P4, 20] = 0.25\n dataset[P3, 20] = 0.5\n dataset[P2, 20] = 0.75\n dataset[P1, 20] = 1\n return dataset\n\ndef judgement(num1,col): #num1:nkill num2:nwound 分级函数\n #{0:CeP,1:Minimal,2:Minor,3:Major,4:Catastrophic}\n level = -1\n if(col == 17):\n if(num1 == 0):\n level = 0\n elif(num1>=1 and num1<3):\n level = 1\n elif(num1>=3 and num1<10):\n level = 2\n elif (num1 >= 10 and num1 < 30):\n level = 3\n else:\n level = 4\n elif(col == 18):\n if(num1 == 0):\n level = 0\n elif(num1>=1 and num1<10):\n level = 1\n elif (num1 >= 10 and num1 < 50):\n level = 2\n elif (num1 >= 50 and num1 < 100):\n level = 3\n else:\n level = 4\n return level\n\ndef kill_wound(dataset,col):\n #首先处理死亡人数\n num = dataset.shape[0]\n data = dataset[:,col]\n level = []\n for i in range(num):\n level.append(judgement(data[i],col))\n dataset[:, col] = np.array(level)\n return dataset\n\n#def kill_wound(dataset): #合并伤亡人数与伤亡程度\n# data = dataset[]\n\ndef delete(dataset,DeleteIndex): #处理经济损失特征,并将关于两个绑架特征删除\n #删除property中的-9\n data = dataset[:,19]\n todelIndex = np.nonzero(data != -9)[0]\n Index = np.nonzero(data == -9)[0]\n DeleteIndex = np.r_[DeleteIndex, Index].astype(np.int32)\n dataset = dataset[todelIndex,:]\n data = dataset[:,20]\n nonindex = np.nonzero(data != data)[0]\n dataset[nonindex,20] = 0\n dataset = np.delete(dataset,[19,21,22],axis=1)\n return dataset,DeleteIndex\n\ndef delete_nan(dataset,col,DeleteIndex): #删除对应列的空值\n data = dataset[:,col]\n nanindex = np.nonzero(data != data)[0]\n DeleteIndex = np.r_[DeleteIndex,nanindex].astype(np.int32)\n dataset = np.delete(dataset,nanindex,axis=0)\n return dataset,DeleteIndex\n\ndef delete_nine(dataset,col,DeleteIndex):\n data = dataset[:, col]\n nanindex = np.nonzero(data == -9)[0]\n DeleteIndex = np.r_[DeleteIndex, nanindex].astype(np.int32)\n dataset = np.delete(dataset, nanindex, axis=0)\n return dataset,DeleteIndex\n\ndef count(dataset): #用来获取每个特征为空值得个数\n num = dataset.shape[1] #获得特征数\n nan_count = []\n for i in range(num):\n data = dataset[:,i]\n nanindex = np.nonzero(data == -9)[0]\n count = len(nanindex) #一个特征的空值数\n nan_count.append(count)\n print(nan_count)\n\ndef pearsonr1(dataset): #判断显性相关\n r_sum = np.array([0 for i in range(dataset.shape[1])])\n p_sum = np.array([0 for i in range(dataset.shape[1])])\n num = dataset.shape[1] #得到特征的值\n for i in range(num):\n r = []\n p = []\n for j in range(num):\n r1, p1 = stats.pearsonr(dataset[:,i], dataset[:,j])\n r.append(r1)\n p.append(p1)\n r_sum = np.c_[r_sum, np.array(r)]\n p_sum = np.c_[p_sum, np.array(p)]\n return r_sum[:,1:],p_sum[:,1:]\n\ndef add_feature(dataset):\n aa = dataset[:,1]\n bb = dataset[:,7]\n cc = dataset[:, 8]\n dd = dataset[:, 10]\n ee = dataset[:, 19]\n dataset = np.delete(dataset, [1,7,8,10,19], axis=1)\n return 
np.c_[dataset,aa+bb+cc+dd+ee]\n\ndef add_feature2(dataset):\n aa = dataset[:, 3]\n bb = dataset[:, 4]\n cc = dataset[:, 5]\n dd = dataset[:, 2]\n ee = dataset[:, 14]\n sum = aa+bb+cc+ee+dd\n dataset[:,14] = sum\n dataset = np.delete(dataset, [2,3,4,5], axis=1)\n return dataset\n\n\naa = data2mat('附件1.xlsx') #将组织名称变为序号 并且放到最后\nbb = frequency(aa,0) #年份频率\ncc = frequency(frequency(bb,11),27) #组织名称频率 攻击方式频率\n#dd = test(test(test(cc,26),24),23) #将INT_ANY INT_IDEO INT_LOG的-9换为平均值\n#pd.DataFrame(dd).to_csv(\"dataset1.csv\",index=False,sep=',')\n\nee = kill_wound(cc,18) #给死亡人数定级\nee = kill_wound(ee,17) #给受伤人数定级\nff1 = exchange(ee)\nff = exchange(ee) #给财产损失按范围定级,并将Vicinity1和0互换\nff,ii = delete(ff,DeleteIndex) #删掉property ishostkid nhostkid\nff,ii = delete_nan(ff,18,ii)\nff,ii = delete_nan(ff,17,ii)\nff,ii = delete_nan(ff,16,ii)\nff,ii = delete_nan(ff,13,ii)\nff,ii = delete_nan(ff,12,ii)\nff = np.delete(ff,14,axis=1) #删掉individual\nff = np.delete(ff,[19,20,22],axis=1) #删掉INT\nff,ii = delete_nine(ff,3,ii)\nff = add_feature(ff)\nff = np.delete(ff,9,axis=1) #删掉nality\nff = np.delete(ff,10,axis=1) #删掉nality\nff = add_feature2(ff)\nrr ,pp = pearsonr1(ff)\nff = pd.DataFrame(ff)\n\n#print(ff.corr())\nff.corr().to_csv(\"relative.csv\",sep=',')\n\npd.DataFrame(rr).to_csv(\"rr.csv\",index=False,sep=',')\npd.DataFrame(pp).to_csv(\"pp.csv\",index=False,sep=',')\n","sub_path":"RelativeAnalysis.py","file_name":"RelativeAnalysis.py","file_ext":"py","file_size_in_byte":7672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"396569613","text":"\"\"\"\r\n A Keras implementation of a volumetric variational auto-encoder.\r\n\r\n This class is used to instantiate a train-ready model.\r\n\"\"\"\r\n\r\nimport keras.backend as backend\r\nfrom keras.models import Model\r\nfrom keras.layers import Input, Dense, Conv3D, Flatten, Reshape, Conv3DTranspose, Lambda, BatchNormalization\r\nfrom keras.layers.advanced_activations import LeakyReLU, ReLU\r\n\r\n\r\nclass VAE3D:\r\n\r\n \"\"\"\r\n Must take in parameters:\r\n - Batch size: Between 8 to 128, multiples of 8 or 16.\r\n - Optimizer: Keras optimizer object.\r\n - Kernel size: Size of convolutional kernels.\r\n - Structure parameters: For deciding the structure of the VAE, a dictionary of the format:\r\n {\r\n 'smallest': INTEGER, # Number of filters of outermost convolutional layer.\r\n 'middle': INTEGER, # Number of filters of middle convolutional layer.\r\n 'second_middle': INTEGER, # Number of filters of second middle convolutional layer.\r\n 'largest': INTEGER, # Number of filters of innermost convolutional layer.\r\n 'reshape_dimensions': INTEGER, # Dimensions of dense layer, right after latent vector.\r\n 'reshape_shape': Tuple of shape: (INTEGER, INTEGER, INTEGER, INTEGER) # Shape to reshape dense vector.\r\n }\r\n \"\"\"\r\n def __init__(self, batch_size, optimizer, latent_dimensions, kernel_size, structure_parameters):\r\n self.INPUT_SHAPE = (32, 32, 32, 1)\r\n self.LATENT_DIMENSIONS = latent_dimensions\r\n self.BATCH_SIZE = batch_size\r\n self.OPTIMIZER = optimizer\r\n self.KERNEL_SIZE = kernel_size\r\n\r\n self.outermost_layer = structure_parameters['outermost']\r\n self.middle_layer = structure_parameters['middle']\r\n self.innermost_layer = structure_parameters['innermost']\r\n self.reshape_dimensions = 4 * 4 * 4 * structure_parameters['innermost']\r\n self.reshape_shape = (4, 4, 4, structure_parameters['innermost'])\r\n\r\n self._set_up_encoder_layers()\r\n self._set_up_latent_layers()\r\n 
self._set_up_decoder_layers()\r\n self._set_up_models()\r\n\r\n def _set_up_encoder_layers(self):\r\n self.e_input = Input(shape=self.INPUT_SHAPE)\r\n\r\n self.e_cv_1 = Conv3D(filters=self.outermost_layer, kernel_size=self.KERNEL_SIZE, strides=2, padding='same')(self.e_input)\r\n self.e_bn_1 = BatchNormalization()(self.e_cv_1)\r\n self.e_lr_1 = LeakyReLU()(self.e_bn_1)\r\n\r\n self.e_cv_2 = Conv3D(filters=self.middle_layer, kernel_size=self.KERNEL_SIZE, strides=2, padding='same')(self.e_lr_1)\r\n self.e_bn_2 = BatchNormalization()(self.e_cv_2)\r\n self.e_lr_2 = LeakyReLU()(self.e_bn_2)\r\n\r\n self.e_cv_3 = Conv3D(filters=self.innermost_layer, kernel_size=self.KERNEL_SIZE, strides=2, padding='same')(self.e_lr_2)\r\n self.e_bn_3 = BatchNormalization()(self.e_cv_3)\r\n self.e_lr_3 = LeakyReLU()(self.e_bn_3)\r\n\r\n self.e_flat = Flatten()(self.e_lr_3)\r\n\r\n def _set_up_latent_layers(self):\r\n self.mean = Dense(self.LATENT_DIMENSIONS, activation='linear')(self.e_flat)\r\n self.variance = Dense(self.LATENT_DIMENSIONS, activation='linear')(self.e_flat)\r\n self.latent_vector = Lambda(self.sample_latent_vector)([self.mean, self.variance])\r\n\r\n def _set_up_decoder_layers(self):\r\n self.d_dense = Dense(self.reshape_dimensions)\r\n self.d_bn_1 = BatchNormalization()\r\n self.d_rl_1 = ReLU()\r\n self.d_reshape = Reshape(self.reshape_shape)\r\n\r\n self.d_dcv_1 = Conv3DTranspose(filters=self.middle_layer, kernel_size=self.KERNEL_SIZE, strides=2, padding='same')\r\n self.d_bn_2 = BatchNormalization()\r\n self.d_rl_2 = ReLU()\r\n\r\n self.d_dcv_2 = Conv3DTranspose(filters=self.outermost_layer, kernel_size=self.KERNEL_SIZE, strides=2, padding='same')\r\n self.d_bn_3 = BatchNormalization()\r\n self.d_rl_3 = ReLU()\r\n\r\n self.d_dcv_3 = Conv3DTranspose(filters=1, kernel_size=self.KERNEL_SIZE, strides=2, padding='same', activation='sigmoid')\r\n\r\n def _set_up_models(self):\r\n vae_d_input = self.d_dense(self.latent_vector)\r\n vae_bn_1 = self.d_bn_1(vae_d_input)\r\n vae_rl_1 = self.d_rl_1(vae_bn_1)\r\n vae_d_reshape = self.d_reshape(vae_rl_1)\r\n\r\n vae_d_dcv_1 = self.d_dcv_1(vae_d_reshape)\r\n vae_bn_2 = self.d_bn_2(vae_d_dcv_1)\r\n vae_rl_2 = self.d_rl_2(vae_bn_2)\r\n\r\n vae_d_dcv_2 = self.d_dcv_2(vae_rl_2)\r\n vae_bn_3 = self.d_bn_3(vae_d_dcv_2)\r\n vae_rl_3 = self.d_rl_3(vae_bn_3)\r\n\r\n vae_d_dcv_3 = self.d_dcv_3(vae_rl_3)\r\n\r\n self.vae_model = Model(self.e_input, vae_d_dcv_3)\r\n\r\n # Encoder Model\r\n self.vae_encoder = Model(self.e_input, self.mean)\r\n\r\n # Decoder Model\r\n decoder_input = Input(shape=(self.LATENT_DIMENSIONS,))\r\n decoder_dense = self.d_dense(decoder_input)\r\n decoder_bn_1 = self.d_bn_1(decoder_dense)\r\n decoder_rl_1 = self.d_rl_1(decoder_bn_1)\r\n decoder_reshape = self.d_reshape(decoder_rl_1)\r\n\r\n decoder_d_dcv_1 = self.d_dcv_1(decoder_reshape)\r\n decoder_bn_2 = self.d_bn_2(decoder_d_dcv_1)\r\n decoder_rl_2 = self.d_rl_2(decoder_bn_2)\r\n\r\n decoder_d_dcv_2 = self.d_dcv_2(decoder_rl_2)\r\n decoder_bn_3 = self.d_bn_3(decoder_d_dcv_2)\r\n decoder_rl_3 = self.d_rl_3(decoder_bn_3)\r\n\r\n decoder_d_dcv_3 = self.d_dcv_3(decoder_rl_3)\r\n\r\n self.vae_decoder = Model(decoder_input, decoder_d_dcv_3)\r\n\r\n self.vae_model.compile(loss=self.vae_loss, optimizer=self.OPTIMIZER, metrics=['accuracy'])\r\n self.vae_decoder.compile(loss=self.vae_loss, optimizer=self.OPTIMIZER, metrics=['accuracy'])\r\n\r\n def sample_latent_vector(self, args):\r\n mean, variance = args\r\n epsilon = backend.random_normal(shape=(self.BATCH_SIZE, self.LATENT_DIMENSIONS), mean=0.,\r\n 
stddev=1.)\r\n return mean + backend.exp(variance / 2) * epsilon\r\n\r\n def vae_loss(self, predicted, true):\r\n \"\"\" Loss = reconstruction loss + KL loss for each data in minibatch \"\"\"\r\n reconstruction = backend.sum(backend.binary_crossentropy(predicted, true), axis=1)\r\n kl = 0.5 * backend.sum(\r\n backend.exp(self.variance) + backend.square(self.mean) - 1. - self.variance,\r\n axis=1)\r\n\r\n return reconstruction + kl\r\n\r\n def print_model_summary(self):\r\n self.vae_encoder.summary()\r\n self.vae_decoder.summary()\r\n self.vae_model.summary()\r\n","sub_path":"models/vae3d.py","file_name":"vae3d.py","file_ext":"py","file_size_in_byte":6552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"337768589","text":"#!/usr/bin/env python3\n\"\"\"\nUtility app for the PubChem PUG REST API.\n\"\"\"\n###\nimport sys,os,re,argparse,time,logging\n#\nfrom .. import pubchem\n#\nAPI_HOST='pubchem.ncbi.nlm.nih.gov'\nAPI_BASE_PATH='/rest/pug'\n#\n##############################################################################\nif __name__=='__main__':\n ops = [\n \"list_sources_substance\", \"list_sources_assay\",\n \"get_name2sid\", \"get_name2cid\", \"get_name2synonyms\",\n \"get_smi2cid\",\n \"get_cid2smi\", \"get_cid2smiles\", \"get_cid2sdf\",\n \"get_cid2properties\", \"get_cid2inchi\",\n \"get_cid2synonyms\", \"get_cid2sid\", \"get_cid2assaysummary\",\n \"get_sid2cid\", \"get_sid2sdf\", \"get_sid2assaysummary\",\n \"get_assayname\", \"get_assaydescriptions\", \"get_assayresults\" ]\n parser = argparse.ArgumentParser(description=\"PubChem PUG REST client\")\n parser.add_argument(\"op\",choices=ops,help='operation')\n parser.add_argument(\"--i\", dest=\"ifile\", help=\"input IDs file (CID|SID|SMILES|name)\")\n parser.add_argument(\"--ids\", help=\"input IDs (CID|SID|SMILES|name) (comma-separated)\")\n parser.add_argument(\"--aids\", help=\"input AIDs (comma-separated)\")\n parser.add_argument(\"--iaid\", dest=\"ifile_aid\", help=\"input AIDs file\")\n parser.add_argument(\"--o\", dest=\"ofile\", help=\"output (usually TSV)\")\n parser.add_argument(\"--isomeric\", action=\"store_true\", help=\"return Isomeric SMILES\")\n parser.add_argument(\"--api_host\", default=API_HOST)\n parser.add_argument(\"--api_base_path\", default=API_BASE_PATH)\n parser.add_argument(\"--skip\", type=int, default=0)\n parser.add_argument(\"--nmax\", type=int, default=0)\n parser.add_argument(\"--nmax_per_cid\", type=int, default=20)\n parser.add_argument(\"-v\", \"--verbose\", default=0, action=\"count\")\n args = parser.parse_args()\n\n logging.basicConfig(format='%(levelname)s:%(message)s', level=(logging.DEBUG if args.verbose>1 else logging.INFO))\n\n BASE_URL = 'https://'+args.api_host+args.api_base_path\n\n fout = open(args.ofile, \"w\") if args.ofile else sys.stdout\n\n ids=[]\n if args.ifile:\n fin = open(args.ifile)\n while True:\n line = fin.readline()\n if not line: break\n ids.append(line.rstrip())\n fin.close()\n elif args.ids:\n ids = re.split(r'[,\\s]+', args.ids)\n logging.info('Input IDs: %d'%(len(ids)))\n\n aids=[]\n if args.ifile_aid:\n fin = open(args.ifile_aid)\n while True:\n line = fin.readline()\n if not line: break\n aids.append(line.rstrip())\n fin.close()\n elif args.aids:\n aids = re.split(r'[,\\s]+', args.aids)\n logging.info('Input AIDs: %d'%(len(aids)))\n\n t0=time.time()\n\n if args.op == 'list_sources_assay':\n pubchem.Utils.ListSources(BASE_URL, \"assay\", fout)\n\n elif args.op == 'list_sources_substance':\n 
pubchem.Utils.ListSources(BASE_URL, \"substance\", fout)\n\n elif args.op == 'get_cid2synonyms':\n pubchem.Utils.GetCID2Synonyms(BASE_URL, ids, args.skip, args.nmax, args.nmax_per_cid, fout)\n\n elif args.op == 'get_cid2properties':\n pubchem.Utils.GetCID2Properties(BASE_URL, ids, fout)\n\n elif args.op == 'get_cid2inchi':\n pubchem.Utils.GetCID2Inchi(BASE_URL, ids, fout)\n\n elif args.op == 'get_cid2sid':\n pubchem.Utils.GetCID2SID(BASE_URL, ids, fout)\n\n elif args.op == 'get_cid2smiles':\n pubchem.Utils.GetCID2Smiles(BASE_URL, ids, args.isomeric, fout)\n\n elif args.op == 'get_cid2sdf':\n pubchem.Utils.GetCID2SDF(BASE_URL, ids, fout)\n\n elif args.op == 'get_cid2assaysummary':\n pubchem.Utils.GetCID2AssaySummary(BASE_URL, ids, fout)\n\n elif args.op == 'get_sid2cid':\n pubchem.Utils.GetSID2CID(BASE_URL, ids, fout)\n\n elif args.op == 'get_sid2assaysummary':\n pubchem.Utils.GetSID2AssaySummary(BASE_URL, ids, fout)\n\n elif args.op == 'get_sid2sdf':\n pubchem.Utils.GetSID2SDF(BASE_URL, ids, fout, args.skip, args.nmax)\n\n elif args.op == 'get_smi2cid':\n pubchem.Utils.GetSmiles2CID(BASE_URL, ids, fout)\n\n elif args.op == 'get_name2sid':\n pubchem.Utils.GetName2SID(BASE_URL, ids, fout)\n\n elif args.op == 'get_name2cid':\n pubchem.Utils.GetName2CID(BASE_URL, ids, fout)\n\n elif args.op == 'get_name2synonyms':\n pubchem.Utils.GetName2Synonyms(BASE_URL, ids, fout)\n\n elif args.op == 'get_assayname':\n pubchem.Utils.GetAssayName(BASE_URL, aids, fout)\n\n elif args.op == 'get_assaydescriptions':\n pubchem.Utils.GetAssayDescriptions(BASE_URL, aids, args.skip, args.nmax, fout)\n\n elif args.op == 'get_assayresults':\n if not (aids and ids): parser.error('Input AIDs and SIDs required.')\n pubchem.Utils.GetAssaySIDResults(BASE_URL, aids, ids, args.skip, args.nmax, fout)\n\n else:\n parser.error('Invalid operation: %s'%args.op)\n\n logging.info(('elapsed time: %s'%(time.strftime('%Hh:%Mm:%Ss',time.gmtime(time.time()-t0)))))\n\n","sub_path":"BioClients/pubchem/Client.py","file_name":"Client.py","file_ext":"py","file_size_in_byte":4683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"602906205","text":"from collections import defaultdict\nimport blist\n\nclass Sol:\n def __init__(self):\n self.dic = defaultdict(blist.sortedlist)\n\n def add(self, quote):\n id, price = quote\n self.dic[id].add(price)\n\n def getTopKPrices(self, id, k):\n return self.dic[id][:k]\n\ns = Sol()\ns.add(('FB', 100))\ns.add(('FB', 200))\ns.add(('FB', 150))\nprint(s.getTopKPrices('FB', 2))\n'''\nbt = blist.sortedlist(key=lambda t: t[1])\nbt.add(('FB', 100))\nbt.add(('FB', 200))\nbt.add(('FB', 150))\nprint(bt[0:2])\n'''\n","sub_path":"BB/top_k_stocks.py","file_name":"top_k_stocks.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"466226331","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Aug 31 21:29:47 2017\n\n@author: misakawa\n\"\"\"\n\nfrom ..abst import abstract_object,indent_setter, attrset_sugar, Seq\nfrom ..CSS.Grid import col\n\nclass input_field(col): \n \"\"\"\n See http://materializecss.com/forms.html\n user help : >> help (input_field.init)\n Guide:\n input_field(field_name = 'Username', type = 'text', icon = icon('mode_edit'), id = 'for-username')\n \n Take care for `submit`:\n submit = \\\n input_field(field_name = \"TiJiao/Submit\",\n type = 'submit',\n icon = icon('publish'),\n )\n \"\"\"\n def init(self, grid : 
\"CSS.Grid.grid\" , **attributes):\n sugar = attrset_sugar(self.conf, attributes)\n sugar('icon', None)\n sugar('id', 'Some Id')\n sugar('value', None)\n sugar('type', 'text')\n sugar('field_name', 'Some Field')\n if self.conf['type'].lower() == 'submit':\n if 'class' not in attributes:\n attributes['class'] = \"waves-effect waves-light btn\"\n else:\n attributes['class'] += \"waves-effect waves-light btn\"\n body =\\\n\"\"\"\n{{indent}}\n\"\"\"\n else:\n sugar('class', 'validate')\n body =\\\n\"\"\"\n{{indent}}
\n{% if icon %}\n{{indent+Indent_unit}}{{icon}}\n{% endif %}\n{{indent+Indent_unit}}\n{{indent+Indent_unit}}\n{{indent}}
\n\"\"\" \n \n self.conf.update(dict(indent = \" \", attributes_dict = attributes))\n self.append_class(grid.gen())\n self.body = body\n \n \n \nclass form(indent_setter, abstract_object):\n \"\"\"\n See http://materializecss.com/forms.html\n user help : >> help (form.init)\n Guide:\n a_form = form(\n Seq(\n input_field(grid(s=12), field_name = 'Username', type = 'text', icon = icon('mode_edit'), id = 'for-username'),\n input_field(grid(s=12), field_name = 'Password', type = 'password', icon = icon('brightness_auto'), id = 'for-password'),\n input_field(grid(s=12), field_name = 'School', type = 'text', icon = icon('brightness_3'), id = 'for-school'),\n input_field(grid(s=12), field_name = 'submit', type = 'submit', icon = icon('publish'), id = 'for-submit')->> right_align,\n ),\n action = 'script',\n method = 'POST')\n \"\"\"\n \n def init(self, content :(Seq,[input_field]), **attributes):\n body = \\\n\"\"\"\n{{indent}}
\n{{indent+Indent_unit}}{{content}}\n{{indent}}
\n\"\"\"\n self.conf.update(dict(content = content, indent = \" \", attributes_dict = attributes))\n self.body = body\n\n\n\n ","sub_path":"incantation/Module/Component/Form.py","file_name":"Form.py","file_ext":"py","file_size_in_byte":3162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"139678721","text":"#coding:utf-8 \nimport requests\nimport cookielib\nimport mechanize\nimport urllib\n\nclass Agent114:\n login_url=\"http://www.bjguahao.gov.cn/quicklogin.htm\";\n confirm_url=\"http://www.bjguahao.gov.cn/order/confirm.htm\";\n appoint_url=\"http://www.bjguahao.gov.cn/dpt/appoint/%s-%s.htm\"\n def __init__(self,task):\n cookiejar = cookielib.LWPCookieJar();\n self.session=requests.Session();\n self.session.cookies=cookiejar;\n self.session.headers = {'User-agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.124 Safari/537.36'};\n self.task=task;\n '''\n 登录平台\n '''\n def login(self):\n username=self.task.user114.username;\n pwd=self.task.user114.pwd;\n login_data={'mobileNo':username,'password':pwd,'yzm':'','isAjax':'true'};\n r=self.session.post(Agent114.login_url,data=login_data);\n if r.json()['msg']!=\"ok\":\n raise Exception(\"登录失败!\");\n '''\n 预约日期页面的总概况\n '''\n def appoint(self):\n appoint_url=Agent114.appoint_url%(self.task.hospitalId,self.task.departmentId);\n r=self.session.get(appoint_url);\n return r;\n '''\n 医院下的科室的某一天预约信息。\n '''\n def partduty(self):\n dutyCode=self.task.getDutycode();\n dutyDate=self.task.getDutydate();\n r=self.doPartduty( dutyCode, dutyDate);\n \n return r;\n def doPartduty(self,dutyCode,dutyDate):\n login_data={'hospitalId':self.task.hospitalId,'departmentId':self.task.epartmentId,'dutyCode':dutyCode,'dutyDate':dutyDate,'isAjax':'true'};\n r=self.session.post(Agent114.login_url,data=login_data);\n return r.json();\n '''\n 提交预订\n '''\n def confirm(self,dutySourceId,doctorId,smsVerifyCode):\n confirm_data={'dutySourceId':dutySourceId,'hospitalId':self.task.hospitalId,'departmentId':self.task.departmentId,'doctorId':doctorId,\n 'patientId':self.task.user114.patientid,'hospitalCardId':'','medicareCardId':'','reimbursementType':'1',\n 'smsVerifyCode':smsVerifyCode,'childrenBirthday':'','isAjax':'true'};\n r=self.session.post(Agent114.confirm_url,data=confirm_data);\n return r;\n def cancel(self,dutySourceId,doctorId,smsVerifyCode):\n login_data={'dutySourceId':dutySourceId,'hospitalId':self.task.hospitalId,'departmentId':self.task.departmentId,'doctorId':doctorId,\n 'patientId':self.task.user114.patientid,'hospitalCardId':'','medicareCardId':'','reimbursementType':'1',\n 'smsVerifyCode':smsVerifyCode,'childrenBirthday':'','isAjax':'true'};\n r=self.session.post(Agent114.login_url,data=login_data);\n return r;\n def execute(self): \n \n pass;\nif __name__ == '__main__':\n r=Agent114();\n r.login();\n \n ","sub_path":"robot/Agent114.py","file_name":"Agent114.py","file_ext":"py","file_size_in_byte":2944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"313230177","text":"from .base import FeatureRecommenderMixin\n\nimport time\nimport numpy as np\n\nfrom collections import deque\nfrom . 
import logger\n\n\nclass Evaluator(object):\n\n \"\"\"Base class for experimentation of the incremental models with positive-only feedback.\n \"\"\"\n\n def __init__(self, recommender, repeat=True, maxlen=None, debug=False):\n \"\"\"Set/initialize parameters.\n\n Args:\n recommender (Recommender): Instance of a recommender which has been initialized.\n repeat (boolean): Choose whether the same item can be repeatedly interacted by the same user.\n maxlen (int): Size of an item buffer which stores most recently observed items.\n\n \"\"\"\n self.rec = recommender\n self.feature_rec = issubclass(recommender.__class__, FeatureRecommenderMixin)\n\n self.repeat = repeat\n\n # create a ring buffer\n # save items which are observed in most recent `maxlen` events\n self.item_buffer = deque(maxlen=maxlen)\n\n self.debug = debug\n\n def fit(self, train_events, test_events, n_epoch=1):\n \"\"\"Train a model using the first 30% positive events to avoid cold-start.\n\n Evaluation of this batch training is done by using the next 20% positive events.\n After the batch SGD training, the models are incrementally updated by using the 20% test events.\n\n Args:\n train_events (list of Event): Positive training events (0-30%).\n test_events (list of Event): Test events (30-50%).\n n_epoch (int): Number of epochs for the batch training.\n\n \"\"\"\n # make initial status for batch training\n for e in train_events:\n self.__validate(e)\n self.rec.users[e.user.index]['known_items'].add(e.item.index)\n self.item_buffer.append(e.item.index)\n\n # for batch evaluation, temporarily save new users info\n for e in test_events:\n self.__validate(e)\n self.item_buffer.append(e.item.index)\n\n self.__batch_update(train_events, test_events, n_epoch)\n\n # batch test events are considered as a new observations;\n # the model is incrementally updated based on them before the incremental evaluation step\n for e in test_events:\n self.rec.users[e.user.index]['known_items'].add(e.item.index)\n self.rec.update(e)\n\n def evaluate(self, test_events):\n \"\"\"Iterate recommend/update procedure and compute incremental recall.\n\n Args:\n test_events (list of Event): Positive test events.\n\n Returns:\n list of tuples: (rank, recommend time, update time)\n\n \"\"\"\n for i, e in enumerate(test_events):\n self.__validate(e)\n\n # target items (all or unobserved depending on a detaset)\n unobserved = set(self.item_buffer)\n if not self.repeat:\n unobserved -= self.rec.users[e.user.index]['known_items']\n\n # item i interacted by user u must be in the recommendation candidate\n # even if it is a new item\n unobserved.add(e.item.index)\n\n candidates = np.asarray(list(unobserved))\n\n # make top-{at} recommendation for the 1001 items\n start = time.clock()\n recos, scores = self.__recommend(e, candidates)\n recommend_time = (time.clock() - start)\n\n rank = np.where(recos == e.item.index)[0][0]\n\n # Step 2: update the model with the observed event\n self.rec.users[e.user.index]['known_items'].add(e.item.index)\n start = time.clock()\n self.rec.update(e)\n update_time = (time.clock() - start)\n\n self.item_buffer.append(e.item.index)\n\n # (top-1 score, where the correct item is ranked, rec time, update time)\n yield scores[0], rank, recommend_time, update_time\n\n def __recommend(self, e, candidates):\n if self.feature_rec:\n return self.rec.recommend(e.user, candidates, e.context)\n else:\n return self.rec.recommend(e.user, candidates)\n\n def __validate(self, e):\n self.__validate_user(e)\n self.__validate_item(e)\n\n def 
__validate_user(self, e):\n if self.rec.is_new_user(e.user.index):\n self.rec.register_user(e.user)\n\n def __validate_item(self, e):\n if self.rec.is_new_item(e.item.index):\n self.rec.register_item(e.item)\n\n def __batch_update(self, train_events, test_events, n_epoch):\n \"\"\"Batch update called by the fitting method.\n\n Args:\n train_events (list of Event): Positive training events.\n test_events (list of Event): Test events.\n n_epoch (int): Number of epochs for the batch training.\n\n \"\"\"\n for epoch in range(n_epoch):\n # SGD requires us to shuffle events in each iteration\n # * if n_epoch == 1\n # => shuffle is not required because it is a deterministic training (i.e. matrix sketching)\n if n_epoch != 1:\n np.random.shuffle(train_events)\n\n # train\n for e in train_events:\n self.rec.update(e, batch_train=True)\n\n # test\n MPR = self.__batch_evaluate(test_events)\n if self.debug:\n logger.debug('epoch %2d: MPR = %f' % (epoch + 1, MPR))\n\n def __batch_evaluate(self, test_events):\n \"\"\"Evaluate the current model by using the given test events.\n\n Args:\n test_events (list of Event): Current model is evaluated by these events.\n\n Returns:\n float: Mean Percentile Rank for the test set.\n\n \"\"\"\n percentiles = np.zeros(len(test_events))\n\n all_items = set(self.item_buffer)\n for i, e in enumerate(test_events):\n\n # check if the data allows users to interact the same items repeatedly\n unobserved = all_items\n if not self.repeat:\n # make recommendation for all unobserved items\n unobserved -= self.rec.users[e.user.index]['known_items']\n # true item itself must be in the recommendation candidates\n unobserved.add(e.item.index)\n\n candidates = np.asarray(list(unobserved))\n recos, scores = self.__recommend(e, candidates)\n\n pos = np.where(recos == e.item.index)[0][0]\n percentiles[i] = pos / (len(recos) - 1) * 100\n\n return np.mean(percentiles)\n","sub_path":"flurs/evaluator.py","file_name":"evaluator.py","file_ext":"py","file_size_in_byte":6495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"537110781","text":"# coding=utf-8\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom tqdm import tqdm\nimport time\nimport csv\n\nNAME = \"人工膝關節手術\"\n\nYEAR_LIST = [\n \"9512\", \"9612\", \"9712\", \"9812\", \"9912\", \"10006\", \"10013\", \"10012\", \"10106\", \"10113\", \"10112\", \"10206\", \"10213\", \"10212\", \"10306\", \"10313\", \"10312\", \"10406\", \"10413\", \"10412\", \"10506\", \"10513\", \"10512\", \"10606\"\n]\n\nYEAR_DICT = {\n \"9512\": \"95年全年\",\n \"9612\": \"96年全年\",\n \"9712\": \"97年全年\",\n \"9812\": \"98年全年\",\n \"9912\": \"99年全年\",\n \"10006\": \"100年上半年\",\n \"10013\": \"100年下半年\",\n \"10012\": \"100年全年\",\n \"10106\": \"101年上半年\",\n \"10113\": \"101年下半年\",\n \"10112\": \"101年全���\",\n \"10206\": \"102年上半年\",\n \"10213\": \"102年下半年\",\n \"10212\": \"102年全年\",\n \"10306\": \"103年上半年\",\n \"10313\": \"103年下半年\",\n \"10312\": \"103年全年\",\n \"10406\": \"104年上半年\",\n \"10413\": \"104年下半年\",\n \"10412\": \"104年全年\",\n \"10506\": \"105年上半年\",\n \"10513\": \"105年下半年\",\n \"10512\": \"105年全年\",\n \"10606\": \"106年上半年\",\n \"10613\": \"106年下半年\",\n \"10612\": \"106年全年\"\n}\n\nTKA_OPTION = [\n \"351\", \"353\", \"355\"\n]\n\nTKA_DICT = {\n \"351\": \"人工膝關節置換後3個月(90天)內發生手術傷口表層感染率\",\n \"353\": \"人工膝關節置換手術後3個月(90天)內置換物深部感染率\",\n \"355\": \"人工膝關節置換術出院後30日內因相關問題再住院率\"\n}\n\ndef dict2CSV(data, year, option):\n keys = data[0].keys()\n with open('{}/{}_{}.csv'.format(NAME, 
YEAR_DICT[year], TKA_DICT[option]), 'w', encoding=\"utf8\") as f:\n dict_writer = csv.DictWriter(f, keys)\n dict_writer.writeheader()\n dict_writer.writerows(data)\n\n\ndef checkExist(year, option):\n import os.path\n if not os.path.isdir(NAME):\n os.makedirs(NAME)\n if os.path.isfile(\"{}/{}_{}.csv\".format(NAME, YEAR_DICT[year], TKA_DICT[option])):\n return True\n return False\n\ndef getTableData(year, option):\n if checkExist(year, option):\n return\n\n DATA = []\n driver = webdriver.Firefox()\n try:\n driver.get(\"http://www1.nhi.gov.tw/mqinfo/SearchPro.aspx?Type=TKA&List=4\")\n\n driver.find_element_by_id(\"ContentPlaceHolder1_DropDA\").click()\n driver.find_element_by_xpath(\"//option[@value='{}']\".format(option)).click()\n time.sleep(2)\n driver.find_element_by_id(\"ContentPlaceHolder1_drop1\").click()\n driver.find_element_by_xpath(\"//option[@value='{}']\".format(year)).click()\n driver.find_element_by_id(\"ContentPlaceHolder1_RowBox\").send_keys(\"000\")\n driver.find_element_by_id(\"ContentPlaceHolder1_But_Query\").click()\n time.sleep(60)\n\n table = driver.find_element_by_id(\"ContentPlaceHolder1_GV_List\")\n first_line = True\n\n with tqdm(total=len(table.find_elements_by_tag_name(\"tr\"))-1) as pbar:\n for row in table.find_elements_by_tag_name(\"tr\"):\n if first_line:\n first_line = False\n continue\n column = row.find_elements_by_tag_name(\"td\")\n info = {\n \"縣市別\": column[1].text,\n \"醫事機構名稱\": column[2].text,\n \"特約類別\": column[3].text,\n \"分子\": column[4].text,\n \"分母\": column[5].text,\n \"院所指標值\": column[6].text,\n \"所屬分區業務組指標值\": column[7].text,\n \"全國指標值\": column[8].text\n }\n DATA.append(info)\n pbar.update(1)\n dict2CSV(DATA, year, option)\n except Exception as e:\n print(e)\n driver.close()\n\nfor option in TKA_OPTION:\n for year in YEAR_LIST:\n print(\"{} {}\".format(YEAR_DICT[year], TKA_DICT[option]))\n getTableData(year, option)\n \n","sub_path":"TKA.py","file_name":"TKA.py","file_ext":"py","file_size_in_byte":3968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"571617665","text":"from rltime.training.multi_step_trainer import MultiStepTrainer\nimport torch\nimport numpy as np\nfrom rltime.models.torch.utils import set_lr\nfrom rltime.discriminators.torch.simple import Simple\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nimport json\nimport time\nimport os\n\nclass TorchTrainer(MultiStepTrainer):\n \"\"\" Base class for all multi-step pytorch training. \"\"\"\n\n @staticmethod\n def create_discriminator(observation_space, model_config):\n return Simple(observation_space, model_config)\n\n def _train(self, clip_grad=None, clip_grad_dynamic_alpha=None,\n adam_epsilon=1e-8, vf_scale_epsilon=None, **kwargs):\n \"\"\"Triggers the training\n\n Args:\n clip_grad: Whether to clip gradient-norms before the optimizer\n step. If not None this is the value to use for clipping.\n clip_grad_dynamic_alpha: If not None and clip_grad is not None then\n use dynamic gradient-norm clipping instead of fixed. In this\n case an exponential moving-average of the gradient-norms are\n tracked using this value as the 'alpha' value. 
The\n gradient-norm will be clipped to the moving-average value times\n the 'clip_grad' parameter value (For example set clip_grad=1.5\n to clip the gradient norm to 1.5x of the moving-average\n gradient-norm).\n This may be usefull to avoid 'destructive' gradient updates\n without losing the general magnitude of the gradients, however\n it may not help with 'exploding gradient' issues.\n adam_epsilon: Epsilon value to use for the adam optimizer\n vf_scale_epsilon: If not None then value-functon rescaling will be\n used for the target value, as defined in the R2D2 paper, using\n this value as the epsilon value. This is used as an alternative\n to reward clipping so should be enabled together with\n 'clip_rewards' set to False in the base class\n \"\"\"\n self.clip_grad = float(clip_grad) if clip_grad is not None else None\n self.clip_grad_dynamic_alpha = clip_grad_dynamic_alpha\n if clip_grad_dynamic_alpha is not None:\n self._grad_norm_moving_average = None\n self.adam_epsilon = adam_epsilon\n assert(vf_scale_epsilon is None or vf_scale_epsilon > 0)\n assert((not vf_scale_epsilon) or (not self.clip_rewards)), \\\n \"Value function rescaling only makes sense with clip_rewards=False\"\n self.vf_scale_epsilon = vf_scale_epsilon\n self.last_debug_ts = 0\n\n super()._train(**kwargs)\n\n def _vf_scale(self, x):\n \"\"\"Performs value-function scaling of the given value, if enabled\"\"\"\n if not self.vf_scale_epsilon:\n return x\n # Value function scaling as in the R2D2 paper\n return torch.sign(x) * \\\n (torch.sqrt(torch.abs(x) + 1) - 1) + (self.vf_scale_epsilon * x)\n\n def _vf_unscale(self, scaled_x):\n \"\"\"Computes the inverse of _vf_scale(x), if vf-rescaling is enabled\"\"\"\n if not self.vf_scale_epsilon:\n return scaled_x\n\n # We need double() otherwise we lose too much precision for low eps\n # values such as 1e-3, due to the eps**2 terms\n scaled_x = scaled_x.double()\n abs_scaled_x = torch.abs(scaled_x)\n eps = self.vf_scale_epsilon\n # TODO: Can this be simplified somehow?\n x = abs_scaled_x / eps - (\n (1 / (2. * (eps**2))) *\n torch.sqrt(\n 4 * self.vf_scale_epsilon*abs_scaled_x +\n (2. * eps + 1)**2)\n ) + \\\n (2. * eps + 1) / (2. 
* (eps ** 2))\n x *= torch.sign(scaled_x)\n\n # SANITY CHECK to make sure the inverse is working, enable only to\n # test this function\n # assert(torch.all(torch.abs(scaled_x - self._vf_scale(x))<1e-5)), (\"_vf_unscale() sanity failed:\",(scaled_x, self._vf_scale(x)),(scaled_x == self._vf_scale(x)))\n\n return x.float()\n\n def train_init(self, lr):\n \"\"\"Init the training with given LR (Called after policy creation)\"\"\"\n self.optimizer = torch.optim.Adam(\n self.policy.parameters(), eps=self.adam_epsilon)\n if self.discriminator_model_config:\n # TODO(frederik): Use different learning rate and epsilon for ADAM\n self.discriminator_optimizer = torch.optim.Adam(self.discriminator.parameters())\n\n def _compute_grads(self, states, targets, policy_outputs, extra_data,\n timesteps):\n \"\"\"Should be implemented by the sub-class to compute the gradients on\n the given states and targets\"\"\"\n raise NotImplementedError\n\n def _get_bootstrap_target_value(self, target_states, timesteps):\n \"\"\"Should be implemented by the sub-class to compute the bootstrap\n target value on the given target states\"\"\"\n raise NotImplementedError\n\n def _discount_bootstrap_target_value(self, target_values, nsteps):\n \"\"\"Discounts the calculated target-bootstrap value using the default\n discount method and given target nsteps\"\"\"\n return (self.gamma ** nsteps) * target_values\n\n def calc_target_values(self, returns, target_states, target_masks, nsteps,\n timesteps):\n \"\"\"Calculates target values for the given target-states\n\n Optionally using value-function-rescaling if configured, supports 1D\n and 2D targets\n \"\"\"\n with torch.no_grad(): # Target values should not generate gradients\n # Make tensors of everything (non_blocking can potentially help but\n # doesn't really since memory isn't pinned)\n target_states, returns, target_masks, nsteps = \\\n self.target_policy.make_tensor(\n (target_states, returns, target_masks, nsteps),\n non_blocking=True)\n # Calculate the bootstrap target value using the given target\n # states\n bootstrap_target_value = self._get_bootstrap_target_value(\n target_states, timesteps)\n\n # value function rescaling as in the R2D2 paper, here we\n # unscale/inverse the bootstrapped target value, and later rescale\n # the finalized target at the end. 
Note this has no effect in the\n # default case of vf_scale_epsilon=None\n bootstrap_target_value = self._vf_unscale(bootstrap_target_value)\n\n # Target value can be either 1D or 2D (2D for example in case of\n # distributional or IQN target), in any case the batch should be on\n # dim=0\n assert(returns.shape == (bootstrap_target_value.shape[0],))\n assert(target_masks.shape == (bootstrap_target_value.shape[0],))\n assert(nsteps.shape == (bootstrap_target_value.shape[0],))\n assert(len(bootstrap_target_value.shape) in [1, 2])\n if len(bootstrap_target_value.shape) == 2:\n returns = returns.unsqueeze(-1)\n target_masks = target_masks.unsqueeze(-1)\n nsteps = nsteps.unsqueeze(-1)\n\n # Calculate the final discounted nstep-bootstrapped (optionally\n # rescaled) target value.\n # In any case where the episode terminated within the target nstep\n # target_mask will be 0.\n # In case of a second/distributional dimension then\n # returns/nsteps/target_masks will be broadcasted to that dimension\n return self._vf_scale(\n returns +\n self._discount_bootstrap_target_value(\n bootstrap_target_value, nsteps)*target_masks)\n\n def set_lr(self, lr):\n \"\"\"Sets a new LR value for the optimizer\"\"\"\n set_lr(self.optimizer, lr)\n\n def _get_grad_norm_clip_value(self, cur_grad_norm):\n \"\"\"Returns the value to clip the gradient norm to, if any, given the\n current gradient norm\"\"\"\n if not self.clip_grad:\n # No grad clipping\n return None\n elif self.clip_grad_dynamic_alpha is None:\n # Standard fixed clipping to the specified value\n return self.clip_grad\n else:\n # Dynamic clipping, calculate moving-average of the grad norm and\n # clip to that value with 'clip_grad' factor\n if self._grad_norm_moving_average is None:\n self._grad_norm_moving_average = cur_grad_norm\n else:\n self._grad_norm_moving_average = \\\n (self._grad_norm_moving_average *\n self.clip_grad_dynamic_alpha) + cur_grad_norm * (\n 1 - self.clip_grad_dynamic_alpha)\n # Log the current grad norm moving-average value\n self.value_log.log(\n \"grad_norm_ma\", self._grad_norm_moving_average, group=\"train\")\n return self._grad_norm_moving_average * self.clip_grad\n\n def train_batch(self, *args, **kwargs):\n # Reset gradients\n self.policy.zero_grad()\n\n # Compute the gradients on the given training arugments\n self._compute_grads(*args, **kwargs)\n\n # Log and optionally clip the global gradient norm before updating the\n # weights\n grad_norm = self.policy.get_grad_norm()\n self.value_log.log(\"grad_norm\", grad_norm, group=\"train\")\n self.value_log.log(\n \"grad_norm_max\", grad_norm, group=\"train\", agg=\"max\")\n clip_value = self._get_grad_norm_clip_value(grad_norm)\n if clip_value is not None:\n torch.nn.utils.clip_grad_norm_(\n self.policy.parameters(), clip_value)\n self.value_log.log(\n \"grad_norm_clipped\", self.policy.get_grad_norm(),\n group=\"train\")\n\n # Perform the optimizer weight update\n self.optimizer.step()\n\n def train_batch_discriminator(self, states, predictions, env_indices, skipped_frames, timesteps):\n # Reset gradients\n self.discriminator.zero_grad()\n\n # Compute the gradients on the given training arugments\n self._compute_discriminator_grads(states, predictions, env_indices, skipped_frames, timesteps)\n\n # Log and optionally clip the global gradient norm before updating the\n # weights\n grad_norm = self.discriminator.get_grad_norm()\n self.value_log.log(\"grad_norm\", grad_norm, group=\"train_discriminator\")\n self.value_log.log(\n \"grad_norm_max\", grad_norm, 
group=\"train_discriminator\", agg=\"max\")\n # TODO(frederik): Use own clip value?\n clip_value = self._get_grad_norm_clip_value(grad_norm)\n if clip_value is not None:\n torch.nn.utils.clip_grad_norm_(\n self.discriminator.parameters(), clip_value)\n self.value_log.log(\n \"grad_norm_clipped\", self.policy.get_grad_norm(),\n group=\"train_discriminator\")\n # Perform the optimizer weight update\n self.discriminator_optimizer.step()\n\n def _prepare_states(self, states, skipped_frames, timesteps):\n # TODO(frederik): Reorder states for lstm layer dynamically (this depends on the model that was used)\n states = dict(states)\n states[\"layer2_state\"] = states[\"layer1_state\"]\n states[\"layer1_state\"] = states[\"layer0_state\"]\n is_tuple_state = type(states[\"x\"]) == tuple\n if is_tuple_state:\n obs = states[\"x\"][0]\n else:\n obs = states[\"x\"]\n obs = obs.clone()\n batch_size = obs.shape[0] // timesteps\n for layer_state in states.values():\n if type(layer_state) == dict:\n for field in [\"cx\", \"hx\"]:\n if field in layer_state:\n layer_state[field] = torch.zeros_like(layer_state[field])\n if \"initials\" in layer_state:\n layer_state[\"initials\"] = torch.zeros_like(layer_state[\"initials\"])\n # Set first state of trajectory as initial\n initial_indices = torch.arange(timesteps)\n layer_state[\"initials\"][initial_indices] = 1.0\n obs = obs.view((timesteps, batch_size)+obs.shape[1:])\n masked_indices = torch.fmod(torch.arange(timesteps), skipped_frames) != 0\n obs[masked_indices] = 0.0\n obs = obs.reshape((obs.shape[0]*obs.shape[1],)+obs.shape[2:])\n if is_tuple_state:\n states[\"x\"] = (obs, states[\"x\"][1])\n else:\n states[\"x\"] = obs\n return states\n\n def _compute_discriminator_grads(self, states, predictions, env_indices, skipped_frames, timesteps):\n loss_fn = torch.nn.BCELoss(reduction=\"none\")\n\n batch_size = env_indices.shape[0] // timesteps\n env_indices = env_indices.reshape((timesteps, batch_size)+env_indices.shape[1:])\n predictions = predictions.view((timesteps, batch_size)+predictions.shape[1:])\n nonmasked_indices = torch.fmod(torch.arange(timesteps), skipped_frames) == 0\n predictions_in_trajectory = predictions[nonmasked_indices]\n\n\n trajectory_labels = self.discriminator.make_tensor(env_indices == 0)[nonmasked_indices]\n loss = loss_fn(predictions_in_trajectory, trajectory_labels)\n loss = loss.mean()\n loss.backward()\n\n self.value_log.log(\"loss\", loss.item(), group=\"train_discriminator\")\n \n @staticmethod\n def write_trajectory(obs, env_indices, predictions, timesteps, path):\n trajectory_path = os.path.join(path, f\"trajectories_{time.time()}\")\n os.makedirs(trajectory_path)\n batch_size = obs.shape[0] // timesteps\n obs = obs.view((timesteps, batch_size)+obs.shape[1:])\n\n for i in range(batch_size):\n fig = plt.figure()\n artists = [[plt.imshow(obs[j, i].squeeze().cpu(), cmap='gray', animated=True)] for j in range(timesteps) if obs[j, i].max() > 0]\n pred_mean = predictions[i*timesteps+timesteps - 1].detach().cpu().numpy().round(2)\n env_index = env_indices[i*timesteps]\n plt.axis('off')\n ani = animation.ArtistAnimation(fig, artists, interval=600, blit=True)\n ani.save(os.path.join(trajectory_path, f\"{env_index}_{i}_{pred_mean}.mp4\"), extra_args=[\"-loglevel\", \"panic\"])\n plt.close(fig)\n\n def _process_train_data(self, train_data, skipped_frames, timesteps):\n states = self._prepare_states(train_data[\"states\"],skipped_frames, timesteps)\n predictions = self.discriminator.predict(states, timesteps).squeeze()\n # TODO(frederik): 
Scale rewards with running average of the rewards ONLY from env 0?\n train_data_entries = train_data[\"env_indices\"] != 0\n\n one_hour = 60 * 60\n\n if time.time() - self.last_debug_ts > one_hour:\n self.last_debug_ts = time.time()\n is_tuple_state = type(states[\"x\"]) == tuple\n TorchTrainer.write_trajectory(obs=states[\"x\"][0] if is_tuple_state else states[\"x\"], env_indices=train_data[\"env_indices\"], predictions=predictions, timesteps=timesteps, path=self.logger.path)\n returns = predictions.detach()[train_data_entries]\n train_data[\"returns\"][train_data_entries] = returns * 10.0\n self.value_log.log(\"discriminator_returns_mean\", returns.mean(), group=\"train_discriminator\")\n return train_data, predictions","sub_path":"rltime/training/torch/torch_trainer.py","file_name":"torch_trainer.py","file_ext":"py","file_size_in_byte":15506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"290648375","text":"\ndef three_sum(nums):\n if nums is None or len(nums) < 3:\n return []\n nums = sorted(nums)\n results = []\n for i in range(len(nums) - 2):\n if nums[i] > 0:\n break\n if i > 0 and nums[i] == nums[i - 1]:\n continue\n target = -nums[i]\n start = i + 1\n end = len(nums) - 1\n while start < end:\n if nums[start] + nums[end] == target:\n results.append([-target, nums[start], nums[end]])\n start += 1\n end -= 1\n while start < end and nums[start] == nums[start - 1]:\n start += 1\n while start < end and nums[end] == nums[end + 1]:\n end -= 1\n elif nums[start] + nums[end] < target:\n start += 1\n else:\n end -= 1\n return results\n\n\nif __name__ == \"__main__\":\n nums_ = [-1, 0, 1, 2, -1, -4]\n print(three_sum(nums_))\n","sub_path":"python/three_sum.py","file_name":"three_sum.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"495229861","text":"from pylastica.aggregation.avg import Avg\nfrom tests.base import Base\nfrom pylastica.aggregation.globalagg import Global\n\n__author__ = 'Joe Linn'\n\nimport unittest\n\n\nclass GlobalTest(unittest.TestCase, Base):\n def test_to_dict(self):\n expected = {\n \"global\": {},\n \"aggs\": {\n \"avg_price\": {\"avg\": {\"field\": \"price\"}}\n }\n }\n\n agg = Global(\"all_products\").add_aggregation(Avg(\"avg_price\").set_field(\"price\"))\n self.assertEqual(expected, agg.to_dict())\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/aggregation/test_global.py","file_name":"test_global.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"42191622","text":"#!/usr/bin/python\n\n# Copyright (C) 2011 by Ondrej Martinak \n# \n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n# \n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n# \n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR 
PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\n# fmake 0.5 (formatted make - https://github.com/omartinak/fmake)\n# - reformats make output to make it more readable\n# - inspired by colormake by Bjarni R. Einarsson\n# (http://bre.klaki.net/programs/colormake)\n\nfrom sys import stdin, stdout, stderr\nfrom configparser import ConfigParser, ExtendedInterpolation\nimport ast\nimport re\nimport os\nimport console\n\nclass Formatter:\n\tdef __init__(self):\n\t\tself.subs = {}\n\t\tself.fmt = {}\n\t\tself.fmtRepl = re.compile(r'\\{(\\w+)\\}')\n\t\tself.columns = 9999\n\n\tdef setColumns(self, columns):\n\t\tself.columns = columns\n\n\tdef loadConfig(self, fileName):\n\t\tconfig = ConfigParser(interpolation=ExtendedInterpolation())\n\n\t\tff = config.read(fileName)\n\t\tif len(ff) is not 0:\n\t\t\t# Prepare the formatting strings\n\t\t\tfor key, value in config['format'].items():\n\t\t\t\t# Preprocess the format by replacing {name} with {0[name]}\n\t\t\t\tval = self.fmtRepl.sub(r'{0[\\1]}', value)\n\t\t\t\tself.fmt[key] = ast.literal_eval(val) # decode escape chars\n\n\t\t\t# Prepare the regular expressions for substitution\n\t\t\tfor key, value in config['subs'].items():\n\t\t\t\tself.subs[key] = re.compile(value)\n\t\telse:\n\t\t\tstderr.write('!! Config file not found\\n')\n\n\tdef subsMake(self, line):\n\t\t# Go through all the prepared subs and try to apply them\n\t\tfor key, sub in self.subs.items():\n\t\t\tm = sub.search(line)\n\t\t\tif m is not None and key in self.fmt:\n\t\t\t\treturn self.fmt[key].format(m.groupdict())\n\n\t\t# If none applied just cut the line so it fits the terminal\n\t\tif len(line) > self.columns:\n\t\t\tll = line[:self.columns-3]\n\t\t\treturn ll + \"...\"\n\t\telse:\n\t\t\treturn line.rstrip()\n\nif __name__ == \"__main__\":\n\tformatter = Formatter()\n\n\t# Initialize the formatter\n\tfilenames = [\n\t\tos.getenv('XDG_CONFIG_HOME') + '/fmake/config',\n\t\tos.environ['HOME'] + '/.fmakerc',\n\t]\n\tformatter.loadConfig(filenames)\n\tformatter.setColumns(console.getTerminalSize()[1])\n\n\t# Format everything we get on standard input and put it on standard output\n\tfor line in stdin:\n\t\tstdout.write(\"%s\\n\" % formatter.subsMake(line))\n\n","sub_path":"fmake.py","file_name":"fmake.py","file_ext":"py","file_size_in_byte":3135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"443910022","text":"import requests\nimport inspect\n\nimport appdaemon.appapi as appapi\nimport appdaemon.utils as utils\n\n\nfrom requests.packages.urllib3.exceptions import InsecureRequestWarning\nrequests.packages.urllib3.disable_warnings(InsecureRequestWarning)\n\n#\n# Define an entities class as a descriptor to enable read only access of HASS state\n#\n\ndef hass_check(func):\n def func_wrapper(*args, **kwargs):\n self = args[0]\n if not self.AD.get_plugin(self._get_namespace(**kwargs)).reading_messages:\n self.AD.log(\"WARNING\", \"Attempt to call Home Assistant while disconnected: {}\".format(func))\n return lambda *args: None\n else:\n return func(*args, **kwargs)\n\n return (func_wrapper)\n\n\nclass Hass(appapi.AppDaemon):\n #\n # Internal\n #\n\n def __init__(self, ad, name, logger, error, args, config, app_config, global_vars):\n\n super(Hass, self).__init__(ad, name, logger, 
error, args, config, app_config, global_vars)\n\n self.namespace = \"default\"\n self.AD = ad\n self.name = name\n self._logger = logger\n self._error = error\n self.args = args\n self.global_vars = global_vars\n self.config = config\n self.app_config = app_config\n\n #\n # Register specific constraints\n #\n self.register_constraint(\"constrain_presence\")\n self.register_constraint(\"constrain_input_boolean\")\n self.register_constraint(\"constrain_input_select\")\n self.register_constraint(\"constrain_days\")\n\n def _sub_stack(self, msg):\n # If msg is a data structure of some type, don't sub\n if type(msg) is str:\n stack = inspect.stack()\n if msg.find(\"__module__\") != -1:\n msg = msg.replace(\"__module__\", stack[2][1])\n if msg.find(\"__line__\") != -1:\n msg = msg.replace(\"__line__\", str(stack[2][2]))\n if msg.find(\"__function__\") != -1:\n msg = msg.replace(\"__function__\", stack[2][3])\n return msg\n\n def set_namespace(self, namespace):\n self.namespace = namespace\n\n def _get_namespace(self, **kwargs):\n if \"namespace\" in kwargs:\n namespace = kwargs[\"namespace\"]\n del kwargs[\"namespace\"]\n else:\n namespace = self.namespace\n\n return namespace\n\n\n #\n # Listen state stub here as super class doesn't know the namespace\n #\n\n def listen_state(self, cb, entity=None, **kwargs):\n namespace = self._get_namespace(**kwargs)\n if \"namespace\" in kwargs:\n del kwargs[\"namespace\"]\n return super(Hass, self).listen_state(namespace, cb, entity, **kwargs)\n\n #\n # Likewise with get state\n #\n\n def get_state(self, entity=None, **kwargs):\n namespace = self._get_namespace(**kwargs)\n if \"namespace\" in kwargs:\n del kwargs[\"namespace\"]\n return super(Hass, self).get_state(namespace, entity, **kwargs)\n\n def set_state(self, entity_id, **kwargs):\n namespace = self._get_namespace(**kwargs)\n if \"namespace\" in kwargs:\n del kwargs[\"namespace\"]\n self._check_entity(namespace, entity_id)\n self.AD.log(\n \"DEBUG\",\n \"set_state: {}, {}\".format(entity_id, kwargs)\n )\n\n if entity_id in self.get_state(namespace = namespace):\n new_state = self.get_state(namespace = namespace)[entity_id]\n else:\n # Its a new state entry\n new_state = {}\n new_state[\"attributes\"] = {}\n\n if \"state\" in kwargs:\n new_state[\"state\"] = kwargs[\"state\"]\n\n if \"attributes\" in kwargs:\n new_state[\"attributes\"].update(kwargs[\"attributes\"])\n\n config = self.AD.get_plugin(namespace).config\n if \"cert_path\" in config:\n cert_path = config[\"cert_path\"]\n else:\n cert_path = False\n\n if \"token\" in config:\n headers = {'Authorization': \"Bearer {}\".format(config[\"token\"])}\n elif \"ha_key\" in config:\n headers = {'x-ha-access': config[\"ha_key\"]}\n else:\n headers = {}\n\n apiurl = \"{}/api/states/{}\".format(config[\"ha_url\"], entity_id)\n\n r = requests.post(\n apiurl, headers=headers, json=new_state, verify=cert_path\n )\n r.raise_for_status()\n state = r.json()\n\n # Update AppDaemon's copy\n\n self.AD.set_state(namespace, entity_id, state)\n\n return state\n\n def set_app_state(self, entity_id, **kwargs):\n namespace = self._get_namespace(**kwargs)\n if \"namespace\" in kwargs:\n del kwargs[\"namespace\"]\n self._check_entity(namespace, entity_id)\n self.AD.log(\n \"DEBUG\",\n \"set_app_state: {}, {}\".format(entity_id, kwargs)\n )\n\n if entity_id in self.get_state(namespace = namespace):\n new_state = self.get_state(namespace = namespace)[entity_id]\n else:\n # Its a new state entry\n new_state = {}\n new_state[\"attributes\"] = {}\n\n if \"state\" in 
kwargs:\n new_state[\"state\"] = kwargs[\"state\"]\n\n if \"attributes\" in kwargs and kwargs.get('replace', False):\n new_state[\"attributes\"] = kwargs[\"attributes\"]\n else:\n if \"attributes\" in kwargs:\n new_state[\"attributes\"].update(kwargs[\"attributes\"])\n\n # Update AppDaemon's copy\n\n self.AD.set_app_state(namespace, entity_id, new_state)\n\n return new_state\n\n def entity_exists(self, entity_id, **kwargs):\n namespace = self._get_namespace(**kwargs)\n return self.AD.entity_exists(namespace, entity_id)\n\n #\n # Events\n #\n def listen_event(self, cb, event=None, **kwargs):\n namespace = self._get_namespace(**kwargs)\n if \"namespace\" in kwargs:\n del kwargs[\"namespace\"]\n return super(Hass, self).listen_event(namespace, cb, event, **kwargs)\n\n\n #\n # Utility\n #\n\n\n def split_entity(self, entity_id, **kwargs):\n self._check_entity(self._get_namespace(**kwargs), entity_id)\n return entity_id.split(\".\")\n\n def split_device_list(self, list_):\n return list_.split(\",\")\n\n def log(self, msg, level=\"INFO\"):\n msg = self._sub_stack(msg)\n self.AD.log(level, msg, self.name)\n\n def error(self, msg, level=\"WARNING\"):\n msg = self._sub_stack(msg)\n self.AD.err(level, msg, self.name)\n\n def get_plugin_config(self, **kwargs):\n namespace = self._get_namespace(**kwargs)\n return self.AD.get_plugin_meta(namespace)\n\n #\n #\n #\n\n def friendly_name(self, entity_id, **kwargs):\n self._check_entity(self._get_namespace(**kwargs), entity_id)\n state = self.get_state(**kwargs)\n if entity_id in state:\n if \"friendly_name\" in state[entity_id][\"attributes\"]:\n return state[entity_id][\"attributes\"][\"friendly_name\"]\n else:\n return entity_id\n return None\n\n #\n # Device Trackers\n #\n\n def get_trackers(self, **kwargs):\n return (key for key, value in self.get_state(\"device_tracker\", **kwargs).items())\n\n def get_tracker_details(self, **kwargs):\n return self.get_state(\"device_tracker\", **kwargs)\n\n def get_tracker_state(self, entity_id, **kwargs):\n self._check_entity(self._get_namespace(**kwargs), entity_id)\n return self.get_state(entity_id, **kwargs)\n\n def anyone_home(self, **kwargs):\n state = self.get_state(**kwargs)\n for entity_id in state.keys():\n thisdevice, thisentity = entity_id.split(\".\")\n if thisdevice == \"device_tracker\":\n if state[entity_id][\"state\"] == \"home\":\n return True\n return False\n\n def everyone_home(self, **kwargs):\n state = self.get_state(**kwargs)\n for entity_id in state.keys():\n thisdevice, thisentity = entity_id.split(\".\")\n if thisdevice == \"device_tracker\":\n if state[entity_id][\"state\"] != \"home\":\n return False\n return True\n\n def noone_home(self, **kwargs):\n state = self.get_state(**kwargs)\n for entity_id in state.keys():\n thisdevice, thisentity = entity_id.split(\".\")\n if thisdevice == \"device_tracker\":\n if state[entity_id][\"state\"] == \"home\":\n return False\n return True\n\n #\n # Built in constraints\n #\n\n def constrain_presence(self, value):\n unconstrained = True\n if value == \"everyone\" and not self.everyone_home():\n unconstrained = False\n elif value == \"anyone\" and not self.anyone_home():\n unconstrained = False\n elif value == \"noone\" and not self.noone_home():\n unconstrained = False\n\n return unconstrained\n\n def constrain_input_boolean(self, value):\n unconstrained = True\n state = self.get_state()\n\n values = value.split(\",\")\n if len(values) == 2:\n entity = values[0]\n desired_state = values[1]\n else:\n entity = value\n desired_state = \"on\"\n if entity 
in state and state[entity][\"state\"] != desired_state:\n unconstrained = False\n\n return unconstrained\n\n def constrain_input_select(self, value):\n unconstrained = True\n state = self.get_state()\n\n values = value.split(\",\")\n entity = values.pop(0)\n if entity in state and state[entity][\"state\"] not in values:\n unconstrained = False\n\n return unconstrained\n\n def constrain_days(self, value):\n day = self.get_now().weekday()\n daylist = [utils.day_of_week(day) for day in value.split(\",\")]\n if day in daylist:\n return True\n return False\n\n #\n # Helper functions for services\n #\n\n @hass_check\n def turn_on(self, entity_id, **kwargs):\n namespace = self._get_namespace(**kwargs)\n if \"namespace\" in kwargs:\n del kwargs[\"namespace\"]\n \n self._check_entity(namespace, entity_id)\n if kwargs == {}:\n rargs = {\"entity_id\": entity_id}\n else:\n rargs = kwargs\n rargs[\"entity_id\"] = entity_id\n \n rargs[\"namespace\"] = namespace\n self.call_service(\"homeassistant/turn_on\", **rargs)\n\n @hass_check\n def turn_off(self, entity_id, **kwargs):\n namespace = self._get_namespace(**kwargs)\n if \"namespace\" in kwargs:\n del kwargs[\"namespace\"]\n \n self._check_entity(namespace, entity_id)\n if kwargs == {}:\n rargs = {\"entity_id\": entity_id}\n else:\n rargs = kwargs\n rargs[\"entity_id\"] = entity_id\n\n rargs[\"namespace\"] = namespace\n device, entity = self.split_entity(entity_id)\n if device == \"scene\":\n self.call_service(\"homeassistant/turn_on\", **rargs)\n else:\n self.call_service(\"homeassistant/turn_off\", **rargs)\n\n @hass_check\n def toggle(self, entity_id, **kwargs):\n namespace = self._get_namespace(**kwargs)\n if \"namespace\" in kwargs:\n del kwargs[\"namespace\"]\n \n self._check_entity(namespace, entity_id)\n if kwargs == {}:\n rargs = {\"entity_id\": entity_id}\n else:\n rargs = kwargs\n rargs[\"entity_id\"] = entity_id\n \n rargs[\"namespace\"] = namespace\n self.call_service(\"homeassistant/toggle\", **rargs)\n\n @hass_check\n def set_value(self, entity_id, value, **kwargs):\n namespace = self._get_namespace(**kwargs)\n if \"namespace\" in kwargs:\n del kwargs[\"namespace\"]\n \n self._check_entity(namespace, entity_id)\n if kwargs == {}:\n rargs = {\"entity_id\": entity_id, \"value\": value}\n else:\n rargs = kwargs\n rargs[\"entity_id\"] = entity_id\n rargs[\"value\"] = value\n rargs[\"namespace\"] = namespace\n self.call_service(\"input_number/set_value\", **rargs)\n\n @hass_check\n def set_textvalue(self, entity_id, value, **kwargs):\n namespace = self._get_namespace(**kwargs)\n if \"namespace\" in kwargs:\n del kwargs[\"namespace\"]\n \n self._check_entity(namespace, entity_id)\n if kwargs == {}:\n rargs = {\"entity_id\": entity_id, \"value\": value}\n else:\n rargs = kwargs\n rargs[\"entity_id\"] = entity_id\n rargs[\"value\"] = value\n \n rargs[\"namespace\"] = namespace\n self.call_service(\"input_text/set_value\", **rargs)\n\n @hass_check\n def select_option(self, entity_id, option, **kwargs):\n namespace = self._get_namespace(**kwargs)\n if \"namespace\" in kwargs:\n del kwargs[\"namespace\"]\n \n self._check_entity(namespace, entity_id)\n if kwargs == {}:\n rargs = {\"entity_id\": entity_id, \"option\": option}\n else:\n rargs = kwargs\n rargs[\"entity_id\"] = entity_id\n rargs[\"option\"] = option\n \n rargs[\"namespace\"] = namespace\n self.call_service(\"input_select/select_option\", **rargs)\n\n @hass_check\n def notify(self, message, **kwargs):\n kwargs[\"message\"] = message\n if \"name\" in kwargs:\n service = 
\"notify/{}\".format(kwargs[\"name\"])\n del kwargs[\"name\"]\n else:\n service = \"notify/notify\"\n\n self.call_service(service, **kwargs)\n\n @hass_check\n def persistent_notification(self, message, title=None, id=None):\n kwargs = {}\n kwargs[\"message\"] = message\n if title is not None:\n kwargs[\"title\"] = title\n if id is not None:\n kwargs[\"notification_id\"] = id\n self.call_service(\"persistent_notification/create\", **kwargs)\n\n #\n # Event\n #\n\n @hass_check\n def fire_event(self, event, **kwargs):\n self.AD.log(\"DEBUG\",\n \"fire_event: {}, {}\".format(event, kwargs))\n \n namespace = self._get_namespace(**kwargs)\n if \"namespace\" in kwargs:\n del kwargs[\"namespace\"]\n \n config = self.AD.get_plugin(namespace).config \n if \"cert_path\" in config:\n cert_path = config[\"cert_path\"]\n else:\n cert_path = False\n\n if \"token\" in config:\n headers = {'Authorization': \"Bearer {}\".format(config[\"token\"])}\n elif \"ha_key\" in config:\n headers = {'x-ha-access': config[\"ha_key\"]}\n else:\n headers = {}\n\n\n apiurl = \"{}/api/events/{}\".format(config[\"ha_url\"], event)\n r = requests.post(\n apiurl, headers=headers, json=kwargs, verify=cert_path\n )\n r.raise_for_status()\n return r.json()\n\n #\n # Service\n #\n @staticmethod\n def _check_service(service):\n if service.find(\"/\") == -1:\n raise ValueError(\"Invalid Service Name: {}\".format(service))\n\n @hass_check\n def call_service(self, service, **kwargs):\n self._check_service(service)\n d, s = service.split(\"/\")\n self.AD.log(\n \"DEBUG\",\n \"call_service: {}/{}, {}\".format(d, s, kwargs)\n )\n \n namespace = self._get_namespace(**kwargs)\n if \"namespace\" in kwargs:\n del kwargs[\"namespace\"]\n\n config = self.AD.get_plugin(namespace).config\n if \"cert_path\" in config:\n cert_path = config[\"cert_path\"]\n else:\n cert_path = False\n\n if \"token\" in config:\n headers = {'Authorization': \"Bearer {}\".format(config[\"token\"])}\n elif \"ha_key\" in config:\n headers = {'x-ha-access': config[\"ha_key\"]}\n else:\n headers = {}\n\n apiurl = \"{}/api/services/{}/{}\".format(config[\"ha_url\"], d, s)\n r = requests.post(\n apiurl, headers=headers, json=kwargs, verify=cert_path\n )\n r.raise_for_status()\n return r.json()\n","sub_path":"appdaemon/plugins/hass/hassapi.py","file_name":"hassapi.py","file_ext":"py","file_size_in_byte":16134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"332520075","text":"import socket\nimport network\nimport time\n\ndef wifiAp():\n import ubinascii\n ap_if = network.WLAN(network.AP_IF)\n ap_if.active(True)\n essid = b\"UTTEC-%s\" % ubinascii.hexlify(ap_if.config(\"mac\")[-3:])\n ap_if.config(essid=essid, authmode=network.AUTH_WPA_WPA2_PSK, password=b\"123456789a\")\n print('mac:',b\"UTTEC-%s\" % ubinascii.hexlify(ap_if.config(\"mac\")[:]))\n\ndef wifiSta():\n sta = network.WLAN(network.STA_IF)\n sta.active(True)\n # 8764b1\n# sta.connect(\"UTTEC-8764b1\", \"123456789a\")\n sta.connect(\"utsol_tc140\", \"09090909\")\nwifiAp()\nwifiSta()\n\ndef Server():\n print('--------------- start Server -------------') #Setup Socket WebServer\n addr = socket.getaddrinfo('0.0.0.0', 80)[0][-1]\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n# sock.bind(('192.168.4.1', 80))\n sock.bind('',80)\n# sock.bind(addr)\n sock.listen(5)\n print('listening on', addr)\n count = 0\n\n while True:\n conn, addr = sock.accept()\n print(\"Got a connection from %s\" % str(addr))\n request = conn.recv(1024)\n print(\"Content = %s\" % 
str(request))\n response = 'I Received:: {}'.format(count)\n count += 1\n conn.send(response)\n conn.close()\n print(str(addr)+'close')\n\ndef Client():\n print('--------------- Wait 5Sec -------------') #Setup Socket WebServer\n time.sleep(5)\n print('--------------- start Client -------------') #Setup Socket WebServer\n # host='192.168.4.1' #my Computer Address\n host='192.168.185.12' #Windows Address\n port=80\n sock=socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n\n while True:\n sock=socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n sock.connect((host,port))\n sock.send(b'Hello, python Server')\n data=sock.recv(1024)\n sock.close()\n print('Received',repr(data))\n time.sleep(1)\nServer()\n#Client()\n","sub_path":"source/test/socket/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"222129065","text":"import time\nimport torch\nfrom torch_batch_svd import svd\n\n\ndef bench_speed(N, H, W):\n torch.manual_seed(0)\n a = torch.randn(N, H, W).cuda()\n b = a.clone().cuda()\n a.requires_grad = True\n b.requires_grad = True\n\n t0 = time.time()\n U, S, V = svd(a)\n t1 = time.time()\n print(\"Perform batched SVD on a {}x{}x{} matrix: {} s\".format(N, H, W, t1 - t0))\n\n t0 = time.time()\n U, S, V = torch.svd(b, some=True, compute_uv=True)\n t1 = time.time()\n print(\"Perform torch.svd on a {}x{}x{} matrix: {} s\".format(N, H, W, t1 - t0))\n\n\nif __name__ == '__main__':\n bench_speed(10000, 9, 9)\n bench_speed(20000, 9, 9)\n\n","sub_path":"benchs/bench_speed.py","file_name":"bench_speed.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"505726675","text":"\"\"\"Modulo que contiene las formulas de la Integracion Numerica (Integral de Romberg)\"\"\"\n\nfrom sage.all import SR, sage, round\nimport numpy as np\nimport sys\nfrom Mdls_Preparar_Metodos.Preparar_Programa import LLenar_Matriz_Datos, IntegralRom, EvalRom_Funcion\n\ndef Integracion_Romberg(nombre):\n \"\"\"Funcion que llevara a cabo la Integral de Romberg\"\"\"\n # Primero llena una matriz con los datos contenidos en el documento de texto\n funcion = LLenar_Matriz_Datos(nombre)\n\n # Si el usuario ingreso datos\n if type(funcion) != type(SR()):\n print(\"\\n\\nDebe ingresar una funcion\\n\\n\")\n sys.exit(1)\n\n # Primero pide los datos al usuario\n datosUsr = IntegralRom()\n\n # Crea la matriz que contendra los valores de las iteraciones de la integral de Romberg\n matRom = np.zeros((datosUsr[2], datosUsr[2]), dtype = 'f')\n\n # Bucle que calculara las primeras aproximaciones\n for cont in range(datosUsr[2]):\n # Llama a la funcion para realizar las evaluaciones necesarias y los almacena en la matriz 'matDatos'\n matDatos = EvalRom_Funcion(funcion, ((datosUsr[1] - datosUsr[0]) / (2 ** cont)), (2 ** cont), datosUsr[0])\n\n # Termino que multiplica a las sumas\n integral = ((datosUsr[1] - datosUsr[0]) / (2 ** cont)) / 2\n # Termino de las sumas\n sumatoria = matDatos[-1, 1] + matDatos[0, 1]\n for suma in range(1, (2 ** cont)):\n sumatoria += 2 * matDatos[suma, 1]\n\n integral *= sumatoria\n\n # Almacena los valores de las primeras aproximaciones en la primera columna de la matriz 'matDatos'\n matRom[cont, 0] = integral\n\n # Bucle anidado que aplicara la formula para la integral del Romberg [Burden p. 
209]\n for col in range(1, matRom.shape[0]):\n for fila in range(col, matRom.shape[0]):\n matRom[fila, col] = (((4 ** col) * matRom[fila, (col - 1)]) - matRom[(fila - 1), (col - 1)]) / ((4 ** col) - 1)\n\n # Crea el vector que contendra los errores de las integrales de Romberg\n vectErr = np.zeros((matRom.shape[0], 1), dtype = 'f')\n # Bucle que recorre todas los elementos de la diagonal\n for error in range(1, matRom.shape[0]):\n vectErr[error] = abs(matRom[error, error] - matRom[(error - 1), (error - 1)])\n\n # Anexa los valores de los errores a la matriz 'matRom'\n matRom = np.append(matRom, vectErr, axis = 1)\n\n # Imprime la matriz 'matRom'\n print(\"\\n\" + \"\\n\".join([''.join(['{:13}'.format(round(val, 8)) for val in fila]) for fila in matRom]) + \"\\n\")\n\n print(f\"\\nUsando la integracion de Romberg\")\n print(f\"La aproximacion de la integral de x = {datosUsr[0]} a x = {datosUsr[1]}\", end = \" \")\n print(f\"con {datosUsr[2]} iteraciones es: {round(matRom[-1, -2], 8)}, con un error de: {round(matRom[-1, -1], 8)}\\n\\n\")\n\ndef IntegracionRomberg():\n fNombre = input(\"Escribe el nombre del archivo sin escribir la extension '.txt': \")\n Integracion_Romberg(fNombre)\n\nif __name__ == \"__main__\":\n Integracion_Romberg(\"prueba\")","sub_path":"Mets7_DiffEIntNum/Met3_IntRom.py","file_name":"Met3_IntRom.py","file_ext":"py","file_size_in_byte":2989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"457948930","text":"from __future__ import unicode_literals\nfrom pymongo import MongoClient\nimport numpy as np\nfrom nltk.sentiment.vader import SentimentIntensityAnalyzer\nimport matplotlib.pyplot as plt\nplt.rcdefaults()\nimport matplotlib\nmatplotlib.style.use('ggplot')\nimport pandas as pd\nclient=MongoClient('mongodb://localhost:60000')\ndb=client.mydb1\np=db.customers.find()\nres=pd.DataFrame(list(p))\nsample_review=res.loc[res[\"BrandName\"]==\"CNPGD\",\"Review\"]\n\n\nsentiment = SentimentIntensityAnalyzer()\nli=[\"battery\",\"screen\",\"display\",\"camera\",\"accessories\",\"delivery\",\"design\",\"quality\",\"storage\",\"memory\",\"ram\",\"software\",\"weight\",\"bluetooth\"]\nli1=[0]*14\nfor sentences in sample_review:\n ss = sentiment.polarity_scores(sentences)\n '''for k in sorted(ss):\n print('{0}: {1}, '.format(k, ss[k]))\n print(sentences)'''\n if ss['neg']!=0.0:\n for k in range(len(li)):\n if li[k] in sentences:\n li1[k]=li1[k]+1\nfor k in range(len(li)):\n print(\"{0} : {1}\".format(li[k] , li1[k]))\ny_pos=np.arange(len(li))\nplt.bar(y_pos,li1,align='center',alpha=0.5)\nplt.xticks(y_pos,li)\n\nplt.suptitle(\"Distribution of Negative Reviews By Features\")\nplt.xlabel(\"Features Of Mobile\")\nplt.ylabel(\"Negative Review count\")\nplt.show()\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"412671839","text":"\"\"\"\r\nRemove Principle Components and Reconstruct\r\nMichael Heskett\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nimport scipy\r\nfrom scipy import linalg\r\nfrom sys import argv\r\nfrom datetime import datetime\r\n\r\nstartTime=datetime.now()\r\n\r\n####################################################\r\n\r\nif len(argv) != 3:\r\n print(\"Arg1: Sorted, Normalized Data Matrix samples x features \\n \\\r\nArg2: Path to output/file.tsv \\n \\\r\nArg3-N: space separated number of principle components to remove (0 index)\")\r\n 
exit()\r\n\r\n\r\n####################################################\r\n\r\ndf = pd.read_table(argv[1],header=0,index_col=0,sep='\\t')\r\nmatrix = df.as_matrix()\r\nexclude = argv[3:]\r\n\r\nmatrix -=np.mean(matrix,axis=0)\r\nU,s,V = linalg.svd(matrix)\r\nSigma = linalg.diagsvd(s,matrix.shape[0],matrix.shape[1])\r\nprin_comps = U.dot(Sigma)\r\n\r\n####################################################\r\n\r\nkeepers = list(range(0,11))\r\n\r\nfor i in range(len(exclude)):\r\n keepers.remove(exclude[i])\r\n\r\nreconstructed = prin_comps[:,keepers].dot(V[:,keepers].T)\r\n\r\npd.DataFrame(reconstructed,index=df.index,columns=df.columns).to_csv(argv[2],sep='\\t')\r\n\r\nprint(\"Time Elapsed: \" + datetime.now() - startTime)\r\nexit()\r\n","sub_path":"code/remove_PCs.py","file_name":"remove_PCs.py","file_ext":"py","file_size_in_byte":1194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"602421541","text":"#!/usr/bin/env python3\n\nwith open('input', 'r') as f:\n masses = f.read().splitlines()\n \n# Determine if our fuel needs any additional fuel\ndef fuel(x):\n f = x // 3 - 2\n return 0 if f <= 0 else (f + fuel(f))\n\ntotal = sum(list(map(lambda x : fuel(int(x)), masses)))\nprint(total)\n","sub_path":"2019/day1/day1.py","file_name":"day1.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"542191558","text":"\n# https://developers.google.com/chart/interactive/docs/gallery/geochart#displaying-proportional-markers\n# http://www.covidcheatsheet.org/What_is_happening.html#history_block\n\nimport csv\n\nshortData = [['latitude', 'longitude', 'city', 'state', 'country', 'cases']]\nvisualizationData = [['latitude', 'longitude', 'city', 'state', 'country', 'cases']]\n\nwith open('time_series_covid19_confirmed_US.csv') as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n line_count = 0\n for row in csv_reader:\n if (line_count != 0): #if not header row,\n visualizationData.append([float(row[8]), float(row[9]), row[5], row[6], row[7], row[len(row)-1]])\n if (line_count <= 25): # additionally, append to shorter data array\n shortData.append([float(row[8]), float(row[9]), row[5], row[6], row[7], row[len(row)-1]])\n line_count += 1\n \nprint(line_count)\nprint(shortData)\n\nwith open(\"shortdata.txt\", \"w\") as f:\n f.write(\"[\")\n for row in shortData:\n f.write(\"%s,\\n\" % row)\n f.write(\"]\")\n \nf.close()\n\nwith open(\"fulldata.txt\", \"w\") as f:\n f.write(\"[\")\n for row in visualizationData:\n f.write(\"%s,\\n\" % row)\n f.write(\"]\")\n \nf.close()\n\n\n\n\n","sub_path":"csse_covid_19_data/csse_covid_19_time_series/extractforgeomaps.py","file_name":"extractforgeomaps.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"385575063","text":"import sqlite3\nfrom flask_restful import Resource, reqparse\nfrom models.user import UserModel\n\nclass UserRegister(Resource):\n parcer = reqparse.RequestParser()\n parcer.add_argument('username',\n type=str,\n required=True,\n help=\"This field cannot be left blank!\"\n )\n parcer.add_argument('password',\n type=str,\n required=True,\n help=\"This field cannot be left blank!\"\n )\n \n def post(self):\n data = UserRegister.parcer.parse_args()\n\n if UserModel.find_by_username(data['username']):\n return {\"message\": \"A user with that username already exists\"}, 400 \n\n user = UserModel(**data)\n 
user.save_to_db()\n \n return {\"message\": \"User created successfully.\"}, 201\n","sub_path":"Section_6/code/resources/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"169478272","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('cmsplugin_pdf', '0001_initial'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='pdfpluginmodel',\n name='height',\n field=models.PositiveIntegerField(help_text=b'Hoogte als percentage (met %) of pixels (met px)', null=True, verbose_name=b'Hoogte', blank=True),\n ),\n migrations.AddField(\n model_name='pdfpluginmodel',\n name='width',\n field=models.PositiveIntegerField(help_text=b'Breedte als percentage (met %) of pixels (met px)', null=True, verbose_name=b'Breedte', blank=True),\n ),\n ]\n","sub_path":"cmsplugin_pdf/migrations/0002_auto_20171122_1300.py","file_name":"0002_auto_20171122_1300.py","file_ext":"py","file_size_in_byte":775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"90816512","text":"\"\"\"\nVinícius Vilar - ADS UNIFIP - Programação 1 - Lista 1\nPatos - PB | 2020\n\n6 - Escreva um programa que leia uma temperatura em graus Fahrenheit,\ntransforme-a em graus Celsius e exiba o resultado.\n\n\"\"\"\n\n\nfahr = float(input(\"Informe a temperatura em Fahrenheit: Fº\"))\ncelsius = (fahr - 32) * (5/9)\n\nprint(\"{}Fº convertido para graus celsius é {:.1f}Cº\".format(fahr, celsius))\n","sub_path":"Lista 1 - FIP/Ex006.py","file_name":"Ex006.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"481271940","text":"import json\nimport os\nimport shlex\nimport shutil\n\nfrom cleo import Application\nfrom cleo import CommandTester\n\nfrom pyklopp.console.commands.init import InitCommand\nfrom pyklopp.console.commands.train import TrainCommand\n\n\ndef test_success_init_simple_model():\n # Arrange\n # set up application with command\n application = Application()\n application.add(InitCommand())\n application.add(TrainCommand())\n\n # set up file path variables\n module_name = \"module_name_for_training\"\n module_file_path = module_name + \".py\"\n save_path = \"tmp-train-save-path/model.py\"\n dataset_module = \"tmp_training_dataset\"\n dataset_module_file_path = dataset_module + \".py\"\n\n user_config = {\"num_epochs\": 3} # Use only few epochs for test\n\n # clean up possible existing files\n if os.path.exists(module_file_path):\n os.remove(module_file_path)\n if os.path.exists(dataset_module_file_path):\n os.remove(dataset_module_file_path)\n if os.path.exists(save_path):\n shutil.rmtree(os.path.dirname(save_path))\n\n # write model to file path from which we want to import from\n content_model = \"\"\"\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass MyModel(nn.Module):\n def __init__(self, width: int, height: int):\n super(MyModel, self).__init__()\n c = 6 # intermediate_channels\n self.conv = nn.Conv2d(in_channels=3, out_channels=c, kernel_size=5)\n self.fc = nn.Linear((width-5+1)*(height-5+1)*c, 10)\n\n def forward(self, x):\n out = F.relu(self.conv(x))\n out = out.view(out.size(0), -1)\n return F.relu(self.fc(out))\n\n\ndef get_model(**args):\n return MyModel(width=32, height=32)\n\n\"\"\"\n with 
open(module_file_path, \"a\") as model_handle:\n model_handle.write(content_model)\n\n # write model to file path from which we want to import from\n content_dataset = \"\"\"\nimport torch\nimport numpy as np\nfrom torch.utils import data\n\n\nclass MyDataset(data.Dataset):\n def __len__(self):\n return 200\n\n def __getitem__(self, index):\n return torch.rand((3, 32, 32)), np.random.randint(0, 10)\n\n\ndef get_dataset(**args):\n return MyDataset()\n\n\"\"\"\n with open(dataset_module_file_path, \"a\") as dataset_handle:\n dataset_handle.write(content_dataset)\n\n command_init = application.find(\"init\")\n init_tester = CommandTester(command_init)\n init_tester.execute(module_name + \" --save=\" + save_path)\n\n command_train = application.find(\"train\")\n train_tester = CommandTester(command_train)\n train_tester.execute(\n save_path\n + \" \"\n + dataset_module\n + \".get_dataset\"\n + \" --config {json_config}\".format(\n json_config=shlex.quote(json.dumps(user_config))\n )\n )\n\n # Cleanup\n os.remove(module_file_path)\n os.remove(dataset_module_file_path)\n shutil.rmtree(os.path.dirname(save_path))\n","sub_path":"tests/commands/test_train.py","file_name":"test_train.py","file_ext":"py","file_size_in_byte":2892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"437059452","text":"#==============================================================================\r\n#AUTOMATED GEOCODER\r\n\r\n# Last Modified: 02/27/2018\r\n# Author: Jonathan Engelbert (Jonathan.Engelbert@sfgov.org)\r\n\r\n# Description: This script calls scripts that standardizes and geocodes a list\r\n# of addresses from an Excel spreadsheet\r\n\r\n\r\n#==============================================================================\r\n\r\nimport openpyxl\r\nimport imperfect_addresses\r\nimport geocoding_v2\r\nimport os\r\n\r\nprint(\"======================================================\")\r\nprint(\"=================AUTOMATED GEOCODER===================\")\r\nprint(\"======================================================\")\r\n\r\n#PATHS\r\n\r\ngdb = \"I:\\GIS\\OASIS\\Geocoder\\geocoder.gdb\"\r\narcpy.env.workspace = gdb\r\n\r\n#DELETE EXISTING FILES IN GEODATABASE\r\n\r\nprint(\"\\nPreparing Geodatabase...\")\r\n\r\nfeature_classes = arcpy.ListFeatureClasses()\r\ntables = arcpy.ListTables()\r\n\r\ntry:\r\n for fc in feature_classes:\r\n arcpy.Delete_management(fc)\r\n\r\n for table in tables:\r\n arcpy.Delete_management(table)\r\n\r\n#COMPRESS GEODATABASE\r\n\r\n arcpy.CompressFileGeodatabaseData_management(gdb)\r\n\r\n print(\"GEODATABASE READY\")\r\n\r\nexcept Exception as e:\r\n print(\"FAILED TO PREPARE GEODATABASE. POSSIBLE LOCK. 
\\n\\n\")\r\n print(e)\r\n raw_input()\r\n\r\n#INITIALIZING SPREADSHEET AND SWITCHES\r\n\r\nstandardize_addresses = True\r\n\r\ngeocode = raw_input(\"\\n\\tWould you like to geocode the \"\r\n \"spreadsheet?\\n\\t\\t\\t(Y/N)\\n\")\r\n\r\nif geocode == \"Y\" or geocode == \"y\":\r\n geocode_spreadsheet = True\r\n\r\nelse:\r\n geocode_spreadsheet = False\r\n\r\n#PROCESSES\r\n\r\nif standardize_addresses:\r\n try:\r\n imperfect_addresses.transform()\r\n print(\"\\n******************************************************\\n\")\r\n print(\"Cleaned addresses are stored in:\\nI:\\GIS\\OASIS\\Geocoder\"\r\n \"\\\\transformed.xlsx\")\r\n print(\"\\n******************************************************\\n\")\r\n if geocode_spreadsheet:\r\n try:\r\n print(\"Geocoding addresses...\")\r\n geocoding_v2.geocode()\r\n\r\n#GENERATES REPORT FOR GEOCODING\r\n\r\n # Cursor and target feature class\r\n\r\n fc = \"I:\\\\GIS\\\\OASIS\\\\Geocoder\\\\geocoder.gdb\\\\final\"\r\n cursor = arcpy.da.SearchCursor(fc, ['geocoder'])\r\n\r\n # Variables\r\n\r\n eas = 0\r\n sc = 0\r\n u = 0\r\n total = arcpy.GetCount_management(fc).getOutput(0)\r\n\r\n # Logic\r\n\r\n for row in cursor:\r\n if \"EAS\" in row:\r\n eas += 1\r\n elif \"SC\" in row:\r\n sc += 1\r\n elif \"U\" in row:\r\n u += 1\r\n\r\n # Result Rates\r\n\r\n eas_percentage = (100 * eas / int(total))\r\n sc_percentage = (100 * sc / int(total))\r\n u_percentage = (100 * u / int(total))\r\n success_rate = eas_percentage + sc_percentage\r\n\r\n #Report Output:\r\n\r\n print(\r\n \"\\n******************************************************\\n\")\r\n print(\"GEOCODING RESULTS:\\n\\nMaster Address Geocoder: \" +\r\n str(eas) + \" record(s) geocoded(\" + str(eas_percentage)\r\n + \"%)\")\r\n\r\n print(\"Street Centerlines Geocoder: \" + str(sc) + \" \"\r\n \"record(s) \"\r\n \"geocoded(\" + str(sc_percentage) + \"%)\")\r\n\r\n print(\"Unmatched Records: \" + str(u) + \" \"\r\n \"record(s) \"\r\n \"not geocoded(\" + str(u_percentage) + \"%)\")\r\n\r\n print(\"\\n\\nRECORDS GEOCODED: \" +\r\n str(success_rate) + \"%\")\r\n\r\n print(\r\n \"\\n******************************************************\\n\")\r\n\r\n raw_input()\r\n except Exception as e:\r\n print(\"\\nGEOCODING FAILED\\n\\n\")\r\n print(e)\r\n\r\n except Exception as e:\r\n print(\"\\nADDRESS TRANSFORMATION FAILED\\n\\n\")\r\n print(e)\r\n\r\n","sub_path":"development/v2/automated_geocoder.py","file_name":"automated_geocoder.py","file_ext":"py","file_size_in_byte":4213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"259441359","text":"from fastapi import FastAPI, HTTPException\nimport math\n\n\n# Calculate nth Fibonacci with O(n) complexity\ndef calculateFibonacciOn(num):\n if num < 0:\n raise HTTPException(status_code=404, detail=\"Value of input should not be negative\", headers={\n 'ValueError': \"Only Positive and 0 are allowed\"}) # Handling Exception\n\n first_num = 0\n second_num = 1\n fib = 0\n for i in range(1, num):\n fib = first_num + second_num\n first_num = second_num\n second_num = fib\n\n return fib\n\n# Calculate nth Fibonacci with O(1) complexity\n\n\ndef calculateFibonaccioO1(num):\n if num < 0:\n raise HTTPException(status_code=404, detail=\"Value of input should not be negative\",\n headers={'ValueError': \"Only Positive and 0 are allowed\"}) # Handling Exception\n phi = (1 + math.sqrt(5)) / 2\n fibo = round(pow(phi, num)/math.sqrt(5))\n return fibo\n# Calculate Factorial of Given Number\n\n\ndef calculateFactorial(num):\n if num < 
0:\n raise HTTPException(status_code=404, detail=\"Value of input should not be negative\",\n headers={'ValueError': \"Only Positive and 0 are allowed\"})\n\n if num == 0:\n return 1\n\n return (num*calculateFactorial(num-1))\n\n\napp = FastAPI()\n\n\n@app.get('/')\ndef index():\n return {'For Factorial': '/factorial', 'For Fibonacci': '/fibonacci'}\n\n\n@app.get('/factorial/{num}')\ndef getFactorial(num: int):\n \"\"\"\n Calculating Factorial: Arguments ---> int\n Calculating Facotorial of given number using recursion Time Complexity: O(n)\n \"\"\"\n result = calculateFactorial(num)\n return {num: result}\n\n\n@app.get('/fibonacci/{num}')\ndef getFibonacci(num: int, complexity):\n \"\"\"\n Calculating Fibonacci: Arguments ---> int, complexity = 1 or n \\n\n Calculating fibonacci of given number by given time complexity\n \"\"\"\n if complexity == 'n':\n result = calculateFibonacciOn(num)\n elif complexity == '1':\n result = calculateFibonaccioO1(num)\n else:\n raise HTTPException(status_code=404, detail='Choose time complexity between O(1) or O(n)', headers={\n 'ComplexityError': 'input either 1 or n'})\n return {num: result}\n","sub_path":"modules/math_module.py","file_name":"math_module.py","file_ext":"py","file_size_in_byte":2228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"9436569","text":"\n\nimport numpy as np\n\nfrom numpy.random import RandomState, SeedSequence, MT19937\n\n\n \nclass Gaussian:\n \n def __init__(self, seed = 1010, mu = 0, sigma = 1):\n self.random = RandomState(MT19937(SeedSequence(int(seed))))\n \n self.mu = mu\n self.sigma = sigma\n \n \n def loglike(self, sample):\n # log likelihood of a specific sample \n return np.log(self.Gaussian_pdf(sample))\n \n def Gaussian_pdf(self, sample):\n #probability density of a sample\n a = sample\n mu = self.mu\n sigma = self.sigma\n \n return np.exp( - (a - mu)**2 / (2 * sigma**2))*1/(sigma*np.sqrt(2*np.pi))\n\n # function returns a random number from the Gaussian distr. 
using slice sampling method\n def Gaussian_sample(self, init, Nsample):\n samples = np.zeros(Nsample)\n \n pdf = self.Gaussian_pdf\n random = self.random\n sigma = self.sigma\n\n # initialize\n x_0 = init\n \n \n #generate a sequence of Nsample samples.\n for i in range(Nsample):\n p_0 = pdf(x_0)\n \n \n #pick a random place on the vertical line\n p = p_0*random.rand()\n \n # set a horizontal slice\n r = random.rand()\n x_1 = x_0 - r*sigma \n x_2 = x_0+(1-r)*sigma\n \n #increase length of the slice\n p_1 = pdf(x_1)\n while p_1 > p:\n x_1 = x_1 - sigma\n p_1 = pdf(x_1)\n p_2 = pdf(x_2)\n while p_2 > p:\n x_2 = x_2 + sigma\n p_2 = pdf(x_2)\n \n #try a sample\n while True:\n x = random.rand()*(x_2-x_1)+x_1\n p_0 = pdf(x)\n # if x is actually in the slice, take it, else adjust the length of the slice and find a new one.\n if p_0 > p:\n x_0 = x\n break\n elif x>x_0:\n x_2 = x\n elif x p:\n x_1[j] = x_1[j] - sigma[j]\n p_1 = pdf(x_1)\n p_2 = pdf(x_2)\n while p_2 > p:\n x_2[j] = x_2[j] + sigma[j]\n p_2 = pdf(x_2)\n \n #try a sample\n x_3 = x_0.copy()\n while True:\n x = random.rand()*(x_2[j]-x_1[j])+x_1[j]\n x_3[j] = x\n p_0 = pdf(x_3)\n # if x is actually in the slice, take it, else adjust the length of the slice and find a new one.\n if p_0 > p:\n x_0[j] = x\n break\n elif x>x_0[j]:\n x_2[j] = x\n elif x'2010-01-01']\r\nmaster_sec_df['year']=pd.DatetimeIndex(master_sec_df['date']).year\r\nmaster_sec_df['month']=pd.DatetimeIndex(master_sec_df['date']).month\r\n\r\n#aggregate to monthly\r\npysqldf = lambda q: sqldf(q, globals())\r\nq = \"\"\"\r\nSELECT DISTINCT\r\nticker, year, month,\r\nSUM(poswords) as sec_pos_words,\r\nSUM(negwords) as sec_neg_words,\r\nSUM(daily_filing_count) as mthly_filing_count\r\nFROM\r\nmaster_sec_df\r\nGROUP BY\r\nticker, year, month\r\nORDER BY\r\nticker, year, month\r\n\"\"\"\r\nmthly_sec= pysqldf(q)\r\n\r\n#GOOGLE SCRAPE EVENTS\r\nscrape_df=pd.read_csv(\"constructed\\\\capstone\\\\google_scrape_mthly.csv\")\r\nscrape_df= scrape_df.loc[:, ~scrape_df.columns.str.contains('^Unnamed')]\r\n\r\nall_mthly_df1=pd.merge(mthly_finance, mthly_sec, how='left', \\\r\non=['ticker', 'year', 'month'], validate='one_to_one')\r\n\r\nall_mthly_df2=pd.merge(all_mthly_df1, scrape_df, how='left', \\\r\non=['ticker', 'year', 'month'], validate='one_to_one')\r\n \r\nall_mthly_df2['sec_pos_words'].fillna(0, inplace=True)\r\nall_mthly_df2['sec_neg_words'].fillna(0, inplace=True)\r\nall_mthly_df2['mthly_filing_count'].fillna(0, inplace=True)\r\nall_mthly_df2['gs_poswords'].fillna(0, inplace=True)\r\nall_mthly_df2['gs_negwords'].fillna(0, inplace=True)\r\n\r\nall_mthly_df2.to_csv(\"constructed\\\\capstone\\\\combined_mthly_dataset.csv\", sep=',')","sub_path":"python/capstone/4. Capstone Merge and Analyze.py","file_name":"4. 
Capstone Merge and Analyze.py","file_ext":"py","file_size_in_byte":4821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"457610580","text":"\"\"\" importes da biblioteca Python para configuracao de data e hora \"\"\"\nfrom datetime import date, datetime\n\n\"\"\" importes das classes de negocio da aplicacao de BOLAO \"\"\"\nfrom apostador import Apostador\nfrom selecao_desafiante import SelecaoDesafiante\nfrom selecao_visitante import SelecaoVisitante\nfrom partida import Partida\nfrom aposta import Aposta\nfrom bolao import Bolao\n\nclass Main():\n \"\"\" Classe nucleo da aplicacao \"\"\"\n def __init__(self):\n \n # criando os apostadores\n apostador1 = Apostador(\"Emerson\",\"eme1\",\"123456\")\n apostador2 = Apostador(\"Lucas Va\",\"lukita\",\"123456\")\n apostador3 = Apostador(\"Pedro F\",\"pedroco\",\"123456\")\n apostador4 = Apostador(\"Lucas Vi\",\"ligero\",\"123456\")\n\n # criando as selecoes desafiante e visitante\n sel_des1 = SelecaoDesafiante(\"Brasil\", 5)\n sel_vis1 = SelecaoVisitante(\"Argentina\", 3)\n\n # criando a partida\n partida1 = Partida(sel_des1, sel_vis1, \"Fonte Nova\", datetime.strptime('18-10-2020 16:00', '%d-%m-%Y %H:%M'))\n\n # criando as apostas\n aposta1 = Aposta(apostador1, partida1, 2, 0)\n aposta2 = Aposta(apostador2, partida1, 3, 1)\n aposta3 = Aposta(apostador3, partida1, 2, 1)\n aposta4 = Aposta(apostador4, partida1, 1, 1)\n\n #apostadores = [apostador1, apostador2, apostador3, apostador4]\n\n # definindo o nome do bolao\n bolao1 = Bolao(\" Classico das americas: Brasil X Argentina \")\n\n # adicionando apostas realizadas no bolao\n bolao1.adicionar_aposta(aposta1)\n bolao1.adicionar_aposta(aposta2)\n bolao1.adicionar_aposta(aposta3)\n bolao1.adicionar_aposta(aposta4)\n\n #exibe as apostas associadas ao bolao informdo\n print(bolao1)\n\n #testando a remoção de uma aposta\n \"\"\" bolao1.remover_aposta(aposta1)\n print(bolao1)\n print(\"Valor total em disputa = {}\".format(bolao1.valor_disputado)) \"\"\"\n\n # definindo o placar da partida\n partida1.set_gols_desafiante(5)\n partida1.set_gols_visitante(0)\n\n # exibe resultado da partida\n print(partida1)\n\n # apurar os possiveis vencedores em funcao do placar informado\n bolao1.verificar_vencedores()\n\n # exibir vencedores com o valor de sua respectiva premiacao\n print(\">> Vencedores: \")\n if bolao1.vencedores:\n for ven in bolao1.vencedores:\n print(ven.nome, \"\\tpremio -> \", bolao1.premiacao)\n else:\n print(\"Ninguem acertou o bolao, sem ganhadores!\")\n print(\"\\n\")\n \n # exibir os apostadores e seus crediros\n print(\">> Apostadore / Creditos / Premio:\")\n for ap in bolao1.apostas:\n print(ap.apostador.nome, \"\\tcredito -> \", ap.apostador.credito, \"\\tpremio -> \", ap.apostador.premiacao_ganha)\n\n print(\"\\n\")\n\n # exibir a classificacao dos apostadores (em processo de construcao)\n \"\"\" for ap in sorted(apostadores, key=\"premiacao_ganha\", reverse=True) :\n print(ap.apostador.nome, \"credito -> \", ap.apostador.premiacao_ganha)\n print(\"\\n\") \"\"\"\n\n\napp_run = Main()\n\n","sub_path":"bolaopy/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"194474211","text":"import json\nimport ssl\nimport urllib.error\nimport urllib.parse\nimport urllib.request\nimport folium\nfrom geopy.geocoders import ArcGIS\nfrom task2 import twurl\n\nTWITTER_URL = 'https://api.twitter.com/1.1/users/show.json'\n\n# 
Ignore SSL certificate errors\nctx = ssl.create_default_context()\nctx.check_hostname = False\nctx.verify_mode = ssl.CERT_NONE\n\ndef latlon(film):\n    geolocator = ArcGIS()\n    loc = film\n    coordinates = geolocator.geocode(loc, timeout=100)\n    return [coordinates.latitude, coordinates.longitude]\n\ndef user_info(name):\n    url = twurl.augment(TWITTER_URL,\n                        {'screen_name': name, 'count': '1'})\n    print('Retrieving', url)\n    connection = urllib.request.urlopen(url, context=ctx)\n    data = connection.read().decode()\n    d = json.loads(data)\n    headers = dict(connection.getheaders())\n    # print('Remaining', headers['x-rate-limit-remaining'])\n\n    for key in d:\n        if d[key] is None:\n            d[key] = 'unknown'\n    name = 'name: ' + d['name'] + '\\n'\n    sn = 'nickname: ' + d['screen_name'] + '\\n'\n    locat = 'location: ' + d['location'] + '\\n'\n    desc = 'prof. description: ' + d['description'] + '\\n'\n    url = 'url: ' + d['url'] + '\\n'\n    foll = 'followers: ' + str(d['followers_count']) + '\\n'\n    fr = 'friends: ' + str(d['friends_count']) + '\\n'\n    dt = 'created at: ' + d['created_at'] + '\\n'\n    img = 'image: ' + d['profile_image_url'] + '\\n'\n\n    user_map = folium.Map(location=[48, 25], zoom_start=3)\n    loc = folium.FeatureGroup(name=\"User location\")\n    loc.add_child(folium.Marker(location=latlon(d['location']),\n                                popup=name + sn + locat + desc\n                                + url + foll + fr + dt + img, icon=folium.Icon('cloud')))\n\n    user_map.add_child(loc)\n    user_map.save('templates/user.html')\n","sub_path":"twitter_app.py","file_name":"twitter_app.py","file_ext":"py","file_size_in_byte":1798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"152331800","text":"# -*- coding: utf-8 -*- \n\nimport random\n\nimport telebot\nimport os\nfrom flask import Flask, request\n\n# Creating the bot\nTOKEN = os.environ['TELEGRAM_BOT_TOKEN'] # Token previously stored in an environment var\nbot = telebot.TeleBot(TOKEN)\n\n# Creating the server\nserver = Flask(__name__)\n\n\ndef select_response(message):\n\tresponses = ['Ie {} tio, no et canses?', '{}, eres un puto pesat de tio', 'Ie {}, ja hi ha prou que ja cansa',\n\t\t\t\t'Collons {}, que pesat eres quan vols', \"Que si {}, tio pesat, que ja t'hem llegit\"]\n\n\tresponse_to_use = random.choice(responses)\n\tresponse = response_to_use.format(message.from_user.first_name)\n\n\treturn response\n\n\n# The decorator (@bot.message_handler) indicates the type of messages that will activate this function\n# In this case, we'll activate it for every message. 
See telebot API for more possibilities \n@bot.message_handler(func=lambda message: True)\ndef pole_reply(message):\n\t# If the message contains the word 'pole' (case insensitive), the bot replies\n\tif 'pole' in message.text.lower():\n\t\tresposta = select_response(message)\n\t\tbot.reply_to(message, resposta)\n\n# Server configuration\n@server.route(\"/bot\", methods=['POST'])\ndef getMessage():\n bot.process_new_updates([telebot.types.Update.de_json(request.stream.read().decode(\"utf-8\"))])\n return \"!\", 200\n\n@server.route(\"/\")\ndef webhook():\n bot.remove_webhook()\n bot.set_webhook(url=\"https://pesatbot.herokuapp.com/bot\")\n return \"!\", 200\n\n# Running the server\n# It's very important to set the port with the environment variable, because it's how heroku stores it\nserver.run(host=\"0.0.0.0\", port=os.environ.get('PORT', 5000))\nserver = Flask(__name__)\n","sub_path":"pesatbot.py","file_name":"pesatbot.py","file_ext":"py","file_size_in_byte":1661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"156846796","text":"#*_*coding:utf-8*_*\n\nimport xml.dom.minidom as xmlDoc\nimport os\nimport lib.gl\nimport sys\nimport time\n\n\nclass cREPORTXML(object):\n def __init__(self):\n self.__struct = self.createReportNode()\n\n #创建report节点\n def createReportNode(self):\n try:\n xmlD = xmlDoc.Document()\n\n #xml样式\n xlstNode = xmlD.createProcessingInstruction(\"xml-stylesheet\",\"href=\\\"../../LOG.XSLT\\\" type=\\\"text/xsl\\\"\")\n xmlD.appendChild(xlstNode)\n\n report = xmlD.createElement('REPORT')\n xmlD.appendChild(report)\n\n overStatus = xmlD.createElement('OVER_STATUS')\n overStatus.appendChild(xmlD.createTextNode(str(gl.getOverallStatus('OVER_STATUS'))))\n report.appendChild(overStatus)\n\n\n\n returnResult = []\n returnResult.append(xmlD)\n returnResult.append(report)\n returnResult.append(overStatus)\n\n except Exception as ex:\n return ex\n return returnResult\n '''\n def writeOverStatus(self,overStatus = 'PASSED'):\n self.__struct[2]._get_childNodes().item(0).nodeValue = overStatus\n '''\n\n def writeReport(self,dict,xmlPath):\n #reportNodeList = self.createReportNode()\n\n entry = self.createLogEntry(self.__struct[0],dict)\n self.__struct[1].appendChild(entry)\n\n self.writeXml(self.__struct[0],xmlPath + r'\\reportLog.xml')\n #self.writeXml(self.__struct[0],gl.reporterPath+'reportxml_%s.xml'%(gl.curTimeStr))\n\n\n #-------------创建xml格式-有多个相同的节点,并且该节点下有4个名称相同的子节点----------------\n #createLogEntry(self,docObj,executeTime,stepResult,description,stepDiscription,action,index,element,value,expectResult):\n def createLogEntry(self,docObj,dict):\n entry = docObj.createElement(\"LOG_ENTRY\")\n\n status = docObj.createElement(\"STATUS\")\n nodeStep = docObj.createElement(\"STEP\")\n nodeExecuteTime = docObj.createElement(\"EXECUTION_TIME\")\n nodeStepResult = docObj.createElement(\"STEP_RESULT\")\n nodeComponentName = docObj.createElement(\"DESCRIPTION\")\n nodeStepDiscription = docObj.createElement(\"STEP_DESCRIPTION\")\n nodeActin = docObj.createElement(\"ACTION\")\n nodeIndex = docObj.createElement(\"INDEX\")\n nodeElement = docObj.createElement(\"ELEMENT\")\n nodeValue = docObj.createElement(\"VALUE\")\n nodeExpectResult = docObj.createElement(\"EXPECTED_RESULTS\")\n nodeimagePath = docObj.createElement('IMAGE_PATH')\n\n status.appendChild(docObj.createTextNode(dict['stepResult']))\n\n nodeStep.appendChild(docObj.createTextNode(str(dict['Step'])))\n nodeExecuteTime.appendChild(docObj.createTextNode(dict['executeTime']))\n 
nodeStepResult.appendChild(docObj.createTextNode(dict['stepResult']))\n nodeComponentName.appendChild(docObj.createTextNode(dict['description']))\n nodeStepDiscription.appendChild(docObj.createTextNode(dict['stepDiscription']))\n nodeActin.appendChild(docObj.createTextNode(dict['action']))\n nodeIndex.appendChild(docObj.createTextNode(dict['index']))\n nodeElement.appendChild(docObj.createTextNode(dict['element']))\n nodeValue.appendChild(docObj.createTextNode(dict['value']))\n nodeExpectResult.appendChild(docObj.createTextNode(dict['expectResult']))\n nodeimagePath.appendChild(docObj.createTextNode(dict['ImagePath']))\n\n entry.appendChild(status)\n entry.appendChild(nodeStep)\n entry.appendChild(nodeExecuteTime)\n entry.appendChild(nodeStepResult)\n entry.appendChild(nodeComponentName)\n entry.appendChild(nodeStepDiscription)\n entry.appendChild(nodeActin)\n entry.appendChild(nodeIndex)\n entry.appendChild(nodeElement)\n entry.appendChild(nodeValue)\n entry.appendChild(nodeExpectResult)\n entry.appendChild(nodeimagePath)\n return entry\n\n\n #参数,xml对象,准备存储xml文件路径,文件模式:读 and 写 (r and w)\n def writeXml(self,xmlDoc,xmlPath):\n f = open(xmlPath,\"w\")\n xmlDoc.writexml(f,indent='\\t', addindent='\\t', newl='\\n', encoding=\"utf-8\")\n f.close()\n\n\n\n\nif __name__=='__main__':\n #createLogEntry(self,docObj,executeTime,stepResult,description,stepDiscription,action,index,element,value,expectResult):\n curTime = time.strftime('%Y.%m.%d %H:%M:%S',time.localtime())\n print(curTime)\n dict = {\n 'step':'',\n 'executeTime':curTime,\n 'stepResult':'PASSED',\n 'description':'普惠家登录',\n 'stepDiscription':\"输入'用户名'\",\n 'action':'input',\n 'index':'By.XPATH',\n 'element':\"//Input[@id='userName']\",\n 'value':'msh195',\n 'expectResult':'输入用户名'}\n\n reportx =cREPORTXML()\n reportx.writeReport(dict)\n","sub_path":"lib/reportLog.py","file_name":"reportLog.py","file_ext":"py","file_size_in_byte":4937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"172471109","text":"import torch.nn as nn\nimport numpy as np\nimport torch\nimport copy\nimport json\nfrom Sublayers import MultiHeadedAttention, PositionwiseFeedForward, Generator, LayerNorm\nfrom Layers import EncoderLayer, DecoderLayer, clones\nfrom Embed import Embeddings, PositionalEncoding\nfrom Layers import EncoderLayer4KG\n\n# =============================================================================\n#\n# Full Model : 整体模型\n#\n# =============================================================================\n# 定义一个函数,它接受超参数并生成完整的模型。\n# Transformer由encoder和decoder组成。其中用到的sublayer有MultiHeadedAttention,PositionwiseFeedForward,这两个是在encoder和decoder中的,\n# 然后PositionalEncoding和Embeddings是用在输入之后,encoder及decoder层之间的,Generator是用在decoder之后的\ndef make_model(src_vocab, tgt_vocab, N=6, d_model=512, d_ff=2048, h=8, dropout=0.1):\n\t\"从超参数构造模型\"\n\tc = copy.deepcopy\n\tattn = MultiHeadedAttention(h, d_model)\n\tff = PositionwiseFeedForward(d_model, d_ff, dropout)\n\tposition = PositionalEncoding(d_model, dropout)\n\tmodel = EncoderDecoder(\n\t\tEncoder(EncoderLayer(d_model, c(attn), c(ff), dropout), N),\n\t\tDecoder(DecoderLayer(d_model, c(attn), c(attn),c(ff), dropout), N),\n\t\tnn.Sequential(Embeddings(d_model, src_vocab), c(position)),\n\t\tnn.Sequential(Embeddings(d_model, tgt_vocab), c(position)),\n\t\tGenerator(d_model, tgt_vocab))\n\n\t# 从代码来看,使用 Glorot / fan_avg初始化参数很重要。\n\t# 对参数进行均匀分布初始化\n\tfor p in model.parameters():\n\t\tif p.dim() > 1:\n\t\t\tnn.init.xavier_uniform_(p)\n\treturn model\n\nclass 
EncoderDecoder(nn.Module):\n\t\"\"\"\n\t标准编码器-解码器结构,本案例及其他各模型的基础。\n\t\"\"\"\n\n\tdef __init__(self, encoder, decoder, src_embed, tgt_embed, generator):\n\t\tsuper(EncoderDecoder, self).__init__()\n\t\tself.encoder = encoder\n\t\tself.decoder = decoder\n\t\tself.src_embed = src_embed\n\t\tself.tgt_embed = tgt_embed\n\t\tself.generator = generator\n\n\tdef forward(self, src, tgt, src_mask, tgt_mask):\n\t\t\"处理屏蔽的源序列与目标序列\"\n\t\treturn self.decode(self.encode(src, src_mask), src_mask, tgt, tgt_mask)\n\n\tdef encode(self, src, src_mask):\n\t\treturn self.encoder(self.src_embed(src), src_mask)\n\n\tdef decode(self, memory, src_mask, tgt, tgt_mask):\n\t\treturn self.decoder(self.tgt_embed(tgt), memory, src_mask, tgt_mask)\n\n\n# =============================================================================\n#\n# Encoder 编码器\n#\n# =============================================================================\nclass Encoder(nn.Module):\n\t\"核心编码器是N层堆叠\"\n\n\tdef __init__(self, layer, N):\n\t\tsuper(Encoder, self).__init__()\n\t\tself.layers = clones(layer, N)\n\t\tself.norm = LayerNorm(layer.size)\n\n\tdef forward(self, x, mask):\n\t\t\"依次将输入的数据(及屏蔽数据)通过每个层\"\n\t\tfor layer in self.layers:\n\t\t\tx = layer(x, mask)\n\t\treturn self.norm(x)\n\n# =============================================================================\n#\n# Decoder 解码器\n#\n# =============================================================================\n# 解码器也由一个N=6个相同层的堆栈组成。\nclass Decoder(nn.Module):\n\t\"带屏蔽的通用N层解码器\"\n\n\tdef __init__(self, layer, N):\n\t\tsuper(Decoder, self).__init__()\n\t\tself.layers = clones(layer, N)\n\t\tself.norm = LayerNorm(layer.size)\n\n\tdef forward(self, x, memory, src_mask, tgt_mask):\n\t\tfor layer in self.layers:\n\t\t\tx = layer(x, memory, src_mask, tgt_mask)\n\t\treturn self.norm(x)\n\n\ndef subsequent_mask(size):\n\t\"屏蔽后续位置\"\n\tattn_shape = (1, size, size)\n\tsubsequent_mask = np.triu(np.ones(attn_shape), k=1).astype('uint8')\n\treturn torch.from_numpy(subsequent_mask) == 0\n\n# plt.figure(figsize=(5, 5))\n# plt.imshow(subsequent_mask(20)[0])\n\n\ndef make_model_kg(src_vocab, tgt_vocab, kg_embed, N=6, d_model=512, d_ff=2048, d_intermediate=512, h=8, dropout=0.1):\n\t\"从超参数构造模型\"\n\tc = copy.deepcopy\n\tattn = MultiHeadedAttention(h, d_model)\n\tattn_ent = MultiHeadedAttention(h, d_model)\n\tff = PositionwiseFeedForward(d_model, d_ff, dropout)\n\tposition = PositionalEncoding(d_model, dropout)\n\twith open(kg_embed, \"r\", encoding='utf-8') as f:\n\t\tlines = json.loads(f.read())\n\t\tvecs = list()\n\t\t# vecs.append([0] * 100) # CLS\n\t\tfor (i, line) in enumerate(lines):\n\t\t\tif line == \"ent_embeddings\":\n\t\t\t\tfor vec in lines[line]:\n\t\t\t\t\tvec = [float(x) for x in vec]\n\t\t\t\t\tvecs.append(vec)\n\tembed = torch.FloatTensor(vecs)\n\tmodel = EncoderDecoder4KG(\n\t\tEncoder4KG(EncoderLayer4KG(d_model, d_intermediate, c(attn), c(attn_ent), c(ff), dropout), N),\n\t\tDecoder(DecoderLayer(d_model, c(attn), c(attn), c(ff), dropout), N),\n\t\tnn.Sequential(Embeddings(d_model, src_vocab), c(position)),\n\t\ttorch.nn.Embedding.from_pretrained(embed),\n\t\tnn.Sequential(Embeddings(d_model, tgt_vocab), c(position)),\n\t\tGenerator(d_model, tgt_vocab))\n\n\t# 从代码来看,使用 Glorot / fan_avg初始化参数很重要。\n\t# 对参数进行均匀分布初始化\n\tfor p in model.parameters():\n\t\tif p.dim() > 1:\n\t\t\tnn.init.xavier_uniform_(p)\n\treturn model\n\n\nclass EncoderDecoder4KG(nn.Module):\n\t\"\"\"\n\t标准编码器-解码器结构,本案例及其他各模型的基础。\n\t\"\"\"\n\n\tdef __init__(self, encoder, decoder, src_embed, ent_embed, tgt_embed, 
generator):\n\t\tsuper(EncoderDecoder4KG, self).__init__()\n\t\tself.encoder = encoder\n\t\tself.decoder = decoder\n\t\tself.src_embed = src_embed\n\t\tself.ent_embed = ent_embed\n\t\tself.tgt_embed = tgt_embed\n\t\tself.generator = generator\n\n\tdef forward(self, src, ent, tgt, src_mask, ent_mask, tgt_mask):\n\t\t\"处理屏蔽的源序列与目标序列\"\n\t\treturn self.decode(self.encode(src, src_mask, ent, ent_mask), src_mask, tgt, tgt_mask)\n\n\tdef encode(self, src, src_mask, ent, ent_mask):\n\t\treturn self.encoder(self.src_embed(src), src_mask, self.ent_embed(ent), ent_mask)\n\n\tdef decode(self, memory, src_mask, tgt, tgt_mask):\n\t\treturn self.decoder(self.tgt_embed(tgt), memory, src_mask, tgt_mask)\n\n\nclass Encoder4KG(nn.Module):\n\t\"核心编码器是N层堆叠\"\n\n\tdef __init__(self, layer, N):\n\t\tsuper(Encoder4KG, self).__init__()\n\t\tself.layers = clones(layer, N)\n\t\tself.norm = LayerNorm(layer.size)\n\n\tdef forward(self, x, mask, ent, ent_mask):\n\t\t\"依次将输入的数据(及屏蔽数据)通过每个层\"\n\t\tfor layer in self.layers:\n\t\t\tx, ent = layer(x, mask, ent, ent_mask)\n\t\treturn self.norm(x)","sub_path":"autoComapp/Autocomplete_Transformer/Model.py","file_name":"Model.py","file_ext":"py","file_size_in_byte":6350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"213533812","text":"from __future__ import print_function\nfrom __future__ import unicode_literals\nfrom __future__ import division\nfrom __future__ import absolute_import\nfrom builtins import range\nfrom future import standard_library\nstandard_library.install_aliases()\nimport matplotlib.pyplot as plt\nimport ROOT\nimport numpy\nimport matplotlib.pyplot as plt\n\ndef loadWaveform(filename):\n \"\"\"\n result[freq][amp][iChip][iChan][iSample]\n \"\"\"\n f = ROOT.TFile(filename)\n tree = f.Get(\"femb_wfdata\")\n metadataTree = f.Get(\"metadata\")\n metadatas = []\n amplitudes = set()\n frequencies = set()\n metadataTree.GetEntry(0)\n metadata = {\n 'funcType': metadataTree.funcType,\n 'funcAmp': metadataTree.funcAmp,\n 'funcOffset': metadataTree.funcOffset,\n 'funcFreq': metadataTree.funcFreq,\n }\n \n result = {}\n for iEntry in range(tree.GetEntries()):\n tree.GetEntry(iEntry)\n iChip = tree.chan//16\n iChannel = tree.chan % 16\n adccodes = list(tree.wf)\n #if self.nBits < 12:\n # adccodes = [i >> (12 - self.nBits) for i in adccodes]\n try:\n result[iChip][iChannel].extend(adccodes)\n except KeyError:\n if iChip in result:\n result[iChip][iChannel] = adccodes\n else:\n result[iChip] = {iChannel:adccodes}\n return result, metadata\n\ndef main():\n from ..configuration.argument_parser import ArgumentParser\n parser = ArgumentParser(description=\"Displays a trace from a root file\")\n parser.add_argument(\"infilename\",help=\"file name to read the waveform from\")\n args = parser.parse_args()\n waveforms, metadata = loadWaveform(args.infilename)\n\n for iChip in waveforms:\n fig, axs = plt.subplots(4,4)\n print(axs)\n for iChan in waveforms[iChip]:\n ax = axs[iChan // 4][iChan % 4]\n waveform = waveforms[iChip][iChan]\n ax.plot(waveform)\n ax.set_title(\"Chip: {} Channel: {}\".format(iChip,iChan))\n ax.set_xlabel(\"Time Sample\")\n ax.set_ylabel(\"ADC Code\")\n plt.show()\n","sub_path":"femb_python/helper_scripts/show_trace_root.py","file_name":"show_trace_root.py","file_ext":"py","file_size_in_byte":2068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"79319855","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# ライブラリの読み込み\nimport RPi.GPIO as 
GPIO\nimport time\n# GPIOピン番号の定義方法を設定する(BCM/BOARD)\nGPIO.setmode(GPIO.BCM)\n# 18番ピンを出力モードで初期化する\nGPIO.setup(23, GPIO.OUT)\n\nfor x in xrange(5):\n GPIO.output(23, True)\n time.sleep(1)\n GPIO.output(23, False)\n time.sleep(1)\n\nGPIO.cleanup()\n","sub_path":"led_test.py","file_name":"led_test.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"74908261","text":"import pandas as pd\r\nimport numpy as np\r\nimport wfdb\r\nimport pickle\r\n\r\ndirName = 'data/training2017/'\r\ndf = pd.read_csv(dirName + 'REFERENCE.csv', header=None, names = ['name','label'])\r\ndf = df[df.label != 'O']\r\ntest = df.sample(frac=.2)\r\ntrainIdx = list(set(df.index) - set(test.index))\r\ntrain = df.loc[trainIdx]\r\n\r\nvalid = train.sample(frac=.2)\r\ntrainIdx = list(set(train.index) - set(valid.index))\r\ntrain = df.loc[trainIdx]\r\n\r\nsigs, ys = consolidateData(train, dirName)\r\nnp.save(dirName + 'train', sigs)\r\nnp.save(dirName + 'trainlabel', ys)\r\n\r\nsigs, ys = consolidateData(test, dirName)\r\nnp.save(dirName + 'test', sigs)\r\nnp.save(dirName + 'testlabel', ys)\r\n\r\nsigs, ys = consolidateData(valid, dirName)\r\nnp.save(dirName + 'valid', sigs)\r\nnp.save(dirName + 'validlabel', ys)\r\n\r\ndef chunk(signal, y, chunkSize=4096):\r\n '''\r\n chunk signal into chunks of size chunkSize\r\n Input: signal: nonempty 1D np array or list, y: label, chunkSize: int>0\r\n Output: 2D numpy array of chunked signal and its label\r\n '''\r\n reps=1\r\n if len(signal)chunkSize:\r\n offset = np.random.randint(0,len(signal)%chunkSize + 1)\r\n out = []\r\n reps=len(signal)//chunkSize\r\n for i in range(0,reps):\r\n tmp=signal[offset+i*chunkSize:offset+(i+1)*chunkSize]\r\n out.append(tmp)\r\n signal=out\r\n\r\n else:\r\n signal =[signal]\r\n\r\n y=[y]*reps\r\n\r\n return signal, y\r\n\r\ndef consolidateData(df, dirName, chunkSize=4096):\r\n '''\r\n turn dataset into single file, with signals chunked into size chunkSize\r\n Input: df: dataframe with cols 'name' and 'label'; name is name of file\r\n dirName: dir where file can be found\r\n chunkSize: int>0\r\n Output: 2D numpy array of chunked signals and their labels\r\n '''\r\n sigOut=[]\r\n yOut=[]\r\n for _, row in df.iterrows():\r\n fname = dirName + row['name']\r\n sig, _ = wfdb.rdsamp(fname)\r\n sig=sig.reshape(-1)\r\n sigs, ys = chunk(sig, [row['name'], row.label], chunkSize)\r\n sigOut.extend(sigs)\r\n yOut.extend(ys)\r\n return np.array(sigOut), np.array(yOut)","sub_path":"src/0_split_data.py","file_name":"0_split_data.py","file_ext":"py","file_size_in_byte":2348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"519902750","text":"def digits(n):\n return list(map(int, str(n)))\n\n\ndef decreases(n):\n for a, b in zip(n, n[1:]):\n if b < a:\n return True\n return False\n\n\ndef double_not_tripple(n):\n n = [-1] + n + [-1]\n for a, b, c, d in zip(n, n[1:], n[2:], n[3:]):\n if b == c and a != b and c != d:\n return True\n return False\n\n\ndef gen(n1, n2):\n for n in range(n1, n2):\n d = digits(n)\n if decreases(d):\n continue\n if not double_not_tripple(d):\n continue\n yield n\n\n\ndef count(n1, n2):\n return len(list(gen(n1, n2)))\n\n\nif __name__ == '__main__':\n n1, n2 = map(int, input().split('-'))\n print(count(n1, 
n2))\n","sub_path":"04-secure-container/part2.py","file_name":"part2.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"171586875","text":"import torch\nimport pytorch_lightning as pl\n\nfrom processors import MnliProcessor\nfrom firebert_fct import FireBERT_FCT\nfrom bert_base_model import LightningBertForSequenceClassification\n\nnum_gpus = -1 if torch.cuda.is_available() else None\n\nimport gc\nimport random\ngc.enable()\n\nimport time\n\nfrom torch.utils.data import TensorDataset\n\n# prepare hyperparameters\n\nmax_steps = -1 # if -1 then calculate number of training steps based on the length of the train set\nlen_train_set = 392702\n\ngradient_accumulation_steps = 1\nlearning_rate = 2e-5\nweight_decay = 0.00\nadam_epsilon = 1e-8\nwarmup_proportion = 0 \n\nnum_train_epochs = 1\nbatch_size = 7\n\nif max_steps > 0:\n num_train_epochs = max_steps // (len_train_set // gradient_accumulation_steps) + 1\n num_training_steps = max_steps\nelse:\n num_training_steps = len_train_set // gradient_accumulation_steps * num_train_epochs\n \nwarmup_steps = num_training_steps // num_train_epochs * warmup_proportion\n\ndef load_examples(features_file):\n\n features = torch.load(features_file)\n\n # Convert to Tensors and build dataset\n all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)\n all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)\n all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)\n all_labels = torch.tensor([f.label for f in features], dtype=torch.long)\n\n all_idxs = torch.tensor([i for i in range(len(all_input_ids))], dtype=torch.long)\n \n dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels, all_idxs)\n \n return dataset\n\ndef elapsed_time():\n global t_start\n\n t_now = time.time()\n t = t_now-t_start\n t_start = t_now\n return t\n\nuse_full_example = True\nuse_USE = True # if random.randint(1, 2) == 1 else False\nUSE_method = 'rank' #'filter' if random.randint(1, 2) == 1 else 'rank'\nUSE_multiplier = 12 #random.randint(5, 15) # 3\nstop_words = True\nperturb_words = 9 #random.randint(4, 9) # 2\ncandidates_per_word = 10 #random.randint(5, 15) #10\ntotal_alternatives = 4 #random.randint(4, 7) # 5\nmatch_pos = True #if random.randint(1, 2) == 1 else False\nleave_alone = 0\nrandom_out_of = 0\njudge_bert = False\n\nhparams = { 'learning_rate': learning_rate,\n 'adam_epsilon': adam_epsilon,\n 'weight_decay': weight_decay,\n 'warmup_steps': warmup_steps,\n 'num_training_steps': num_training_steps,\n 'batch_size': batch_size,\n 'use_USE': use_USE,\n 'USE_method': USE_method,\n 'USE_multiplier': USE_multiplier,\n 'stop_words': stop_words,\n 'perturb_words': perturb_words,\n 'candidates_per_word': candidates_per_word,\n 'total_alternatives': total_alternatives,\n 'match_pos': match_pos,\n 'use_full_example': use_full_example,\n 'leave_alone': leave_alone,\n 'random_out_of': random_out_of,\n 'judge_bert': judge_bert\n }\n\nprint(hparams)\n\nproc_hparams = {}\n# delete this next line to run full 100%\nproc_hparams.update({'sample_percent': 3,\n 'randomize': True})\n\n# instantiate the model used for SWITCH\nswitch_model = LightningBertForSequenceClassification(load_from = 'resources/models/MNLI/pytorch_model.bin', \n processor = MnliProcessor(), \n hparams = {'batch_size': 6 })\nswitch_model.cuda()\n\nmodel = FireBERT_FCT(switch_model=switch_model, 
processor=MnliProcessor(hparams=proc_hparams), hparams=hparams)\n\nprocessor = model.get_processor()\n\ntrain_dataset, train_examples = processor.load_and_cache_examples(\"data/MNLI\", example_set='train')\nval_dataset, _ = processor.load_and_cache_examples(\"data/MNLI\", example_set='dev')\ntest_dataset, _ = processor.load_and_cache_examples(\"data/MNLI\", example_set='test')\n\nmodel.set_train_dataset(train_dataset, train_examples)\nmodel.set_val_dataset(val_dataset)\nmodel.set_test_dataset(test_dataset)\n\namp_opt_level='O1' \nmax_grad_norm = 1.0\n\nt_start = time.time()\n\ntrainer = pl.Trainer(gpus=num_gpus,\n max_epochs = num_train_epochs, amp_level=amp_opt_level, gradient_clip_val=max_grad_norm,\n max_steps = num_training_steps)\n\ntrainer.fit(model)\n\ntraining_time = round(elapsed_time(),2)\n\ntrainer.test(model)\n\ntrain_results = trainer.tqdm_metrics\n\n\ndel train_dataset\ndel train_examples\ndel val_dataset\ndel test_dataset\ndel trainer\n\ngc.collect()\ntorch.cuda.empty_cache()\n\n# compare how well the model does against adversarial samples\ntest_dataset = load_examples('data/MNLI/generated/mnli_adversarial_samples_for_dev')\nmodel.set_test_dataset(test_dataset)\ntrainer = pl.Trainer(gpus=num_gpus)\ntrainer.test(model)\nadv_results = trainer.tqdm_metrics\n\nresults = \"training_time: \" + str(training_time) + \"s, \" + \\\n \"val_loss: \" + str(train_results['val_loss']) + \", \" + \\\n \"val_acc: \" + str(train_results['avg_val_acc']) + \", \" + \\\n \"adv_val_acc: \" + str(adv_results['avg_test_acc']) + \", \" + \\\n \"use_USE: \" + str(use_USE) + \", \" + \\\n \"USE_multiplier: \" + str(USE_multiplier) + \", \" + \\\n \"USE_method: \" + str(USE_method) + \", \" + \\\n \"perturb_words: \" + str(perturb_words) + \", \" + \\\n \"candidates_per_word: \" + str(candidates_per_word) + \", \" + \\\n \"total_alternatives: \" + str(total_alternatives) + \", \" + \\\n \"match_pos: \" + str(match_pos)\n\nprint(results)\n\nfname = str(\"results/fct/mnli-hparams-results.txt\")\nf = open(fname, \"a\")\nf.write(results)\nf.write( \"\\n\")\nf.close()\n\ndel processor\ndel test_dataset\ndel model\ndel switch_model\ndel trainer\ndel train_results\ndel adv_results\ndel results\ndel fname\ndel f\n\ngc.collect()\ntorch.cuda.empty_cache() \n\n\n","sub_path":"randomsearchMNLI.py","file_name":"randomsearchMNLI.py","file_ext":"py","file_size_in_byte":5845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"15448047","text":"#\n# Copyright 2008 Free Software Foundation, Inc.\n#\n# This file is part of GNU Radio\n#\n# GNU Radio is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 3, or (at your option)\n# any later version.\n#\n# GNU Radio is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with GNU Radio; see the file COPYING. 
If not, write to\n# the Free Software Foundation, Inc., 51 Franklin Street,\n# Boston, MA 02110-1301, USA.\n#\n\nfrom gnuradio import eng_notation\nfrom gnuradio import gr, digital\nfrom gnuradio.eng_option import eng_option\nfrom gnuradio.gr import firdes\nfrom gnuradio.wxgui import numbersink2\nfrom gnuradio.wxgui import scopesink2\nfrom grc_gnuradio import blks2 as grc_blks2\n\nfrom grc_gnuradio import wxgui as grc_wxgui\nfrom optparse import OptionParser\nimport wx\nimport math\n\nimport howto \n\nn2s = eng_notation.num_to_str\n\nclass bpsk_demodulator(gr.hier_block2, grc_wxgui.top_block_gui):\n def __init__(self, options):\n \n gr.hier_block2.__init__(self, \"bpsk_demodulator\",\n gr.io_signature(1, 1, gr.sizeof_gr_complex), # Input signature\n gr.io_signature(0, 0, 0)) # Output signature\n \n grc_wxgui.top_block_gui.__init__(self, title=\"Top Block\")\n \n self._samples_per_symbol = options.samples_per_symbol\n \n # Number of bits per symbol\n self._bits_per_symbol = 1\n # Create AGC to scale input to unity\n self._agc = gr.agc_cc(1e-5, 1.0, 1.0, 1.0)\n # Create RRC with specified excess bandwidth\n taps = gr.firdes.root_raised_cosine(1.0, # Gain\n self._samples_per_symbol, # Sampling rate\n 1.0, # Symbol rate\n 0.35, # Roll-off factor\n 11*self._samples_per_symbol) # Number of taps\n \n self._rrc = gr.fir_filter_ccf(1, taps)\n \n # Create a Costas loop frequency/phase recovery block\n self._costas = digital.costas_loop_cc(6.28/100.0, 2)\n self.gr_null_sink_f1 = gr.null_sink(gr.sizeof_float*1)\n \n \n # Create a M&M bit synchronization retiming block \n self._mm = digital.clock_recovery_mm_cc(self._samples_per_symbol, # Initial samples/symbol\n 1e-06, # Second order gain\n 0.5, # Initial symbol phase\n 0.001, # First order gain\n 0.0001) # Maximum timing offset\n \n # Add an SNR probe on the demodulated constellation\n #self._snr_probe = gr.probe_mpsk_snr_c(10.0/symbol_rate)\n self._symbol_rate = options.data_rate / self._bits_per_symbol\n #self._snr_probe = digital.mpsk_snr_est_cc(0, 10000, 0.001) # 0 at the first mean Simple\n \n self._snr_probe = digital.probe_mpsk_snr_est_c(digital.SNR_EST_M2M4, alpha=10.0/self._symbol_rate)\n #self._snr_probe = digital.mpsk_snr_est_cc(digital.SNR_EST_SIMPLE, alpha=10.0/self._symbol_rate)\n \n \n # Slice the resulting constellation into bits.\n # Get inphase channel and make decision about 0\n self._c2r = gr.complex_to_real()\n self._slicer = digital.binary_slicer_fb() \n \n # Descramble BERT sequence. 
A channel error will create 3 incorrect bits\n self._descrambler = gr.descrambler_bb(0x8A, 0x7F, 31) # CCSDS 7-bit descrambler\n\n # Measure BER by the density of 0s in the stream\n # self._ber = gr.probe_density_b(1.0/symbol_rate)\n self._ber = grc_blks2.error_rate(type='BER', \n win_size=1000,\n bits_per_symbol=1)\n \n #self.create_number_sink(self._samples_per_symbol)\n self.gr_null_sink_f2 = gr.null_sink(gr.sizeof_float*1)\n \n #Create a vector source reference to calculate BER\n self._vector_source_ref = gr.vector_source_b(([1,]), True, 1)\n \n #create a comparator \n self.comparator = howto.compare_vector_cci((1, 1, 1, 1 ,0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1), (0,0, 1, 1, 0,0), 5, 0, True)\n \n \n #Connection of blocks\n #\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"-->_snr_probe-->gr_null_sink_cc\n # agc --> _rrc --> costas --> _mm --> _c2r --> _slicer --> _descrambler --> _ber --> gr_null_sink_f2\n #\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"--> gr_null_sink_f1 \"\" _vector_source_ref-->\n \n# \n# self.connect(self, self._agc, self._rrc, self._costas, self._mm, \n# self._c2r, self._slicer, self._descrambler, self.comparator, (self._ber, 1))\n# \n \n self.connect(self, self._agc, self._rrc, self._costas, self._mm, \n self._c2r, self._slicer, self._descrambler, (self._ber, 1))\n \n \n self.connect((self._costas, 1), (self.gr_null_sink_f1, 0))\n self.connect(self._mm, self._snr_probe)\n \n self.connect(self._vector_source_ref, (self._ber,0)) \n self.connect(self._ber, self.gr_null_sink_f2)\n \n \n def get_compare_vector_decision(self):\n return self.comparator.is_same_vector_decision\n \n def set_compare_vector_decision(self, decision):\n self.comparator.is_same_vector_decision = decision\n \n def get_comparator_vector_number(self):\n return self.comparator.is_same_vector_number\n\n def snr(self):\n return self._snr_probe.snr()\n\n def ber(self):\n return self._ber.ber_\n \n def create_number_sink (self, samp_rate): \n self.wxgui_numbersink = numbersink2.number_sink_f(\n self.GetWin(),\n unit=\"Units\",\n minval=-100,\n maxval=100,\n factor=1.0,\n decimal_places=10,\n ref_level=0,\n sample_rate=samp_rate,\n number_rate=15,\n average=False,\n avg_alpha=None,\n label=\"SNR\",\n peak_hold=False,\n show_gauge=True,\n )\n self.Add(self.wxgui_numbersink.win)\n","sub_path":"usrpN210_gnuradio_3-6/bpsk_modulation/bpsk_demodulator.py","file_name":"bpsk_demodulator.py","file_ext":"py","file_size_in_byte":6662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"42018201","text":"import csv\r\nimport re\r\nimport time as ti\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nimport argparse\r\nimport pymysql\r\nimport warnings\r\nwarnings.filterwarnings(\"ignore\")\r\nimport function as fn\r\n\r\nfrom config import Config as cf\r\nfrom itertools import product\r\n\r\n#st_time = ti.time()\r\nperiod = cf.get_period()\r\nlimited = cf.get_limited()\r\nthreshold = cf.get_threshold()\r\n\r\nparser = argparse.ArgumentParser()\r\n'''\r\nparser.add_argument(\r\n \"--loadCourseFile\", default=\"class.csv\", type=str, required=False, help=\"输入待排课表位置\"\r\n )'''\r\n\r\nparser.add_argument(\r\n \"--class_inf\", default=\"classs_inf\",type=str, required=True, help=\"待输入排考表名\"\r\n )\r\n\r\nparser.add_argument(\r\n \"--outputFile\",default=\"排考最终结果.csv\", type=str, required=True, help=\"输入输出文件名\"\r\n)\r\nargs = parser.parse_args()\r\n\r\n\r\n\r\ndef main(args):\r\n with 
open(\"temp.csv\",\"w\") as f:\r\n csv_write = csv.writer(f)\r\n csv_write.writerow([])\r\n sql = \"select ci_course_no,ci_teacher_name,ci_class_name,ci_student_number,ci_class_dep from \" + args.class_inf + \" where ci_teacher_name != ' '\"\r\n # print(\"get data successfully\")\r\n data = fn.getDF(sql)\r\n # data = pd.read_csv(args.loadCourseFile, usecols=[\"ci_course_no\",\"ci_class_name\",\"ci_student_number\",\"ci_teacher_name\",\"ci_class_dep\"])\r\n \r\n #数据预处理模板 拆分同一行内按类似“1-3”的班级\r\n data[\"ci_class_name\"] = data[\"ci_class_name\"].apply(fn.pre_split_class) \r\n # print(\"preprocess successfully\")\r\n # 读取当前资源表\r\n dictTeacherTime = fn.csv2dict(\"TeacherSource.csv\")\r\n dictStudentTime = fn.csv2dict(\"StudentSource.csv\")\r\n\r\n # 建立所有学院和老师的联系\r\n #dataCollege = pd.read_csv(args.loadCourseFile, usecols=[\"ci_class_dep\",\"ci_teacher_name\"])\r\n dataCollege = pd.DataFrame(data,columns=[\"ci_class_dep\",\"ci_teacher_name\"])\r\n dictCollegeTeacher = fn.buildConnectionDict(dataCollege, \"ci_class_dep\",\"ci_teacher_name\")\r\n\r\n # 建立课程和学院的联系\r\n #dataCourse = pd.read_csv(args.loadCourseFile, usecols=[\"ci_class_dep\",\"ci_course_no\"])\r\n dataCourse = pd.DataFrame(data,columns=[\"ci_course_no\",\"ci_class_dep\"])\r\n dictCourseCollege = fn.buildConnectionDict(dataCourse,\"ci_course_no\",\"ci_class_dep\")\r\n\r\n\r\n # 生成一个字典,对应的形式是这样的{\"班级1\":[],\"班级2\":[],\"班级3\":[],...}\r\n # dictClassTime = {}\r\n for courseNumber in fn.SamplingCourse(data):\r\n course = courseNumber[0]\r\n temp = [1] * period\r\n tempTeacher = [1] * period\r\n classWhoTakeCourse = []\r\n classCourseNumber = []\r\n unitClass = []\r\n #classTeacherList = []\r\n indexList = []\r\n unitTeacher = []\r\n\r\n for i,cl_name,cl_num,cl_ter,cl_co in zip(data.index,data[\"ci_class_name\"],data[\"ci_student_number\"],data[\"ci_teacher_name\"],data[\"ci_course_no\"].values):\r\n # 查找每一个考这门课的班级\r\n if cl_co == course:\r\n className = cl_name\r\n classNumber = cl_num\r\n # 保存班级人数和班级的时候以整体为单位\r\n classWhoTakeCourse.append(className)\r\n classCourseNumber.append(classNumber)\r\n indexList.append(i)\r\n\r\n # 取每个班级的公共时间\r\n splitClassList = fn.unitedClass(className) \r\n for className in splitClassList:\r\n # 对考这门课的班级的时间进行初始化, 如果不存在就生成一个[1,1,1,1...,]\r\n dictStudentTime[className] = dictStudentTime.get(className,[1] * period)\r\n # 找到所有参加这门考试 班级的共同空闲时间\r\n # [0,1,1,...,] 和 [1,0,1,...,] 相与\r\n temp = fn.findCommonTime(temp,dictStudentTime[className])\r\n unitClass.append(className)\r\n \r\n classTeacher = cl_ter\r\n #classTeacherList.append(classTeacher)\r\n \r\n # 接下取监考老师的公共时间\r\n if \":\" in classTeacher:\r\n teachersList = classTeacher.split(\":\")\r\n for teacher in teachersList:\r\n dictTeacherTime[teacher] = dictTeacherTime.get(teacher,[1] * period)\r\n tempTeacher = fn.findCommonTime(tempTeacher, dictTeacherTime[teacher])\r\n unitTeacher.append(teacher)\r\n else:\r\n dictTeacherTime[classTeacher] = dictTeacherTime.get(classTeacher,[1] * period)\r\n tempTeacher = fn.findCommonTime(tempTeacher,dictTeacherTime[classTeacher])\r\n unitTeacher.append(classTeacher)\r\n \r\n # 去除重复老师\r\n unitTeacher = list(set(unitTeacher))\r\n \r\n\r\n # 两个公共时间相与\r\n temp = fn.findCommonTime(tempTeacher,temp)\r\n \r\n # 判断监考老师是否足够, 不够从同学院找\r\n if len(unitTeacher) < courseNumber[1]:\r\n college = dictCourseCollege[course]\r\n collegeTeachers = dictCollegeTeacher[college[0]]\r\n for Teacher in collegeTeachers:\r\n dictTeacherTime[Teacher] = dictTeacherTime.get(Teacher,[1] * period)\r\n if Teacher not in unitTeacher and 
fn.findCommonTime(dictTeacherTime[Teacher], tempTeacher) == tempTeacher:\r\n unitTeacher.append(Teacher)\r\n\r\n # 监考老师多了, 就选到只有这么多老师, 非常粗糙 \r\n if len(unitTeacher) > courseNumber[1]:\r\n unitTeacher = unitTeacher[0:courseNumber[1]]\r\n \r\n di = 0\r\n if len(unitTeacher) < courseNumber[1]:\r\n di = courseNumber[1] - len(unitTeacher)\r\n print(courseNumber[0]+\"课程,当前老师不足,缺少\"+str(di)+\"个,用-1代替\")\r\n di_list = [\"-1\"]*di\r\n unitTeacher.extend(di_list)\r\n\r\n\r\n # 记录没有授课老师的情况\r\n #fn.writeImperfect(indexList,unitTeacher,data)\r\n\r\n if 1 not in temp:\r\n print(\"当前\" + course +\"无法排考\")\r\n # 找到共同可行的时间, 写入csv, 并且标记占用时间\r\n else:\r\n _time = fn.optimalTime(temp)\r\n # newClassWhoTakeCourse, newClassCourseNumber = splitClass(classWhoTakeCourse,classCourseNumber)\r\n college = dictCourseCollege[course]\r\n fn.writeToCsv(_time,course,classWhoTakeCourse,classCourseNumber, unitTeacher,college) \r\n # writeToCsv(temp,course,classWhoTakeCourse,classCourseNumber,college) \r\n # 更新老师班级的时间表\r\n fn.updateClassTime(dictStudentTime, unitClass, _time)\r\n fn.updateTeacherTime(dictTeacherTime, unitTeacher, _time,di)\r\n fn.dict2csv(dictTeacherTime,\"TeacherSource.csv\")\r\n fn.dict2csv(dictStudentTime,\"StudentSource.csv\")\r\n\r\n#===== 连接数据库=====#\r\n conn = pymysql.connect(\r\n host = '123.60.11.177',\r\n port = 3306,\r\n user = 'root',\r\n password = 'ncu@jw114',\r\n db = 'examArrange1',\r\n charset='utf8'\r\n )\r\n cur = conn.cursor()\r\n cur.execute(\"SELECT column_name FROM information_schema.columns WHERE (table_name = 'classroom_inf_new') AND (ordinal_position >= 1) order by ordinal_position\")\r\n classroom_inf_new_columns=fn.get_columns(cur.fetchall())\r\n cur.execute(\"SELECT * FROM classroom_inf_new\")\r\n classroom_inf_new=pd.DataFrame(cur.fetchall(),columns=classroom_inf_new_columns)\r\n conn.close()\r\n\r\n temp=pd.read_csv(\"./temp.csv\",names=[\"ci_course_no\",\"test_time\",\"ci_class_name\",\"ci_student_number\",\"ci_teacher_name\",\"ci_class_unit\"])\r\n temp.insert(0,\"index\",temp.index.values)\r\n\r\n res = list(product(classroom_inf_new.loc[:,[\"教学楼名称\",\"房间号\",\"座位数\"]].values.tolist(),range(period)))\r\n dm_classroom_state_new=fn.get_dm_classroom_state_new(res)\r\n\r\n D=dm_classroom_state_new\r\n zhujiao_D=D[D[\"cr_building\"]==\"教学主楼\"] #后面是要从数据库中读取\r\n gonggong_temp=temp[temp[\"ci_class_unit\"]==\"公共课排课\"]\r\n jiangong_D=D[D[\"cr_building\"]==\"建工楼\"]\r\n jiangong_temp=temp[temp[\"ci_class_unit\"]==\"建筑工程学院\"]\r\n xingong_D=D[D[\"cr_building\"]==\"信工楼\"]\r\n xingong_temp=temp[temp[\"ci_class_unit\"]==\"信工学院\"]\r\n jidian_D=D[D[\"cr_building\"]==\"机电楼\"]\r\n jidian_temp=temp[temp[\"ci_class_unit\"]==\"机电学院\"]\r\n lisheng_D=D[D[\"cr_building\"]==\"理生楼\"]\r\n lisheng_temp=temp[(temp[\"ci_class_unit\"]==\"理学院\")|(temp[\"ci_class_unit\"]==\"生命学院\")]\r\n huanjing_D=D[D[\"cr_building\"]==\"环境楼\"]\r\n huanjing_temp=temp[temp[\"ci_class_unit\"]==\"环化学院\"]\r\n cailiao_D=D[D[\"cr_building\"]==\"材料楼\"]\r\n cailiao_temp=temp[temp[\"ci_class_unit\"]==\"材料学院\"]\r\n renwen_D=D[D[\"cr_building\"]==\"人文楼\"]\r\n renwen_temp=temp[temp[\"ci_class_unit\"]==\"人文学院\"]\r\n faxue_D=D[D[\"cr_building\"]==\"法学楼\"]\r\n faxue_temp=temp[temp[\"ci_class_unit\"]==\"法学院\"]\r\n waijing_D=D[D[\"cr_building\"]==\"外经楼\"]\r\n waijing_temp=temp[(temp[\"ci_class_unit\"]==\"外语学院\")|(temp[\"ci_class_unit\"]==\"经管学院\")]\r\n\r\n\r\n gonggongalldata,cannt_find_class_id=fn.schedule_algorithm(zhujiao_D,gonggong_temp)\r\n gonggongoutdata=gonggongalldata[gonggongalldata.cr_state==0]\r\n\r\n 
jiangongalldata,jiangongcannt_find_class_id=fn.schedule_algorithm(jiangong_D,jiangong_temp)\r\n jiangongoutdata=jiangongalldata[jiangongalldata.cr_state==0]\r\n #机电\r\n jidianalldata,jidiancannt_find_class_id=fn.schedule_algorithm(jidian_D,jidian_temp)\r\n jidianoutdata=jidianalldata[jidianalldata.cr_state==0]\r\n #信工\r\n xingongalldata,xingongcannt_find_class_id=fn.schedule_algorithm(xingong_D,xingong_temp)\r\n xingongoutdata=xingongalldata[xingongalldata.cr_state==0]\r\n #理生\r\n lishengalldata,lishengcannt_find_class_id=fn.schedule_algorithm(lisheng_D,lisheng_temp)\r\n lishengoutdata=lishengalldata[lishengalldata.cr_state==0]\r\n #环境\r\n huanjingalldata,huanjingcannt_find_class_id=fn.schedule_algorithm(huanjing_D,huanjing_temp)\r\n huanjingoutdata=huanjingalldata[huanjingalldata.cr_state==0]\r\n #材料\r\n cailiaoalldata,cailiaocannt_find_class_id=fn.schedule_algorithm(cailiao_D,cailiao_temp)\r\n cailiaooutdata=cailiaoalldata[cailiaoalldata.cr_state==0]\r\n #人文\r\n renwenalldata,renwencannt_find_class_id=fn.schedule_algorithm(renwen_D,renwen_temp)\r\n renwenoutdata=renwenalldata[renwenalldata.cr_state==0]\r\n #外经\r\n waijingalldata,waijingcannt_find_class_id=fn.schedule_algorithm(waijing_D,waijing_temp)\r\n waijingoutdata=waijingalldata[waijingalldata.cr_state==0]\r\n #法学\r\n faxuealldata,faxuecannt_find_class_id=fn.schedule_algorithm(faxue_D,faxue_temp)\r\n faxueoutdata=faxuealldata[faxuealldata.cr_state==0]\r\n\r\n dfoutdata = pd.concat([gonggongoutdata,jiangongoutdata,jidianoutdata,xingongoutdata,lishengoutdata,huanjingoutdata,cailiaooutdata,renwenoutdata,waijingoutdata,faxueoutdata], axis = 0, ignore_index = False, join = \"outer\")\r\n dfcannt_find_class_id=(cannt_find_class_id+jiangongcannt_find_class_id+jidiancannt_find_class_id+xingongcannt_find_class_id+lishengcannt_find_class_id+huanjingcannt_find_class_id+cailiaocannt_find_class_id+renwencannt_find_class_id+waijingcannt_find_class_id+faxuecannt_find_class_id)\r\n\r\n dfalldata = pd.concat([gonggongalldata,waijingalldata,faxuealldata,renwenalldata,jiangongalldata,jidianalldata,xingongalldata,cailiaoalldata,huanjingalldata,lishengalldata], axis = 0, ignore_index = False, join = \"outer\")\r\n\r\n dfoutdata=dfoutdata.rename(columns={\"state_id\":\"index\"})\r\n df=pd.merge(temp,dfoutdata,how='inner',on=[\"index\"]).iloc[:,[1,2,3,4,5,8,7]]\r\n cannt_temp=temp[temp.apply(lambda x:x[0] in dfcannt_find_class_id,axis=1)]\r\n\r\n\r\n df.to_csv(args.outputFile)\r\n cannt_temp.to_csv(\"canntArrangeClass.csv\", mode = \"a\")\r\n return print(\"Exam ready\")\r\n\r\nif __name__ == \"__main__\":\r\n main(args)\r\n\r\n# 此处由于写入数据库速度过慢, 先注掉\r\n# fn.uploadCsv(\"TeacherSource.csv\")\r\n# fn.uploadCsv(\"StudentSource.csv\")\r\n\r\n\r\n#print(ti.time() - st_time)\r\n","sub_path":"DDL/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":12215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"31524147","text":"# -*- coding: utf-8 -*-\n# @Time : 2020/3/13 9:50\n# @Author : Ye Jinyu__jimmy\n# @File : forecast_model.py\n\n\n\nimport pandas as pd\n# 显示所有列\npd.set_option('display.max_columns', None)\n# 显示所有行\npd.set_option('display.max_rows', 500)\n# 设置value的显示长度为100,默认为50\npd.set_option('max_colwidth', 100)\nfrom sklearn import preprocessing,metrics\nimport numpy as np\nimport time\nimport xgboost as xgb\nfrom sklearn.model_selection import GridSearchCV,train_test_split\nimport matplotlib\nmatplotlib.use('TkAgg')\nfrom statsmodels.tsa.holtwinters import ExponentialSmoothing, 
SimpleExpSmoothing, Holt\n\nimport features_engineering\nimport cx_Oracle\nimport datetime\nimport pymysql\nimport tqdm\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport math\n\n'''当前是针对给点的数据集进行模型的训练和输出'''\n\nfrom matplotlib.pylab import rcParams\nrcParams['figure.figsize'] = 12, 4\n\n#————————————————————热力图————————————\ndef heatmap(train_df):\n plt.subplots(figsize=(24,20))\n sns.heatmap(train_df.corr(),cmap='RdYlGn',annot=True,vmin=-0.1,vmax=0.1,center=0)\n\n\n\n#————————————————————构建训练集与测试集————————————————————————\ndef df_division(data_frame,end_date):\n\n #--------用于训练使用----\n train_df = data_frame[data_frame['Account_date'] <= end_date]\n train_df = train_df[~train_df['sales_qty'].isin([0])]\n\n train, test = train_test_split(train_df, test_size=0.2, random_state=10)\n #拆分特征与标签,并将标签取对数处理\n ytrain = np.log1p(train['sales_qty'])\n ytest = np.log1p(test['sales_qty'])\n\n\n Xtrain = train.drop(['Account_date','sales_qty'],axis=1).values\n Xtest = test.drop(['Account_date','sales_qty'],axis=1).values\n #-----------------把需要预测的特征也准备好\n predict_df = data_frame[data_frame['Account_date'] > end_date]\n new_start = predict_df['Account_date'].min()\n end = predict_df['Account_date'].max()\n predict_df.sort_values('Account_date')\n Xpredict = predict_df.drop([ 'Account_date','sales_qty'], axis=1).values\n\n return Xtrain,ytrain,Xtest,ytest,Xpredict, new_start, end\n\n#————————————————————————获取需要预测的特征——————————————————————\n# def predict_function(data_frame,end_date):\n# # print(data_frame)\n#\n# return Xpredict,new_start,end\n\n#——————————————定义评价函数,可以传入后面模型中替代模型本身的损失函数————————————\n\ndef mape(yhat, y):\n \"\"\" mape -- MAPE 评价指标 \"\"\"\n n = len(y)\n mape = sum(np.abs((y - yhat) / y)) / n * 100\n return mape\n\n\ndef mae_value(yhat, y):\n ''' 返回:\n mae -- MAE 评价指标'''\n n = len(y)\n print('优化��数的总长度是:%d'%n)\n mae = sum(np.abs(y - yhat)) / n\n return mae\n\n\n#定义评价函数,可以传入后面模型中替代模型本身的损失函数\ndef rmspe(y,yhat):\n return np.sqrt(np.mean((y - yhat) ** 2))\n\n\ndef rmspe_xg(yhat,y):\n y_ = np.expm1(y.get_label())\n yhat_ = np.expm1(yhat)\n return 'rmspe',rmspe(y_,yhat_)\n\n\n\n# ————————————————————————初始模型构建参数设定————————————————————————————————\ndef training_function(Xtrain,ytrain,Xtest,ytest):\n params ={'objective':'reg:linear',\n 'booster':'gbtree',\n 'eta':0.15,\n 'max_depth':10,\n 'subsample':0.8,\n 'colsample_bytree':0.7,\n 'silent':1,\n 'seed':10}\n num_boost_round = 6000\n dtrain = xgb.DMatrix(Xtrain,ytrain)\n dvalid = xgb.DMatrix(Xtest,ytest)\n watchlist = [(dtrain,'train'),(dvalid,'eval')]\n\n # #模型训练\n print('Train a XGBoost model')\n start = time.time()\n gbm = xgb.train(params,dtrain,num_boost_round,evals=watchlist,\n early_stopping_rounds=80,feval=rmspe_xg,verbose_eval=True) #\n\n end = time.time()\n print('Train time is {:.2f} s.'.format(end-start))\n return gbm\n\n#————————————————————模型优化————————————————————————————————\ndef modeling_optimize(ytest,yhat):\n print('weight correction')\n W = [(0.980 + (i / 1000)) for i in range(200)]\n S = []\n for w in W:\n error = mae_value(np.expm1(ytest), np.expm1(yhat * w))\n print('mae_value for {:.3f}:{:.6f}'.format(w, error))\n S.append(error)\n Score = pd.Series(S, index=W)\n Score.plot()\n BS = Score[Score.values == Score.values.min()]\n a = np.array(BS.index.values)\n W_ho = a.repeat(len(ytest))\n print('Best weight for Score:{}'.format(BS))\n ##计算校正后整体数据的MAE得分\n yhat_new = yhat * W_ho\n error = mae_value(np.expm1(ytest), np.expm1(yhat_new))\n print('mae_value for weight corretion {:.6f}'.format(error))\n return 
yhat_new,BS\n\n\n#——————————————————————对预测值进行修正————————————————————————\ndef revised_predict(data,predict_df):\n max_qty = data['sales_qty'].max()\n min_qty = data['sales_qty'].min()\n predict_df[\"forecast\"].iloc[np.where(predict_df[\"forecast\"] < min_qty)] = min_qty\n predict_df[\"forecast\"].iloc[np.where(predict_df[\"forecast\"] > max_qty)] = max_qty\n return predict_df\n\n\n#————————————————定义函数用于训练模型并输出修正系数——————\ndef model_revised(Xtrain, ytrain, Xtest, ytest,Xpredict):\n gbm = training_function(Xtrain, ytrain, Xtest, ytest)\n yhat = gbm.predict(xgb.DMatrix(Xtest))\n yhat_new,BS = modeling_optimize(ytest, yhat)\n\n compare = pd.DataFrame({'real:': np.expm1(ytest), 'forecast': np.expm1(yhat_new)})\n print('compare', compare)\n\n a = np.array(BS.index.values)\n a.repeat(len(ytest))\n W_revise = a.repeat(len(Xpredict))\n return gbm,W_revise\n\n\n\n\ndef exponential_smoothing(alpha, s):\n '''\n 一次指数平滑\n :param alpha: 平滑系数\n :param s: 数据序列, list\n :return: 返回一次指数平滑模型参数, list\n '''\n\n s_temp = [0 for i in range(len(s))]\n s_temp[0] = ( s[0] + s[1] + s[2] ) / 3\n for i in range(1, len(s)):\n s_temp[i] = alpha * s[i] + (1 - alpha) * s_temp[i-1]\n return s_temp\n\ndef compute_single(alpha, s):\n '''\n 一次指数平滑\n :param alpha: 平滑系数\n :param s: 数据序列, list\n :return: 返回一次指数平滑模型参数, list\n '''\n return exponential_smoothing(alpha, s)\n\ndef compute_double(alpha, s):\n '''\n 二次指数平滑\n :param alpha: 平滑系数\n :param s: 数据序列, list\n :return: 返回二次指数平滑模型参数a, b, list\n '''\n s_single = compute_single(alpha, s)\n s_double = compute_single(alpha, s_single)\n\n a_double = [0 for i in range(len(s))]\n b_double = [0 for i in range(len(s))]\n\n for i in range(len(s)):\n a_double[i] = 2 * s_single[i] - s_double[i] #计算二次指数平滑的a\n b_double[i] = (alpha / (1 - alpha)) * (s_single[i] - s_double[i]) #计算二次指数平滑的b\n #----------------------------------------构建未来7日的预测------------------------------------------------------------\n pre_list = list()\n for i in range(0,7):\n pre_new = a_double[-1] + b_double[-1] * i\n pre_list.append(pre_new)\n\n return pre_list\n\n\ndef model_double(data_sr):\n fit2 = Holt(data_sr, exponential=True).fit(smoothing_level=0.8, smoothing_slope=0.2, optimized=False)\n result = fit2.forecast(7)\n l2, = plt.plot(list(fit2.fittedvalues) + list(fit2.forecast(7)), marker='.')\n plt.show()\n return result\n\n#——————————————————————完整的单个SKU的预测的整合————————————————\ndef forecast_merge(train_df,end_date):\n\n train_df['sales_qty'].astype('int')\n #------------有些SKU前面各种处理后还是只有少量的销售--------------\n if len(train_df[~train_df['sales_qty'].isin([0])]) <=10:\n new_start = (datetime.datetime.strptime(end_date, '%Y%m%d') + datetime.timedelta(7)).strftime('%Y%m%d')\n print('train_df',train_df)\n #=============================================================================================>二阶指数预测\n ts = train_df[['sales_qty']]\n pre_list = compute_double(0.7, ts)\n predict_df = pd.DataFrame({'Account_date': pd.date_range(end_date, new_start, freq='D'),\n 'forecast': np.array(pre_list)})\n else:\n Xtrain,ytrain,Xtest,ytest,Xpredict, new_start, end = df_division(train_df,end_date)\n gbm, W_revise = model_revised(Xtrain, ytrain, Xtest, ytest,Xpredict)\n\n predict_list = np.round(np.expm1(gbm.predict(xgb.DMatrix(Xpredict)) * W_revise))\n predict_df = pd.DataFrame({'Account_date': pd.date_range(new_start, end, freq='D'),\n 'forecast':np.array(predict_list)})\n predict_df = revised_predict(train_df,predict_df)\n return predict_df\n\n\n\n#——————————————————特征重要度画图————————————\ndef feature_importance(train_df,gbm):\n columns 
= train_df.drop(['Account_date','sales_qty'],axis=1).columns\n feature_score = gbm.get_fscore()\n feature_score = sorted(feature_score.items(), key=lambda x: x[1], reverse=True)\n\n fs = []\n for (key, value) in feature_score:\n fs.append(\"{0},{1}\".format(key[1:], value))\n\n feature_list = list()\n fscore_list = list()\n for i in range(len(fs)):\n temp = fs[i]\n temp_split = temp.split(',', 1)\n feature_list.append(int(temp_split[0]))\n fscore_list.append(int(temp_split[1]))\n\n importance_columns = list()\n for x in range(len(feature_list)):\n columns_index = feature_list[x]\n importance_columns.append(columns[columns_index])\n df = pd.DataFrame({'feature_list': feature_list, 'fscore_list': fscore_list,'feature_name':importance_columns})\n df['fscore_list'] = df['fscore_list'] / df['fscore_list'].sum()\n\n #\n feature_name = df['feature_name'][:30]\n fscore_list = df['fscore_list'][:30]\n\n fig = plt.figure(figsize=(20, 10), facecolor='white')\n ax1 = fig.add_subplot(111)\n # 左轴\n ax1.bar(feature_name, fscore_list, width=0.5, align='center', label='real_qty', color=\"black\")\n\n plt.xticks(feature_name, color='blue', rotation=90)\n plt.legend(loc='upper left', fontsize=10)\n # plt.text('2019-10-01', sum, text, fontdict={'size': 20, 'color': 'y'}, verticalalignment='top',\n # horizontalalignment='left')\n ax1.set_xlabel('relative importance')\n # ax1.set_ylabel('relative importance')\n plt.title('XGBoost Feature Importance')\n # plt.xlabel('relative importance')\n plt.savefig('./fscore_list.jpg', dpi=600, bbox_inches='tight')\n plt.close()\n\n\n\nif __name__ == '__main__':\n # train_df = pd.read_csv('D:/AI/xianfengsg/forecaset/warehouse/V2.0/compare_old_new/merge.csv',encoding='utf_8_sig')\n # end_date = datetime.datetime.now().strftime('%Y-%m-%d')\n #\n # df = train_df[['Account_date','sales_qty']]\n\n ts = list([31.0,152.0,128.0,34.0,67.0,47.0,5.0,])\n\n print(ts)\n pre_list = compute_double(0.7,ts)\n print(pre_list)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\ndef modelfit(alg, dtrain, predictors, useTrainCV=True, cv_folds=5, early_stopping_rounds=50):\n if useTrainCV:\n xgb_param = alg.get_xgb_params()\n xgtrain = xgb.DMatrix(dtrain[predictors].values, label=dtrain[target].values)\n cvresult = xgb.cv(xgb_param, xgtrain, num_boost_round=alg.get_params()['n_estimators'], nfold=cv_folds,\n metrics='auc', early_stopping_rounds=early_stopping_rounds) #, show_progress=False\n alg.set_params(n_estimators=cvresult.shape[0])\n\n\n # Fit the algorithm on the data\n alg.fit(dtrain[predictors], dtrain['Disbursed'], eval_metric='auc')\n\n # Predict training set:\n dtrain_predictions = alg.predict(dtrain[predictors])\n dtrain_predprob = alg.predict_proba(dtrain[predictors])[:, 1]\n\n # Print model report:\n print('\\n','Model Report')\n print(\"Accuracy : %.4g\" % metrics.accuracy_score(dtrain['Disbursed'].values, dtrain_predictions))\n print(\"AUC Score (Train): %f\" % metrics.roc_auc_score(dtrain['Disbursed'], dtrain_predprob))\n\n feat_imp = pd.Series(alg.booster().get_fscore()).sort_values(ascending=False)\n feat_imp.plot(kind='bar', title='Feature Importances')\n plt.ylabel('Feature Importance Score')\n","sub_path":"xianfengsg/forecaset/warehouse/V2.0/algorithm_model/forecast_model.py","file_name":"forecast_model.py","file_ext":"py","file_size_in_byte":13482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"459822260","text":"import pandas as pd \r\n\r\naddress 
='http://www.jpx.co.jp/listing/stocks/new/00-archives-04.html'\r\nyear = 2013\r\n\r\ndf = pd.read_html(address)[0]\r\n\r\neven = [x for x in range(len(df)) if x % 2 == 0]\r\nodd = [x for x in range(len(df)) if x % 2 == 1]\r\n\r\ndf1 = df[df.index.isin(even)]\r\ndf2 = df[df.index.isin(odd)]\r\none = [x for x in range(len(df1))]\r\ntwo = [x for x in range(len(df2))]\r\ndf1.index = one\r\ndf2.index = two\r\n\r\ndf1.columns = ['date', 'name', 'code','del','del1','kari','kobo','lot','del2','del3','del4','del5','del6','del7']\r\ndf2.columns = ['market','del','del1','price','uri','del2','del3','del4','del5','del6','del7','del8','del9','del10']\r\n\r\ndf3 = df1[['date','name','code','kari','kobo','lot']]\r\ndf4 = df2[['market','price','uri']]\r\n\r\n#上場日と上場承認日で分ける\r\ndate = {}\r\n\r\nfor i in df3.index:\r\n\tdf3.loc[i,'date'] = df3.loc[i,'date'].replace(')','')\r\n\tipodate = df3.loc[i,'date'].split('(')\r\n\tdate[i] = ipodate\r\n\r\nd = pd.DataFrame(date).T \r\nd.columns =['上場日']\r\n\r\n#仮条件の分割\r\nlimit = {}\r\n\r\nfor i in df3.index:\r\n\tzyoken = df3.loc[i,'kari'].split('~')\r\n\tif df3.loc[i,'kari'] =='-':\r\n\t\tzyoken = ['0','0']\r\n\tlimit[i] = zyoken\r\n\r\nl= pd.DataFrame(limit).T \r\nl.columns = ['仮条件下限','仮条件上限']\r\n\r\ndf5 = pd.concat([d,df3[['name','code','kobo','lot']],l], axis=1)\r\n\r\nuridashi = {}\r\n\r\nud = df4['uri'].copy()\r\n\r\nfor i in range(len(ud)):\r\n\tud[i] = ud[i].replace(')','')\r\n\tud[i] = ud[i].replace('OA','')\r\n\turid = ud[i].split('(')\r\n\tif ud[i] =='-':\r\n\t\turid = ['0','0']\r\n\tif len(urid) ==1:\r\n\t\turid.append('0')\r\n\turidashi[i] = urid \r\n\r\nu = pd.DataFrame(uridashi).T\r\nu.columns = ['売り出し(千株)', 'オーバーアロットメント(千株)']\r\n\r\ndf6 = pd.concat([df4[['market','price']],u],axis=1)\r\n\r\ndf7 = pd.concat([df5,df6],axis=1)\r\ndf7.columns = ['上場日','会社名','コード','公募株数(千株)','売買単位','仮条件下限','仮条件上限','市場','公募価格','売り出し(千株)','オーバーアロットメント(千株)']\r\n\r\ndf7.to_csv('ipo_{}.csv'.format(year))","sub_path":"ipodata_before_2015.py","file_name":"ipodata_before_2015.py","file_ext":"py","file_size_in_byte":2040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"637436695","text":"import pymysql.cursors\nimport pymysql\nimport configparser\n\nclass PersonRegistry(): \n def __init__(self):\n config = configparser.ConfigParser()\n config.read('.db_config.ini')\n db_conf = config['database']\n self.connection = pymysql.connect(host=db_conf['host'],user=db_conf['user'], password=db_conf['password'], db=db_conf['database'])\n self.cursor = self.connection.cursor()\n # Creating schema\n self.cursor.execute(\"CREATE TABLE IF NOT EXISTS users (name VARCHAR (20), birth DATE);\")\n \n def get_sql_data(self, query):\n self.cursor.execute(query)\n result = self.cursor.fetchone()\n if result:\n return result[0]\n else:\n return None\n\n def add_person(self, name,birth_day):\n registered_birt_day = self.get_birh_date(name)\n if registered_birt_day and registered_birt_day != birth_day:\n # update birth_day\n self.cursor.execute(\"UPDATE users SET birth = '\"+str(birth_day)+\"' WHERE name ='\"+ name+\"';\")\n # self.data_base[name] = birth_day\n elif not registered_birt_day:\n # insert new birh_day\n self.cursor.execute(\"INSERT INTO users VALUES ('\"+ name+\"','\"+str(birth_day)+\"');\")\n # self.data_base[name] = birth_day\n self.connection.commit()\n \n def get_birh_date(self,name):\n birth_date = self.get_sql_data(\"SELECT birth FROM users WHERE name = '\"+ name +\"';\")\n return birth_date\n\n def remove_person(self,name):\n if 
self.get_birh_date(name):\n self.cursor.execute(\"DELETE FROM users WHERE name ='\"+ name+\"';\")\n self.connection.commit()","sub_path":"person_registry.py","file_name":"person_registry.py","file_ext":"py","file_size_in_byte":1694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"427227008","text":"import socket\nimport io\nimport json\nfrom scipy import misc\nfrom enum import Enum\n\n\nclass SkinFeature(Enum):\n ERYTHEMA = 'ery'\n PORE = 'por'\n PIGMENTATION = 'pig'\n WRINKLE = 'wri'\n\n\nclass TrackBarValue(Enum):\n TIGHT = 3\n NORMAL = 2\n LOOSE = 1\n\n\ndef deserialize_resp_data(response_data):\n \"\"\"\n 서버로부터 받은 응답을(JSON 형식의 string -> bytes 데이터) dict으로 변환한다.\n :param response_data: byte(JSON string)\n :return: dict()\n \"\"\"\n #return pickle.loads(response_data)\n return json.loads(response_data)\n\n\ndef serialize_req_param_data(feature_type, roi_cv_img,\n tv_1=TrackBarValue.NORMAL,\n tv_2=TrackBarValue.NORMAL,\n tv_3=TrackBarValue.NORMAL,\n tv_4=TrackBarValue.NORMAL,\n visible=False):\n \"\"\"\n 서버로 보낼 요청 데이터를 bytes로 변환한다.\n :param feature_type: Enum 타입 client.SkinFeature 중 하나의 값\n :param tv_1: Enum 타입 client.TrackBarValue 중 하나의 값\n :param tv_2: Enum 타입 client.TrackBarValue 중 하나의 값\n :param tv_3: Enum 타입 client.TrackBarValue 중 하나의 값\n :param tv_4: Enum 타입 client.TrackBarValue 중 하나의 값\n :param roi_cv_img: OpenCV-python의 이미지(numpy array)\n :param visible: 이미지 출력 여부 (소켓 서버 디버깅 용)\n :return: 요청에 대한 응답 dict()\n \"\"\"\n # Type checking\n if not isinstance(feature_type, SkinFeature):\n raise TypeError('feature_type must be an instance of SkinFeature Enum')\n\n if not isinstance(tv_1, TrackBarValue):\n raise TypeError('tv_1 must be an instance of TrackBarValue Enum')\n\n if not isinstance(tv_2, TrackBarValue):\n raise TypeError('tv_2 must be an instance of TrackBarValue Enum')\n\n if not isinstance(tv_3, TrackBarValue):\n raise TypeError('tv_3 must be an instance of TrackBarValue Enum')\n\n if not isinstance(tv_4, TrackBarValue):\n raise TypeError('tv_4 must be an instance of TrackBarValue Enum')\n\n data = dict()\n data['type'] = feature_type.value\n # data['ROI'] = roi\n data['tv_1'] = tv_1.value\n data['tv_2'] = tv_2.value\n data['tv_3'] = tv_3.value\n data['tv_4'] = tv_4.value\n data['visible'] = int(visible)\n\n # data serialization to str\n json_str = bytes(json.dumps(data), 'utf-8')\n\n # Append RoI Data\n # Request Data format: byte[78] <- json_str(Parameters) + Image bytes\n json_str += image_to_bytes(roi_cv_img)\n\n return json_str\n\n\ndef image_to_bytes(img):\n \"\"\"\n OpenCV-python에서 이미지 (numpy array)를 Bytes로 만든다.\n :param img: Image(numpy array)\n :return: Image(Bytes)\n \"\"\"\n # Convert BGR(opencv) to RGB\n img_rgb = img[..., ::-1]\n output = io.BytesIO()\n misc.toimage(img_rgb).save(output, format='JPEG')\n\n return output.getvalue()\n\n\ndef request(send_data):\n \"\"\"\n 서버로 요청을 보내고 응답을 리턴한다.\n :param send_data: Serialized Request Data > Bytes or byte[]\n :return: Deserialized Response Data > dict()\n \"\"\"\n host, port = \"127.0.0.1\", 34567\n # Create a socket (SOCK_STREAM means a TCP socket)\n client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n try:\n # Connect to server and send data\n client.connect((host, port))\n client.sendall(send_data)\n\n # Receive data from server\n data = client.recv(1024)\n\n # Deserialize received JSON data\n data = deserialize_resp_data(data)\n\n finally:\n client.close()\n\n return 
data\n","sub_path":"api/services/network/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":3575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"74911330","text":"\"\"\"\nExercise 5:\n\nTask 1: Classify the magnetic phases in terms of\n- a fully connected layer (FCL)\n- a convolutional neural network (CNN)\n- the toy model (s. lecture slides)\nPlot test accuracy vs. temperature for both networks and for the toy model.\n\nTask 2: Discriminative localization\nPick out two correctly and two wrongly classified images from the CNN.\nLook at Exercise 4, task 2 (visualize.py) to extract weights and feature maps from the trained model.\nCalculate and plot the class activation maps and compare them with the images in order to see which regions lead to the class decision.\n\nHand in a printout of your commented code and plots.\n\nIf you are interested in the data generation look at MonteCarlo.py.\n\"\"\"\n\n# Note: if you are having troubles with loading the dlipr library you can\n# comment in the following two lines.\n# import sys\n# sys.path.append(\"/software/community/dlipr\")\nimport dlipr\n\n\n# Load the Ising dataset\ndata = dlipr.ising.load_data()\n\n# plot some examples\ndata.plot_examples(5, fname='examples.png')\n\n# features: images of spin configurations\nX_train = data.train_images\nX_test = data.test_images\n\n# classes: simulated temperatures\nT = data.classes\n\n# labels: class index of simulated temperature\n# create binary training labels: T > Tc?\nTc = 2.27\ny_train = T[data.train_labels] > Tc\ny_test = T[data.test_labels] > Tc\n","sub_path":"Deep_Learning_in_Physics_ResearchSS17/exercise5/solution_exercise5/Exercise5.py","file_name":"Exercise5.py","file_ext":"py","file_size_in_byte":1350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"308032319","text":"# -*- coding: utf-8 -*-\nfrom Utils import *\n\nclass ApplePie:\n \n ###공통정보\n \n #디렉토리경로\n path = '' \n \n #기존파일정보\n originFiles = [] \n \n #확장자별파일정보\n extFiles = {}\n \n \n #저장가능한설정정보\n #변경후파일명\n #처리제외대상확장자정보\n #인덱스시작번호\n settings = { 'fileName': '새 파일_%idx%', 'exceptInfo': {}, 'startno': 1, 'idxSize': 1}\n \n #처리결과\n SUCCESS = 'success'\n ERR_WRONG_FILENAMES = '파일 갯수가 달라요'\n \n ###단순화면정보\n \n #일괄저장파일명\n newFileNames = ''\n \n ###복잡화면정보\n \n #미리보기파일정보\n previewFiles = [] \n \n #확장자별파일정보\n previewExtFiles = {}\n \n def __init__(self):\n #설정 불러오기 \n config = getConfig('./config.txt') \n for c in config :\n self.settings[c] = config[c]\n \n #정보초기화 \n def refresh(self, inputPath=None):\n print('appliepie refresh')\n if inputPath != None:\n if inputPath[-1] != '/':\n inputPath = inputPath + '/'\n self.path = inputPath\n \n self.originFiles = self.fileList(self.path)\n self.previewFiles = self.fileList(self.path)\n self.extFiles = self.makeExtFiles(self.originFiles)\n self.settings['exceptInfo'] = self.makeExceptInfo(self.originFiles)\n \n #복잡)처리제외확장자정보설정\n def makeExceptInfo(self, fileList):\n extList = {}\n for file in fileList:\n if getExt(file) in self.settings['exceptInfo']:\n extList[getExt(file)] = self.settings['exceptInfo'][getExt(file)] \n else:\n extList[getExt(file)] = '0' \n return extList\n\n #단순)새파일명 설정\n def setNewFileNames(self, filenames):\n self.newFileNames = filenames \n \n #인덱스사이즈설정\n def setIdxSize(self, idxSize):\n self.settings['idxSize'] = idxSize\n \n #확장자별 파일정보 생성\n def makeExtFiles(self, fileList):\n fileInfo = {}\n for file in fileList:\n if fileInfo.get(getExt(file)) == 
None:\n fileInfo[getExt(file)] = []\n fileInfo[getExt(file)].append(file)\n return fileInfo\n \n #하위디렉토리&파일추출 TODO:소스정리\n def fileList(self, path):\n list = fileList(path)\n \n fileInfo = {}\n for file in list:\n if fileInfo.get(getExt(file)) == None:\n fileInfo[getExt(file)] = []\n fileInfo[getExt(file)].append(file)\n \n list = []\n for ext in fileInfo:\n for file in fileInfo[ext]:\n list.append(file)\n \n return list\n \n #복잡)처리제외대상확장자ON/OFF\n def toggleExcept(self, ext):\n if self.settings['exceptInfo'].get(ext) != None:\n self.settings['exceptInfo'][ext] = 1 - self.settings['exceptInfo'][ext]\n \n #단순)파일명 일괄변경\n def replaceFileNames1(self):\n parentPath = self.path + '/'\n lines = self.newFileNames.split('\\n') \n cnt = 0\n if len(self.originFiles) == len(lines):\n for file in self.originFiles:\n renameFile(self.path + file, self.path + lines[cnt])\n cnt = cnt + 1\n return self.SUCCESS\n else :\n return self.ERR_WRONG_FILENAMES + ' ' + str(len(self.originFiles)) + ' != ' + str(len(lines))\n\n #복잡)파일명 일괄변경\n def replaceFileNames2(self): \n print('replace filename 2') \n \n try:\n for ext in self.settings['exceptInfo']:\n if self.settings['exceptInfo'][ext] == '0':\n idx = int(self.settings['startno'])\n for file in self.extFiles[ext]:\n newFileName = self.settings['fileName'].replace('%idx%', lpad(str(idx), self.settings['idxSize'])) + '.' + ext\n renameFile(self.path + file, self.path + newFileName)\n idx = idx + 1\n return self.SUCCESS\n except:\n return self.SUCCESS + '1'\n \n def setNewFileName(self, filename):\n self.settings['fileName'] = filename\n \n def getNewFileName(self):\n return self.settings['fileName']\n\n #설정저장\n def saveSettings(self):\n try:\n list = []\n for setting in self.settings:\n if setting != 'exceptInfo':\n list.append(setting + '=' + str(self.settings[setting]))\n \n f = open('./config.txt', 'w', encoding='utf-8')\n f.write(listToString(list)) \n \n return self.SUCCESS\n except:\n return self.SUCCESS + '1'","sub_path":"applepie/app/ruri/ApplePie.py","file_name":"ApplePie.py","file_ext":"py","file_size_in_byte":4986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"503180226","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Jan 31 20:02:17 2020\r\n\r\n@author: Administrator\r\n\"\"\"\r\n\r\nimport pandas as pd \r\nimport numpy as np \r\nimport matplotlib.pyplot as plt \r\nimport seaborn as sns \r\nfrom sklearn.model_selection import train_test_split \r\nfrom sklearn.linear_model import LinearRegression\r\nfrom sklearn import metrics\r\nfrom sklearn.preprocessing import LabelEncoder\r\nimport pickle\r\n#%%\r\ndf = pd.read_csv('/Users/Administrator/.spyder-py3/DC_Properties.csv')\r\ndfNull= df.isnull().sum()\r\n#%%\r\nprint('Percent of missing \"Price\" records is %.2f%%' %((df['PRICE'].isnull().sum()/df.shape[0])*100))\r\n#%%\r\n#Percent of missing \"Price\" records is 38.21%\r\ntotal = df.isnull().sum().sort_values(ascending=False)\r\npercent_1 = df.isnull().sum()/df.isnull().count()*100\r\npercent_2 = (round(percent_1, 1)).sort_values(ascending=False)\r\nmissing_data = pd.concat([df.count(),total, percent_2], axis=1, keys=['Sum','Total', '%'])\r\ndf_NULLdataset=missing_data\r\n#%%\r\ndummy_dataset = df\r\n\r\ndummy_dataset['Price_Flag'] = np.where(dummy_dataset.PRICE > 0 , 1,0)\r\n\r\nunknown_dataset = dummy_dataset[dummy_dataset.Price_Flag != 1]\r\n\r\nunknown_dataset.shape\r\ndataset = dummy_dataset[dummy_dataset.Price_Flag != 0]\r\ndataset.corr()\r\n#%%\r\ndf=dataset\r\ndf.drop(['Unnamed: 
0', \"CMPLX_NUM\", \"LIVING_GBA\" , \"ASSESSMENT_SUBNBHD\", \"CENSUS_TRACT\", \r\n \"CENSUS_BLOCK\", \"GIS_LAST_MOD_DTTM\", \"SALE_NUM\",\"STORIES\", \"USECODE\", \"CITY\", \r\n \"STATE\", \"NATIONALGRID\",'X','Y','SALEDATE'],axis=1,inplace=True)\r\n#%%\r\ndf.dropna(subset=['AYB'],inplace=True)\r\ngroup_remodel= df.groupby(['EYB','AYB']).mean()['YR_RMDL']\r\n#%%\r\ndef applyRemodel(x):\r\n if pd.notnull(x['YR_RMDL']):\r\n return x['YR_RMDL']\r\n else:\r\n return round(group_remodel.loc[x['EYB']][x['AYB']])\r\n \r\n#%%\r\ndf['YR_RMDL'] = df[['YR_RMDL','EYB','AYB']].apply(applyRemodel,axis = 1)\r\ndf.dropna(subset=['YR_RMDL'],inplace=True)","sub_path":"dcDataProcessing.py","file_name":"dcDataProcessing.py","file_ext":"py","file_size_in_byte":1890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"37479520","text":"import urllib.request, json\nfrom .models import News,Sources,Articles\n\napi_key=None\nbase_url=None\nsource_url=None\narticle_url=None\n\ndef configure_request(app):\n global api_key,base_url,source_url,article_url\n api_key=app.config[\"NEWS_API_KEY\"]\n base_url=app.config[\"NEWS_API_BASE_URL\"]\n source_url=app.config[\"NEWS_BASE_URL\"]\n article_url=app.config[\"NEWS_ARTICLE_API\"]\n \ndef news(headlines):\n get_news_url=base_url.format(headlines,api_key)\n \n with urllib.request.urlopen(get_news_url) as url:\n get_news_data=url.read()\n get_news_response=json.loads(get_news_data)\n\n news_results=None\n\n if get_news_response['articles']:\n news_result_list=get_news_response[\"articles\"]\n news_results=process_results(news_result_list)\n\n return news_results\n\ndef process_results(news_list):\n news_results=[]\n for news in news_list:\n\n title=news.get(\"title\")\n description=news.get(\"description\")\n urltoImage=news.get(\"urlToImage\")\n content=news.get(\"content\")\n url=news.get(\"url\")\n if description and urltoImage:\n\n news_object=News(title,description,urltoImage,content,url)\n\n news_results.append(news_object)\n news_results=news_results[:6]\n return news_results\n'''\nGetting various sources\n'''\ndef sources(sources):\n get_news_url=source_url.format(sources,api_key)\n with urllib.request.urlopen(get_news_url) as url:\n source_data=url.read()\n source_response=json.loads(source_data)\n\n source_results=None\n\n if source_response[\"sources\"]:\n new_source_results=source_response[\"sources\"]\n source_results=process_sources(new_source_results)\n return source_results\n\ndef process_sources(source_list):\n source_results=[]\n for source in source_list:\n id=source.get(\"id\")\n name=source.get(\"name\")\n description=source.get(\"description\")\n url=source.get(\"url\")\n\n source_object=Sources(id,name,description,url)\n source_results.append(source_object)\n source_results=source_results[:4]\n\n return source_results\n\n'''\nFetching Articles\n'''\ndef get_articles(name):\n get_articles_url=article_url.format(name,api_key)\n with urllib.request.urlopen(get_articles_url) as url:\n article_data=url.read()\n article_response=json.loads(article_data)\n\n article_object=None;\n\n if article_response['articles']:\n at_result_list=article_response[\"articles\"]\n at_results=process_articles(at_result_list)\n return at_results\n\ndef process_articles(article_list):\n article_results=[]\n for article in article_list:\n title=article.get(\"title\")\n description=article.get(\"description\")\n urlToImage=article.get(\"urlToImage\")\n url=article.get(\"url\")\n publishedAt=article.get(\"publishedAt\")\n\n 
article_object=Articles(title,description,urlToImage,url,publishedAt)\n article_results.append(article_object)\n\n return article_results\n\ndef get_sources(name):\n get_articles_details_url=article_url.format(name,api_key)\n\n with urllib.request.urlopen(get_articles_details_url) as url:\n news_details_data=url.read();\n news_details_response=json.loads(news_details_data)\n\n news_object=None\n if news_details_response:\n title=news_details_response.get(\"title\")\n description=news_details_response.get(\"description\")\n urlToImage=news_details_response.get(\"urlToImage\")\n url=news_details_response.get(\"url\")\n publishedAt=news_details_response.get(\"publishedAt\")\n\n news_object=Articles(title,description,url,urlToImage,publishedAt)\n return news_object\n","sub_path":"app/request.py","file_name":"request.py","file_ext":"py","file_size_in_byte":3767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"351284509","text":"import logging\nfrom typing import Annotated, Any\n\nfrom fastapi import APIRouter, Depends\n\nfrom ...models.pagination import LimitOffsetPage, LimitOffsetParams, OnePage\nfrom ...models.schemas.studies import Study, StudyID, StudyPort\nfrom ...services.webserver import AuthSession\nfrom ..dependencies.webserver import get_webserver_session\nfrom ._common import API_SERVER_DEV_FEATURES_ENABLED\n\n_logger = logging.getLogger(__name__)\nrouter = APIRouter()\n\n\n@router.get(\n \"/\",\n response_model=LimitOffsetPage[Study],\n include_in_schema=API_SERVER_DEV_FEATURES_ENABLED,\n)\nasync def list_studies(\n page_params: Annotated[LimitOffsetParams, Depends()],\n):\n msg = f\"list user's studies with pagination={page_params!r}. SEE https://github.com/ITISFoundation/osparc-simcore/issues/4177\"\n raise NotImplementedError(msg)\n\n\n@router.get(\n \"/{study_id}\",\n response_model=Study,\n include_in_schema=API_SERVER_DEV_FEATURES_ENABLED,\n)\nasync def get_study(study_id: StudyID):\n msg = f\"get user's study study_id={study_id!r}. 
SEE https://github.com/ITISFoundation/osparc-simcore/issues/4177\"\n raise NotImplementedError(msg)\n\n\n@router.get(\n \"/{study_id}/ports\",\n response_model=OnePage[StudyPort],\n include_in_schema=API_SERVER_DEV_FEATURES_ENABLED,\n)\nasync def list_study_ports(\n study_id: StudyID,\n webserver_api: Annotated[AuthSession, Depends(get_webserver_session)],\n):\n \"\"\"Lists metadata on ports of a given study\n\n New in *version 0.5.0* (only with API_SERVER_DEV_FEATURES_ENABLED=1)\n \"\"\"\n project_ports: list[\n dict[str, Any]\n ] = await webserver_api.get_project_metadata_ports(project_id=study_id)\n\n return OnePage[StudyPort](items=project_ports) # type: ignore[arg-type]\n","sub_path":"services/api-server/src/simcore_service_api_server/api/routes/studies.py","file_name":"studies.py","file_ext":"py","file_size_in_byte":1731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"178929993","text":"\"\"\"This module contains the default values and constants used in pvfactors \"\"\"\nfrom future.utils import iteritems\nimport numpy as np\n\n# Geometry params\nDEFAULT_NORMAL_VEC = None\nTOL_COLLINEAR = 1e-5\n\n# Ground params\nMAX_X_GROUND = 1e2\nMIN_X_GROUND = - MAX_X_GROUND\nY_GROUND = 0.\n\n# PV rows parameters\nX_ORIGIN_PVROWS = 0.\n\n# Define colors used for plotting the 2D arrays\nCOLOR_DIC = {\n 'i': '#FFBB33',\n 's': '#A7A49D',\n 't': '#6699cc',\n 'pvrow_illum': '#6699cc',\n 'pvrow_shaded': '#ff0000',\n 'ground_shaded': '#A7A49D',\n 'ground_illum': '#FFBB33'\n}\nPLOT_FONTSIZE = 20\nALPHA_TEXT = 0.20\n\n# Tolerance and thresholds to use from experience getting errors with shapely\nDISTANCE_TOLERANCE = 1e-8\nTHRESHOLD_DISTANCE_TOO_CLOSE = 1e-10\n\n\n# The view dictionaries associate integer indices to 'types' of views. For\n# instance \"ground_sky\" would be the view between a ground surface and the\n# sky, and it would use the integer index ``1``.\nVIEW_DICT = {\n None: 0,\n \"ground_sky\": 1,\n \"back_gnd\": 2,\n \"gnd_back\": 3,\n \"front_sky\": 4,\n \"back_sky\": 5,\n \"back_gnd_obst\": 6,\n \"gnd_back_obst\": 7,\n \"front_gnd_obst\": 8,\n \"gnd_front_obst\": 9,\n \"pvrows\": 10}\nREVERSE_VIEW_DICT = {v: k for (k, v) in iteritems(VIEW_DICT)}\nTHRESHOLD_VF_12 = 5e-5\n\n\n# Gaussian shading default parameters: TOTAL_GAUSSIAN_AREA dependent on these\nSIGMA = 1. / np.sqrt(2.)\nN_SIGMA = 3.\nGAUSSIAN_DIAMETER_CIRCUMSOLAR = 2. 
* N_SIGMA * SIGMA\nRADIUS_CIRCUMSOLAR = 1.\nDEFAULT_CIRCUMSOLAR_ANGLE = 30.\n\n# Horizon band shading\nDEFAULT_HORIZON_BAND_ANGLE = 6.5\n\nSKY_REFLECTIVITY_DUMMY = 1.\n","sub_path":"pvfactors/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"103288612","text":"import os\nimport sys\nfrom PIL import Image\nimport random\nfrom random import shuffle\nimport numpy as np\n\n\ndef flipping(img):\n # img = Image.open(img_name)\n img = np.array(img)\n img_flip = np.flipud(img)\n return Image.fromarray(img_flip)\n\n\ndef mirroring(img):\n # img = Image.open(img_name)\n img = np.array(img)\n img_mir = np.fliplr(img)\n return Image.fromarray(img_mir)\n\n\ndef rotate(img):\n # img = Image.open\n img = np.array(img)\n img_rot = np.rot90(img)\n return Image.fromarray(img_rot)\n\n\ndef random_transform(img):\n if bool(random.getrandbits(1)):\n img = flipping(img)\n if bool(random.getrandbits(1)):\n img = mirroring(img)\n if bool(random.getrandbits(1)):\n img = rotate(img)\n return img\n\n\ndef image_crop(infilename, col_num, row_num, out_path):\n img = Image.open(infilename)\n (img_h, img_w) = img.size\n print(img.size)\n\n col_num = int(col_num)\n row_num = int(row_num)\n col_extra = img_w % col_num\n row_extra = img_h % row_num\n\n if col_extra == 0:\n grid_w = img_w / col_num # crop width(int)\n else:\n grid_w = (img_w - col_extra) / col_num # crop width(not_int)\n\n if row_extra == 0:\n grid_h = img_h / row_num # crop height(int)\n else:\n grid_h = (img_h - row_extra) / row_num # crop height(not_int)\n print(grid_w, grid_h)\n\n img_num = list(range(1, col_num * row_num + 1))\n\n shuffle(img_num)\n print(img_num)\n i = 0\n for w in range(col_num):\n for h in range(row_num):\n img_box = (h * grid_h, w * grid_w, (h + 1) * grid_h, (w + 1) * grid_w)\n print(h * grid_h, w * grid_w, (h + 1) * grid_h, (w + 1) * grid_w)\n crop_img = img.crop(img_box)\n transformed_img = random_transform(crop_img)\n fname = '0' + str(img_num[i]) if img_num[i] / 10 < 1 else str(img_num[i])\n full_name = fname + '.jpg'\n savename = os.path.join(out_path, full_name)\n transformed_img.save(savename)\n print('save file ' + savename + '....')\n i += 1\n\n\n\n\nif __name__ == '__main__':\n image_file_name = sys.argv[1]\n column_number = sys.argv[2]\n row_number = sys.argv[3]\n prefix_output_filename = sys.argv[4]\n image_crop(image_file_name , column_number , row_number, prefix_output_filename)","sub_path":"slice_image.py","file_name":"slice_image.py","file_ext":"py","file_size_in_byte":2329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"70643870","text":"# Created by zh on 2016/4/2.\n# -*- coding: UTF-8 -*-\n\n\nimport json\nimport requests\nimport logging\nimport traceback\n\n\nclass mediaList(object):\n \"\"\"\n 媒体列表类\n 获取媒体ID、列表,包括标题、类型、图片\n \"\"\"\n def __init__(self):\n self.logger = logging.getLogger(name='mediaList')\n\n def get_media_list(self, init_offset, offset):\n \"\"\"\n 获取媒体列表\n :return: media_list列表\n [\n {'message_id': '', 'headline': '', 'tags': '', 'pic': ''},\n ...\n ]\n \"\"\"\n self.logger.info(\"进入get_media_list\")\n\n # 1.将参数转换成json格式\n jsons = json.dumps(\n {\n 'init_offset': init_offset,\n 'offset': offset\n }\n ).encode('utf8')\n\n # 1.获取并解析JSON对象\n try:\n r = requests.get('http://localhost:8805%s' % '/media/media_list', data=jsons)\n json_obj = r.json()\n r.close()\n self.logger.info(\"远程调用get_media_list成功\")\n except 
Exception:\n self.logger.error(traceback.format_exc())\n self.logger.error(\"远程调用get_media_list失败\")\n raise Exception\n\n # 2.判断数据是否合法\n if json_obj['result'] == 'fail':\n self.logger.error(json_obj['fail_reason'])\n self.logger.info(\"退出get_media_list\")\n return []\n else:\n media_list = []\n for dic in json_obj['data']:\n my_dic = {}\n for key in dic:\n my_dic[key] = dic[key]\n media_list.append(my_dic)\n self.logger.info(\"退出get_media_list\")\n return media_list\n\n\n","sub_path":"model/storm/mediaList.py","file_name":"mediaList.py","file_ext":"py","file_size_in_byte":1815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"213863365","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2017/8/25 AM10:46\n# @Author : Qiming Zhang\n# @File : WiggleSort\nclass Solution(object):\n # 直接排序 然后从第二项开始两两交换\n # 1 2 3 4 5 6\n # 1 3 2 5 4 6\n def wiggleSort(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: void Do not return anything, modify nums in-place instead.\n \"\"\"\n nums.sort()\n for i in range(1, len(nums) - 1, 2):\n nums[i], nums[i + 1] = nums[i + 1], nums[i]\n\n\n def wiggleSort(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: void Do not return anything, modify nums in-place instead.\n \"\"\"\n for i in range(len(nums) - 1):\n if i % 2 == 0 and nums[i] > nums[i + 1] or \\\n i % 2 == 1 and nums[i] < nums[i + 1]:\n nums[i], nums[i + 1] = nums[i + 1], nums[i]\n","sub_path":"Array/WiggleSort.py","file_name":"WiggleSort.py","file_ext":"py","file_size_in_byte":898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"221016883","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/quantecon/util/tests/test_notebooks.py\n# Compiled at: 2019-07-07 21:19:40\n# Size of source mod 2**32: 1136 bytes\n\"\"\"\nTests for Notebook Utilities\n\nFunctions\n---------\nfetch_nb_dependencies\n\n\"\"\"\nfrom quantecon.util import fetch_nb_dependencies\nimport unittest, os\nFILES = [\n 'test_file.md']\nREPO = 'https://github.com/QuantEcon/QuantEcon.py'\nRAW = 'raw'\nBRANCH = 'master'\nFOLDER = 'quantecon/util/tests/'\n\nclass TestNotebookUtils(unittest.TestCase):\n\n def test_fetch_nb_dependencies(self):\n \"\"\"\n Run First and Test Download\n \"\"\"\n status = fetch_nb_dependencies(files=FILES,\n repo=REPO,\n raw=RAW,\n branch=BRANCH,\n folder=FOLDER)\n self.assertFalse(False in status)\n\n def test_fetch_nb_dependencies_overwrite(self):\n \"\"\"\n Run Second and Ensure file is skipped by checking a False is found in status\n \"\"\"\n status = fetch_nb_dependencies(files=FILES,\n repo=REPO,\n raw=RAW,\n branch=BRANCH,\n folder=FOLDER)\n status = fetch_nb_dependencies(files=FILES,\n repo=REPO,\n raw=RAW,\n branch=BRANCH,\n folder=FOLDER)\n self.assertTrue(False in status)\n\n def tearDown(self):\n os.remove('test_file.md')","sub_path":"pycfiles/quantecon-0.4.6-py3.7/test_notebooks.cpython-37.py","file_name":"test_notebooks.cpython-37.py","file_ext":"py","file_size_in_byte":1468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"214473605","text":"import sys\nimport random\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtWidgets import *\nimport os\n\n\nclass MainWindow(QWidget):\n def __init__(self, parent=None):\n super(MainWindow, self).__init__(parent)\n\n # 
横のレイアウト\n self.horizon = QHBoxLayout()\n # 縦のレイアウト\n self.vertical = QVBoxLayout()\n\n # ボタンの追加\n self.button = QPushButton('Reset', self)\n self.button.move(350,720)\n self.button.clicked.connect(self.randomize)\n\n self.horizon.addLayout(self.vertical)\n self.setLayout(self.horizon)\n\n self.setGeometry(0, 50, 800, 800)\n self.setWindowTitle('Randomize Ball Arrange')\n\n def paintEvent(self, event):\n painter = QPainter()\n painter.begin(self)\n\n #背景\n painter.setBrush(Qt.gray)\n painter.drawRect(0, 0.0, 800.0, 800.0)\n\n # 枠\n painter.setBrush(Qt.darkGreen)\n painter.drawRect(27, 0.0, 740.0, 800.0)\n\n #壁\n painter.setPen(QPen(Qt.black, 11, Qt.SolidLine))\n painter.drawLine(33, 0, 33, 800)\n painter.drawLine(767, 0, 767, 800)\n\n #ボール配置用枠\n painter.setPen(QPen(Qt.gray, 2, Qt.SolidLine))\n #横ライン\n painter.drawLine(40, 125, 760, 125)\n painter.drawLine(40, 250, 760, 250)\n #縦ライン\n painter.drawLine(150, 25, 150, 350)\n painter.drawLine(275, 25, 275, 350)\n painter.drawLine(525, 25, 525, 350)\n painter.drawLine(650, 25, 650, 350)\n\n #白線\n painter.setPen(QPen(Qt.white, 4, Qt.SolidLine))\n #横ライン\n painter.drawLine(40, 400, 760, 400)\n #縦ライン\n painter.drawLine(400, 0, 400, 800)\n\n #台\n painter.setPen(Qt.gray)\n painter.setBrush(Qt.black)\n painter.drawRect(40, 650, 150, 150)\n painter.drawRect(610, 650, 150, 150)\n painter.setBrush(Qt.red)\n painter.drawRect(55, 665, 120, 120)\n painter.setBrush(Qt.blue)\n painter.drawRect(625, 665, 120, 120)\n\n #ボールの配置\n center_left = [QPoint(95, 70), QPoint(215, 70), QPoint(335, 70),\n QPoint(95, 185), QPoint(215, 185), QPoint(335, 185),\n QPoint(95, 300), QPoint(215, 300), QPoint(335, 300)\n ]\n\n center_right = [QPoint(460, 70), QPoint(590, 70), QPoint(710, 70),\n QPoint(460, 185), QPoint(590, 185), QPoint(710, 185),\n QPoint(460, 300), QPoint(590, 300), QPoint(710, 300)\n ]\n\n #ボールの配置をランダムに決める\n place_left = random.sample(center_left, 6)\n place_right = random.sample(center_right, 6)\n\n #ボール描画\n for i in range(0, 3):\n painter.setPen(Qt.white)\n painter.setBrush(Qt.red)\n painter.drawEllipse(place_left[i], 35, 35)\n painter.drawEllipse(place_right[i], 35, 35)\n for i in range(3, 6):\n painter.setPen(Qt.white)\n painter.setBrush(Qt.blue)\n painter.drawEllipse(place_left[i], 35, 35)\n painter.drawEllipse(place_right[i], 35, 35)\n\n painter.end()\n\n def randomize(self):\n python = sys.executable\n os.execl(python, python, *sys.argv)\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n main_window = MainWindow()\n main_window.show()\n sys.exit(app.exec_())","sub_path":"RandomizeBallArrange.py","file_name":"RandomizeBallArrange.py","file_ext":"py","file_size_in_byte":3508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"387519287","text":"import vtk\nimport os\nimport pickle\nimport utils\nimport torch\nfrom reconstruction import AE\nimport numpy as np\nfrom utils import DataLoader\nfrom datasets import MeshData\n\n#Initialize Renderer\nren = vtk.vtkRenderer()\nren.GradientBackgroundOn()\nren.SetBackground(135/255, 206/255, 235/255)\nren.SetBackground2(44/255, 125/255, 158/255)\nrenWin = vtk.vtkRenderWindow()\nrenWin.SetFullScreen(False)\nrenWin.AddRenderer(ren)\niren = vtk.vtkRenderWindowInteractor()\niren.SetRenderWindow(renWin)\n\n#JPolydata\nreader = vtk.vtkOBJReader()\nreader.SetFileName(\"data/CoMA/template/template.obj\")\nreader.Update()\npolydata = reader.GetOutput()\n\n#Set Torch device\ndevice = \"cpu\"\nif torch.cuda.is_available():\n device = \"cuda\"\n\n\n\n\ndef 
getInputData(polydata):\n nPoints = polydata.GetNumberOfPoints()\n\n result = []\n\n for pid in range(nPoints):\n point = polydata.GetPoint(pid)\n result.append(point)\n\n\n tensor = torch.tensor([result])\n\n return tensor\n\n\ndef getOutputPoly(polydata, pred):\n output = vtk.vtkPolyData()\n output.DeepCopy(polydata)\n\n for pid, pos in enumerate(pred[0]):\n output.GetPoints().SetPoint(pid, pos[0], pos[1], pos[2])\n \n\n output.GetPoints().Modified()\n\n return output\n\n\ndef updatePoly(polydata, pred):\n for pid, pos in enumerate(pred[0]):\n polydata.GetPoints().SetPoint(pid, pos[0], pos[1], pos[2])\n \n\n polydata.GetPoints().Modified()\n\n\ndef MakeActor(polydata):\n \n #Visualize\n mapper = vtk.vtkOpenGLPolyDataMapper()\n mapper.SetInputData(polydata)\n # mapper.SetFragmentShaderCode(frag)\n\n actor = vtk.vtkActor()\n actor.SetMapper(mapper)\n\n return actor\n\n\n\nclass LatentInteractorStyle(vtk.vtkInteractorStyleTrackballCamera):\n\n def __init__(self, model, target, std, mean, samples, parent=None):\n\n\n \n self.AddObserver(\"LeftButtonPressEvent\", self.LeftButtonPressed)\n self.AddObserver(\"MouseMoveEvent\", self.MouseMove)\n self.AddObserver(\"LeftButtonReleaseEvent\", self.LeftButtonReleased)\n\n\n self.model = model\n self.mean = mean\n self.std = std\n\n\n #Initialize Plane \n planeSource = vtk.vtkPlaneSource()\n planeSource.SetCenter(0, 0, 0)\n planeSource.Update()\n planePoly = planeSource.GetOutput()\n planePoly.GetPointData().RemoveArray(\"Normals\")\n self.planeActor = MakeActor(planePoly)\n self.planeActor.GetProperty().SetRepresentationToWireframe()\n self.planeActor.GetProperty().SetColor(1, 0, 0)\n\n\n \n\n ren.AddActor(self.planeActor)\n\n\n #ADd Target Actor\n target = target*std +mean\n self.polydata = getOutputPoly(polydata, target)\n self.actor = MakeActor(self.polydata)\n ren.AddActor(self.actor)\n\n\n\n\n bounds = self.planeActor.GetBounds()\n self.latentPositions = np.array( [\n [bounds[0], bounds[2], 0],\n [bounds[0], bounds[3], 0],\n [bounds[1], bounds[2], 0],\n [bounds[1], bounds[3], 0]\n ])\n \n\n self.latentSize = 16\n self.outputLatents = []\n #Add Sample Actor\n for idx, sample in enumerate(samples):\n\n #ADd Target Actor\n z = self.model.encoder(sample.to(device))\n self.latentSize = z.shape[1]\n self.outputLatents.append(z[0])\n sample = sample*std +mean\n outpoly = getOutputPoly(polydata, sample)\n actor = MakeActor(outpoly)\n actor.SetPosition(self.latentPositions[idx])\n ren.AddActor(actor)\n\n self.pickedPosition = -1\n \n\n def LeftButtonPressed(self, obj, ev):\n \n self.OnLeftButtonDown()\n\n pos = obj.GetInteractor().GetEventPosition()\n\n picker = vtk.vtkCellPicker()\n picker.PickFromListOn()\n picker.AddPickList(self.planeActor)\n picker.Pick(pos[0], pos[1], 0, ren)\n\n \n\n position = picker.GetPickPosition()\n \n if picker.GetActor() == self.planeActor:\n self.pickedPosition = position\n\n \n\n def MouseMove(self, obj, ev):\n if self.pickedPosition == -1:\n self.OnMouseMove()\n return\n\n pos = obj.GetInteractor().GetEventPosition()\n\n picker = vtk.vtkCellPicker()\n picker.PickFromListOn()\n picker.AddPickList(self.planeActor)\n picker.Pick(pos[0], pos[1], 0, ren)\n \n \n position = picker.GetPickPosition()\n targetPos = np.array([position[0], position[1], 0])\n\n\n if targetPos[0] < -0.5 : targetPos[0] = -0.5\n elif targetPos[0] > 0.5 : targetPos[0] = 0.5\n if targetPos[1] < -0.5 : targetPos[1] = -0.5\n elif targetPos[1] > 0.5 : targetPos[1] = 0.5\n\n distances = []\n for sample in self.latentPositions: \n 
distances.append(np.linalg.norm(targetPos-sample))\n \n\n weights = np.array(distances)\n\n weights[weights > 1] = 1\n weights = 1 - weights\n \n calculatedLatent = torch.zeros(self.latentSize).to(device)\n\n for idx, weight in enumerate(weights):\n calculatedLatent += self.outputLatents[idx] * weight\n \n\n out = self.model.decoder(calculatedLatent)\n out = out.detach().cpu()\n\n target = out*self.std + self.mean\n updatePoly(self.polydata, target)\n renWin.Render()\n\n\n def LeftButtonReleased(self, obj, ev):\n\n self.pickedPosition = -1\n self.OnLeftButtonUp()\n\n\nif __name__ == \"__main__\":\n \n dilation = [1, 1, 1, 1]\n seq_length = [9, 9, 9, 9]\n\n transform_fp = os.path.join( \"data\", \"CoMA\", \"transform.pkl\" )\n with open(transform_fp, 'rb') as f:\n tmp = pickle.load(f, encoding='latin1')\n\n spiral_indices_list = [\n utils.preprocess_spiral(tmp['face'][idx], seq_length[idx], tmp['vertices'][idx], dilation[idx]).to(device)\n for idx in range(len(tmp['face']) - 1)\n ]\n down_transform_list = [\n utils.to_sparse(down_transform).to(device)\n for down_transform in tmp['down_transform']\n ]\n up_transform_list = [\n utils.to_sparse(up_transform).to(device)\n for up_transform in tmp['up_transform']\n ]\n\n\n meshdata = MeshData(\"data/CoMA\", \"data/CoMA/template/template.obj\", split=\"interpolation\", test_exp=\"bareteeth\")\n \n\n mean = meshdata.mean\n std = meshdata.std\n\n\n\n\n model = AE(3, [32, 32, 32,64], 16, spiral_indices_list, down_transform_list, up_transform_list).to(device)\n checkpoint = torch.load(\"out/interpolation_exp/checkpoints/checkpoint_300.pt\")\n model.load_state_dict( checkpoint[\"model_state_dict\"] )\n model.eval()\n\n print(len(meshdata.train_dataset))\n train_loader = DataLoader(meshdata.train_dataset, batch_size=1, shuffle=False)\n\n x = meshdata.train_dataset[10].x.unsqueeze(0)\n \n # # x = inputTensor\n # out = model(x)\n # out = (x.cpu() * std) +mean \n\n\n samples = [\n meshdata.train_dataset[10].x.unsqueeze(0),\n meshdata.train_dataset[5768].x.unsqueeze(0),\n meshdata.train_dataset[8654].x.unsqueeze(0),\n meshdata.train_dataset[2054].x.unsqueeze(0)\n ]\n\n \n #Add Interactor Style\n interactorStyle = LatentInteractorStyle(model, x, std, mean, samples)\n iren.SetInteractorStyle(interactorStyle)\n \n renWin.Render()\n iren.Initialize()\n iren.Start()\n\n\n\n","sub_path":"inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":7349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"451817018","text":"import os\nimport cv2\nimport threading\n\nfrom camera.models import Image\nfrom datetime import datetime\nfrom django.http import StreamingHttpResponse\nfrom django.shortcuts import render\n\ndirectory = os.getcwd()\nfile_path = directory + \"/camera/templates/imgaes\"\n\n\nclass VideoCamera(object):\n def __init__(self):\n self.video = cv2.VideoCapture(0)\n self.grabbed, self.frame = self.video.read()\n threading.Thread(target=self.update, args=()).start()\n\n def __del__(self):\n self.video.release()\n\n def get_frame(self):\n image = self.frame\n ret, jpeg = cv2.imencode(\".jpg\", image)\n return jpeg.tobytes()\n\n def update(self):\n while True:\n self.grabbed, self.frame = self.video.read()\n\n def take_frame(self):\n now = datetime.now()\n file_name = file_path + now.strftime(\"%y%m%d_%H%M%S\") + \".png\"\n cv2.imwrite(file_name, self.frame)\n\n image = Image(name=now.strftime(\"%y%m%d_%H%M%S\"))\n image.save()\n\n\ncam = VideoCamera()\n\n\ndef gen(camera):\n while 
True:\n frame = cam.get_frame()\n yield (b\"--frmae\\r\\n\" b\"Content-Type: image/jpeg\\r\\n\\r\\n\" + frame + b\"\\r\\n\\r\\n\")\n\n\ndef stream(request):\n try:\n return StreamingHttpResponse(\n gen(()), content_type=\"multipart/x-mixed-replace;boundary=frame\"\n )\n except:\n pass\n\n\ndef live(request):\n if request.method == \"POST\":\n cam.take_frame()\n\n return render(request, \"design/html/live.html\")\n\n\ndef playback(request):\n image_list = Image.objects.all()\n return render(request, \"design/html/playback.html\", {\"image_list\": image_list})\n\n\ndef setting(request):\n return render(request, \"design/html/setting.html\")\n","sub_path":"camera/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"473356454","text":"import os,sys,inspect\ncurrentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\nsys.path.insert(0,currentdir) \n\nimport numpy as np\n\n# These functions were determined experimentally\ncliTypeFuncMapping = {'hostSSH' : [900, 0.288, 1],\n 'hostVIP' : [2.66e+05, 2.80e-01, 2.64e+01],\n 'hostVID' : [1.67e+02, 3.54e-03, 1.53e-22],\n 'hostLVD' : [1.69e+01, 4.83e-03, 5.50e-01],\n 'hostFDO' : [4.51e+01, 4.21e-03, 1.66e+00]\n }\n\ndef negExpFunc(x, a, b, c):\n return a * np.exp(-b*x) + c\n\ndef negLinFunc(x, a, b, c):\n return -a*x + b\n\ndef estDelay(cliType, availBand):\n funcMap = cliTypeFuncMapping[cliType]\n if cliType in cliTypeFuncMapping:\n delay = negExpFunc(availBand, *funcMap)\n if cliType == 'hostVIP':\n return delay\n return delay/2\n else:\n return 1000","sub_path":"algorithm/delayEstimation.py","file_name":"delayEstimation.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"366992883","text":"# To change this license header, choose License Headers in Project Properties.\n# To change this template file, choose Tools | Templates\n# and open the template in the editor.\n\n\nA = [5, 2, 2, 4, 3, 5, 4, 7, 3]\n\nRepetidos = 0\nfor i in range(1):\n print(A)\n\nfor i in range(9):\n for j in range(i + 1, 9):\n if(A[i] != A[j]):\n Repetidos = A[i];\nprint (\"Numero NO Repetido\")\nprint(Repetidos)","sub_path":"Taller N2/Python/Taller_No2_2.py","file_name":"Taller_No2_2.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"448754004","text":"price_ratings = [int(el) for el in input().split()]\r\nentry_point = int(input())\r\nitems_type = input()\r\nprice_ratings_type = input()\r\n\r\nleft_items = price_ratings[:entry_point]\r\nright_items = price_ratings[entry_point + 1:]\r\n\r\nif items_type == \"cheap\":\r\n left_items = [el for el in left_items if el < price_ratings[entry_point]]\r\n right_items = [el for el in right_items if el < price_ratings[entry_point]]\r\n\r\nelif items_type == \"expensive\":\r\n left_items = [el for el in left_items if el >= price_ratings[entry_point]]\r\n right_items = [el for el in right_items if el >= price_ratings[entry_point]]\r\n\r\nif price_ratings_type == \"positive\":\r\n left_items = [el for el in left_items if el > 0]\r\n right_items = [el for el in right_items if el > 0]\r\n\r\nelif price_ratings_type == \"negative\":\r\n left_items = [el for el in left_items if el < 0]\r\n right_items = [el for el in right_items if el < 0]\r\n\r\nif sum(left_items) >= sum(right_items):\r\n print(f\"Left 
- {sum(left_items)}\")\r\nelse:\r\n print(f\"Right - {sum(right_items)}\")","sub_path":"Dictionaries/angry_pets_v3.py","file_name":"angry_pets_v3.py","file_ext":"py","file_size_in_byte":1042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"557720556","text":"# Copyright (c) 2017-present, Facebook, Inc.\n# All rights reserved.\n#\n# This source code is licensed under the BSD-style license found in the\n# LICENSE file in the root directory of this source tree. An additional grant\n# of patent rights can be found in the PATENTS file in the same directory.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport logging\n\nlogger = logging.getLogger(__name__)\n\nimport numpy as np\nimport pandas as pd\nfrom functools import reduce\n\n\ndef _cutoffs(df, horizon, k, period):\n \"\"\"Generate cutoff dates\n\n Parameters\n ----------\n df: pd.DataFrame with historical data\n horizon: pd.Timedelta.\n Forecast horizon\n k: Int number.\n The number of forecasts point.\n period: pd.Timedelta.\n Simulated Forecast will be done at every this period.\n\n Returns\n -------\n list of pd.Timestamp\n \"\"\"\n # Last cutoff is 'latest date in data - horizon' date\n cutoff = df['ds'].max() - horizon\n result = [cutoff]\n\n for i in range(1, k):\n cutoff -= period\n # If data does not exist in data range (cutoff, cutoff + horizon]\n if not (((df['ds'] > cutoff) & (df['ds'] <= cutoff + horizon)).any()):\n # Next cutoff point is 'closest date before cutoff in data - horizon'\n closest_date = df[df['ds'] <= cutoff].max()['ds']\n cutoff = closest_date - horizon\n if cutoff < df['ds'].min():\n logger.warning('Not enough data for requested number of cutoffs! 
Using {}.'.format(k))\n break\n result.append(cutoff)\n\n # Sort lines in ascending order\n return reversed(result)\n\n\ndef simulated_historical_forecasts(model, horizon, k, period=None):\n \"\"\"Simulated Historical Forecasts.\n If you would like to know it in detail, read the original paper\n https://facebookincubator.github.io/prophet/static/prophet_paper_20170113.pdf\n\n Parameters\n ----------\n model: Prophet class object.\n Fitted Prophet model\n horizon: string which has pd.Timedelta compatible style.\n Forecast horizon ('5 days', '3 hours', '10 seconds' etc)\n k: Int number.\n The number of forecasts point.\n period: string which has pd.Timedelta compatible style or None, default None.\n Simulated Forecast will be done at every this period.\n 0.5 * horizon is used when it is None.\n\n Returns\n -------\n A pd.DataFrame with the forecast, actual value and cutoff.\n \"\"\"\n df = model.history.copy().reset_index(drop=True)\n horizon = pd.Timedelta(horizon)\n period = 0.5 * horizon if period is None else pd.Timedelta(period)\n cutoffs = _cutoffs(df, horizon, k, period)\n predicts = []\n for cutoff in cutoffs:\n # Generate new object with copying fitting options\n m = model.copy(cutoff)\n # Train model\n m.fit(df[df['ds'] <= cutoff])\n # Calculate yhat\n index_predicted = (df['ds'] > cutoff) & (df['ds'] <= cutoff + horizon)\n columns = ['ds'] + (['cap'] if m.growth == 'logistic' else [])\n yhat = m.predict(df[index_predicted][columns])\n # Merge yhat(predicts), y(df, original data) and cutoff\n predicts.append(pd.concat([\n yhat[['ds', 'yhat', 'yhat_lower', 'yhat_upper']],\n df[index_predicted][['y']].reset_index(drop=True),\n pd.DataFrame({'cutoff': [cutoff] * len(yhat)})\n ], axis=1))\n\n # Combine all predicted pd.DataFrame into one pd.DataFrame\n return reduce(lambda x, y: x.append(y), predicts).reset_index(drop=True)\n\n\ndef cross_validation(model, horizon, period, initial=None):\n \"\"\"Cross-Validation for time-series.\n This function is the same with Time series cross-validation described in https://robjhyndman.com/hyndsight/tscv/\n when the value of period is equal to the time interval of data.\n\n Parameters\n ----------\n model: Prophet class object. Fitted Prophet model\n horizon: string which has pd.Timedelta compatible style.\n Forecast horizon ('5 days', '3 hours', '10 seconds' etc)\n period: string which has pd.Timedelta compatible style.\n Simulated Forecast will be done at every this period.\n initial: string which has pd.Timedelta compatible style or None, default None.\n First training period.\n 3 * horizon is used when it is None.\n\n Returns\n -------\n A pd.DataFrame with the forecast, actual value and cutoff.\n \"\"\"\n te = model.history['ds'].max()\n ts = model.history['ds'].min()\n horizon = pd.Timedelta(horizon)\n period = pd.Timedelta(period)\n initial = 3 * horizon if initial is None else pd.Timedelta(initial)\n k = int(np.floor(((te - horizon) - (ts + initial)) / period))\n return simulated_historical_forecasts(model, horizon, k, period)\n","sub_path":"python/fbprophet/diagnostics.py","file_name":"diagnostics.py","file_ext":"py","file_size_in_byte":4864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"540107562","text":"# coding: utf-8\n\"\"\"\nPydici core views. 
Http request are processed here.\n@author: Sébastien Renard (sebastien.renard@digitalfox.org)\n@license: AGPL v3 or newer (http://www.gnu.org/licenses/agpl-3.0.html)\n\"\"\"\n\nimport csv\nimport datetime\nimport json\n\nfrom django.shortcuts import render\nfrom django.db.models import Q, Sum, Min, Max\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import HttpResponse\nfrom django.utils.html import strip_tags\nfrom django.utils.translation import ugettext as _\nfrom django.core.cache import cache\nfrom django.conf import settings\n\nfrom django_select2.views import AutoResponseView\nfrom taggit.models import Tag\n\nfrom .decorator import pydici_non_public, pydici_feature, PydiciNonPublicdMixin\nfrom leads.models import Lead\nfrom people.models import Consultant\nfrom crm.models import Company, Contact, Subsidiary\nfrom staffing.models import Mission, FinancialCondition, Staffing, Timesheet\nfrom billing.models import ClientBill\nfrom expense.models import Expense\nfrom people.views import consultant_home\nfrom .utils import nextMonth, previousMonth\n\n\n\n@login_required\ndef index(request):\n key = \"core.index.\" + request.user.username\n consultant_trigramme = cache.get(key)\n if consultant_trigramme is None:\n try:\n consultant_trigramme = Consultant.objects.get(trigramme__iexact=request.user.username).trigramme\n cache.set(key, consultant_trigramme)\n except Consultant.DoesNotExist:\n consultant_trigramme = None\n\n if consultant_trigramme:\n return consultant_home(request, consultant_trigramme)\n else:\n # User is not a consultant. Go for default index page.\n return render(request, \"core/pydici.html\",\n {\"user\": request.user})\n\n\n@pydici_non_public\n@pydici_feature(\"search\")\ndef search(request):\n \"\"\"Very simple search function on all major pydici objects\"\"\"\n\n words = request.GET.get(\"q\", \"\")\n words = words.split()\n consultants = companies = contacts = leads = missions = bills = tags = None\n max_record = 50\n more_record = False # Wether we have more records\n\n if words:\n # Consultant\n consultants = Consultant.objects.all()\n for word in words:\n consultants = consultants.filter(Q(name__icontains=word) |\n Q(trigramme__icontains=word))\n consultants = consultants.distinct()\n\n # Companies\n companies = Company.objects.all()\n for word in words:\n companies = companies.filter(name__icontains=word)\n companies = companies.distinct()\n\n # Contacts\n contacts = Contact.objects.all()\n for word in words:\n contacts = contacts.filter(name__icontains=word)\n contacts = contacts.distinct()[:max_record]\n if len(contacts) >= max_record:\n more_record = True\n\n # Tags\n tags = Tag.objects.all()\n for word in words:\n tags = tags.filter(name__icontains=word)\n\n # Leads\n leads = Lead.objects.all()\n for word in words:\n leads = leads.filter(Q(name__icontains=word) |\n Q(description__icontains=word) |\n Q(tags__name__iexact=word) |\n Q(client__contact__name__icontains=word) |\n Q(client__organisation__company__name__icontains=word) |\n Q(client__organisation__name__iexact=word) |\n Q(deal_id__icontains=word[:-1])) # Squash last letter that could be mission letter\n leads = leads.distinct().select_related(\"client__organisation__company\")[:max_record]\n if len(leads) >= max_record:\n more_record = True\n\n # Missions\n missions = Mission.objects.all()\n for word in words:\n missions = missions.filter(Q(deal_id__icontains=word) |\n Q(description__icontains=word))\n missions = 
missions.select_related(\"lead__client__organisation__company\")[:max_record]\n if len(missions) >= max_record:\n more_record = True\n\n # Add missions from lead\n if leads:\n missions = set(missions)\n for lead in leads.prefetch_related(\"mission_set\"):\n for mission in lead.mission_set.all():\n missions.add(mission)\n missions = list(missions)\n\n # Bills\n bills = ClientBill.objects.all()\n for word in words:\n bills = bills.filter(Q(bill_id__icontains=word) |\n Q(comment__icontains=word))\n bills = bills.select_related(\"lead__client__organisation__company\")[:max_record]\n if len(bills) >= max_record:\n more_record = True\n\n # Add bills from lead\n if leads:\n bills = set(bills)\n for lead in leads.prefetch_related(\"clientbill_set\"):\n for bill in lead.clientbill_set.all():\n bills.add(bill)\n # Sort\n bills = list(bills)\n bills.sort(key=lambda x: x.creation_date)\n\n return render(request, \"core/search.html\",\n {\"query\": \" \".join(words),\n \"consultants\": consultants,\n \"companies\": companies,\n \"contacts\": contacts,\n \"leads\": leads,\n \"tags\": tags,\n \"missions\": missions,\n \"bills\": bills,\n \"more_record\": more_record,\n \"user\": request.user})\n\n\n@pydici_non_public\n@pydici_feature(\"reports\")\ndef dashboard(request):\n \"\"\"Tactical management dashboard. This views is in core module because it aggregates data\n accross different modules\"\"\"\n\n return render(request, \"core/dashboard.html\",\n {\"subsidiaries\": Subsidiary.objects.all()})\n\n\n@pydici_non_public\n@pydici_feature(\"reports\")\ndef financial_control(request, start_date=None, end_date=None):\n \"\"\"Financial control extraction. This view is intented to be processed by\n a spreadsheet or a financial package software\"\"\"\n if end_date is None:\n end_date = previousMonth(datetime.date.today())\n else:\n end_date = datetime.date(int(end_date[0:4]), int(end_date[4:6]), 1)\n if start_date is None:\n start_date = previousMonth(previousMonth(datetime.date.today()))\n else:\n start_date = datetime.date(int(start_date[0:4]), int(start_date[4:6]), 1)\n\n response = HttpResponse(content_type=\"text/plain\")\n response[\"Content-Disposition\"] = \"attachment; filename=financialControl.dat\"\n writer = csv.writer(response, delimiter=';')\n\n financialConditions = {}\n for fc in FinancialCondition.objects.all():\n financialConditions[\"%s-%s\" % (fc.mission_id, fc.consultant_id)] = (fc.daily_rate, fc.bought_daily_rate)\n\n # Header\n header = [\"FiscalYear\", \"Month\", \"Type\", \"Nature\", \"Archived\",\n \"Subsidiary\", \"ClientCompany\", \"ClientCompanyCode\", \"ClientOrganization\",\n \"Lead\", \"DealId\", \"LeadPrice\", \"Billed\", \"LeadResponsible\", \"LeadResponsibleTrigramme\", \"LeadTeam\",\n \"Mission\", \"MissionId\", \"BillingMode\", \"MissionPrice\",\n \"TotalQuantityInDays\", \"TotalQuantityInEuros\",\n \"ConsultantSubsidiary\", \"ConsultantTeam\", \"Trigramme\", \"Consultant\", \"Subcontractor\", \"CrossBilling\",\n \"ObjectiveRate\", \"DailyRate\", \"BoughtDailyRate\", \"BudgetType\", \"QuantityInDays\", \"QuantityInEuros\",\n \"StartDate\", \"EndDate\"]\n\n writer.writerow(header)\n\n timesheets = Timesheet.objects.filter(working_date__gte=start_date, working_date__lt=nextMonth(end_date))\n staffings = Staffing.objects.filter(staffing_date__gte=start_date, staffing_date__lt=nextMonth(end_date))\n\n consultants = dict([(i.trigramme.lower(), i) for i in Consultant.objects.all().select_related()])\n\n missionsIdsFromStaffing = Mission.objects.filter(probability__gt=0, 
staffing__staffing_date__gte=start_date, staffing__staffing_date__lt=nextMonth(end_date)).values_list(\"id\", flat=True)\n missionsIdsFromTimesheet = Mission.objects.filter(probability__gt=0, timesheet__working_date__gte=start_date, timesheet__working_date__lt=nextMonth(end_date)).values_list(\"id\", flat=True)\n missionsIds = set(list(missionsIdsFromStaffing) + list(missionsIdsFromTimesheet))\n missions = Mission.objects.filter(id__in=missionsIds)\n missions = missions.distinct().select_related().prefetch_related(\"lead__client__organisation__company\", \"lead__responsible\")\n\n def createMissionRow(mission, start_date, end_date):\n \"\"\"Inner function to create mission row\"\"\"\n missionRow = []\n missionRow.append(start_date.year)\n missionRow.append(end_date.isoformat())\n missionRow.append(\"timesheet\")\n missionRow.append(mission.nature)\n missionRow.append(not mission.active)\n if mission.lead:\n missionRow.append(mission.lead.subsidiary)\n missionRow.append(mission.lead.client.organisation.company.name)\n missionRow.append(mission.lead.client.organisation.company.code)\n missionRow.append(mission.lead.client.organisation.name)\n missionRow.append(mission.lead.name)\n missionRow.append(mission.lead.deal_id)\n missionRow.append(mission.lead.sales or 0)\n missionRow.append(list(mission.lead.clientbill_set.filter(state__in=(\"1_SENT\", \"2_PAID\"), creation_date__lt=end_date, creation_date__gte=start_date).aggregate(Sum(\"amount\")).values())[0] or 0)\n if mission.lead.responsible:\n missionRow.append(mission.lead.responsible.name)\n missionRow.append(mission.lead.responsible.trigramme)\n missionRow.append(mission.lead.responsible.staffing_manager.trigramme if mission.lead.responsible.staffing_manager else \"\")\n else:\n missionRow.extend([\"\", \"\", \"\"])\n else:\n missionRow.extend([mission.subsidiary, \"\", \"\", \"\", \"\", \"\", 0, 0, \"\", \"\", \"\"])\n missionRow.append(mission.description or \"\")\n missionRow.append(mission.mission_id())\n missionRow.append(mission.billing_mode or \"\")\n missionRow.append(mission.price or 0)\n missionRow.extend(mission.done_work())\n return missionRow\n\n for mission in missions:\n missionRow = createMissionRow(mission, start_date, end_date)\n for consultant in mission.consultants().select_related().prefetch_related(\"staffing_manager\"):\n consultantRow = missionRow[:] # copy\n daily_rate, bought_daily_rate = financialConditions.get(\"%s-%s\" % (mission.id, consultant.id), [0, 0])\n rateObjective = consultant.getRateObjective(end_date, rate_type=\"DAILY_RATE\")\n if rateObjective:\n rateObjective = rateObjective.rate\n else:\n rateObjective = 0\n doneDays = timesheets.filter(mission_id=mission.id, consultant=consultant.id).aggregate(charge=Sum(\"charge\"), min_date=Min(\"working_date\"), max_date=Max(\"working_date\"))\n forecastedDays = staffings.filter(mission_id=mission.id, consultant=consultant.id).aggregate(charge=Sum(\"charge\"), min_date=Min(\"staffing_date\"), max_date=Max(\"staffing_date\"))\n consultantRow.append(consultant.company)\n consultantRow.append(consultant.staffing_manager.trigramme if consultant.staffing_manager else \"\")\n consultantRow.append(consultant.trigramme)\n consultantRow.append(consultant.name)\n consultantRow.append(consultant.subcontractor)\n if mission.lead:\n consultantRow.append(mission.lead.subsidiary != consultant.company)\n else:\n consultantRow.append(mission.subsidiary != consultant.company)\n consultantRow.append(rateObjective)\n consultantRow.append(daily_rate or 0)\n 
consultantRow.append(bought_daily_rate or 0)\n # Timesheet row\n for budgetType, days in ((\"done\", doneDays), (\"forecast\", forecastedDays)):\n quantity = days[\"charge\"] or 0\n row = consultantRow[:] # Copy\n row.append(budgetType)\n row.append(quantity or 0)\n row.append((quantity * daily_rate) if (quantity > 0 and daily_rate > 0) else 0)\n row.append(days[\"min_date\"] or \"\")\n row.append(days[\"max_date\"] or \"\")\n writer.writerow(row)\n\n archivedMissions = Mission.objects.filter(active=False, archived_date__gte=start_date, archived_date__lt=end_date)\n archivedMissions = archivedMissions.filter(lead__state=\"WON\")\n archivedMissions = archivedMissions.prefetch_related(\"lead__client__organisation__company\", \"lead__responsible\")\n for mission in archivedMissions:\n if mission in missions:\n # Mission has already been processed for this period\n continue\n missionRow = createMissionRow(mission, start_date, end_date)\n writer.writerow(missionRow)\n\n for expense in Expense.objects.filter(expense_date__gte=start_date, expense_date__lt=nextMonth(end_date), chargeable=False).select_related():\n row = []\n row.append(start_date.year)\n row.append(end_date.isoformat())\n row.append(\"expense\")\n row.append(expense.category)\n if expense.lead:\n row.append(expense.lead.subsidiary)\n row.extend([\"\", \"\", \"\", \"\"])\n row.append(expense.lead.deal_id)\n else:\n row.extend([\"\", \"\", \"\", \"\", \"\", \"\"])\n row.extend([\"\", \"\", \"\", \"\", \"\"])\n try:\n consultant = consultants[expense.user.username.lower()]\n row.append(consultant.company.name)\n row.append(consultant.staffing_manager.trigramme)\n row.append(consultant.trigramme)\n row.append(consultant.name)\n row.append(consultant.subcontractor)\n if expense.lead:\n row.append(expense.lead.subsidiary != consultant.company)\n else:\n row.append(\"unknown for now\")\n except KeyError:\n # Exepense user is not a consultant\n row.extend([\"\", \"\", \"\", \"\", \"\", \"\"])\n row.extend([\"\", \"\", \"\", \"\", \"\"])\n row.append(expense.amount) # TODO: compute pseudo HT amount\n writer.writerow(row)\n\n return response\n\n\n@pydici_non_public\n@pydici_feature(\"reports\")\ndef risk_reporting(request):\n \"\"\"Risk reporting synthesis\"\"\"\n data = []\n today = datetime.date.today()\n # Sent bills (still not paid)\n for bill in ClientBill.objects.filter(state=\"1_SENT\").select_related():\n if bill.due_date < today:\n data_type = _(\"overdue bills\")\n else:\n data_type = _(\"sent bills\")\n data.append({_(\"type\"): data_type,\n _(\"subsidiary\"): str(bill.lead.subsidiary),\n _(\"deal_id\"): bill.lead.deal_id,\n _(\"deal\"): bill.lead.name,\n _(\"amount\"): int(bill.amount),\n _(\"company\"): str(bill.lead.client.organisation.company),\n _(\"client\"): str(bill.lead.client),\n })\n\n # Leads with done works beyond sent or paid bills\n for lead in Lead.objects.filter(mission__active=True).distinct().select_related():\n if not \"TIME_SPENT\" in [m.billing_mode for m in lead.mission_set.all()]:\n # All missions of this lead are fixed price (no one is time spent). 
So done works beyond billing is not considered here\n # Fixed price mission tracking is done a separate report\n continue\n done_d, done_a = lead.done_work()\n billed = float(ClientBill.objects.filter(lead=lead).filter(Q(state=\"1_SENT\") | Q(state=\"2_PAID\")).aggregate(amount=Sum(\"amount\"))[\"amount\"] or 0)\n if billed < done_a:\n data.append({_(\"type\"): _(\"work without bill\"),\n _(\"subsidiary\"): str(lead.subsidiary),\n _(\"deal_id\"): lead.deal_id,\n _(\"deal\"): lead.name,\n _(\"amount\"): int(done_a - billed),\n _(\"company\"): str(lead.client.organisation.company),\n _(\"client\"): str(lead.client),\n })\n\n return render(request, \"core/risks.html\", { \"data\": json.dumps(data),\n \"derivedAttributes\": []})\n\n\nclass PydiciSelect2View(PydiciNonPublicdMixin, AutoResponseView):\n \"\"\"Overload default select2 view that is used to get data through ajax calls to limit it to login users\"\"\"\n pass\n\n\ndef tableToCSV(table, filename=\"data.csv\"):\n \"\"\"A view that convert a django_table2 object to a CSV in a http response object\"\"\"\n response = HttpResponse(content_type=\"text/csv\")\n response[\"Content-Disposition\"] = \"attachment; filename=%s\" % filename\n writer = csv.writer(response, delimiter=';')\n header = [column.header for column in table.columns]\n writer.writerow(header)\n for row in table.rows:\n row = [strip_tags(str(cell)) for column, cell in list(row.items())]\n row = [i.replace(\"\\u2714\", _(\"No\")).replace(\"\\u2718\", _(\"Yes\")) for i in row]\n writer.writerow(row)\n return response\n\n\ndef internal_error(request):\n \"\"\"Custom internal error view.\n Like the default builtin one, but with context to allow proper menu display with correct media path\"\"\"\n return render(request, \"500.html\")\n\n\ndef forbiden(request):\n \"\"\"When access is denied...\"\"\"\n if request.META.get('HTTP_X_REQUESTED_WITH') == 'XMLHttpRequest':\n # Ajax request, use stripped forbiden page\n template = \"core/_access_forbiden.html\"\n else:\n # Standard request, use full forbiden page with menu\n template = \"core/forbiden.html\"\n return render(request, template,\n {\"admins\": settings.ADMINS, })\n","sub_path":"core/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":18048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"465086666","text":"\nimport copy\nfrom utils.exceptions import PubErrorCustom\nfrom utils.decorator.utils.base import ConnectorFuncsBase\n\nclass ConnectorFuncsDeleteBase(ConnectorFuncsBase):\n\n def __init__(self,**kwargs):\n\n super().__init__(**kwargs)\n\n self.robot = copy.deepcopy(self.connector.robot)\n\n self.pk_key = self.robot.pop(\"pk_key\")\n\n if not self.pk_key:\n raise PubErrorCustom(\"pk_key是空!\")\n\n async def run(self):\n if self.connector_app.request.method == 'DELETE':\n await self.delete()\n\n async def delete_inner_handler(self,**kwargs):\n\n robot_table = kwargs.get(\"robot_table\")\n model_class = robot_table['model_class']\n pk_key = robot_table.get(\"pk_key\") if robot_table.get(\"pk_key\") else self.pk_key\n\n if self.pk:\n await self.connector_app.db.execute(model_class.delete(). \\\n where(\n getattr(model_class, pk_key) == self.pk)\n )\n else:\n await self.connector_app.db.execute(model_class.delete(). 
\\\n where(\n getattr(model_class, pk_key) << self.connector_app.data.get(\"ids\"))\n )\n\n async def recursion(self,robot):\n\n if robot:\n\n for key, value in robot.items():\n\n await self.delete_inner_handler(\n robot_table=value\n )\n await self.recursion(robot=value.get(\"child\",None))\n\n async def delete(self):\n\n if not self.pk and not self.connector_app.data.get(\"ids\"):\n raise PubErrorCustom(\"请选择数据!\")\n\n if self.connector.del_before_handler:\n await self.connector.del_before_handler(\n self.connector_app, pk=self.pk if self.pk else self.connector_app.data.get(\"ids\"))\n\n if self.connector_app.data.get(\"ids\"):\n\n if not self.connector.is_del_batch:\n raise PubErrorCustom(\"此接口不支持批量删除!\")\n\n if not isinstance(self.connector_app.data.get(\"ids\"), list):\n raise PubErrorCustom(\"批量删除数据格式有误!\")\n\n await self.recursion(self.robot)\n","sub_path":"utils/decorator/utils/delete/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":2171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"3829467","text":"import sys\nimport datetime\nfrom pathlib import Path \ncurrent_path = Path(__file__).absolute()\nabs_path = str(current_path.parent.parent)\nsys.path.append(abs_path)\nfrom capture import Capture\n\ndef from_api_to_db_deputados(data_list, url):\n \n func = lambda datum: dict(\n ide_cadastro=datum['ideCadastro'],\n cod_orcamento=datum['codOrcamento'],\n condicao=datum['condicao'],\n matricula=datum['matricula'],\n id_parlamentar=datum['idParlamentar'],\n nome=datum['nome'],\n nome_parlamentar=datum['nomeParlamentar'],\n url_foto=datum['urlFoto'],\n sexo=datum['sexo'],\n uf=datum['uf'],\n partido=datum['partido'],\n gabinete=datum['gabinete'],\n anexo=datum['anexo'],\n fone=datum['fone'],\n email=datum['email'],\n data_captura=datetime.datetime.now(),\n url_captura=url\n )\n \n return map(func, data_list)\n\ndef main():\n\n capture = Capture(\n schema='camara_v1',)\n\n # capture data with this\n capture.capture_data(\n url='http://www.camara.leg.br/SitCamaraWS/Deputados.asmx/ObterDeputados')\n\n # get the list of dict for this table\n data_list = capture.data['deputados']['deputado'] \n\n # \n data_list = capture.to_default_dict(data_list) \n\n # make it rigth\n data_list = from_api_to_db_deputados(data_list, capture.url) \n\n # insert it!\n capture.insert_data(data_list, table='deputados')\n\nif __name__ == '__main__':\n main()\n","sub_path":"bigua/API/camara_v1/deputados.py","file_name":"deputados.py","file_ext":"py","file_size_in_byte":1488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"399964245","text":"#!/usr/bin/python\nimport sys\nimport csv\ninfile = sys.stdin\n#next(infile)\ncount = 0\nfor line in infile:\n\tline = line.strip()\n\tmy_list = line.split(',')\n\tif(my_list[0] == 'ball'): #checking if it is a delivery\n\t\t# print('Hello World')\n\t\tout = 0\n\t\tif(my_list[9] in [\"lbw\",\"caught\",\"caught and bowled\",\"bowled\",\"stumped\",\"hit wicket\"]): #checking if the ball bowled got a wicket\n\t\t\tout = 1\t#setting out as 1 if ball is a wicket\n\t\t#print(out)\n\t\tkey_list = my_list[4]+','+my_list[6]+','+str(out) #setting key as batsman on strike, bowler and if it is a wicket or not\n\t\tprint('%s\\t%s' % (key_list,'1')) #sending key and '1' from the mapper to the reducer. 
1 stands for a delivery bowled\n","sub_path":"adminmgr/media/code/python/map1/BD_058_217_267_1417_mapper.py","file_name":"BD_058_217_267_1417_mapper.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"105979380","text":"import os\nimport tensorflow as tf\n\nimport car_vision_model_scripts as const\nfrom car_vision_model_scripts.models.autopilot_model import AutopilotModel\nfrom car_vision_model_scripts.processing.autopilot_driving_data import AutopilotProcessing\n\n\n#GRAPH_DIR = './graph'\n#epochs = 30\n#batch_size = 100\n\nLOGS_PATH = os.path.join(const.LOG_DIR, \"small_car_training_autopilot\")\nL2NormConst = 0.001\n\ndef main(): \n proc = AutopilotProcessing(const.DEST_IMAGE_DIR, const.CSV_PATH, \n const.TRAINING_SET_SIZE, const.VALID_SET_SIZE)\n sess = tf.InteractiveSession()\n model = AutopilotModel()\n train_vars = tf.trainable_variables()\n \n reg = tf.add_n([tf.nn.l2_loss(v) for v in train_vars]) * L2NormConst\n loss = tf.reduce_mean(tf.square(tf.subtract(model.labels, model.output))) + reg\n train_step = tf.train.AdamOptimizer(const.LEARNING_RATE).minimize(loss)\n sess.run(tf.global_variables_initializer())\n \n # create a summary to monitor cost tensor\n tf.summary.scalar(\"loss\", loss)\n # merge all summaries into a single op\n merged_summary_op = tf.summary.merge_all()\n \n saver = tf.train.Saver()\n \n # op to write logs to Tensorboard\n summary_writer = tf.summary.FileWriter(LOGS_PATH, graph=tf.get_default_graph())\n \n # train over the dataset \n for epoch in range(const.STEPS):\n for i in range(int(proc.num_images/const.BATCH_SIZE)):\n xs, ys = proc.load_train_batch(const.BATCH_SIZE)\n train_step.run(feed_dict={model.features: xs, model.labels: ys, \n model.keep_prob: const.KEEP_PROB})\n if i % 10 == 0:\n xs, ys = proc.load_val_batch(const.BATCH_SIZE)\n loss_value = loss.eval(feed_dict={model.features:xs, model.labels: ys, \n model.keep_prob: 1.0})\n print(\"Epoch: %d, Step: %d, Loss: %g\" % (epoch, epoch * const.BATCH_SIZE + i, \n loss_value))\n \n # write logs at every iteration\n summary = merged_summary_op.eval(feed_dict={model.features: xs, model.labels: ys, \n model.keep_prob: 1.0})\n summary_writer.add_summary(summary, epoch * const.BATCH_SIZE + i)\n \n if i % const.BATCH_SIZE == 0:\n if not os.path.exists(LOGS_PATH):\n os.makedirs(LOGS_PATH)\n checkpoint_path = os.path.join(LOGS_PATH, \"graph.pb\")\n filename = saver.save(sess, checkpoint_path)\n print(\"Model saved in file: %s\" % filename)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"car_vision_model_scripts/training/small_car_training_autopilot.py","file_name":"small_car_training_autopilot.py","file_ext":"py","file_size_in_byte":2739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"312124745","text":"### MTRX5700\n# Dancing Drone\n# Angus, Neill, Xue Yin\n#\n#\n# This file is a main file created for testing purposes, to get clustering and classification working\n#\n\nimport scipy.io.wavfile as wavfile\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom cluster import cluster\n\n# read in raw data\nrate, raw_data = wavfile.read(\"../music/Vegas.wav\")\ndata = (raw_data[:,0]/2.0+raw_data[:,1]/2.0)\n\n#cluster(data[0:1024*10])\n# get mfcc\ntime, labels, class_labels = cluster(data,samplerate=44100, show_plots=True)\n\n\ndata = data[1::10]\nx = range(0, len(data))\n#plot the song\nplt.subplot(311)\nplt.scatter(time, 
labels)\nplt.grid()\nplt.axis([0, max(time), min(labels),max(labels) ])\n\nplt.subplot(312)\nplt.scatter(time, class_labels)\nplt.ylabel('amplitude')\nplt.xlabel('sample No.')\nplt.title('raw .WAV data')\nplt.grid()\nplt.axis([0, max(time), min(labels),max(labels) ])\n\nplt.subplot(313)\nplt.plot(x, data)\nplt.grid()\nplt.axis([0, max(x), min(data), max(data)])\n\nplt.show()\n\n","sub_path":"code/classification/classification_main.py","file_name":"classification_main.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"127306943","text":"import numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nfrom scipy.stats import norm, skewnorm, ks_2samp\nfrom scipy.integrate import trapz, cumtrapz\nfrom scipy.interpolate import PchipInterpolator\nfrom scipy.special import gammaln\nfrom scipy.optimize import minimize\nimport numdifftools as ndt\nfrom astropy.io import ascii\nimport dynesty\nfrom dynesty import plotting as dyplot\nimport pickle\n \ndef remove_nan(array):\n return array[np.logical_not(np.isnan(array))]\n\ndef KS(data1,data2):\n ks, p = ks_2samp(data1, data2)\n print(\"KS test (test statistic, p-value): \", (ks, p))\n return ks,p\n\ndef uniform(a, b, u):\n \"\"\"Given u in [0,1], return a uniform number in [a,b].\"\"\"\n return a + (b-a)*u\n\ndef jeffreys(a, b, u):\n \"\"\"Given u in [0,1], return a Jeffreys random number in [a,b].\"\"\"\n return a**(1-u) * b**u\n\ndef plot_hist(data1,data2,bins_,label=\"property\"):\n '''\n Histogram the data (left) and show normalized histograms (right).\n '''\n print(\"plotting histograms of data...\")\n fig, axes = plt.subplots(1,2, figsize=(11,4), sharex=True, tight_layout=True)\n\n ax = axes[0]\n ax.hist(data1, bins=bins_, alpha=0.5,label=\"data1\")\n ax.hist(data2, bins=bins_, alpha=0.5, label=\"data2\")\n \n ax.set(xlabel=label)\n ax.set_title(\"data histogram\")\n ax.legend()\n ax.grid(True)\n\n ax = axes[1]\n ax.hist(data1, bins=bins_, alpha=0.5, density=True, label=\"data1\")\n ax.hist(data2, bins=bins_, alpha=0.5, density=True, label=\"data2\")\n \n ax.set(xlabel=label)\n ax.set_title(\"Normalized data histogram\")\n ax.legend()\n \n ax.grid(True)\n \n fig.savefig('data_plot_hist_'+label+'.png', dpi=100)\n\n plt.show()\n\ndef bin_data(data1,data2,bins_,label, plot=False): \n density = False\n n1, edges1 = np.histogram(data1, bins=bins_)\n dn1 = np.sqrt(n1)\n x = 0.5*(edges1[1:] + edges1[:-1])\n if density:\n N1 = np.trapz(n1, x1)\n n1, dn1 = n1/N1, dn1/N1\n\n n2, edges2 = np.histogram(data2, bins=bins_)\n dn2 = np.sqrt(n2)\n # x = 0.5*(edges2[1:] + edges2[:-1])\n if density:\n N2 = np.trapz(n2, x2)\n n2, dn2 = n2/N2, dn2/N2\n \n if plot == True:\n plt.errorbar(x, n1, yerr=dn1, fmt='.') \n plt.errorbar(x, n2, yerr=dn2, fmt='.')\n plt.xlabel(label)\n plt.ylabel('count') \n plt.title(\"Binned Data\")\n plt.show()\n \n return x,n1,n2,dn1,dn2\n\n\ndef mixturemodel_skew(params, x):\n \"\"\"Mixture of two skew normal distributions.\n \n Parameters\n ----------\n params : list or ndarray\n List of parameters (expect 2x3).\n x : float or ndarray\n Values to calculate the model.\n \n Returns\n -------\n model : float or ndarray\n Mixture model evaluated at x.\n \"\"\"\n a, mua, sga, askew = params[:4]\n b, mub, sgb, bskew = params[4:]\n return a*skewnorm.pdf(x, askew, loc=mua, scale=sga) + \\\n b*skewnorm.pdf(x, bskew, loc=mub, scale=sgb)\n\ndef logLjoint1_skew(params, m, n, x):\n \"\"\"Joint log-likelihood of the two data sets.\n \n 
Parameters\n ----------\n params : list or ndarray\n List of 9 parameters: 2x4 skew normal pars + scale factor.\n m : ndarray\n Binned counts in data set 1.\n n : ndarray\n Binned counts in data set 2.\n x : ndarray\n Bin centers used to construct the histogrammed counts m and n.\n \n Returns\n -------\n logL : float\n Log likelihood of sets m and n given model parameters.\n \"\"\"\n s, pars = params[0], params[1:]\n \n lambda1 = mixturemodel_skew(pars, x)\n lambda1[lambda1<=0] = np.finfo(dtype=np.float64).tiny\n \n lambda2 = s*lambda1\n \n return np.sum(m*np.log(lambda1) - lambda1 - gammaln(m+1) + n*np.log(lambda2) - lambda2 - gammaln(n+1))\n\ndef nlogLjoint1_skew(params, m, n, x):\n \"\"\"Negative log-likelihood, for minimizers.\"\"\"\n return -logLjoint1_skew(params, m, n, x)\n\ndef logLjoint2_skew(params, m, n, x):\n \"\"\"Joint log-likelihood of the two data sets.\n \n Parameters\n ----------\n params : list or ndarray\n List of 16 parameters: 2x4x2 Gaussian components.\n m : ndarray\n Binned counts in data set 1.\n n : ndarray\n Binned counts in data set 2.\n x : ndarray\n Bin centers used to construct the histogrammed counts m and n.\n \n Returns\n -------\n logL : float\n Log likelihood of sets m and n given model parameters.\n \"\"\"\n lambda1 = mixturemodel_skew(params[:8], x)\n lambda1[lambda1<=0] = np.finfo(dtype=np.float64).tiny\n \n lambda2 = mixturemodel_skew(params[8:], x)\n lambda2[lambda2<=0] = np.finfo(dtype=np.float64).tiny\n \n return np.sum(m*np.log(lambda1) - lambda1 - gammaln(m+1) + n*np.log(lambda2) - lambda2 - gammaln(n+1))\n\ndef nlogLjoint2_skew(params, m, n, x):\n \"\"\"Negative log likelihood, for minimizers.\"\"\"\n return -logLjoint2_skew(params, m, n, x)\n\ndef Model_1_fit(bounds1,data1,data2,bins_,label):\n x,n1,n2,dn1,dn2 = bin_data(data1,data2,bins_,label)\n \n # Generate 30 random seeds for the minimizer.\n # Store the result with the lowest -ln(L) in bestfit.\n print(\"running minimizer...this might take a few minutes...\")\n\n bestfit1 = None\n\n for i in range(30):\n p0 = [np.random.uniform(b[0], b[1]) for b in bounds1]\n result = minimize(nlogLjoint1_skew, p0, method='L-BFGS-B', args=(n1, n2, x), bounds=bounds1)\n\n if result.success:\n# print(p0)\n# print(' {:.2f}'.format(result.fun))\n if bestfit1 is None:\n bestfit1 = result\n else:\n if result.fun < bestfit1.fun:\n bestfit1 = result\n \n print(\"best fit parameters\",bestfit1)\n fig, axes = plt.subplots(2,2, figsize=(10,5), sharex=True,\n gridspec_kw={'height_ratios':[3,1], 'hspace':0},\n tight_layout=True)\n\n ax = axes[0,0]\n ep = ax.errorbar(x, n1, yerr=dn1, fmt='.', alpha=0.5)\n ax.plot(x, mixturemodel_skew(bestfit1.x[1:], x), color=ep[0].get_color(), label='data set 1')\n ax.set(ylabel='count',\n title=r'$\\mathcal{M}_1$ (Skew normal model)')\n ax.grid(ls=':')\n ax.legend(fontsize=10)\n\n ax = axes[1,0]\n ax.errorbar(x, n1 - mixturemodel_skew(bestfit1.x[1:], x), yerr=dn1, fmt='.')\n ax.grid(ls=':')\n\n ax.set(#xlim=(0, 4),\n xlabel=label,)\n #ylim=(-750,750))\n\n ax = axes[0,1]\n ep = ax.errorbar(x, n2, yerr=dn1, fmt='.', color='#ff7f0e', alpha=0.5);\n ax.plot(x, bestfit1.x[0]*mixturemodel_skew(bestfit1.x[1:], x), color=ep[0].get_color(), label='data set 2')\n ax.grid(ls=':')\n ax.set(title=r'$-\\ln{{\\mathcal{{L}}_\\mathrm{{max}}}}={{{:.1f}}}$'.format(bestfit1.fun))\n ax.legend(fontsize=10)\n\n ax = axes[1,1]\n ax.errorbar(x, n2 - bestfit1.x[0]*mixturemodel_skew(bestfit1.x[1:], x), yerr=dn2, fmt='.', color='#ff7f0e')\n ax.grid(ls=':')\n\n ax.set(#xlim=(0, 4),\n xlabel=label,)\n 
#ylim=(-275,275))\n\n fig.savefig('model1_fit_'+label+'.png', dpi=100)\n \n \ndef Model_1_sampler(prior_xform1,data1,data2,bins_,label):\n x,n1,n2,dn1,dn2 = bin_data(data1,data2,bins_,label)\n print(\"running the nested sampler... this might take from minutes to hours...\")\n dsampler = dynesty.DynamicNestedSampler(logLjoint1_skew, prior_xform1, ndim=9,\n logl_args=(n1, n2, x),\n nlive=2000,\n bound='multi',\n sample='auto')\n\n dsampler.run_nested()\n dres1 = dsampler.results\n \n with open('sampler_results_model1_'+label, 'wb') as dres1_file:\n pickle.dump(dres1, dres1_file)\n print(\"sampler output saved as pickle file 'sampler_results_model1_\"+label+\"'\")\n \ndef Model1_output(data1,data2,bins_,label,sampler_results='sampler_results_model1_'): \n '''\n sampler_results: path of pickle file where sampler results are saved\n '''\n x,n1,n2,dn1,dn2 = bin_data(data1,data2,bins_,label)\n\n with open(sampler_results+label, 'rb') as dres1_file:\n dres1 = pickle.load(dres1_file)\n \n print(\"plotting corner plots...\")\n labels = ['$s$', r'$\\alpha$', r'$\\mu_\\alpha$', r'$\\sigma_\\alpha$', r'$\\xi_\\alpha$',\n r'$\\beta$', r'$\\mu_\\beta$', r'$\\sigma_\\beta$', r'$\\xi_\\beta$']\n\n fig, axes = dyplot.cornerplot(dres1, smooth=0.03,\n labels=labels,\n show_titles=True,\n quantiles_2d=[1-np.exp(-0.5*r**2) for r in [1.,2.,3]],\n quantiles=[0.16, 0.5, 0.84],\n fig=plt.subplots(9, 9, figsize=(2.5*9,2.6*9)),\n color='#1f77d4')\n\n fig.savefig('corner_model1_'+label+'.png', dpi=100)\n \n mapvals1 = np.zeros(9, dtype=float)\n for i in range(9):\n x16, x50, x84 = dynesty.utils.quantile(dres1.samples[:,i],\n np.asarray([0.16, 0.5, 0.84]))\n mapvals1[i] = x50\n \n print(\"The maximum a posteriori (MAP) values of the parameters: \",mapvals1)\n print(\"Best fit results: \")\n fig, axes = plt.subplots(2,2, figsize=(10,5), sharex=True,\n gridspec_kw={'height_ratios':[3,1], 'hspace':0},\n tight_layout=True)\n\n ax = axes[0,0]\n ep = ax.errorbar(x, n1, yerr=dn1, fmt='.', alpha=0.5)\n ax.plot(x, mixturemodel_skew(mapvals1[1:], x), color=ep[0].get_color(), label='data set 1')\n ax.set(ylabel='count',\n title=r'$\\mathcal{M}_1$ (Skew normal model)')\n ax.grid(ls=':')\n ax.legend(fontsize=10)\n\n ax = axes[1,0]\n ax.errorbar(x, n1 - mixturemodel_skew(mapvals1[1:], x), yerr=dn1, fmt='.')\n ax.grid(ls=':')\n\n ax.set(#xlim=(0, 4),\n xlabel=label,)\n #ylim=(-750,750))\n\n ax = axes[0,1]\n ep = ax.errorbar(x, n2, yerr=dn1, fmt='.', color='#ff7f0e', alpha=0.5);\n ax.plot(x, mapvals1[0]*mixturemodel_skew(mapvals1[1:], x), color=ep[0].get_color(), label='data set 2')\n ax.grid(ls=':')\n ax.set(title='Parameter MAP values')\n ax.legend(fontsize=10)\n\n ax = axes[1,1]\n ax.errorbar(x, n2 - mapvals1[0]*mixturemodel_skew(mapvals1[1:], x), yerr=dn2, fmt='.', color='#ff7f0e')\n ax.grid(ls=':')\n\n ax.set(#xlim=(0, 4),\n xlabel=label,)\n #ylim=(-275,275))\n\n fig.savefig('map_model1_'+label+'.png', dpi=100)\n \n lnZ1 = dres1.logz[-1]\n print(\"Bayesian Evidence for model 1 : \", lnZ1)\n \n return lnZ1\n\n\n\ndef Model_2_fit(bounds2,data1,data2,bins_,label):\n x,n1,n2,dn1,dn2 = bin_data(data1,data2,bins_,label)\n print(\"running minimizer...this might take a few minutes...\")\n bestfit2 = None\n\n for i in range(20):\n p0 = [np.random.uniform(b[0], b[1]) for b in bounds2]\n result = minimize(nlogLjoint2_skew, p0, method='L-BFGS-B', args=(n1, n2, x), bounds=bounds2)\n\n if result.success:\n# print(p0)\n# print(' {:.2f}'.format(result.fun))\n if bestfit2 is None:\n bestfit2 = result\n else:\n if result.fun < bestfit2.fun:\n 
bestfit2 = result\n print(\"plotting best fit results...\")\n fig, axes = plt.subplots(2,2, figsize=(10,5), sharex=True,\n gridspec_kw={'height_ratios':[3,1], 'hspace':0},\n tight_layout=True)\n\n ax = axes[0,0]\n ep = ax.errorbar(x, n1, yerr=dn1, fmt='.')\n ax.plot(x, mixturemodel_skew(bestfit2.x[:8], x), color=ep[0].get_color(), label='data set 1')\n ax.set(ylabel='count',\n title=r'$\\mathcal{M}_2$ (Skew normal model)')\n ax.grid(ls=':')\n ax.legend(fontsize=10)\n\n ax = axes[1,0]\n ax.errorbar(x, n1 - mixturemodel_skew(bestfit2.x[:8], x), yerr=dn1, fmt='.')\n ax.grid(ls=':')\n\n ax.set(#xlim=(0, 4),\n xlabel=label,)\n #ylim=(-750,750))\n\n ax = axes[0,1]\n ep = ax.errorbar(x, n2, yerr=dn1, fmt='.', color='#ff7f0e');\n ax.plot(x, mixturemodel_skew(bestfit2.x[8:], x), color=ep[0].get_color(), label='data set 2')\n ax.grid(ls=':')\n ax.set(title=r'$-\\ln{{\\mathcal{{L}}_\\mathrm{{max}}}}={{{:.1f}}}$'.format(bestfit2.fun))\n ax.legend(fontsize=10)\n\n ax = axes[1,1]\n ax.errorbar(x, n2 - mixturemodel_skew(bestfit2.x[8:], x), yerr=dn2, fmt='.', color='#ff7f0e')\n ax.grid(ls=':')\n\n ax.set(#xlim=(0, 4),\n xlabel=label,)\n #ylim=(-275,275))\n\n fig.savefig('model2_fit_'+label+'.png', dpi=100)\n \ndef Model_2_sampler(prior_xform2,data1,data2,bins_,label):\n x,n1,n2,dn1,dn2 = bin_data(data1,data2,bins_,label)\n print(\"running the nested sampler... this might take from minutes to hours...\")\n dsampler = dynesty.DynamicNestedSampler(logLjoint2_skew, prior_xform2, ndim=16,\n logl_args=(n1, n2, x),\n nlive=2000,\n bound='multi',\n sample='auto')\n\n dsampler.run_nested()\n dres2 = dsampler.results\n \n with open('sampler_results_model2_'+label, 'wb') as dres2_file:\n pickle.dump(dres2, dres2_file)\n print(\"sampler output saved as pickle file 'sampler_results_model2_\"+label+\"'\")\n\ndef Model2_output(data1,data2,bins_,label,sampler_results='sampler_results_model2_'):\n '''\n sampler_results: path of pickle file where sampler results are saved\n '''\n x,n1,n2,dn1,dn2 = bin_data(data1,data2,bins_,label)\n\n with open(sampler_results+label, 'rb') as dres2_file:\n dres2 = pickle.load(dres2_file)\n \n print(\"plotting corner plots...\")\n labels = [r'$\\alpha$', r'$\\mu_\\alpha$', r'$\\sigma_\\alpha$', r'$\\xi_\\alpha$',\n r'$\\beta$', r'$\\mu_\\beta$', r'$\\sigma_\\beta$', r'$\\xi_\\beta$',\n r'$\\gamma$', r'$\\mu_\\gamma$', r'$\\sigma_\\gamma$', r'$\\xi_\\gamma$',\n r'$\\delta$', r'$\\mu_\\delta$', r'$\\sigma_\\delta$', r'$\\xi_\\delta$']\n\n fig, axes = dyplot.cornerplot(dres2, smooth=0.03,\n labels=labels,\n show_titles=True,\n quantiles_2d=[1-np.exp(-0.5*r**2) for r in [1.,2.,3]],\n quantiles=[0.16, 0.5, 0.84],\n fig=plt.subplots(16, 16, figsize=(2.5*16,2.6*16)),\n color='#1f77d4')\n\n fig.savefig('corner_model2_'+label+'.png', dpi=100)\n \n \n mapvals2 = np.zeros(16, dtype=float)\n for i in range(16):\n x16, x50, x84 = dynesty.utils.quantile(dres2.samples[:,i],\n np.asarray([0.16, 0.5, 0.84]))\n mapvals2[i] = x50\n print(\"The maximum a posteriori (MAP) values of the parameters: \",mapvals2)\n \n print(\"Best fit results: \")\n fig, axes = plt.subplots(2,2, figsize=(10,5), sharex=True,\n gridspec_kw={'height_ratios':[3,1], 'hspace':0},\n tight_layout=True)\n\n ax = axes[0,0]\n ep = ax.errorbar(x, n1, yerr=dn1, fmt='.', alpha=0.5)\n ax.plot(x, mixturemodel_skew(mapvals2[:8], x), color=ep[0].get_color(), label='data set 1')\n ax.set(ylabel='count',\n title=r'$\\mathcal{M}_2$ (Skew normal model)')\n ax.grid(ls=':')\n ax.legend(fontsize=10)\n\n ax = axes[1,0]\n ax.errorbar(x, n1 - 
mixturemodel_skew(mapvals2[:8], x), yerr=dn1, fmt='.')\n ax.grid(ls=':')\n\n ax.set(#xlim=(0, 4),\n xlabel=label,)\n #ylim=(-750,750))\n\n ax = axes[0,1]\n ep = ax.errorbar(x, n2, yerr=dn1, fmt='.', color='#ff7f0e', alpha=0.5);\n ax.plot(x, mixturemodel_skew(mapvals2[8:], x), color=ep[0].get_color(), label='data set 2')\n ax.grid(ls=':')\n ax.set(title='Parameter MAP values')\n ax.legend(fontsize=10)\n\n ax = axes[1,1]\n ax.errorbar(x, n2 - mixturemodel_skew(mapvals2[8:], x), yerr=dn2, fmt='.', color='#ff7f0e')\n ax.grid(ls=':')\n\n ax.set(#xlim=(0, 4),\n xlabel=label,)\n #ylim=(-275,275))\n\n fig.savefig('map_model2_'+label+'.png', dpi=100)\n \n lnZ2 = dres2.logz[-1]\n print(\"Bayesian Evidence for model 2 : \", lnZ2)\n \n return lnZ2","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":16299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"111690206","text":"#observation 位置信息\r\n\r\nimport road_env_append\r\nimport self_qlearning\r\nimport graph_append\r\nimport time\r\n#car[0] start_point ;car[1] end_point ;car[2] current_position\r\ndef update():\r\n g = graph_append.graph_set().draw()\r\n init_car =[1,16,1,5]\r\n # 大概要训练1600 episde\r\n car=init_car\r\n self_q_table = self_qlearning.QlearningTable()\r\n road_this = road_env_append.road()\r\n for episode in range (3000):\r\n step = 0\r\n while True:\r\n # car[2] current position\r\n action = self_q_table.choose_action(int(car[2]))\r\n\r\n # print('hi',action)\r\n # 这一步是对的\r\n next_positon , reward ,done,current_position = road_this.travel(action,g=g,car=car)\r\n self_q_table.learn(next_position=next_positon,action=action,reward=reward,position=current_position,final_positon=car[1])\r\n #print(position)\r\n #print(next_positon)\r\n #car[2] = int(next_positon)\r\n step+=1\r\n # print(time)\r\n if done:\r\n if (step< 10):\r\n print('close enough')\r\n car = init_car\r\n break\r\n print('game over')\r\n\r\nif __name__ == '__main__':\r\n time1 = time.time()\r\n update()\r\n time2 = time.time()\r\n print(time2 - time1)\r\n","sub_path":"q_learning_run.py","file_name":"q_learning_run.py","file_ext":"py","file_size_in_byte":1325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"445098047","text":"\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\n\nfrom custo_reglin_multi import custo_reglin_multi\nfrom normalizacao import normalizarCaracteristica \n#from normalizacao import normalizar_caracteristica \nfrom gd_reglin_multi import gd\n\n#Carrega dados \ndata = pd.read_csv(\"ex1data2.txt\", header=None)\n\n#acrescenta uma coluna preenchida com 1s\n#data.insert(0, 'Ones', 1)\n\ncols = data.shape[1]\nX = data.iloc[:,0:cols-1]\ny = data.iloc[:,cols-1:cols]\n\n# converte os valores em numpy arrays\nX = np.array(X.values)\ny = np.array(y.values)\n\n#Obtém X e y normalizados\nall_X = normalizarCaracteristica(X)\nall_y = normalizarCaracteristica(y)\nX_norm = all_X[0]\ny_norm = all_y[0]\n\n#X_norm, y_norm, mean_X, std_X, mean_y, std_y = normalizar_caracteristica(X, y)\n\n#adicionar uma coluna de 1s a X para poder multiplicar as arrays X e theta\nX_norm = np.c_[np.ones((X.shape[0],1)), X_norm]\n\n#Definição de parâmetros\ntheta = np.matrix([10, 2, 10])\nalpha = 1\nepochs = 15\n\n# Cálculo do custo (inicial)\nj = custo_reglin_multi(X_norm, y_norm, theta)\nprint(\" \")\nprint(\"Custo inicial, com theta = \",theta,\" : \", j)\nprint(\" \")\n\ngd = gd(X_norm, y_norm, alpha, epochs, 
theta)\n\nprint(\"Custo final : \", gd[1], \" Theta final : \", gd[0]) \nprint(\" \")\n'''\n\nprint(\"Custo final e valores de theta após \",epochs, \" epochs:\")\nprint(gd)\nprint(\" \")\n'''","sub_path":"T1-2018/Parte2 (copy)/testes_gradiente.py","file_name":"testes_gradiente.py","file_ext":"py","file_size_in_byte":1341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"174768835","text":"# -*- coding: utf-8 -*-\nimport requests\nimport os\nimport xdrlib\nimport sys\nimport codecs\nimport xlrd\nimport json\n\n\ndef open_excel(file='player.xls'):\n try:\n data = xlrd.open_workbook(file)\n return data\n except (Exception, e):\n pass\n# 根据索引获取Excel表格中的数据 参数:file:Excel文件路径 colnameindex:表头列名所在行的所以\n# ,by_index:表的索引\n\n\ndef addToClipBoard(text):\n command = 'echo ' + text.strip() + '| clip'\n os.system(command)\n\navatar_url = 'http://rtmp.icassi.us:8092/img/player/0602/'\n\ndef excel_table_byindex(file='file.xls', num=0):\n data = open_excel(file)\n by_index = 0\n table = data.sheets()[by_index]\n plist = []\n plist_for_lowerthird = []\n v = table.cell(2, 0)\n print(v)\n row = 1\n playerNum = 9\n if num > 0:\n playerNum = num\n playerMap = {}\n avatar_url = table.cell(29, 0).value\n for i in range(0, playerNum):\n n = table.cell(row + i, 0).value\n # hupuID = table.cell(1 + i, 1).value\n h = int(table.cell(row + i, 1).value)\n w = int(table.cell(row + i, 2).value)\n a = int(table.cell(row + i, 3).value)\n print(n, 'p' + str(i + 1))\n # t = ''\n title = table.cell(row + i, 4).value.replace(',', '\\n').replace(' ',\n '\\n').replace(',', '\\n').replace('\\t', '').replace('、', '\\n')\n # info = ''\n\n info = table.cell(row + i, 5).value\n plist.append({'name': n, 'hwa': [\n h, w, a], 'title': title, 'playerId': 'p' + str(i + 1), 'info': info\n })\n playerMap['p' + str(i + 1)] = plist[-1:][0]\n plist_for_lowerthird.append(n)\n playerMap['p0'] = {\"name\": \"\", \"hwa\": [0, 0, 0],\n \"title\": \"\", \"playerId\": \"p0\", \"info\": \"\"}\n jstr = json.dumps(playerMap, ensure_ascii=False)\n addToClipBoard(jstr)\n print(jstr)\n # print(plist_for_lowerthird)\n return playerMap\n\n\ndef uploadTo8090():\n player_url = 'http://rtmp.icassi.us:8090/player2/'\n res = requests.get(player_url)\n player_arr = res.json()\n print(player_arr)\n return player_arr\n\n\n\n\ndef main():\n\n if len(sys.argv) > 1:\n num = int(sys.argv[1])\n print('num', num)\n\n playerMap = excel_table_byindex('player.xlsx', num=num)\n player_arr = uploadTo8090()\n putUrl = 'http://rtmp.icassi.us:8090/player2/'\n for p in player_arr:\n if p['player_id'] in playerMap:\n pdata = playerMap[p['player_id']]\n p2 = {}\n p2['name'] = pdata['name']\n p2['height'] = pdata['hwa'][0]\n p2['weight'] = pdata['hwa'][1]\n p2['age'] = pdata['hwa'][2]\n p2['title'] = pdata['title']\n p2['info'] = pdata['info']\n p2['avatar'] = avatar_url+p['player_id']+'.png'\n put1_res = requests.put(putUrl + p['_id'], data=p2)\n if put1_res.status_code==200:\n print(p['name'], put1_res.status_code)\n else:\n print(p['name'], put1_res.text)\n # for row in tables:\n # print(row)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"utils/playerGen.py","file_name":"playerGen.py","file_ext":"py","file_size_in_byte":3211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"137609903","text":"'''\nThis is configuration file for our project\n'''\n\nUSE_STDIO_FOR_LOGGING = True\nENABLE_DEBUG_MESSAGE = True\nENABLE_INFO_MESSAGE = True\nENABLE_WARNING_MESSAGE = 
True\nROGUE_LIKE_FRAMEWORK = \"TCOD\"\nMODULES_DIR_PREFIX=\"src\\\\pyrl\"\nTCOD_FONT = \"data\\\\font.bmp\"\nTCOD_SCREEN_WIDTH = 60\nTCOD_SCREEN_HEIGHT = 30\nTCOD_LIMIT_FPS = 30\nSCHEDULER_TCOD_KEY_EVENTS = True\nSCHEDULER_TCOD_MOUSE_EVENTS = False\n \n\n","sub_path":"src/pyrl/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"164655974","text":"import time\nfrom collections import defaultdict\n\n\ndef sleep(t):\n def inner(station, namespace):\n time.sleep(t)\n return {}\n return inner\n\n\nclass DictMerge:\n def __init__(self, **strategy):\n self._strategy = DictMerge._add_default(strategy)\n\n @staticmethod\n def _add_default(dictionary):\n result = defaultdict(lambda: \"dict_merge\")\n for k, v in dictionary.items():\n result[k] = v\n\n return result\n\n @staticmethod\n def _make_list(v):\n if isinstance(v, list):\n return list(v) # Return a copy!\n return list([v])\n\n def _merge_two(self, d1, d2):\n result = dict(d1)\n for k, v in d2.items():\n if k in result:\n\n strategy = self._strategy[k]\n\n m = None\n if strategy == \"append\":\n m = self._make_list(result[k])\n m.extend(self._make_list(v))\n elif strategy == \"replace\":\n m = v\n elif strategy == \"dict_merge\":\n m = self._merge_two(v, result[k])\n\n result[k] = m\n else:\n result[k] = v\n\n return result\n\n def merge(self, dicts):\n result = dict(dicts[0])\n for d in dicts[1:]:\n result = self._merge_two(result, dict(d))\n return result\n\n\n","sub_path":"pysweep/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"258672365","text":"# To run single test module:\n# >>> python -m unittest test.test_some_module\n\nimport os\nimport unittest\nfrom unittest import mock\n\nfrom steamCLI.config import Config\n\n\nclass ConfigTests(unittest.TestCase):\n @mock.patch('steamCLI.config.os.path.isfile')\n def test_should_create_config_given_valid_path(self, mocked_isfile):\n test_path = 'valid_path.ini'\n mocked_isfile.return_value = True\n\n config = Config(test_path)\n\n self.assertTrue(config)\n\n @mock.patch('steamCLI.config.os.path.isfile')\n def test_should_aggregate_multiple_config_arguments_into_path(self, mocked_isfile):\n test_path = 'deepfolder/folder/config.ini'\n mocked_isfile.return_value = True\n arguments = test_path.split('/')\n\n config = Config(arguments[0], arguments[1], arguments[2])\n\n self.assertTrue(config)\n\n @mock.patch('steamCLI.config.os.path.isfile')\n def test_should_support_different_root(self, mocked_isfile):\n test_path = '/rooty/root'\n mocked_isfile.return_value = True\n\n config = Config(package_folder=test_path)\n\n self.assertTrue(config)\n self.assertIn(mock.call(test_path), mocked_isfile.call_args_list)\n\n @mock.patch('steamCLI.config.os.path.isfile')\n def test_should_throw_no_file_exception(self, mocked_isfile):\n mocked_isfile.return_value = False\n with self.assertRaises(FileNotFoundError):\n Config('nonexistent.ini')\n\n def test_config_should_return_correct_real_values(self):\n \"\"\" Note: test depends on external file. Hence, it is very brittle. 
\"\"\"\n\n dir_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'steamCLI')\n\n config = Config(dir_path, 'resources.ini')\n\n self.assertEqual(\n config.get_value('SteamAPIs', 'applist'),\n 'http://api.steampowered.com/ISteamApps/GetAppList/v0002/'\n )\n","sub_path":"test/test_config.py","file_name":"test_config.py","file_ext":"py","file_size_in_byte":1900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"296039186","text":"# Search and persist all departments and their subdepartments.\n#\n# Approximate # of API requests required: 1\n\nfrom local.utils import default_logger\nfrom walmart.apiuser import find_depts\n\n\nlogger = default_logger('pipeline.1_insert_dept')\nn_persisted = 0\nfor dept in find_depts():\n persisted = dept.persist()\n if persisted:\n n_persisted += 1\nlogger.info('Persisted %s departments' % n_persisted)\n","sub_path":"src/pipeline/1_insert_dept.py","file_name":"1_insert_dept.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"156555849","text":"# Copyright (c) 2012 LE GOFF Vincent\n# All rights reserved.\n# \n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# \n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n# * Neither the name of the copyright holder nor the names of its contributors\n# may be used to endorse or promote products derived from this software\n# without specific prior written permission.\n# \n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT\n# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\n\n\"\"\"This module contains the AboardDispatcher class, defined below.\"\"\"\n\nimport cherrypy\nDispatcher = cherrypy.dispatch.Dispatcher\n\nfrom ext.aboard.router.route import Route, ALL_METHODS\n\nclass AboardDispatcher:\n \n \"\"\"Cherrypy dispatcher for Python Aboard.\n \n The dispatcher contains the routing process. The routes, defined\n in the ./route.py file, are added to this dispatcher and are used to\n redirect an HTTP request to a controller.\n \n The controllers are callables (usually instance methods) that are\n configured for a matching URI.\n \n Arguments given to this callable are:\n Positional: when the route expects resource identifiers [1]\n Keyword: the arguments given to a GET, POST or PUT method\n \n [1] Take for example the URI:\n images/1\n Here, the 'images' part won't change. 
Though, the '1' part\n usually expects a integer. This route will accept URIs like:\n images/5\n images/32\n ...\n \n \"\"\"\n \n def __init__(self):\n \"\"\"Construct the dispatcher for Python Aboard.\n \n Note that the translator used on the default dispatcher is\n not used here.\n \n \"\"\"\n Dispatcher.__init__(self, translate={})\n self.routes = {}\n \n def __call__(self, path):\n \"\"\"Look for a matching route to 'path'.\"\"\"\n if path.endswith(\"/\"):\n path = path[:-1]\n \n return Dispatcher.__call__(self, path)\n \n def find_handler(self, path):\n \"\"\"Return the appropriate page handler, plus any virtual path.\"\"\"\n request = cherrypy.serving.request\n request.config = {\n \"tools.encode.on\": True,\n }\n request.is_index = False\n \n # Get the path without taking in account the format\n format = path.split(\".\")[-1].lower()\n if len(format) < len(path):\n path = path[:-(len(format) + 1)]\n else:\n format = \"\"\n \n for route in self.routes.values():\n match = route.match(request, path)\n if not isinstance(match, bool):\n return route.callable, match\n \n return None, []\n \n def add_route(self, name, pattern, controller, callable,\n methods=ALL_METHODS):\n \"\"\"Add a route.\"\"\"\n route = Route(pattern, controller, callable, methods)\n self.routes[name] = route\n return route\n","sub_path":"ext/aboard/router/dispatcher.py","file_name":"dispatcher.py","file_ext":"py","file_size_in_byte":3995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"360712789","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport random\n\n\nclass traveling_salesman():\n def __init__(self, num_cities=50):\n # initialize the position of the cities\n random.seed(42)\n gridsize = 100\n self.num_cities = num_cities\n self.cities = [random.sample(range(gridsize), 2) for x in range(self.num_cities)]\n\n def random_tour(self, num_stops=6):\n # take a random tour of a given length\n self.num_stops = num_stops\n self.tour = random.sample(range(self.num_cities), self.num_stops)\n return self.tour\n\n def new_tour(self, tour):\n # define a new tour\n tour = [int(_) for _ in tour]\n self.num_stops = len(tour)\n self.tour = tour\n\n def tour_length(self):\n # calculate tour length\n visited_cities = [self.cities[_] for _ in self.tour]\n for city in visited_cities:\n if city == visited_cities[0]:\n # start tour at the start\n traveled_distance = 0\n location = city\n else:\n # each travelled leg\n traveled_distance += np.sqrt((location[0] - city[0])**2 + (location[1] - city[1])**2)\n location = city\n # return to start\n traveled_distance += np.sqrt((visited_cities[0][0] - city[0])**2 + (visited_cities[0][1] - city[1])**2)\n return traveled_distance\n\n def plot(self, tour=None):\n # plot the tour\n if tour is not None:\n self.tour = tour\n plt.plot([self.cities[_][0] for _ in range(self.num_cities)],\n [self.cities[_][1] for _ in range(self.num_cities)],\n linestyle='',\n marker='*',\n markersize=11.0,\n color=[1, 0, 0])\n plt.plot([self.cities[self.tour[_ % self.num_stops]][0] for _ in range(self.num_cities)],\n [self.cities[self.tour[_ % self.num_stops]][1] for _ in range(self.num_cities)],\n 'xb-')\n plt.show()\n","sub_path":"2_GA_&_DE/NP_hard.py","file_name":"NP_hard.py","file_ext":"py","file_size_in_byte":2027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"207219531","text":"#!/usr/bin/env python3\n\nimport os\nimport os.path\nimport platform\nimport socket\nimport sys\nimport 
subprocess\n\nPLATFORM = platform.system()\nIS_MAC = PLATFORM == 'Darwin'\nIS_LINUX = PLATFORM == 'Linux'\nIS_WINDOWS = PLATFORM == 'Windows'\n\nhost_name = socket.gethostname()\nIS_WORK_LAPTOP = host_name in ['C02RT09FG8WL-mackduan', 'C02FL7URMD6M-mackduan']\n\nDIR_PATH = os.path.dirname(os.path.realpath(__file__))\n\ndef call_shell(command):\n return subprocess.call(command, shell=True)\n\ndef command_exists(command):\n return not call_shell('which {} > /dev/null'.format(command))\n\ndef path_exists(p):\n return os.path.exists(os.path.expanduser(p))\n\n# region installations\n\ndef install_ag():\n if command_exists('ag'):\n return\n\n if IS_LINUX:\n call_shell('sudo apt-get install silversearcher-ag')\n elif IS_MAC:\n call_shell('brew install the_silver_searcher')\n\ndef install_fzf():\n if command_exists('fzf'):\n return\n\n if IS_LINUX:\n call_shell(\n 'rm -rf ~/.fzf'\n ' && git clone --depth 1 https://github.com/junegunn/fzf.git ~/.fzf'\n ' && ~/.fzf/install'\n )\n elif IS_MAC:\n call_shell('brew install fzf')\n call_shell('$(brew --prefix)/opt/fzf/install')\n\ndef install_google_cloud_sdk():\n if command_exists('gcloud'):\n return\n\n if not path_exists('~/google-cloud-sdk'):\n call_shell(\n 'curl -o /tmp/google-cloud-sdk.tar.gz https://dl.google.com/dl/cloudsdk/release/google-cloud-sdk.tar.gz'\n ' && tar -xzf /tmp/google-cloud-sdk.tar.gz --directory ~/'\n )\n\n call_shell('~/google-cloud-sdk/install.sh')\n print(\"Remember to run 'gcloud auth login'\")\n\ndef install_tmux():\n if command_exists('tmux'):\n return\n\n if IS_LINUX:\n call_shell('sudo apt-get install tmux')\n elif IS_MAC:\n call_shell('brew install tmux')\n\ndef install_zsh():\n if command_exists('zsh'):\n return\n\n if IS_LINUX:\n call_shell('sudo apt-get install zsh')\n elif IS_MAC:\n call_shell('brew install zsh')\n\ndef install_exa():\n if command_exists('exa'):\n return\n\n if IS_LINUX:\n call_shell('sudo apt-get install exa')\n elif IS_MAC:\n call_shell('brew install exa')\n\n\ndef install_exa():\n if command_exists('exa'):\n return\n\n if IS_LINUX:\n call_shell('sudo apt-get install exa')\n elif IS_MAC:\n call_shell('brew install exa')\n\ndef install_jq():\n if command_exists('jq'):\n return\n\n if IS_LINUX:\n call_shell('sudo apt-get install jq')\n elif IS_MAC:\n call_shell('brew install jq')\n\ndef install_tldr():\n if command_exists('tldr'):\n return\n\n if IS_LINUX:\n call_shell('sudo apt-get install tldr')\n elif IS_MAC:\n call_shell('brew install tldr')\n\n# endregion\n\n# region mac installations\n\ndef install_homebrew():\n if command_exists('brew'):\n return\n\n call_shell('/usr/bin/ruby -e \"$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)\"')\n\ndef install_sleep_watcher():\n if not call_shell('brew ls | grep sleepwatcher'):\n return\n\n call_shell('brew install sleepwatcher')\n call_shell('brew services start sleepwatcher')\n print('Installed SleepWatcher. Remember to enable permissions in Security & Privacy')\n\ndef install_blueutil():\n if command_exists('blueutil'):\n return\n\n call_shell('brew install blueutil')\n\ndef install_docker():\n if path_exists('/Applications/Docker.app'):\n return\n\n call_shell('brew install --cask docker')\n\n# endregion\n\ndef script():\n additional_instructions = []\n\n if os.environ['USER'] == 'root':\n print('Do not run this script as root')\n sys.exit(1)\n\n response = input(\n 'This script overwrite dotfiles in your home directory.'\n ' Continue? [y/N]? 
',\n )\n if response != 'y':\n sys.exit(1)\n\n if IS_LINUX:\n call_shell('sudo apt-get update')\n if IS_MAC:\n install_homebrew()\n call_shell('brew doctor')\n install_sleep_watcher()\n install_blueutil()\n\n # TODO: install `fd` (`find` replacement)\n # TODO: consider replacing `ag` with `rg` (ripgrep)\n # TODO: hook up `fzf` to use `fd` + `rg`\n install_ag()\n install_exa()\n install_jq()\n install_tldr()\n install_fzf()\n install_tmux()\n install_zsh()\n\n call_shell('sudo chsh -s $(which zsh) $USER')\n\n DOTFILES = [\n '.agignore',\n '.gitconfig',\n '.gitignore_global',\n '.gvimrc',\n '.ideavimrc',\n '.tmux.conf',\n '.vimrc',\n '.zshrc',\n '.p10k.zsh',\n\n # region Mac OS specific\n # TODO(mack): Limit copying these files to just Mac OS systems\n\n # For SleepWatcher on Mac OS\n '.sleep',\n '.wakeup',\n 'sleepwatch_helper.py',\n\n # endregion\n ]\n\n for dotfile in DOTFILES:\n call_shell(\n 'ln -sf {dir_path}/{dotfile} ~/{dotfile}'.format(\n dir_path=DIR_PATH, dotfile=dotfile,\n )\n )\n\n call_shell('mkdir -p ~/bin')\n call_shell('ln -sf {dir_path}/tmux_renum.sh ~/bin/tmux_renum.sh'.format(dir_path=DIR_PATH))\n\n call_shell('rm -rf ~/.vim/ && mkdir -p ~/.vim/bundle')\n vundle_path = '~/.vim/bundle/Vundle.vim'\n call_shell(\n 'git clone https://github.com/VundleVim/Vundle.vim {vundle_path}'\n ' && vim +PluginInstall +qall'.format(vundle_path=vundle_path)\n )\n call_shell('ln -sf {dir_path}/.vim/ftplugin ~/.vim/ftplugin'.format(dir_path=DIR_PATH))\n\n call_shell(\n 'rm -rf ~/.oh-my-zsh'\n ' && git clone git://github.com/robbyrussell/oh-my-zsh.git ~/.oh-my-zsh'\n )\n\n call_shell(\n 'rm -rf ~/.tmux/plugins'\n ' && git clone https://github.com/tmux-plugins/tpm ~/.tmux/plugins/tpm'\n )\n\n install_google_cloud_sdk()\n if IS_WORK_LAPTOP:\n install_docker()\n print(\"Remember to 'gcloud auth configure-docker'\")\n\n # TODO: automate the following instruction\n print('\\nAdditional instructions:')\n additional_instructions.append('See https://www.notion.so/mduan/New-MacBook-laptop-dotfiles-setup-dd717f24c8164277b67e0a505d218d25 for additional manual instructions')\n for instruction in additional_instructions:\n print('- {}'.format(instruction))\n\n print('\\nLog out and back in to start using Zsh')\n\nif __name__ == '__main__':\n script()\nelse:\n print('Cannot be imported')\n sys.exit(1)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":6473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"7226256","text":"\"\"\"\nTests for numba.targets.codegen.\n\"\"\"\n\nfrom __future__ import print_function\n\nimport base64\nimport ctypes\nimport pickle\nimport subprocess\nimport sys\n\nimport llvmlite.binding as ll\n\nimport numba.unittest_support as unittest\nfrom numba import utils\nfrom numba.targets.codegen import JITCPUCodegen\nfrom .support import TestCase\n\n\nasm_sum = r\"\"\"\n define i32 @sum(i32 %.1, i32 %.2) {\n %.3 = add i32 %.1, %.2\n ret i32 %.3\n }\n \"\"\"\n\nctypes_sum_ty = ctypes.CFUNCTYPE(ctypes.c_int, ctypes.c_int, ctypes.c_int)\n\n\nclass JITCPUCodegenTestCase(TestCase):\n \"\"\"\n Test the JIT code generation.\n \"\"\"\n\n def setUp(self):\n self.codegen = JITCPUCodegen('test_codegen')\n\n def compile_module(self, asm):\n ll_module = ll.parse_assembly(asm)\n ll_module.verify()\n library = self.codegen.create_library('compiled_module')\n library.add_llvm_module(ll_module)\n return library\n\n @classmethod\n def _check_unserialize_sum(cls, state):\n codegen = JITCPUCodegen('other_codegen')\n 
library = codegen.unserialize_library(state)\n ptr = library.get_pointer_to_function(\"sum\")\n cfunc = ctypes_sum_ty(ptr)\n res = cfunc(2, 3)\n assert res == 5, res\n\n def test_get_pointer_to_function(self):\n library = self.compile_module(asm_sum)\n ptr = library.get_pointer_to_function(\"sum\")\n self.assertIsInstance(ptr, utils.integer_types)\n cfunc = ctypes_sum_ty(ptr)\n self.assertEqual(cfunc(2, 3), 5)\n\n def test_serialize_unserialize(self):\n library = self.compile_module(asm_sum)\n state = library.serialize()\n self._check_unserialize_sum(state)\n\n def test_unserialize_other_process(self):\n library = self.compile_module(asm_sum)\n state = library.serialize()\n arg = base64.b64encode(pickle.dumps(state, -1))\n code = \"\"\"if 1:\n import base64\n import pickle\n import sys\n from numba.tests.test_codegen import %(test_class)s\n\n state = pickle.loads(base64.b64decode(sys.argv[1]))\n %(test_class)s._check_unserialize_sum(state)\n \"\"\" % dict(test_class=self.__class__.__name__)\n subprocess.check_call([sys.executable, '-c', code, arg.decode()])\n\n def test_magic_tuple(self):\n tup = self.codegen.magic_tuple()\n pickle.dumps(tup)\n cg2 = JITCPUCodegen('xxx')\n self.assertEqual(cg2.magic_tuple(), tup)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"numba/tests/test_codegen.py","file_name":"test_codegen.py","file_ext":"py","file_size_in_byte":2515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"264826302","text":"import json\n\nfile = 'number.json'\n\ntry:\n with open(file) as obj:\n content = json.load(obj)\n\nexcept FileNotFoundError:\n number = input(\"Какое у вас любимое число? \")\n with open(file, 'w') as obj:\n json.dump(number, obj)\n\nelse:\n stirng = \"Я знаю ваше любимое число! 
Это \" + content\n print(stirng)","sub_path":"10/10-12.py","file_name":"10-12.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"43287463","text":"import util.functions as functions\nfunctions.clear_console()\n\nTAMANHO_VETOR = 10\n\nvetor = functions.monta_vetor_int(TAMANHO_VETOR)\n\nuser_input = int(input('insira um número: '))\n\nfor item in vetor:\n if item % user_input == 0:\n print(f'{item} é multiplo de {user_input}')","sub_path":"programação em python do básico ao avançado/exercicios/s07_p1_ex18.py","file_name":"s07_p1_ex18.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"399009580","text":"#!/usr/bin/env python\nimport numpy as np\n\nfrom pyquaternion import Quaternion\nimport rospy\n\nfrom geometry_msgs.msg import Pose, PoseArray, PoseStamped\nfrom visualization_msgs.msg import Marker, MarkerArray\n\n\npublisher_position_boat_NED = rospy.Publisher(\"/mavros/local_position/pose_NED\", PoseStamped, queue_size=1)\n\nrate = None\nrviz = True\nif rviz:\n publisher_marker = rospy.Publisher('/pose_boat_NED', Marker, queue_size=1)\n\nqx_180 = Quaternion(axis=[1, 0, 0], angle=np.pi)\nqz_90p = Quaternion(axis=[0, 0, 1], angle=np.pi / 2)\n\n\ndef callback(msg):\n \"\"\"\"\"\"\n global rate\n # msg=PoseStamped()# SPAETER ENTFERNEN@@@@@@@@@@@@@@@@@@@@@@@@@@@\n\n # msg.pose.position.x\n # print(p.shape)\n pos_boat_x = msg.pose.position.y\n pos_boat_y = msg.pose.position.x\n pos_boat_z = -msg.pose.position.z\n # z 90 erst dann x 180\n tmpquat = Quaternion(w=msg.pose.orientation.w,\n x=msg.pose.orientation.x,\n y=msg.pose.orientation.y,\n z=msg.pose.orientation.z)\n\n tmpquat = qz_90p * qx_180 * tmpquat * qx_180\n NED = PoseStamped()\n NED.header = msg.header\n NED.header.frame_id = 'global_tank'\n NED.pose.position.x = pos_boat_x\n NED.pose.position.y = pos_boat_y\n NED.pose.position.z = pos_boat_z\n NED.pose.orientation.w = tmpquat.w\n NED.pose.orientation.x = tmpquat.x\n NED.pose.orientation.y = tmpquat.y\n NED.pose.orientation.z = tmpquat.z\n publisher_position_boat_NED.publish(NED)\n\n if rviz:\n marker = Marker()\n marker.header.frame_id = \"global_tank\"\n marker.id = 0\n marker.type = marker.MESH_RESOURCE\n marker.action = marker.ADD\n marker.scale.x = 0.8\n marker.scale.y = 0.8\n marker.scale.z = 0.8\n marker.color.r = 1\n marker.color.a = 1 # transparency\n marker.pose.orientation.w = tmpquat.w\n marker.pose.orientation.x = tmpquat.x\n marker.pose.orientation.y = tmpquat.y\n marker.pose.orientation.z = tmpquat.z\n marker.pose.position.x = pos_boat_x # x\n marker.pose.position.y = pos_boat_y # y\n marker.pose.position.z = pos_boat_z # z\n marker.mesh_resource = \"package://hippocampus_tools_ros/models/uuv_hippocampus.stl\"\n publisher_marker.publish(marker)\n\n\ndef main():\n rospy.init_node('enu_to_ned')\n global rate\n rate = rospy.Rate(30)\n\n rospy.Subscriber(\"/mavros/local_position/pose\", PoseStamped, callback, queue_size=1)\n\n rospy.spin()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"hippocampus_tools_ros/scripts/mavros_local_pos_ENU_to_NED.py","file_name":"mavros_local_pos_ENU_to_NED.py","file_ext":"py","file_size_in_byte":2541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"604105110","text":"# -*- coding: utf-8 -*-\n\nimport os\n\nfrom django.test import TestCase\n\nimport oscar.apps\n\n\nclass 
TestMigrations(TestCase):\n\n def check_for_auth_model(self, filepath):\n with open(filepath) as f:\n s = f.read()\n return 'auth.User' in s or 'auth.user' in s\n\n def test_dont_contain_hardcoded_user_model(self):\n root_path = os.path.dirname(oscar.apps.__file__)\n matches = []\n for dir, __, migrations in os.walk(root_path):\n if dir.endswith('migrations'):\n paths = [os.path.join(dir, migration) for migration in migrations\n if migration.endswith('.py')]\n matches += filter(self.check_for_auth_model, paths)\n\n if matches:\n pretty_matches = '\\n'.join(\n [match.replace(root_path, '') for match in matches])\n self.fail('References to hardcoded User model found in the '\n 'following migration(s):\\n' + pretty_matches)\n","sub_path":"tests/unit/core/migrations_tests.py","file_name":"migrations_tests.py","file_ext":"py","file_size_in_byte":991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"128166667","text":"# -*- coding: utf8 -*-\n\n# Copyright 2012 Vincent Jacques \n\nimport datetime\nimport collections\n\nimport RawAllocine\n\nTheater = collections.namedtuple( \"Theater\", [ \"id\", \"name\", \"url\" ] )\n\nMovie = collections.namedtuple( \"Movie\", [ \"id\", \"title\", \"url\" ] )\n\nclass MovieVariant:\n def __init__( self, movie, originalVersion, language, threeDimensions ):\n self.movie = movie\n self.originalVersion = originalVersion\n self.language = language\n self.threeDimensions = threeDimensions\n\nclass MovieShow:\n def __init__( self, theater, movieVariant, showTime, movieTime, endTime ):\n self.theater = theater\n self.movieVariant = movieVariant\n self.showTime = showTime\n self.movieTime = movieTime\n self.endTime = endTime\n\nclass Repository:\n def __init__( self ):\n self.__movies = {}\n self.__variants = {}\n\n def Theater( self, identifier, name ):\n url = \"http://www.allocine.fr/seance/salle_gen_csalle=\" + identifier + \".html\" ### @todo Get URL from allocine's response\n return Theater( identifier, name, url )\n\n def Movie( self, identifier, title ):\n if identifier not in self.__movies:\n url = \"http://www.allocine.fr/film/fichefilm_gen_cfilm=\" + str( identifier ) + \".html\" ### @todo Get URL from allocine's response\n self.__movies[ identifier ] = Movie( identifier, title, url )\n return self.__movies[ identifier ]\n\n def MovieVariant( self, movie, originalVersion, language, threeDimension ):\n if movie not in self.__variants:\n self.__variants[ movie ] = {}\n if originalVersion not in self.__variants[ movie ]:\n self.__variants[ movie ][ originalVersion ] = {}\n if threeDimension not in self.__variants[ movie ][ originalVersion ]:\n self.__variants[ movie ][ originalVersion ][ threeDimension ] = MovieVariant( movie, originalVersion, language, threeDimension )\n return self.__variants[ movie ][ originalVersion ][ threeDimension ]\n\nclass Allocine:\n __baseUrl = \"http://api.allocine.fr/rest/v3\"\n\n def __init__( self ):\n self.__raw = RawAllocine.RawAllocine()\n self.__repository = Repository()\n\n def searchTheaters( self, query ):\n theaters = []\n if query != \"\":\n feed = self.__raw.searchTheaters( query )[ \"feed\" ]\n if \"theater\" in feed:\n for theater in feed[ \"theater\" ]:\n if \"code\" in theater and \"name\" in theater:\n theaters.append( self.__repository.Theater( identifier = str( theater[ \"code\" ] ), name = theater[ \"name\" ] ) )\n return sorted( theaters, key = lambda theater: theater.name )\n\n def getMovieShows( self, theaterIdentifier ):\n movieShows = []\n\n feed = 
self.__raw.getMovieShows( theaterIdentifier )[ \"feed\" ]\n\n if \"theaterShowtimes\" in feed and len( feed[ \"theaterShowtimes\" ] ) > 0:\n theaterShowtime = feed[ \"theaterShowtimes\" ][ 0 ]\n \n if \"place\" in theaterShowtime and \"theater\" in theaterShowtime[ \"place\" ] and \"code\" in theaterShowtime[ \"place\" ][ \"theater\" ] and \"name\" in theaterShowtime[ \"place\" ][ \"theater\" ] and \"movieShowtimes\" in theaterShowtime:\n theater = self.__repository.Theater( identifier = str( theaterShowtime[ \"place\" ][ \"theater\" ][ \"code\" ] ), name = theaterShowtime[ \"place\" ][ \"theater\" ][ \"name\" ] )\n\n for movieShowtime in theaterShowtime[ \"movieShowtimes\" ]:\n movie = self.__repository.Movie( identifier = str( movieShowtime[ \"onShow\" ][ \"movie\" ][ \"code\" ] ), title = movieShowtime[ \"onShow\" ][ \"movie\" ][ \"title\" ] )\n movieVariant = self.__repository.MovieVariant(\n movie = movie,\n originalVersion = movieShowtime[ \"version\" ][ \"original\" ] == \"true\",\n language = movieShowtime[ \"version\" ][ \"$\" ],\n threeDimension = \"screenFormat\" in movieShowtime and \"$\" in movieShowtime[ \"screenFormat\" ] and movieShowtime[ \"screenFormat\" ][ \"$\" ] == \"3D\"\n )\n for screening in movieShowtime[ \"scr\" ]:\n screeningYear, screnningMonth, screeningDay = [ int( s ) for s in screening[ \"d\" ].split( \"-\" ) ]\n for screeningTime in screening[ \"t\" ]:\n screeningHour, screeningMinute = [ int( s ) for s in screeningTime[ \"$\" ].split( \":\" ) ]\n showTime = datetime.datetime( screeningYear, screnningMonth, screeningDay, screeningHour, screeningMinute)\n movieTime = showTime + datetime.timedelta( minutes = int( screeningTime[ \"p\" ] ) )\n if \"runtime\" in movieShowtime[ \"onShow\" ][ \"movie\" ]:\n endTime = movieTime + datetime.timedelta( seconds = int( movieShowtime[ \"onShow\" ][ \"movie\" ][ \"runtime\" ] ) )\n else:\n endTime = movieTime + datetime.timedelta( seconds = 1.5 * 3600 )\n movieShow = MovieShow( theater = theater, movieVariant = movieVariant, showTime = showTime, movieTime = movieTime, endTime = endTime )\n movieShows.append( movieShow )\n\n return movieShows\n","sub_path":"cine_planning/Allocine.py","file_name":"Allocine.py","file_ext":"py","file_size_in_byte":5388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"226089988","text":"import config\nimport csv\nimport time\ntimestr = time.strftime(\" %Y%m%d-%H%M%S\")\ndb = config.db\ncount = 0\noriginal = [] #original list before termRule change\nduplicate = [] #list after termRule change\nwith open('original.csv', 'r') as csvfile1: #read csv file before termRule change\n reader1 = csv.DictReader(csvfile1)\n for orig in reader1:\n original.append((orig[\"termId\"], orig[\"entityId\"], orig[\"FY\"] , orig[\"FQ\"], orig[\"termName\"], orig[\"rank\"], orig[\"expression\"], orig[\"elementName\"], orig[\"value\"]))\nwith open('dup.csv', 'r') as csvfile2: #read csv file after termRule change\n reader2 = csv.DictReader(csvfile2)\n for dup in reader2:\n duplicate.append(( dup[\"termId\"],dup[\"entityId\"], dup[\"FY\"],dup[\"FQ\"], dup[\"termName\"], dup[\"rank\"], dup[\"expression\"], dup[\"elementName\"],dup[\"value\"]))\nfilename1 = \"issue8\" + \"output_files_changed\" + timestr + \".csv\" #output csv\nwith open(filename1, 'w') as csvfile1:\n fieldnames = [\"termId\",\"entityId\",\"FY\",\"FQ\",\"termName\",\"rank\",\"expression\",\"elementName\",\"value\"]\n writer = csv.DictWriter(csvfile1, fieldnames=fieldnames, delimiter=',', 
lineterminator='\\n')\n writer.writeheader()\n changed_items = list(set(duplicate) - set(original))# difference between secNormalizedFacts dict and TermResults dict #comparison\n for rows in changed_items:\n print(rows)\n datarow = {\"termId\": rows[0], \"entityId\": rows[1], \"FY\": rows[2], \"FQ\": rows[3],\"termName\": rows[4], \"rank\": rows[5], \"expression\": rows[6],\"elementName\": rows[7], \"value\": rows[8]}\n writer.writerow(datarow)","sub_path":"InternTestData/Test Scripts/Data Verification/Test-Scripts/issue8_comparison_of_termResults.py","file_name":"issue8_comparison_of_termResults.py","file_ext":"py","file_size_in_byte":1567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"31840472","text":"'''\nprompt:\n\tfind longest substring that has no duplicate \n\tcharacters\n\nstrategy:\n\n\ttraverse, at each character, store in a hash table\n\tthe last index at which the character was encountered\n\tand make a running entry for longest substring\n\n\tif a letter in the substring was encountered prior to the\n\tstart index, update the hash entry but continue\n\n\tonce a duplicate is encountered, recalc start index for new substing\n\tfind the largest of the following: current start index of substring\n\tand position of the last index of the encountered duplicate + 1\n\n\tthen test if current substring is longer than longest substring\n'''\n\ndef longestSubstringWithoutDuplication(string):\n\n\tstartIndex = 0\n\t# longest substring is an array with a start and end index\n\tlongest = [0,0]\n\tcache = {}\n\t# one for loop-\n\tfor i, char in enumerate(string):\n\t\t# if we saw the character, recalc start index\n\t\tif char in cache:\n\t\t\tstartIndex = max(startIndex, cache[char] + 1)\n\t\t# if current subtring is longer, update the longest substring\n\t\tif longest[1] - longest[0] < i - startIndex:\n\t\t\tlongest = [startIndex, i]\n\t\t# store the letter in the hashtable\n\t\tcache[char] = i\n\t# return the string\n\treturn string[longest[0]:longest[i]]\n","sub_path":"longestSubstringWODupe.py","file_name":"longestSubstringWODupe.py","file_ext":"py","file_size_in_byte":1201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"429050900","text":"# Runtime: 16 ms, faster than 96.34% of Python online submissions for Rotate List.\n# Memory Usage: 12 MB, less than 9.52% of Python online submissions for Rotate List.\n\n# Definition for singly-linked list.\n# class ListNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution(object):\n def rotateRight(self, head, k):\n \"\"\"\n :type head: ListNode\n :type k: int\n :rtype: ListNode\n \"\"\"\n \n if head == None:\n return None\n \n actualHead = head\n \n n = 0\n last = head\n \n while True:\n n += 1\n if last.next == None:\n break\n else:\n last = last.next\n \n r = k%n\n first = head\n \n for i in range(n-r-1):\n first = first.next\n \n last.next = actualHead\n newHead = first.next\n first.next = None\n \n return newHead","sub_path":"Leetcode/LinkedLists/Medium/61_rotate_list.py","file_name":"61_rotate_list.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"365694911","text":"from joblib import load\nimport pandas as pd\nimport re\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom datetime import datetime\nfrom datetime import timedelta\nimport pvlib as pv\nimport math\n\ncsv_array = 
pd.read_csv('noaa\\\\noaa.csv').as_matrix()\n\nordered_hourly = []\nhourly_forecast = []\nhour_forecast = []\n\nhours_per_forecast = 168\n\n#2019-02-26T15:00:00-08:00\n\nfor i in range(0,len(csv_array)-1):\n if((i+1) % hours_per_forecast == 0):\n ordered_hourly.append(np.array(hourly_forecast))\n hourly_forecast = []\n\n hour_forecast = csv_array[i]\n hour_forecast = np.append(hour_forecast, datetime.strptime(csv_array[i][2][:-6], '%Y-%m-%dT%H:%M:%S') + timedelta(hours=7) )\n\n hourly_forecast.append(hour_forecast)\n\n\nordered_hourly = np.array(ordered_hourly)\n\n#Number of ours to look ahead in our data.\npredicted_lookahead = 24*4\n\nnoaa_data_array = []\nfor hours in ordered_hourly:\n timeStamp = hours[predicted_lookahead][8]\n\n noaa_data_array.append([timeStamp, float(hours[predicted_lookahead][3]), float(hours[predicted_lookahead][4]), float(hours[predicted_lookahead][5]), float(hours[predicted_lookahead][6]), float(hours[predicted_lookahead][7])])\nnoaa_data_array = np.array(noaa_data_array)\n\n\n# sets the location: latitude, longitude, and time zone\nhnxloc = pv.location.Location(36.31357, -119.63164, 'US/Pacific')\n\n#We create a times array that corrisponds to each entry in our trimmed arrays\ntimes = pd.DatetimeIndex(noaa_data_array[:,0])\n\n#Computes the clear sky (theoretical max) for each entry in the times array\ncs = hnxloc.get_clearsky(times + timedelta(hours=0), model='ineichen', linke_turbidity=3)\n\n\n\npredictInputs = []\nfor index in range(0, len(noaa_data_array)):\n predictInputs.append([noaa_data_array[index][2], noaa_data_array[index][3], noaa_data_array[index][4], noaa_data_array[index][5], cs['dhi'][index]])\n\n\n#Reload the model\nmodelReloaded = load('MLPRegressor_model.joblib')\nprint('Model loaded')\n\nplt.plot(times, (modelReloaded.predict(predictInputs)), label = 'Predicted using MLPRegressor and reloaded')\n\n#plt.plot(times, cs['dhi']*(model.predict(predictInputs)), label = 'Predicted')\nplt.legend(loc = 'upper left')\nplt.show()\n","sub_path":"notebooks/.ipynb_checkpoints/modelReloadTest-checkpoint.py","file_name":"modelReloadTest-checkpoint.py","file_ext":"py","file_size_in_byte":2186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"164730896","text":"import numpy as np\nimport scipy.optimize as optimize\n\na = 2.0\nb = 3.0\nc = 1.0\nN = 300\n\nnp.random.seed(1000)\n\nX_np = np.random.rand(N) * 1\nnoise = np.random.normal(loc=0.0, scale=1.0, size=N)*20\nY_np = np.exp(a*X_np**2 + b*X_np + c)\nY_observed = Y_np + noise\n\n\n# 使用optimize中的最小二乘法\ndef ls_func(x_data, a, b, c):\n return np.exp(a*x_data**2 + b*x_data + c)\n\npopt, pcov = optimize.curve_fit(ls_func, X_np, Y_observed)\n\nprint(popt) # [2.0178611 3.10661608 0.90126158] 果然 稳、准、狠\n\n\nimport numpy as np\nfrom scipy.optimize import leastsq\n# https://blog.csdn.net/suzyu12345/article/details/70046826\ndef func(x, p):\n \"\"\" 数据拟合所用的函数: A*sin(2*pi*k*x + theta) \"\"\"\n # A, k, theta = p\n a, b, c = p\n # return A*np.sin(2*np.pi*k*x+theta)\n return np.exp(a*x**2 + b*x + c)\n\ndef residuals(p, y, x):\n \"\"\" 实验数据x, y和拟合函数之间的差,p为拟合需要找到的系数 \"\"\"\n return 0.5 * (y - func(x, p))**2\n\np0 = [0.3, 0.1, -0.1]\n\n# 调用leastsq进行数据拟合, residuals为计算误差的函数\n# p0为拟合参数的初始值\n# args为需要拟合的实验数据,也就是,residuals误差函数\n# 除了P之外的其他参数都打包到args中\n\nplsq = leastsq(residuals, p0, args=(Y_np, 
X_np))\nprint(plsq[0])\n\n\n","sub_path":"codes/Optim_1/Scipy_optim.py","file_name":"Scipy_optim.py","file_ext":"py","file_size_in_byte":1287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"357989993","text":"#!/bin/python\nimport sys\nimport os\nimport argparse\nimport json\nimport re\nfrom utility import *\nfrom sklearn.cluster import KMeans\n\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\ndef main(args):\n obj = {}\n for root,dirs,files in os.walk(args.d):\n for file in files:\n fObj = os.path.splitext(file)\n tag = fObj[0]\n ext = fObj[1]\n if ext == '.json':\n obj[tag] = {}\n with open(os.path.join(root,file)) as f:\n for line in f:\n fields = getFieldsFromJSON(line,['text','user.screen_name','user.description','user.followers_count','user.profile_image_url','created_at'],True)\n text = mysqlifyString(fields['text'])\n if fields['screen_name'] in obj[tag]:\n obj[tag][fields['screen_name']]['count'] += 1\n if obj[tag][fields['screen_name']]['count'] <= args.l:\n obj[tag][fields['screen_name']]['tweets'].append(text)\n obj[tag][fields['screen_name']]['timestamp'].append(fields['created_at'])\n else:\n obj[tag][fields['screen_name']] = {\n 'tweets': [text],\n 'description': mysqlifyString(fields['description']),\n 'followers': fields['followers_count'],\n 'icon': fields['profile_image_url'],\n 'count': 1,\n 'timestamp': [fields['created_at']]\n }\n clusterObject(obj,extractMatrix(obj),args.k)\n if args.f == 'csv':\n exportCSV(args.o,obj)\n elif args.f == 'json':\n exportJSON(args.o,obj)\n\ndef clusterObject(obj,data,k):\n kmeans = KMeans(n_clusters=int(k),verbose=1)\n kmeans.fit(data)\n i = 0\n for tag in obj:\n for screenName in obj[tag]:\n obj[tag][screenName]['group'] = kmeans.labels_[i]\n i += 1\n \ndef extractMatrix(obj):\n matrix = []\n for tag in obj:\n for screenName in obj[tag]:\n user = obj[tag][screenName]\n matrix.append([user['followers'],user['count']])\n return matrix\n \ndef exportJSON(outPath,data):\n with open(outPath,'w+') as o:\n o.write(json.loads(data))\n\ndef exportCSV(outPath,data):\n with open(outPath,'w+') as o:\n o.write('tag,count,group,screen_name,description,followers,icon,timestamp,tweets\\n')\n for tag in data:\n for screenName in data[tag]:\n user = data[tag][screenName]\n o.write(\n tag+','+\n str(user['count'])+','+\n str(user['group'])+','+\n screenName+','+\n user['description']+','+\n str(user['followers'])+','+\n str(user['icon'])+','+\n str('::'.join(user['timestamp']))+','+\n str(';;'.join(user['tweets']))+'\\n'\n )\n \nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Find top users based on either followers count or tweets count')\n parser.add_argument('-d', help='directory to look for tweets json', required=True)\n parser.add_argument('-o', help='output file', required=True)\n parser.add_argument('-f', help='file format (csv or json)', required=True)\n parser.add_argument('-l', help='limit number of tweets', default=10)\n parser.add_argument('-k', help='number of clusters', default=4)\n args = parser.parse_args()\n main(args)\n","sub_path":"tools/clustering/twitter/k_means_users.py","file_name":"k_means_users.py","file_ext":"py","file_size_in_byte":3705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"523711882","text":"from ..core import *\n\n\nclass VastBouwParser(SingleRequestLoaderMixin, BaseParser):\n middlewares = [ JSONMiddleware(\"response\", \"docs\") ]\n\n url = 
\"https://www.vastbouw.pl/_web_service/data/solr/mieszkania-query\"\n method = \"GET\"\n headers = {\n \"Host\": \"www.vastbouw.pl\",\n \"User-Agent\": \"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:58.0) Gecko/20100101 Firefox/58.0\",\n \"Accept\": \"application/json, text/plain, */*\",\n \"Accept-Language\": \"en-US,en;q=0.5\",\n \"Accept-Encoding\": \"gzip, deflate, br\",\n \"Referer\": \"https://www.vastbouw.pl\",\n \"Connection\": \"keep-alive\"\n }\n\n def get_records(self):\n return self.load()\n\n def parse_record(self, data): \n return {\n \"number\": data.get(\"numerBudowlany\", None),\n \"_inv\": data.get(\"inwestycja\", None),\n \"_loc\": data.get(\"lokalizacja\", None),\n \"rooms\": data.get(\"pokoje\", None),\n \"_type\": data.get(\"typ\", None),\n \"area\": data.get(\"powierzchnia\", None),\n \"floor\": data.get(\"pietro\", None),\n \"plan\": data.get(\"rzut\", None),\n \"status\": data.get(\"status\", None),\n \"price\": data.get(\"cena\", None)\n }\n\n def modify_record(self, record, data):\n record[\"fid\"] = record[\"number\"]\n return record","sub_path":"parsers/vastbouw/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"577383102","text":"\n\nimport sys\n\ndatafile = sys.argv[1]\nf = open(datafile, 'r')\n\ndata=[]\ni=0\nl=f.readline()\n\n#############\n##Read data\n############\n\nwhile(l!=''):\n\ta=l.split()\n\tl2=[]\n\tfor j in range(0,len(a),1):\n\t\tl2.append(float(a[j]))\n\tdata.append(l2)\n\tl=f.readline()\nrows=len(data)\ncols=len(data[0])\nf.close()\n\n\nlabelfile = sys.argv[2]\n\nf = open(labelfile)\n\n\ntrainlabels={}\nn=[]\nn.append(0)\nn.append(0)\nl=f.readline()\nwhile(l !=''):\n\ta=l.split()\n\ttrainlabels[int(a[1])]=int(a[0])\n\tl =f.readline()\n\tn[int(a[0])]+=1\nm0=[]\nfor j in range(0,cols,1):\n\tm0.append(1)\nm1=[]\nfor j in range(0,cols,1):\n\tm1.append(1)\nfor i in range(0,rows,1):\n\tif(trainlabels.get(i)!=None and trainlabels[i]==0):\n\t\tfor j in range(0,cols,1):\n\t\t\tm0[j]=m0[j]+data[i][j]\n\tif(trainlabels.get(i)!=None and trainlabels[i]==1):\t\t\n\t\tfor j in range(0,cols,1):\n\t\t\tm1[j]=m1[j]+data[i][j]\n\nfor j in range(0,cols,1):\n\tm0[j]=m0[j]/n[0]\n\tm1[j]=m1[j]/n[1]\n\n\n\n\nsd0=[]\nsd0.append(0)\nsd1=[]\nsd1.append(0)\n\nsqsd0=[]\nfor j in range(0,cols,1):\n sqsd0.append(0)\nsqsd1=[]\nfor j in range(0,cols,1):\n sqsd1.append(0)\nfor i in range(0,rows,1):\n if(trainlabels.get(i)!=None and trainlabels[i]==0):\n for j in range(0,cols,1):\n sqsd0[j]=(m0[j]-data[i][j])**2\n if(trainlabels.get(i)!=None and trainlabels[i]==1):\n for j in range(0,cols,1):\n sqsd1[j]=(m1[j]-data[i][j])**2\n \nfor j in range(0,cols,1):\n sd0.append(0)\n sd1.append(1)\n sd0[j]=(sqsd0[j]/n[0])**0.5\n sd1[j]=(sqsd1[j]/n[1])**0.5\n\n\nad=[]\nfor j in range(0,cols,1):\n ad.append(0)\ns=[]\nfor j in range(0,cols,1):\n s.append(0)\nfor i in range(0,rows,1):\n if(trainlabels.get(i)==None):\n ad=0\n s=0\n for j in range(0,cols,1):\n ad=((m0[j]-data[i][j])/sd0[j])**2\n s=((m1[j]-data[i][j])/sd1[j])**2\n if(ad= beta:\n # break\n return best_move, value\n elif player == 1:\n #print(\"i am here\")\n value = float('inf')\n valid_moves = functions.all_valid_moves(list, player)\n print(valid_moves)\n for i in valid_moves:\n board_now, player = functions.play(list, i, player, mode)\n print(player)\n #functions.show_game(board_now)\n value = min(value, minimax(board_now, depth - 1, alpha, beta, 2, mode)[1])\n beta = min(beta, value)\n if beta <= 
alpha:\n break\n return -10, value\n\n\n","sub_path":"minimax.py","file_name":"minimax.py","file_ext":"py","file_size_in_byte":1353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"590910084","text":"# Gestionale Servizi Medici\n# Version 1.0.0\n# Developed by Alessio Rubicini, Andrea Malloni, Daniele Monaldi, Alessio Cinti, Matteo Di Perna\n\n# SideBar.py\n# SideBar containing companies list\n\n# --------------- MODULES -----------------\n\nfrom tkinter.ttk import Treeview\nimport tkinter as tk\nimport tkinter.ttk as ttk\nimport tkinter.messagebox as mbox\n\n# --------------- CLASS DEFINITION -----------------\n\nclass SideBar(Treeview):\n\n def __init__(self, master, tabView):\n super().__init__(master = master)\n \n self.tabView = tabView\n \n\n # Return a dictionary containing info about the selected item\n def getSelectedItem(self):\n if self.selection():\n item = self.item(self.selection())\n\n # Tags structure: Treeview ID, DATA TYPE, ROOT ID, SECOND ROOT ID\n result = {\"text\":item[\"text\"], \"id\":item[\"tags\"][0], \"type\":item['tags'][1]}\n\n if result[\"type\"] == \"CENSUS\":\n result[\"company\"] = item['tags'][2]\n \n elif result[\"type\"] == \"EMPLOYEE\":\n result[\"company\"] = item['tags'][2]\n result[\"census\"] = item['tags'][3]\n\n return result\n else:\n mbox.showerror(\"Errore\", \"Devi selezionare un elemento dalla lista\")\n\n \n # Add a new item to TreeView\n def addItem(self, root, id, text, tags):\n self.insert(root, \"end\", id, text = text, tags = tags)\n\n\n # Edit selected item\n def editItem(self, value):\n if self.focus():\n selected = self.focus()\n\n values = self.item(selected, text = value)\n else:\n mbox.showerror(\"Errore\", \"Devi selezionare un elemento da modificare\")\n\n \n # Delete selected item\n def deleteSelectedItem(self):\n if self.selection():\n item = self.selection()[0]\n self.delete(item)\n else:\n mbox.showerror(\"Errore\", \"Devi selezionare un elemento da eliminare\")\n\n\n # Delete all items\n def deleteAll(self):\n if mbox.askokcancel(\"Attenzione\", \"Confermi la cancellazione di tutti gli elementi?\\nL'azione è irreversibile\") == True:\n for record in self.get_children():\n self.delete(record)\n\n \n # Clear all sidebar\n def clear(self):\n for record in self.get_children():\n self.delete(record)\n \n","sub_path":"View/SideBar.py","file_name":"SideBar.py","file_ext":"py","file_size_in_byte":2362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"361752164","text":"import PySimpleGUI as sg\nimport sopa_de_letras_v2_0\nimport configurar\n\ndef barraDeProgreso():\n '''Proceso que contiene un loop que normalmente haria algo util, su funcion es estetica'''\n ok = True\n layout = [[sg.Text('Ajustando el juego :)')],\n [sg.ProgressBar(10000, orientation='h', size=(20, 20), key='progressbar')],\n [sg.Cancel()]]\n\n window = sg.Window('Cargando configuraciones').Layout(layout)\n progress_bar = window.FindElement('progressbar')\n\n for i in range(10000):\n event, values = window.Read(timeout=0)\n if event == 'Cancel' or event is None:\n ok = False\n break\n progress_bar.UpdateBar(i + 1)\n\n window.Close()\n return ok\n\ndef main_sopa():\n '''Centro de Control. Menu principal cuyo propósito es seleccionar la funcion que se desea ejecutar. 
Entre las cuales se encuentra:\n Ajustar la configuración del juego, Jugar, o terminar la ejecución del programa'''\n layout = [\n [sg.Text(text = 'SOPA DE LETRAS ', justification = 'center')],\n [sg.Button(\"JUGAR\", button_color=('white', 'blue')), sg.Button(\"CONFIGURAR\", button_color=('white', 'blue')),\n sg.Button(\"SALIR\", button_color=('white', 'red'))]\n\n\n ]\n window = sg.Window('Sopa de Letras', auto_size_text=True, default_element_size=(20, 1)).Layout(layout)\n\n while True:\n event, values = window.Read()\n print(event)\n if event == 'JUGAR':\n\n if (barraDeProgreso()):\n sopa_de_letras_v2_0.main()\n\n\n elif event == 'CONFIGURAR':\n configurar.config_main()\n elif event == 'SALIR' or event == None:\n sys.exit()\n break\n\n\n\nif __name__ == '__main__':\n import sys\n sys.exit(main_sopa())","sub_path":"main_sopa_de_letras.py","file_name":"main_sopa_de_letras.py","file_ext":"py","file_size_in_byte":1769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"553720891","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport pandas as pd\n\ndailyf = pd.read_csv(r'C:\\Users\\lhvav\\Desktop\\snapshot_daily.csv', encoding='latin-1', index_col='user_id')\n\n# First, make deep copy of the original df in case we need to refer to the original\n\nmanny = dailyf.copy(deep=True) # By default, deep=True is the default\n\n# Now, let's make the df for a single user\nuser_13004 = manny.loc[13004] \n\n# Drop the unnessecary columns\nnoneedcols = ['sleep_deadline_time', 'count_academic_deadlines', 'sleep_reason:No major factors', \n 'sleep_reason:Early-morning event', 'sleep_reason:Early-fatigue', 'sleep_reason:Early-illness', \n 'sleep_reason:Delayed-academic deadline', 'sleep_reason:Delayed-academic project', \n 'sleep_reason:Delayed-social reasons', 'sleep_reason7:Delayed-extracurricular activity', \n 'sleep_reason:Delayed-employment', 'sleep_reason:Delayed-media', 'sleep_reason: CouldnÕt fall asleep', \n 'sleep_reason:Other']\n\nuser_13004 = user_13004.drop(columns=noneedcols) # These columns were identified via the original file.\n\n# Iterate through the columns to detect any that are empty to drop them\nmore_drops = []\nfor column in user_13004: \n if user_13004[str(column)].isnull().all() == True: \n print(column)\n more_drops.append(column)\nuser_13004 = user_13004.drop(more_drops,axis=1)\n\n\n# In[2]:\n\n\nfrom sklearn.experimental import enable_iterative_imputer\nfrom sklearn.impute import IterativeImputer\nfrom sklearn.ensemble import ExtraTreesRegressor\nimport numpy as np\nfrom numpy import nan\n\n# Impute missing data with Iterative Imputer, w/ initial strategy = median (safer than mean), and\n# ExtraTrees as estimator. 
Pass all features besides the user_id and timestamp\n\n\ndf_13004 = user_13004.copy(deep=True)\nimputer = IterativeImputer(estimator = ExtraTreesRegressor(n_estimators=10,random_state=0), missing_values=nan, \n sample_posterior=False, max_iter=10, n_nearest_features=None, initial_strategy='median',\n imputation_order='random', random_state=0)\n\nX_feats = df_13004.iloc[:,12:].values # DON'T include the labels\nX_feats[:,:] = imputer.fit_transform(X_feats[:,:])\n\n# Pass the imputed values stored as a numpy.ndarray and then we can cluster\ndf_13004.iloc[:,12:] = X_feats\n\n# Normalize the features: \nfrom sklearn.preprocessing import RobustScaler\n\n# Normalize the data\nrobust_scaler = RobustScaler()\ndf_13004.iloc[:,12:] = robust_scaler.fit_transform(X_feats)\ndf_13004.head()\n\n\n# In[3]:\n\n\nfrom sklearn.decomposition import PCA\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib as mpl\nimport numpy as np\nimport seaborn as sns\nget_ipython().run_line_magic('matplotlib', 'inline')\n\n# Let's look for potential correlations: \n# Correlation Matrix Heatmap\nf, ax = plt.subplots(figsize=(10, 6))\ncorr = df_13004.iloc[:,12:].corr()\nhm = sns.heatmap(corr, )\nf.subplots_adjust(top=0.93)\nt= f.suptitle('SNAPSHOT Attributes Correlation Heatmap (13004)', fontsize=14)\n\n\n# In[56]:\n\n\n# Apply PCA to the scaled data: \npca_df = df_13004.copy(deep=True) # this is the df for the dimesion-reduced data\npca_featvals = PCA().fit(pca_df.iloc[:,12:])\n\n# plot the cumulative summation of the explained variance \nplt.figure()\nplt.plot(np.cumsum(pca_featvals.explained_variance_ratio_))\n\n# define titles and labels\nplt.xlabel('# of components')\nplt.ylabel('variance (%)')\nplt.title('explained variance')\nplt.xticks(np.arange(0, 30, 1.0))\n# plt.grid()\n# show the plot\nplt.show()\n\n\n# In[5]:\n\n\n# create a PCA modified dataset\npca_data = PCA(n_components=5).fit(pca_df.iloc[:,12:]).transform(pca_df.iloc[:,12:])\n# store the dataset in a new df\npca_data = pd.DataFrame(data=pca_data, columns=['comp1','comp2','comp3','comp4','comp5'])\npca_data.head()\n\n\n# In[16]:\n\n\n# figure for pca_dataset2 (4 components)\n\nfig = plt.figure()\nax = fig.add_subplot(111, projection='3d')\n\nimg = ax.scatter(pca_data['comp1'], pca_data['comp2'], pca_data['comp3'], c=pca_data['comp5'], cmap=plt.cool())\nax.set_xlabel('comp1')\nax.set_ylabel('comp2')\nax.set_zlabel('comp3')\nax.set_title('4-component PCA')\nplt.figure(figsize=(40,40))\nplt.show()\n\n\n# In[17]:\n\n\n# Hierarchical agglomerative clustering set-up\nimport scipy\nfrom scipy.cluster.hierarchy import dendrogram, linkage\nfrom scipy.cluster.hierarchy import fcluster\nfrom scipy.cluster.hierarchy import cophenet\nfrom scipy.spatial.distance import pdist\nfrom sklearn.metrics import silhouette_score\nfrom sklearn.metrics import davies_bouldin_score\nfrom sklearn.metrics import calinski_harabasz_score\n\n\nfrom pylab import rcParams\n\nfrom sklearn.cluster import AgglomerativeClustering\nimport sklearn.metrics as sm\n\nres_dict = {}\nlinkage_criteria = ['single','complete','average','ward']\namt_clusters = range(2,30)\nfor link_meth in linkage_criteria:\n# print(\"Linkage type: \", link_meth)\n Z = linkage(pca_data,link_meth)\n c, coph_dist = cophenet(Z,pdist(pca_data))\n print(\"CPCC: \", c)\n sil_per_clus = []\n for amt in amt_clusters: \n clusters = fcluster(Z,amt,criterion='maxclust')\n sil_score = silhouette_score(pca_data, clusters, metric='euclidean')\n db_score = 
davies_bouldin_score(pca_data, clusters)\n ch_score = calinski_harabasz_score(pca_data, clusters)\n sil_per_clus.append(sil_score)\n# print(\"Amount of clusters: \", amt)\n# print(\"Silhoutte score: \", sil_score)\n# print(\"Davies-Bouldin score: \", db_score)\n# print(\"Calinski-Harabasz score: \", ch_score)\n# print('----------------------------------------------------------------')\n res_dict[link_meth] = sil_per_clus\n\n\n# In[18]:\n\n\nplt.plot(amt_clusters,res_dict['single'], label='single')\nplt.plot(amt_clusters,res_dict['complete'], label='complete')\nplt.plot(amt_clusters,res_dict['average'], label='average')\nplt.plot(amt_clusters,res_dict['ward'], label='ward')\nplt.legend()\nplt.xlabel('Number of Clusters')\nplt.ylabel('Silhouette Score')\nplt.xticks(np.arange(2, 30, 1.0))\nplt.grid()\nplt.show()\n\n\n# In[19]:\n\n\nZ1 = linkage(pca_data,'average')\nclusters = fcluster(Z1,3,criterion='maxclust')\n\n\n# In[20]:\n\n\nfrom sklearn.decomposition import PCA\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib as mpl\nimport numpy as np\nimport seaborn as sns\nget_ipython().run_line_magic('matplotlib', 'inline')\n\n\n# In[21]:\n\n\n# figure for pca_dataset2 (4 components)\n\nfig = plt.figure()\nax = fig.add_subplot(111, projection='3d')\n\nimg = ax.scatter(pca_data['comp1'], pca_data['comp2'], pca_data['comp3'], c=clusters, cmap='Accent')\nax.set_xlabel('comp1')\nax.set_ylabel('comp2')\nax.set_zlabel('comp3')\nax.set_title('4-component PCA')\nplt.figure(figsize=(40,40))\nplt.show()\n\n\n# In[22]:\n\n\n# %matplotlib notebook\n# import matplotlib.pyplot as plt\n# from mpl_toolkits.mplot3d import Axes3D\n# import matplotlib as mpl\n# import numpy as np\n# import seaborn as sns\n\nfig = plt.figure()\nax = fig.add_subplot(111, projection='3d')\n\nimg = ax.scatter(pca_data['comp1'], pca_data['comp2'], pca_data['comp3'], c=clusters, cmap='Accent')\nax.set_xlabel('comp1')\nax.set_ylabel('comp2')\nax.set_zlabel('comp3')\nax.set_title('4-component PCA')\nplt.figure(figsize=(10, 8))\nplt.show()\n\n\n# In[72]:\n\n\n# Cluster all features \nnon_pca = df_13004.iloc[:,12:].copy()\nZ1 = linkage(non_pca,'average')\ndendrogram(Z1, truncate_mode='lastp',\n p=12,leaf_rotation=90., leaf_font_size=10., show_contracted=True)\nclusters = fcluster(Z1,3,criterion='maxclust')\nplt.title('Hierarchical Clustering Dendogram Average Linkage Participant 4', fontsize = 20)\nplt.xlabel('Cluster Size', fontsize = 20, labelpad = 15)\nplt.ylabel('Distance', fontsize = 20, labelpad = 15)\nplt.tick_params(labelsize=15)\nplt.rcParams['figure.figsize'] = (15,10)\n# plt.axhline(y=9000)\nplt.savefig(\"dendroparticipant4.png\",bbox_inches='tight',dpi=100)\nplt.show()\n\n\n# In[71]:\n\n\nres_dict2 = {}\nlinkage_criteria = ['single','complete','average','ward']\namt_clusters = range(2,30)\nfor link_meth in linkage_criteria:\n# print(\"Linkage type: \", link_meth)\n Z = linkage(non_pca,link_meth)\n c, coph_dist = cophenet(Z,pdist(non_pca))\n print(\"CPCC: \", c)\n sil_per_clus = []\n for amt in amt_clusters: \n clusters = fcluster(Z,amt,criterion='maxclust')\n sil_score = silhouette_score(non_pca, clusters, metric='euclidean')\n db_score = davies_bouldin_score(non_pca, clusters)\n ch_score = calinski_harabasz_score(non_pca, clusters)\n sil_per_clus.append(sil_score)\n# print(\"Amount of clusters: \", amt)\n# print(\"Silhoutte score: \", sil_score)\n# print(\"Davies-Bouldin score: \", db_score)\n# print(\"Calinski-Harabasz score: \", ch_score)\n# 
print('----------------------------------------------------------------')\n res_dict2[link_meth] = sil_per_clus\n\nplt.plot(amt_clusters,res_dict['single'], label='single', linewidth=3)\nplt.plot(amt_clusters,res_dict['complete'], label='complete', linewidth=3)\nplt.plot(amt_clusters,res_dict['average'], label='average', linewidth=3)\nplt.plot(amt_clusters,res_dict['ward'], label='ward', linewidth=3)\nplt.legend(fontsize=20)\nplt.title('Silhouette Score per Number of Clusters Paricipant 4', fontsize = 20)\nplt.xlabel('Number of Clusters', fontsize = 20, labelpad = 15)\nplt.ylabel('Silhouette Score', fontsize = 20, labelpad = 15)\nplt.xticks(np.arange(2, 30, 1.0))\nplt.tick_params(labelsize=15)\nplt.rcParams['figure.figsize'] = (14,10)\n# plt.grid()\nplt.savefig(\"silscoresparticipant4.png\",bbox_inches='tight',dpi=100)\nplt.show()\n\n\n# In[125]:\n\n\n# Plot labels by timestamp and color by cluster samples belong to \nplt.scatter(user_13004['timestamp'],user_13004.iloc[:,9], c = clusters, cmap='Accent', linewidth = 12)\nplt.title('Time Evolution of Evening Sick-Healthy for Paricipant 4', fontsize = 30)\nplt.xlabel('Timestamp', fontsize = 30, labelpad = 15)\nplt.xticks(np.arange(2, 30, 5.0))\nplt.ylabel('Label Evening Sick-Healthy', fontsize = 30, labelpad = 15)\nplt.tick_params(labelsize=30)\nplt.rcParams['figure.figsize'] = (30,10)\nplt.savefig(\"timeevolutionparticipant4.png\",bbox_inches='tight',dpi=100)\n# plt.grid()\nplt.show()\n\n\n# In[113]:\n\n\nuser_13004.iloc[:,9].head()\n\n\n# In[112]:\n\n\nuser_13004.iloc[:,:15]\n\n\n# In[58]:\n\n\n# %matplotlib notebook\n# import matplotlib.pyplot as plt\n# from mpl_toolkits.mplot3d import Axes3D\n# import matplotlib as mpl\n# import numpy as np\n# import seaborn as sns\n\nfig = plt.figure()\nax = fig.add_subplot(111, projection='3d')\nimg = ax.scatter(pca_data['comp1'], pca_data['comp2'], pca_data['comp3'], \n linewidth = 10, c=clusters, cmap='Accent')\nax.set_xlabel('comp1', fontsize = 20, labelpad = 15)\nax.set_ylabel('comp2', fontsize = 20, labelpad = 15)\nax.set_zlabel('comp3', fontsize = 20, labelpad = 15)\nax.tick_params(labelsize=15)\nax.set_title('3 Clusters Participant 4', fontsize = 25)\nplt.rcParams['figure.figsize'] = (15,10)\nplt.tight_layout()\nplt.savefig(\"3clustersparticipant4.png\",bbox_inches='tight',dpi=100)\nplt.show()\n\n\n# In[28]:\n\n\nfrom sklearn.experimental import enable_iterative_imputer\nfrom sklearn.impute import IterativeImputer\nfrom sklearn.ensemble import ExtraTreesRegressor\nimport numpy as np\nfrom numpy import nan\n\n# First, make sure the data set is complete by imputing missing values\n# Use Iterative Imputer, with initial strategy = median (safer than mean), and ExtraTrees as estimator\n# We need to pass all features besides the user_id and timestamp\n\nimputer = IterativeImputer(estimator = ExtraTreesRegressor(n_estimators=10,random_state=0), missing_values=nan, \n sample_posterior=False, max_iter=10, n_nearest_features=None, initial_strategy='median',\n imputation_order='random', random_state=0)\n\ny_labels = df_13004.iloc[:,1:12].values # impute missing labels if any\n\nprint(y_labels.shape)\n\ny_labels[:,:] = imputer.fit_transform(y_labels[:,:])\n\ndf_13004.iloc[:,1:12] = y_labels\n\n\n# In[30]:\n\n\nfrom scipy.stats import normaltest\nfrom scipy.stats import shapiro\n\n# Check for normality among the variables: \nalpha = .05\nfor feature in user_13004:\n if feature == 'timestamp':\n continue\n s, p1 = normaltest(df_13004[feature])\n w, p2 = shapiro(df_13004[feature])\n \n# if p1 <= alpha or p2 <= 
alpha: \n# print(feature, \"doesn't pass normality\")\n\n\n# In[32]:\n\n\n# Add the column to the df that will specify a sample's cluster class\n\ndf_13004['Cluster'] = clusters\ndf_13004.head()\n\n\n# In[33]:\n\n\n# Perform statistical test for all features between cluster groups \n\nfrom scipy.stats import kruskal\nfrom scipy.stats import f_oneway\n\nc1 = df_13004.loc[df_13004['Cluster'] == 1]\nc2 = df_13004.loc[df_13004['Cluster'] == 2]\nc3 = df_13004.loc[df_13004['Cluster'] == 3]\n\nc1 = c1.iloc[:,12:-1]\nc2 = c2.iloc[:,12:-1]\nc3 = c3.iloc[:,12:-1]\n\nall_feats = []\nfor feat in c1: \n feat_name = str(feat)\n all_feats.append(feat_name)\n\nimportant_feats_kruskal = []\nimportant_feats_anova = []\nfor feature in all_feats: \n\n# h, p = kruskal(c1[feature],c2[feature],c3[feature])\n f, p2 = f_oneway(c1[feature],c2[feature],c3[feature])\n \n# if p <= .05: \n# print('Kruskal-Wallis ', p, feature)\n# important_feats_kruskal.append(feature)\n\n if p2 <= .05: \n print('One Way ANOVA ', p2, feature)\n important_feats_anova.append(feature)\n \n\n\n# In[34]:\n\n\n# Perform post hoc test for previous findings\n\nfrom scikit_posthocs import posthoc_mannwhitney\n\nmw_results = {}\n\nfor feature in important_feats_anova: \n print(feature)\n try: \n \n x = [c1[feature],c2[feature],c3[feature]]\n psthc_p = posthoc_mannwhitney(x, p_adjust='holm') \n \n print(psthc_p)\n \n mw_results[feature] = psthc_p\n \n except: \n \n pass\n \n\n\n# In[35]:\n\n\n# Make a dataframe to use to create a heatmap of the pvalues: \nsig_feats_df = pd.DataFrame(columns = ['Feature','Groups','Pvalue'])\ngroups_list = [(1,2),(1,3),(2,3)]\n\nfor key in mw_results: \n for group in groups_list: \n feature = key\n groups_to_compare = str(group)\n p_val = mw_results[key][group[0]][group[1]]\n# print(feature, groups_to_compare, p_val)\n sig_feats_df = sig_feats_df.append({'Feature': feature, 'Groups': groups_to_compare, 'Pvalue': p_val}, ignore_index=True)\nsig_feats_df\n\n\n# In[90]:\n\n\n# Make the heatmap of the p-values per group per feature: \nhtmp = pd.pivot_table(sig_feats_df, values = 'Pvalue', index=['Groups'], columns = 'Feature')\nsns.set(font_scale=2) \nsns.heatmap(htmp, cmap = 'RdBu', linewidths=.3, vmax= .1)\nplt.rcParams['figure.figsize'] = (30,10)\nplt.title('Heat Map Participant 4')\nplt.savefig(\"hmparticipant4.png\",bbox_inches='tight',dpi=100)\n\n\n# In[38]:\n\n\n# For the features for the clusters that were significant (as shown in the heatmap), find how they are different\n\n# Extract the statistically significant features: # there aren't many cluster groups yet, so not necessary to extract \nfeaturesPergroups_sig = [] \nfor index, row in sig_feats_df.iterrows(): \n if row['Pvalue'] <= .05: \n featuresPergroups_sig.append(row['Feature'])\n \nprint(featuresPergroups_sig)\n\n# Plot whisker plots (boxplot) per feature per cluster group for features that were\n# statistically significant between groups\n\nimport seaborn as sns\nsns.set(style=\"whitegrid\")\n\nax = sns.boxplot(x='Cluster',y='phys_10H-17H:percentMedPeakNoArtifact',data=df_13004)\nax.set_title('Feature Score per Cluster')\n\n\n# In[92]:\n\n\n# Find the mean and standard deviation of each label per cluster: \n\ncluster_classes = [1,2,3]\n\ncluster_label_stats = {}\n\n# Iterate through the labels: \nfor label in df_13004.iloc[:,1:12]: \n cluster_label_stats[label] = {}\n for cluster_class in cluster_classes: \n tmpvar = df_13004.loc[df_13004['Cluster'] == cluster_class][label]\n cluster_label_stats[label][str(label) + ' ' + str(cluster_class) +' mean'] 
= tmpvar.mean()\n cluster_label_stats[label][str(label) + ' ' + str(cluster_class) +' stdv'] = tmpvar.std()\n\n# Replace instances of nan with 0. These are due clusters with a size of 1 \n\nimport math\n\nfor key in cluster_label_stats: \n for item in cluster_label_stats[key]: \n if math.isnan(cluster_label_stats[key][item]) == True: \n cluster_label_stats[key][item] = 0\n\n\n# Make a bar graph with label on the x axis splitting into the three cluster classes \n# for each label and then the score on the y axis\n\nimport numpy as np\n\nlabels_brgrph = []\nfor key in cluster_label_stats: \n labels_brgrph.append(key)\n \nc1_means = []\nc2_means = []\nc3_means = []\n\nc1_errors = []\nc2_errors = []\nc3_errors = []\n\nfor key in cluster_label_stats: \n for item in cluster_label_stats[key]: \n if '1' in item: \n if 'mean' in item: \n c1_means.append(cluster_label_stats[key][item])\n if 'stdv' in item: \n c1_errors.append(cluster_label_stats[key][item])\n if '2' in item: \n if 'mean' in item:\n c2_means.append(cluster_label_stats[key][item])\n if 'stdv' in item: \n c2_errors.append(cluster_label_stats[key][item])\n if '3' in item: \n if 'mean' in item: \n c3_means.append(cluster_label_stats[key][item])\n if 'stdv' in item: \n c3_errors.append(cluster_label_stats[key][item])\n \n\nx_set = np.arange(len(labels_brgrph))\n\nfig, ax = plt.subplots()\nrects1 = ax.bar(x_set - .2, c1_means, width = .2,label = 'C1', yerr = c1_errors, align = 'center', ecolor = 'black')\nrects2 = ax.bar(x_set, c2_means, width = .2,label = 'C2', yerr = c2_errors, align = 'center', ecolor = 'black')\nrects3 = ax.bar(x_set + .2, c3_means, width = .2,label = 'C3', yerr = c3_errors, align = 'center', ecolor = 'black')\n\n\nax.set_ylabel('Score', fontsize = 30, labelpad = 15)\nax.set_xlabel('Well-being Label', fontsize = 30, labelpad = 15)\nax.set_title('Feature Scores by Cluster Class Participant 4', fontsize = 40)\nax.set_xticks(x_set)\nax.set_xticklabels(labels_brgrph)\nax.legend(fontsize=30)\n\nfig.tight_layout()\nplt.rcParams['figure.figsize'] = (60,10)\nplt.savefig(\"labelscoresparticipant4.png\",bbox_inches='tight',dpi=100)\nplt.show()\n\n\n# In[127]:\n\n\n# Get the counts of transitions \n\n\nfrst_row = 0\nscnd_row = 1 \ncluster_clmn = 435 # this is the column containing the cluster classes\n\n# Transitions from cluster class 1 \none_one = 0\none_two = 0\none_three = 0\n# Transitions from cluster class 2\ntwo_one = 0\ntwo_two = 0\ntwo_three = 0\n# Transitions from cluster class 3 \nthree_one = 0\nthree_two = 0\nthree_three = 0\n\nwhile frst_row != (len(df_13004.iloc[:,cluster_clmn]) - 1): # the index of the last row is len(df) - 1\n \n# bobby.iloc[frst_row:scnd_row,435] # This shows the pair of rows we're comparing but idk how to take advantage\n \n if df_13004.iloc[frst_row,cluster_clmn] == 1 and df_13004.iloc[scnd_row,cluster_clmn] == 1: \n one_one = one_one + 1\n if df_13004.iloc[frst_row,cluster_clmn] == 1 and df_13004.iloc[scnd_row,cluster_clmn] == 2:\n one_two = one_two + 1 \n if df_13004.iloc[frst_row,cluster_clmn] == 1 and df_13004.iloc[scnd_row,cluster_clmn] == 3:\n one_three = one_three + 1\n if df_13004.iloc[frst_row,cluster_clmn] == 2 and df_13004.iloc[scnd_row,cluster_clmn] == 1:\n two_one = two_one + 1\n if df_13004.iloc[frst_row,cluster_clmn] == 2 and df_13004.iloc[scnd_row,cluster_clmn] == 2:\n two_two = two_two + 1\n if df_13004.iloc[frst_row,cluster_clmn] == 2 and df_13004.iloc[scnd_row,cluster_clmn] == 3:\n two_three = two_three + 1\n if df_13004.iloc[frst_row,cluster_clmn] == 3 and 
df_13004.iloc[scnd_row,cluster_clmn] == 1:\n three_one = three_one + 1\n if df_13004.iloc[frst_row,cluster_clmn] == 3 and df_13004.iloc[scnd_row,cluster_clmn] == 2:\n three_two = three_two + 1\n if df_13004.iloc[frst_row,cluster_clmn] == 3 and df_13004.iloc[scnd_row,cluster_clmn] == 3:\n three_three = three_three + 1\n \n frst_row = frst_row + 1 \n scnd_row = scnd_row + 1\n \n# Determine the probability of each transition \n\n# Transitions from cluster class 1 \none_one_prb = one_one / (len(df_13004.iloc[:,cluster_clmn]) - 1)\none_two_prb = one_two / (len(df_13004.iloc[:,cluster_clmn]) - 1)\none_three_prb = one_three / (len(df_13004.iloc[:,cluster_clmn]) - 1)\n# Transitions from cluster class 2\ntwo_one_prb = two_one / (len(df_13004.iloc[:,cluster_clmn]) - 1)\ntwo_two_prb = two_two / (len(df_13004.iloc[:,cluster_clmn]) - 1)\ntwo_three_prb = two_three / (len(df_13004.iloc[:,cluster_clmn]) - 1)\n# Transitions from cluster class 3 \nthree_one_prb = three_one / (len(df_13004.iloc[:,cluster_clmn]) - 1)\nthree_two_prb = three_two / (len(df_13004.iloc[:,cluster_clmn]) - 1)\nthree_three_prb = three_three / (len(df_13004.iloc[:,cluster_clmn]) - 1)\n\ntrns_prbs1 = [one_one_prb, one_two_prb, one_three_prb]\ntrns_prbs2 = [two_one_prb, two_two_prb, two_three_prb]\ntrns_prbs3 = [three_one_prb, three_two_prb, three_three_prb]\n\n# Plot the probability distributions for each transition \n\nx_clstclss = [1,2,3]\n\nx_lenax = np.arange(len(x_clstclss))\n\nfig, ax = plt.subplots()\nrectsc1 = ax.bar(x_lenax - .3, trns_prbs1, width = .3, label = 'Probability from Cluster 1')\nrectsc2 = ax.bar(x_lenax, trns_prbs2, width = .3, label = 'Probability from Cluster 2')\nrectsc3 = ax.bar(x_lenax + .3, trns_prbs3, width = .3, label = 'Probability from Cluster 3')\n\nax.set_xticks(x_lenax)\nax.set_xlabel('Cluster Transitioned to', fontsize = 30, labelpad = 15)\nax.set_xticklabels(x_clstclss)\nax.set_ylabel('Probability', fontsize = 30, labelpad = 15)\nax.set_title('Probability Distributions for Transitions Between Clusters Participant 4', fontsize = 30)\nax.tick_params(labelsize=25)\nax.legend(fontsize=30)\nplt.rcParams['figure.figsize'] = (30,10)\nplt.savefig(\"probabilitiesparticipant4.png\",bbox_inches='tight',dpi=100)\nplt.show()\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"FinalProject/Cluster All Daily Features 13004.py","file_name":"Cluster All Daily Features 13004.py","file_ext":"py","file_size_in_byte":21941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"145110152","text":"# -*- coding: UTF-8 -*-\n# File: ozone_temp_evi\n# Author: Philip Cheng\n# Time: 8/8/16 -> 1:56 PM\nimport os\nimport pandas as pd\n\nWORK_SPACE = \"data/ozone_temp_evi\"\n\n\ndef get_temp_dfs():\n type_fields = (\"tavg\", \"tmax\", \"tmin\")\n temp_csvs = [os.path.join(\"data/temp_merge\", \"temp_{}.csv\".format(type_field)) for type_field in type_fields]\n return [pd.read_csv(temp_csv) for temp_csv in temp_csvs]\n\n\ndef ozone_temp():\n temp_dfs = get_temp_dfs()\n for year in range(1990, 2015):\n ozone_csv = \"data/ozone_merge/self/ozone_{}.csv\".format(year)\n ozone_df = pd.read_csv(ozone_csv)\n temp_year_dfs = [temp_df.loc[lambda x: x.Year == year, :] for temp_df in temp_dfs]\n\n def row_process(row_series):\n pass\n ozone_df.apply(row_process, axis=1)\n\n\nif __name__ == \"__main__\":\n if not os.path.exists(WORK_SPACE):\n os.mkdir(WORK_SPACE)\n 
ozone_temp()","sub_path":"ozone_temp_evi.py","file_name":"ozone_temp_evi.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"558718856","text":"# This is extremely sloppy.\n# There are so many redundant operations and it's mostly checks\n# But ultimately it's a quick and simple script that gets the job done\n\ndef get_map(size, path):\n minx = size\n miny = size\n maxx = 0\n maxy = 0\n\n # Find maxes and mins\n for point in path:\n minx = min(point[0], minx)\n maxx = max(point[0], maxx)\n miny = min(point[1], miny)\n maxy = max(point[1], maxy)\n\n # Attempt to pad out\n minx = max(0, minx-30)\n maxx = min(size, maxx + 30)\n miny = max(0, miny-30)\n maxy = min(size, maxy+30)\n\n # Find max difference and find new co-ordinates based on it\n max_diff = max(maxx-minx, maxy-miny)\n minx = ((maxx+minx)/2)-(max_diff/2)\n maxx = minx+max_diff\n miny = ((maxy+miny)/2)-(max_diff/2)\n maxy = miny+max_diff\n\n # Shift and truncate co-ordiantes which are out of bounds\n if minx < 0:\n maxx = min(size, maxx + abs(minx))\n minx = 0\n if maxx > size:\n minx = max(0,minx-(maxx-size))\n maxx = size\n if miny < 0:\n maxy = min(size, maxy + abs(miny))\n miny = 0\n if maxy > size:\n miny = max(0,miny-(maxy-size))\n maxy = size\n \n # Print minima and box size\n print(str((int(minx), int(miny))) + ', ' + str(max_diff))\n\nif __name__ == \"__main__\":\n # Example tests\n get_map(2000, [(600, 600), (700, 1200)])\n #get_map(2000, [(300, 300), (1300, 300)])\n #get_map(2000, [(825, 820), (840, 830), (830, 865), (835, 900)])\n #get_map(5079, [(5079, 2000), (5079, 3000)])\n #get_map(5079, [(1000, 0), (1000, 5079)])\n #eval(\"get_map(\"+input()+\")\") # custom map that can be tested\n","sub_path":"Small Challenges/Daily Programmer/20171124_Map.py","file_name":"20171124_Map.py","file_ext":"py","file_size_in_byte":1546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"165034855","text":"import RPi.GPIO as gp\nimport time\nled = 21\nprequency = 100\ngp.setmode(gp.BCM)\ngp.setup(led, gp.OUT)\n\npwm_led = gp.PWM(led,prequency)\npwm_led.start(0)\ntry:\n while True:\n for i in range(0,101,10):\n pwm_led.ChangeDutyCycle(i)\n print(i)\n time.sleep(0.3)\n for i in range(100,0,-10):\n pwm_led.ChangeDutyCycle(i)\n print(i)\n time.sleep(0.3)\n #break\nexcept KeyboardInterrupt:\n print(\"end\")\npwm_led.stop()\ngp.cleanup()","sub_path":"rasp_Python/led_brightness03.py","file_name":"led_brightness03.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"579252843","text":"# -*- coding: utf-8 -*-\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\nfrom matplotlib.finance import candlestick_ohlc\n\nquot_x = quot[(quot['fut'] == 'SR') & (quot['main'] == 1)][['open', 'high', 'low', 'close']]\nquot_x.reset_index(inplace=True)\nquot_x = quot_x[quot_x['trad_date'] >= pd.to_datetime('2015-07-01')]\nquot_x.trad_date = mdates.date2num(quot_x.trad_date.dt.to_pydatetime())\nquot_x_ohlc = zip(quot_x['trad_date'], quot_x['open'], quot_x['high'], quot_x['low'], quot_x['close'])\n\nweekday_quot = [tuple([i]+list(quote[1:])) for i,quote in enumerate(quot_x_ohlc)]\n\nfig, ax = plt.subplots()\nfig.subplots_adjust(bottom=0.2)\n\ncandlestick_ohlc(ax, weekday_quot, width=0.6, colorup='r', 
colordown='g')\nax.set_xticks(range(0,len(weekday_quot),5))\nax.set_xticklabels([mdates.num2date(quot_x_ohlc[index][0]).strftime('%b-%d')\n for index in ax.get_xticks()])\nplt.setp(plt.gca().get_xticklabels(), rotation=45, horizontalalignment='right')\nplt.show()\n\n\n\n\n","sub_path":"Explore.py","file_name":"Explore.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"220664269","text":"\"\"\"\nMIT License\n\nCopyright (c) 2016 Zeke Barge\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\"\"\"\n\nimport os\nfrom core.compat import QtGui\n\nICONS_DIRECTORY = os.path.dirname(__file__)\nICON_NAMES = ['add', 'delete', 'download', 'export_csv',\n 'export_generic', 'export_pdf','folder',\n 'import','merge','redo','save','saveas', 'send','split',\n 'suppress', 'undo', 'edit', 'settings', 'home','ok',\n 'cancel','delete_database', 'lightning','add_column', 'add_row',\n 'delete_column', 'delete_row', 'filter', 'rename', 'spreadsheet']\n\n\ndef path_for(name: str, directory: str = None, verify=False):\n if directory is None:\n directory = ICONS_DIRECTORY\n\n filename = os.path.join(directory, name)\n\n if '.' 
not in name:\n filename += '.png'\n\n if verify and not os.path.exists(filename):\n raise OSError(\"Cannot find icon file {} \".format(filename))\n\n return QtGui.QIcon(filename)\n\n\nclass Icons(dict):\n \"\"\"\n Initialize this after the application\n has started to avoid QtGui errors.\n Call like a dict or an object.\n \"\"\"\n def __init__(self, directory=None):\n if directory is None:\n self._directory = directory\n\n self.update({x: self.path_for(x) for x in ICON_NAMES})\n\n def path_for(self, name, verify=True):\n return path_for(name, directory=self._directory, verify=verify)\n\n def __getattr__(self, item):\n return self[item]\n\n\n","sub_path":"zeex/icons/icons.py","file_name":"icons.py","file_ext":"py","file_size_in_byte":2451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"232308747","text":"import numpy as np\nimport subprocess\nimport datetime\nimport matplotlib.pyplot as plt\nimport random\nimport pandas as pd\nimport os\nimport time\nfrom BOCS import Cat_BOCS1#, MO_Cat_BOCS\n\n\n#########################################################################\n########################### SINGLE OBJECTIVE ############################\n#########################################################################\n\nprint(\"\\nMinimizing Noisy Trap function, x_max=[1,1,1,1,1,1,1,1,0], y_max= 9 \\n\")\n\nnp.random.seed(20)\n\n# Give the number of categories of each element\nn=5\nn_Cats = np.array(2*np.ones(n),dtype=int) #[2,2,2,2,2,2]\nprint(n_Cats)\n# define objective\ndef objective(x):\n xx=str()\n for i in x:\n xx+=str(i)\n cmd=['python', '/home/pkent/Documents/Solo Research Project/Trap/Trap.py',xx]\n output = subprocess.Popen( cmd, stdout=subprocess.PIPE ).communicate()[0]\n#print(output.decode('utf-8'))\n#print(output.decode('utf-8')[8:])\n return -int(output.decode('utf-8')[8:])\n # x = x.reshape(9,)\n # return -np.sum(x)\n\ndef noisyobjective(x):\n xx=str()\n for i in x:\n xx+=str(i)\n cmd=['python', '/home/pkent/Documents/Solo Research Project/Trap/Trap.py',xx]\n output = subprocess.Popen( cmd, stdout=subprocess.PIPE ).communicate()[0]\n#print(output.decode('utf-8'))\n#print(output.decode('utf-8')[8:])\n #noise = np.random.normal(0,1,1)\n returnval = -int(output.decode('utf-8')[8:])\n return returnval#+noise\n # x = x.reshape(9,)\n # return -np.sum(x)\n# call the optimizer\n\ndef savetofile(array,directory,filename):\n '''A functon takes np.array:array and String:directory and saves it in the directory'''\n if not os.path.exists(directory):\n os.makedirs(directory)\n timeval = datetime.datetime.now().strftime(\"%c\")\n np.savetxt(directory+\"/\"+timeval+filename+\".csv\", array, delimiter=\",\")\n\nif __name__ == \"__main__\":\n ## How many runs to compare\n runs =100\n ## How many iterations per run\n iterations = 20\n ARRAY=np.zeros((runs,iterations))\n TrueArray=np.zeros((runs,iterations))\n TimerArray=np.zeros((runs,iterations-2))\n for i in range(runs):\n ARRAY[i,:],TimerArray[i,:],TrueArray[i,:]=Cat_BOCS1(fnoisy=noisyobjective,f=objective, n_Cats=n_Cats, n_init=np.max(n_Cats), n_evals=iterations,verbose=True,knownmax=0)\n #plt.errorbar(range(1,iterations+1),ARRAY.mean(axis=0),xerr=0,yerr=ARRAY.std(axis=0))\n #Convert arrays to dataframes\n FitnessArray2=pd.DataFrame(TrueArray)\n TimeArray2=pd.DataFrame(TimerArray)\n #Export as csv\n FitnessArray2.to_csv(('Data/runs:'+str(runs)+'it:'+str(iterations)+'n:'+str(n)+' '+str(time.strftime(\"%d %b %H:%M:%S\",time.gmtime()))+'BOCS_Fitness.csv'))\n 
TimeArray2.to_csv(('Data/runs:'+str(runs)+'it:'+str(iterations)+'n:'+str(n)+' '+str(time.strftime(\"%d %b %H:%M:%S\",time.gmtime()))+'BOCS_Timer.csv'))\n\n plt.errorbar(range(1,iterations+1),TrueArray.mean(axis=0),xerr=0,yerr=ARRAY.std(axis=0),label='True Value')\n plt.plot(range(3,iterations+1),TimerArray.mean(axis=0),label='time per iteration')\n plt.title('BOCs running on MaxOne Function with n=%i over %i runs' %(iterations,runs))\n plt.legend()\n plt.xlabel('runs')\n plt.ylabel('fit')\n plt.ylim((0,n))\n print('Wall Clock time: ', np.mean(np.sum(TimerArray,axis=1))/np.max(np.sum(TimerArray,axis=1))*n,'max: ', np.max(np.sum(TimerArray,axis=1)),'min: ',np.min(np.sum(TimerArray,axis=1)))\n savetofile(TimerArray,'/home/pkent/Documents/Solo Research Project/BOCS Code/Output','Time, n: '+str(n)+' iter: '+str(iterations)+' ')\n savetofile(TrueArray,'/home/pkent/Documents/Solo Research Project/BOCS Code/Output','True, n: '+str(n)+' iter: '+str(iterations)+' ')\n plt.show()\n","sub_path":"BOCS Code/BOCsTrap.py","file_name":"BOCsTrap.py","file_ext":"py","file_size_in_byte":3700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"247955037","text":"show_logs = 0\ndef log(message):\n\tif show_logs:\n\t\tprint(message)\n\nprogram = open('18.txt').readlines()\nprogram = [[p.strip() for p in ins.split(' ')] for ins in program]\n\nclass Process():\n\tdef __init__(self, p):\n\t\tself.i = 0\n\t\tself.memory = {'p': p, '1': 1}\n\t\tself.inbox = []\n\t\tself.linked = None\n\t\tself.sent = 0\n\t\t\n\tdef evaluate(self):\n\t\tparams = program[self.i]\n\t\tcmd = params[0]\n\t\t\n\t\treg = params[1]\n\t\tif reg not in self.memory:\n\t\t\tself.memory[reg] = 0\n\t\t\n\t\tval = params[2] if len(params) > 2 else None\n\t\tif val in self.memory:\n\t\t\tval = self.memory[val]\n\t\telif val:\n\t\t\tval = int(val)\n\n\t\ti_delta = 1\n\t\t\n\t\tif cmd == 'snd':\n\t\t\tlog(\"sending value in \" + reg)\n\t\t\tself.linked.inbox += [self.memory[reg]]\n\t\t\tself.sent += 1\n\t\telif cmd == 'set':\n\t\t\tlog(\"setting register \" + reg + \" to \" + str(val))\n\t\t\tself.memory[reg] = int(val)\n\t\telif cmd == 'add':\n\t\t\tlog(\"adding \" + str(val) + \" to register \" + reg)\n\t\t\tself.memory[reg] += val\n\t\telif cmd == 'mul':\n\t\t\tlog(\"multiplying \" + str(val) + \" to register \" + reg)\n\t\t\tself.memory[reg] *= val\n\t\telif cmd == 'mod':\n\t\t\tlog(\"modulo \" + str(val) + \" to register \" + reg)\n\t\t\tself.memory[reg] %= val\n\t\telif cmd == 'rcv':\n\t\t\tif len(self.inbox) == 0:\n\t\t\t\tlog(\"waiting for value...\")\n\t\t\t\treturn True\n\t\t\tlog(\"receiving value into \" + reg)\n\t\t\tself.memory[reg] = self.inbox.pop(0)\n\t\telif cmd == 'jgz':\n\t\t\tlog(\"jumping if \" + reg + \" greater than zero with offset \" + str(val))\n\t\t\tif self.memory[reg] > 0:\n\t\t\t\ti_delta = val\n\t\telse:\n\t\t\traise Exception(\"invalid command found: \" + cmd)\n\t\t\n\t\tself.i += i_delta\n\t\t\n\t\treturn False\n\na = Process(p=0)\nb = Process(p=1)\na.linked = b\nb.linked = a\n\nprint(\"starting processes a and b\")\n\nwhile a.i in range(len(program)) and b.i in range(len(program)):\n\ta_locked = a.evaluate()\n\tb_locked = b.evaluate()\n\tif a_locked and b_locked:\n\t\tprint(\"deadlocked!\")\n\t\tbreak\n\nprint(\"finished processing!\")\nprint(\"program 0 sent \" + str(a.sent) + \" values\")\nprint(\"program 1 sent \" + str(b.sent) + \" 
values\")\n","sub_path":"18.py","file_name":"18.py","file_ext":"py","file_size_in_byte":1955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"122644561","text":"#!/usr/bin/env python3\n\nimport json\nfrom os.path import exists as os_path_exists\nfrom sys import exit\nimport bottle\n\napp = bottle.default_app()\n\n\n@app.get('/ping')\ndef ping():\n version = app.config.get('version', 'version not found')\n return f\"{version}\\n\"\n\n\n@app.error(404)\ndef error404():\n return 'No anything here.'\n\n\n@app.error(500)\ndef error500():\n return 'Oops. Serverside error.'\n\n\nCONFIG_FILENAME = './config.json'\nif not os_path_exists(CONFIG_FILENAME):\n exit(f\"It seems {CONFIG_FILENAME} does not exist. Check it out.\")\nelse:\n with open(CONFIG_FILENAME) as fp:\n config = json.load(fp)\n app.config.update(config)\n\nbottle.run(app)\n","sub_path":"3/3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"562843526","text":"\n'''a =[1,2,5,6,4,645,6,34,54,5,5]\nmaxx = max(a)\nprint(maxx)\n'''\n'''a=23\nb=3\nc=a/b\nprint( round(c,3))\n'''\n#import random #生成随机数\n#from urllib import request\n#import os\n#a=random.random()\n#import webbrowser\n\n#os.system(\"C:\")\n#webbrowser.open(\"http://www.baidu.com\")\n\n'''fh1 =open(r'D:\\Atools\\Sublime3\\toor\\001.txt',\"a\") # r-- read 读取 w-- 写入 a--追加 fh1--文件句柄,用来控制文件\n#data = fh1.readlines()\ndata2 = fh1.write(\"假如时光倒流我\") #如果文件存在,就重新写入;如果文件不存在;新建文件再写入\n#print(data[3])\nprint(data2)\n\n#for i in fh1: #读取大文件\n # print(i)\n\nfh1.close() # 文件操作完毕后一定要关闭文件io资源\n'''\n##文件分为两大类:1.文本文件 2.二进制文件\nfh = open(r'D:\\Atools\\Sublime3\\toor\\007.jpg',\"rb\")\ndata = fh.read()\nprint(data)\n\nfh1 = open(r'D:\\Atools\\Sublime3\\toof\\003.jpg',\"wb\")\nfh1.write(data)\nfh.close()\nfh1.close()\n","sub_path":"toor/test06.py","file_name":"test06.py","file_ext":"py","file_size_in_byte":967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"264176","text":"\"\"\"\n有效字母的异位词\n\"\"\"\n\n\nclass Solution:\n def isAnagram(self, s: str, t: str) -> bool:\n # 第一种:字符串排序\n # if s and not t:\n # return False\n # if not t and s:\n # return False\n\n # def sort_str(x: str):\n # x_list = list(x)\n # x_num_list = [ord(i) for i in x_list]\n # x_num_list.sort()\n\n # return \"\".join([chr(j) for j in x_num_list])\n\n # return sort_str(s) == sort_str(t)\n\n # 第二种: 直接sort()\n # return sorted(s) == sorted(t)\n\n # 第三种: 使用set()减少循环规模,如果发现count(i)的值相等的话: 提前终止\n result = True\n set_s = set(s)\n if set_s == set(t):\n for i in set_s:\n result = result and (s.count(i) == t.count(i))\n if result == False:\n return result\n else:\n result = False\n\n return result\n","sub_path":"Week_02/242.有效字母的异位词.py","file_name":"242.有效字母的异位词.py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"92438183","text":"# -*- coding: utf-8 -*-\n\ncard_type = {\n 10: 'Hero Power',\n 3: 'Hero',\n 4: 'Minion',\n 5: 'Spell',\n 7: 'Weapon'\n}\n\ncard_class = {\n 1: 'Warrior',\n 3: 'Hunter',\n 5: 'Priest',\n 11:'Druid',\n 7: 'Shaman',\n 4: 'Rogue',\n 2: 'Paladin',\n 9: 'Warlock',\n 8: 'Mage',\n\n 0: 'Neutral'\n}\n\ncard_rarity = {\n 0: 'Free',\n 1: 'Common',\n 3: 'Rare',\n 4: 'Epic',\n 5: 'Legendary'\n}\n\nclass Card:\n def __init__(self, card_id, name, card_class_id, card_type_id, image, quality):\n \"\"\"\n Base class 
for all cards\n\n card_id : Unique identifier\n name : Card name\n classs : Card class, refer to the hero class or neutral\n type : Card type minion, spell, weapon, hero, hero, power\n image : Reference to the card image\n \"\"\"\n self.card_id = int(card_id)\n self.name = str(name).strip()\n self.classs = card_class[card_class_id]\n self.type = card_type[card_type_id]\n self.image = image\n self.quality = quality\n self.mean_color = self.image.meanColor()\n\n def show(self):\n if self.image:\n self.image.getPIL().show()\n\n def get_cost_img(self):\n return self.image.crop(8, 38, 50, 50)\n\n def get_img(self):\n return self.image.crop(45, 38, 105, 200)\n\n def get_rarity_img(self):\n return self.image.crop(95, 180, 15, 20)\n\n def __str__(self):\n return '{0} {1} {2}'.format(self.name, self.classs, self.type)","sub_path":"src/card.py","file_name":"card.py","file_ext":"py","file_size_in_byte":1515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"602979052","text":"from abc import ABC, abstractmethod\nfrom datetime import time\nimport hashlib\n\n\nclass busutil(ABC):\n def __init__(self, buscontext):\n self.buscontext = buscontext\n self.oldmessage_timestamp = None\n\n def Send(self, msg):\n \"\"\" Send a message ge to redis\"\"\"\n ts = hashlib.sha256(\"this is a test\".encode()).hexdigest()\n payload = {\n \"msg\":msg,\n \"ts\": ts\n }\n\n self.buscontext.Connection.hmset(self.buscontext.Address,payload)\n\n self.oldmessage_timestamp = ts\n\n def Read(self):\n \"\"\" Read a message and compair it with last message time stamp \"\"\"\n payload = self.buscontext.Connection.hgetall(self.buscontext.Address)\n msg = payload[b\"msg\"]\n timestamp = payload[b\"ts\"]\n isNew = self.oldmessage_timestamp != timestamp\n return (msg, isNew)\n\n def Run(self):\n \"\"\"\n Run throw an infinite loop that will read its current message execute it\n The implementation class must call send if it need to pass it to an other unit\n \"\"\"\n while True:\n data, isnew = self.Read()\n if (isnew):\n data = self.Execute(data,time.time())\n","sub_path":"Bus/Busutil.py","file_name":"Busutil.py","file_ext":"py","file_size_in_byte":1215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"512633902","text":"from selenium import webdriver\nimport time\n\nlink = \"http://suninjuly.github.io/huge_form.html\"\nwith webdriver.Chrome() as browser:\n browser.get(\"http://suninjuly.github.io/huge_form.html\")\n elements = browser.find_elements_by_css_selector(\"input[type='text']\")\n for element in elements:\n element.send_keys(\"Test\")\n button = browser.find_element_by_css_selector(\"button.btn\")\n button.click()\n time.sleep(30)\n # закрываем браузер после всех манипуляций\n browser.quit()\n","sub_path":"lesson6_step7.py","file_name":"lesson6_step7.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"66058091","text":"#!/usr/bin/env python3\n# File: prime.py\n# Name: D.Saravanan\n# Date: 09/08/2021\n\n\"\"\" Script that checks whether a number is prime \"\"\"\n\nimport math\n\ndef prime(number):\n \"\"\" function to check prime \"\"\"\n sqrt_number = math.sqrt(number)\n for n in range(2, int(sqrt_number) + 1):\n if (number/n).is_integer():\n return False\n return True\n\nprint(f'Check number(10,000,000) = {prime(10_000_000)}')\nprint(f'Check number(10,000,019) = {prime(10_000_019)}')\n\n#def primevect(number):\n# \"\"\" 
function to check prime with concept of vectorization \"\"\"\n# sqrt_number = math.sqrt(number)\n# numbers = range(2, int(sqrt_number) + 1)\n# for m in range(0, len(numbers), 5):\n# result = (number/numbers[m:(m+5)]).is_integer()\n# if any(result):\n# return False\n# return True\n#\n#print(f'Check number(10,000,000) = {primevect(10_000_000)}')\n#print(f'Check number(10,000,019) = {primevect(10_000_019)}')\n","sub_path":"prime.py","file_name":"prime.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"46313497","text":"import dotenv\nimport boto3\nimport os\n\n\ndef init_s3_credentials():\n dotenv.load_dotenv(dotenv.find_dotenv())\n access_key = os.getenv(\"ACCESS_KEY\")\n secret_key = os.getenv(\"SECRET_KEY\")\n client = boto3.client(\n \"s3\",\n aws_access_key_id=access_key,\n aws_secret_access_key=secret_key,\n # aws_session_token=SESSION_TOKEN,\n )\n return client\n","sub_path":"globalgiving/s3_interface.py","file_name":"s3_interface.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"412906073","text":"#!python3\nimport matplotlib.pyplot as plt\nimport os\nimport pandas as pd\nimport warnings\n\n# Ignore warnings\nwarnings.filterwarnings( \"ignore\")\n\n\ndef plot(x, y, xlabel, ylabel, filename):\n n_cols = len(datasets)\n n_rows = 1\n\n fig, axs = plt.subplots(n_rows, n_cols, figsize=(5*n_cols, 4.2*n_rows), sharey=True, sharex=True)\n fig.tight_layout()\n\n for i, dataset in enumerate(datasets):\n ax = axs[i]\n for l1 in l1_models:\n for l2 in l2_models:\n data = df[(df['dataset']==dataset) & (df['layer1']==l1) & (df['layer2']==l2)]\n ax.scatter(x=data[x], y=data[y], label=f'{l1}$\\mapsto${l2}', marker='x', alpha=0.7)\n\n # Axes labels\n ax.set_title(dataset)\n ax.set_xlabel(xlabel)\n ax.set_ylabel(ylabel)\n\n # Visuals\n ax.set_xscale('log')\n ax.set_yscale('log')\n ax.grid()\n ax.legend(ncol=1)\n\n fig.savefig(os.path.join(path, filename), bbox_inches='tight')\n\nif __name__ == \"__main__\":\n path = 'results'\n\n # Read csv file\n file = os.path.join(path, 'rmi_errors.csv')\n df = pd.read_csv(file, delimiter=',', header=0, comment='#')\n\n datasets = sorted(df['dataset'].unique())\n l1_models = sorted(df['layer1'].unique())\n l2_models = sorted(df['layer2'].unique())\n\n # Plot mean absolute error\n filename = 'rmi_errors-mean_absolute_error.pdf'\n plot('n_models', 'mean_ae', '# of segments', 'Mean absolute error', filename)\n\n # Plot mean absolute error\n filename = 'rmi_errors-median_absolute_error.pdf'\n plot('n_models', 'median_ae', '# of segments', 'Median absolute error', filename)\n\n # Plot max absolute error\n filename = 'rmi_errors-max_absolute_error.pdf'\n plot('n_models', 'max_ae', '# of segments', 'Maximum absolute error', filename)\n","sub_path":"scripts/plot_rmi_errors.py","file_name":"plot_rmi_errors.py","file_ext":"py","file_size_in_byte":1802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"566951049","text":"# https://atcoder.jp/contests/arc004/tasks/arc004_1\nimport sys\nsys.setrecursionlimit(2147483647)\nINF=float(\"inf\")\nMOD=10**9+7\ninput=lambda :sys.stdin.readline().rstrip()\ndef resolve():\n from itertools import combinations\n n=int(input())\n A=[tuple(map(int,input().split())) for _ in range(n)]\n C=combinations(A,2)\n ans=0\n for u,v in C:\n ans=max(ans,(u[0]-v[0])**2+(u[1]-v[1])**2)\n 
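# ans holds the largest squared pairwise distance, so ans**.5 is the longest Euclidean distance between any two points\n    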
print(ans**.5)\nresolve()\n","sub_path":"ARC004/a_the_longest_distance.py","file_name":"a_the_longest_distance.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"499096689","text":"\n\nimport A3C as A3C\nimport gym\n\n## import env\nenv = gym.make('Pendulum-v0')\nstate_dim = env.observation_space.shape[0]\naction_dim = env.action_space.shape[0]\na_bound = [env.action_space.low, env.action_space.high]\n\nprint('state_dim', state_dim, 'action_dim', action_dim, 'a_bound', a_bound)\n\n\n## train\ntrain_flag = True\ntrain_flag = False\npara = A3C.Para(env, # 环境参数包括state_dim,action_dim,abound,step,reset\n state_dim=state_dim, # 状态的维度\n action_dim=action_dim, # 动作的维度\n a_bound=a_bound, # 动作的上下界\n units_a=200, # 双层网络,第一层的大小\n units_c=100, # 双层网络,critic第一层的大小\n MAX_GLOBAL_EP=2000, # 全局需要跑多少轮数\n UPDATE_GLOBAL_ITER=10, # 多少代进行一次学习,调小一些学的比较快\n gamma=0.9, # 奖励衰减率\n ENTROPY_BETA=0.01, # 表征探索大小的量,越大结果越不确定\n LR_A=0.0001, # Actor的学习率\n LR_C=0.001, # Crtic的学习率\n sigma_mul=1,\n MAX_EP_STEP=200, # 控制一个回合的最长长度\n train=train_flag # 表示训练\n )\nRL = A3C.A3C(para)\nif para.train:\n RL.run()\nelse:\n # 1 stable\n # 2 random\n # 3 multi\n RL.display(1)\n#\n\n\n\n\n","sub_path":"3 A3C_con/1 Pendulum-v0/main_A3C.py","file_name":"main_A3C.py","file_ext":"py","file_size_in_byte":1416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"599264504","text":"#!/usr/bin/env python\n\nimport requests\nimport sys\nfrom iok.meta import KnowledgeGraph, AwesomeClient\n\n\ndef run_scrape(link: str, out_file: str, debug: bool = False) -> None:\n r = requests.get(link)\n graph = r.json()\n\n g = KnowledgeGraph(obj=graph, debug=debug)\n\n a = AwesomeClient(g)\n a.build_map()\n\n if out_file:\n a.write_to_file(out_file)\n else:\n print(a.build_str())\n","sub_path":"src/scraper/scrape.py","file_name":"scrape.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"89331343","text":"import json, os, pickle\nfrom pybrain.datasets import SupervisedDataSet\nfrom pybrain.tools.shortcuts import buildNetwork\n\nDATA_FILE = \"./data/boardsdata.raw.json\"\nNETWORK_FILE = \"board.network.raw\"\n\ndef getSupervisedDataSet():\n boardsDataset = SupervisedDataSet(16, 1)\n\n with open(DATA_FILE) as f:\n print(\"Loading board data...\")\n boardsRawDataset = json.load(f)\n print(\"Done.\")\n\n print(\"Adding data into dataset...\")\n for board in boardsRawDataset:\n boardsDataset.addSample(tuple(board[0]), (board[1],))\n print(\"Done\")\n\n return boardsDataset\n\ndef loadNetwork():\n if loadNetwork._cachedNetwork is None:\n network = buildNetwork(16,8,1)\n\n if os.path.isfile(NETWORK_FILE):\n network = pickle.load(open(NETWORK_FILE,\"r\"))\n network.sorted = False\n network.sortModules()\n\n loadNetwork._cachedNetwork = network\n return loadNetwork._cachedNetwork\nloadNetwork._cachedNetwork = None\n\ndef saveNetwork(network):\n pickle.dump(network, open(NETWORK_FILE, \"w\"))\n\n","sub_path":"problem4/pybrainHelpers.py","file_name":"pybrainHelpers.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"33961953","text":"import torch\nimport math\nfrom apex.multi_tensor_apply import multi_tensor_applier\n\nclass FusedACClip(torch.optim.Optimizer):\n\n \"\"\"Implements ACClip on paper\n\n Currently GPU-only. 
Requires Apex to be installed via\n ``pip install -v --no-cache-dir --global-option=\"--cpp_ext\" --global-option=\"--cuda_ext\" ./``.\n\n ACClip was been proposed in `Why are Adaptive Methods Good for Attention Models'\n\n Arguments:\n params (iterable): iterable of parameters to optimize or dicts defining\n parameter groups.\n lr (float, optional): learning rate. (default: 1e-3)\n betas (Tuple[float, float], optional): coefficients used for computing\n running averages of gradient and its square. (default: (0.9, 0.999))\n eps (float, optional): term added to the denominator to improve\n numerical stability. (default: 1e-8)\n weight_decay (float, optional): weight decay (L2 penalty) (default: 0)\n amsgrad (boolean, optional): whether to use the AMSGrad variant of this\n algorithm from the paper `On the Convergence of Adam and Beyond`_\n (default: False) NOT SUPPORTED in FusedAdam!\n adam_w_mode (boolean, optional): Apply L2 regularization or weight decay\n True for decoupled weight decay(also known as AdamW) (default: True)\n set_grad_none (bool, optional): whether set grad to None when zero_grad()\n method is called. (default: True)\n\n .. _Adam - A Method for Stochastic Optimization:\n https://arxiv.org/abs/1412.6980\n .. _On the Convergence of Adam and Beyond:\n https://openreview.net/forum?id=ryQu7f-RZ\n \"\"\"\n\n def __init__(self, params, lr=1e-3, bias_correction=True,\n betas=(0.9, 0.999), eps=1e-6, adam_w_mode=True,\n weight_decay=0., max_grad_norm=1.0, set_grad_none=True, clip_grad_norm=True):\n\n defaults = dict(lr=lr, bias_correction=bias_correction, betas=betas, eps=eps, weight_decay=weight_decay, max_grad_norm=max_grad_norm)\n super(FusedACClip, self).__init__(params, defaults)\n if multi_tensor_applier.available:\n import amp_C\n self.multi_tensor_l2norm=amp_C.multi_tensor_l2norm\n # Skip buffer\n self._dummy_overflow_buf = torch.cuda.IntTensor([0])\n self.multi_tensor_acclip = amp_C.multi_tensor_acclip\n else:\n raise RuntimeError('apex.optimizers.FusedADAM requires cuda extensions')\n self.adam_w_mode = 1 if adam_w_mode else 0\n self.set_grad_none = set_grad_none\n self._clip_grad_norm = clip_grad_norm\n\n def zero_grad(self):\n if self.set_grad_none:\n for group in self.param_groups:\n for p in group['params']:\n p.grad = None\n else:\n super(FusedACClip, self).zero_grad()\n\n def step(self, closure=None, grads=None, output_params=None, scale=None, grad_norms=None):\n \"\"\"Performs a single optimization step.\n\n Arguments:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n\n The remaining arguments are deprecated, and are only retained (for the moment) for error-checking purposes.\n \"\"\"\n if any(p is not None for p in [grads, output_params, scale, grad_norms]):\n raise RuntimeError('FusedAdam has been updated. 
Simply initialize it identically to torch.optim.Adam, and call step() with no arguments.')\n loss = None\n if closure is not None:\n loss = closure()\n\n # create separate grad lists for fp32 and fp16 params\n g_all_32, g_all_16 = [], []\n for group in self.param_groups:\n for p in group['params']:\n if p.grad is None:\n continue\n if p.dtype == torch.float32:\n g_all_32.append(p.grad.data)\n elif p.dtype == torch.float16:\n g_all_16.append(p.grad.data)\n else:\n raise RuntimeError('FusedLAMB only support fp16 and fp32.')\n\n device = self.param_groups[0][\"params\"][0].device\n g_norm_32, g_norm_16 = torch.zeros(1, device=device), torch.zeros(1, device=device)\n # compute grad norm for two lists\n if len(g_all_32) > 0:\n g_norm_32 = multi_tensor_applier(self.multi_tensor_l2norm,\n self._dummy_overflow_buf,\n [g_all_32], False)[0]\n if len(g_all_16) > 0:\n g_norm_16 = multi_tensor_applier(self.multi_tensor_l2norm,\n self._dummy_overflow_buf,\n [g_all_16], False)[0]\n\n # blend two grad norms to get global grad norm\n global_grad_norm = multi_tensor_applier(self.multi_tensor_l2norm,\n self._dummy_overflow_buf,\n [[g_norm_32, g_norm_16]],\n False)[0]\n self.global_grad_norm = global_grad_norm\n max_grad_norm = self.defaults['max_grad_norm']\n combined_scale = 1.0\n if self._clip_grad_norm and max_grad_norm > 0 and math.isfinite(global_grad_norm):\n if global_grad_norm > max_grad_norm:\n combined_scale = (global_grad_norm + 1e-6) / max_grad_norm\n\n for group in self.param_groups:\n bias_correction = 1 if group['bias_correction'] else 0\n beta1, beta2 = group['betas']\n\n # assume same step across group now to simplify things\n # per parameter step can be easily support by making it tensor, or pass list into kernel\n if 'step' in group:\n group['step'] += 1\n else:\n group['step'] = 1\n\n # create lists for multi-tensor apply\n g_16, p_16, m_16, v_16 = [], [], [], []\n g_32, p_32, m_32, v_32 = [], [], [], []\n\n for p in group['params']:\n if p.grad is None:\n continue\n if p.grad.data.is_sparse:\n raise RuntimeError('FusedAdam does not support sparse gradients, please consider SparseAdam instead')\n\n state = self.state[p]\n # State initialization\n if len(state) == 0:\n # Exponential moving average of gradient values\n state['exp_avg'] = torch.zeros_like(p.data)\n # Exponential moving average of squared gradient values, paper, used only alpha=1, not _sq actually\n state['exp_avg_sq'] = torch.ones_like(p.data) # Tau is initialized as 1\n\n if p.dtype == torch.float16:\n g_16.append(p.grad.data)\n p_16.append(p.data)\n m_16.append(state['exp_avg'])\n v_16.append(state['exp_avg_sq'])\n elif p.dtype == torch.float32:\n g_32.append(p.grad.data)\n p_32.append(p.data)\n m_32.append(state['exp_avg'])\n v_32.append(state['exp_avg_sq'])\n else:\n raise RuntimeError('FusedAdam only support fp16 and fp32.')\n\n if(len(g_16) > 0):\n multi_tensor_applier(self.multi_tensor_acclip,\n self._dummy_overflow_buf,\n [g_16, p_16, m_16, v_16],\n group['lr'],\n beta1,\n beta2,\n group['eps'],\n combined_scale,\n group['step'],\n bias_correction,\n group['weight_decay'])\n if(len(g_32) > 0):\n multi_tensor_applier(self.multi_tensor_acclip,\n self._dummy_overflow_buf,\n [g_32, p_32, m_32, v_32],\n group['lr'],\n beta1,\n beta2,\n group['eps'],\n combined_scale,\n group['step'],\n bias_correction,\n group['weight_decay'])\n\n\n return 
loss\n","sub_path":"apex/optimizers/fused_acclip.py","file_name":"fused_acclip.py","file_ext":"py","file_size_in_byte":8629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"552581510","text":"import numpy as np\nimport os\nfrom textwrap import dedent\n\niseed = 0\n\nmodel = \"mq5\"\ncarddir = \"./runs/out_{model}_13tev_v1/\".format(model=model)\nnevents=1000000\nnjobs_per_mass = 90\n# carddir = \"./runs/out_{model}_13tev_xsecscan/\".format(model=model)\n# nevents=10000\n# njobs_per_mass = 1\n# masses = [0.1,0.28,0.43,0.6,0.78,1.0,1.25,1.52,1.84,2.2,2.6,3.04,3.54,4.1,4.71,5.4,6.15,6.98,7.9,8.9,10.0,11.2,12.5,14.0,15.5,17.2,19.1,21.1,23.3,25.6,28.2,30.9,33.9,37.1,40.5,44.2,48.2,52.5,57.1,62.1,67.4,73.0,79.1,85.6,92.6,100.]\n# masses = [0.010, 0.020, 0.030, 0.050, 0.100, 0.200, 0.300, 0.400, 0.500, 0.700, 1.000, 1.400, 1.600, 1.800, 2.000, 3.000, 4.000, 4.500, 5.000, 7.000, 10.000]\nmasses = [1.0, 1.4, 1.6, 1.8, 2.0, 3.0, 3.5, 4.0, 4.5, 5.0, 7.0, 10.0, 14.0, 20.0, 28.0, 34.0, 40.0, 44.0, 48.0, 52.0, 58.0, 68.0, 80.0, 100.0]\n# masses = [2.0, 3.0, 3.5, 4.0, 4.5, 5.0]\n# masses = [round(10**x,3) for x in np.linspace(-2, 1.9, 1301)]\n\ndef get_card_mq(\n model,\n ncores=1,\n nevents=10000,\n mgoutputname=\"./runs/out_test_v1/test_v1\",\n carddir=\"./runs/out_test_v1/\",\n kappa=1.0,\n mass=25.0,\n unique_seeds=True,\n ):\n global iseed\n seedstr = \"\"\n if unique_seeds:\n iseed += 1\n seedstr = \"set run_card iseed {}\".format(iseed)\n\n template = dedent(\"\"\"\n set auto_update 0\n set run_mode 2\n set nb_core {ncores}\n\n {importstr}\n\n define p = p b b~\n define j = j b b~\n generate p p > {particle}+ {particle}-\n add process p p > {particle}+ {particle}- j\n\n output {mgoutputname} -nojpeg\n launch\n \n set param_card MASS {pid} {mass}\n {kappaparam}\n set run_card ebeam1 6500.0\n set run_card ebeam2 6500.0\n set run_card nevents {nevents}\n set run_card use_syst False\n\n set run_card ptl -1.0\n set run_card etal -1.0\n set run_card ickkw 0\n set run_card xqcut 0.0\n\n {seedstr}\n \"\"\")\n\n if model in [\"mq5\",\"mq\"]:\n return template.format(\n ncores=ncores,\n nevents=nevents,\n mass = mass,\n mgoutputname=mgoutputname,\n importstr = \"import model mq5_UFO-full\",\n particle = \"e\",\n pid = 11,\n kappaparam = \"set param_card TEMP 11 {kappa}\".format(kappa=kappa),\n seedstr=seedstr,\n )\n if model == \"mq4\":\n return template.format(\n ncores=ncores,\n nevents=nevents,\n mass = mass,\n mgoutputname=mgoutputname,\n importstr = \"import model_v4 mq4_UFO\",\n particle = \"mq\",\n pid = 300015,\n kappaparam = \"\",\n seedstr=seedstr,\n )\n\ndef write_card(s,fname,dryrun=False):\n if dryrun:\n print(\"Would write {}\".format(fname))\n return\n with open(fname,\"w\") as fh:\n fh.write(s)\n \n# carddir = \"./runs/out_{model}_13tev_xsecscan/\".format(model=model)\nos.system(\"mkdir -p {}\".format(carddir))\nkappa = 1.0\nfor mass in masses:\n for chunk in range(njobs_per_mass):\n tag = \"{model}_{mass}_{kappa}_chunk{chunk}\".format(model=model,mass=str(mass).replace(\".\",\"p\"),kappa=str(kappa).replace(\".\",\"p\"),chunk=chunk)\n mgoutputname = \"{carddir}/{tag}\".format(carddir=carddir,tag=tag)\n cardname = \"{carddir}/proc_card_{tag}.dat\".format(carddir=carddir,tag=tag)\n buff = get_card_mq(model=model,ncores=1,mgoutputname=mgoutputname,carddir=carddir, mass=mass,kappa=kappa,nevents=nevents)\n 
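# write one proc card per (mass, chunk) pair; passing dryrun=True only prints the would-be filename instead of writing the card\n        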
write_card(buff,cardname,dryrun=False)\n\n\n","sub_path":"madgraphDY/write_cards.py","file_name":"write_cards.py","file_ext":"py","file_size_in_byte":3595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"361034149","text":"contas = []\r\nsaldo = []\r\nvlrdep = int()\r\nvlrtransf = int()\r\nvlrsaque = int()\r\nvl = int()\r\nvlp = int()\r\npos = int()\r\ntentlogin = int(3)\r\nver = str('S')\r\nverifconta = str()\r\nvoltarsaque = str('S')\r\n\r\n\r\nprint('=' * 30)\r\nprint('{:^30}'.format('NICOLAS BANK'))\r\nprint('=' * 30)\r\n\r\n\r\nrespcliente = str(input('Você já é nosso cliente do banco? S / N.'))\r\n\r\nif respcliente == 'N':\r\n desejocad = str(input('Deseja cadastrar uma conta? S/N'))\r\n\r\n if desejocad == 'S':\r\n while desejocad == 'S':\r\n\r\n numconta = str(input('Digite o número da conta a ser cadastrada.'))\r\n\r\n if numconta in contas:\r\n print( 'Parece que você já tem uma conta.' )\r\n ver = str(input('deseja entra?'))\r\n if ver == 'S':\r\n desejocad = 'N'\r\n respcliente = 'S'\r\n\r\n else:\r\n contas.append(numconta)\r\n saldo.append(0)\r\n print(contas,saldo)\r\n desejocad = str( input( 'Desja cadastrar outra conta?' ) )\r\n if desejocad == 'N':\r\n desejoent = str(input('Deseja entrar no sistema do banco?'))\r\n if desejoent == 'S':\r\n desejocad = 'N'\r\n respcliente = 'S'\r\n else:\r\n desejocad = 'S'\r\n\r\n if desejocad == 'N':\r\n print('NICOLAS BANK lhe deseja sucesso.')\r\n\r\n\r\nwhile respcliente == 'S':\r\n while verifconta not in contas or tentlogin == 0:\r\n verifconta = str( input( 'Digita sua conta. Você ainda possui {} dentativas'.format( tentlogin ) ) )\r\n print( 'Login efetuado com sucesso.' )\r\n while verifconta in contas and voltarsaque == 'S':\r\n\r\n print('Qual peração vc deseja realizar?')\r\n operacao = int(input('Digite 1 para saldo, 2 para depósito, 3 para saque, 4 para transferencia ou 5 para sair:'))\r\n\r\n if operacao == 1:\r\n pos = contas.index(verifconta)\r\n print('Seu saldo é {}'.format( saldo[pos]))\r\n\r\n\r\n if operacao == 2:\r\n vlrdep = bool(input('Digite o valor a ser depositado:'))\r\n pos = contas.index(verifconta)\r\n s1 = saldo[pos]\r\n saldo.insert(s1, s1+vlrdep)\r\n print(f'Seu saldo é {saldo[s1]}.')\r\n respcliente = 'S'\r\n\r\n while operacao == 3:\r\n print('Você optou para opção de saque')\r\n vlrsaque = int(input('Quanto vc deseja sacar?'))\r\n pos = contas.index(verifconta)\r\n s1 = saldo[pos]\r\n if vlrsaque > saldo[pos]:\r\n print('Saldo insuficiente. Deseja tentar novamente?')\r\n\r\n if vlrsaque <= saldo[pos]:\r\n saldo.insert(s1, s1-vlrsaque)\r\n print(f'Seu novo saldo é {saldo[1]}.')\r\n\r\n opnovosaque = str(input('Deseja realizar um novo saque? S/N.'))\r\n if opnovosaque == 'S':\r\n operacao = 3\r\n else:\r\n voltarsaque = 'S'\r\n break\r\n\r\n\r\n if operacao == 4:\r\n contatransf = int(input('Digite a conta para transferir:'))\r\n vlrtransf = int(input('Digite o valor a ser transferido:'))\r\n pos = contas.index(verifconta)\r\n pos2 = contas.index(contatransf)\r\n s1 = saldo[pos]\r\n s2 = saldo[pos2]\r\n if s1 < vlrdep:\r\n print('Transferencia impossível:')\r\n else:\r\n for c in range(0, len(contas)):\r\n if contatransf == c:\r\n p1 = pos\r\n p2 = pos2\r\n saldo.insert(s1, s1-vlrtransf)\r\n saldo.insert(s2, s2+vlrtransf)\r\n\r\n\r\n\r\n else:\r\n tentlogin -= 1\r\n if tentlogin == 0:\r\n print('Erro ao logar. 
Tente novamente em 30 minutos.')\r\n verifconta = ''\r\n break\r\n\r\n\r\n\r\n","sub_path":"ATM 2.py","file_name":"ATM 2.py","file_ext":"py","file_size_in_byte":3960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"195230178","text":"'''\nCreated on Dec 2, 2017\n\n@author: flyn\n'''\n# windows temporary fix\nimport sys, os\nsys.path.append(\"%s/utility\" %os.getcwd())\n\nimport numpy as np\nimport multiprocessing as mp\nimport tensorflow as tf\nimport json\nimport base64\nimport ctypes\nimport inference_config\nimport time\nimport socket\n\ndef check_maya_connection(host, port, logger):\n clientsocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n try:\n clientsocket.connect((host, port))\n except socket.error as e:\n logger.info(\"maya connection failed!!!\")\n logger.debug(\"maya connection failed!!! {0}\".format(e))\n return \n else:\n return True\n \n\n\ndef process_handler(mode,\n logger):\n live_process = []\n manager = mp.Manager()\n lifetime_end = manager.Value(ctypes.c_char_p, False)\n input_image_queue, output_image_queue = [mp.Queue() for _ in range(2)]\n img_cap_process = mp.Process(target=local_inference,\n args=(input_image_queue,\n output_image_queue,\n lifetime_end,\n mode,\n logger,))\n \n live_process.append(img_cap_process)\n logger.debug(\"starting local inference process\")\n img_cap_process.start()\n \n return input_image_queue, output_image_queue, live_process, lifetime_end\n\ndef local_inference(input_image_queue,\n output_image_queue,\n lifetime_end,\n mode,\n logger,\n *args):\n \n logger.info(\"local inference started\")\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n with tf.Session(config=config) as sess:\n logger.debug(\"local inference session started with {0}\".format(mode))\n if mode == \"facades\":\n output_file = inference_config.output_file\n model = inference_config.model\n else:\n model = inference_config.maps_model\n output_file = inference_config.output_maps_file \n saver = tf.train.import_meta_graph(model + \"/export.meta\")\n saver.restore(sess, model + \"/export\")\n \n input_vars = json.loads(tf.get_collection(\"inputs\")[0].decode('utf-8'))\n output_vars = json.loads(tf.get_collection(\"outputs\")[0].decode('utf-8'))\n \n _input = tf.get_default_graph().get_tensor_by_name(input_vars[\"input\"])\n output = tf.get_default_graph().get_tensor_by_name(output_vars[\"output\"])\n while True:\n if not input_image_queue.empty():\n start = time.time()\n input_file = input_image_queue.get()\n logger.debug(\"local inference get {0}\".format(input_file))\n # if input_file:\n with open(input_file, \"rb\") as f:\n input_data = f.read()\n \n input_instance = dict(_input=base64.urlsafe_b64encode(input_data).decode(\"ascii\"), key=\"0\")\n input_instance = json.loads(json.dumps(input_instance))\n \n input_value = np.array(input_instance[\"_input\"])\n output_value = sess.run(output, feed_dict={_input: np.expand_dims(input_value, axis=0)})[0]\n \n output_instance = dict(output=output_value.decode(\"ascii\"), key=\"0\")\n \n b64data = output_instance[\"output\"]\n b64data += \"=\" * (-len(b64data) % 4)\n output_data = base64.urlsafe_b64decode(b64data.encode(\"ascii\"))\n \n with open(output_file, \"wb\") as f:\n logger.debug(\"local inference open file {0}\".format(output_file))\n f.write(output_data)\n \n \n output_image_queue.put(output_file)\n logger.debug(\"local inference sent {0}\".format(output_file))\n logger.info(time.time() - start)\n \n if lifetime_end.value:\n 
logger.debug(\"exiting local inference\")\n break\n","sub_path":"ui/utility/inference_utility.py","file_name":"inference_utility.py","file_ext":"py","file_size_in_byte":4123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"342095359","text":"# There is an N by M matrix of zeroes. Given N and M, write a function to count the number of ways of starting at the\n# top-left corner and getting to the bottom-right corner. You can only move right or down.\n#\n# For example, given a 2 by 2 matrix, you should return 2, since there are two ways to get to the bottom-right:\n#\n# Right, then down\n# Down, then right\n# Given a 5 by 5 matrix, there are 70 ways to get to the bottom-right.\n\nimport unittest\n\n\ndef solve_dp(n: int, m: int) -> int:\n mat = [[0 for _ in range(m)] for _ in range(n)]\n for i in range(n):\n mat[i][0] = 1\n for j in range(m):\n mat[0][j] = 1\n for i in range(1, n):\n for j in range(1, m):\n mat[i][j] = mat[i - 1][j] + mat[i][j - 1]\n\n return mat[-1][-1]\n\ndef solve_reccursive(n: int, m: int) -> int:\n if(n == 1 or m == 1):\n return 1\n else:\n return solve_reccursive(n - 1, m) + solve_reccursive(n, m - 1)\n\nclass TestReportGeneratorConstruction(unittest.TestCase):\n\n def test_solution(self):\n n, m = 5, 5\n self.assertEquals(solve_dp(n, m), solve_reccursive(n, m))\n\n\nif __name__ == '__main__':\n unittest.main()","sub_path":"Daily_Coding_Problem/2019-08-27.py","file_name":"2019-08-27.py","file_ext":"py","file_size_in_byte":1158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"285950546","text":"import unittest\nimport googleapiclient\nimport pipe.src.harvest_gmail as p\n\n\nclass testHarvest(unittest.TestCase):\n\n def test_get_credentials(self):\n harvest = p.HarvestGmail()\n result = harvest.get_credentials()\n self.assertTrue(isinstance(result, googleapiclient.discovery.Resource))\n\n def test_constructor(self):\n harvest = p.HarvestGmail()\n self.assertTrue(isinstance(harvest, p.HarvestGmail))\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/test_harvest.py","file_name":"test_harvest.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"478821627","text":"from threading import Thread\nfrom library.utilities.quart import Quart, jsonify, request, session\n# from quart import Quart, jsonify, request, session\n# import quart_cors\nfrom library.utilities import quart_cors\n\nfrom library.procedures import webprocedure\nfrom library.utilities import userhelper\nimport discord\nimport asyncio\nimport os\n\n\nclass API(Thread):\n\n def __init__(self, bot):\n Thread.__init__(self)\n self.daemon = True\n self.bot = bot\n # Initiate API\n self.loop = bot.loop\n self.app = Quart(__name__)\n self.app = quart_cors.cors(\n self.app, allow_origin=\"https://mctdiscord.azurewebsites.net\") # https://mct.funergydev.com * https://mctdiscord.azurewebsites.net\n\n self.flow = webprocedure.WebProcedure(bot)\n self.userhelper = userhelper.UserHelper(bot)\n\n self.start()\n\n def run(self):\n @self.app.route('/api/v1/modules')\n async def modules():\n module_dict = {}\n\n for module in self.flow.modules_list:\n role = await self.userhelper.get_role(uid=module)\n module_dict[role.name] = '{}'.format(module)\n\n return jsonify(modules=module_dict)\n\n @self.app.route('/api/v1/user/')\n async def get_user(userid):\n user = discord.utils.get(\n 
discord.utils.get(self.bot.guilds, name='MCT').members, id=int(userid))\n return jsonify(name=user.name), 200\n\n @self.app.route('/api/v1/user/hash/')\n async def get_hashed_user(userid):\n uid = await self.flow.get_procedure(userid)\n if not uid:\n return jsonify(status='Hash not found'), 500\n user = discord.utils.get(\n discord.utils.get(self.bot.guilds, name='MCT').members, id=int(uid))\n return jsonify(name=user.name), 200\n\n @self.app.route('/api/v1/user_count')\n async def user_count():\n return jsonify(count=discord.utils.get(self.bot.guilds, name='MCT').member_count)\n\n @self.app.route('/api/v1/user/hash//roles', methods=['POST', 'GET'])\n async def give_user_roles(userid):\n if request.method == 'POST':\n data = await request.get_json()\n uid = await self.flow.get_procedure(userid)\n if not uid:\n return jsonify(status='Hash not found'), 500\n user = discord.utils.get(\n discord.utils.get(self.bot.guilds, name='MCT').members, id=int(uid))\n\n await self.userhelper.remove_roles(user)\n print(data)\n for role in data['roles']:\n if role not in self.userhelper.role_whitelist:\n await self.userhelper.add_role(user, uid=int(role))\n\n await user.send('I have given you access to the modules you have requested.')\n await self.flow.end_procedure(userid)\n return jsonify(roles_given=data['roles']), 200\n elif request.method == 'GET':\n return jsonify(roles=100), 200\n\n # self.app.run(host=\"0.0.0.0\", port=5000,\n # debug=False, use_reloader=True, loop=self.loop)\n self.app.run(host=\"0.0.0.0\", port=443,\n debug=False, use_reloader=True, loop=self.loop, keyfile='{}/certs/privkey.pem'.format(self.bot.root_path),\n certfile='{}/certs/cert.pem'.format(self.bot.root_path))\n","sub_path":"library/services/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":3504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"265314114","text":"# -*- coding: utf-8 -*-\n\nfrom pymongo import *\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html\n\n\nclass LibrarycrawlPipeline(object):\n def __init__(self):\n mongodb = MongoClient('localhost', 27017)\n db = mongodb.books\n self.collection = db.books\n self.errorCollection = db.errors\n def process_item(self, item, spider):\n try:\n self.collection.insert(item)\n except:\n errData = {'_id':item['_id'], 'ISBN':item['ISBN']}\n self.errorCollection.insert(errData)\n return item\n","sub_path":"libraryCrawl/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"310291558","text":"from odoo import models\n\n\nclass ReportBomStructure(models.AbstractModel):\n\n _inherit = 'report.mrp.report_bom_structure'\n\n def _get_bom(self, bom_id=False, product_id=False, line_qty=False, line_id=False, level=False):\n\n bom = self.env['mrp.bom'].browse(bom_id)\n bom_quantity = line_qty\n\n lines = super(ReportBomStructure, self)._get_bom(bom_id=bom_id, product_id=product_id, line_qty=line_qty, line_id=line_id, level=level)\n\n if len(bom.sub_products) == 1:\n byproduct = bom.sub_products[0].product_id\n lines['product_byproduct'] = byproduct\n lines['bom_prod_name_byproduct'] = byproduct.display_name\n lines['price_byproduct'] = byproduct.uom_id._compute_price(byproduct.standard_price, bom.product_uom_id) * bom_quantity\n\n unit_cost = 
bom.product_tmpl_id.standard_price\n for byproduct in bom.sub_products.mapped('product_id'):\n unit_cost += byproduct.standard_price\n for component in bom.bom_line_ids.mapped('product_id'):\n unit_cost += component.standard_price\n lines['unit_cost'] = unit_cost\n\n return lines\n","sub_path":"bom_spread_cost/abstracts.py","file_name":"abstracts.py","file_ext":"py","file_size_in_byte":1169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"373067618","text":"#!/usr/bin/python\n\nimport sys, getopt, re\nimport mybasic\n\ndef main(sampPattern='(.*)', inFileN='', outFileN=''):\n\tinFile = sys.stdin\n\tif inFileN != '':\n\t\tinFile = open(inFileN, 'r')\n\toutFile = sys.stdout\n\tif outFileN != '':\n\t\toutFile = open(outFileN, 'w')\n\t\n\tfor line in inFile:\n\t\tdataL = line[:-1].split('\\t')\n#\t\tsampN = re.match(sampPattern, dataL[0]).group(1)\n\t\t(sid, postfix) = re.match(sampPattern, dataL[0]).groups()\n\t\tif postfix != 'T':\n\t\t\tsampN = sid + '_' + postfix\n\t\telse:\n\t\t\tsampN = sid\n\t\tif float(dataL[1]) < 0:\n\t\t\toutFile.write('%s\\tND\\tND\\n' % sampN)\n\t\telse:\n\t\t\toutFile.write('%s\\t%s\\t%s\\n' % (sampN, int(float(dataL[1])*100), int(float(dataL[2])*100)))\n\toutFile.flush()\n\toutFile.close()\n\tinFile.close()\n\nif __name__ == '__main__':\n\toptL, argL = getopt.getopt(sys.argv[1:],'i:o:t',[])\n\toptH = mybasic.parseParam(optL)\n\n\tinFile = ''\n\tif '-i' in optH:\n\t\tinFile = optH['-i']\n\toutFile = ''\n\tif '-o' in optH:\n\t\toutFile = optH['-o']\n\n\tmain('(.*)_([TXC].{,2})_[NSKT]{2}', inFile, outFile)\n","sub_path":"Integration/prepDB_xsq_purity.py","file_name":"prepDB_xsq_purity.py","file_ext":"py","file_size_in_byte":996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"225135170","text":"\r\n\r\nimport requests\r\nimport urllib\r\nimport os.path\r\nimport pandas as pd\r\nfrom time import sleep\r\nfrom MongodbConnector import mongodbConnector\r\nfrom pget.down import Downloader\r\n\r\nroot_folder = os.path.abspath(os.path.join(os.path.dirname('__file__'),\r\n 'datagov_source_datasets', 'local'))\r\n\r\ntry:\r\n mongo = mongodbConnector()\r\nexcept:\r\n raise\r\n\r\n\r\ndef fetchGovData(domain, dataset_count, datasets_colln):\r\n url = \"http://catalog.data.gov/api/3/action/package_list?q={0}&rows=1000&start=\"\r\n\r\n start_val = 0\r\n\r\n while True:\r\n req_url = url.format(domain) + str(start_val)\r\n print(req_url)\r\n\r\n r = requests.get(req_url)\r\n json_resp = r.json()\r\n results = json_resp[\"result\"][\"results\"]\r\n\r\n if len(results) > 0:\r\n mongo.bulk_mongo_insert(datasets_colln, results)\r\n else:\r\n print(\"NO MORE DATASETS TO DOWNLOAD.\")\r\n break\r\n\r\n start_val += 1000\r\n\r\n print(\"Available datasets in this domain are :: \" +\r\n str(json_resp[\"result\"][\"count\"]))\r\n\r\n\r\ndef extractFromJSON(domain, datasets_colln):\r\n datasets_cursor = datasets_colln.find()\r\n\r\n print(datasets_cursor)\r\n #files_download = 10\r\n for dataset in datasets_cursor:\r\n \"\"\"if files_download == 0:\r\n break\r\n files_download = files_download - 1\"\"\"\r\n \r\n dataset_name = dataset[\"name\"]\r\n print(\">>>> \" + dataset_name)\r\n \r\n available_formats = {}\r\n res_format = None\r\n res_urls = {}\r\n other_formats = {}\r\n for i, res in enumerate(dataset[\"resources\"]):\r\n available_formats[i] = {\r\n \"format\": res[\"format\"],\r\n \"url\" : res[\"url\"],\r\n \"filename\" : res[\"id\"]\r\n }\r\n\r\n 
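# choose a download format for this dataset, preferring JSONL, then CSV, then JSON (see the loop below)\r\n        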
#print(available_formats)\r\n for a in available_formats.values():\r\n if 'JSONL' in a[\"format\"]:\r\n res_format = 'JSONL'\r\n break\r\n elif 'jsonl' in a[\"format\"]:\r\n res_format = 'jsonl'\r\n break\r\n elif 'CSV' in a[\"format\"]:\r\n res_format = 'CSV'\r\n break\r\n elif 'csv' in a[\"format\"]:\r\n res_format = 'csv'\r\n break\r\n elif 'JSON' in a[\"format\"]:\r\n res_format = 'JSON'\r\n break\r\n elif 'json' in a[\"format\"]:\r\n res_format = 'json'\r\n break\r\n \"\"\"elif 'XLS' in a[\"format\"]:\r\n res_format = 'XLS'\r\n break\r\n elif 'xls' in a[\"format\"]:\r\n res_format = 'xls'\r\n break\r\n else:\r\n other_formats[a[\"url\"]] = a[\"filename\"]\r\n with open(\"fileformats.txt\", \"a+\") as fileformats:\r\n fileformats.write(a[\"filename\"] + ',' + a[\"format\"] + ',' \\\r\n + a[\"url\"] + '\\n')\"\"\"\r\n\r\n if res_format is None:\r\n continue\r\n #res_urls = other_formats\r\n else:\r\n #continue\r\n for a in available_formats.values():\r\n if res_format in a[\"format\"]:\r\n res_urls[a[\"url\"]] = a[\"filename\"]\r\n\r\n os.chdir(root_folder)\r\n\r\n if not os.path.isdir(dataset_name):\r\n os.mkdir(dataset_name)\r\n os.chdir(dataset_name)\r\n\r\n print(str(res_format) + \" :: \" + str(len(res_urls)))\r\n\r\n for res_url in res_urls.keys():\r\n if res_format is None:\r\n file_name = res_urls[res_url]\r\n else:\r\n file_name = res_urls[res_url] + \".\" + (res_format).lower()\r\n\r\n print(\"Downloading... \" + file_name)\r\n #print(\"... from >> \" + res_url)\r\n try:\r\n if not os.path.isfile(file_name):\r\n sleep(1)\r\n downloader = Downloader(res_url, file_name, 8)\r\n downloader.start()\r\n downloader.wait_for_finish()\r\n \r\n \"\"\"resp = urllib.request.urlopen(res_url)\r\n resp_content = resp.read()\r\n print(\"Writing...\")\r\n with open(file_name, 'wb') as res_file:\r\n res_file.write(resp_content)\"\"\"\r\n except:\r\n print(\"Error @ \" + dataset_name)\r\n continue\r\n \r\n\r\ndef main():\r\n\r\n datasets_colln = mongo.initialize_mongo('health')\r\n\r\n #domains = [\"health\", \"finance\", \"manufacturing\", \"consumer\", \"climate\", \r\n # \"local\", \"energy\"]\r\n domains = [\"local\"]\r\n\r\n for d in domains:\r\n #fetchGovData(d, 1000, datasets_colln)\r\n extractFromJSON(d, datasets_colln)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","sub_path":"datagov_datasets.py","file_name":"datagov_datasets.py","file_ext":"py","file_size_in_byte":4845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"619155240","text":"import tornado\nfrom tornado import gen\nfrom tornado.testing import AsyncTestCase\n\nfrom pubnub.pubnub_tornado import PubNubTornado, SubscribeListener\nfrom tests.helper import pnconf_sub_copy\nfrom tests.integrational.tornado.tornado_helper import connect_to_channel, disconnect_from_channel\nfrom tests.integrational.tornado.vcr_tornado_decorator import use_cassette_and_stub_time_sleep\n\n\nclass TestPubNubAsyncWhereNow(AsyncTestCase):\n def setUp(self):\n super(TestPubNubAsyncWhereNow, self).setUp()\n self.pubnub = PubNubTornado(pnconf_sub_copy(), custom_ioloop=self.io_loop)\n\n @use_cassette_and_stub_time_sleep(\n 'tests/integrational/fixtures/tornado/where_now/single_channel.yaml',\n filter_query_parameters=['uuid', 'pnsdk', 'l_pres'])\n @tornado.testing.gen_test(timeout=15)\n def test_where_now_single_channel(self):\n ch = \"where-now-tornado-ch\"\n uuid = \"where-now-tornado-uuid\"\n self.pubnub.config.uuid = uuid\n\n yield connect_to_channel(self.pubnub, ch)\n yield gen.sleep(10)\n 
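# after the presence heartbeat has had time to register, ask the server which channels this uuid currently occupies\n        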
env = yield self.pubnub.where_now() \\\n .uuid(uuid) \\\n .future()\n\n channels = env.result.channels\n\n assert len(channels) == 1\n assert channels[0] == ch\n\n yield disconnect_from_channel(self.pubnub, ch)\n self.pubnub.stop()\n self.stop()\n\n @use_cassette_and_stub_time_sleep(\n 'tests/integrational/fixtures/tornado/where_now/multiple_channels.yaml',\n filter_query_parameters=['uuid', 'pnsdk', 'l_pres'])\n @tornado.testing.gen_test(timeout=15)\n def test_multiple_channels(self):\n ch1 = \"where-now-tornado-ch1\"\n ch2 = \"where-now-tornado-ch2\"\n uuid = \"where-now-tornado-uuid\"\n self.pubnub.config.uuid = uuid\n\n callback_messages = SubscribeListener()\n self.pubnub.add_listener(callback_messages)\n self.pubnub.subscribe().channels(ch1).execute()\n yield callback_messages.wait_for_connect()\n\n self.pubnub.subscribe().channels(ch2).execute()\n yield gen.sleep(5)\n\n env = yield self.pubnub.where_now() \\\n .uuid(uuid) \\\n .future()\n\n channels = env.result.channels\n\n assert len(channels) == 2\n assert ch1 in channels\n assert ch2 in channels\n\n yield disconnect_from_channel(self.pubnub, [ch1, ch2])\n self.pubnub.stop()\n self.stop()\n","sub_path":"tests/integrational/tornado/test_where_now.py","file_name":"test_where_now.py","file_ext":"py","file_size_in_byte":2406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"285286242","text":"def printTable(lst):\n print()\n #find longest string in each column for the column widths\n colWidths = [0]*len(lst)\n for x in range(len(lst)):\n for i in range(len(lst[x])):\n if len(lst[x][i]) > colWidths[x]:\n colWidths[x] = len(lst[x][i])\n\n # find rjust() length by getting largest column width\n colWidths.sort()\n width = colWidths[-1]\n\n #Now print the table\n for x in range(len(lst[0])):\n for i in range(len(lst)):\n print(lst[i][x].rjust(width), end='')\n print() \n print()\n\n\ntableData = [['apples', 'oranges', 'cherries', 'banana'],\n ['Alice', 'Bob', 'Carol', 'David'],\n ['dogs', 'cats', 'moose', 'goose']]\n\nprintTable(tableData)","sub_path":"CS160/ChrisHaley/Lab3/table_printer.py","file_name":"table_printer.py","file_ext":"py","file_size_in_byte":741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"471637317","text":"from bot_logger import logger\nimport discord\nimport json\n\n\nclass PokemonEvent:\n \"\"\"Stores Pokemon Events\"\"\"\n\n def __init__(self, bot, config_data):\n self.bot = bot\n self.config_data = config_data\n self.happy_hour = False\n self.night_vendor = False\n self.event_data = self.load_event_file()\n\n def load_event_file(self):\n \"\"\"\n Checks to see if there's a valid events.json file and loads it\n \"\"\"\n try:\n with open('events.json') as events:\n return json.load(events)\n except FileNotFoundError:\n msg = (\"FileNotFoundError: \"\n \"'events.json' file was not found\")\n logger.error(msg)\n raise Exception(msg)\n except Exception as e:\n print(\"An error has occured. 
See error.log.\")\n logger.error(\"Exception: {}\".format(str(e)))\n\n async def _send_event_start_msg(self, msg):\n \"\"\"\n Sends a message to the channel that an event has started\n\n @param msg - event message to tell the server\n \"\"\"\n pokemon_channel = ''\n for channel in self.bot.get_all_channels():\n if channel.name == \"event\":\n pokemon_channel = channel.id\n pokemon_channel_obj = self.bot.get_channel(pokemon_channel)\n em = discord.Embed(title=\"Event Started\",\n description=msg,\n colour=0x00FF00)\n await self.bot.send_message(pokemon_channel_obj,\n embed=em)\n\n async def _send_event_end_msg(self, msg):\n \"\"\"\n Sends a message to the channel that an event has ended\n\n @param msg - event message to tell the server\n \"\"\"\n pokemon_channel = ''\n for channel in self.bot.get_all_channels():\n if channel.name == \"event\":\n pokemon_channel = channel.id\n pokemon_channel_obj = self.bot.get_channel(pokemon_channel)\n em = discord.Embed(title=\"Event Ended\",\n description=msg,\n colour=0xFF0000)\n await self.bot.send_message(pokemon_channel_obj,\n embed=em)\n\n async def activate_happy_hour(self):\n \"\"\"\n Activates happy hour event\n \"\"\"\n self.happy_hour = True\n happy_hour_event = self.event_data[\"happy_hour_event\"]\n msg = (\"**Happy hour has started! During happy \"\n \"hour, the catch cooldown has \"\n \"been cut in half, and the shiny rate is {}x higher. \"\n \"Good luck @everyone!**\"\n \"\".format(happy_hour_event[\"shiny_rate_multiplier\"]))\n await self._send_event_start_msg(msg)\n\n async def deactivate_happy_hour(self):\n \"\"\"\n Deactivates happy hour event\n \"\"\"\n self.happy_hour = False\n msg = \"**Happy hour has ended.**\"\n await self._send_event_end_msg(msg)\n\n async def activate_night_vendor(self):\n \"\"\"\n Activates night vendor event\n \"\"\"\n self.night_vendor = True\n msg = (\"**The Night Vendor has arrived! Use the `{0}vendor i` \"\n \"command for info on what's he's trading. If you're \"\n \"interested in the trade, type `{0}vendor t` to make. \"\n \"the trade. 
If you don't like the roll, type `{0}vendor r` \"\n \"to re-roll what the vendor has for sale.**\"\n \"\".format(self.config_data[\"cmd_prefix\"]))\n await self._send_event_start_msg(msg)\n\n async def deactivate_night_vendor(self):\n \"\"\"\n Deactivates night vendor event\n \"\"\"\n self.night_vendor = False\n msg = (\"**The night vendor has vanished.**\")\n await self._send_event_end_msg(msg)\n","sub_path":"cogs/modules/pokemon_event.py","file_name":"pokemon_event.py","file_ext":"py","file_size_in_byte":3806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"5518728","text":"import numpy as np\nfrom scipy import linalg\n\nfrom matplotlib import pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\n\ndef plot(ax1, ax2, rest, vertices, neighbors, handles, scores):\n edges = get_edges(vertices, neighbors)\n handle_points = np.array(list(handles.values()))\n\n ax1.clear()\n ax1.set_axis_off()\n\n ax1.set_title('Point Visualization')\n ax1.plot(edges[0], edges[1], edges[2], 'b--', linewidth=0.2)\n # ax1.scatter(rest[:,0], rest[:,1], rest[:,2], 'g.')\n ax1.scatter(vertices[:,0], vertices[:,1], vertices[:,2], c='c')\n ax1.scatter(handle_points[:,0], handle_points[:,1], handle_points[:,2], c='r')\n\n ax2.set_title('Objective Function')\n ax2.plot(scores, 'r')\n\n plt.pause(0.01)\n\n\ndef make_R(c):\n return linalg.expm(make_Cx(c))\n\n\ndef make_Cx(c):\n return np.array([[ 0.0, -c[2], c[1]],\n [ c[2], 0.0, -c[0]],\n [-c[1], c[0], 0.0]])\n\n\ndef energy(rest, vertices, c_list, neighbors, handles, alpha):\n result = 0.0\n\n for i, p_i_neighbors in neighbors.items():\n R = make_R(c_list[i])\n\n for j in p_i_neighbors:\n u = np.dot(R, rest[i] - rest[j])\n v = vertices[i] - vertices[j]\n\n result += sq_norm(u - v)\n\n for i, h_i in handles.items():\n result += alpha * sq_norm(vertices[i] - h_i)\n\n return result\n\n\ndef sq_norm(x):\n return np.inner(x, x)\n\n\ndef get_edges(vertices, neighbors):\n edges = [[], [], []]\n seen = set()\n\n for u, u_neighbors in neighbors.items():\n for v in u_neighbors:\n if (u, v) in seen:\n continue\n\n seen.add((u, v))\n\n for i in range(3):\n edges[i].append(vertices[u][i])\n edges[i].append(vertices[v][i])\n edges[i].append(float('NaN'))\n\n return edges\n\n\ndef initialize(n):\n rest = [[(i, j, 0.0) for j in range(n)] for i in range(n)]\n rest = np.array(rest).reshape((-1, 3))\n\n vertices = [[(i, j, 0.0) for j in range(n)] for i in range(n)]\n vertices = np.array(vertices).reshape((-1, 3))\n\n c_list = [np.array([1e-5, 1e-5, 1e-5]) for _ in range(n * n)]\n\n handles = {0: (0.0, 0.0, -0.5),\n n-1: (0.0, n-1, -0.5),\n (n // 2) * n + n // 2: (n // 2, n // 2, 1.0),\n (n-1) * n: (n-1, 0.0, -0.5),\n (n-1) * n + n-1: (n-1, n-1, -0.5)}\n\n for key, val in handles.items():\n handles[key] = np.array(val)\n\n triangles = list()\n\n for i in range(n):\n for j in range(n):\n a = (i-1, j-1)\n b = (i-1, j)\n c = (i, j-1)\n d = (i, j)\n\n skip = False\n\n for x in (a, b, c, d):\n if x[0] < 0 or x[1] < 0:\n skip = True\n\n if skip:\n continue\n\n triangles.append((a, b, d))\n triangles.append((a, c, d))\n\n neighbors = dict()\n\n for a_b_c in triangles:\n for i in range(3):\n for j in range(i+1, 3):\n x = a_b_c[i][0] * n + a_b_c[i][1]\n y = a_b_c[j][0] * n + a_b_c[j][1]\n\n if x not in neighbors:\n neighbors[x] = set()\n\n if y not in neighbors:\n neighbors[y] = set()\n\n neighbors[x].add(y)\n neighbors[y].add(x)\n\n return rest, vertices, c_list, handles, neighbors\n\n\ndef get_dRdc(c):\n def get_dthetadci(i):\n return 
c[i] / theta\n\n def get_a(i):\n lhs = theta ** 2 * np.sin(theta) * get_dthetadci(i)\n rhs = -(1.0 - np.cos(theta)) * 2 * c[i]\n bot = theta ** 4\n\n return (lhs + rhs)\n\n def get_b(i):\n top = theta * np.cos(theta) - np.sin(theta) * get_dthetadci(i)\n bot = theta ** 2\n\n return top\n\n theta = np.linalg.norm(c)\n\n dRdcx_lhs = np.array([[0.0, c[1], c[2]],\n [c[1], -2.0 * c[0], 0.0],\n [c[2], 0.0, -2.0 * c[0]]])\n dRdcx_rhs = np.array([[0.0, 0.0, 0.0],\n [0.0, 0.0, -1.0],\n [0.0, 1.0, 0.0]])\n dRdcx = get_a(0) * dRdcx_lhs + get_b(0) * dRdcx_rhs\n\n dRdcy_lhs = np.array([[-2.0 * c[1], c[0], 0.0],\n [c[0], 0.0, c[2]],\n [0.0, c[2], -2.0 * c[1]]])\n dRdcy_rhs = np.array([[0.0, 0.0, 1.0],\n [0.0, 0.0, 0.0],\n [-1.0, 0.0, 0.0]])\n dRdcy = get_a(1) * dRdcy_lhs + get_b(1) * dRdcy_rhs\n\n dRdcz_lhs = np.array([[-2.0 * c[2], 0.0, c[0]],\n [ 0.0, -2.0 * c[2], c[1]],\n [ c[0], c[1], 0.0]])\n dRdcz_rhs = np.array([[0.0, -1.0, 0.0],\n [1.0, 0.0, 0.0],\n [0.0, 0.0, 0.0]])\n dRdcz = get_a(2) * dRdcz_lhs + get_b(2) * dRdcz_rhs\n\n return np.array([dRdcx.flatten(), dRdcy.flatten(), dRdcz.flatten()])\n\n\ndef get_dc(rest, vertices, c_list, neighbors):\n result = list()\n\n for i, p_i_neighbors in neighbors.items():\n c = c_list[i]\n R = make_R(c)\n\n dRdc = get_dRdc(c)\n dfdR = np.zeros((3, 3))\n\n for r in range(3):\n for c in range(3):\n for j in p_i_neighbors:\n u = vertices[i] - vertices[j]\n v = rest[i] - rest[j]\n\n dfdR[r,c] += 2.0 * (np.dot(R[r,:], u) - v[r]) * u[c]\n\n dfdc = np.dot(dRdc, dfdR.flatten())\n\n result.append(dfdc)\n\n return result\n\n\ndef get_dp(rest, vertices, c_list, neighbors, handles, alpha):\n result = list()\n\n for i, p_i_neighbors in sorted(neighbors.items()):\n Ri = make_R(c_list[i])\n\n dp = np.zeros(3)\n\n for j in p_i_neighbors:\n Rj = make_R(c_list[j])\n\n dp += -2.0 * (np.dot(Ri, rest[i] - rest[j]) - (vertices[i] - vertices[j]))\n dp += 2.0 * (np.dot(Rj, rest[j] - rest[i]) - (vertices[j] - vertices[i]))\n\n if i in handles:\n dp += -2.0 * alpha * (handles[i] - vertices[i])\n\n result.append(np.copy(dp))\n\n return np.array(result)\n\n\ndef minimize(rest, vertices, c_list, neighbors, handles, alpha, h=5e-3):\n dcs = get_dc(rest, vertices, c_list, neighbors)\n dps = get_dp(rest, vertices, c_list, neighbors, handles, alpha)\n\n for i, dc in enumerate(dcs):\n c_list[i] = c_list[i] - h * dc\n\n for i, dp in enumerate(dps):\n vertices[i] = vertices[i] - h * dp\n\n return vertices, c_list\n\n\ndef main(n=20, alpha=100.0):\n fig = plt.figure()\n\n ax1 = fig.add_subplot(121, projection='3d')\n ax2 = fig.add_subplot(122)\n\n rest, vertices, c_list, handles, neighbors = initialize(n)\n\n scores = list()\n\n while True:\n scores.append(\n energy(rest, vertices, c_list, neighbors, handles, alpha))\n print(scores[-1])\n\n plot(ax1, ax2, rest, vertices, neighbors, handles, scores)\n\n vertices, c_list = minimize(rest, vertices, c_list,\n neighbors, handles, alpha)\n\n\nif __name__ == '__main__':\n np.random.seed(0)\n\n plt.ion()\n plt.show()\n\n main()\n","sub_path":"math/cs395t_graphics_hw2_problem1a.py","file_name":"cs395t_graphics_hw2_problem1a.py","file_ext":"py","file_size_in_byte":7122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"209033986","text":"#!/usr/bin/env python\n\nimport numpy as np\nimport sysconfig\nfrom functools import partial\nfrom cffi import FFI\nfrom runpy import run_path\nfrom pathlib import Path\n\nutil = run_path(Path(__file__).absolute().parent.parent / \"util.py\")\n\nffi = 
FFI()\nffi.cdef(\"double dtw(double* x, double* y, int size_x, int size_y);\")\nmy_dir = Path(__file__).absolute().parent\ndllib = ffi.dlopen(\n str(my_dir / (\"cdtw\" + sysconfig.get_config_var(\"EXT_SUFFIX\")))\n)\n\n\ndef serie_pair_index_generator(number):\n \"\"\" generator for pair index (i, j) such that i < j < number\n\n :param number: the upper bound\n :returns: pairs (lower, greater)\n :rtype: a generator\n \"\"\"\n return (\n (_idx_greater, _idx_lower)\n for _idx_greater in range(number)\n for _idx_lower in range(_idx_greater)\n )\n\n\ndef cDTW(serie_a, serie_b):\n a_ptr = ffi.cast(\"double*\", serie_a.ctypes.data)\n b_ptr = ffi.cast(\"double*\", serie_b.ctypes.data)\n ret = dllib.dtw(a_ptr, b_ptr, len(serie_a), len(serie_b))\n return ret\n\n\ndef cort(s1, s2):\n \"\"\" Computes the cort between serie one and two (assuming they have the same length)\n\n :param s1: the first serie (or any iterable over floats64)\n :param s2: the second serie (or any iterable over floats64)\n :returns: the cort distance\n :rtype: float64\n\n \"\"\"\n d1 = np.diff(s1)\n d2 = np.diff(s2)\n num = np.dot(d1, d2.T)\n norm1 = np.dot(d1, d1.T)\n norm2 = np.dot(d2, d2.T)\n if np.abs(norm1) < 0.00001:\n norm1 = 1.0\n if np.abs(norm2) < 0.00001:\n norm2 = 1.0\n return num / np.sqrt(norm1 * norm2)\n\n\ndef compute(series, nb_series):\n gen = serie_pair_index_generator(nb_series)\n\n _dist_mat_dtw = np.zeros((nb_series, nb_series), dtype=np.float64)\n _dist_mat_cort = np.zeros((nb_series, nb_series), dtype=np.float64)\n for t1, t2 in gen:\n dist_dtw = cDTW(series[t1], series[t2])\n _dist_mat_dtw[t1, t2] = dist_dtw\n _dist_mat_dtw[t2, t1] = dist_dtw\n dist_cort = 0.5 * (1 - cort(series[t1], series[t2]))\n _dist_mat_cort[t1, t2] = dist_cort\n _dist_mat_cort[t2, t1] = dist_cort\n\n return _dist_mat_dtw, _dist_mat_cort\n\n\nmain = partial(util[\"main\"], compute)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"pyfiles/dtw_cort_dist/V3_c_dtw_cort_vect/dtw_cort_dist_mat.py","file_name":"dtw_cort_dist_mat.py","file_ext":"py","file_size_in_byte":2230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"552361678","text":"\n# Imports necesarios\nimport numpy as np\nimport pandas as pd\nimport seaborn as sb\nimport matplotlib.pyplot as plt\n\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib import cm\n#plt.rcParams['figure.figsize'] = (16, 9)\n#plt.style.use('ggplot')\nfrom sklearn import linear_model\nfrom sklearn.metrics import mean_squared_error, r2_score\n\ndata = pd.read_csv(\"./denuncias_violencia_familiar2.csv\", encoding = \"ISO-8859-1\")\ndata_habitantes = pd.read_csv(\"./habitantes_por_departamento.csv\", encoding = \"ISO-8859-1\")\n\ndepartamentos = []\naños = []\ndenuncias = []\n#habitantess_por_departamentos = [379384, 1083519, 405759, 1382730, 616176, 1341012, 994494, 1205527, 347639, 721047, 850765, 1246038, 1778080, 1197260, 9485405, 883510, 141070, 174863, 254065, 1856809, 1172697, 813381, 329332, 224863, 496459]\nhabitantess_por_departamentos = []\n\n#Leyendo numero de habitantes por departamentos\nfor index, row in data_habitantes.iterrows():\n if index > 0:\n habitantess_por_departamentos.append(row[1])\n\n#Leyendo data de delitos\nindex = 0\nfor index, row in data.iterrows():\n i = 0\n for col in row:\n if i > 0 and index > 0:\n departamentos.append(habitantess_por_departamentos[index-1])\n años.append(2004 + i)\n denuncias.append(col)\n i = i + 1\n\ndataX2 = pd.DataFrame()\ndataX2[\"departamentos\"] = 
departamentos\ndataX2[\"años\"] = años\n\n#training\nXY_train = np.array(dataX2)\nz_train = np.array(denuncias)\n\nregresion = linear_model.LinearRegression() #.Ridge(alpha=.1)\n\nregresion.fit(XY_train, z_train)\n\nz_pred = regresion.predict(XY_train)\n\n#print(regresion.score(XY_train, z_pred))\n\n# Veamos los coeficienetes obtenidos, En nuestro caso, serán la Tangente\nprint('Coefficients: \\n', regresion.coef_)\n# Este es el valor donde corta el eje Y (en X=0)\nprint('Independent term: \\n', regresion.intercept_)\n# Error Cuadrado Medio\nprint(\"Mean squared error: %.2f\" % mean_squared_error(z_train, z_pred))\n# Puntaje de Varianza. El mejor puntaje es un 1.0\nprint('Variance score: %.2f' % r2_score(z_train, z_pred))\n\n\ndepartamentos_new = []\naños_new = []\n\nfor index, row in data.iterrows():\n i = 0\n for i in range(2018, 2028):\n departamentos_new.append(habitantess_por_departamentos[index-1])\n años_new.append(i)\n\ndataX2_new = pd.DataFrame()\ndataX2_new[\"departamentos\"] = departamentos_new\ndataX2_new[\"años\"] = años_new\n\n#training\nXY_new = np.array(dataX2_new)\n\nz_new = regresion.predict(XY_new)\n\nfig = plt.figure()\nax = Axes3D(fig)\n\n# Creamos una malla, sobre la cual graficaremos el plano\nxx, yy = np.meshgrid(np.linspace(0, 10000, num=10), np.linspace(2004, 2030, num=10))\n\n# calculamos los valores del plano para los puntos x e y\nnuevoX = (regresion.coef_[0] * xx)\nnuevoY = (regresion.coef_[1] * yy)\n\n# calculamos los correspondientes valores para z. Debemos sumar el punto de intercepción\nz = (nuevoX + nuevoY + regresion.intercept_)\n\n# Graficamos el plano\nax.plot_surface(xx, yy, z, alpha=0.2, cmap='hot')\n\n# Graficamos en azul los puntos en 3D\nax.scatter(XY_train[:, 0], XY_train[:, 1], z_train, c='blue', s=2, label='Entrenamiento: año 2005 - 2017')\n\n# Graficamos en rojo, los puntos que\nax.scatter(XY_train[:, 0], XY_train[:, 1], z_pred, c='red', s=2, label='Predicción del entrenamiento: año 2005 - 2017')\n\n# Graficamos en rojo, los puntos que\nax.scatter(XY_new[:, 0], XY_new[:, 1], z_new, c='orange', s=2, label='Predicción futura: año 2018 - 2028')\n\nax.legend()\n\n# con esto situamos la \"camara\" con la que visualizamos\n\n#Vista inclinada\nax.view_init(elev=50., azim=45)\n\n#Vista frontal con la perspectiva en los años\nax.view_init(elev=0., azim=5)\n\nax.set_xlabel('Departamentos (Número de habitantes)')\nax.set_ylabel('Año')\nax.set_zlabel('Denuncias por Violencia Familiar')\nax.set_title('Regresión Lineal', loc='left')\n\nax.set\n\nplt.show()","sub_path":"denuncias_por_violencia_familiar.py","file_name":"denuncias_por_violencia_familiar.py","file_ext":"py","file_size_in_byte":3822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"624789695","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /opt/anaconda3/lib/python3.7/site-packages/tests/test_projectcreator.py\n# Compiled at: 2019-07-15 11:59:22\n# Size of source mod 2**32: 1102 bytes\nimport unittest, os, sys, shutil\nsys.path.append('.')\nfrom reademptionlib.projectcreator import ProjectCreator\n\nclass TestProjectCreator(unittest.TestCase):\n\n def setUp(self):\n self.root_folder_name = 'a_test_project'\n self.projectcreator = ProjectCreator()\n\n def tearDown(self):\n if os.path.exists(self.root_folder_name):\n shutil.rmtree(self.root_folder_name)\n\n def test_create_root_folder(self):\n 
self.projectcreator.create_root_folder(self.root_folder_name)\n assert os.path.exists(self.root_folder_name)\n shutil.rmtree(self.root_folder_name)\n\n def test_create_subfolders(self):\n self.projectcreator.create_root_folder(self.root_folder_name)\n subfolders = ['test_a', 'test_b', 'test_c']\n subfolders = [self.root_folder_name + '/' + subfolder for subfolder in subfolders]\n self.projectcreator.create_subfolders(subfolders)\n for subfolder in subfolders:\n assert os.path.exists(subfolder)\n\n\nif __name__ == '__main__':\n unittest.main()","sub_path":"pycfiles/READemption-0.5.0.linux-x86_64.tar/test_projectcreator.cpython-37.py","file_name":"test_projectcreator.cpython-37.py","file_ext":"py","file_size_in_byte":1351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"317218220","text":"import gym\n\n# create environment\nenv = gym.make(\"Acrobot-v1\")\n\n# define episodes\nepisodes = 100\n\nfor e in range(episodes):\n # start \n observation = env.reset()\n\n # define params\n done = False\n step_index = 0\n\n while not done:\n # render \n env.render()\n # your agent here (this takes random actions)\n action = env.action_space.sample() \n # observate\n observation, reward, done, info = env.step(action)\n \n # print out values\n step_index += 1\n print(\"Step {}:\".format(step_index))\n print(\"action: {}\".format(action))\n print(\"observation: {}\".format(observation))\n print(\"reward: {}\".format(reward))\n print(\"done: {}\".format(done))\n print(\"info: {}\".format(info))\n\n print(\"Episode finished after {} steps\".format(step_index + 1))\n\nprint('Did not solve after {} episodes '.format(e))\n\nif e == episodes - 1:\n env.close()\n input('Did not solve after {} episodes '.format(episodes))","sub_path":"samples/(3) Acrobot-v1.py","file_name":"(3) Acrobot-v1.py","file_ext":"py","file_size_in_byte":925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"543421433","text":"# Import needed libraries\nfrom sklearn.model_selection import KFold\nimport tensorflow as tf\nimport numpy as np\nimport os\nimport random\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import mean_squared_error\n\n# Test the Keras version\ntfk = tf.keras\ntfkl = tf.keras.layers\nprint(tf.__version__)\n\n# Random seed for reproducibility\nseed = 42\nrandom.seed(seed)\nos.environ['PYTHONHASHSEED'] = str(seed)\nnp.random.seed(seed)\ntf.random.set_seed(seed)\ntf.compat.v1.set_random_seed(seed)\n\n# Load wine file\nwine = pd.read_csv(r'winequality.csv', sep=';')\nprint(wine.info())\nwine.head()\nwine.describe()\n\n# ----------------------------------------------------------\n\n# Create train set, test set, train val set\nX_train_val, X_test, y_train_val, y_test = train_test_split(\n wine[wine.columns[:-1]],\n pd.DataFrame(wine['quality'], columns=['quality']),\n test_size=0.1,\n random_state=seed,\n stratify=pd.DataFrame(wine['quality'], columns=['quality'])\n)\nprint(X_train_val.shape, y_train_val.shape)\nprint(X_test.shape, y_test.shape)\n\n# Inspect the target\nplt.figure(figsize=(15, 5))\nsns.histplot(data=y_train_val, x='quality', kde=True)\nplt.show()\n\n# Normalize both features and target\nmax_df = X_train_val.max()\nmin_df = X_train_val.min()\nmax_t = y_train_val.max()\nmin_t = y_train_val.min()\nX_train_val = (X_train_val - min_df)/(max_df - min_df)\ny_train_val = 
(y_train_val - min_t)/(max_t - min_t)\nprint('Wine dataset shape', X_train_val.shape)\nprint('Target shape', y_train_val.shape)\nX_train_val.describe()\n\n# Inspect the target after normalization\nplt.figure(figsize=(15, 5))\nsns.histplot(data=y_train_val, x='quality', kde=True)\nplt.show()\n\n# Normalize the test set with the same parameters of training set\nX_test = (X_test - min_df)/(max_df - min_df)\ny_test = (y_test - min_t)/(max_t - min_t)\n\n# ----------------------------------------------------------\n\n# Hold out\ninput_shape = X_train_val.shape[1:]\nbatch_size = 256\nepochs = 1000\n\n\ndef monitor(histories, names, colors, early_stopping=1):\n assert len(histories) == len(names)\n assert len(histories) == len(colors)\n plt.figure(figsize=(15, 6))\n for idx in range(len(histories)):\n plt.plot(histories[idx]['mse'][:-early_stopping], label=names[idx] +\n ' Training', alpha=.4, color=colors[idx], linestyle='--')\n plt.plot(histories[idx]['val_mse'][:-early_stopping],\n label=names[idx]+' Validation', alpha=.8, color=colors[idx])\n plt.ylim(0.0075, 0.02)\n plt.title('Mean Squared Error')\n plt.legend(bbox_to_anchor=(1, 1))\n plt.grid(alpha=.3)\n plt.show()\n\n\ndef plot_residuals(model, X_, y_):\n X_['sort'] = y_\n X_ = X_.sort_values(by=['sort'])\n y_ = np.expand_dims(X_['sort'], 1)\n X_.drop(['sort'], axis=1, inplace=True)\n\n y_pred = model.predict(X_)\n MSE = mean_squared_error(y_, y_pred)\n\n print('Mean Squared Error (MSE): %.4f' % MSE)\n\n mpl.rcParams.update(mpl.rcParamsDefault)\n sns.set(font_scale=1.1, style=None, palette='Set1')\n plt.figure(figsize=(15, 5))\n plt.scatter(np.arange(len(y_pred)), y_pred,\n label='Prediction', color='#1f77b4')\n plt.scatter(np.arange(len(y_)), y_, label='True', color='#d62728')\n\n for i in range(len(y_)):\n if(y_[i] >= y_pred[i]):\n plt.vlines(i, y_pred[i], y_[i], alpha=.5)\n else:\n plt.vlines(i, y_[i], y_pred[i], alpha=.5)\n\n plt.legend()\n plt.grid(alpha=.3)\n plt.show()\n\n\nhistories = []\nnames = []\ncolors = []\nval_scores = []\ntest_scores = []\n\nX_train, X_val, y_train, y_val = train_test_split(\n X_train_val,\n y_train_val,\n test_size=len(X_test),\n random_state=seed,\n stratify=y_train_val\n)\nprint(X_train.shape, y_train.shape)\nprint(X_val.shape, y_val.shape)\n\n# ----------------------------------------------------------\n\n# Default model\n\n\ndef build_default_model(input_shape):\n\n # Build the neural network layer by layer\n input_layer = tfkl.Input(shape=input_shape, name='Input')\n hidden_layer1 = tfkl.Dense(units=256, activation='relu', name='Hidden1',\n kernel_initializer=tfk.initializers.GlorotUniform(seed=seed))(input_layer)\n hidden_layer2 = tfkl.Dense(units=128, activation='relu', name='Hidden2',\n kernel_initializer=tfk.initializers.GlorotUniform(seed=seed))(hidden_layer1)\n hidden_layer3 = tfkl.Dense(units=64, activation='relu', name='Hidden3',\n kernel_initializer=tfk.initializers.GlorotUniform(seed=seed))(hidden_layer2)\n output_layer = tfkl.Dense(units=1, activation='linear', name='Output',\n kernel_initializer=tfk.initializers.GlorotUniform(seed=seed))(hidden_layer3)\n\n # Connect input and output through the Model class\n model = tfk.Model(inputs=input_layer,\n outputs=output_layer, name='default_model')\n\n # Compile the model\n learning_rate = 1e-3\n opt = tfk.optimizers.Adam(learning_rate)\n loss = tfk.losses.MeanSquaredError()\n mtr = ['mse']\n model.compile(loss=loss, optimizer=opt, metrics=mtr)\n\n # Return the model\n return model\n\n\ndefault_model = 
build_default_model(input_shape)\ndefault_model.summary()\n# tfk.utils.plot_model(default_model)\n\n# Train\ndefault_history = default_model.fit(\n x=X_train,\n y=y_train,\n validation_data=(X_val, y_val),\n batch_size=batch_size,\n epochs=epochs\n).history\n\nplt.figure(figsize=(15, 5))\nplt.plot(default_history['mse'], label='Training', alpha=.8, color='#ff7f0e')\nplt.plot(default_history['val_mse'],\n label='Validation', alpha=.8, color='#4D61E2')\nplt.ylim(0, 0.025)\nplt.title('Mean Squared Error')\nplt.legend(loc='upper right')\nplt.grid(alpha=.3)\nplt.show()\n\nprint('Train Performance')\nplot_residuals(default_model, X_train.copy(), y_train.copy())\nprint('Validation Performance')\nplot_residuals(default_model, X_val.copy(), y_val.copy())\n\n# ----------------------------------------------------------\n\n# Early stopping\npatience = 150\nearly_stopping = tfk.callbacks.EarlyStopping(\n monitor='val_mse', mode='min', patience=patience, restore_best_weights=True)\n\nearlystopping_model = build_default_model(input_shape)\nearlystopping_model.summary()\n# tfk.utils.plot_model(earlystopping_model)\n\nearlystopping_history = earlystopping_model.fit(\n x=X_train,\n y=y_train,\n validation_data=(X_val, y_val),\n batch_size=batch_size,\n epochs=epochs,\n callbacks=[early_stopping]\n).history\n\nplt.figure(figsize=(15, 5))\nplt.plot(default_history['mse'], label='Training', alpha=.3, color='#ff7f0e')\nplt.plot(default_history['val_mse'],\n label='Validation', alpha=.3, color='#4D61E2')\nplt.plot(earlystopping_history['mse'],\n label='Training (early stopping)', alpha=.8, color='#ff7f0e')\nplt.plot(earlystopping_history['val_mse'],\n label='Validation (early stopping)', alpha=.8, color='#4D61E2')\nplt.ylim(0, 0.03)\nplt.title('Mean Squared Error')\nplt.legend(loc='upper right')\nplt.grid(alpha=.3)\nplt.show()\n\nprint('Train Performance')\nplot_residuals(earlystopping_model, X_train.copy(), y_train.copy())\nprint('Validation Performance')\nplot_residuals(earlystopping_model, X_val.copy(), y_val.copy())\n\n# Store results\nval_scores.append(mean_squared_error(\n y_val, earlystopping_model.predict(X_val)))\nprint('Validation MSE %.4f' % val_scores[0])\ntest_scores.append(mean_squared_error(\n y_test, earlystopping_model.predict(X_test)))\nhistories.append(earlystopping_history)\nnames.append('Default')\ncolors.append('#ff7f0e')\nmonitor(histories, names, colors, patience)\n\n# Save, delete and load a Keras model\nearlystopping_model.save('DefaultModel')\n#del earlystopping_model\n#earlystopping_model = tfk.models.load_model('DefaultModel')\n#print('Validation MSE: %.4f' % mean_squared_error(y_val, earlystopping_model.predict(X_val)))\n\n# ----------------------------------------------------------\n\n# Weight decay (Regularization techniques)\n\n\ndef build_l2_model(input_shape):\n\n # Build the neural network layer by layer\n input_layer = tfkl.Input(shape=input_shape, name='Input')\n hidden_layer1 = tfkl.Dense(units=256, activation='relu', name='Hidden1',\n kernel_initializer=tfk.initializers.GlorotUniform(\n seed=seed),\n kernel_regularizer=tf.keras.regularizers.l2(1e-5))(input_layer)\n hidden_layer2 = tfkl.Dense(units=128, activation='relu', name='Hidden2',\n kernel_initializer=tfk.initializers.GlorotUniform(\n seed=seed),\n kernel_regularizer=tf.keras.regularizers.l2(1e-5))(hidden_layer1)\n hidden_layer3 = tfkl.Dense(units=64, activation='relu', name='Hidden3',\n kernel_initializer=tfk.initializers.GlorotUniform(\n seed=seed),\n 
kernel_regularizer=tf.keras.regularizers.l2(1e-5))(hidden_layer2)\n output_layer = tfkl.Dense(units=1, activation='linear', name='Output',\n kernel_initializer=tfk.initializers.GlorotUniform(\n seed=seed),\n kernel_regularizer=tf.keras.regularizers.l2(1e-5))(hidden_layer3)\n\n # Connect input and output through the Model class\n model = tfk.Model(inputs=input_layer,\n outputs=output_layer, name='l2_model')\n\n # Compile the model\n learning_rate = 1e-3\n opt = tfk.optimizers.Adam(learning_rate)\n loss = tfk.losses.MeanSquaredError()\n mtr = ['mse']\n model.compile(loss=loss, optimizer=opt, metrics=mtr)\n\n # Return the model\n return model\n\n\nl2_model = build_l2_model(input_shape)\nl2_model.summary()\n# tfk.utils.plot_model(l2_model)\n\nhistory_l2 = l2_model.fit(\n x=X_train,\n y=y_train,\n validation_data=(X_val, y_val),\n batch_size=batch_size,\n epochs=epochs,\n callbacks=[early_stopping]\n).history\n\n# Store results\nval_scores.append(mean_squared_error(y_val, l2_model.predict(X_val)))\nprint('Validation MSE %.4f' % val_scores[1])\ntest_scores.append(mean_squared_error(y_test, l2_model.predict(X_test)))\nhistories.append(history_l2)\nnames.append('Weight Decay')\ncolors.append('#4D61E2')\nmonitor(histories, names, colors, patience)\n\n# ----------------------------------------------------------\n\n# Dropout (Regularization techniques)\n\n\ndef build_dropout_model(input_shape):\n\n # Build the neural network layer by layer\n input_layer = tfkl.Input(shape=input_shape, name='Input')\n hidden_layer1 = tfkl.Dense(units=256, activation='relu', name='Hidden1',\n kernel_initializer=tfk.initializers.GlorotUniform(seed=seed))(input_layer)\n hidden_layer1 = tfkl.Dropout(0.4, seed=seed)(hidden_layer1)\n hidden_layer2 = tfkl.Dense(units=128, activation='relu', name='Hidden2',\n kernel_initializer=tfk.initializers.GlorotUniform(seed=seed))(hidden_layer1)\n hidden_layer2 = tfkl.Dropout(0.4, seed=seed)(hidden_layer2)\n hidden_layer3 = tfkl.Dense(units=64, activation='relu', name='Hidden3',\n kernel_initializer=tfk.initializers.GlorotUniform(seed=seed))(hidden_layer2)\n hidden_layer3 = tfkl.Dropout(0.4, seed=seed)(hidden_layer3)\n output_layer = tfkl.Dense(units=1, activation='linear', name='Output',\n kernel_initializer=tfk.initializers.GlorotUniform(\n seed=seed),\n kernel_regularizer=tf.keras.regularizers.l2(1e-5))(hidden_layer3)\n\n # Connect input and output through the Model class\n model = tfk.Model(inputs=input_layer,\n outputs=output_layer, name='dropout_model')\n\n # Compile the model\n learning_rate = 1e-3\n opt = tfk.optimizers.Adam(learning_rate)\n loss = tfk.losses.MeanSquaredError()\n mtr = ['mse']\n model.compile(loss=loss, optimizer=opt, metrics=mtr)\n\n # Return the model\n return model\n\n\ndropout_model = build_dropout_model(input_shape)\ndropout_model.summary()\n# tfk.utils.plot_model(dropout_model)\n\nhistory_dropout = dropout_model.fit(\n x=X_train,\n y=y_train,\n validation_data=(X_val, y_val),\n batch_size=batch_size,\n epochs=epochs,\n callbacks=[early_stopping]\n).history\n\n# Store results\nval_scores.append(mean_squared_error(y_val, dropout_model.predict(X_val)))\nprint('Validation MSE %.4f' % val_scores[2])\ntest_scores.append(mean_squared_error(y_test, dropout_model.predict(X_test)))\nhistories.append(history_dropout)\nnames.append('Dropout')\ncolors.append('#7DD667')\nmonitor(histories, names, colors, patience)\n\n# ----------------------------------------------------------\n\n# Dropout + l2-norm (Regularization techniques)\n\n\ndef 
build_dropout_l2_model(input_shape):\n\n # Build the neural network layer by layer\n input_layer = tfkl.Input(shape=input_shape, name='Input')\n hidden_layer1 = tfkl.Dense(units=256, activation='relu', name='Hidden1',\n kernel_initializer=tfk.initializers.GlorotUniform(\n seed=seed),\n kernel_regularizer=tf.keras.regularizers.l2(2e-6))(input_layer)\n hidden_layer1 = tfkl.Dropout(0.3, seed=seed)(hidden_layer1)\n hidden_layer2 = tfkl.Dense(units=128, activation='relu', name='Hidden2',\n kernel_initializer=tfk.initializers.GlorotUniform(\n seed=seed),\n kernel_regularizer=tf.keras.regularizers.l2(2e-6))(hidden_layer1)\n hidden_layer2 = tfkl.Dropout(0.3, seed=seed)(hidden_layer2)\n hidden_layer3 = tfkl.Dense(units=64, activation='relu', name='Hidden3',\n kernel_initializer=tfk.initializers.GlorotUniform(\n seed=seed),\n kernel_regularizer=tf.keras.regularizers.l2(2e-6))(hidden_layer2)\n hidden_layer3 = tfkl.Dropout(0.3, seed=seed)(hidden_layer3)\n output_layer = tfkl.Dense(units=1, activation='linear', name='Output',\n kernel_initializer=tfk.initializers.GlorotUniform(\n seed=seed),\n kernel_regularizer=tf.keras.regularizers.l2(2e-6))(hidden_layer3)\n\n # Connect input and output through the Model class\n model = tfk.Model(inputs=input_layer, outputs=output_layer,\n name='dropout_l2_model')\n\n # Compile the model\n learning_rate = 1e-3\n opt = tfk.optimizers.Adam(learning_rate)\n loss = tfk.losses.MeanSquaredError()\n mtr = ['mse']\n model.compile(loss=loss, optimizer=opt, metrics=mtr)\n\n # Return the model\n return model\n\n\ndropoutl2_model = build_dropout_l2_model(input_shape)\ndropoutl2_model.summary()\n# tfk.utils.plot_model(dropoutl2_model)\n\nhistory_dropoutl2 = dropoutl2_model.fit(\n x=X_train,\n y=y_train,\n validation_data=(X_val, y_val),\n batch_size=batch_size,\n epochs=epochs,\n callbacks=[early_stopping]\n).history\n\n# Store results\nval_scores.append(mean_squared_error(y_val, dropoutl2_model.predict(X_val)))\nprint('Validation MSE %.4f' % val_scores[3])\ntest_scores.append(mean_squared_error(y_test, dropoutl2_model.predict(X_test)))\nhistories.append(history_dropoutl2)\nnames.append('Dropout + L2')\ncolors.append('#B951D0')\nmonitor(histories, names, colors, patience)\n\nplt.figure(figsize=(15, 6))\nplt.plot(default_history['val_mse'], alpha=.3, color='#ff7f0e')\nplt.plot(earlystopping_history['val_mse'][:-patience],\n alpha=.8, color='#ff7f0e', label='Default')\nplt.plot(history_l2['val_mse'], alpha=.3, color='#4D61E2')\nplt.plot(history_l2['val_mse'][:-patience], alpha=.8,\n color='#4D61E2', label='Weight Decay')\nplt.plot(history_dropout['val_mse'], alpha=.3, color='#7DD667')\nplt.plot(history_dropout['val_mse'][:-patience],\n alpha=.8, color='#7DD667', label='Dropout')\nplt.plot(history_dropoutl2['val_mse'], alpha=.3, color='#B951D0')\nplt.plot(history_dropoutl2['val_mse'][:-patience], alpha=.8,\n color='#B951D0', label='Dropout + Weight Decay')\nplt.ylim(0.0115, 0.025)\nplt.title('Mean Squared Error')\nplt.legend(loc='upper right')\nplt.grid(alpha=.3)\nplt.show()\n\nplt.figure(figsize=(15, 6))\nplt.bar(names, val_scores, color=colors, alpha=.8)\nplt.ylim(0, .015)\nplt.title('Validation MSE')\nplt.grid(alpha=.3, axis='y')\nplt.show()\n\nplt.figure(figsize=(15, 6))\nplt.bar(names, test_scores, color=colors, alpha=.8)\nplt.ylim(0.01, .016)\nplt.title('Validation MSE')\nplt.grid(alpha=.3, axis='y')\nplt.show()\n\nprint('Train Performance with Best Model')\nplot_residuals(dropout_model, X_train.copy(), y_train.copy())\nprint('Validation Performance with Best 
Model')\nplot_residuals(dropout_model, X_val.copy(), y_val.copy())\nprint('Test Performance with Best Model')\nplot_residuals(dropout_model, X_test.copy(), y_test.copy())\n\n# ----------------------------------------------------------\n\n# K-Fold\n\nnum_folds = 10\n\nhistories = []\nscores = []\n\nkfold = KFold(n_splits=num_folds, shuffle=True, random_state=seed)\n\nfor fold_idx, (train_idx, valid_idx) in enumerate(kfold.split(X_train_val, y_train_val)):\n\n print(\"Starting training on fold num: {}\".format(fold_idx+1))\n\n model = build_dropout_model(input_shape)\n\n history = model.fit(\n x=X_train_val.iloc[train_idx],\n y=y_train_val.iloc[train_idx],\n validation_data=(\n X_train_val.iloc[valid_idx], y_train_val.iloc[valid_idx]),\n batch_size=batch_size,\n epochs=100,\n callbacks=[early_stopping]\n ).history\n\n score = model.evaluate(\n X_train_val.iloc[valid_idx], y_train_val.iloc[valid_idx])\n scores.append(score[1])\n\n histories.append(history)\n\ncolors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b', '#e377c2', '#7f7f7f', '#bcbd22', '#17becf']\n\nprint(\"MSE\")\nprint(\"Mean: {}; STD: {}\".format(np.mean(scores).round(4), np.std(scores).round(4)))\n\nplt.figure(figsize=(15,6))\nfor fold_idx in range(num_folds):\n plt.plot(histories[fold_idx]['val_mse'], color=colors[fold_idx], label='Fold N°{}'.format(fold_idx+1))\n plt.ylim(0.011, 0.03)\n plt.title('Mean Squared Error')\n plt.legend(loc='upper right')\n plt.grid(alpha=.3)\nplt.show()","sub_path":"wine-train.py","file_name":"wine-train.py","file_ext":"py","file_size_in_byte":18323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"489113451","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import gen_nn_ops\n\n# http://stackoverflow.com/questions/38340791/guided-back-propagation-in-tensor-flow\ndef guided_backprop_op(fn, relus, X):\n assert len(relus)>0, \"no relus\"\n oplist = [X] + [op for op in relus]\n next_relu = oplist.pop()\n Dafter = tf.gradients(fn, next_relu)[0]\n Dafter_thresh= tf.to_float(Dafter < 0.0)*Dafter\n print(next_relu)\n while len(oplist):\n last_relu = next_relu\n next_relu = oplist.pop()\n print(next_relu)\n try:\n Dafter = tf.gradients(last_relu, next_relu, grad_ys=Dafter_thresh)[0]\n except:\n print(\"tf.gradients failed for:\\n last_relu=%s\\n next_relu=%s\" % (last_relu, next_relu))\n import IPython\n IPython.embed()\n Dafter_thresh = tf.to_float(Dafter < 0.0)*Dafter\n if Dafter_thresh.get_shape()[0] == 1:\n Dafter_thresh = tf.squeeze(Dafter_thresh,[0])\n return Dafter\n\n### this doesn't work, still get the same unguided map\n# https://gist.github.com/falcondai/561d5eec7fed9ebf48751d124a77b087\n#@ops.RegisterGradient(\"GuidedRelu\")\n#def _GuidedReluGrad(op, grad):\n# return tf.zeros(grad.get_shape())\n# return tf.select(0. 
< grad, gen_nn_ops._relu_grad(grad, op.outputs[0]), tf.zeros(grad.get_shape()))\n\nclass SaliencyMap(object):\n def __init__(self, model):\n self.model = model\n self.sess = model.sess\n self.img_processed = model.X_processed\n self.img_raw = model.X_placeholder\n self.logit2bprop = {}\n self.logit2gbprop = {}\n g = tf.get_default_graph()\n X = self.img_processed\n for logit in range(model.nnet.logits.get_shape()[1]):\n fn = model.nnet.logits[:,logit]\n self.logit2bprop[logit] = tf.gradients(fn, self.img_processed)[0]\n self.logit2gbprop[logit] = guided_backprop_op(fn, model.nnet.after_relus, X)\n\n def calc(self, raw_img, logit, fn='gbprop'):\n assert len(raw_img.shape)==4\n assert raw_img.shape[0]==1, \"only do batch size of 1\"\n assert fn in ['bprop', 'gbprop'], \"fn must be one of bprop or gpprop, but it is %s\" % fn\n ops = [self.img_processed]\n feed_dict = {self.img_raw:raw_img}\n if fn == 'bprop':\n ops.append(self.logit2bprop[logit])\n img_processed, dimg = self.sess.run(ops, feed_dict=feed_dict)\n elif fn == 'gbprop':\n ops.append(self.logit2gbprop[logit])\n img_processed, dimg = self.sess.run(ops, feed_dict=feed_dict)\n\n assert len(img_processed.shape)==4\n assert img_processed.shape[0]==1\n img_processed = img_processed[0,:,:,:]\n \n assert len(dimg.shape)==4\n assert dimg.shape[0]==1\n dimg = dimg[0,:,:,:]\n\n return img_processed, dimg\n \n","sub_path":"psmlearn/saliencymaps.py","file_name":"saliencymaps.py","file_ext":"py","file_size_in_byte":2974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"358282263","text":"import requests, json, pprint, re\nfrom bs4 import BeautifulSoup as bs4\n\nbase_site = 'https://hub.arcgis.com/datasets?source=City%20of%20Washington%2C%20DC'\nbase_site = 'http://maps2.dcgis.dc.gov/dcgis/rest/services'\ndata_directory = '/DCGIS_DATA/'\n#query structure is base_website + / path / to / dataset# / query?option1=1&option2=2\ndefault_query = '/query?where=1%3D1&f=pjson'\n\nresults = requests.get(base_site+data_directory)\nsoup = bs4(results.content,'html.parser')\nsoupA = soup.find_all(re.compile(\".href=\"))\n\nwith open('output.txt','w') as f:\n for item in soupA:\n f.write(str(item))\n f.write('\\n')\n","sub_path":"ODDC.py","file_name":"ODDC.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"288459376","text":"from wheel import *\nfrom secret import *\nfrom player import *\nfrom utils import *\nfrom override import *\nimport string\n\nvowels = ['a', 'e', 'i', 'o', 'u']\n\nclass Board:\n\t\"\"\"\n\t>>> w = Wheel()\n\t>>> p = Player('Player 1', w)\n\t>>> s = SecretPhrase('testing this out', 'test')\n\t>>> b = Board(s, p)\n\t\"\"\"\n\tdef __init__(self, secret, players=[]):\n\t\tself.secret = secret\n\t\tself.category = self.secret.category\n\t\tself.players = players\n\n\tdef get_letter(self, player):\n\t\tprint(f'\\n\\n{self.secret.show_status()}')\n\t\tprint(f\"\\n\\n{player.name}'s Score: {player.score}\\nDo you want to buy a vowel?\")\n\t\tbuy_vowel = input()\n\n\t\tif buy_vowel == 'OVERRIDE':\n\t\t\ttry:\n\t\t\t\tOverride(self, player)\n\t\t\texcept AssertionError as e:\n\t\t\t\tprint(e)\n\t\t\t\tself.get_letter(player)\n\t\telse:\n\t\t\ttry:\n\t\t\t\tassert buy_vowel in yes or buy_vowel in no, 'Please enter yes or no.'\n\t\t\texcept AssertionError as e:\n\t\t\t\tprint(e)\n\t\t\t\tself.get_letter(player)\n\t\t\n\t\tif buy_vowel in yes:\n\t\t\ttry:\n\t\t\t\tassert player.score >= 200, 'Your 
score is too low to buy a vowel.'\n\t\t\texcept AssertionError as e:\n\t\t\t\tprint(e)\n\t\t\t\tself.get_letter(player)\n\t\t\tprint('\\n\\nPlease enter your vowel.\\n')\n\t\t\tletter = input()\n\t\t\t\n\t\t\ttry:\n\t\t\t\tassert letter in string.ascii_lowercase, 'Please enter a lowercase letter.'\n\t\t\texcept AssertionError as e:\n\t\t\t\tprint(e)\n\t\t\t\tself.get_letter(player)\n\t\t\t\n\t\t\tplayer.score -= 200\n\t\t\tself.secret.guess_letter(letter, player)\n\t\telse:\n\t\t\t#print('\\n\\nPress Enter to spin!\\n')\n\t\t\t#input()\n\t\t\tvalue = player.spin()\n\t\t\tprint(f'\\n\\nYou spun: {value}\\nPlease enter a letter.')\n\t\t\tletter = input()\n\n\t\t\ttry:\n\t\t\t\tassert letter in string.ascii_lowercase, 'Please enter a lowercase letter.'\n\t\t\texcept AssertionError as e:\n\t\t\t\tprint(e)\n\t\t\t\tself.get_letter(player)\n\n\t\t\ttry:\n\t\t\t\tassert letter not in vowels, \"You didn't buy a vowel!\"\n\t\t\texcept AssertionError as e:\n\t\t\t\tprint(e)\n\t\t\t\tself.get_letter(player)\n\n\t\t\tnum_correct = self.secret.guess_letter(letter, player)\n\t\t\tplayer.score += num_correct * value\n\t\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nif __name__ == '__main__':\n\timport doctest\n\tdoctest.testmod()","sub_path":"wheel_of_fortune/board.py","file_name":"board.py","file_ext":"py","file_size_in_byte":2024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"410268421","text":"import pytest\nfrom shopping_basket.basket_pricer.speical_offers import SpecialOffers\n\n\nclass TestOffer:\n def get_instance(self):\n return SpecialOffers()\n\n @pytest.mark.parametrize(\n \"rule_template\", [\"buy $buy get $free free\", \"$discount% discount\"]\n )\n def test_get_rule_func(self, rule_template):\n special_offer = self.get_instance()\n assert callable(special_offer.get_rule_func(rule_template))\n\n @pytest.mark.parametrize(\"rule_template\", [None, \"\", \"no rule template\"])\n def test_get_rule_func_fail(self, rule_template):\n special_offer = self.get_instance()\n with pytest.raises(NotImplementedError):\n special_offer.get_rule_func(rule_template)\n\n @pytest.mark.parametrize(\n \"products, discount, expected\",\n [([{\"name\": \"Biscuits\", \"price\": 1.20}], 25, 1.20 * 0.25), ([], 25, 0)],\n )\n def test_rule_discount(self, products, discount, expected):\n special_offer = self.get_instance()\n _discount = special_offer.rule_discount(products, discount=discount)\n assert _discount == expected\n\n @pytest.mark.parametrize(\n \"products, buy, free, expected\",\n [\n (\n [{\"name\": \"Biscuits\", \"price\": 1.20} for _ in range(2)],\n 2,\n 1,\n 0,\n ),\n (\n [{\"name\": \"Biscuits\", \"price\": 1.20} for _ in range(3)],\n 2,\n 1,\n 1.20,\n ),\n (\n [{\"name\": \"Biscuits\", \"price\": 1.20} for _ in range(6)],\n 2,\n 1,\n 1.20 * 2,\n ),\n ([], 2, 1, 0),\n ],\n )\n def test_rule_buy_x_get_y_free(self, products, buy, free, expected):\n special_offer = self.get_instance()\n _discount = special_offer.rule_buy_x_get_y_free(products, buy=buy, free=free)\n assert _discount == expected\n","sub_path":"shopping_basket/shopping_basket_tests/basket_pricer_tests/test_special_offers.py","file_name":"test_special_offers.py","file_ext":"py","file_size_in_byte":1951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"394826423","text":"# Copyright 2015 Red Hat, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n#\n\nfrom oslo_config import cfg\nfrom oslo_log import log as logging\n\nfrom osc_lib.i18n import _\n\nfrom tripleoclient import constants\nfrom tripleoclient import utils as oooutils\nfrom tripleoclient.v1.overcloud_deploy import DeployOvercloud\nfrom tripleoclient.workflows import package_update\n\nCONF = cfg.CONF\nlogging.register_options(CONF)\nlogging.setup(CONF, '')\n\n\nclass CephUpgrade(DeployOvercloud):\n \"\"\"Run heat stack update for overcloud nodes to run Ceph upgrade.\"\"\"\n\n log = logging.getLogger(__name__ + \".CephUpgrade\")\n\n def get_parser(self, prog_name):\n parser = super(CephUpgrade, self).get_parser(prog_name)\n parser.add_argument('--container-registry-file',\n dest='container_registry_file',\n default=None,\n help=_(\"Optional path to file with container \"\n \"registry data for the update\"),\n )\n parser.add_argument('--ceph-ansible-playbook',\n action=\"store\",\n default=\"/usr/share/ceph-ansible\"\n \"/infrastructure-playbooks\"\n \"/rolling_update.yml\",\n help=_('Path to switch the ceph-ansible playbook '\n 'used for update. '))\n return parser\n\n def take_action(self, parsed_args):\n self.log.debug(\"take_action(%s)\" % parsed_args)\n clients = self.app.client_manager\n\n stack = oooutils.get_stack(clients.orchestration,\n parsed_args.stack)\n\n stack_name = stack.stack_name\n registry = oooutils.load_container_registry(\n self.log, parsed_args.container_registry_file)\n\n # Run update\n ceph_ansible_playbook = parsed_args.ceph_ansible_playbook\n # Run Overcloud deploy (stack update)\n # In case of update and upgrade we need to force the\n # update_plan_only. 
The heat stack update is done by the\n # package_update mistral action\n parsed_args.update_plan_only = True\n\n # Add the upgrade-prepare.yaml environment to set noops etc\n templates_dir = (parsed_args.templates or\n constants.TRIPLEO_HEAT_TEMPLATES)\n parsed_args.environment_files = oooutils.prepend_environment(\n parsed_args.environment_files, templates_dir,\n constants.CEPH_UPGRADE_PREPARE_ENV)\n\n super(CephUpgrade, self).take_action(parsed_args)\n package_update.update(clients, container=stack_name,\n container_registry=registry,\n ceph_ansible_playbook=ceph_ansible_playbook)\n package_update.get_config(clients, container=stack_name)\n self.log.info(\"Ceph Upgrade on stack {0} complete.\".format(\n parsed_args.stack))\n","sub_path":"tripleoclient/v1/overcloud_ceph_upgrade.py","file_name":"overcloud_ceph_upgrade.py","file_ext":"py","file_size_in_byte":3532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"46211448","text":"from tkinter import *\r\ndef changebutton():\r\n but.destroy()\r\n secondbut=Button(root,text=\"changed\")\r\n secondbut.pack()\r\nif __name__=='__main__':\r\n root=Tk()\r\n global but\r\n but= Button(root,text=\"button\",command=changebutton)\r\n but.pack()\r\nroot.mainloop()","sub_path":"tic-tac-toe/test3.py","file_name":"test3.py","file_ext":"py","file_size_in_byte":274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"80668893","text":"#!/usr/bin/python\n\n\n\"\"\"\n\nThis tests that all the PLE4SHIP games launch, except for doom; we\nexplicitly check that it isn't defined.\n\n\n\"\"\"\n\n\nimport nose\nimport numpy as np\nimport unittest\n\nNUM_STEPS=150\n\nclass NaiveAgent():\n def __init__(self, actions):\n self.actions = actions\n def pickAction(self, reward, obs):\n return self.actions[np.random.randint(0, len(self.actions))]\n\n\nclass MyTestCase(unittest.TestCase):\n\n def run_a_game(self,game):\n from ple4ship import PLE4SHIP\n p = PLE4SHIP(game,display_screen=True)\n agent = NaiveAgent(p.getActionSet())\n p.init()\n reward = p.act(p.NOOP)\n for i in range(NUM_STEPS):\n obs = p.getScreenRGB()\n reward = p.act(agent.pickAction(reward,obs))\n\n def test_waterworld(self):\n from ple4ship.games.waterworld import WaterWorld\n game = WaterWorld()\n self.run_a_game(game)\n\n\nif __name__ == \"__main__\":\n nose.runmodule()\n","sub_path":"tests/test_ple.py","file_name":"test_ple.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"71176860","text":"import unittest\nimport mailroom_fp as m\n\n\nclass MailroomFPTest(unittest.TestCase):\n _donor = m.Donor(m.DonorSuite.__name__)\n ds = m.DonorSuite([_donor])\n ui = m.UI(ds)\n\n def test_add_name(self):\n self._donor = m.Donor('Harry Potter')\n self.assertEqual(self._donor.name, 'Harry Potter')\n # Passed\n\n def test_add_donation(self):\n self._donor.add_donation('100')\n self._donor.add_donation(7500)\n self.assertEqual(self._donor.donations, [100.0, 7500.0])\n self._donor.add_donation(['13', 360, '250'])\n self.assertEqual(self._donor.donations, [100.0, 7500.0, 13.0, 360.0, 250.0])\n # Passed\n\n def test_avg_donation(self):\n self._donor.donations = []\n self._donor.add_donation(100)\n self._donor.add_donation(400)\n self._donor.add_donation(500)\n self._donor.add_donation(360)\n self.assertEqual(self._donor.avg_donation, 340)\n # Passed\n\n def test_thank_you(self):\n self._donor.donations = []\n 
self._donor.add_donation(500)\n thank_you = self._donor.get_thank_you()\n\n expected = '''\n Dear Harry Potter,\n \n Thank you for your support through your most recent contribution of $500.00. \n Your generosity over this year has been instrumental in moving us towards our\n fundraising goal of $100,000.00 to benefit local charities. On behalf of all \n the members of the Foundation, we thank you for your generosity and look forward\n to working with you in the future to build a better world!\n \n Best wishes,\n\n Foundation Board of Directors\n \\n'''\n self.assertEqual(thank_you, expected)\n # Passed\n\n def test_get_row(self):\n self._donor.name = 'Harry Potter'\n self._donor.donations = []\n self._donor.avg_donation = 0\n\n expected = 'Harry Potter | $ 0.00 | 0 | $ 0.00 '\n self.assertEqual(self._donor.get_report_row_header(16), expected)\n # Passed\n\n def test_quit(self):\n with self.assertRaises(SystemExit):\n self.ui.quit_program()\n # Passed\n\n def test_donor_projections(self):\n self._donor.donations = []\n self._donor.add_donation(50000) # donor 1 -- Harry Potter\n self._donor.add_donation(2500)\n\n donor2 = m.Donor('Ron Weasley') # donor 2 -- Ron Weasley\n donor2.add_donation(1100)\n donor2.add_donation(3500)\n donor2.add_donation(200)\n self.ds.add_donor(donor2)\n\n donor3 = m.Donor('Hermione Granger') # donor 3 -- Hermione Granger\n donor3.add_donation(4000)\n donor3.add_donation(75)\n donor3.add_donation(20000)\n self.ds.add_donor(donor3)\n new_ds = self.ds.matching_factor(2, 2000, 20000)\n new_ds.sum_all_donations()\n self.assertEqual(new_ds.sum_donations, 60000)\n # Passed\n\n def test_count_projections(self):\n self._donor.donations = []\n self._donor.add_donation(50000) # donor 1 -- Harry Potter\n self._donor.add_donation(200)\n self._donor.add_donation(132)\n self._donor.add_donation(6500)\n self._donor.add_donation(20)\n\n sec_ds = self.ds.count_matches(20, 200)\n # self.assertEqual(sec_ds, 2)\n self.skipTest('Not able to get to run properly, returns 1 instead of 2.')\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"students/MicahBraun/Lesson 10/mailroom_test_fp.py","file_name":"mailroom_test_fp.py","file_ext":"py","file_size_in_byte":3638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"643446610","text":"import tensorflow as tf\nimport tensorflow.contrib.eager as tfe\ntf.enable_eager_execution(device_policy=tfe.DEVICE_PLACEMENT_SILENT)\nfrom model import Model\nfrom constants import *\nfrom utils import *\nimport cv2\nimport os\n\n# load model\nwith tf.device(DEVICE):\n model = Model()\n checkpoint = tfe.Checkpoint(model=model, optimizer_step=tf.train.get_or_create_global_step())\n checkpoint.restore(tf.train.latest_checkpoint(CHECKPOINT_DIR))\n print('Model loaded')\n \n# perform prediction\nif not os.path.exists(DIR_IMGS_OUT):\n os.makedirs(DIR_IMGS_OUT)\n\nMODE = 1 # webcam:0, video:1, imgs:2\nif MODE == 0:\n video = 0\nelif MODE == 1:\n video = VIDEO_IN\n\nif MODE == 0 or MODE == 1:\n with tf.device(DEVICE):\n video_capture = cv2.VideoCapture(video)\n idx_img = 0\n while video_capture.isOpened():\n # read image\n ret, img = video_capture.read()\n \n if ret==True:\n img = process_img(img)\n \n # predict\n output = model.predict(img)\n \n # write images\n img_out = draw_output(img[0], output[0].numpy())\n #cv2.imshow('prediction', img_out)\n cv2.imwrite(DIR_IMGS_OUT+ '/{:04}'.format(idx_img) + '.jpg', img_out)\n \n if cv2.waitKey(1) & 0xFF == ord('\\x1b'):\n break\n else:\n break\n \n idx_img 
+= 1\n\nif MODE == 2:\n filenames = sorted(os.listdir(DIR_TEST))\n with tf.device(DEVICE):\n for filename in filenames:\n # read and process image\n path = os.path.join(DIR_TEST, filename)\n img = cv2.imread(path)\n img = process_img(img)\n \n # predict\n output = model.predict(img)\n \n # write images\n img_out = draw_output(img[0], output[0].numpy())\n path_out = os.path.join(DIR_IMGS_OUT, filename)\n cv2.imwrite(path_out, cv2.cvtColor(img_out, cv2.COLOR_RGB2BGR))\n print('Saved image')\n","sub_path":"src/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":2073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"207519550","text":"from django.conf import settings\n\nimport numpy as np\nimport cv2\n\ndef opencv_dface(path):\n img=cv2.imread(path,1)\n \n\n if(type(img) is np.ndarray):\n baseUrl = settings.MEDIA_ROOT_URL + settings.MEDIA_URL\n face_cascade=cv2.CascadeClassifier(baseUrl+'haarcascade_frontalface_default.xml')\n\n gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n faces = face_cascade.detectMultiScale(gray,1.3,5)\n\n for (x,y,w,h) in faces:\n cv2.rectangle(img,(x,y),(x+w,y+h),(25,25,100),2)\n cv2.imwrite(path,img)\n else:\n print('something error')\n \n\n\n","sub_path":"1semester/Application/FaceDetect/FaceDetection/opencv_dface.py","file_name":"opencv_dface.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"78636603","text":"from sklearn.feature_extraction.text import TfidfVectorizer\nimport numpy as np\nimport os\ndef label_vector(DA_label):\n #vector_label=np.zeros((56,1))\n label=[\"s\",'qy','qw','qr','qrr','qo','qh','fg','fh','h','b','bk','ba','bh','aa','aap','na','ar','arp','nd','ng','am','no','co','cs','cc','f','br','bu','r','m','bs','bc','bsc','df','e','2','bd','by','fa','ft','fw','x','z','%','%-','%--','fe','t','tc','j','t1','t3','d','g','rt']\n index=0\n label_text=0\n for line in label:\n if(DA_label==line):\n label_text=index\n index +=1\n return label_text\n\n\ndef TF_IDF(path_folder):\n data=[]\n for name_file in os.listdir(path_folder):\n path_file=os.path.join(path_folder,name_file) \n file_=open(path_file,\"r\")\n for line in file_.read().split('\\n'):\n try:\n data.append(line.split(\"__\")[2])\n except IndexError:\n number=0\n vectorizer = TfidfVectorizer()\n X = vectorizer.fit_transform(data)\n return vectorizer\ndef data_train(path_file1,path_folder):\n \n vectorizer=TF_IDF(path_folder)\n file_1=open(path_file1,\"r\")\n data1=[]\n label_data=[] \n for line1 in file_1.read().split('\\n'):\n try:\n data1.append(line1.split(\"__\")[2])\n label_data.append(label_vector(line1.split(\"__\")[1].split(\":\")[0].split('^')[0].split(\"|\")[0].split(\".\")[0]))\n except IndexError:\n number=0\n Y=vectorizer.transform(data1).toarray()\n label_data=np.array(label_data).reshape((len(label_data),1))\n return Y,label_data\n","sub_path":"MRDA.py","file_name":"MRDA.py","file_ext":"py","file_size_in_byte":1763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"201041682","text":"# -*- coding: utf-8 -*-\n# @Time: 2020/4/25 14:40\n# @Author: ZhangRui\n\nfrom django.core.management.base import BaseCommand\nfrom apps.WLhelper.urls import get_all_urlpattern_set\nfrom apps.wlauth.models import WLPermissions\n\nclass Command(BaseCommand):\n\n def save_permissions(self, url_set):\n '''\n 保存权限,每个url 保存3条数据\n :param url_set: 一个集合\n :return: 保存成功返回的对象\n '''\n 
permissions = []\n for item in url_set:\n url_name = item.url_name\n namespace = item.namespace\n for describe in [x[0] for x in WLPermissions.DESCRIBES]:\n permission = WLPermissions(url_name=url_name,\n namespace=namespace,\n describe=describe)\n permissions.append(permission)\n WLPermissions.objects.all().delete()\n return WLPermissions.objects.bulk_create(permissions)\n\n def handle(self, *args, **options):\n '''\n 生成权限信息\n :param args:\n :param options:\n :return:\n '''\n url_set = get_all_urlpattern_set()\n self.stdout.write(self.style.NOTICE(f'共{len(url_set)}个URL。'))\n op = input(\"生成权限信息将会清空旧的权限信息,是否确认?(y/n)\")\n if op.lower() == 'y':\n try:\n objs = self.save_permissions(url_set)\n except Exception as e:\n print(e)\n self.stdout.write(self.style.ERROR(str(e)))\n else:\n self.stdout.write(self.style.SUCCESS(f'成功保存{len(objs)}个权限。'))\n else:\n self.stdout.write(self.style.WARNING('放弃生成权限信息!'))\n","sub_path":"weiliao/apps/wlauth/management/commands/initpermissions.py","file_name":"initpermissions.py","file_ext":"py","file_size_in_byte":1780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"88791348","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 31 14:15:06 2016\n\n@author: tai\n\"\"\"\nimport sys\nsys.path.append(\"algorithm\")\nsys.path.append(\"algorithm/input\")\nimport numpy as np\nimport main_cut_flow_algorithm as algo\nimport math\nimport possible as p\n\n\ndef count(nodes):\n \n nb_nodes_alloc = 0\n nb_empty = 0\n nb_max_load = 0\n for n in nodes.values():\n nb_empty_local = 0\n res = n.count_resources_available()\n for kind in res.keys():\n for ide in res[kind].keys():\n nb_empty_local += res[kind][ide]\n if(nb_empty_local!=n.nb_max_i*n.nb_max_r):\n nb_empty += nb_empty_local \n nb_nodes_alloc +=1\n nb_max_load += n.nb_max_i*n.nb_max_r\n if(nb_nodes_alloc==0):\n return (0,0)\n overload = nb_empty/nb_max_load\n return (nb_nodes_alloc,overload)\n \ndef count_flow(flows,pseudo_flows,semi_flows):\n count_flow = 0\n count_unit = 0 \n for f in flows.values():\n count_flow += 1\n count_unit += f.unit\n ide = np.array([])\n for sf in semi_flows.values():\n if(not abs(sf.id) in ide):\n count_flow+=1\n count_unit += sf.unit\n ide = np.append(ide,abs(sf.id))\n for pf in pseudo_flows.values():\n count_flow +=1\n count_unit += pf.unit\n return (count_flow,count_unit)\n \ndef check_chain(pc): \n gb = pc.global_path\n sp = pc.shortest_path\n \n if(pc.x==None or pc.y==None or pc.z==None): \n return False\n \n if( gb[0]!= sp[0] or gb[np.size(gb)-1] != sp[np.size(sp)-1] ):\n return False\n \n (boolean,gb) = check_function(gb,pc.x)\n if(boolean == False):\n return boolean\n \n (boolean,gb) = check_function(gb,pc.y)\n if(boolean == False):\n return boolean \n\n (boolean,gb) = check_function(gb,pc.z)\n if(boolean == False):\n return boolean\n \n return True\n\n \ndef check_function(gb,v): \n lv = np.where(gb==v)\n if(np.size(lv)==0):\n return (False,gb)\n lv = lv[0][0]\n return (True,gb[lv:]) \n \n \ndef cost(pcs,flow_unit_price,flows,nodes,node_price):\n fc = algo.flow_cost(pcs,flow_unit_price,flows)\n cn = count(nodes)\n nb_nodes = cn[0]\n return (fc,nb_nodes*node_price)\n \ndef global_path_vs_shortest_path(pcs):\n gb_result = 0\n sp_result = 0\n for pc in pcs.values():\n gb = np.size(pc.global_path)\n sp = np.size(pc.shortest_path)\n gb_result += gb\n sp_result += sp\n \n return gb_result/sp_result\n \ndef flow_cost_limit_theoritical(flows,flow_unit_price):\n cost = 0\n for f in flows.values():\n unit = f.unit\n sp = f.path\n 
cost += unit*flow_unit_price*(np.size(sp)-1)\n \n return cost\n \ndef node_cost_limit_theoritical(nodes,flows,node_price,cpu):\n \n nodes_id = np.array([])\n count = 0\n \n boolean = False\n while(boolean==False):\n if( count > len(nodes) ):\n return (math.inf,math.inf)\n nodes_id = np.append(nodes_id,count)\n count += 1\n (boolean,ea,eb,ec,ed,empty) = p.is_possible_for_greedy_algo(nodes,nodes_id,flows,cpu)\n \n nb_nodes_th = np.size(nodes_id)\n node_cost_th = nb_nodes_th*node_price\n return (node_cost_th,nb_nodes_th,ea,eb,ec,ed,empty)\n \n \n","sub_path":"vnf_placement_project/Simulation/manager/count_result.py","file_name":"count_result.py","file_ext":"py","file_size_in_byte":3468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"98224135","text":"import random\n\n\n##\n## Welcome screen\n##\n# read and print welcome screen txt file\nhangman_welcome_screen_file = open('hangman_welcome_screen.txt', 'r')\nHANGMAN_ASCII_ART = hangman_welcome_screen_file.read()\nprint(HANGMAN_ASCII_ART)\n\n##\n## Get random number of guesses\n##\n\nMAX_TRIES = random.randint(5,10)\nprint(MAX_TRIES)\n\n##\n## Start playing: input guess string\n##\nguess_word_input = input(\"Please enter a word:\")\nprint(\" _\"* len(guess_word_input))\n\n##\n## input guess letter\n##\nguess_letter_input = input(\"Guess a letter:\")\nis_alpha = guess_letter_input.isalpha()\nis_single = (len(guess_letter_input)==1)\n\nif (is_alpha) and (is_single):\n guess_letter_input = guess_letter_input.lower()\n print(guess_letter_input)\nelif (is_alpha) and not (is_single):\n print(\"E1\")\nelif not (is_alpha) and (is_single):\n print(\"E2\")\nelse: # not (is_alpha) and not (is_single)\n print(\"E3\")\n\t","sub_path":"ex-4.3.py","file_name":"ex-4.3.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"22552430","text":"# Evaluate\nimport os\nimport torch\nimport torch.nn as nn\nimport numpy as np\nimport shutil\nfrom tqdm import tqdm\nimport utils.model_utils as model_utils\nimport torch.distributed as dist\nfrom utils.train_utils import sum_bbox_ratios, reduce_loss_dict\n\nimport utils.vis_utils as vis_utils\nfrom maskrcnn_benchmark.utils.comm import synchronize, get_rank\nfrom utils.utils_misc import *\n\n\n\n\ndef save_checkpoint(state, is_best, filename='checkpoint.pth.tar', epoch=0, tid=0, checkpoint_path='.'):\n if epoch is not None:\n filename = os.path.join(checkpoint_path, filename.replace('.pth.tar', '_epoch%d_tid%d_.pth.tar'%(epoch, tid)))\n torch.save(state, filename)\n print('Saved to ...' 
+ filename)\n if is_best:\n print(\"best\", state[\"eval_loss\"])\n shutil.copyfile(filename, os.path.join(checkpoint_path, 'model_best.pth.tar'))\n else:\n print(\"NOT best\", state[\"eval_loss\"])\n\ndef eval_epoch_cvpr_RCNN(model, validation_loader, epoch, tid, device, writer, scheduler, best_loss, logger, opt, max_iter=-1, if_vis=False, if_loss=True, prepostfix='', savepath=''):\n eval_loss_list, eval_loss_horizon_list, eval_loss_pitch_list, eval_loss_roll_list, eval_loss_vfov_list = [], [], [], [], []\n if opt.distributed:\n rank = dist.get_rank()\n else:\n rank = 0\n\n loss_func = nn.CrossEntropyLoss()\n model.eval()\n\n return_dict_list = []\n return_dict_epoch = {}\n\n with torch.no_grad():\n with tqdm(total=len(validation_loader)) as t:\n t.set_description('Ep.{} Eval'.format(epoch))\n\n for i, (im_paths_SUN360, inputSUN360_Image_yannickTransform_list, \\\n horizon_dist_gt, pitch_dist_gt, roll_dist_gt, vfov_dist_gt, metadata, \\\n pitch_list, roll_list, vfov_list, horizon_list, focal_length_35mm_eq_list, sensor_size_list, W_list, H_list, idx1, idx2, idx3, idx4) in enumerate(validation_loader):\n\n horizon_dist_gt, pitch_dist_gt, roll_dist_gt, vfov_dist_gt = horizon_dist_gt.to(device), pitch_dist_gt.to(device), roll_dist_gt.to(device), vfov_dist_gt.to(device)\n horizon_idx_gt, pitch_idx_gt, roll_idx_gt, vfov_idx_gt = idx1.to(device), idx2.to(device), idx3.to(device), idx4.to(device)\n\n list_of_oneLargeBbox_list_cpu = model_utils.oneLargeBboxList(W_list, H_list)\n list_of_oneLargeBbox_list = [bbox_list_array.to(device) for bbox_list_array in list_of_oneLargeBbox_list_cpu]\n\n input_dict_misc = {'rank': rank, 'data': 'SUN360', 'device': device, 'tid': tid}\n output_RCNN = model(input_dict_misc = input_dict_misc, image_batch_list=inputSUN360_Image_yannickTransform_list, list_of_oneLargeBbox_list=list_of_oneLargeBbox_list)\n output_horizon = output_RCNN['output_horizon']\n output_pitch = output_RCNN['output_pitch']\n output_roll = output_RCNN['output_roll']\n output_vfov = output_RCNN['output_vfov']\n\n if if_loss:\n # loss_horizon = nn.functional.kl_div(nn.functional.log_softmax(output_horizon, dim=1), horizon_dist_gt, reduction='batchmean')\n # loss_pitch = nn.functional.kl_div(nn.functional.log_softmax(output_pitch, dim=1), pitch_dist_gt, reduction='batchmean')\n # loss_roll = nn.functional.kl_div(nn.functional.log_softmax(output_roll, dim=1), roll_dist_gt, reduction='batchmean')\n # loss_vfov = nn.functional.kl_div(nn.functional.log_softmax(output_vfov, dim=1), vfov_dist_gt, reduction='batchmean')\n loss_horizon = loss_func(output_horizon, horizon_idx_gt)\n loss_pitch = loss_func(output_pitch, pitch_idx_gt)\n loss_roll = loss_func(output_roll, roll_idx_gt)\n loss_vfov = loss_func(output_vfov, vfov_idx_gt)\n\n loss_dict = {'loss_horizon': loss_horizon, 'loss_pitch': loss_pitch, \\\n 'loss_roll': loss_roll, 'loss_vfov': loss_vfov}\n loss_dict_reduced = reduce_loss_dict(loss_dict, mark=i, logger=logger)\n loss_reduced = sum(loss for loss in loss_dict_reduced.values())\n\n # loss = loss_horizon + loss_pitch + loss_roll + loss_vfov\n\n eval_loss_list.append(loss_reduced.item())\n eval_loss_horizon_list.append(loss_dict_reduced['loss_horizon'].item())\n eval_loss_pitch_list.append(loss_dict_reduced['loss_pitch'].item())\n eval_loss_roll_list.append(loss_dict_reduced['loss_roll'].item())\n eval_loss_vfov_list.append(loss_dict_reduced['loss_vfov'].item())\n toreport = {\n \"loss\": loss_reduced.item(),\n \"horizon\": loss_dict_reduced['loss_horizon'].item(),\n \"pitch\": 
loss_dict_reduced['loss_pitch'].item(),\n \"roll\": loss_dict_reduced['loss_roll'].item(),\n \"vfov\": loss_dict_reduced['loss_vfov'].item(),\n }\n t.set_postfix(**toreport)\n t.update()\n\n if i < 5 and if_vis:\n _, return_dict = vis_utils.vis_SUN360(tid, savepath, im_paths_SUN360, output_horizon, output_pitch, output_roll, output_vfov, horizon_list, pitch_list, roll_list, vfov_list, focal_length_35mm_eq_list, sensor_size_list, rank, \\\n if_vis=i < 10, if_save=True, logger=logger, prepostfix=prepostfix, idx_sample=i)\n return_dict_list.append(return_dict)\n\n synchronize()\n\n if max_iter != -1 and i > max_iter:\n break\n\n if if_loss:\n eval_loss_sum_SUN360 = sum(eval_loss_list) / len(validation_loader)\n eval_loss_horizon = sum(eval_loss_horizon_list) / len(validation_loader)\n eval_loss_pitch = sum(eval_loss_pitch_list) / len(validation_loader)\n eval_loss_roll = sum(eval_loss_roll_list) / len(validation_loader)\n eval_loss_vfov = sum(eval_loss_vfov_list) / len(validation_loader)\n t.set_postfix(loss=eval_loss_sum_SUN360)\n\n if rank == 0:\n writer.add_scalar('loss_eval/eval_loss_sum_SUN360', eval_loss_sum_SUN360, tid)\n writer.add_scalar('loss_eval/eval_loss_horizon', eval_loss_horizon, tid)\n writer.add_scalar('loss_eval/eval_loss_pitch', eval_loss_pitch, tid)\n writer.add_scalar('loss_eval/eval_loss_roll', eval_loss_roll, tid)\n writer.add_scalar('loss_eval/eval_loss_vfov', eval_loss_vfov, tid)\n\n writer.add_histogram('loss/eval_loss_hist', np.asarray(eval_loss_list), tid, bins=\"doane\")\n writer.add_histogram('loss/eval_loss_horizon_hist', np.asarray(eval_loss_horizon_list), tid, bins=\"doane\")\n writer.add_histogram('loss/eval_loss_pitch_hist', np.asarray(eval_loss_pitch_list), tid, bins=\"doane\")\n writer.add_histogram('loss/eval_loss_roll_hist', np.asarray(eval_loss_roll_list), tid, bins=\"doane\")\n writer.add_histogram('loss/eval_loss_vfov_hist', np.asarray(eval_loss_vfov_list), tid, bins=\"doane\")\n\n return_dict_epoch.update({'eval_loss_sum_SUN360': eval_loss_sum_SUN360})\n\n # writer.flush()\n\n if if_vis and rank == 0:\n horizon_all = merge_list_of_lists([return_dict['horizon_list'] for return_dict in return_dict_list])\n pitch_all = merge_list_of_lists([return_dict['pitch_list'] for return_dict in return_dict_list])\n roll_all = merge_list_of_lists([return_dict['roll_list'] for return_dict in return_dict_list])\n vfov_all = merge_list_of_lists([return_dict['vfov_list'] for return_dict in return_dict_list])\n f_mm_all = merge_list_of_lists([return_dict['f_mm_list'] for return_dict in return_dict_list])\n\n writer.add_histogram('dist/horizon_all', np.asarray(horizon_all), tid, bins=\"doane\")\n writer.add_histogram('dist/pitch_all', np.asarray(pitch_all)/np.pi*180., tid, bins=\"doane\")\n writer.add_histogram('dist/roll_all', np.asarray(roll_all)/np.pi*180., tid, bins=\"doane\")\n writer.add_histogram('dist/vfov_all', np.asarray(vfov_all)/np.pi*180., tid, bins=\"doane\")\n writer.add_histogram('dist/f_mm_all', np.asarray(f_mm_all), tid, bins=\"doane\")\n\n # writer.flush()\n\n # if if_loss and scheduler is not None:\n # scheduler.step(eval_loss)\n #\n # # Save checkpoint\n # is_best = False\n # if eval_loss < best_loss:\n # is_best = True\n # best_loss = eval_loss\n #\n # # checkpoint = {\n # # 'epoch': epoch,\n # # 'tid': tid,\n # # 'state_dict': model.state_dict(),\n # # 'train_loss': train_loss,\n # # 'eval_loss': eval_loss,\n # # 'optimizer': optimizer.state_dict(),\n # # }\n # #\n # # save_checkpoint(checkpoint, is_best, epoch=epoch, tid=tid, 
checkpoint_path=checkpoint_path)\n # # del checkpoint\n #\n # model.train()\n\n return return_dict_epoch\n","sub_path":"RELEASE_SUN360_camPred_minimal/eval_epoch_cvpr_RCNN.py","file_name":"eval_epoch_cvpr_RCNN.py","file_ext":"py","file_size_in_byte":9447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"432771046","text":"# Authors: Sam Champer, Andi Nosler\n# Partially uses some starter code from Daniel van Flymen (https://github.com/dvf/blockchain)\n# along with lots of additional code by the authors to implement the specific needs of a blockchain enabled election.\n\nfrom uuid import uuid4\nfrom flask import Flask, jsonify, request, render_template\nfrom argparse import ArgumentParser\nfrom blockchain import Blockchain\nimport requests\nfrom time import sleep\nfrom werkzeug.contrib.fixers import ProxyFix\nfrom urllib.parse import urlparse\nimport atexit\nfrom simplelog import *\n\n\n# Instantiate the blockchain node in flask:\napp = Flask(__name__)\n\n# Generate a globally unique address for this node:\nnode_identifier = str(uuid4()).replace('-', '')\n\n# Instantiate the blockchain for this node:\nblockchain = Blockchain()\n\n\n@app.route('/')\n@app.route('/index')\n@app.route('/index.html')\ndef index():\n \"\"\"\n Render the index page.\n \"\"\"\n return render_template('index.html')\n\n\n@app.route('/chain/', methods=['GET'])\ndef full_chain():\n \"\"\"\n App route to call for sending the chain.\n \"\"\"\n response = {\n 'chain': blockchain.chain,\n 'length': len(blockchain.chain),\n }\n return jsonify(response), 200\n\n\n@app.route('/nodes/', methods=['GET'])\ndef send_node_list():\n \"\"\"\n App route to call to return a list of all nodes this node is connected to.\n \"\"\"\n response = {'nodes': list(blockchain.nodes)}\n return jsonify(response), 200\n\n\n@app.route('/results/get_results/', methods=['GET'])\ndef fetch_results():\n \"\"\"\n If a user is checking the results of the vote, pull the latest chain,\n resolve conflicts, and then display the wallet balances of all the candidates.\n \"\"\"\n blockchain.resolve_conflicts()\n # If, at some point in the past, there was a parallel operation that resulted in the node failing to respond\n # to another node's request, this node may have been incorrectly pruned from that node's list of active nodes.\n # To correct this, send a reciprocation request to all nodes that just responded by sending this node a chain.\n # This might add a tiny bit to server overhead, and it solves a parallelization problem that probably won't happen,\n # but it makes the system a tiny bit more robust:\n for node in blockchain.nodes:\n try:\n requests.post(\"http://\" + node + \"/recip/\", json={'port': port})\n except:\n continue\n # This node may have had the most up to date chain, yet still have pending transactions.\n # if so, add a block into which any pending transactions can be added.\n if blockchain.current_transactions:\n last_block = blockchain.last_block\n proof = blockchain.proof_of_work(last_block)\n previous_hash = blockchain.hash(last_block)\n blockchain.new_block(proof, previous_hash)\n\n # Now that we have the most up to date chain, fetch the candidates' wallet balances.\n with open(\"vote_params.txt\", 'r') as f:\n vote_params = f.read()\n candidates = vote_params.split(\"Candidates:\")[1].split('\\n')\n candidates = list(filter(lambda x: x != \"\", candidates))\n\n data = dict()\n for candidate in candidates:\n data[candidate] = blockchain.balance_check(candidate)\n return 
jsonify(data), 200\n\n\n@app.route('/results/', methods=['GET'])\ndef display_results():\n \"\"\"\n App route for the results page.\n \"\"\"\n return render_template('results.html')\n\n\n@app.route('/vote/', methods=['post'])\ndef submit_vote():\n \"\"\"\n Receive a post from the HTML with the information for\n a new vote transaction.\n \"\"\"\n vote_number = int(request.form[\"id\"])\n signature = request.form[\"key\"]\n recipient = request.form[\"candidate\"]\n sender = blockchain.get_transactor(vote_number)\n if not sender:\n # Failure if user trying to cast non-existent vote.\n return jsonify({\"status\": \"fail\"})\n\n vote = blockchain.new_transaction(\n sender=sender,\n recipient=recipient,\n amount=1,\n signature=signature,\n vote_number=vote_number\n )\n if not blockchain.valid_transaction(vote, blockchain.chain):\n return jsonify({\"status\": \"fail\"})\n if not blockchain.valid_balance(vote):\n return jsonify({\"status\": \"fail\"})\n # Do the above checks in order to display to html if the vote is valid.\n # Note: the blockchain will do these checks independently, so even if a malicous\n # party were to remove these checks from their code and then start a node and connect\n # to the other nodes, an illegitamate transaction still won't be accepted, since\n # each legitimate node will perform these checks on the transaction before\n # accepting a chain with this transaction in it.\n\n # Transaction appears valid. Add it and any pending transactions to a new block:\n # Run the proof of work algorithm to get the next proof:\n last_block = blockchain.last_block\n proof = blockchain.proof_of_work(last_block)\n previous_hash = blockchain.hash(last_block)\n block = blockchain.new_block(proof, previous_hash)\n # Return fail if transaction somehow was not properly placed in the block.\n if not block['transactions']:\n return jsonify({\"status\": \"fail\"})\n if vote not in block['transactions']:\n return jsonify({\"status\": \"fail\"})\n # Transaction successfully added to new block. Broadcast the new transaction to other nodes.\n broadcast_transaction(vote)\n # HTML will now redirect to page for checking vote.\n return jsonify({\"status\": \"success\"})\n\n\ndef broadcast_transaction(transaction):\n \"\"\"\n Broadcast a valid transaction that ths node received to\n every node that this one is linked to.\n :param transaction: a vote transaction\n \"\"\"\n if len(blockchain.nodes):\n log(\"BROADCASTING TRANSACTION TO CONNECTED NODES.\")\n for node in blockchain.nodes:\n try:\n attempts = 0\n while attempts < 2:\n response = requests.post(\"http://\" + node + \"/external_transaction/\",\n json={'sender': transaction['sender'],\n 'recipient': transaction['recipient'],\n 'amount': transaction['amount'],\n 'signature': transaction['signature'],\n 'vote_number': transaction['vote_number']\n })\n attempts += 1\n if response:\n break\n else:\n sleep(1)\n except:\n continue\n\n\n@app.route('/external_transaction/', methods=['post'])\ndef external_transaction():\n \"\"\"\n Add a transaction from an external source to the list of\n pending transactions for the next block. 
Don't actually bother\n checking the transaction: it will be checked next time a new block\n is formed, which will occur when/if another vote is cast on this server,\n or when someone checks the results of the vote on this server.\n \"\"\"\n log(\"RECEIVED TRANSACTION FROM EXTERNAL SOURCE.\")\n values = request.get_json(force=True)\n sender = values['sender']\n recipient = values['recipient']\n amount = int(values['amount'])\n signature = values['signature']\n vote_number = int(values['vote_number'])\n vote = blockchain.new_transaction(\n sender=sender,\n recipient=recipient,\n amount=amount,\n signature=signature,\n vote_number=vote_number\n )\n return jsonify(vote), 200\n\n\n@app.route('/recip/', methods=['post'])\ndef reciprocate_acknowledgement():\n \"\"\"\n Route that requests that this node reciprocate acknowledgement of a remote node.\n \"\"\"\n values = request.get_json(force=True)\n log(\"RECEIVED RECIPROCATION REQUEST FROM {}\".format(request.remote_addr + \":\" + str(values['port'])))\n blockchain.register_node(request.remote_addr + \":\" + str(values['port']))\n response = {\n 'message': 'New node added',\n 'nodes': list(blockchain.nodes)\n }\n return jsonify(response), 200\n\n\n@app.route('/remove/', methods=['post'])\ndef remove_node():\n \"\"\"\n App route for a terminating node to call in order to remove itself from other nodes.\n \"\"\"\n values = request.get_json(force=True)\n log(\"RECEIVED REQUEST TO REMOVE NODE: {}\".format(request.remote_addr + \":\" + str(values['port'])))\n blockchain.remove_node(request.remote_addr + \":\" + str(values['port']))\n response = {\n 'message': 'Node removed',\n 'nodes': list(blockchain.nodes)\n }\n return jsonify(response), 200\n\n\ndef initialize(chain_source):\n \"\"\"\n Link up to an election node or a new election miner node\n and import a blockchain from that node.\n \"\"\"\n if chain_source[-1] != '/':\n chain_source += '/'\n input_source = chain_source[:]\n\n parsed_url = urlparse(chain_source)\n if parsed_url.netloc:\n chain_source = parsed_url.netloc\n elif parsed_url.path:\n # Accepts a URL like '192.168.0.5:5000'.\n chain_source = parsed_url.path\n else:\n raise ValueError('Invalid source URL. Maybe it was a typo?')\n\n blockchain.register_node(input_source)\n print(\"\\n Querying source: {}\".format(\"http://\" + chain_source + \"/nodes/\"))\n response = None\n for i in range(5):\n try:\n response = requests.get(\"http://\" + chain_source + \"/nodes/\")\n if response.status_code:\n break\n except:\n print(\" Connection to {} source failed, retrying. Attempt {} of 5\".format(\n \"default\" if input_source == \"http://127.0.0.1:4999/\" else \"specified\", i + 1))\n sleep(2)\n i += 1\n if i == 4:\n print(\"\\n ***Connection failed. Maybe that server isn't alive right now? Please try again. 
***\")\n quit()\n\n # Nodes only respond 200 if they are peer nodes, not an initiation node,\n # which simply shuts down after it passes on the blockchain.\n if response.status_code == 200:\n # List of nodes connected to our target source.\n connected_nodes = response.json()['nodes']\n # Ask for recip with target source:\n response = requests.post(\"http://\" + chain_source + \"/recip/\", json={'port': port})\n if len(connected_nodes):\n print(\" Registering nodes connected to target node and requesting reciprocation.\")\n for node in connected_nodes:\n response = None\n try:\n response = requests.post(\"http://\" + node + \"/recip/\", json={'port': port})\n if response:\n blockchain.register_node(node)\n except:\n continue\n print(\" Connected established with the following nodes:\")\n for node in blockchain.nodes:\n print(\" {}\".format(node))\n\n initialize_from_source = blockchain.resolve_conflicts()\n # A key feature of using blockchains in an election is that votes cannot be 'mined' after the\n # initial blockchain is set up, though transactions can still be added to blocks with zero value.\n blockchain.value_lock()\n if initialize_from_source:\n print(\"\\n ***Local blockchain has been initialized to match the specified source!***\\n\")\n else:\n print(\"\\n ***Failed to import blockchain from the specified source. \"\n \"Try a different source or maybe just panic?***\")\n quit()\n\n if response.status_code == 204:\n # If the target node was an initialization type node, it is terminated after it passes on a chain.\n blockchain.remove_node(input_source[:-1]) # The [:-1] removes the slash from the end of the source address.\n\n\ndef exit_func():\n print(\"\\n Shutting down node...\")\n for node in blockchain.nodes:\n # Have one of the other nodes resolve the chain, so that if this node has the longest chain,\n # the chain is sent over to a node that is not exiting. 
This is not strictly necessary,\n # since transactions are shared between nodes as come in, but this should still help keep things clean.\n try:\n response = requests.get(\"http://\" + node + \"/resolve/\")\n if response:\n break\n except:\n continue\n # Tell other nodes to remove this node from their lists of nodes.\n # Not strictly necessary, just less time wasted pinging this address later.\n for node in blockchain.nodes:\n try:\n requests.post(\"http://\" + node + \"/remove/\", json={'port': port})\n except:\n continue\n print(\" Have a nice day.\")\n\n\napp.wsgi_app = ProxyFix(app.wsgi_app)\n\n\nif __name__ == '__main__':\n atexit.register(exit_func)\n parser = ArgumentParser()\n parser.add_argument('-p', '--port', default=5000, type=int, help='port to listen on')\n parser.add_argument('-src', '--source', default=\"http://127.0.0.1:4999/\", type=str,\n help='port to listen on')\n parser.add_argument('-log', '--logging', dest='log_output', action='store_true',\n help=' Add -log to output more verbose logging statements.')\n parser.set_defaults(log_output=False)\n args = parser.parse_args()\n if args.log_output:\n init_logger()\n port = args.port\n source = args.source\n initialize(source)\n # Initialize the app on the desired port:\n app.run(host='0.0.0.0', port=port, threaded=True)\n","sub_path":"vote_manager_node.py","file_name":"vote_manager_node.py","file_ext":"py","file_size_in_byte":13583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"150127998","text":"import sqlite3\nfrom io import BytesIO\nfrom flask import send_file\nfrom random import seed, randint\nimport time\nseed(int(time.time()))\n\ndef getConnection():\n _Connection, x = getCommitalConnection()\n return _Connection\n\ndef getCommitalConnection():\n _DB_FullPath = 'app/webpages.db'\n _Connection = sqlite3.connect(_DB_FullPath)\n _Cursor = _Connection.cursor()\n return _Cursor, _Connection\n\ndef commitChanges(_Connection):\n _Connection.commit()\n\n\n'''\nDB Layout\nKey:\nPID - PageID\nUID - UserID\nTID - Thread ID\nP_ID - Post ID\nDOC - Date of Creation\nIcon - Path to Icon image\n PAGES:PID|Name\n PAGES_DATA:PID|Path|LoginReq|Description|Icon\n PAGES_CONTENT:PID|Content\n PAGES_HISTORY:PID|UID|ChangeDTG|Content\n USERS:UID|Username\n USERS_DATA:UID|PW|Email|Verified|DOC|Admin\n FORUM:TID|ThreadName\n FORUM_THREAD_DATA:TID|Visible|DOC|UID|Content\n FORUM_POSTS:TID|P_ID|UID\n FORUM_POST_DATA:P_ID|UID|DOC|Content|Visible\n'''\n\ndef get_data(table, ID):\n _Cursor = getConnection()\n return _Cursor.execute('SELECT * FROM \"' + table + '\" WHERE ID IS \"' + str(ID) + '\"').fetchall()\n\ndef _get_page(PID):\n _Cursor = getConnection()\n return _Cursor.execute('SELECT Content FROM \"PAGES_CONTENT\" WHERE PID = \"' + str(PID) + '\"').fetchall()[0][0]\n\ndef _get_image(PID):\n _Cursor = getConnection()\n return _Cursor.execute('SELECT Image FROM \"PAGES_CONTENT\" WHERE PID = \"' + str(PID) + '\"').fetchall()[0][0]\n\ndef _get_user(UID, PW):\n # If curosor returns valid user, return true\n return False\n\ndef login(UN, PW):\n _Cursor = getConnection()\n _Cursor.execute('SELECT TOP FROM \"PAGES_CONTENT\" WHERE PID = \"' + str(PID) + '\"').fetchall()\n\ndef pageAccess(UID, PageName, PageType):\n return True\n\ndef _page_exists(PageName, PageType):\n return _get_PID(PageName, PageType) != False\n\ndef _get_PID(PageName, PageType):\n _Cursor = getConnection()\n try:\n PID = _Cursor.execute('SELECT PID FROM \"PAGES\" WHERE NAME = \"' + PageName + '\" AND Type = \"' + PageType 
+ '\"').fetchall()[0][0]\n return PID\n except:\n return False\n return False\n\n\ndef get_page(PageName, PageType):\n _Cursor = getConnection()\n try:\n PID = _get_PID(PageName, PageType)\n if PID == False:\n return _get_page(_get_PID('404', 'html'))\n elif PageType == 'png':\n img = _get_image(PID)\n return send_file(\n BytesIO(img),\n mimetype='image/png'\n )\n else:\n return _get_page(PID)\n except:\n return _get_page(_get_PID('404', 'html'))\n\ndef _getUniquePID():\n _Cursor = getConnection()\n PID = randint(10, 100000)\n exists = (len(_Cursor.execute('SELECT * FROM \"PAGES\" WHERE PID = ' + str(PID)).fetchall()) != 0)\n return PID\n\n\ndef save_page(PageName, PageType, Content):\n _Cursor, _Connection = getCommitalConnection()\n if _page_exists(PageName, PageType):\n PID = _get_PID(PageName, PageType)\n # Add current copy to History\n # Add User ID and junk\n # Save New Contents\n _Cursor.execute('DELETE FROM \"PAGES_CONTENT\" WHERE PID = ' + str(PID))\n _Cursor.execute('INSERT INTO \"PAGES_CONTENT\" (PID, Content) VALUES(' + str(PID) + ', \"' + Content.replace(\"\\\"\", \"\\\"\\\"\") + '\")')\n commitChanges(_Connection)\n else:\n PID = _getUniquePID()\n _Cursor.execute('INSERT INTO \"PAGES\" (PID, Name, Type) VALUES(' + PID + ', \"' + PageName + '\", \"' + PageType + '\")')\n _Cursor.execute('INSERT INTO \"PAGES_CONTENT\" (PID, Content) VALUES(' + PID + ', \"' + Content.replace(\"\\\"\", \"\\\"\\\"\") + '\")')\n commitChanges(_Connection)\n\n ","sub_path":"app/database_access.py","file_name":"database_access.py","file_ext":"py","file_size_in_byte":3688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"221185203","text":"import sys\r\nif len(sys.argv) < 2:\r\n\tprint(sys.argv[0]+' file')\r\n\tsys.exit(1)\r\n\r\nimport os\r\nfilePath = sys.argv[1]\r\nfileName = os.path.basename(filePath)\r\nf = open(filePath, 'r+')\r\nfileContent = f.read()\r\nimport re\r\nf.truncate(0)\r\nf.seek(0)\r\nf.write( re.sub(fileName[:-3]+'Lexer', fileName[:-3] , fileContent) )\r\n","sub_path":"tests/tools/pypatch.py","file_name":"pypatch.py","file_ext":"py","file_size_in_byte":312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"386491459","text":"import numpy as np\nimport time\nimport logging\nimport cloudpickle\n\nfrom hypersched.tune import ResourceTrainable\nfrom hypersched.function import OptimusFunction\n\nfrom ray import tune\nfrom ray.tune.trial import Resources\n\nlogger = logging.getLogger(__name__)\n\nDEFAULT_SCALING = {1: 1, 2: 2, 4: 4, 8: 8, 16: 16}\n\n\nDEFAULT_CONFIG = {\n \"seed\": None,\n \"delay\": 0.1,\n \"startup_delay\": 0.001,\n \"param1\": 0.1,\n \"param2\": 0.1,\n \"param3\": 0.1,\n \"scaling\": None,\n \"dummy\": False,\n}\n\nDEFAULT_HSPACE = {\n \"param1\": tune.sample_from(lambda spec: np.random.exponential(0.1)),\n \"param2\": tune.sample_from(lambda _: np.random.rand()),\n \"param3\": tune.sample_from(lambda _: np.random.rand()),\n}\n\nDEFAULT_MULTIJOB_CONFIG = {\n # Model setup time can be 20, overall first epoch setup can take up to 100\n \"min_allocation\": 5,\n \"max_allocation\": 500,\n \"time_attr\": \"training_iteration\",\n}\n\n\nclass OptimusTrainable(ResourceTrainable):\n dummy = False\n metric = \"mean_accuracy\"\n\n @classmethod\n def to_atoms(cls, resource):\n return int(resource.cpu)\n\n @classmethod\n def to_resources(cls, atoms):\n return Resources(cpu=atoms, gpu=0)\n\n def _setup(self, config):\n self.iter = 0\n self._next_iteration_start = 
time.time()\n self._time_so_far = 0\n if config.get(\"dummy\"):\n self.dummy = True\n if config.get(\"seed\"):\n np.random.seed(config[\"seed\"])\n self._delay = config[\"delay\"]\n time.sleep(config.get(\"startup_delay\", 0))\n params = [\n config[\"param1\"],\n config[\"param2\"],\n config[\"param3\"],\n ]\n self._initial_samples_per_step = 500\n self.func = OptimusFunction(\n params=params, scaling=self.config[\"scaling\"]\n )\n\n def _train(self):\n time.sleep(self._delay / self.func.scaling(self.atoms))\n self.iter += 1\n if self.dummy:\n return {\n \"mean_loss\": -self.iter,\n \"mean_accuracy\": self.iter,\n \"samples\": self._initial_samples_per_step,\n }\n new_loss = self.func.step(1, self.iter)\n return {\n \"mean_loss\": float(new_loss),\n \"mean_accuracy\": (2 - new_loss) / 2,\n \"samples\": self._initial_samples_per_step,\n }\n\n def _save(self, checkpoint_dir):\n return {\n \"func\": cloudpickle.dumps(self.func),\n \"seed\": np.random.get_state(),\n }\n\n def _restore(self, checkpoint):\n self.func = cloudpickle.loads(checkpoint[\"func\"])\n np.random.set_state(checkpoint[\"seed\"])\n","sub_path":"hypersched/tune/trainables/toy_trainable.py","file_name":"toy_trainable.py","file_ext":"py","file_size_in_byte":2641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"105021927","text":"import os\nimport sys\nimport pytz\nfrom datetime import datetime, timedelta\n\nCURRENT_DIR = os.path.dirname(__file__)\nsys.path.append(CURRENT_DIR)\n\nfrom utils.db_imports import import_dataset\n\nDATASET_NAME = 'COVID-19 - Vaccinations'\nOUTPUT_PATH = os.path.join(CURRENT_DIR, '../grapher/')\nOUTPUT_CSV_PATH = os.path.join(OUTPUT_PATH, f\"{DATASET_NAME}.csv\")\nZERO_DAY = \"2020-01-21\"\n\ndef update_db():\n time_str = (datetime.now() - timedelta(minutes=10)).astimezone(pytz.timezone('Europe/London')).strftime(\"%-d %B, %H:%M\")\n source_name = f\"Official data collated by Our World in Data – Last updated {time_str} (London time)\"\n import_dataset(\n dataset_name=DATASET_NAME,\n namespace='owid',\n csv_path=OUTPUT_CSV_PATH,\n default_variable_display={\n 'yearIsDay': True,\n 'zeroDay': ZERO_DAY\n },\n source_name=source_name,\n slack_notifications=False\n )\n","sub_path":"scripts/scripts/global_vaccinations.py","file_name":"global_vaccinations.py","file_ext":"py","file_size_in_byte":924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"226453548","text":"#!/usr/bin/env python3\n\nbin_l = bin(int(input().strip()))\nbin_r = bin(int(input().strip()))\n\nlen_l = len(bin_l) - 2\nlen_r = len(bin_r) - 2\n\nif len_l != len_r:\n len_longer = max(len_l, len_r)\n print(pow(2, len_longer) - 1)\nelse:\n first_diff = 0\n for i in range(len_l):\n if bin_l[i + 2] != bin_r[i + 2]:\n first_diff = len_l - i\n break\n print(pow(2, first_diff) - 1) \n","sub_path":"hacker_rank/algorithm/10.bit_manipulation/02.maximizing_xor.py","file_name":"02.maximizing_xor.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"273897057","text":"import pytest\nimport tempfile\n\nfrom calliope import examples\nfrom calliope.utils import AttrDict\nfrom . 
import common\nfrom .common import assert_almost_equal, solver, solver_io\n\ndef create_and_run_model(override=\"\"):\n locations = \"\"\"\n locations:\n 1:\n techs: ['ccgt', 'demand_power', 'unmet_demand_power']\n override:\n ccgt:\n constraints:\n e_cap.max: 40\n demand_power:\n constraints:\n r: -10\n 2:\n techs: ['ccgt', 'demand_power', 'unmet_demand_power']\n override:\n ccgt:\n constraints:\n e_cap.max: 30\n costs:\n e_cap: 3000\n demand_power:\n constraints:\n r: -10\n links:\n 1,2:\n hvac:\n constraints:\n e_cap.max: 100\n \"\"\"\n config_run = \"\"\"\n mode: plan\n model: ['{techs}', '{locations}']\n subset_t: ['2005-01-01', '2005-01-01']\n \"\"\"\n override = AttrDict.from_yaml_string(override)\n override.set_key('solver', solver)\n override.set_key('solver_io', solver_io)\n with tempfile.NamedTemporaryFile(delete=False) as f:\n f.write(locations.encode('utf-8'))\n f.read()\n model = common.simple_model(config_run=config_run,\n config_locations=f.name,\n override=override)\n model.run()\n return model\n\nclass TestModel:\n @pytest.fixture(scope='module')\n def model(self, override=\"\"):\n locations = \"\"\"\n locations:\n 1:\n techs: []\n 2:\n techs: ['demand_power']\n override:\n demand_power:\n constraints:\n r: -90\n sub1,sub2:\n within: 1\n techs: ['ccgt']\n override:\n ccgt:\n constraints:\n e_cap.max: 60\n links:\n 1,2:\n hvac:\n constraints:\n e_eff: 0.90\n e_cap.max: 100\n \"\"\"\n config_run = \"\"\"\n mode: plan\n model: ['{techs}', '{locations}']\n subset_t: ['2005-01-01', '2005-01-02']\n \"\"\"\n override = AttrDict.from_yaml_string(override)\n override.set_key('solver', solver)\n override.set_key('solver_io', solver_io)\n with tempfile.NamedTemporaryFile(delete=False) as f:\n f.write(locations.encode('utf-8'))\n f.read()\n model = common.simple_model(config_run=config_run,\n config_locations=f.name,\n override=override)\n model.run()\n return model\n\n def test_model_solves(self, model):\n assert str(model.results.solver.termination_condition) == 'optimal'\n\n def test_model_balanced(self, model):\n sol = model.solution\n assert sol['e'].loc[dict(c='power', y='ccgt')].sum(dim='x').mean() == 100\n assert (sol['e'].loc[dict(c='power', y='hvac:1')].sum(dim='x') ==\n -1 * sol['e'].loc[dict(c='power', y='demand_power')].sum(dim='x')).all()\n\n def test_model_costs(self, model):\n sol = model.solution\n assert_almost_equal(sol['summary'].to_pandas().loc['ccgt', 'levelized_cost_monetary'], 0.1)\n\n def test_one_way(self):\n \"\"\"\n Check that one_way transmission can be forced using 'one_way' in model\n formulation.\n \"\"\"\n override=\"\"\"\n override.links:\n X1,N1:\n heat_pipes:\n constraints:\n one_way: true\n N1,X2:\n heat_pipes:\n constraints:\n one_way: true\n N1,X3:\n heat_pipes:\n constraints:\n one_way: true\n \"\"\"\n model = examples.UrbanScale(override=AttrDict.from_yaml_string(override))\n\n model.run()\n sol = model.solution\n\n # Usual urban scale model has non-zero transmission along each of these\n # links, one_way forces them to zero\n assert_almost_equal(sol.c_con.loc[\n dict(y='heat_pipes:N1', c='heat', x='X2')], 0, 0.1)\n assert_almost_equal(sol.c_con.loc[\n dict(y='heat_pipes:N1', c='heat', x='X3')], 0, 0.1)\n assert_almost_equal(sol.c_con.loc[\n dict(y='heat_pipes:X1', c='heat', x='N1')], 0, 0.1)\n","sub_path":"calliope/test/test_model_transmission.py","file_name":"test_model_transmission.py","file_ext":"py","file_size_in_byte":5043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} 
+{"seq_id":"214285201","text":"import re\n\nexp = ['([A-Za-z|ää|öö])\\\\1', 'ks', 'ts', 'k(u[aeiouy])', 'k', 'j', '[ä|ö]']\nexp2 = ['\\\\1', 'x', 'z', 'q\\\\1', 'c', 'i', 'e']\n\n\ndef edit_file(teksti, tiedosto2, ofile):\n print(\"Modifying spelling and writing to file\", ofile, \"...\")\n teksti = teksti.lower()\n for i, e in zip(exp, exp2):\n teksti = re.sub(i, e, teksti)\n tiedosto2.write(teksti)\n\n\n\n\n\ndef main():\n while True:\n try:\n ifile = str(input(\"Enter your input file: \"))\n tiedosto1 = open(ifile, \"r\")\n ofile = input(\"Enter your output file: \")\n with tiedosto1 as f:\n teksti = f.read()\n tiedosto2 = open(ofile, \"w\")\n edit_file(teksti, tiedosto2, ofile)\n print(\"Done!\")\n\n tiedosto1.close()\n tiedosto2.close()\n break\n\n\n\n except FileNotFoundError:\n print(\"The file you entered was not found. Please try again.\\n\")\n\n\n\nmain()","sub_path":"assignment_10_1.py","file_name":"assignment_10_1.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"541976834","text":"from django.urls import path\nfrom . import views\n\napp_name = \"main\"\n\n\nurlpatterns = [\n\n\n path(\"register\", views.register_request, name=\"register\"),\n\npath('user_details/', views.user_details, name='user_details'),\npath(\"login\", views.login_request, name=\"login\"),\npath(\"\", views.home,),\n#path(\"homepage\", views.login_request, name=\"homepage\"),\npath('homepage', views.home_view,),\npath('edit', views.update,),\npath('delete', views.destroy,),\npath('user_updated', views.edit_view,),\npath('user_deleted', views.delete_view,),\npath('login_done', views.login_view,),\n\n\n]","sub_path":"HarshAssignments/users/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"66968458","text":"import gym\n\nfrom stable_baselines.common.policies import MlpPolicy\nfrom stable_baselines.common.vec_env import DummyVecEnv, VecNormalize\nfrom stable_baselines import PPO2\nfrom gym_unity.envs import UnityEnv\nimport os\nimport psutil\n\ndef main():\n env_id = \"hopper\"\n # env_id = \"walker\"\n if psutil.MACOS:\n env_path = os.path.join('envs', env_id)\n elif psutil.WINDOWS:\n env_path = os.path.join('envs', env_id, 'Unity Environment.exe')\n env = UnityEnv(env_path)\n env = DummyVecEnv([lambda: env]) # The algorithms require a vectorized environment to run\n # Automatically normalize the input features\n # env = VecNormalize(env, norm_obs=True, norm_reward=False,clip_obs=10.)\n env = VecNormalize(env)\n tensorboard_log = os.path.join(\"summaries\", env_id)\n\n model = PPO2(MlpPolicy, env, \n verbose=2, tensorboard_log=tensorboard_log\n )\n model.learn(total_timesteps=1000000)\n os.makedirs('models', exist_ok=True)\n model.save(os.path.join(\"models\", \"walker_ppo2_simple\"))\n\nif __name__ == '__main__':\n main()","sub_path":"train_simple.py","file_name":"train_simple.py","file_ext":"py","file_size_in_byte":1068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"142328001","text":"\nimport openpyxl as op\nimport psycopg2\nfrom psycopg2.extras import DictCursor #чтобы выводился список вместо кортежа\n\n\nclass postgree_connect:\n\n\t''' При создании экземпляра прописыватьимя бд для подключения'''\n\t''' имя DB прописывать строкой '''\n\n\tdef __init__(self, db_name):\n\t\tself.conn = psycopg2.connect(dbname = db_name, user = 
'postgres', \n password = '2378951', host = 'localhost')\n\t\tself.cursor = self.conn.cursor(cursor_factory = DictCursor)\n \n\tdef insert(self, table_name, values):\n\t\t''' Для инсерта прописывать имя таблицы строкой'''\n\t\t''' Values - передавать кортежем '''\n\n\t\t#self.cursor.execute(\"INSERT INTO {} VALUES ('ABC', '32213121') \".format(table_name)) \n\t\tself.cursor.execute(\"INSERT INTO {} VALUES {}\".format(table_name, values)) \n\t\tself.conn.commit() #сохраняем изменения в БД\n\n\tdef show_table(self, table_name):\n\t\t''' Для показа всех данных прописывать имя таблицы строкой'''\n\n\t\tself.cursor.execute(\"SELECT * FROM {}\".format(table_name)) \n\t\tfor row in self.cursor:\n\t\t\tprint(row)\n\n\tdef close(self):\t\t\n\t\t''' всегда закрывать каретку и БД'''\n\n\t\tself.cursor.close() #закрываем каретку/курсор\n\t\tself.conn.close() #закрываем БД\n\n\n\tdef __str__(self):\n\t\treturn 'class Object postgree DB'\n\n\n\nclass open_xlsx:\n\n\tdef __init__(self, file_name, sheet_number):\n\t\t''' Путь к файлу передавать строкой '''\n\t\t''' Номер листа для копирования передавать целочисленным'''\n\t\tself.wb = op.load_workbook(filename = file_name)\n\t\tself.sheets = self.wb.sheetnames #загружаем список листов\n\n\t\tself.ws = self.wb[self.sheets[sheet_number-1]] #передаем номер листа\n\t\t''' Здесь обращение происходит по индексу, поэтому -> минус один'''\n\n\t\t\t\t\n\n\tdef take_the_values(self, name_column_end, num_of_line):\n\t\t''' Имя колонки писать строкой, этот аргумент определяет конец считывания '''\n\t\t''' Выводит одну строку под определенным номером '''\n\n\t\tinsert_spisok = []\n\t\tfor column in ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', \n\t\t\t\t\t \t\t'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z',\n\t\t\t\t\t \t\t'AA', 'AB', 'AC', 'AD', 'AE', 'AF', 'AG' \t\t\t\t ]:\n\t\t\t\t\t\t\n\t\t\tinsert_spisok.append(self.ws[column][num_of_line].value) \n\n\t\t\tif column == name_column_end: # условие отвечающее за выход из цикла, если цикл дошел до нуной нам буквы \n\t\t\t\tbreak\n\n\t\treturn tuple(insert_spisok)\n\n\nop_xl_1 = open_xlsx('d:/Users/A/Desktop/тест.xlsx', 1)\n\na = postgree_connect('test_db')\na.insert('experts.expert_test', op_xl_1.take_the_values('B', 1))\na.show_table('experts.expert_test')\na.close()\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"264667198","text":"import math\n\ndef solution(answers):\n n = len(answers)\n a = [1,2,3,4,5]\n b = [2,1,2,3,2,4,2,5]\n c = [3, 3, 1, 1, 2, 2, 4, 4, 5, 5]\n score = [0,0,0]\n result = []\n \n for i, v in enumerate(answers):\n if v == a[i % len(a)]:\n score[0] += 1\n if v == b[i % len(b)]:\n score[1] += 1\n if v == c[i % len(c)]:\n score[2] += 1\n\n\n for i, v in enumerate(score):\n if v == max(score):\n result.append(i+1)\n \n return result\n\narg = [1,2,3,4,5]\nprint(solution(arg))","sub_path":"programmers/모의고사.py","file_name":"모의고사.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"441418175","text":"# -*- coding: utf-8 -*-\n\n__author__ = 'Sabina Langås'\n__email__ = 'sabinal@nmbu.no'\n\n\ndef letter_freq(txt):\n \"\"\"\n This code counts the frequency of the letters in the string that´s entered by the user of the code\n\n Arguments:\n tx {str}: input for the user of the code, a string\n\n Returns:\n returns a dictionary where 
key is the characters in txt and the value is the number of occurrences in txt\n \"\"\"\n freq = {}\n txt_lower = txt.lower()\n\n for char in txt_lower:\n if char in freq.keys():\n freq[char] += 1\n else:\n freq[char] = 1\n return freq\n\n\nif __name__ == '__main__':\n text = input('Please enter text to analyse: ')\n frequencies = letter_freq(text)\n frequencies_sorted = dict(sorted(frequencies.items()))\n\n for letter, count in frequencies_sorted.items():\n print('{:3}{:10}'.format(letter, count))\n","sub_path":"src/sabina_langas_ex/ex01/letter_counts.py","file_name":"letter_counts.py","file_ext":"py","file_size_in_byte":898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"279192775","text":"#!/home/bespontoff/PycharmProjects/checkio/venv/bin/checkio --domain=py run the-territory-of-go\n\n# This is the second mission about theGo game. The first one is theEaten Go Stones.\n# In this mission you'll learn how to count territory in the Go. Pay attention that this mission is simplified compared to the real game and its rules (the tests won't have cases where stones of one color are on the territory of the other player).\n# \n# So what is the territory in the Go game? It’s all unoccupied points which are surrounded by the complete and solid stone border consisting of stones of the same color. It can be a form in the center of the board which creates the border by using just the stones, or it can also be the form positioned near the edge of the board which uses it to complete the boundary. The complete and solid boundary is considered to be the one that consists of the stones connected to each other only vertically and horizontally. Also it should be 'closed'.\n# Look at the picture below. It displays the input data and the conception of the 'territory':\n# \n# \n# \n# Your task is to count the territory that belongs to each player. For this example the answer is: {'B': 13, 'W': 12}.\n# \n# Input:Two-dimensional array (the list of the strings).\n# \n# Output:Dictionary with the amount of each player's territory.\n# \n# Precondition:\n# Board - 9х9, 7x7, 5x5\n# \n# \n# END_DESC\n\ndef territory(board):\n #replace this for solution\n return board\n\nif __name__ == '__main__':\n print(\"Example:\")\n print(territory(['++B++++++',\n '+BB++++++',\n 'BB+++++++',\n '+++++++++',\n '+++++++++',\n '++WWW++++',\n '++W+W++++',\n '++WWW++++',\n '+++++++++']))\n\n #These \"asserts\" using only for self-checking and not necessary for auto-testing\n assert territory(['++B++++++',\n '+BB++++++',\n 'BB+++++++',\n '+++++++++',\n '+++++++++',\n '++WWW++++',\n '++W+W++++',\n '++WWW++++',\n '+++++++++']) == {'B': 3, 'W': 1}\n print(\"Coding complete? Click 'Check' to earn cool rewards!\")","sub_path":"solutions/Codeship/the_territory_of_go.py","file_name":"the_territory_of_go.py","file_ext":"py","file_size_in_byte":2305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"643176344","text":"# Ordenamieno por incercion consite en iterar una sola vez sobre el arreglo e ir insertando el valor en su posicion de tal forma\r\n# que los valores menor a el queden a su izq y los valores mayor a el queden a su derecha lo cual requiere que una vez insertado el dato\r\n# los valores a la derecha se reccoran. 
ese tipo de ordenamiento que usamos cuando jugabamos cartas\r\n\r\ndef Ordenamiento (list):\r\n for cursor in range (1,len(list)):\r\n for all in range (0,cursor):\r\n if list[all] > list[cursor]:\r\n\r\n tmp = list[cursor]\r\n for r in range (cursor,all,-1):\r\n\r\n list[r] = list[r-1]\r\n list[all] = tmp\r\n\r\n\r\n\r\n\r\n return list\r\n\r\nlist = [7,2,8,3,6,1]\r\na = Ordenamiento(list)\r\nprint(a)\r\n","sub_path":"Est Datos/Examples/ordenamiento por insercion.py","file_name":"ordenamiento por insercion.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"651934184","text":"################################################################\n# Author : yiorgosynkl (find me in Github: https://github.com/yiorgosynkl)\n# Date created : 20200626\n# Problem link : https://leetcode.com/problems/sum-root-to-leaf-numbers/\n################################################################\n\nfrom collections import deque \n\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\n\nclass Solution:\n # dfs with stack \n # def sumNumbers(self, root: TreeNode) -> int:\n # if root == None: return 0\n # stack, ans = [(root, 0)], 0\n # while stack:\n # node, val = stack.pop()\n # if node.left == None and node.right == None:\n # ans += val*10 + node.val\n # else:\n # if node.left: stack.append((node.left, val*10 + node.val))\n # if node.right: stack.append((node.right, val*10 + node.val))\n # return ans\n \n # bfs with queue\n # def sumNumbers(self, root: TreeNode) -> int:\n # if root == None: return 0\n # q, ans = deque([(root, 0)]), 0\n # while q:\n # node, val = q.popleft()\n # if node.left == None and node.right == None:\n # ans += val*10 + node.val\n # else:\n # if node.left: q.append((node.left, val*10 + node.val))\n # if node.right: q.append((node.right, val*10 + node.val))\n # return ans\n \n # recursive dfs with chars\n def sumNumbers(self, root: TreeNode) -> int:\n if root == None: return 0\n self.count = 0\n def dfs(node, num):\n num += str(node.val)\n if node.left == None and node.right == None:\n self.count += int( num )\n else:\n if node.left: dfs(node.left, num )\n if node.right: dfs(node.right, num )\n dfs(root, '')\n return self.count\n \n # recursive dfs with nums\n # def sumNumbers(self, root: TreeNode) -> int:\n # self.ans = 0\n # def dfs(node, val):\n # if node:\n # val = val*10 + node.val\n # if not node.left and not node.right:\n # self.ans += val\n # else:\n # dfs(node.left, val)\n # dfs(node.right, val)\n # dfs(root, 0)\n # return self.ans\n \n","sub_path":"30_day_challenge_2020_June/129_sum_root_to_leaf_numbers_day.py","file_name":"129_sum_root_to_leaf_numbers_day.py","file_ext":"py","file_size_in_byte":2531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"392700150","text":"\"\"\"\r\nPopup Eintrag Bearbeiten für pystunden.\r\n\"\"\"\r\nfrom tkinter import *\r\nfrom tkinter.ttk import *\r\nfrom tkinter.simpledialog import Dialog\r\nimport os\r\n\r\n\r\nclass EintraegeBearbeiten(Dialog):\r\n \"\"\"\r\n Popupfenster Eintrag Bearbeiten.\r\n \"\"\"\r\n def __init__(self, t_ids_bezahlt, master=None):\r\n # Variablen zum Populieren der Radio Buttons\r\n self.t_ids_bezahlt = t_ids_bezahlt\r\n # Setzt result None\r\n self.result = None\r\n \r\n self.master = master\r\n if os.name == 'nt':\r\n 
self.master.iconbitmap(\"pystunden.ico\")\r\n self.master.title(\"Einträge bearbeiten\")\r\n \r\n self.l6 = Label(self.master, text=\"Bezahlt: *\", width=20, anchor=E)\r\n self.l6.grid(row=5, sticky=W+E, padx=5, pady=5)\r\n \r\n self.radio_buttons = Label(self.master)\r\n self.radio_buttons.grid(row=5, column=1, padx=5, pady=5, sticky=W)\r\n \r\n self.bezahlt = StringVar(self.master)\r\n \r\n self.bezahlt_ja = Radiobutton(self.radio_buttons, text=\"Ja\", \r\n variable=self.bezahlt, value=str(\"Ja\"))\r\n self.bezahlt_ja.grid(row=0, column=0, padx=5, pady=5, sticky=W)\r\n self.bezahlt_nein = Radiobutton(self.radio_buttons, text=\"Nein\", \r\n variable=self.bezahlt, \r\n value=str(\"Nein\"))\r\n self.bezahlt_nein.grid(row=0, column=1, padx=5, pady=5, sticky=W) \r\n \r\n self.button = Button(self.master, text=\"Speichern\", width=20, \r\n command=self.save)\r\n self.button.grid(row=6, column=0, padx=5, pady=5) \r\n \r\n self.button = Button(self.master, text=\"Beenden\", width=20,\r\n command=self.master.destroy)\r\n self.button.grid(row=6, column=1, sticky=E, padx=5, pady=5)\r\n \r\n # Populiert die Radio Buttons. Hier wird ein set verwendet, um den Ver-\r\n # gleich der \"bezahlt\" variablen einfach zu machen.\r\n bezahlt_set = set()\r\n for id, bezahlt in self.t_ids_bezahlt:\r\n bezahlt_set.add(bezahlt)\r\n \r\n if len(bezahlt_set) == 1:\r\n for status in bezahlt_set:\r\n self.bezahlt.set(status)\r\n else:\r\n pass\r\n \r\n # Bindings\r\n self.master.bind(\"\", self.save)\r\n # Verhindert Größenänderungen\r\n self.master.update()\r\n self.master.minsize(self.master.winfo_width(), \r\n self.master.winfo_height())\r\n self.master.maxsize(self.master.winfo_width(), \r\n self.master.winfo_height())\r\n\r\n def save(self, event=None):\r\n \"\"\"\r\n Setzt die Variable result als Ergebnis.\r\n \"\"\"\r\n bezahlt = self.bezahlt.get()\r\n \r\n if bezahlt:\r\n result = []\r\n for id, status in self.t_ids_bezahlt:\r\n result.append((bezahlt, id))\r\n \r\n self.result = result\r\n\r\n self.master.destroy()\r\n \r\n","sub_path":"popup_bearbeiten_bezahlt.py","file_name":"popup_bearbeiten_bezahlt.py","file_ext":"py","file_size_in_byte":3094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"2515824","text":"import sh\n\nfrom django.db import models\n\nfrom api_ipf.settings import CONF_DIR, LOG_DIR\n\n\nclass ConfigFile(models.Model):\n \"\"\"\n Model that stores a serialized configuration of a IPFilter. Configuration\n file is defined by unique title, form, path where is stored, creation and\n modification time. Allowed forms are ipf, ipf6, ipnat and ippool.\n \"\"\"\n title = models.CharField(max_length=100, primary_key=True)\n form = models.CharField(max_length=10, default='ipf')\n directory = models.FileField(upload_to=CONF_DIR, blank=True)\n created = models.DateTimeField(auto_now_add=True)\n modified = models.DateTimeField(auto_now=True)\n\n def get_form(self):\n \"\"\"\n Method that returns form of a configuration file for processes of\n activation and check configuration file at its upload.\n\n :return: file's form\n \"\"\"\n return self.form\n\n\nclass LogFile(models.Model):\n \"\"\"\n Model that stores a serialized log of a IPFilter log management. 
Log file\n is defined by unique title, path where is stored and creation time.\n \"\"\"\n title = models.CharField(max_length=100, primary_key=True)\n directory = models.FileField(null=True)\n created = models.DateTimeField(auto_now_add=True)\n\n def save(self, *args, **kwargs):\n \"\"\"\n Method that overrides basic save method. Log is created in a specific\n location defined in LOG_CONF and logging is redirect to the log by\n command ipmon -aD.\n\n :return: path to the log\n \"\"\"\n path = ''.join([LOG_DIR, self.title, '.log'])\n open(path, 'a').close()\n self.log = path\n sh.ipmon('-aD', path)\n super(LogFile, self).save(*args, **kwargs)\n return self.directory","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"389481313","text":"import pandas as pd\nfrom sklearn.model_selection import train_test_split, GridSearchCV\nimport numpy as np\nfrom sklearn.svm import SVC\nfrom sklearn.ensemble import AdaBoostClassifier, RandomForestClassifier\nfrom sklearn.cluster import KMeans\nfrom sklearn.manifold import TSNE\nfrom matplotlib import pyplot as plt\nimport json\nfrom scipy import stats\nfrom scipy.stats import ttest_1samp\nimport os\nfrom utils import makedir\n\ndef get_word_vectors(embed, words, labels=None):\n X_words = []\n X_emb = []\n Y_emb = []\n\n for i, x in enumerate(words):\n if x in embed:\n X_emb.append(embed[x])\n X_words.append(x)\n if labels is not None:\n Y_emb.append(labels[i])\n X_emb = np.array(X_emb)\n Y_emb = np.array(Y_emb).ravel()\n\n if labels is None:\n return X_words, X_emb\n else:\n return X_words, X_emb, Y_emb\n\ndef clustering(embed, save_dir, embed_name):\n data = pd.read_csv(\"data/stereotype_list.csv\")\n X = data[\"male\"].values.tolist() + data[\"female\"].values.tolist()\n Y = [1] * len(data) + [0] * len(data)\n\n X_words, X_emb, Y_emb = get_word_vectors(embed, X, Y)\n\n X_embedded = TSNE(n_components=2, random_state=1).fit_transform(X_emb)\n X_male = X_embedded[Y_emb == 1]\n X_female = X_embedded[Y_emb == 0]\n plt.figure()\n plt.scatter(X_male[:, 0], X_male[:, 1], label=\"male-biased\", color='b', s=10, alpha=.55)\n plt.scatter(X_female[:, 0], X_female[:, 1], label=\"female-biased\", color='C1', s=10, alpha=.55)\n plt.legend()\n plt.savefig(makedir([save_dir, \"clustering\"], \"{}_plot.png\".format(embed_name)))\n\n kmeans = KMeans(n_clusters=2, random_state=0).fit(X_emb)\n score = (kmeans.labels_ == Y_emb).mean()\n score = max(score, 1- score)\n\n score = pd.DataFrame([[score]], columns=[\"score\"])\n score.to_csv(makedir([save_dir, \"clustering\"], \"{}_acc.csv\".format(embed_name)), index=False)\n return score\n\ndef gender_classification(embed, ml_model, save_dir, embed_name):\n data = pd.read_csv(\"data/GenderWords.csv\")\n X = data[\"word\"].values\n Y = data[\"label\"].values\n\n X_words, X_emb, Y_emb = get_word_vectors(embed, X, Y)\n X_train, X_test, y_train, y_test = train_test_split(X_emb, Y_emb, test_size=0.2, random_state=1)\n\n param_dict = {\n \"svc\": {'kernel':('linear', 'rbf'), 'C':[1, 10, 100]},\n \"rf\": {\"max_depth\": [1, 25, 50, 75, 100]},\n \"boost\": {\"learning_rate\": [1e-7, 1e-5, 1e-3, 1, 10]}\n }\n model_dict = {\n \"svc\": SVC(),\n \"rf\": RandomForestClassifier(n_estimators=100),\n \"boost\": AdaBoostClassifier(n_estimators=100)\n }\n\n model = model_dict[ml_model]\n parameters = param_dict[ml_model]\n clf = GridSearchCV(model, parameters)\n clf.fit(X_train, y_train)\n score = 
clf.score(X_test, y_test)\n\n score = pd.DataFrame([[score]], columns=[\"score\"])\n score.to_csv(makedir([save_dir, \"gender_classification\"], \"{}_acc.csv\".format(embed_name)), index=False)\n return score\n\ndef cosine(x, y):\n sim = x.dot(y) / (np.linalg.norm(x) * np.linalg.norm(y))\n return sim\n\ndef projection(embed, save_dir, embed_name):\n data = pd.read_csv(\"data/stereotype_list.csv\")\n X = data[\"male\"].values.tolist() + data[\"female\"].values.tolist()\n X_words, X_emb = get_word_vectors(embed, X)\n\n gender_direction = embed[\"he\"] - embed[\"she\"]\n gender_direction = gender_direction\n\n project = []\n for x in X_emb:\n sim = cosine(x, gender_direction)\n project.append(sim)\n project = np.array(project)\n avg_project = np.abs(project).mean()\n\n orders = np.argsort(project)\n\n plt.figure()\n plt.scatter(project, range(len(project)), s=10)\n plt.yticks([])\n plt.xlim([-0.5, 0.5])\n plt.xlabel(\"Similarity\")\n \n\n for i in range(5):\n plt.text(project[orders[i]], orders[i]+0.2, X_words[orders[i]])\n plt.text(project[orders[-(i+1)]], orders[-(i+1)]+0.2, X_words[orders[-(i+1)]])\n plt.savefig(makedir([save_dir, \"projection\"], \"{}_plot.png\".format(embed_name)))\n\n score = pd.DataFrame([[avg_project]], columns=[\"score\"])\n score.to_csv(makedir([save_dir, \"projection\"], \"{}_acc.csv\".format(embed_name)), index=False)\n return avg_project\n\ndef weat(embed, save_dir, embed_name):\n def association(w, M, F):\n s = 0\n for m in M:\n s += cosine(w, m) / len(M)\n for f in F:\n s -= cosine(w, f) / len(F)\n return s\n\n def S(X, Y, M, F):\n s = 0\n for x in X:\n s+= association(x, M, F)\n for y in Y:\n s-= association(y, M, F)\n return s\n\n def test(X, Y, M, F):\n s0 = S(X, Y, M, F)\n np.random.seed(1)\n U = np.vstack([X, Y])\n s_hat = []\n for i in range(10000):\n idx = np.random.permutation(len(U))\n X_hat = U[idx[:len(X)]]\n Y_hat = U[idx[len(X):]]\n si = S(X_hat, Y_hat, M, F)\n s_hat.append(si)\n\n s_hat = np.array(s_hat)\n\n pvalue = (s_hat > s0).mean()\n return pvalue\n\n with open(\"data/weat.json\") as f:\n data = json.load(f)\n\n vectors = {}\n for name, words in data.items():\n _, vectors[name] = get_word_vectors(embed, words)\n\n M = vectors[\"M\"]\n F = vectors[\"F\"]\n\n X = vectors[\"B1_X\"]\n Y = vectors[\"B1_Y\"]\n pvalues = test(X, Y, M, F)\n\n score = pd.DataFrame([pvalues], columns=[\"score\"])\n score.to_csv(makedir([save_dir, \"weat\"], \"{}_score.csv\".format(embed_name)), index=False)\n return pvalues\n\ndef analogy(embed, save_dir, embed_name):\n bias_analogy_f = open(\"data/Sembias\")\n\n definition_num = 0\n none_num = 0\n stereotype_num = 0\n total_num = 0\n sub_definition_num = 0\n sub_none_num = 0\n sub_stereotype_num = 0\n sub_size = 40\n\n sub_start = -(sub_size - sum(1 for line in open(\"data/Sembias\")))\n\n gender_v = embed['he'] - embed['she']\n for sub_idx, l in enumerate(bias_analogy_f):\n l = l.strip().split()\n max_score = -100\n for i, word_pair in enumerate(l):\n word_pair = word_pair.split(':')\n if word_pair[0] not in embed or word_pair[1] not in embed:\n continue\n pre_v = embed[word_pair[0]] - embed[word_pair[1]]\n score = cosine(gender_v, pre_v)\n if score > max_score:\n max_idx = i\n max_score = score\n if max_idx == 0:\n definition_num += 1\n if sub_idx >= sub_start:\n sub_definition_num += 1\n elif max_idx == 1 or max_idx == 2:\n none_num += 1\n if sub_idx >= sub_start:\n sub_none_num += 1\n elif max_idx == 3:\n stereotype_num += 1\n if sub_idx >= sub_start:\n sub_stereotype_num += 1\n total_num += 1\n\n definition_acc = 
definition_num / total_num\n stereotype_acc = stereotype_num / total_num\n none_acc = none_num / total_num\n\n score = pd.DataFrame([[definition_acc, stereotype_acc, none_acc]], columns=[\"definition_acc\", \"stereotype_acc\", \"none_acc\"])\n score.to_csv(makedir([save_dir, \"analogy\"], \"{}_score.csv\".format(embed_name)), index=False)\n\n return definition_acc, stereotype_acc, none_acc\n","sub_path":"evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":7200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"166242120","text":"import pickle\nimport tensorflow as tf\nimport numpy as np\nimport tf_util\nimport gym\nimport load_policy\n\ndef main():\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('expert_policy_file', type=str)\n parser.add_argument('envname', type=str)\n parser.add_argument('--write_policy_to_file', type=str, default='')\n parser.add_argument('--training_steps', type=int, default=30000)\n args = parser.parse_args()\n\n expert_policy = load_policy.load_policy(args.expert_policy_file)\n\n import gym\n env = gym.make(args.envname)\n max_steps = env.spec.timestep_limit\n\n print('Running expert policy')\n expert_obs, expert_actions = run_expert_policy(expert_policy, env, max_steps, 3)\n\n expert_data = {\n 'observations': np.array(expert_obs),\n 'actions': np.array(expert_actions)\n }\n\n print('number of data points in data set:', len(expert_obs))\n\n training_set, test_set = split_data(expert_data, 0.8)\n print('training_set size:', training_set['observations'].shape[0])\n print('test_set size:', test_set['observations'].shape[0])\n\n training_set_size = training_set['observations'].shape[0]\n\n input_vector_size = training_set['observations'].shape[1]\n output_vector_size = training_set['actions'].shape[1]\n print('input', input_vector_size)\n print('output', output_vector_size)\n n_hidden_1 = 50\n n_hidden_2 = 50\n batch_size = 500\n training_steps = args.training_steps\n collect_data_step = 500\n display_step = 1000\n learning_rate = 0.01\n\n weights = {\n 'h1': tf.Variable(tf.random_normal([input_vector_size, n_hidden_1]), name='h1'),\n 'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2]), name='h2'),\n 'out': tf.Variable(tf.random_normal([n_hidden_2, output_vector_size]), name='out')\n }\n biases = {\n 'b1': tf.Variable(tf.random_normal([n_hidden_1]), name='b1'),\n 'b2': tf.Variable(tf.random_normal([n_hidden_2]), name='b2'),\n 'out': tf.Variable(tf.random_normal([output_vector_size]), name='out')\n }\n\n x = tf.placeholder(tf.float32, [None, input_vector_size], name='input')\n\n y = neural_net(x, weights, biases)\n y_ = tf.placeholder(tf.float32, [None, output_vector_size], name='actual_output')\n\n mse = tf.reduce_sum(tf.pow(y-y_, 2))/(2*training_set_size)\n # cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))\n\n train_step = tf.train.AdamOptimizer(learning_rate).minimize(mse)\n\n sess = tf.InteractiveSession()\n tf.global_variables_initializer().run()\n\n saver = tf.train.Saver()\n observations = training_set['observations']\n actions = training_set['actions']\n\n print('observations size', observations.shape[0])\n print('actions size', actions.shape[0])\n for step in range(training_steps):\n indices = np.random.choice(len(observations), batch_size, replace=False)\n batch_xs = observations[indices]\n batch_ys = actions[indices]\n sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})\n if (step + 1) % 
collect_data_step == 0:\n new_observations, new_actions = run_policy(sess, x, y, expert_policy, env, max_steps, 1)\n # amount = len(new_observations)\n # observations = observations[amount:]\n # actions = actions[amount:]\n observations = np.append(observations, np.array(new_observations), axis=0)\n actions = np.append(actions, np.array(new_actions), axis=0)\n if (step + 1) % display_step == 0:\n cost = sess.run(mse, feed_dict={x: test_set['observations'], y_: test_set['actions']})\n print(\"Step:\", '%04d' % (step + 1), \"cost=\", \"{:.9f}\".format(cost))\n\n\n print(\"Optimization Finished!\")\n cost = sess.run(mse, feed_dict={x: test_set['observations'], y_: test_set['actions']})\n\n # print(sess.run(y, feed_dict={x: test_set['observations'][0:1]}))\n # print(\"h1\", sess.run(weights['h1']))\n print(\"cost=\", \"{:.9f}\".format(cost))\n if args.write_policy_to_file:\n print('writing policy file', args.write_policy_to_file)\n saver.save(sess, args.write_policy_to_file)\n\ndef run_expert_policy(expert_policy, env, max_steps, iterations):\n with tf.Session():\n tf_util.initialize()\n observations = []\n actions = []\n for i in range(iterations):\n obs = env.reset()\n done = False\n totalr = 0.\n steps = 0\n while not done:\n action = expert_policy(obs[None,:])\n observations.append(obs)\n actions.append(action.flatten())\n obs, r, done, _ = env.step(action)\n totalr += r\n steps += 1\n #if steps % 100 == 0: print(\"%i/%i\" % (steps, max_steps))\n if steps >= max_steps:\n break\n return observations, actions\n\n\ndef run_policy(sess, x, trained_policy, expert_policy, env, max_steps, iterations):\n observations = []\n actions = []\n for i in range(iterations):\n obs = env.reset()\n done = False\n totalr = 0.\n steps = 0\n while not done:\n action = sess.run(trained_policy, feed_dict={x: [obs]})\n observations.append(obs)\n expert_action = expert_policy(obs[None,:])\n actions.append(expert_action.flatten())\n obs, r, done, _ = env.step(action)\n totalr += r\n steps += 1\n #if steps % 100 == 0: print(\"%i/%i\" % (steps, max_steps))\n if steps >= max_steps:\n break\n print(\"Steps: %i/%i\" % (steps, max_steps))\n return observations, actions\n\ndef neural_net(x, weights, biases):\n # Hidden fully connected layer\n layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])\n layer_1 = tf.nn.elu(layer_1)\n # Hidden fully connected layer\n layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])\n layer_2 = tf.nn.elu(layer_2)\n # Output fully connected layer with a neuron for each class\n out_layer = tf.matmul(layer_2, weights['out']) + biases['out']\n out_layer = tf.identity(out_layer, name=\"neural_net\")\n return out_layer\n\n\ndef split_data(data_set, test_data_perc):\n test_data_end_index = round(test_data_perc * data_set['observations'].shape[0])\n test_set = {\n 'observations': data_set['observations'][:test_data_end_index],\n 'actions': data_set['actions'][:test_data_end_index]\n }\n validation_set = {\n 'observations': data_set['observations'][test_data_end_index:],\n 'actions': data_set['actions'][test_data_end_index:]\n }\n return test_set, validation_set\n\n\ndef read_expert_data(filename):\n with open(filename, 'rb') as f:\n expert_data = pickle.loads(f.read())\n expert_data['actions'] = get_clean_actions(expert_data)\n return expert_data\n\ndef get_clean_actions(expert_data):\n actions_raw = expert_data['actions']\n actions = np.empty((actions_raw.shape[0], actions_raw.shape[2]))\n for i in range (0, actions_raw.shape[0]):\n actions[i] = actions_raw[i].flatten()\n return 
actions\n\nif __name__ == '__main__':\n main()","sub_path":"hw1/dagger.py","file_name":"dagger.py","file_ext":"py","file_size_in_byte":7197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"3829109","text":"i = 0\npositivos = 0\nnegativos = 0\nneutros = 0\n\nwhile i < 20:\n numero = int(input(\"Numero \" + str(i + 1) + \":\"))\n if numero >= 1:\n positivos += 1\n elif numero <= -1:\n negativos += 1\n else:\n neutros += 1\n i += 1\n\nprint(\"Positivos: \" + str(positivos))\nprint(\"Negativos: \" + str(negativos))\nprint(\"Neutros: \" + str(neutros))\n","sub_path":"ejercicio3.py","file_name":"ejercicio3.py","file_ext":"py","file_size_in_byte":357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"574034983","text":"from django.conf import settings\nfrom django.contrib.auth.base_user import AbstractBaseUser, BaseUserManager\nfrom django.db import models\nfrom multiselectfield import MultiSelectField\nfrom .school_datas import college_list\nfrom django.dispatch import receiver\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\nfrom profiles.models import Student, Teacher\nfrom comments.models import Comment\nfrom posts.models import Post\nfrom .choices import COLLEGES, INTERESTS_CHOICES, RANKING_CHOICES\n\n\nclass UserManager(BaseUserManager):\n #custom create_user method\n def create_user(self, email, password=None, is_student=True):\n if not email:\n raise ValueError('Users must have an email address')\n if is_student == None:\n raise ValueError('You must provide a status')\n user = self.model(\n email = self.normalize_email(email),\n )\n user.set_password(password)\n user.is_student = is_student\n user.save(using=self._db)\n print(user)\n return user\n\n #Custom create_super_user method\n def create_superuser(self, email, password=None, is_student=False):\n user = self.create_user(\n email = email,\n password = password,\n )\n user.is_student = is_student\n user.admin = True\n user.is_superuser = True\n user.is_staff = True\n user.save(using=self._db)\n return user\n\n\nclass User(AbstractBaseUser):\n school = MultiSelectField(\n max_length = 400,\n choices = COLLEGES,\n max_choices=3,\n blank=True\n )\n interests = MultiSelectField(\n max_length = 7,\n choices = INTERESTS_CHOICES,\n blank=True,\n )\n\n email = models.EmailField(\n max_length=50,\n unique=True,\n blank=False,\n null=False\n )\n followers = models.ManyToManyField(\n \"User\",\n related_name = \"followers_list\",\n blank=True,\n )\n following = models.ManyToManyField(\n \"User\",\n related_name = \"following_list\",\n blank=True\n )\n date_joined = models.DateTimeField(auto_now_add=True)\n first_name = models.CharField(max_length=50)\n last_name = models.CharField(max_length=70)\n birth_date = models.DateField(null=True, blank=True)\n reputation = models.PositiveIntegerField(default=0)\n is_active = models.BooleanField(default=True)\n rank = models.CharField(choices=RANKING_CHOICES, max_length=5, default=\"basic\")\n is_staff = models.BooleanField(default=False)\n admin = models.BooleanField(default=False)\n is_superuser = models.BooleanField(default=False)\n bio = models.TextField(\n max_length=300,\n default=\"default Bio\",\n blank=True\n )\n is_student = models.BooleanField(blank=True, null=True)\n objects = UserManager()\n #Setting email to be the main source of authentication\n USERNAME_FIELD = 'email'\n\n #Super User Only\n REQUIRED_FIELDS = ['password']\n\n #def 
get_absolute_url(self):\n #use reverse + the url name of the view\n\n def __str__(self):\n return str(self.email)\n\n def get_full_name(self):\n return f\"{self.first_name} {self.last_name}\"\n\n def get_short_name(self):\n return self.first_name\n\n def set_user_league(self):\n if 15 <= self.reputation < 40:\n self.rank = \"gold\"\n elif 40 <= self.reputation < 80:\n self.rank = \"platinium\"\n elif self.reputation >= 80:\n self.rank = \"diamond\"\n\n @property\n def is_admin(self):\n print(f\" is {self.email} admin ? \")\n return self.admin\n\n @property\n def status(self):\n if self.is_student == None:\n return \"visitor\"\n elif self.is_student == True:\n return \"Student\"\n else:\n return \"Teacher\"\n\n def has_perm(self, obj=None):\n return True\n\n def has_module_perms(self, obj=None):\n return True\n\n def get_sent_friendrequest(self):\n fqs = FriendRequest.objects.filter(from_user=self.id)\n return fqs\n\n def get_friendrequest(self):\n fqs = FriendRequest.objects.filter(to_user=self.id)\n return fqs\n\n def get_user_comments(self):\n qs = Comment.objects.filter(user=self)\n return qs\n\n def get_user_posts(self):\n qs = Post.objects.filter(user=self)\n return qs\n\n\n@receiver(post_save, sender = User)\ndef create_profile(sender, instance, created, **kwargs):\n if created:\n if instance.is_student is not None:\n if instance.is_student:\n student = Student.objects.create(user=instance)\n #Keeping track of user_profile changes\n print(\"creation of instance STUDENT\")\n else:\n teacher = Teacher.objects.create(user=instance)\n #Keeping track of user_profile changes\n print(\"Creation of instance Teacher ! \")\n\n#change user profile on update\n@receiver(post_save, sender = User)\ndef update_profile(sender, instance, created, **kwargs):\n #Check if the user has already been created\n if created == False:\n if instance.is_student is not None:\n #Check the new value of the is_student attribute\n if instance.is_student:\n #Check if the user was previously a Teacher, if yes, delete the actual Teacher instance\n prev_teacher = Teacher.objects.filter(user=instance).first()\n if prev_teacher is not None:\n prev_teacher.delete()\n print(\"deleting the previous profile : Teacher\")\n student = Student.objects.create(user = instance)\n student.save()\n #Do the exact same thing if is_student is False, but delete the existing Student instance\n elif instance.is_student == False:\n prev_student = Student.objects.filter(user=instance).first()\n if prev_student is not None:\n prev_student.delete()\n print(\"deleting the previous profile : Student\")\n teacher = Teacher.objects.create(user = instance)\n teacher.save()\n else:\n print(\"Created\")\n\n#FriendRequest Model\nclass FriendRequest(models.Model):\n from_user = models.ForeignKey(\n settings.AUTH_USER_MODEL,\n on_delete = models.CASCADE,\n related_name = \"from_user\"\n )\n to_user = models.ForeignKey(\n settings.AUTH_USER_MODEL,\n on_delete = models.CASCADE,\n related_name = \"to_user\"\n )\n datestamp = models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n return f\"Request sent from {self.from_user} to {self.to_user}\"\n\n\n\n#Enable each user to add experiences\nclass Experience(models.Model):\n user = models.ForeignKey(\n settings.AUTH_USER_MODEL,\n on_delete = models.CASCADE,\n )\n date = models.DateField()\n content = models.TextField(max_length=300, blank=True, null=True)\n school = MultiSelectField(\n max_length = 400,\n choices = COLLEGES,\n max_choices=3,\n blank=True\n )\n #Replace Company name with auto fill\n company = 
models.CharField(max_length=300, blank=True, null=True)\n\n def __str__(self):\n if self.company:\n return f\"{self.user}a eu une expérience chez {self.company}\"\n else:\n return f\"{self.user}a eu une expérience chez {self.school}\"\n","sub_path":"schoolprojectv2/users/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":7473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"525310173","text":"n = int(input('Enter a number greater than 1:'))\ni = 0\n\nwhile n!=1:\n if n%2 == 0:\n n /= 2\n i = i+1\n else:\n n = n*3 + 1\n i = i+1\n\nprint('Number of steps:',i)\n","sub_path":"collatz.py","file_name":"collatz.py","file_ext":"py","file_size_in_byte":191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"287774162","text":"from queue import Queue\nfrom common.safe_socket import SafeSocket\nfrom server.server import Server\nfrom server.udp_thread_socket import UDPThreadSocket\n\nclass UDPServer(Server):\n SOCKET_TYPE = SafeSocket.UDP\n\n def __init__(self, server_addr, storage_path, operations_chain):\n super(UDPServer, self).__init__(server_addr, storage_path, operations_chain)\n self.queues = {}\n\n def start(self):\n print(f'\\nUDPServer started\\nAddress: {self.server_addr}\\nStorageDir: {self.storage_path})')\n\n while True:\n data, addr = self.sock.recv(None)\n print(f'Recived from -> address: {addr}')\n addr_socket = self.queues.get(addr, None)\n if not addr_socket:\n addr_queue = Queue()\n conn = self.sock.accept(addr)\n addr_socket = UDPThreadSocket(conn, addr_queue, self.queues, self.sock)\n self.queues[addr] = addr_socket\n cli_req = data.decode()\n threaded_op = self.operations_chain.delegate(addr_socket, addr, self.storage_path, cli_req)\n threaded_op and self.threads.append(threaded_op)\n else:\n addr_socket.put(data)\n","sub_path":"server/udp_server.py","file_name":"udp_server.py","file_ext":"py","file_size_in_byte":1209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"582562421","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\n\nclass SomeNet(nn.Module):\n '''\n In : (N, sentence_len)\n Out: (N, sentence_len, embd_size)\n '''\n def __init__(self,\n seq_len,\n vocab_size,\n embd_size,\n n_layers,\n kernel,\n out_chs,\n res_block_count,\n ans_size):\n \n super(SomeNet, self).__init__()\n self.res_block_count = res_block_count\n # self.embd_size = embd_size\n\n self.word_embeddings = nn.Embedding(vocab_size, embd_size)\n \n self.dropout = 0.5\n self.hidden_dim = 128\n self.num_layers = 2\n\n self.lstm = nn.LSTM(embd_size, self.hidden_dim // 2 , n_layers, dropout=self.dropout, bidirectional=True)\n\n self.output = nn.Linear(self.hidden_dim, ans_size)\n\n # # nn.Conv2d(in_channels, out_channels, kernel_size, stride=1, padding=0, ...\n # self.conv_0 = nn.Conv2d(1, out_chs, kernel_size=kernel, padding=(2, 0))\n # self.b_0 = nn.Parameter(torch.randn(1, out_chs, 1, 1))\n # self.conv_gate_0 = nn.Conv2d(1, out_chs, kernel_size=kernel, padding=(2, 0))\n # self.c_0 = nn.Parameter(torch.randn(1, out_chs, 1, 1))\n\n # self.conv = nn.ModuleList([nn.Conv2d(out_chs, out_chs, (kernel[0], 1), padding=(2, 0)) for _ in range(n_layers)])\n # self.conv_gate = nn.ModuleList([nn.Conv2d(out_chs, out_chs, (kernel[0], 1), padding=(2, 0)) for _ in range(n_layers)])\n # self.b = nn.ParameterList([nn.Parameter(torch.randn(1, out_chs, 1, 1)) for _ in 
range(n_layers)])\n # self.c = nn.ParameterList([nn.Parameter(torch.randn(1, out_chs, 1, 1)) for _ in range(n_layers)])\n\n # self.fc = nn.Linear(out_chs*seq_len, ans_size)\n def attention(self, out, state):\n\n \"\"\"\n Use attention to compute soft alignment score between each hidden state and the last hidden state (torch.bmm: batch matrix multiplication)\n \"\"\"\n\n hidden = state.squeeze(0)\n attn_weights = torch.bmm(out, hidden.unsqueeze(2)).squeeze(2)\n soft_attn_weights = F.softmax(attn_weights, 1)\n new_hidden = torch.bmm(out.transpose(1, 2), soft_attn_weights.unsqueeze(2)).squeeze(2)\n\n return new_hidden\n \n def forward(self, X):\n # x: (N, seq_len)\n # print(X.size())\n\n #Word embeddings\n embedded = self.word_embeddings(X)\n embedded = embedded.permute(1,0,2)\n\n #Batch size\n batch_size = X.size(0)\n\n #Initial hidden state\n h0 = Variable(torch.zeros(2*self.num_layers*5, batch_size, self.hidden_dim // 2)).cuda()\n c0 = Variable(torch.zeros(2*self.num_layers*5, batch_size, self.hidden_dim // 2)).cuda()\n \n #print(h0.size(), c0.size())\n\n #Forward state\n output, (hidden_state, cell_state) = self.lstm(embedded, (h0, c0))\n\n x = self.output(output[-1])\n\n return x\n","sub_path":"labs/lab3/model_2.py","file_name":"model_2.py","file_ext":"py","file_size_in_byte":3018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"415095616","text":"# Copyright (c) 2018 Frederik Beaujean\n# Copyright (c) 2016, 2017, 2018 Danny van Dyk\n#\n# This file is part of the EOS project. EOS is free software;\n# you can redistribute it and/or modify it under the terms of the GNU General\n# Public License version 2, as published by the Free Software Foundation.\n#\n# EOS is distributed in the hope that it will be useful, but WITHOUT ANY\n# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 59 Temple\n# Place, Suite 330, Boston, MA 02111-1307 USA\n\nimport eos\nimport h5py\nimport numpy\nimport os\nimport sys\n\nclass FileFormatError(Exception):\n def __init__(self, expected, found):\n self.expected = expected\n self.found = found\n\n def __str__(self):\n return 'Expected file format %s, found %s instead' % (self.expected, self.found)\n\n\nclass HDF5DataFile:\n def __init__(self):\n # generate map\n self.variable_indices = { self.parameters[i][0].decode('ascii'): i for i in range(0, len(self.parameters)) }\n\n\nclass PMCDataFile(HDF5DataFile):\n def __init__(self, file):\n # open the input file for reading\n self.file = h5py.File(file, 'r')\n\n # check that the input file has format=PMC\n if 'format' not in self.file.attrs:\n eos.warn('input file does not have attribute \\'format\\'; assuming format \\'PMC\\'')\n elif 'PMC' != self.file.attrs['format']:\n raise FileFormatError('PMC', self.file.attrs['format'])\n\n # extract parameter descriptions of the n-tuples\n self.parameters = None\n if '/descriptions/parameters' in self.file:\n self.parameters = self.file['/descriptions/parameters']\n else:\n raise RuntimeError('input file has no valid parameter descriptions: is it corrupted?')\n\n super().__init__()\n\n\n def __del__(self):\n self.file.close()\n\n\n \"\"\"Retrieve data\"\"\"\n def data(self):\n step = 'final'\n\n if '/data/%s/samples' % step not in self.file:\n raise RuntimeError('input file does not contain stored data for step %s' % step)\n\n dataset = self.file['/data/%s/samples' % step]\n\n return numpy.array(dataset[:])\n\n\nclass MCMCDataFile(HDF5DataFile):\n def __init__(self, file):\n # open the input file for reading\n self.file = h5py.File(file, 'r')\n\n # check that the input file has format=MCMC\n if 'format' not in self.file.attrs:\n eos.warn('input file does not have attribute \\'format\\'; assuming format \\'MCMC\\'')\n elif 'MCMC' != self.file.attrs['format']:\n raise FileFormatError('MCMC', self.file.attrs['format'])\n\n # extract parameter descriptions of the n-tuples\n self.parameters = None\n if '/descriptions/main run/chain #0/parameters' in self.file:\n self.parameters = self.file['/descriptions/main run/chain #0/parameters']\n elif '/descriptions/prerun/chain #0/parameters' in self.file:\n self.parameters = self.file['/descriptions/prerun/chain #0/parameters']\n else:\n raise RuntimeError('input file has no valid parameter descriptions: is it corrupted?')\n\n super().__init__()\n\n\n def __del__(self):\n self.file.close()\n\n\n \"\"\"Retrieve data\"\"\"\n def data(self):\n groupname = 'main run'\n\n if 'main run' not in self.file:\n eos.warn('input file does not contain results from a main run')\n groupname = 'prerun'\n\n group = self.file[groupname]\n\n # start with no data\n data = None\n\n # append each dataset to data\n for chainname in group:\n chain = group[chainname]\n dset = chain['samples']\n\n if data is None:\n data = numpy.array(dset[:])\n else:\n data = numpy.append(data, dset[:], axis=0)\n\n return data\n\n \"\"\"Retrieve the modes of the chains\"\"\"\n def modes(self):\n groupname = 'main run'\n\n if 'main run' not in self.file:\n eos.warn('input file does not contain results from a main run')\n groupname = 'prerun'\n\n group = self.file[groupname]\n\n # start with no data\n result = []\n\n # append each dataset to data\n for chainname in 
group:\n chain = group[chainname]\n dset = chain['stats/mode']\n\n log_posterior = dset[-1][-1]\n mode = dset[-1][0:-1]\n\n result.append((mode, log_posterior))\n\n return result\n\nclass UncertaintyDataFile(HDF5DataFile):\n def __init__(self, file):\n self.name = file\n # open the input file for reading\n self.file = h5py.File(file, 'r')\n\n # check that the input file has format=PMC\n if 'format' not in self.file.attrs:\n eos.warn('input file does not have attribute \\'format\\'; assuming format \\'UNC\\'')\n elif 'UNC' != self.file.attrs['format']:\n raise FileFormatError('UNC', self.file.attrs['format'])\n\n # extract parameter descriptions of the n-tuples\n self.parameters = []\n if '/descriptions/parameters' in self.file:\n for i in range(len(self.file['/descriptions/observables'])):\n desc = self.file['/descriptions/observables/%d' % i]\n name = desc.attrs.get(\"name\")\n kinematics = desc.attrs.get(\"kinematics\")\n self.parameters.append([name, kinematics, sys.float_info.min, sys.float_info.max])\n else:\n raise RuntimeError('input file has no valid parameter descriptions: is it corrupted?')\n\n super().__init__()\n\n\n def __del__(self):\n self.file.close()\n\n\n \"\"\"Retrieve data\"\"\"\n def data(self):\n if '/data/observables' not in self.file:\n raise RuntimeError('input file does not contain stored observables')\n\n dataset = self.file['/data/observables']\n data = numpy.array(dataset[:])\n\n # adjust min,max range for each parameter based on data\n for i in range(0, len(self.parameters)):\n self.parameters[i][2] = numpy.min(data[:, i])\n self.parameters[i][3] = numpy.max(data[:, i])\n\n return data\n\n\n\"\"\" open HDF5 data file regardless of file type \"\"\"\ndef load_data_file(name):\n basename = os.path.basename(name)\n if basename.startswith('mcmc'):\n return MCMCDataFile(name)\n elif basename.startswith('pmc'):\n return PMCDataFile(name)\n elif basename.startswith('unc'):\n return UncertaintyDataFile(name)\n else:\n raise RuntimeError('cannot determine HDF5 file type based on the file name')\n\n","sub_path":"python/eos/data/hdf5.py","file_name":"hdf5.py","file_ext":"py","file_size_in_byte":6820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
{"seq_id":"313227000","text":"import sys,os,glob\n#args\n#1. txt folder\n#2. genomename eg. hg19\n#3. 
StarVersion '2.7.0f' or '2.6.0b'\nbasefolder=sys.argv[1] # QC or rawQC folder\nstarversion=sys.argv[3] # required: valid options are '2.7.0f' or '2.6.0b'\ntxts=glob.glob(basefolder+\"/*.txt\")\nread_lengths=[]\nfor f in txts:\n line=list(filter(lambda x:x.startswith(b'Sequence length'),list(map(lambda x:x.strip(),open(f).readlines()))))[0]\n if b'-' in line:\n read_lengths.append(int(line.split(b'\\t')[1].split(b'-')[1]))\n else:\n read_lengths.append(int(line.split(b'\\t')[1]))\na=max(read_lengths)-1\nallrls=[50,75,100,125,150]\nif not a in allrls:\n for rl in allrls:\n if a > rl:\n continue\n else:\n a=rl\n break\na=str(a)\ngenome2resource=dict()\nfor i in list(map(lambda x:x.strip().split(\"\\t\"),open(\"/genome2resources.tsv\").readlines())):\n if not i[0] in genome2resource:\n genome2resource[i[0]]=dict()\n genome2resource[i[0]][i[1]]=i[2]\nprint(genome2resource[sys.argv[2]]['{}_{}'.format(a, starversion)])\n","sub_path":"RNA-seq/Apps/ccbr_star/resources/get_starindexid.py","file_name":"get_starindexid.py","file_ext":"py","file_size_in_byte":996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"396990589","text":"import sys\nsys.setrecursionlimit(1 << 20)\nINF = float('inf')\n\n\ndef read_int_list():\n return list(map(int, input().split()))\n\n\ndef read_ints():\n return map(int, input().split())\n\n\nimport copy\n\n\ndef main():\n N = int(input())\n M = []\n cur = [1, 2, 3, 4, 5, 6]\n M.append(copy.copy(cur))\n for i in range(29):\n j = i % 5\n k = j + 1\n cur[j], cur[k] = cur[k], cur[j]\n # print(cur)\n M.append(copy.copy(cur))\n # break\n # print(len(M), M[0])\n print(''.join([str(v) for v in M[N % 30]]))\n\n\nmain()\n","sub_path":"abc/abc4c.py","file_name":"abc4c.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"66408624","text":"import numpy as np\nfrom util import append_df\nfrom executor import Executor\n\n\nclass Flipper(Executor):\n \"\"\"\n 1. unwind using fixed period or reverse alphas\n 2. limit position and order size to 1, not incremental adjustment\n 3. 
simple initial and unwind\n \"\"\"\n\n def __init__(self, **kwargs):\n super(Flipper, self).__init__(**kwargs)\n self.hold_period = kwargs.get('hold_period', 300)\n self.rewards = kwargs.get('rewards', 10e-05)\n self.field = kwargs.get('filed', 'mid')\n self.name = kwargs.get('name', '%s_%d' % (self.name, self.hold_period))\n\n def cross_session(self, df, i):\n if i == len(df)-1:\n return True\n return (df.ts[i+1] - df.ts[i]) > 600\n\n def execute(self, df):\n num = len(df)\n if num == 0: return\n\n order = np.repeat(0, num)\n cashin = np.repeat(0., num)\n pos = np.repeat(0, num)\n avgpx = np.repeat(0., num)\n update = np.repeat(df.ts[0], num)\n\n def build(i, size, row):\n order[i] = size\n update[i] = row.ts\n pos[i] = pos[i-1] + order[i]\n cashin[i] = order[i] * row[self.field]\n if pos[i] != 0:\n avgpx[i] = (avgpx[i-1]*pos[i-1] + cashin[i])/pos[i]\n\n def unwind(i, row):\n order[i] = -pos[i]\n cashin[i] = order[i] * row[self.field]\n update[i] = row.ts\n pos[i] = pos[i-1] + order[i]\n\n for i, r in df.iterrows():\n if i == 0: continue\n alpha = r['alpha']\n ts = r['ts']\n px = r[self.field]\n\n pos[i] = pos[i-1]\n avgpx[i] = avgpx[i-1]\n update[i] = update[i-1]\n\n # unwind position\n if pos[i-1] != 0 and (ts - update[i] > self.hold_period or self.cross_session(df, i)):\n unwind(i, r)\n\n # init position\n if alpha > self.rewards and pos[i-1] < 1:\n build(i, 1, r)\n if alpha < -self.rewards and pos[i-1] > -1:\n build(i, -1, r)\n\n self.df = df[['ts','time','alpha',self.field]]\n append_df(self.df, {'order':order, 'pos':pos, 'avgpx':avgpx, 'cashin': cashin, 'update':update})\n self.summary()\n self.report()\n return self.df\n\n def summary(self):\n diff_px = self.df[self.field] - self.df.avgpx\n self.df['unreal_pnl'] = diff_px * self.df.pos\n self.df['real_pnl'] = -1 * diff_px * self.df.order\n self.df['commission'] = self.df.cashin.abs() * self.commission\n self.df['cum_pnl'] = self.df.real_pnl.cumsum()\n self.df['net_pnl'] = self.df.cum_pnl - self.df.commission.cumsum()\n","sub_path":"execute/flipper.py","file_name":"flipper.py","file_ext":"py","file_size_in_byte":2688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"1177062","text":"#!/usr/bin/python\n\n__author__ = 's1549117'\n# reducer code\n\nimport sys\n\nprevious_student_id = None # the previous student\nstudent = None # the current student\ntag = None\nprev_tag = False\n\nfor line in sys.stdin:\n line = line.strip()\n tokens = line.split()\n id = tokens[0]\n tag = tokens[1]\n if tag == \"student\":\n # sanity check\n if previous_student_id != id:\n # save the student so that we can use it if he has marks\n student = tokens[2] # store the name\n if prev_tag != \"student\" and prev_tag:\n sys.stdout.write(\"\\n\") # output new line only when the previous tag was not a student nor the first\n # line\n previous_student_id = id # this is the current student being written to stdout\n elif tag == \"mark\":\n # sanity check\n if previous_student_id == id:\n subject = tokens[2]\n score = tokens[3]\n if student:\n studentStr = \"{0} -->\".format(student)\n sys.stdout.write(studentStr)\n student = None\n subjectStr = \" ({0},{1})\".format(subject, score)\n sys.stdout.write(subjectStr)\n prev_tag = tag\n# reducer end","sub_path":"task8/python/reducer.py","file_name":"reducer.py","file_ext":"py","file_size_in_byte":1220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"382933076","text":"from django.conf.urls import url\nfrom 
django.contrib import admin\nfrom django.contrib.auth.views import login, logout\nfrom django.views.generic.base import RedirectView\n\nfrom annotator.views import *\n\nadmin.site.site_header = 'BeaverDam'\n\nurlpatterns = [\n url(r'^$', home),\n url(r'^verify/$', verify_list),\n\n url(r'^video/(\\d+)/$', video, name='video'),\n url(r'^video/(\\d+)/next/$', next_unannotated),\n url(r'^video/(\\d+)/verify/$', verify),\n url(r'^annotation/(\\d+)/$', AnnotationView.as_view()),\n\n url(r'^login/$', login, \n {'template_name': 'admin/login.html', \n 'extra_context': {'site_header': 'BeaverDam Login'}\n }, name='login'),\n url(r'^logout/$', logout),\n url(r'^accounts/', RedirectView.as_view(url='/')),\n url(r'^admin/', admin.site.urls),\n]\n","sub_path":"beaverdam/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"365113468","text":"# -*- coding: utf-8 -*-\n# ---\n# @Institution: Automation,T&E,Turing,HQ\n# @Time: 2021/7/19\n# @File: test_sensors_17_2.py\n# @Author: pengleiyang\n# @E-mail: pengleiyang@huaqin.com\n# @Desc: 设计机械臂控制,进行尝试\n# @update: Record important updates\n# ---\n\nimport os\nimport socket\nimport json\nimport time\nimport unittest\nimport warnings\nimport uiautomator2 as u2\nfrom utils.device_info_util.device_info import DeviceInfo\n\n\nclass AccelerometerMeasurementTests(unittest.TestCase):\n def setUp(self):\n support_device = 'HQ60CT3016'\n warnings.simplefilter('ignore', ResourceWarning) # 屏蔽警报信息\n print(\"测试开始\")\n print(\"获取手机设备信息!\")\n self.device = DeviceInfo()\n print(support_device)\n self.devices = self.device.check_device()[0]\n self.devices.remove(support_device)\n self.test_device = self.devices[0]\n self.d = u2.connect(self.test_device) # 连接待测设备\n self.d.unlock()\n print(\"解锁成功\")\n\n\n def connect_et_controller(self, ip, port=8055):\n # 连接机器人\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n try:\n sock.connect((ip, port))\n return True, sock\n except Exception as e:\n sock.close()\n return False, e\n\n def disconnect_et_controller(self, sock):\n if sock:\n sock.close()\n sock = None\n else:\n sock = None\n\n def send_cmd(self, sock, cmd, params=None, id=1):\n if not params:\n params = []\n else:\n params = json.dumps(params)\n sendStr = \"{{\\\"method\\\":\\\"{0}\\\",\\\"params\\\":{1},\\\"jsonrpc\\\":\\\"2.0\\\",\\\"id \\\":{2}}}\".format(cmd, params, id) + \"\\n\"\n try:\n sock.sendall(bytes(sendStr,\"utf-8\"))\n ret = sock.recv(1024)\n jdata = json.loads(str(ret, \"utf-8\"))\n if \"result\" in jdata.keys():\n return True, json.loads(jdata[\"result\"]), jdata[\"id\"]\n elif \"error\" in jdata.keys():\n return False, jdata[\"error\"].jdata[\"id\"]\n else:\n return False, None, None\n except Exception as e:\n print(\"fail:\", str(e))\n return False, None, None\n\n def test_sensors_17_2(self):\n # adb 获取屏幕是否为自动亮度\n os.system(\"adb -s \" + self.test_device + \" shell settings get system screen_brightness_mode\")\n # adb 更改屏幕为自动亮度\n os.system(\"adb -s \" + self.test_device + \" shell settings put system screen_brightness_mode 1\")\n state = DeviceInfo().get_device_wifi_state(self.test_device)\n\n\n\n\n\n\n\n\n","sub_path":"testcases/sensors/test_sensors_17_2.py","file_name":"test_sensors_17_2.py","file_ext":"py","file_size_in_byte":2690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"643219760","text":"# coding: utf-8\n\n\"\"\"\n DFC\n\n DFC is a scalable object-storage based 
caching system with Amazon and Google Cloud backends. # noqa: E501\n\n OpenAPI spec version: 1.1.0\n Contact: dfcdev@exchange.nvidia.com\n Generated by: https://openapi-generator.tech\n\"\"\"\n\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\n\nclass PrefetchTargetStatistics(object):\n \"\"\"NOTE: This class is auto generated by OpenAPI Generator.\n Ref: https://openapi-generator.tech\n\n Do not edit the class manually.\n \"\"\"\n\n \"\"\"\n Attributes:\n openapi_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n openapi_types = {\n 'xaction_details': 'list[XactionDetails]',\n 'num_files_prefetched': 'int',\n 'num_bytes_prefetched': 'int'\n }\n\n attribute_map = {\n 'xaction_details': 'xactionDetails',\n 'num_files_prefetched': 'numFilesPrefetched',\n 'num_bytes_prefetched': 'numBytesPrefetched'\n }\n\n def __init__(self, xaction_details=None, num_files_prefetched=None, num_bytes_prefetched=None): # noqa: E501\n \"\"\"PrefetchTargetStatistics - a model defined in OpenAPI\"\"\" # noqa: E501\n\n self._xaction_details = None\n self._num_files_prefetched = None\n self._num_bytes_prefetched = None\n self.discriminator = None\n\n if xaction_details is not None:\n self.xaction_details = xaction_details\n if num_files_prefetched is not None:\n self.num_files_prefetched = num_files_prefetched\n if num_bytes_prefetched is not None:\n self.num_bytes_prefetched = num_bytes_prefetched\n\n @property\n def xaction_details(self):\n \"\"\"Gets the xaction_details of this PrefetchTargetStatistics. # noqa: E501\n\n\n :return: The xaction_details of this PrefetchTargetStatistics. # noqa: E501\n :rtype: list[XactionDetails]\n \"\"\"\n return self._xaction_details\n\n @xaction_details.setter\n def xaction_details(self, xaction_details):\n \"\"\"Sets the xaction_details of this PrefetchTargetStatistics.\n\n\n :param xaction_details: The xaction_details of this PrefetchTargetStatistics. # noqa: E501\n :type: list[XactionDetails]\n \"\"\"\n\n self._xaction_details = xaction_details\n\n @property\n def num_files_prefetched(self):\n \"\"\"Gets the num_files_prefetched of this PrefetchTargetStatistics. # noqa: E501\n\n\n :return: The num_files_prefetched of this PrefetchTargetStatistics. # noqa: E501\n :rtype: int\n \"\"\"\n return self._num_files_prefetched\n\n @num_files_prefetched.setter\n def num_files_prefetched(self, num_files_prefetched):\n \"\"\"Sets the num_files_prefetched of this PrefetchTargetStatistics.\n\n\n :param num_files_prefetched: The num_files_prefetched of this PrefetchTargetStatistics. # noqa: E501\n :type: int\n \"\"\"\n\n self._num_files_prefetched = num_files_prefetched\n\n @property\n def num_bytes_prefetched(self):\n \"\"\"Gets the num_bytes_prefetched of this PrefetchTargetStatistics. # noqa: E501\n\n\n :return: The num_bytes_prefetched of this PrefetchTargetStatistics. # noqa: E501\n :rtype: int\n \"\"\"\n return self._num_bytes_prefetched\n\n @num_bytes_prefetched.setter\n def num_bytes_prefetched(self, num_bytes_prefetched):\n \"\"\"Sets the num_bytes_prefetched of this PrefetchTargetStatistics.\n\n\n :param num_bytes_prefetched: The num_bytes_prefetched of this PrefetchTargetStatistics. 
# noqa: E501\n :type: int\n \"\"\"\n\n self._num_bytes_prefetched = num_bytes_prefetched\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.openapi_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, PrefetchTargetStatistics):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n","sub_path":"python-client/openapi_client/models/prefetch_target_statistics.py","file_name":"prefetch_target_statistics.py","file_ext":"py","file_size_in_byte":5159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"650125540","text":"# Copyright 2018 The LongYan. All Rights Reserved.\n\n\"\"\"A script for gen random list.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nimport numpy as np\nimport tensorflow as tf\nimport cv2\nfrom os import walk\nfrom os.path import join\nimport time\nimport random\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--data_path', type=str, help='Image path.')\nargs = parser.parse_args()\n\ndef read_images(path):\n\n train_file_name = \"random_train_list.txt\"\n val_file_name = \"random_val_list.txt\"\n\n image_all_path = []\n label_all = []\n\n image_file_list = []\n for (root, dirs, files) in walk(path):\n if root == path:\n image_file_list = dirs\n \n # 10 clases image\n for image_class_file in image_file_list:\n real_image_class_file_path = join(path, image_class_file)\n \n # one class all images\n for (root, dirs, files) in walk(real_image_class_file_path):\n jpg_list = files\n for i,jpg_path in enumerate(jpg_list):\n jpg_list[i] = join(real_image_class_file_path, jpg_path)\n label_all.append(int(image_class_file))\n image_all_path.extend(jpg_list)\n \n num_images = len(image_all_path)\n \n np_image = np.array(image_all_path)\n np_label = np.array(label_all)\n\n random_index = range(num_images)\n random.shuffle(random_index)\n random_image = np_image[random_index]\n random_label = np_label[random_index]\n\n train_random_image = random_image[0:1000]\n train_random_label = random_label[0:1000]\n\n val_random_image = random_image[1000:]\n val_random_label = random_label[1000:]\n\n f = open(train_file_name, \"w\")\n for i in range(1000):\n txt_line = str(train_random_image[i]) + \",\" + str(train_random_label[i]) + \"\\n\"\n f.write(txt_line)\n f.close()\n\n f = open(val_file_name, \"w\")\n for i in range(num_images-1000):\n txt_line = str(val_random_image[i]) + \",\" + str(val_random_label[i]) + \"\\n\"\n f.write(txt_line)\n f.close()\n\ndef run():\n read_images(args.data_path)\n\nif __name__ == '__main__':\n 
run()","sub_path":"flowers17/gen_random_list_v1.py","file_name":"gen_random_list_v1.py","file_ext":"py","file_size_in_byte":2143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"128414833","text":"import sys\nimport threading\nfrom contextlib import contextmanager\nfrom queue import Queue\nfrom typing import Any, Dict, Generator, List, Optional, cast\n\nimport attr\nimport click\nimport yaml\nfrom yaml.serializer import Serializer\n\nfrom .. import constants\nfrom ..models import Interaction\nfrom ..runner import events\nfrom .context import ExecutionContext\nfrom .handlers import EventHandler\n\ntry:\n from yaml import CDumper as Dumper\nexcept ImportError:\n # pylint: disable=unused-import\n from yaml import Loader, Dumper # type: ignore\n\n\n# Wait until the worker terminates\nWRITER_WORKER_JOIN_TIMEOUT = 1\n\n\n@attr.s(slots=True) # pragma: no mutate\nclass CassetteWriter(EventHandler):\n \"\"\"Write interactions in a YAML cassette.\n\n A low-level interface is used to write data to YAML file during the test run and reduce the delay at\n the end of the test run.\n \"\"\"\n\n file_handle: click.utils.LazyFile = attr.ib() # pragma: no mutate\n queue: Queue = attr.ib(factory=Queue) # pragma: no mutate\n worker: threading.Thread = attr.ib(init=False) # pragma: no mutate\n\n def __attrs_post_init__(self) -> None:\n self.worker = threading.Thread(target=worker, kwargs={\"file_handle\": self.file_handle, \"queue\": self.queue})\n self.worker.start()\n\n def handle_event(self, context: ExecutionContext, event: events.ExecutionEvent) -> None:\n if isinstance(event, events.Initialized):\n # In the beginning we write metadata and start `http_interactions` list\n self.queue.put(Initialize())\n if isinstance(event, events.AfterExecution):\n # Seed is always present at this point, the original Optional[int] type is there because `TestResult`\n # instance is created before `seed` is generated on the hypothesis side\n seed = cast(int, event.result.seed)\n self.queue.put(Process(status=event.status.name.upper(), seed=seed, interactions=event.result.interactions))\n if isinstance(event, events.Finished):\n self.queue.put(Finalize())\n self.worker.join(WRITER_WORKER_JOIN_TIMEOUT)\n\n\n@attr.s(slots=True) # pragma: no mutate\nclass Initialize:\n \"\"\"Start up, the first message to make preparations before proceeding the input data.\"\"\"\n\n\n@attr.s(slots=True) # pragma: no mutate\nclass Process:\n \"\"\"A new chunk of data should be processed.\"\"\"\n\n status: str = attr.ib() # pragma: no mutate\n seed: int = attr.ib() # pragma: no mutate\n interactions: List[Interaction] = attr.ib() # pragma: no mutate\n\n\n@attr.s(slots=True) # pragma: no mutate\nclass Finalize:\n \"\"\"The work is done and there will be no more messages to process.\"\"\"\n\n\nclass StringSerializer(Serializer):\n \"\"\"Emit scalar values as strings.\n\n It is required to avoid possible issues with default YAML parsing.\n For example \"Norway problem\", where `- no` will be parsed to `[False]`, but we have strings everywhere\n therefore we need `- 'no'` and ['no'].\n \"\"\"\n\n def serialize_node(self, node: yaml.Node, parent: Optional[yaml.Node], index: int) -> None:\n # NOTE. 
This implementation is taken from the parent Serializer and adjusted for `ScalarNode` case and\n # for `MappingNode`.\n alias = self.anchors[node]\n self.serialized_nodes[node] = True\n self.descend_resolver(parent, index) # type: ignore\n if isinstance(node, yaml.ScalarNode):\n implicit = False, True\n self.emit(yaml.ScalarEvent(alias, node.tag, implicit, node.value, style=node.style)) # type: ignore\n elif isinstance(node, yaml.SequenceNode):\n implicit = node.tag == self.resolve(yaml.SequenceNode, node.value, True) # type: ignore\n self.emit(yaml.SequenceStartEvent(alias, node.tag, implicit, flow_style=node.flow_style)) # type: ignore\n index = 0\n for item in node.value:\n self.serialize_node(item, node, index)\n index += 1\n self.emit(yaml.SequenceEndEvent()) # type: ignore\n elif isinstance(node, yaml.MappingNode):\n implicit = node.tag == self.resolve(yaml.MappingNode, node.value, True) # type: ignore\n self.emit(yaml.MappingStartEvent(alias, node.tag, implicit, flow_style=node.flow_style)) # type: ignore\n for key, value in node.value:\n self.emit(yaml.ScalarEvent(alias, key.tag, (True, True), key.value, style=key.style)) # type: ignore\n self.serialize_node(value, node, key)\n self.emit(yaml.MappingEndEvent()) # type: ignore\n self.ascend_resolver() # type: ignore\n\n\nclass StringDumper(Dumper, StringSerializer):\n pass\n\n\ndef get_command_representation() -> str:\n \"\"\"Get how Schemathesis was run.\"\"\"\n # It is supposed to be executed from Schemathesis CLI, not via Click's `command.invoke`\n if not sys.argv[0].endswith(\"schemathesis\"):\n return \"\"\n args = \" \".join(sys.argv[1:])\n return f\"schemathesis {args}\"\n\n\ndef worker(file_handle: click.utils.LazyFile, queue: Queue) -> None:\n \"\"\"Write YAML to a file in an incremental manner.\"\"\"\n current_id = 0\n stream = file_handle.open()\n dumper = StringDumper(stream, sort_keys=False) # type: ignore\n StringSerializer.__init__(dumper) # type: ignore\n dumper.open() # type: ignore\n\n # Helpers\n\n def emit(*yaml_events: yaml.Event) -> None:\n for event in yaml_events:\n dumper.emit(event) # type: ignore\n\n @contextmanager\n def mapping() -> Generator[None, None, None]:\n emit(yaml.MappingStartEvent(anchor=None, tag=None, implicit=True))\n yield\n emit(yaml.MappingEndEvent())\n\n def key(name: str) -> yaml.ScalarEvent:\n \"\"\"Default style for mapping keys is without quotes.\"\"\"\n return yaml.ScalarEvent(anchor=None, tag=None, implicit=(True, True), value=name)\n\n def value(_value: str) -> yaml.ScalarEvent:\n \"\"\"Default style for mapping values is with quotes.\"\"\"\n return yaml.ScalarEvent(anchor=None, tag=None, implicit=(False, True), value=_value)\n\n def serialize_mapping(name: str, data: Dict[str, Any]) -> None:\n emit(key(name))\n node = dumper.represent_data(data) # type: ignore\n # C-extension is not introspectable\n dumper.anchor_node(node) # type: ignore\n dumper.serialize_node(node, None, 0) # type: ignore\n\n while True:\n item = queue.get()\n if isinstance(item, Initialize):\n emit(yaml.DocumentStartEvent(), yaml.MappingStartEvent(anchor=None, tag=None, implicit=True))\n emit(\n key(\"command\"),\n value(get_command_representation()),\n key(\"recorded_with\"),\n value(f\"Schemathesis {constants.__version__}\"),\n key(\"http_interactions\"),\n yaml.SequenceStartEvent(anchor=None, tag=None, implicit=True),\n )\n elif isinstance(item, Process):\n for interaction in item.interactions:\n with mapping():\n emit(\n key(\"id\"),\n value(str(current_id)),\n key(\"status\"),\n value(item.status),\n 
key(\"seed\"),\n value(str(item.seed)),\n key(\"elapsed\"),\n value(str(interaction.response.elapsed)),\n key(\"recorded_at\"),\n value(interaction.recorded_at),\n )\n serialize_mapping(\"request\", interaction.request.asdict())\n serialize_mapping(\"response\", interaction.response.asdict())\n current_id += 1\n else:\n emit(yaml.SequenceEndEvent(), yaml.MappingEndEvent(), yaml.DocumentEndEvent())\n # C-extension is not introspectable\n dumper.close() # type: ignore\n dumper.dispose() # type: ignore\n break\n","sub_path":"src/schemathesis/cli/cassettes.py","file_name":"cassettes.py","file_ext":"py","file_size_in_byte":7986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"78440823","text":"# Color image filtering\n\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom scipy import misc, signal\n\n\ndef convolve(x, h):\n (row1, col1) = x.shape\n (row2, col2) = h.shape\n\n convolved_image = np.zeros((row1 + row2 - 1, col1 + col2 - 1))\n\n h = h[::-1,::-1]\n\n b = np.lib.pad(x, [(row2 - 1, row2 - 1), (col2 - 1, col2 - 1)], mode='constant')\n\n for i in range(row1 + row2 - 1):\n for j in range(col1 + col2 - 1):\n k = b[i:i+row2, j:j+col2]\n temp = k * h\n convolved_image[i, j] = np.sum(temp)\n\n return convolved_image\n\ndef show_images(image1, image2, title1, title2):\n f, a = plt.subplots(1, 2)\n a[0].imshow(image1)\n a[0].set_title(title1)\n a[1].imshow(image2)\n a[1].set_title(title2)\n plt.show()\n# Load the rgb image\nrgb_image = misc.imread('kid.jpg')\n(row,col,_)=rgb_image.shape\n# Generate averaging filter\naveraging_filter = np.ones((3, 3)) / 9.0\n\n# Separate RGB into components\nR = rgb_image[:, :, 0]\nG = rgb_image[:, :, 1]\nB = rgb_image[:, :, 2]\n\naveraged_R = convolve(R, averaging_filter)\naveraged_G = convolve(G, averaging_filter)\naveraged_B = convolve(B, averaging_filter)\n\n\n# Combine RGB values into a single image\naveraged_image= np.zeros((row+2,col+2,3))\naveraged_image[:, :, 0] = averaged_R\naveraged_image[:, :, 1] = averaged_G\naveraged_image[:, :, 2] = averaged_B\n\n\nshow_images(rgb_image,averaged_image.astype(np.uint8), 'RGB', 'Filtered Image')\n\n","sub_path":"color_filter.py","file_name":"color_filter.py","file_ext":"py","file_size_in_byte":1444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"608900066","text":"#!/usr/bin/env python\n#\n# Copyright (C) 2008-2014 Eli Carter \n# All rights reserved.\n#\n# This software is licensed as described in the file COPYING, which\n# you should have received as part of this distribution.\n\nfrom setuptools import setup, find_packages\n\nextra = {}\ntry:\n import babel\n del babel\n extra['message_extractors'] = {\n 'advancedworkflow': [\n ('**.py', 'python', None),\n ('**.html', 'genshi', None),\n ],\n }\n from trac.util.dist import get_l10n_cmdclass\n extra['cmdclass'] = get_l10n_cmdclass()\nexcept ImportError:\n pass\n\nsetup(\n name='TracAdvancedTicketWorkflow',\n version='1.2.2',\n author='Eli Carter',\n author_email='elicarter@retracile.net',\n license='3-Clause BSD',\n description='Advanced workflow operations Trac plugin',\n long_description='Provides more advanced workflow operations for Trac 1.2',\n url='https://trac-hacks.org/wiki/AdvancedTicketWorkflowPlugin',\n classifiers=['Framework :: Trac'],\n packages=find_packages(),\n package_data={\n 'advancedworkflow': [\n 'locale/*/LC_MESSAGES/*.mo',\n ],\n },\n test_suite='advancedworkflow.tests.test_suite',\n entry_points={'trac.plugins': [\n 
'advancedworkflow.controller = advancedworkflow.controller'\n ]},\n install_requires=['Trac'],\n # zip_safe = False,\n **extra)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"418279329","text":"import unittest\nfrom time import sleep\n\nfrom selenium import webdriver\nfrom selenium.common.exceptions import TimeoutException, WebDriverException\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.firefox.options import Options\nfrom selenium.webdriver.remote.webelement import WebElement\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.support.ui import WebDriverWait\n\nuse_Firefox = True\n\n\nclass text_to_change(object):\n def __init__(self, locator, text):\n self.locator = locator\n self.text = text\n\n def __call__(self, driver):\n actual_text = EC._find_element(\n driver, self.locator).get_attribute(\"value\")\n return actual_text != self.text\n\n\n\nclass Test(unittest.TestCase):\n\n def setUp(self):\n if use_Firefox == True:\n self.driver = webdriver.Firefox()\n else:\n self.driver = webdriver.Chrome()\n\n\n def test_1(self):\n driver = self.driver\n driver.get(\"http://localhost/compiler\")\n title = driver.title\n self.assertIn(\"Solomonoff\", driver.title)\n \n\n skip: WebElement = WebDriverWait(driver, 3).until(\n EC.visibility_of_element_located((By.CSS_SELECTOR, '.enjoyhint_skip_btn')))\n webdriver.ActionChains(driver).move_to_element(skip).click().perform()\n editor = driver.find_element_by_css_selector('#editor > div.ace_scroller > div') \n webdriver.ActionChains(driver).move_to_element(editor).click().send_keys(\"x = 'tre':'00'\").perform()\n compile = driver.find_element_by_css_selector('#btnn')\n compile.click()\n replOutput = driver.find_element_by_id('outputField')\n replOutputText1 = replOutput.get_attribute(\"value\")\n replInput = driver.find_element_by_css_selector('#inputField > textarea')\n replInput.send_keys(':eval x \\'tre\\'')\n replInput.send_keys(Keys.RETURN)\n WebDriverWait(driver, 10).until(text_to_change((By.ID, \"outputField\"), replOutputText1))\n replOutputText2 = replOutput.get_attribute(\"value\")\n # print(replOutputText2.strip())\n self.assertEqual(replOutputText2.strip(), \"> :load\\n> :eval x 'tre'\\n'00'\")\n \n\n def test_2(self):\n driver = self.driver\n driver.get(\"http://localhost/compiler\")\n skip: WebElement = WebDriverWait(driver, 3).until(\n EC.visibility_of_element_located((By.CSS_SELECTOR, '.enjoyhint_skip_btn')))\n webdriver.ActionChains(driver).move_to_element(skip).click().perform()\n editor = driver.find_element_by_css_selector('#editor > div.ace_scroller > div') \n webdriver.ActionChains(driver).move_to_element(editor).click().send_keys(\"x = 'tre':'00'\").perform()\n compile = driver.find_element_by_css_selector('#btnn')\n compile.click()\n tips = driver.find_element_by_id('automataHtmlList').text\n self.assertIn(\"x\", tips)\n \n\n def test_3(self):\n driver = self.driver\n driver.get(\"http://localhost/compiler\")\n skip: WebElement = WebDriverWait(driver, 3).until(\n EC.visibility_of_element_located((By.CSS_SELECTOR, '.enjoyhint_skip_btn')))\n webdriver.ActionChains(driver).move_to_element(skip).click().perform()\n clear = driver.find_element_by_css_selector('body > main > div > div > div.first > span > button')\n clear.click()\n replOutput = 
driver.find_element_by_id('outputField').get_attribute(\"value\")\n self.assertEqual(replOutput, \"> :clear\\n\")\n \n\n \n\n def tearDown(self):\n self.driver.close()\n\nif __name__ == \"__main__\":\n unittest.main()","sub_path":"IntergationTests/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
{"seq_id":"447687908","text":"import numpy as np\n\n\nclass GradientDescent(object):\n def __init__(self, learning_rate, X, Y, **kwargs):\n self.theta_history = None\n self.cost_history = None\n self.learning_rate = learning_rate\n self.X = X\n self.Y = Y\n self._iterations = None\n self._theta = None\n self._callback = None\n self.__dict__.update(kwargs)\n\n @property\n def callback(self):\n return self._callback\n\n @callback.setter\n def callback(self, callback):\n self._callback = callback\n\n @property\n def theta(self):\n return self._theta\n\n @theta.setter\n def theta(self, theta):\n self._theta = theta\n\n @property\n def iterations(self):\n return self._iterations\n\n @iterations.setter\n def iterations(self, iterations):\n self._iterations = iterations\n\n def cost(self):\n m = len(self.Y)\n predictions = np.dot(self.X, self._theta)\n cost = (1 / (2 * m)) * np.sum(np.square(predictions - self.Y))\n return cost\n\n def cal_cost(self, X, Y):\n m = len(Y)\n predictions = np.dot(X, self._theta)\n cost = (1 / (2 * m)) * np.sum(np.square(predictions - Y))\n return cost\n\n def optimise(self):\n self.theta_history = np.zeros((self.iterations, self.X.shape[1]))\n self.cost_history = np.zeros(self.iterations)\n m = len(self.Y)\n for it in range(self.iterations):\n predictions = np.dot(self.X, self.theta)\n self.theta = self.theta - (1 / m) * self.learning_rate * (\n np.dot(self.X.T, predictions - self.Y)\n )\n self.theta_history[it, :] = self.theta.T\n self.cost_history[it] = self.cost()\n if getattr(self, \"callback\", None):\n self.callback(predictions, self.cost_history)\n\n def optimise_sgd(self):\n self.cost_history = np.zeros(self.iterations)\n m = len(self.Y)\n for it in range(self.iterations):\n cost = 0.0\n for i in range(m):\n rand_ind = np.random.randint(0, m)\n x_i = self.X[rand_ind, :].reshape(1, self.X.shape[1])\n y_i = self.Y[rand_ind].reshape(1, 1)\n predictions = np.dot(x_i, self.theta)\n self.theta = self.theta - (1 / m) * self.learning_rate * (\n np.dot(x_i.T, predictions - y_i)\n )\n cost += self.cal_cost(x_i, y_i)\n self.cost_history[it] = cost\n\n def optimise_mini_batch_gd(self, **kwargs):\n batch_size = kwargs.get(\"batch_size\", 20)\n cost_func = kwargs.get(\"func\", None)\n gradient = kwargs.get(\"gradient\", None)\n self.cost_history = np.zeros(self.iterations)\n m = len(self.Y)\n for it in range(self.iterations):\n cost = 0.0\n indices = np.random.permutation(m)\n X = self.X[indices]\n Y = self.Y[indices]\n for i in range(0, m, batch_size):\n x_i = X[i : i + batch_size]\n y_i = Y[i : i + batch_size]\n predictions = np.dot(x_i, self.theta)\n if gradient:\n grad = gradient(self.theta, x_i, y_i)\n self.theta = self.theta - self.learning_rate * grad\n else:\n self.theta = self.theta - (1 / m) * self.learning_rate * (\n np.dot(x_i.T, predictions - y_i)\n )\n if cost_func:\n cost += cost_func(self.theta, x_i, y_i)\n else:\n cost += self.cal_cost(x_i, y_i)\n self.cost_history[it] = cost\n\n def predict(self):\n return np.dot(self.X, 
self.theta)\n","sub_path":"optimizers/gradient_descent.py","file_name":"gradient_descent.py","file_ext":"py","file_size_in_byte":3747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"345082596","text":"import pandas as pd\nimport numpy as np\nimport scipy.spatial\nfrom collections import defaultdict\nimport matplotlib.pyplot as plt\nimport matplotlib\n\nENCODINGS = '/home/aidris/Videos/task/duplicateimages/ImageDuplication/src/encodings.pkl'\n\ndef similarity_metric(x1,x2,type = 'euclidean'):\n \"\"\"\n CURRENTLY IMPLEMENTS EUCLIDEAN METRIC\n\n \"\"\"\n if type == 'euclidean':\n return sum((x1-x2)**2)**(1/2)\n \n if type=='cosine':\n return scipy.spatial.distance.cosine(x1,x2)\n\n#LOAD ENCODINGS\n\ndef load_encodings(path=ENCODINGS):\n encodings = pd.read_pickle(path)\n #encodings.set_index(['images'],inplace=True)\n return encodings\n\ndata = load_encodings()\nprint(data.head())\n\nencodings = data['ENCODINGS'].values\nimages_ids = data['images'].values\ndistance=defaultdict(list)\nd=[]\nfor i,encoding1 in enumerate(encodings):\n dis=[]\n for j,encoding2 in enumerate(encodings):\n \n similarity = similarity_metric(encoding1,encoding2)\n distance[images_ids[i]].append([images_ids[j],similarity])\n dis.append([images_ids[j],similarity])\n d.append(dis)\n\ndict_={}\nfor i,list1 in enumerate(d):\n list1.sort(key=lambda x: x[1])\n dict_[images_ids[i]]=list1\n\n#print(dict_.keys())\n\nprint(dict_['../test_images/17 Oncoanaesthesia CEM Review_1/image26.jpg'][1:10])\n\nl = plt.imread('../test_images/17 Oncoanaesthesia CEM Review_1/image26.jpg')\nplt.imshow(l)\nplt.show()\ndata.set_index(['images'],inplace=True)\nprint(similarity_metric(data.loc['../test_images/17 Oncoanaesthesia CEM Review_1/image26.jpg','ENCODINGS'],data.loc['../test_images/17 Oncoanaesthesia CEM Review_1/image19.jpg','ENCODINGS'],type='cosine'))\n\n\n\n\n\n\n\n\n\n\"\"\"\n\n#print(data.head())\ndistances=[]\nresult_data=[]\n\nfor j, x2 in enumerate(data['ENCODINGS']):\n imp=[]\n for i,x1 in enumerate(data['ENCODINGS']):\n \n dist=similarity_metric(x1,x2)\n imp_data = [i,dist]\n imp.append(imp_data)\n distances.append(dist)\n result_data.append(imp)\n\n\n#result = np.array(result_data)\n#print(result.shape)\n #min_e= np.min(distances)\n #print('Index ',min_e)\n\n#n=[]\n#for list1 in result_data:\n# list1.sort(key=lambda x: x[1])\n# n.append(list1)\n#n=np.array(n)\n#print(n[145])\n \n#print(data.iloc[145,0])\n#print(data.iloc[142../test_images2/17 Oncoanaesthesia CEM Review_1/image1,0])\n\ndata.set_index(['images'],inplace=True)\nprint(data.head())\nprint(similarity_metric(data.loc['../test_images2/17 Oncoanaesthesia CEM Review_1/image26.png','ENCODINGS'],data.loc['../test_images2/17 Oncoanaesthesia CEM Review_1/image19.jpeg','ENCODINGS']))\nprint(similarity_metric(data.loc['../test_images2/17 Oncoanaesthesia CEM Review_1/image14.tiff','ENCODINGS'],data.loc['../test_images2/17 Oncoanaesthesia CEM Review_1/image14.tiff','ENCODINGS'],type='cosine'))\n\"\"\"\n\n\n\n\n\n\n","sub_path":"FILES/result.py","file_name":"result.py","file_ext":"py","file_size_in_byte":2802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"98911716","text":"from functools import partial\n\nimport re\n\nimport numpy as np\nimport dask.array as da\nfrom xarray import DataArray\n\nfrom numba import cuda, float32, prange, jit\n\nfrom xrspatial.utils import has_cuda\nfrom xrspatial.utils import cuda_args\nfrom xrspatial.utils import 
get_dataarray_resolution\n\n# 3rd-party\ntry:\n import cupy\nexcept ImportError:\n class cupy(object):\n ndarray = False\n\n\nDEFAULT_UNIT = 'meter'\nMETER = 1\nFOOT = 0.3048\nKILOMETER = 1000\nMILE = 1609.344\nUNITS = {'meter': METER, 'meters': METER, 'm': METER,\n 'feet': FOOT, 'foot': FOOT, 'ft': FOOT,\n 'miles': MILE, 'mls': MILE, 'ml': MILE,\n 'kilometer': KILOMETER, 'kilometers': KILOMETER, 'km': KILOMETER}\n\n\ndef _is_numeric(s):\n try:\n float(s)\n return True\n except ValueError:\n return False\n\n\ndef _to_meters(d, unit):\n return d * UNITS[unit]\n\n\ndef _get_distance(distance_str):\n # return distance in meters\n\n # spit string into numbers and text\n splits = [x for x in re.split(r'(-?\\d*\\.?\\d+)', distance_str) if x != '']\n if len(splits) not in [1, 2]:\n raise ValueError(\"Invalid distance.\")\n\n unit = DEFAULT_UNIT\n\n if len(splits) == 2:\n unit = splits[1]\n\n number = splits[0]\n if not _is_numeric(number):\n raise ValueError(\"Distance should be a positive numeric value.\\n\")\n\n distance = float(number)\n if distance <= 0:\n raise ValueError(\"Distance should be a positive.\\n\")\n\n unit = unit.lower()\n unit = unit.replace(' ', '')\n if unit not in UNITS:\n raise ValueError(\n \"Distance unit should be one of the following: \\n\"\n \"meter (meter, meters, m),\\n\"\n \"kilometer (kilometer, kilometers, km),\\n\"\n \"foot (foot, feet, ft),\\n\"\n \"mile (mile, miles, ml, mls)\")\n\n # convert distance to meters\n meters = _to_meters(distance, unit)\n return meters\n\n\ndef calc_cellsize(raster):\n \"\"\"\n Calculates cell size of an array based on its attributes.\n Default = meters. If lat-lon, units are converted to meters.\n\n Parameters\n ----------\n raster : xarray.DataArray\n 2D array of input values.\n\n Returns\n -------\n cellsize : tuple\n Tuple of (cellsize_x, cellsize_y).\n cellsize_x : float\n Size of cells in x-direction.\n cellsize_y : float\n Size of cells in y-direction.\n\n Examples\n --------\n .. plot::\n :include-source:\n\n import matplotlib.pyplot as plt\n import numpy as np\n import xarray as xr\n\n from xrspatial import generate_terrain\n from xrspatial.convolution import calc_cellsize\n\n\n # Generate Example Terrain\n W = 500\n H = 300\n\n template_terrain = xr.DataArray(np.zeros((H, W)))\n x_range=(-20e6, 20e6)\n y_range=(-20e6, 20e6)\n\n terrain_agg = generate_terrain(\n template_terrain, x_range=x_range, y_range=y_range\n )\n\n # Edit Attributes\n terrain_agg = terrain_agg.assign_attrs(\n {\n 'Description': 'Example Terrain',\n 'units': 'km',\n 'Max Elevation': '4000',\n }\n )\n\n terrain_agg = terrain_agg.rename({'x': 'lon', 'y': 'lat'})\n terrain_agg = terrain_agg.rename('Elevation')\n\n .. sourcecode:: python\n\n >>> print(terrain_agg[200:203, 200:202])\n \n array([[1264.02296597, 1261.947921 ],\n [1285.37105519, 1282.48079719],\n [1306.02339636, 1303.4069579 ]])\n Coordinates:\n * lon (lon) float64 -3.96e+06 -3.88e+06\n * lat (lat) float64 6.733e+06 6.867e+06 7e+06\n Attributes:\n res: (80000.0, 133333.3333333333)\n Description: Example Terrain\n units: km\n Max Elevation: 4000\n\n .. 
sourcecode:: python\n\n >>> # Calculate Cellsize\n >>> cellsize = calc_cellsize(terrain_agg)\n >>> print(cellsize)\n (80000.0, 133333.3333333333)\n \"\"\"\n if 'unit' in raster.attrs:\n unit = raster.attrs['unit']\n else:\n unit = DEFAULT_UNIT\n\n cellsize_x, cellsize_y = get_dataarray_resolution(raster)\n cellsize_x = _to_meters(cellsize_x, unit)\n cellsize_y = _to_meters(cellsize_y, unit)\n\n # When converting from lnglat_to_meters, could have negative cellsize in y\n return cellsize_x, np.abs(cellsize_y)\n\n\ndef _ellipse_kernel(half_w, half_h):\n # x values of interest\n x = np.linspace(-half_w, half_w, 2 * half_w + 1)\n # y values of interest, as a \"column\" array\n y = np.linspace(-half_h, half_h, 2 * half_h + 1)[:, None]\n\n # True for points inside the ellipse\n # (x / a)^2 + (y / b)^2 <= 1, avoid division to avoid rounding issue\n ellipse = (x * half_h) ** 2 + (y * half_w) ** 2 <= (half_w * half_h) ** 2\n return ellipse.astype(float)\n\n\ndef circle_kernel(cellsize_x, cellsize_y, radius):\n \"\"\"\n Generates a circular kernel of a given cellsize and radius.\n\n Parameters\n ----------\n cellsize_x : int\n Cell size of output kernel in x-direction.\n cellsize_y : int\n Cell size of output kernel in y-direction.\n radius : int\n Radius of output kernel.\n\n Returns\n -------\n kernel : NumPy Array of float values\n 2D array where values of 1 indicate the kernel.\n\n Examples\n --------\n .. sourcecode:: python\n\n >>> import xarray as xr\n >>> from xrspatial.convolution import circle_kernel\n\n >>> # Create Kernel\n >>> kernel = circle_kernel(1, 1, 3)\n >>> print(kernel)\n [[0. 0. 0. 1. 0. 0. 0.]\n [0. 1. 1. 1. 1. 1. 0.]\n [0. 1. 1. 1. 1. 1. 0.]\n [1. 1. 1. 1. 1. 1. 1.]\n [0. 1. 1. 1. 1. 1. 0.]\n [0. 1. 1. 1. 1. 1. 0.]\n [0. 0. 0. 1. 0. 0. 0.]]\n\n >>> kernel = circle_kernel(1, 2, 3)\n >>> print(kernel)\n [[0. 0. 0. 1. 0. 0. 0.]\n [1. 1. 1. 1. 1. 1. 1.]\n [0. 0. 0. 1. 0. 0. 0.]]\n \"\"\"\n # validate radius, convert radius to meters\n r = _get_distance(str(radius))\n\n kernel_half_w = int(r / cellsize_x)\n kernel_half_h = int(r / cellsize_y)\n\n kernel = _ellipse_kernel(kernel_half_w, kernel_half_h)\n return kernel\n\n\ndef annulus_kernel(cellsize_x, cellsize_y, outer_radius, inner_radius):\n \"\"\"\n Generates a annulus (ring-shaped) kernel of a given cellsize and radius.\n\n Parameters\n ----------\n cellsize_x : int\n Cell size of output kernel in x direction.\n cellsize_y : int\n Cell size of output kernel in y direction.\n outer_radius : int\n Outer ring radius of output kernel.\n inner_radius : int\n Inner circle radius of output kernel.\n\n Returns\n -------\n kernel : NumPy Array of float values.\n 2D array of 0s and 1s where values of 1 indicate the kernel.\n\n Examples\n --------\n .. 
sourcecode:: python\n\n >>> import xarray as xr\n >>> from xrspatial.convolution import annulus_kernel\n\n >>> # Create Kernel\n >>> kernel = annulus_kernel(1, 1, 3, 1)\n >>> print(kernel)\n [[0., 0., 0., 1., 0., 0., 0.],\n [0., 1., 1., 1., 1., 1., 0.],\n [0., 1., 1., 0., 1., 1., 0.],\n [1., 1., 0., 0., 0., 1., 1.],\n [0., 1., 1., 0., 1., 1., 0.],\n [0., 1., 1., 1., 1., 1., 0.],\n [0., 0., 0., 1., 0., 0., 0.]]\n\n >>> kernel = annulus_kernel(1, 2, 5, 2)\n >>> print(kernel)\n [[0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0.],\n [0., 1., 1., 1., 1., 0., 1., 1., 1., 1., 0.],\n [1., 1., 1., 0., 0., 0., 0., 0., 1., 1., 1.],\n [0., 1., 1., 1., 1., 0., 1., 1., 1., 1., 0.],\n [0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0.]])\n \"\"\"\n # Get the two circular kernels for the annulus\n kernel_outer = circle_kernel(cellsize_x, cellsize_y, outer_radius)\n kernel_inner = circle_kernel(cellsize_x, cellsize_y, inner_radius)\n\n # Need to pad kernel_inner to get it the same shape and centered\n # in kernel_outer\n pad_vals = np.array(kernel_outer.shape) - np.array(kernel_inner.shape)\n pad_kernel = np.pad(kernel_inner,\n # Pad ((before_rows, after_rows),\n # (before_cols, after_cols))\n pad_width=((pad_vals[0] // 2, pad_vals[0] // 2),\n (pad_vals[1] // 2, pad_vals[1] // 2)),\n mode='constant',\n constant_values=0)\n # Get annulus by subtracting inner from outer\n kernel = kernel_outer - pad_kernel\n return kernel\n\n\ndef custom_kernel(kernel):\n \"\"\"\n Validates a custom kernel. If the kernel is valid, returns itself.\n \"\"\"\n\n if not isinstance(kernel, np.ndarray):\n raise ValueError(\n \"Received a custom kernel that is not a Numpy array.\",\n \"The kernel received was of type {} and needs to be \"\n \"of type `ndarray`\".format(type(kernel))\n )\n else:\n rows, cols = kernel.shape\n\n if (rows % 2 == 0 or cols % 2 == 0):\n raise ValueError(\n \"Received custom kernel with improper dimensions.\",\n \"A custom kernel needs to have an odd shape, the supplied kernel \"\n \"has {} rows and {} columns.\".format(rows, cols)\n )\n return kernel\n\n\n@jit(nopython=True, nogil=True, parallel=True)\ndef _convolve_2d_numpy(data, kernel):\n \"\"\"\n Apply kernel to data image.\n \"\"\"\n # TODO: handle nan\n\n nx = data.shape[0]\n ny = data.shape[1]\n nkx = kernel.shape[0]\n nky = kernel.shape[1]\n wkx = nkx // 2\n wky = nky // 2\n\n out = np.zeros(data.shape, dtype=float32)\n out[:, :] = np.nan\n for i in prange(wkx, nx-wkx):\n iimin = max(i - wkx, 0)\n iimax = min(i + wkx + 1, nx)\n for j in prange(wky, ny-wky):\n jjmin = max(j - wky, 0)\n jjmax = min(j + wky + 1, ny)\n num = 0.0\n for ii in range(iimin, iimax, 1):\n iii = wkx + ii - i\n for jj in range(jjmin, jjmax, 1):\n jjj = wky + jj - j\n num += kernel[iii, jjj] * data[ii, jj]\n out[i, j] = num\n\n return out\n\n\ndef _convolve_2d_dask_numpy(data, kernel):\n pad_h = kernel.shape[0] // 2\n pad_w = kernel.shape[1] // 2\n _func = partial(_convolve_2d_numpy, kernel=kernel)\n out = data.map_overlap(_func,\n depth=(pad_h, pad_w),\n boundary=np.nan,\n meta=np.array(()))\n return out\n\n\n# https://www.vincent-lunot.com/post/an-introduction-to-cuda-in-python-part-3/\n@cuda.jit\ndef _convolve_2d_cuda(data, kernel, out):\n # expect a 2D grid and 2D blocks,\n # a kernel with odd numbers of rows and columns, (-1-)\n # a grayscale image\n\n # (-2-) 2D coordinates of the current thread:\n i, j = cuda.grid(2)\n\n # To compute the out at coordinates (i, j), we need to use delta_rows rows\n # of the array before and after the i_th row, as well as delta_cols columns\n # of the 
array before and after the j_th column:\n delta_rows = kernel.shape[0] // 2\n delta_cols = kernel.shape[1] // 2\n\n data_rows, data_cols = data.shape\n # (-3-) if the thread coordinates are outside of the data image,\n # we ignore the thread\n # currently, if the thread coordinates are in the edges,\n # we ignore the thread\n if i < delta_rows or i >= data_rows - delta_rows or \\\n j < delta_cols or j >= data_cols - delta_cols:\n return\n\n # The out at coordinates (i, j) is equal to\n # sum_{k, h} kernel[k, h] * data[i - k + delta_rows, j - h + delta_cols]\n # with k and h going through the whole kernel array:\n s = 0\n for k in range(kernel.shape[0]):\n for h in range(kernel.shape[1]):\n i_k = i - k + delta_rows\n j_h = j - h + delta_cols\n # (-4-) Check if (i_k, j_h) coordinates are inside the array:\n if (i_k >= 0) and (i_k < data_rows) and \\\n (j_h >= 0) and (j_h < data_cols):\n s += kernel[k, h] * data[i_k, j_h]\n out[i, j] = s\n\n\ndef _convolve_2d_cupy(data, kernel):\n out = cupy.empty(data.shape, dtype='f4')\n out[:, :] = cupy.nan\n griddim, blockdim = cuda_args(data.shape)\n _convolve_2d_cuda[griddim, blockdim](data, kernel, cupy.asarray(out))\n return out\n\n\ndef _convolve_2d_dask_cupy(data, kernel):\n msg = 'Upstream bug in dask prevents cupy backed arrays'\n raise NotImplementedError(msg)\n\n\ndef convolve_2d(data, kernel):\n \"\"\"\n Calculates, for all inner cells of an array, the 2D convolution of\n each cell via Numba. To account for edge cells, a pad can be added\n to the image array. Convolution is frequently used for image\n processing, such as smoothing, sharpening, and edge detection of\n images by eliminating spurious data or enhancing features in the\n data.\n\n Parameters\n ----------\n image : xarray.DataArray\n 2D array of values to processed and padded.\n kernel : array-like object\n Impulse kernel, determines area to apply impulse function for\n each cell.\n pad : bool, default=True\n To compute edges set to True.\n use-cuda : bool, default=True\n For parallel computing set to True.\n\n Returns\n -------\n convolve_agg : numpy.ndarray\n 2D array representation of the impulse function.\n\n Examples\n --------\n .. plot::\n :include-source:\n\n import numpy as np\n import xarray as xr\n from xrspatial import focal\n from xrspatial.convolution import convolve_2d\n\n # Create Data Array\n agg = xr.DataArray(np.array([[0, 0, 0, 0, 0, 0, 0],\n [0, 0, 2, 4, 0, 8, 0],\n [0, 2, 2, 4, 6, 8, 0],\n [0, 4, 4, 4, 6, 8, 0],\n [0, 6, 6, 6, 6, 8, 0],\n [0, 8, 8, 8, 8, 8, 0],\n [0, 0, 0, 0, 0, 0, 0]]),\n dims = [\"lat\", \"lon\"],\n attrs = dict(res = 1))\n height, width = agg.shape\n _lon = np.linspace(0, width - 1, width)\n _lat = np.linspace(0, height - 1, height)\n agg[\"lon\"] = _lon\n agg[\"lat\"] = _lat\n\n # Create Kernel\n kernel = focal.circle_kernel(1, 1, 1)\n\n # Create Convolution Data Array\n convolve_agg = convolve_2d(image = agg, kernel = kernel)\n\n .. sourcecode:: python\n\n >>> print(convolve_agg)\n [[ 0. 0. 4. 8. 0. 16. 0.]\n [ 0. 4. 8. 10. 18. 16. 16.]\n [ 4. 8. 14. 20. 24. 30. 16.]\n [ 8. 16. 20. 24. 30. 30. 16.]\n [12. 24. 30. 30. 34. 30. 16.]\n [16. 22. 30. 30. 30. 24. 16.]\n [ 0. 16. 16. 16. 16. 16. 
0.]]\n \"\"\"\n # numpy case\n if isinstance(data, np.ndarray):\n out = _convolve_2d_numpy(data, kernel)\n\n # cupy case\n elif has_cuda() and isinstance(data, cupy.ndarray):\n out = _convolve_2d_cupy(data, kernel)\n\n # dask + cupy case\n elif has_cuda() and isinstance(data, da.Array) and \\\n type(data._meta).__module__.split('.')[0] == 'cupy':\n out = _convolve_2d_dask_cupy(data, kernel)\n\n # dask + numpy case\n elif isinstance(data, da.Array):\n out = _convolve_2d_dask_numpy(data, kernel)\n\n else:\n raise TypeError('Unsupported Array Type: {}'.format(type(data)))\n\n return out\n\n\ndef convolution_2d(agg, kernel):\n \"\"\"\n Calculates, for all inner cells of an array, the 2D convolution of\n each cell via Numba. To account for edge cells, a pad can be added\n to the image array. Convolution is frequently used for image\n processing, such as smoothing, sharpening, and edge detection of\n images by eliminating spurious data or enhancing features in the\n data.\n\n Parameters\n ----------\n agg : xarray.DataArray\n 2D array of values to processed and padded.\n kernel : array-like object\n Impulse kernel, determines area to apply impulse function for\n each cell.\n\n Returns\n -------\n convolve_agg : xarray.DataArray\n 2D array representation of the impulse function.\n \"\"\"\n\n # wrapper of convolve_2d\n out = convolve_2d(agg.data, kernel)\n\n return DataArray(out,\n coords=agg.coords,\n dims=agg.dims,\n attrs=agg.attrs)\n","sub_path":"xrspatial/convolution.py","file_name":"convolution.py","file_ext":"py","file_size_in_byte":16459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"77607326","text":"\n\n#calss header\nclass _DISCIPLE():\n\tdef __init__(self,): \n\t\tself.name = \"DISCIPLE\"\n\t\tself.definitions = [u'a person who believes in the ideas and principles of someone famous and tries to live the way that person does or did: ', u'the twelve men who followed Jesus during his life']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_disciple.py","file_name":"_disciple.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"623019596","text":"from django import forms\nfrom skills.models import KeySkill,TechSkill\nfrom betterforms.multiform import MultiModelForm\n\n\nclass KeySkillForm(forms.ModelForm):\n \n class Meta():\n model = KeySkill\n exclude = ['user','last_update_date']\n widgets = {'key_skill':forms.Textarea(attrs={'placeholder':\"eg. Project Management\",\n 'class':'form-control z-depth-1 description key-skill','rows':1\n })}\n def __init__(self,*args,**kwargs):\n super(KeySkillForm, self).__init__(*args, **kwargs)\n self.fields['key_skill'].label='Key Skill'\n\n\nclass TechSkillForm(forms.ModelForm):\n\n y = [ i for i in range(2019,0,-1) if i >= 1980 ]\n years = [(val,str(val)) for val in y ]\n\n last_used = forms.ChoiceField(\n widget=forms.Select(attrs={'class':'last-used'}),\n choices=years)\n\n class Meta():\n model=TechSkill\n exclude = ['user','last_update_date','rating']\n widgets = {'tech_skill':forms.TextInput(attrs={'placeholder':' eg. Java',\n 'class':'textinputclass1'\n }),\n 'tech_skill_description':forms.Textarea(attrs={'placeholder':'eg. 
Use only keywords to describe your skill',\n 'class':'form-control z-depth-1 description','rows':3\n }),\n 'version':forms.TextInput(attrs={'class':'textinputclass1'\n }),\n 'experience':forms.TextInput(attrs={'class':'textinputclass1'\n })\n }\n\n def __init__(self,*args,**kwargs):\n super(TechSkillForm, self).__init__(*args, **kwargs)\n self.fields['tech_skill'].label='Skill'\n self.fields['tech_skill_description'].label='Description'\n self.fields['version'].required=False\n self.fields['last_used'].required=False\n self.fields['experience'].required=False","sub_path":"Resume_Project/resume/skills/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"385871092","text":"'''\nAws_works.s3\n============\nIt's a python module with useful actions for those who works with amazon web services through boto3(aws high-level api).\n\nHere we focus in s3 bucket like an object. It means you set a bucket and execute some actions in it.\n============\n'''\n\nimport io\nimport json\nimport boto3\nimport numpy as np\nimport pandas as pd\n\n\nclient = boto3.client('s3')\n\ndef get_address(s3url):\n '''\n Input:\n path[string, list] = Expect an s3 ulr or a list of s3 urls;\n Return:\n bucket[string]\n path_file[string]\n Example:\n b, p = s3.get_address(\"s3://your-bucket/folder/file.format\")\n \n print(b)\n \"your-bucket\"\n print(p)\n \"folder/file.format\"\n '''\n if(s3url.startswith('s3://')):\n b = []\n p = []\n if(type(s3url)==str):\n s3url = [s3url]\n for url in s3url:\n tmp = url.split('/')[2]\n tmp2 = url.replace('s3://{}/'.format(tmp),'')\n b.append(tmp)\n p.append(tmp2)\n if((len(b)==1)&(len(p)==1)):\n return b[0], p[0]\n else:\n return b, p\n else:\n print('not a s3 url')\n\nclass s3_bucket:\n \n '''Here we attribute s3 a bucket to work'''\n Bucket = None\n \n def __init__(self, name):\n self.Bucket = name #public\n self.__alias = client #private\n \n def read_csv(self, path):\n '''\n Read from s3 a csv file\n -----------------------\n Input:\n path[string] = Expect the path to the csv file to be readed.\n -----------------------\n return:\n dataframe[pandas.dataframe]\n -----------------------\n Example:\n df = s3_bucket.read_csv(path='folder/file.csv')\n -----------------------\n '''\n try:\n obj = client.get_object(Bucket=self.Bucket, Key=path)['Body']\n df = pd.read_csv(io.BytesIO(obj.read()))\n return df\n except:\n raise\n \n def read_log(self, path, output_format='pandas'):\n '''\n Read from s3 a log file in json.\n -------------------------------\n Input:\n path[string] = Expect the path to the json file to be readed.\n output_format['pandas'|'json'] = Specify the format you desire as output, pandas is default.\n -------------------------------\n Return:\n Depending on output_format specified will return or a pandas.dataframe or a json.\n -------------------------------\n Example:\n logDF = s3_bucket.read_log(path='folder/file.json')\n -------------------------------\n '''\n obj = client.get_object(Bucket=self.Bucket,Key=path)['Body']\n jobj = json.loads(obj.read())\n if(output_format=='pandas'):\n logDf = pd.DataFrame(data=jobj['data'],columns=jobj['columns'],index=list(np.arange(0,len(jobj['data']))))\n return logDf\n if(output_format=='json'):\n return jobj\n else:\n print('output_format not specified correctly.')\n \n def write_csv(self, dataframe, path_name):\n '''\n Write a pandas dataframe into s3.\n ---------------------------------\n Input:\n 
dataframe[pandas.dataframe] = Expect a pandas dataframe to be written into s3.\n path_name[string] = Specify the path and name you desire to save your file.\n ---------------------------------\n Return:\n String - dataframe written into s3 bucket.\n ---------------------------------\n Example:\n s3_bucket.write_csv(dataframe, namefile='folder/my_dataframe')\n ---------------------------------\n '''\n namefile = namefile.replace('.csv','')\n obj = io.StringIO()\n dataframe.to_csv(obj, sep=',', index=False)\n client.put_object(Bucket=self.Bucket, Key=path_name+'.csv', Body=obj.getvalue())\n return 'dataframe written into s3.'\n \n def write_log(self, dictionary, path):\n '''\n This method writes logs into s3.\n --------------------------------\n Input:\n dictionary[dict] = Expect a dictionary structured as json files.\n Log description:\n To create a log file we highly recomed to create a json structure, such as:\n {\n 'columns' : [],\n 'data' : []\n }\n path[string] = Expect a string with the path to write the json.\n --------------------------------\n Return:\n String - 'log updated/created'\n --------------------------------\n Example:\n tmp = {\n 'columns':['A','B'],\n 'data':[[0.001, 0.002],[0.003, 0.004]]\n }\n s3_bucket.write_log(dictionary=tmp, path='sql_exec/logs/log.json')\n \n return:\n log updated\n --------------------------------\n '''\n try:\n obj = client.get_object(Bucket=self.Bucket, Key=path)['Body']\n jobj = json.loads(obj.read())\n jobj['data'] = jobj['data']+dictionary['data']\n client.put_object(Bucket=self.Bucket, Key=path, Body=json.dumps(jobj))\n return 'log updated'\n except:\n client.put_object(Bucket=self.Bucket, Key=path, Body=json.dumps(dictionary))\n return 'log created'","sub_path":"aws_works/s3.py","file_name":"s3.py","file_ext":"py","file_size_in_byte":5428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"32753610","text":"\"\"\"\nGiven an array nums of n integers where n > 1, return an array output such that output[i] is equal to the product of all the elements of nums except nums[i].\n\nExample:\n\nInput: [1,2,3,4]\nOutput: [24,12,8,6]\nConstraint: It's guaranteed that the product of the elements of any prefix or suffix of the array (including the whole array) fits in a 32 bit integer.\n\nNote: Please solve it without division and in O(n).\n\nFollow up:\nCould you solve it with constant space complexity? (The output array does not count as extra space for the purpose of space complexity analysis.)\n\nSolution : for O(N) complexity, use 2 lists to store products left of a number and right of a number. The product of the two is the ans\nGiven numbers [2, 3, 4, 5], regarding the third number 4, the product of array except 4 is 2*3*5 which consists of two parts: left 2*3 and right 5. The product is left*right. 
We can get lefts and rights:\n\nNumbers: 2 3 4 5\nLefts: 2 2*3 2*3*4\nRights: 3*4*5 4*5 5 \n\"\"\"\n\nfrom typing import List\n\nclass Solution:\n def productExceptSelf(self, nums: List[int]) -> List[int]:\n fwd = [1]*len(nums)\n bck = [1]*len(nums)\n for i in range(1,len(nums)):\n fwd[i]=fwd[i-1]*nums[i-1]\n bck[len(nums)-i-1]=bck[len(nums)-i]*nums[len(nums)-i]\n for i in range(len(nums)):\n fwd[i]=fwd[i] *bck[i]\n return fwd\n\ns = Solution()\nprint(s.productExceptSelf([1,2,3,4]))","sub_path":"Arrays_and_Strings/Product of Array Except Self.py","file_name":"Product of Array Except Self.py","file_ext":"py","file_size_in_byte":1457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"644975743","text":"import numpy as np\nimport scipy.io\nimport matplotlib.pyplot as plt\n\n\ndef pca(y, d):\n mean = np.zeros((y.shape[0], 1))\n for i in range(y.shape[1]):\n mean += y[:, i]/y.size\n centred_y = np.zeros(y.shape)\n for i in range(y.shape[1]):\n temp = y[:, i] - mean\n centred_y[:, i] = temp.T\n u, s, v = np.linalg.svd(centred_y)\n print(u.shape)\n plt.plot(s)\n plt.show()\n x = np.zeros((d, y.shape[1]))\n for i in range(x.shape[1]):\n x[:, i] = np.matmul(u[:, :d].T, (centred_y[:, i]))\n return mean, u, x\n\n\ndef kpca(k, d):\n i = np.eye(k.shape[1])\n o = np.ones(k.shape[1])/k.shape[1]\n mul = i - o\n k_n = np.matmul(np.matmul(mul, k), mul)\n print(k_n.shape)\n w, v = np.linalg.eig(k_n)\n # print(v)\n for i in range(v.shape[1]):\n v[:, i] = v[:, i]/w[i]\n temp = v[:, :d].T\n x = np.matmul(temp, k)\n return x\n\n\ndef kernel(y, sig):\n k = np.zeros((y.shape[1], y.shape[1]))\n for i in range(y.shape[1]):\n for j in range(y.shape[1]):\n temp = np.linalg.norm(y[:, i] - y[:, j], ord=2)**2/(2*(sig**2))\n k[i, j] = np.exp(-temp)\n return k\n\n\ndata = scipy.io.loadmat('dataset1.mat')\nY = np.asmatrix(data['Y'])\nker = kernel(Y, 0.6)\nx = kpca(ker, 2)\nplt.scatter(x[0, :], x[1, :], c='blue')\nplt.show()\n","sub_path":"algo.py","file_name":"algo.py","file_ext":"py","file_size_in_byte":1300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"474643030","text":"import flask\nimport json\nimport flask_cors\nimport os\nimport subprocess\nimport requests\nimport shutil\nimport pandas as pd\n\nhostIp = \"127.0.0.1\"\n\n\napp = flask.Flask(__name__)\nflask_cors.CORS(app)\n\n@app.route(\"/\")\ndef baseRoute():\n return flask.Response(\n status=403\n )\n\ndef fetchNodeDependencies(nodesData, linksData, key):\n nodeDependencies = []\n for link in linksData:\n if link[\"to\"] == key:\n if link[\"from\"] == 1 or nodesData[str(link[\"from\"])][\"text\"] == \"StartJob\":\n nodeDependencies.append('StartJob')\n else:\n if nodesData[str(link[\"from\"])]['category'] == \"Merge\":\n nodeDependencies.append(nodesData[str(link[\"from\"])]['text'])\n elif nodesData[str(link[\"from\"])]['category'] == \"spark\" or nodesData[str(link[\"from\"])]['category'] == \"python\" or nodesData[str(link[\"from\"])]['category'] == \"shell\":\n nodeDependencies.append(nodesData[str(link[\"from\"])]['text'] + \"_\" + nodesData[str(link[\"from\"])]['category'])\n else:\n nodeDependencies.append(nodesData[str(link[\"from\"])]['text'] + \"_\" + nodesData[str(link[\"from\"])]['type'])\n return nodeDependencies\n\ndef checkForDuplicateNodes(nodesData):\n nodesNames = []\n nodesCountDict = {}\n dupNodesTextList = []\n for node in nodesData:\n nodesNames.append(\n (node['text'] + node['type']).upper()\n )\n if node['text'] in 
list(nodesCountDict.keys()):\n nodesCountDict[node['text']] += 1\n else:\n nodesCountDict[node['text']] = 1\n for key, value in nodesCountDict.items():\n if value > 1:\n dupNodesTextList.append(key)\n if len(set(nodesNames)) == len(nodesNames):\n return False, \"\"\n else:\n return True, \", \".join(dupNodesTextList)\n\n@app.route(\"/submitjob\", methods=[\"POST\"])\ndef submit():\n if flask.request.content_type == \"application/json\":\n # try:\n payload = json.loads(flask.request.data.decode(\"ascii\"))\n clientName = payload[\"clientName\"]\n projectName = payload[\"projectName\"]\n workflowName = payload[\"workflowName\"]\n renameEndJob = payload[\"renameEndJob\"]\n graphData = payload[\"graphData\"]\n cwd = os.getcwd()\n try:\n shutil.rmtree(os.getcwd() + \"\\\\\" + workflowName)\n os.remove(os.getcwd() + \"\\\\\" + workflowName + \".zip\")\n except Exception as exp:\n pass\n newPath = os.path.join(cwd, workflowName)\n if not os.path.exists(newPath):\n os.makedirs(newPath)\n os.chdir(newPath)\n else:\n os.chdir(newPath)\n revoFileNameMapping = {\n \"filecheck\": \"FileCheckExecutor.py\",\n \"datatransfer\": \"OozieDataTransferExecutor.py\",\n \"landing\": \"OozieLandingExecutor.py\",\n \"dqm\": \"OozieDQMExecutor.py\",\n \"bre\": \"OozieBREWorkflowExecutor.py\",\n \"export\": \"OozieExportExecutor.py\"\n }\n nodesData = {}\n nodesData2 = graphData[\"nodeDataArray\"]\n nodesAreDuplicate, nodesDuplicateList = checkForDuplicateNodes(nodesData2)\n if nodesAreDuplicate is False:\n for node in nodesData2:\n nodesData[str(node['key'])] = node\n print(nodesData)\n linksData = graphData[\"linkDataArray\"]\n for node in nodesData2:\n key = node[\"key\"]\n if node['text'] == \"StartJob\" and node['category'] == \"Start\" and node['type'] == \"noop\":\n with open(\"StartJob.job\",\"w+\") as fp:\n command = \"#StartJob.Job\\ntype=noop\"\n fp.write(command)\n print(command)\n elif node['text'] == \"EndJob\" and node['category'] == \"End\" and node[\"type\"] == \"noop\":\n jobDependencies = fetchNodeDependencies(nodesData,linksData,key)\n with open(\"EndJob.job\",\"w+\") as fp:\n jobName = node['text']\n fileName = jobName + \".job\"\n command = \"#{}\\ntype=noop\\ndependencies={}\".format(\n fileName,\n \",\".join(jobDependencies)\n )\n fp.write(command)\n print(command)\n elif node['category'] == \"Merge\" and node[\"type\"] == \"noop\":\n jobDependencies = fetchNodeDependencies(nodesData,linksData,key)\n with open(node[\"text\"] + \".job\",\"w+\") as fp:\n jobName = node['text']\n fileName = jobName + \".job\"\n command = \"#{}\\ntype=noop\\ndependencies={}\".format(\n fileName,\n \",\".join(jobDependencies)\n )\n fp.write(command)\n print(command)\n else:\n jobDependencies = fetchNodeDependencies(nodesData,linksData,key)\n jobType = node['type']\n jobCategory = node['category']\n fileName = \"\"\n if jobCategory == \"revo\":\n if jobType in list(revoFileNameMapping.keys()):\n jobName = node['text'] + \"_\" + node['type']\n fileName = jobName + \".job\"\n command = \"#{}\\ntype=command\\ncommand=python {} {} {} {}\\ndependencies={}\".format(\n fileName,\n revoFileNameMapping[jobType],\n clientName,\n projectName,\n node['text'].upper(),\n \",\".join(jobDependencies)\n )\n elif jobType == \"noop\":\n jobName = node['text']\n fileName = jobName + \".job\"\n command = \"#{}\\ntype=noop\\ndependencies={}\".format(\n fileName,\n \",\".join(jobDependencies)\n )\n else:\n print(jobType)\n raise Exception(\"job type not supported\")\n elif jobCategory == \"shell\":\n jobName = node['text'] + \"_\" + 
jobCategory\n fileName = jobName + \".job\"\n command = \"#{}\\ntype=command\\ncommand=sh {}\\ndependencies={}\".format(\n fileName,\n jobType,\n \",\".join(jobDependencies)\n )\n elif jobCategory == \"python\":\n jobName = node['text'] + \"_\" + jobCategory\n fileName = jobName + \".job\"\n command = \"#{}\\ntype=command\\ncommand=python {}\\ndependencies={}\".format(\n fileName,\n jobType,\n \",\".join(jobDependencies)\n )\n elif jobCategory == \"spark\":\n jobName = node['text'] + \"_\" + jobCategory\n fileName = jobName + \".job\"\n command = \"#{}\\ntype=command\\ncommand=spark-submit {}\\ndependencies={}\".format(\n fileName,\n jobType,\n \",\".join(jobDependencies)\n )\n else:\n print(jobCategory)\n raise Exception(\"job category not supported: \" + jobCategory)\n with open(fileName,\"w+\") as fp:\n fp.write(command)\n try:\n if renameEndJob == \"true\":\n endjobSrc = newPath + \"\\\\EndJob.job\"\n endjobDest = newPath + \"\\\\\" + workflowName + \".job\"\n os.rename(endjobSrc, endjobDest)\n except Exception as exp:\n print(\"Warning: Renaming EndJob.job failed\")\n job_properties = \"working.dir=/home/hadoop/CODE/BackEnd/\"\n with open(\"job.properties\",\"w+\") as fp:\n fp.write(job_properties)\n os.chdir(cwd)\n subprocess.check_output(['zip','-r', workflowName + '.zip', workflowName])\n responseData = {\n \"status\": \"success\",\n \"message\": \"Zip file created\"\n }\n return flask.Response(\n response=json.dumps(responseData),\n status=200,\n mimetype='application/json'\n )\n else:\n responseData = {\n \"status\": \"success\",\n \"message\": \"Duplicate Nodes are present: \" + nodesDuplicateList\n }\n return flask.Response(\n response=json.dumps(responseData),\n status=200,\n mimetype='application/json'\n )\n # except Exception as exp:\n # data = {\n # \"status\": \"error\",\n # \"message\": str(exp)\n # }\n # return flask.Response(\n # response=json.dumps(data),\n # status=200,\n # mimetype='application/json'\n # )\n else:\n data = {\n \"status\": \"error\",\n \"message\": \"Only application/json accepted as payload\"\n }\n return flask.Response(\n response=json.dumps(data),\n status=200,\n mimetype='application/json'\n )\n\n\n@app.route(\"/mergeprojects\", methods=[\"POST\"])\ndef mergeProjects():\n if flask.request.content_type == \"application/json\":\n try:\n payload = json.loads(flask.request.data.decode(\"ascii\"))\n mergedProjectName = payload[\"mergedProjectName\"].strip()\n sourceProjectsList = payload[\"sourceProjectsList\"].split(\",\")\n sourceProjectsList = list(map(lambda x: x.strip(), sourceProjectsList))\n cwd = os.getcwd()\n print(mergedProjectName)\n print(sourceProjectsList)\n # Remove old dir if already present\n try:\n shutil.rmtree(cwd + \"\\\\\" + mergedProjectName)\n shutil.rmtree(cwd + \"\\\\\" + \"temp_data\")\n except Exception as exp:\n print(\"Error at line 247\" + str(exp))\n os.makedirs(cwd + \"\\\\\" + mergedProjectName)\n for project in sourceProjectsList:\n src = cwd + \"\\\\\" + project\n tgt = cwd + \"\\\\\" + \"temp_data\"\n shutil.copytree(src, tgt)\n if os.path.exists(cwd + \"\\\\\" + \"temp_data\" + \"\\\\\" + \"EndJob.job\"):\n os.rename(\n cwd + \"\\\\\" + \"temp_data\" + \"\\\\\" + \"EndJob.job\",\n cwd + \"\\\\\" + \"temp_data\" + \"\\\\\" + project + \".job\"\n )\n jobsList = os.listdir(tgt)\n for job in jobsList:\n shutil.copy(tgt + \"\\\\\" + job, cwd + \"\\\\\" + mergedProjectName + \"\\\\\" + job)\n shutil.rmtree(tgt)\n subprocess.check_output(['zip','-r', mergedProjectName + '.zip', mergedProjectName])\n data = {\n \"status\": 
\"success\",\n \"message\": \"Projects merged\"\n }\n return flask.Response(\n response=json.dumps(data),\n status=200,\n mimetype='application/json'\n )\n except Exception as exp:\n data = {\n \"status\": \"error\",\n \"message\": str(exp)\n }\n return flask.Response(\n response=json.dumps(data),\n status=200,\n mimetype='application/json'\n )\n else:\n data = {\n \"status\": \"error\",\n \"message\": \"Only application/json accepted as payload\"\n }\n return flask.Response(\n response=json.dumps(data),\n status=200,\n mimetype='application/json'\n )\n\n\n\n@app.route(\"/downloadcsv\", methods=[\"POST\"])\ndef downloadCsv():\n if flask.request.content_type == \"application/json\":\n try:\n payload = json.loads(flask.request.data.decode(\"ascii\"))\n graphData = payload[\"graphData\"]\n workflowname = payload[\"workflowname\"]\n nodesData = graphData[\"nodeDataArray\"]\n jobsListDf = pd.DataFrame(columns = ['job_name','job_type'])\n for job in nodesData:\n tempDf = pd.DataFrame(columns = ['job_name','job_type'])\n tempDf[\"job_name\"] = [job['text']]\n tempDf[\"job_type\"] = [job['type']]\n jobsListDf = jobsListDf.append(tempDf,sort = False).reset_index().drop(['index'], axis = 1)\n jobsListDf.to_csv(\n workflowname + \".csv\",\n index = False\n ) \n data = {\n \"status\": \"success\",\n \"message\": \"CSV downloaded\"\n }\n return flask.Response(\n response=json.dumps(data),\n status=200,\n mimetype='application/json'\n )\n except Exception as exp:\n data = {\n \"status\": \"error\",\n \"message\": str(exp)\n }\n return flask.Response(\n response=json.dumps(data),\n status=200,\n mimetype='application/json'\n )\n else:\n data = {\n \"status\": \"error\",\n \"message\": \"Only application/json accepted as payload\"\n }\n return flask.Response(\n response=json.dumps(data),\n status=200,\n mimetype='application/json'\n )\n\n\nif __name__ == \"__main__\":\n app.run(\n debug=True,\n host=\"0.0.0.0\"\n )\n","sub_path":"awd-v4-flask/old-app.py","file_name":"old-app.py","file_ext":"py","file_size_in_byte":14107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"351616925","text":"import datetime\nimport json\n\nfrom dateutil.tz import tzoffset\nfrom src.libs.django_utils.serialization.flexible_json_serializer import JSONSerializer\nfrom src.libs.django_utils.tests import FakeTestClass\n\n\ndef test_serializer_serializes_dict_with_model():\n serializer = JSONSerializer()\n test_class = FakeTestClass(name='Some Name', id=1, url='http://www.test.com', trusted_geo_data=False)\n dict_data = {'attrs': {'city': 'Brooklyn', 'posted_date': datetime.datetime(2013, 8, 29, 12, 55,\n tzinfo=tzoffset('EDT', -14400)),\n 'description': 'Beautiful 3 Bedroom 2 Full bath\\n\\nAmazing Finishes\\n\\nHuge '\n 'Backyard\\n\\n100% no fee By owner\\n\\nAll Bedrooms can fit King and Queen '\n 'sized beds\\n\\nSteps to the G train\\n\\nLaundry in the Building\\n\\nClose to '\n 'All your needs\\n\\nNo brokers Please\\n\\nCall or Text Danny @ 646 338 '\n '3852\\n\\n3526+56+5',\n 'title': '$2695 / 3br - 3 Bedroom 2 Full bath + Massive Backyard~Prime Location (bedstuy / '\n 'clinton hill)',\n 'url': 'http://newyork.craigslist.org/brk/abo/4033538277.html', 'broker_fee': False,\n 'price': 2695.0, 'state': 'NY', 'contact_phone_number': '(646) 338-3852',\n 'address': 'Nostrand Avenue & Vernon Avenue', 'lat': 40.6942608, 'bedroom_count': 3,\n 'lng': -73.9523367,\n 'formatted_address': 'Nostrand Avenue & Vernon Avenue, Brooklyn, NY 11205, USA',\n 'contact_name': 'bedstuy / 
clinton hill', 'listing_source': test_class, 'zip_code': '11205'}}\n\n serialized_data = serializer.serialize(dict_data)\n deserialized_data = json.loads(serialized_data)\n x = deserialized_data\n\ndef test_serializer_serializes_model_correctly():\n serializer = JSONSerializer()\n test_class = FakeTestClass(name='Some Name', id=1, url='http://www.test.com', trusted_geo_data=False)\n dict_data = {'test_model': test_class}\n\n serialized_data = serializer.serialize(dict_data)\n deserialized_data = json.loads(serialized_data)\n assert deserialized_data[\"test_model\"][\"model\"] == 'django_utils.faketestclass'\n","sub_path":"src/libs/django_utils/tests/unit/test_json_serializer.py","file_name":"test_json_serializer.py","file_ext":"py","file_size_in_byte":2408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"212523210","text":"# -*- coding: UTF-8 -*-\n# Copyright 2012-2018 Rumma & Ko Ltd\n# License: BSD (see file COPYING for details)\n\n\nfrom __future__ import unicode_literals\nfrom __future__ import print_function\n\nfrom decimal import Decimal\n\nfrom django.conf import settings\nfrom django.db import models\n# from django.core.exceptions import ValidationError\n\nfrom lino.utils import SumCollector\n# from lino.utils.dates import AMONTH, ADAY\nfrom lino.api import dd, rt, _\n\nfrom lino_xl.lib.excerpts.mixins import Certifiable\nfrom lino_xl.lib.ledger.utils import myround\nfrom lino_xl.lib.accounts.choicelists import CommonAccounts\nfrom lino_xl.lib.ledger.mixins import ProjectRelated, VoucherItem\nfrom lino_xl.lib.ledger.mixins import PeriodRange\nfrom lino_xl.lib.ledger.models import Voucher\nfrom lino_xl.lib.sepa.mixins import Payable\n\nfrom .utils import ZERO, ONE\nfrom .choicelists import VatClasses, VatRegimes, VatAreas, VatRules\n\nDECLARED_IN = False\n\nclass PartnerDetailMixin(dd.DetailLayout):\n \"\"\"\n Defines a panel :attr:`ledger`, to be added as a tab panel to your\n layout's `main` element.\n\n .. attribute:: ledger\n\n Shows the tables `VouchersByPartner` and `MovementsByPartner`.\n \"\"\"\n if dd.is_installed('ledger'):\n ledger = dd.Panel(\"\"\"\n payment_term purchase_account\n vat.VouchersByPartner\n ledger.MovementsByPartner\n \"\"\", label=dd.plugins.ledger.verbose_name)\n else:\n ledger = dd.DummyPanel()\n\n\ndef get_default_vat_regime():\n return dd.plugins.vat.default_vat_regime\n\n\ndef get_default_vat_class():\n return dd.plugins.vat.default_vat_class\n\n\nclass VatTotal(dd.Model):\n # abstract base class for both voucher and item\n class Meta:\n abstract = True\n\n # price = dd.PriceField(_(\"Total\"),blank=True,null=True)\n total_incl = dd.PriceField(_(\"Total incl. VAT\"), blank=True, null=True)\n total_base = dd.PriceField(_(\"Total excl. VAT\"), blank=True, null=True)\n total_vat = dd.PriceField(_(\"VAT\"), blank=True, null=True)\n\n _total_fields = set('total_vat total_base total_incl'.split())\n # For internal use. 
This is the list of field names to disable\n # when `edit_totals` is False.\n\n edit_totals = True\n\n # def get_trade_type(self):\n # raise NotImplementedError()\n\n def disabled_fields(self, ar):\n fields = super(VatTotal, self).disabled_fields(ar)\n if self.edit_totals:\n rule = self.get_vat_rule(self.get_trade_type())\n if rule is None:\n fields.add('total_vat')\n fields.add('total_base')\n elif not rule.can_edit:\n fields.add('total_vat')\n else:\n fields |= self._total_fields\n return fields\n\n def reset_totals(self, ar):\n pass\n\n def get_vat_rule(self, tt):\n return None\n\n def total_base_changed(self, ar):\n # dd.logger.info(\"20150128 total_base_changed %r\", self.total_base)\n if self.total_base is None:\n self.reset_totals(ar)\n if self.total_base is None:\n return\n\n rule = self.get_vat_rule(self.get_trade_type())\n # dd.logger.info(\"20180813 %r\", rule)\n if rule is None:\n self.total_incl = None\n self.total_vat = None\n else:\n self.total_incl = myround(self.total_base * (ONE + rule.rate))\n self.total_vat = self.total_incl - self.total_base\n\n def total_vat_changed(self, ar):\n if self.total_vat is None:\n self.reset_totals(ar)\n if self.total_vat is None:\n return\n\n if self.total_base is None:\n self.total_base = ZERO\n self.total_incl = self.total_vat + self.total_base\n\n def total_incl_changed(self, ar):\n if self.total_incl is None:\n self.reset_totals(ar)\n if self.total_incl is None:\n return\n # assert not isinstance(self.total_incl,basestring)\n rule = self.get_vat_rule(self.get_trade_type())\n if rule is None:\n self.total_base = None\n self.total_vat = None\n else:\n self.total_base = myround(self.total_incl / (ONE + rule.rate))\n self.total_vat = myround(self.total_incl - self.total_base)\n \n\nclass ComputeSums(dd.Action):\n help_text = _(\"Compute sums\")\n button_text = \"Σ\"\n custom_handler = True\n readonly = False\n \n def get_action_permission(self, ar, obj, st):\n # if ar.data_iterator is None:\n # return False\n if not super(ComputeSums, self).get_action_permission(ar, obj, st):\n return False\n return True\n\n def run_from_ui(self, ar, **kw):\n obj = ar.selected_rows[0]\n obj.compute_totals()\n obj.full_clean()\n obj.save()\n ar.success(refresh=True)\n \n \nclass VatDocument(ProjectRelated, VatTotal):\n\n # refresh_after_item_edit = False\n\n class Meta:\n abstract = True\n\n vat_regime = VatRegimes.field()\n items_edited = models.BooleanField(default=False)\n compute_sums = ComputeSums()\n\n @classmethod\n def get_registrable_fields(cls, site):\n for f in super(VatDocument, cls).get_registrable_fields(site):\n yield f\n yield 'vat_regime'\n\n def compute_totals(self):\n if self.pk is None or not self.state.editable:\n return\n base = Decimal()\n vat = Decimal()\n for i in self.items.all():\n if i.total_base is not None:\n base += i.total_base\n if i.total_vat is not None:\n vat += i.total_vat\n self.total_base = myround(base)\n self.total_vat = myround(vat)\n self.total_incl = myround(vat + base)\n\n def get_payable_sums_dict(self):\n # implements sepa.mixins.Payable\n sums = SumCollector()\n tt = self.get_trade_type()\n # vat_account = tt.get_vat_account()\n # if vat_account is None:\n # raise Exception(\"No VAT account for %s.\" % tt)\n for i in self.items.order_by('seqno'):\n rule = i.get_vat_rule(tt)\n b = i.get_base_account(tt)\n ana_account = i.get_ana_account()\n if i.total_base:\n if b is None:\n msg = \"No base account for {0} (tt {1}, total_base {2})\"\n raise Warning(msg.format(i, tt, i.total_base))\n sums.collect(\n ((b, 
ana_account), self.project, i.vat_class, self.vat_regime),\n i.total_base)\n if i.total_vat and rule is not None:\n if not rule.vat_account:\n msg = _(\"This rule ({}) does not allow any VAT.\")\n raise Warning(msg.format(rule))\n \n vat_amount = i.total_vat\n if rule.vat_returnable:\n if rule.vat_returnable_account is None:\n acc_tuple = (b, ana_account)\n else:\n acc_tuple = (\n rule.vat_returnable_account.get_object(), None)\n sums.collect(\n (acc_tuple, self.project,\n i.vat_class, self.vat_regime),\n vat_amount)\n vat_amount = - vat_amount\n sums.collect(\n ((rule.vat_account.get_object(), None), self.project,\n i.vat_class, self.vat_regime),\n vat_amount)\n return sums\n\n def fill_defaults(self):\n super(VatDocument, self).fill_defaults()\n if not self.vat_regime:\n if self.partner_id:\n self.vat_regime = self.partner.vat_regime\n if not self.vat_regime:\n self.vat_regime = get_default_vat_regime()\n\n def update_item(self):\n if self.pk is None or not self.state.editable:\n return\n if self.items_edited or not self.edit_totals:\n return\n tt = self.journal.trade_type\n account = tt.get_partner_invoice_account(self.partner)\n if account is None:\n account = CommonAccounts.waiting.get_object()\n if account is None:\n raise Warning(\n _(\"{} is not configured\").format(\n CommonAccounts.waiting))\n kw = dict()\n if dd.is_installed('ana') and account.needs_ana:\n kw['ana_account'] = account.ana_account\n kw['account'] = account\n kw['total_incl'] = self.total_incl\n qs = self.items.all()\n if qs.count():\n item = qs[0]\n for k, v in kw.items():\n setattr(item, k, v)\n else:\n item = self.add_voucher_item(seqno=1, **kw)\n item.total_incl_changed(None)\n item.full_clean()\n item.save()\n\n \n def partner_changed(self, ar=None):\n self.vat_regime = None\n self.fill_defaults()\n # self.update_item() # called by after_ui_save()\n \n def after_ui_save(self, ar, cw):\n self.update_item()\n return super(VatDocument, self).after_ui_save(ar, cw)\n \n \n def full_clean(self, *args, **kw):\n super(VatDocument, self).full_clean(*args, **kw)\n if not self.edit_totals:\n self.compute_totals()\n\n def before_state_change(self, ar, old, new):\n if new.name == 'registered':\n self.compute_totals()\n elif new.name == 'draft':\n if not self.edit_totals:\n self.total_base = None\n self.total_vat = None\n self.total_incl = None\n super(VatDocument, self).before_state_change(ar, old, new)\n\n\nclass VatItemBase(VoucherItem, VatTotal):\n\n class Meta:\n abstract = True\n\n vat_class = VatClasses.field(blank=True, default=get_default_vat_class)\n\n def delete(self, **kw):\n super(VatItemBase, self).delete(**kw)\n v = self.voucher\n if v.edit_totals and v.items_edited:\n if not v.items.exists():\n v.items_edited = False\n v.save()\n\n def get_trade_type(self):\n return self.voucher.get_trade_type()\n\n def get_vat_class(self, tt):\n return dd.plugins.vat.get_vat_class(tt, self)\n\n def vat_class_changed(self, ar):\n # dd.logger.info(\"20121204 vat_class_changed\")\n if self.voucher.vat_regime.item_vat:\n self.total_incl_changed(ar)\n else:\n self.total_base_changed(ar)\n\n def get_base_account(self, tt):\n raise NotImplementedError\n\n def get_vat_rule(self, tt):\n if self.vat_class is None:\n self.vat_class = self.get_vat_class(tt)\n # we store it because there might come more calls, but we\n # don't save it because here's not the place to decide\n # this.\n \n # country = self.voucher.partner.country or \\\n # dd.plugins.countries.get_my_country()\n vat_area = VatAreas.get_for_country(\n 
self.voucher.partner.country)\n return VatRules.get_vat_rule(\n vat_area,\n trade_type=tt,\n vat_regime=self.voucher.vat_regime,\n vat_class=self.vat_class, \n date=self.voucher.entry_date)\n\n # def save(self,*args,**kw):\n # super(VatItemBase,self).save(*args,**kw)\n # self.voucher.full_clean()\n # self.voucher.save()\n\n def set_amount(self, ar, amount):\n self.voucher.fill_defaults()\n if self.voucher.vat_regime.item_vat: # unit_price_includes_vat\n self.total_incl = myround(amount)\n self.total_incl_changed(ar)\n else:\n self.total_base = myround(amount)\n self.total_base_changed(ar)\n\n def reset_totals(self, ar):\n # if self.voucher.items_edited:\n if self.voucher.edit_totals:\n total = Decimal()\n for item in self.voucher.items.exclude(id=self.id):\n total += item.total_incl\n # if total != self.voucher.total_incl:\n self.total_incl = self.voucher.total_incl - total\n self.total_incl_changed(ar)\n\n super(VatItemBase, self).reset_totals(ar)\n\n def before_ui_save(self, ar):\n if self.total_incl is None:\n self.reset_totals(ar)\n super(VatItemBase, self).before_ui_save(ar)\n\n def after_ui_save(self, ar, cw):\n \"\"\"\n After editing a grid cell automatically show new invoice totals.\n \"\"\"\n kw = super(VatItemBase, self).after_ui_save(ar, cw)\n if self.voucher.edit_totals and not self.voucher.items_edited:\n self.voucher.items_edited = True\n self.voucher.save()\n # if self.voucher.refresh_after_item_edit:\n # ar.set_response(refresh_all=True)\n # self.voucher.compute_totals()\n # self.voucher.full_clean()\n # self.voucher.save()\n return kw\n\n\nclass QtyVatItemBase(VatItemBase):\n\n class Meta:\n abstract = True\n\n unit_price = dd.PriceField(_(\"Unit price\"), blank=True, null=True)\n qty = dd.QuantityField(_(\"Quantity\"), blank=True, null=True)\n\n def unit_price_changed(self, ar=None):\n self.reset_totals(ar)\n\n def qty_changed(self, ar=None):\n self.reset_totals(ar)\n\n def reset_totals(self, ar=None):\n super(QtyVatItemBase, self).reset_totals(ar)\n # if self.voucher.edit_totals:\n # if self.qty:\n # if self.voucher.item_vat:\n # self.unit_price = self.total_incl / self.qty\n # else:\n # self.unit_price = self.total_base / self.qty\n\n if self.unit_price is not None and self.qty is not None:\n self.set_amount(ar, myround(self.unit_price * self.qty))\n\n\nclass VatDeclaration(Payable, Voucher, Certifiable, PeriodRange):\n \"\"\"Abstract base class for VAT declarations.\n\n A **VAT declaration** is when a company declares to its government\n how much sales and purchases they've done during a given period.\n\n A VAT declaration is a computed summary of ledger movements in an\n **observed period**, but it is also itself a ledger voucher which\n generates new movements in its own period.\n\n :class:`lino_xl.lib.sepa.Payable`\n :class:`lino_xl.lib.ledger.Voucher`\n :class:`lino_xl.lib.excerpts.Certifiable`\n :class:`lino_xl.lib.ledger.PeriodRange`\n\n .. 
attribute:: accounting_period\n\n \"\"\"\n\n class Meta:\n abstract = True\n \n def get_match(self):\n return self.get_default_match() # no manual match field\n\n def full_clean(self, *args, **kw):\n if self.entry_date:\n AP = rt.models.ledger.AccountingPeriod\n # declare the previous month by default \n if not self.start_period_id:\n self.start_period = AP.get_default_for_date(\n self.entry_date)\n # self.start_period = AP.get_default_for_date(\n # self.entry_date - AMONTH)\n \n # if not self.start_date:\n # self.start_date = (self.voucher_date-AMONTH).replace(day=1)\n # if not self.end_date:\n # self.end_date = self.start_date + AMONTH - ADAY\n # if self.voucher_date <= self.end_date:\n # raise ValidationError(\n # \"Voucher date must be after the covered period\")\n # self.compute_fields()\n super(VatDeclaration, self).full_clean(*args, **kw)\n\n def register_voucher(self, *args, **kwargs):\n # self.compute_fields()\n if DECLARED_IN:\n count = 0\n for doc in rt.models.ledger.Voucher.objects.filter(\n # journal=jnl,\n # year=self.accounting_period.year,\n # entry_date__month=month,\n journal__must_declare=True,\n entry_date__gte=self.start_date,\n entry_date__lte=self.end_date,\n declared_in__isnull=True\n ):\n #~ logger.info(\"20121208 a can_declare %s\",doc)\n count += 1\n doc.declared_in = self\n doc.save()\n #~ declared_docs.append(doc)\n if False: # write match to declared movements\n flt = self.get_period_filter(\n 'voucher__accounting_period',\n match='',\n account__clearable=True,\n account__needs_partner=False,\n voucher__journal__must_declare=True)\n qs = rt.models.ledger.Movement.objects.filter(**flt)\n for mvt in qs:\n mvt.match = self.get_match()\n mvt.save()\n super(VatDeclaration, self).register_voucher(*args, **kwargs)\n \n\n def deregister_voucher(self, *args, **kwargs):\n if DECLARED_IN:\n for doc in rt.models.ledger.Voucher.objects.filter(\n declared_in=self):\n doc.declared_in = None\n doc.save()\n \n if False: # remove match from declared movements\n flt = self.get_period_filter(\n 'voucher__accounting_period',\n match=self.get_match(),\n account__clearable=True,\n account__needs_partner=False,\n voucher__journal__must_declare=True)\n qs = rt.models.ledger.Movement.objects.filter(**flt)\n for mvt in qs:\n mvt.match = ''\n mvt.save()\n \n # deregister\n super(VatDeclaration, self).deregister_voucher(*args, **kwargs)\n \n \n # def before_state_change(self, ar, old, new):\n # if new.name == 'register':\n # self.compute_fields()\n # elif new.name == 'draft':\n # super(Declaration, self).before_state_change(ar, old, new)\n\n #~ def register(self,ar):\n #~ self.compute_fields()\n #~ super(Declaration,self).register(ar)\n #~\n #~ def deregister(self,ar):\n #~ for doc in ledger.Voucher.objects.filter(declared_in=self):\n #~ doc.declared_in = None\n #~ doc.save()\n #~ super(Declaration,self).deregister(ar)\n\n \n def get_payable_sums_dict(self):\n \"\"\"\n Implements\n :meth:`lino_xl.lib.sepa.Payable.get_payable_sums_dict`.\n\n As a side effect this updates values in the computed fields of\n this declaration.\n\n \"\"\"\n fields = self.fields_list.get_list_items()\n payable_sums = SumCollector()\n sums = dict() # field sums\n for fld in fields:\n if fld.editable:\n sums[fld.name] = getattr(self, fld.name)\n else:\n sums[fld.name] = Decimal('0.00') # ZERO\n\n flt = self.get_period_filter(\n 'voucher__accounting_period',\n # voucher__journal=jnl,\n # voucher__year=self.accounting_period.year,\n voucher__journal__must_declare=True)\n # voucher__declared_in__isnull=True)\n\n\n qs 
= rt.models.ledger.Movement.objects.filter(**flt)\n qs = qs.order_by('voucher__journal', 'voucher__number')\n\n # print(20170713, qs)\n\n for mvt in qs:\n for fld in fields:\n fld.collect_from_movement(\n self, mvt, sums, payable_sums)\n # if fld.is_payable:\n # print(\"20170802 after {} {} : {}\".format(\n # fld, mvt.amount, payable_sums))\n \n for fld in fields:\n fld.collect_from_sums(self, sums, payable_sums)\n\n # dd.logger.info(\"20170713 value in 55 is %s\", sums['F55'])\n\n #~ print 20121209, item_models\n #~ for m in item_models:\n #~ for m in rt.models_by_base(VatDocument):\n #~ for item in m.objects.filter(voucher__declaration=self):\n #~ logger.info(\"20121208 b document %s\",doc)\n #~ self.collect_item(sums,item)\n\n for fld in fields:\n if not fld.editable:\n setattr(self, fld.name, sums[fld.name])\n\n # self.full_clean()\n # self.save()\n return payable_sums\n","sub_path":"lino_xl/lib/vat/mixins.py","file_name":"mixins.py","file_ext":"py","file_size_in_byte":20217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"128516442","text":"from django.shortcuts import render\nfrom django.shortcuts import get_object_or_404, render_to_response\nfrom django.template.loader import render_to_string\nfrom django.views.decorators.http import require_POST\nfrom django.http import HttpResponse\nfrom .models import Comment\nfrom apps.event.models import Event\nimport json\n\n@require_POST\ndef main(request, event_url):\n event = get_object_or_404(Event, url=event_url.lower())\n \n name = request.POST.get('name')\n comment = request.POST.get('comment')\n \n errors = {}\n if name == None or len(name) == 0:\n errors[\"name\"] = \"Name is required.\"\n elif len(name) > 32:\n errors[\"name\"] = \"Name is too long.\"\n \n if comment == None or len(comment) == 0:\n errors[\"comment\"] = \"Comment is required.\"\n elif len(comment) > 512:\n errors[\"comment\"] = \"Comment text is too large.\"\n \n \n if errors:\n return HttpResponse(json.dumps({\"errors\": errors}))\n else:\n Comment.new(event, name, comment)\n \n comments = Comment.objects.filter(event=event).order_by('timestamp').reverse()\n update = render_to_string('module/comment-list.html', {'comments': comments})\n \n return HttpResponse(json.dumps({\"update\": update}))\n","sub_path":"wab/apps/comment/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"39876290","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon May 11 09:15:47 2020\r\n\r\n@author: louis_000\r\n\"\"\"\r\n\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon May 11 07:46:46 2020\r\n\r\n@author: louis_000\r\n\"\"\"\r\n\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Apr 14 16:31:05 2020\r\n@author: Florane_000\r\n\"\"\"\r\n\r\n#tkinter est tres specifique donc je te mets\r\n#plein de commentaires pour que tu comprennes bien \r\n#ce que j'ai fait\r\n\r\n\r\n\r\nfrom tkinter import *\r\nimport random\r\nimport time\r\n\r\ndef CommandeChoixOui(FrameEtape1_2):\r\n #global PersonnagePropose\r\n FrameEtape1_2.destroy()\r\n Etape2_1(PersonnagePropose,var_quest,var_date,points)\r\n\r\n\r\ndef CommandeChoixNon(FrameEtape1_2, canvas1, ListePersonnagePossible, BoutonValider1_2_Non,PersonnagePropose):\r\n #global PersonnagePropose\r\n # Vérifier si il existe encore des personnages\r\n if len(ListePersonnagePossible) != 0:\r\n ChoisirAleatoirementPersonnage(ListePersonnagePossible)\r\n 
AfficherPersonnage(FrameEtape1_2, canvas1, PersonnagePropose,Photo_visible)\r\n else: \r\n Label(FrameEtape1_2, text=\"C'était le dernier choix !!!\", bg=\"pink\").place(x=0, y=180) \r\n BoutonValider1_2_Non['state']=DISABLED\r\n \r\n \r\ndef ChoisirAleatoirementPersonnage(ListePersonnagePossible):\r\n #global PersonnagePropose\r\n # On choisit au hasard un indice de personnage dans la liste de ceux qui répondent aux critères\r\n IndicePersonnagePropose = random.choice(ListePersonnagePossible)\r\n # On supprime 'indice sélectionné de la liste de ceux qui répondent aux critères\r\n ListePersonnagePossible.remove(IndicePersonnagePropose) \r\n # On récupère toutes les caractéristiques du personnage dans la variable PersonnagePropose \r\n global PersonnagePropose\r\n PersonnagePropose=ListePersonnage[IndicePersonnagePropose] \r\n \r\n \r\ndef AfficherPersonnage(FrameEtape1_2, canvas1, PersonnagePropose,Photo_visible):\r\n # Effacer la photo\r\n #global Photo_visible\r\n if Photo_visible!=0:\r\n canvas1.delete(Photo_visible) \r\n # Afficher la photo\r\n Photo_visible=canvas1.create_image(50,290,image=PersonnagePropose[0])\r\n canvas1.place(x=0, y=0) \r\n Label(FrameEtape1_2, text=\" \", bg=\"pink\").place(x=0, y=120)\r\n Label(FrameEtape1_2, text=PersonnagePropose[4], bg=\"pink\").place(x=0, y=120)\r\n Label(FrameEtape1_2, text=\" \", bg=\"pink\").place(x=0, y=150)\r\n Label(FrameEtape1_2, text=\"Voulez-vous commencer à discuter avec \"+ PersonnagePropose[5] +\" ?\", bg=\"pink\").place(x=0, y=150)\r\n\r\n \r\ndef Etape2_1(PersonnagePropose,var_quest,var_date,points): \r\n \r\n \r\n for c in root.winfo_children():\r\n c.pack_forget()\r\n \r\n score=Compteur(points, var_date,PersonnagePropose)\r\n \r\n Frame2= Frame(root, bg=\"pink\", cursor = \"heart\")\r\n #Frame(fenetre)\r\n Frame2.pack()\r\n canvas2 = Canvas(Frame2, width=200, height=200, background=\"#FFFFFF\")\r\n Photo_date=canvas2.create_image(30,290,image=PersonnagePropose[0])\r\n canvas2.pack()\r\n \r\n \r\n Frame_questions=Frame(root)\r\n Frame_questions.pack() \r\n \r\n\r\n #for i in range(0,1):# le rang est le nombre de quests limite\r\n if score<1:\r\n quest=Questions(var_quest,Frame_questions,Frame2)\r\n \r\n if score==1:\r\n Frame_RDV=Frame(root,bg=\"pink\", cursor = \"heart\")\r\n Frame_RDV.pack()\r\n RDV=Label(Frame_RDV, text= \"Vous êtes très proches. Concluez! 
♥ \", bg=\"pink\")\r\n RDV.pack() \r\n Date=Button(Frame2, text=\"Décrocher une date\", width=10, bg='pink', fg='navy')\r\n Date.pack()\r\n \r\ndef Questions(var_quest,Frame_questions,Frame2) : \r\n Frame_questions= Frame(root, bg=\"pink\", cursor = \"heart\")\r\n \r\n Frame_questions.pack() \r\n \r\n var_quest=var_quest+1\r\n \r\n nouvelle_question=open(\"question\"+str(var_quest)+\".txt\",\"r\", encoding=\"utf8\")\r\n texte_question=nouvelle_question.readline()\r\n Label(Frame2, text=texte_question).pack(padx=10, pady=10)\r\n \r\n \r\n \r\n \r\n Reponse0=Radiobutton(Frame_questions, text=nouvelle_question.readline(), variable=var_date, value=0)\r\n Reponse1=Radiobutton(Frame_questions, text=nouvelle_question.readline(), variable=var_date, value=1) \r\n Reponse2=Radiobutton(Frame_questions, text=nouvelle_question.readline(), variable=var_date, value=2)\r\n Reponse3=Radiobutton(Frame_questions, text=nouvelle_question.readline(), variable=var_date, value=3)\r\n \r\n Reponse0.pack()\r\n Reponse1.pack()\r\n Reponse2.pack()\r\n Reponse3.pack()\r\n \r\n Valider3=Button(Frame_questions, text=\"Valider\", font=(\"Tahoma\", 12), bg=\"#BE2121\", fg =\"white\" , width=20, command=lambda: Etape2_1(PersonnagePropose,var_quest,var_date,points))\r\n Valider3.pack()\r\n \r\n \r\ndef Compteur(points, var_date,PersonnagePropose) :\r\n \r\n chance=var_date.get()\r\n humeur=2\r\n if PersonnagePropose[6][chance]==PersonnagePropose[6][humeur]:\r\n if 0.3 1:\n next_outputs = []\n config = self.context.new_values({\"width0\": width, \"width1\": width})\n for i in range(0, len(outputs), 2):\n self.__unique_concat_id += 1\n name = \"__magma_backend_concat{}\".format(self.__unique_concat_id)\n module_definition.add_generator_instance(name, concat_generator, config)\n module_definition.connect(\n module_definition.select(\"{}.in0\".format(name)),\n outputs[i])\n module_definition.connect(\n module_definition.select(\"{}.in1\".format(name)),\n outputs[i + 1])\n next_outputs.append(module_definition.select(\"{}.out\".format(name)))\n width *= 2\n outputs = next_outputs\n source = outputs[0]\n else:\n for p, v in zip(port, value):\n self.connect(module_definition, p, v, output_ports)\n return\n elif isinstance(value, ArrayType) and all(x in {VCC, GND} for x in value):\n source = self.get_constant_instance(value, len(value),\n module_definition)\n elif value is VCC or value is GND:\n source = self.get_constant_instance(value, None, module_definition)\n else:\n source = module_definition.select(output_ports[value])\n module_definition.connect(\n source,\n module_definition.select(magma_port_to_coreir(port)))\n\n\n __unique_constant_id = -1\n def get_constant_instance(self, constant, num_bits, module_definition):\n if module_definition not in self.__constant_cache:\n self.__constant_cache[module_definition] = {}\n if constant not in self.__constant_cache[module_definition]:\n self.__unique_constant_id += 1\n\n bit_type_to_constant_map = {\n GND: 0,\n VCC: 1\n }\n if constant in bit_type_to_constant_map:\n value = bit_type_to_constant_map[constant]\n elif isinstance(constant, ArrayType):\n value = seq2int([bit_type_to_constant_map[x] for x in constant])\n else:\n raise NotImplementedError(value)\n if num_bits is None:\n config = self.context.new_values({\"value\": bool(value)})\n name = \"bit_const_{}_{}\".format(constant, self.__unique_constant_id)\n corebit_const_module = self.libs['corebit'].modules[\"const\"]\n module_definition.add_module_instance(name, corebit_const_module, config)\n else:\n gen_args = 
self.context.new_values({\"width\": num_bits})\n config = self.context.new_values({\"value\": value})\n name = \"const_{}_{}\".format(constant, self.__unique_constant_id)\n instantiable = self.get_instantiable(\"const\", \"coreir\")\n module_definition.add_generator_instance(name, instantiable, gen_args, config)\n return module_definition.select(\"{}.out\".format(name))\n # self.__constant_cache[module_definition][constant] = module_definition.select(\"{}.out\".format(name))\n return self.__constant_cache[module_definition][constant]\n\n def compile(self, defn):\n modules = {}\n pass_ = InstanceGraphPass(defn)\n pass_.run()\n for key, _ in pass_.tsortedgraph:\n if key.is_definition:\n modules[key.name] = self.compile_definition(key)\n return modules\n\ndef compile(main, file_name):\n modules = CoreIRBackend().compile(main)\n modules[main.coreir_name].save_to_file(file_name)\n","sub_path":"magma/backend/coreir_.py","file_name":"coreir_.py","file_ext":"py","file_size_in_byte":11625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"503759908","text":"#!/usr/bin/python3\n\"\"\"Class State\n\"\"\"\nfrom sqlalchemy import Column, Integer, String\nfrom sqlalchemy.ext.declarative import declarative_base\n\n\nBase = declarative_base()\n\n\nclass State(Base):\n \"\"\"State class:\n inherits from Base Tips\n links to the MySQL table states\n\n class attributes:\n id: that represents a column of an auto\n generated, unique integer, can’t be null and is a primary key\n name: that represents a column of a string with maximum 128\n characters and can’t be null.\n \"\"\"\n __tablename__ = 'cities'\n id = Column(Integer, primary_key=True)\n name = Column(String(128), nullable=False)\n","sub_path":"0x0F-python-object_relational_mapping/model_state.py","file_name":"model_state.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"205820009","text":"#!/usr/bin/env python\n\n# Copyright 2016 The Python-Twitter Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# ------------------------------------------------------------------------\n# Change History\n# 2010-10-01\n# Initial commit by @jsteiner207\n#\n# 2014-12-29\n# PEP8 update by @radzhome\n#\n# 2016-05-07\n# Update for Python3 by @jeremylow\n#\n\nfrom __future__ import print_function\nimport twitter\nimport time\n\nCONSUMER_KEY = 'pvqN3SWhEe2jXgXX7YHMG0Wtr'\nCONSUMER_SECRET = 'CGemTMEn7PerXFzsn1jYUdg1wChXbGl56gnEUBaRtG82KApfb8'\nACCESS_TOKEN = '1482695930-17eM4E9B370mdfDTAr0TXqDRiHXYQDfdRHtnNjo'\nACCESS_TOKEN_SECRET = 'qU7NRInGledbiH3smoTZMkhHl8mCjyClf8YRKqcNeGPwO'\n\n\n# Create an Api instance.\napi = twitter.Api(consumer_key='pvqN3SWhEe2jXgXX7YHMG0Wtr',\n consumer_secret='CGemTMEn7PerXFzsn1jYUdg1wChXbGl56gnEUBaRtG82KApfb8',\n access_token_key='1482695930-17eM4E9B370mdfDTAr0TXqDRiHXYQDfdRHtnNjo',\n access_token_secret='qU7NRInGledbiH3smoTZMkhHl8mCjyClf8YRKqcNeGPwO')\n\nusers = 
api.GetFriendsPaged(screen_name='glenzenjin',cursor=-1,count=2)\nprint(users);\nprint([u.screen_name for u in users[2]])\n\napi.CreateFriendship( screen_name='maria_ana_cris')\napi.DestroyFriendship( screen_name='maria_ana_cris')\ntime.sleep(5)\n","sub_path":"examples/view_friends_nviogne.py","file_name":"view_friends_nviogne.py","file_ext":"py","file_size_in_byte":1774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"589425866","text":"# -*- coding: utf-8 -*-\n\nimport json\nfrom app.myauth.config import getUserData\nimport django.middleware.csrf\n\n\ndef get(request):\n config = {}\n\n try:\n with open('app/home/fixtures/config.json') as f:\n content = f.read()\n f.close()\n except IOError:\n content = '[]'\n config = json.loads(content)\n\n config['host'] = request.get_host()\n config['hostName'] = request.get_host().decode('idna')\n config['csrf_token'] = django.middleware.csrf.get_token(request)\n\n userDataConfig = getUserData(request).copy()\n config.update(userDataConfig)\n\n return config\n","sub_path":"app/home/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"18033094","text":"#!/usr/bin/env python\n# coding: utf-8\n\n\nimport numpy as np\nimport metrics\n\nclass LSVMClassifier():\n def __init__(self, lamda=1):\n self._theta = None\n self._omiga = None\n self._b = 0\n self._lamda = 1\n\n def _J(self, theta, X_b, y_train, lamda):\n '''u = y_train * X_b.dot(self._theta)\n sum = 0\n for i in range(X_b.shape[0]):\n if u[i] < 1:\n sum += (1-u[i])\n cost = 0.5*self._lamda*np.sum(theta[1:]**2) + sum\n return cost'''\n sum = 0\n for i in range(X_b.shape[0]):\n u = y_train[i] * np.sum(X_b[i].dot(self._theta))\n if u<1:\n sum += 1-u\n cost = (1/2) * self._lamda * np.sum(theta[1:]**2) + sum\n return cost\n \n def _dJ(self, theta, X_b, y_train, lamda):\n '''u = y_train * X_b.dot(self._theta)\n sum = 0\n for i in range(X_b.shape[0]):\n if u[i] < 1:\n sum = sum - y_train.reshape(622,-1)[i] * X_b[i]\n gradient = sum + self._lamda*theta\n return gradient'''\n sum = 0\n for i in range(X_b.shape[0]):\n u = y_train[i] * np.sum(X_b[i].dot(self._theta))\n if u<1:\n sum -= y_train[i]*X_b[i]\n gradient = self._lamda * theta + sum\n return gradient\n\n def fit(self, X_train, y_train, max_iter=1e4, epsilon=1e-4, eta=0.1):\n ss = y_train.copy()#????????????\n for i in range(len(ss)):\n if ss[i] == 0:\n ss[i] = -1\n X_b = np.hstack([X_train, np.ones((len(X_train),1))])\n theta = np.ones(X_b.shape[1]) \n self._theta = theta\n self._omiga = self._theta[:-1]\n cur_iter = 0\n \n for iter in range(int(max_iter)) : \n gradient = self._dJ(theta, X_b, ss, self._lamda)\n lasttheta = theta\n theta = theta - eta * gradient\n if (abs(self._J(theta, X_b, ss, self._lamda) - self._J(lasttheta, X_b, ss, self._lamda)) < epsilon):\n break\n cur_iter += 1 \n \n self._theta = theta\n self._b = self._theta[-1]\n self._omiga = self._theta[:-1]\n return self\n \n def predict(self, X_test):\n X_b = np.hstack([X_test, np.ones((len(X_test),1))])\n '''y_pred = np.array(X_b.dot(self._theta))\n for i in range(len(y_pred)):\n if y_pred[i] > 0:\n y_pred[i] == 1\n else: y_pred[i] == 0'''\n y_pred = np.array((X_b.dot(self._theta)>0), dtype='int')\n return y_pred\n \n def score(self, X_test, y_test, scoring=metrics.acc_score):\n y_pred = self.predict(X_test)\n score = scoring(y_pred, y_test)\n return 
score\n","sub_path":"bagging/LSVMClassifier.py","file_name":"LSVMClassifier.py","file_ext":"py","file_size_in_byte":2767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"642167696","text":"#!/usb/bin/env python\nimport os\n\nimport numpy as np\nfrom Cython.Build import cythonize\nfrom setuptools import setup, find_packages\nfrom setuptools.extension import Extension\n\n\ndef getresourcefiles():\n print('Generating resource list',flush=True)\n reslist=[]\n for directory, subdirs, files in os.walk('cct/resource'):\n reslist.extend([os.path.join(directory,f).split('/',1)[1] for f in files])\n print('Generated resource list:\\n '+'\\n '.join(x for x in reslist)+'\\n',flush=True)\n return reslist\n\ndef update_languagespec():\n from cct.core.commands import Command\n allcommands=sorted([c.name for c in Command.allcommands()])\n with open('cct/resource/language-specs/cct.lang.in','rt', encoding='utf-8') as fin:\n with open('cct/resource/language-specs/cct.lang', 'wt', encoding='utf-8') as fout:\n for l in fin:\n if l.startswith('% KEYWORDS %'):\n for c in allcommands:\n fout.write(' %s\\n'%c)\n else:\n fout.write(l)\n print('Updated language spec. Command list:\\n'+', '.join(allcommands))\n\n\nextensions = [Extension(\"cct.core.utils.radint\", [\"cct/core/utils/radint.pyx\"], include_dirs=[np.get_include()])]\n\n \nupdate_languagespec()\nsetup(name='cct', version='1.3.1', author='Andras Wacha',\n author_email='awacha@gmail.com', url='http://github.com/awacha/cct',\n description='CREDO Control Tool',\n packages=find_packages(),\n # cmdclass = {'build_ext': build_ext},\n ext_modules=cythonize(extensions),\n install_requires=['numpy>=1.0.0', 'scipy>=0.7.0', 'matplotlib', 'sastool', 'pymodbustcp'],\n entry_points={'gui_scripts':['cct = cct.gui.mainwindow:run'],\n },\n keywords=\"saxs sans sas small-angle scattering x-ray instrument control\",\n license=\"\",\n package_data={'': getresourcefiles()},\n # include_package_data=True,\n zip_safe=False,\n )\n","sub_path":"pypi_install_script/cct-1.3.1.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"523094181","text":"# -*- coding: utf-8 -*-\n# 偏移 字段 长度 默认值 描述\n# 0x0000 FileType 2 0x8000 文件类型(0x8000 ORD 文件)\n# 0x0002 Version 1 0x10 文件定义版本号\n# 0x0003 Reserved 61 0x00 保留字\n# 0x0040 PreviewWidth 2 0x0000 预览图参考宽度\n# 0x0042 PreviewHeight 2 0x0000 预览图参考高度\n# 0x0044 BlockCount 2 0x0000 光带段数\n# 0x0046 Reserved 10 0x00 保留字\n# 0x0050 Reserved 48 0x00 保留字\n\n# 0x0080 Block0 PixCount 2 0x01,0x00 Block0 像素点数\n# 0x0082 Block0 DefaultColor 3 R,G,B Block0 默认颜色\n# 0x0085 Reserved 11 0x00 保留字\n# 0x0090 Block0Data N 0x01,0x00 可变 Block0 像素点数\n# 其它 Block 数据\n# FileCrc 4 0x00 前面所有数据的和较验\n\n\nimport struct\nimport os\nfrom road_conf import ROAD_INFO_1, ROAD_INFO_2, ROAD_INFO_3\n\ntable = [\n 0X0000, 0XC0C1, 0XC181, 0X0140, 0XC301, 0X03C0, 0X0280, 0XC241,\n 0XC601, 0X06C0, 0X0780, 0XC741, 0X0500, 0XC5C1, 0XC481, 0X0440,\n 0XCC01, 0X0CC0, 0X0D80, 0XCD41, 0X0F00, 0XCFC1, 0XCE81, 0X0E40,\n 0X0A00, 0XCAC1, 0XCB81, 0X0B40, 0XC901, 0X09C0, 0X0880, 0XC841,\n 0XD801, 0X18C0, 0X1980, 0XD941, 0X1B00, 0XDBC1, 0XDA81, 0X1A40,\n 0X1E00, 0XDEC1, 0XDF81, 0X1F40, 0XDD01, 0X1DC0, 0X1C80, 0XDC41,\n 0X1400, 0XD4C1, 0XD581, 0X1540, 0XD701, 0X17C0, 0X1680, 0XD641,\n 0XD201, 0X12C0, 0X1380, 0XD341, 0X1100, 0XD1C1, 0XD081, 0X1040,\n 0XF001, 0X30C0, 0X3180, 0XF141, 0X3300, 0XF3C1, 0XF281, 0X3240,\n 0X3600, 0XF6C1, 
0XF781, 0X3740, 0XF501, 0X35C0, 0X3480, 0XF441,\n 0X3C00, 0XFCC1, 0XFD81, 0X3D40, 0XFF01, 0X3FC0, 0X3E80, 0XFE41,\n 0XFA01, 0X3AC0, 0X3B80, 0XFB41, 0X3900, 0XF9C1, 0XF881, 0X3840,\n 0X2800, 0XE8C1, 0XE981, 0X2940, 0XEB01, 0X2BC0, 0X2A80, 0XEA41,\n 0XEE01, 0X2EC0, 0X2F80, 0XEF41, 0X2D00, 0XEDC1, 0XEC81, 0X2C40,\n 0XE401, 0X24C0, 0X2580, 0XE541, 0X2700, 0XE7C1, 0XE681, 0X2640,\n 0X2200, 0XE2C1, 0XE381, 0X2340, 0XE101, 0X21C0, 0X2080, 0XE041,\n 0XA001, 0X60C0, 0X6180, 0XA141, 0X6300, 0XA3C1, 0XA281, 0X6240,\n 0X6600, 0XA6C1, 0XA781, 0X6740, 0XA501, 0X65C0, 0X6480, 0XA441,\n 0X6C00, 0XACC1, 0XAD81, 0X6D40, 0XAF01, 0X6FC0, 0X6E80, 0XAE41,\n 0XAA01, 0X6AC0, 0X6B80, 0XAB41, 0X6900, 0XA9C1, 0XA881, 0X6840,\n 0X7800, 0XB8C1, 0XB981, 0X7940, 0XBB01, 0X7BC0, 0X7A80, 0XBA41,\n 0XBE01, 0X7EC0, 0X7F80, 0XBF41, 0X7D00, 0XBDC1, 0XBC81, 0X7C40,\n 0XB401, 0X74C0, 0X7580, 0XB541, 0X7700, 0XB7C1, 0XB681, 0X7640,\n 0X7200, 0XB2C1, 0XB381, 0X7340, 0XB101, 0X71C0, 0X7080, 0XB041,\n 0X5000, 0X90C1, 0X9181, 0X5140, 0X9301, 0X53C0, 0X5280, 0X9241,\n 0X9601, 0X56C0, 0X5780, 0X9741, 0X5500, 0X95C1, 0X9481, 0X5440,\n 0X9C01, 0X5CC0, 0X5D80, 0X9D41, 0X5F00, 0X9FC1, 0X9E81, 0X5E40,\n 0X5A00, 0X9AC1, 0X9B81, 0X5B40, 0X9901, 0X59C0, 0X5880, 0X9841,\n 0X8801, 0X48C0, 0X4980, 0X8941, 0X4B00, 0X8BC1, 0X8A81, 0X4A40,\n 0X4E00, 0X8EC1, 0X8F81, 0X4F40, 0X8D01, 0X4DC0, 0X4C80, 0X8C41,\n 0X4400, 0X84C1, 0X8581, 0X4540, 0X8701, 0X47C0, 0X4680, 0X8641,\n 0X8201, 0X42C0, 0X4380, 0X8341, 0X4100, 0X81C1, 0X8081, 0X4040\n]\n\n\ndef CRC(crc, byte):\n lower = crc >> 8\n return (lower ^ table[((crc) ^ (byte & 0xffffffff)) & 0xff])\n\n\ndef CalcCRC(data, size):\n crc = 0\n for i in range(size):\n crc = CRC(crc, data[i])\n return crc\n\n\ndef getFileCRC(_path, block_size):\n f = open(_path, \"rb\")\n read_data = f.read(block_size)\n f.close()\n read_data_arr = [ord(c) for c in read_data]\n return CalcCRC(read_data_arr, block_size)\n\n\ndef get_checksum(_path, block_size):\n f = open(_path, \"rb\")\n read_data = f.read(block_size)\n f.close()\n read_data_arr = [c for c in read_data]\n return sum(read_data_arr)\n\n\ndef gen_file_1st(file_name, dict_para):\n WriteFileData = open(file_name, 'wb')\n\n FileType = 0x8000\n WriteFileData.write(struct.pack(\"H\", FileType))\n\n Version = 0x10\n WriteFileData.write(struct.pack(\"B\", Version))\n\n Reserved = 0x00\n for i in range(61):\n WriteFileData.write(struct.pack(\"B\", Reserved))\n\n PreviewWidth = 128\n WriteFileData.write(struct.pack(\"H\", PreviewWidth))\n\n PreviewHeight = 96\n WriteFileData.write(struct.pack(\"H\", PreviewHeight))\n\n BlockCount = 0x1\n WriteFileData.write(struct.pack(\"H\", BlockCount))\n\n Reserved = 0x00\n for i in range(10 + 48):\n WriteFileData.write(struct.pack(\"B\", Reserved))\n\n Block0_PixCount = dict_para[\"w\"] * dict_para[\"h\"]\n WriteFileData.write(struct.pack(\"H\", Block0_PixCount))\n\n DefaultColor_R = dict_para[\"r\"]\n WriteFileData.write(struct.pack(\"B\", DefaultColor_R))\n DefaultColor_G = dict_para[\"g\"]\n WriteFileData.write(struct.pack(\"B\", DefaultColor_G))\n DefaultColor_B = dict_para[\"b\"]\n WriteFileData.write(struct.pack(\"B\", DefaultColor_B))\n\n Reserved = 0x00\n for i in range(11):\n WriteFileData.write(struct.pack(\"B\", Reserved))\n\n for i in range(dict_para[\"y\"], dict_para[\"y\"] + dict_para[\"h\"]):\n for j in range(dict_para[\"x\"], dict_para[\"x\"] + dict_para[\"w\"]):\n WriteFileData.write(struct.pack(\"H\", j))\n WriteFileData.write(struct.pack(\"H\", i))\n\n WriteFileData.close()\n\n\ndef gen_file(file_name, dict_para, 
num=1):\n file_name = \"%d/%s\" % (num, file_name)\n gen_file_1st(file_name, dict_para)\n file_size = os.path.getsize(file_name)\n check_sum = get_checksum(file_name, file_size) & 0xffffffff\n\n WriteFileData = open(file_name, 'ab')\n WriteFileData.write(struct.pack(\"I\", check_sum))\n WriteFileData.close()\n\n\ndict_g = {\"r\": 0x00, \"g\": 0xff, \"b\": 0x00}\ndict_r = {\"r\": 0xff, \"g\": 0x00, \"b\": 0x00}\ndict_y = {\"r\": 0xff, \"g\": 0xff, \"b\": 0x00}\n\n\n# 1号诱导屏\ndef gen_you_dao_1():\n dict_all_g_1 = {\"x\": 0, \"y\": 0, \"w\": 112, \"h\": 32, \"r\": 0x00, \"g\": 0xff, \"b\": 0x00}\n dict_all_g_2 = {\"x\": 0, \"y\": 40, \"w\": 112, \"h\": 16, \"r\": 0x00, \"g\": 0xff, \"b\": 0x00}\n gen_file('R001', dict_all_g_1, 1)\n gen_file('R002', dict_all_g_2, 1)\n\n dict_qing_ji_da_dao_1_1_g = dict(ROAD_INFO_1[\"2002-2\"], **dict_g)\n dict_qing_ji_da_dao_1_1_r = dict(ROAD_INFO_1[\"2002-2\"], **dict_r)\n dict_qing_ji_da_dao_1_1_y = dict(ROAD_INFO_1[\"2002-2\"], **dict_y)\n gen_file('R003', dict_qing_ji_da_dao_1_1_g, 1)\n gen_file('R004', dict_qing_ji_da_dao_1_1_r, 1)\n gen_file('R005', dict_qing_ji_da_dao_1_1_y, 1)\n\n dict_qing_ji_da_dao_1_2_g = dict(ROAD_INFO_1[\"3003-2\"], **dict_g)\n dict_qing_ji_da_dao_1_2_r = dict(ROAD_INFO_1[\"3003-2\"], **dict_r)\n dict_qing_ji_da_dao_1_2_y = dict(ROAD_INFO_1[\"3003-2\"], **dict_y)\n gen_file('R006', dict_qing_ji_da_dao_1_2_g, 1)\n gen_file('R007', dict_qing_ji_da_dao_1_2_r, 1)\n gen_file('R008', dict_qing_ji_da_dao_1_2_y, 1)\n\n dict_qing_ji_da_dao_1_3_g = dict(ROAD_INFO_1[\"4004-2\"], **dict_g)\n dict_qing_ji_da_dao_1_3_r = dict(ROAD_INFO_1[\"4004-2\"], **dict_r)\n dict_qing_ji_da_dao_1_3_y = dict(ROAD_INFO_1[\"4004-2\"], **dict_y)\n gen_file('R009', dict_qing_ji_da_dao_1_3_g, 1)\n gen_file('R010', dict_qing_ji_da_dao_1_3_r, 1)\n gen_file('R011', dict_qing_ji_da_dao_1_3_y, 1)\n\n dict_qing_ji_da_dao_1_4_g = dict(ROAD_INFO_1[\"5005-2\"], **dict_g)\n dict_qing_ji_da_dao_1_4_r = dict(ROAD_INFO_1[\"5005-2\"], **dict_r)\n dict_qing_ji_da_dao_1_4_y = dict(ROAD_INFO_1[\"5005-2\"], **dict_y)\n gen_file('R012', dict_qing_ji_da_dao_1_4_g, 1)\n gen_file('R013', dict_qing_ji_da_dao_1_4_r, 1)\n gen_file('R015', dict_qing_ji_da_dao_1_4_y, 1)\n\n dict_tmp = dict(ROAD_INFO_1[\"yun_du_lu_1\"], **dict_g)\n gen_file('R061', dict_tmp, 1)\n dict_tmp = dict(ROAD_INFO_1[\"yun_du_lu_2\"], **dict_g)\n gen_file('R062', dict_tmp, 1)\n dict_tmp = dict(ROAD_INFO_1[\"yun_du_lu_3\"], **dict_g)\n gen_file('R063', dict_tmp, 1)\n dict_tmp = dict(ROAD_INFO_1[\"yun_du_lu_4\"], **dict_g)\n gen_file('R064', dict_tmp, 1)\n dict_tmp = dict(ROAD_INFO_1[\"yun_du_lu_2\"], **dict_r)\n gen_file('R065', dict_tmp, 1)\n dict_tmp = dict(ROAD_INFO_1[\"yun_du_lu_3\"], **dict_r)\n gen_file('R066', dict_tmp, 1)\n dict_tmp = dict(ROAD_INFO_1[\"yun_du_lu_2\"], **dict_y)\n gen_file('R067', dict_tmp, 1)\n dict_tmp = dict(ROAD_INFO_1[\"yun_du_lu_3\"], **dict_y)\n gen_file('R068', dict_tmp, 1)\n\n dict_tmp = dict(ROAD_INFO_1[\"jiao_tong_lu_1\"], **dict_g)\n gen_file('R069', dict_tmp, 1)\n dict_tmp = dict(ROAD_INFO_1[\"jiao_tong_lu_2\"], **dict_g)\n gen_file('R070', dict_tmp, 1)\n dict_tmp = dict(ROAD_INFO_1[\"jiao_tong_lu_3\"], **dict_g)\n gen_file('R071', dict_tmp, 1)\n dict_tmp = dict(ROAD_INFO_1[\"jiao_tong_lu_4\"], **dict_g)\n gen_file('R072', dict_tmp, 1)\n dict_tmp = dict(ROAD_INFO_1[\"jiao_tong_lu_2\"], **dict_r)\n gen_file('R073', dict_tmp, 1)\n dict_tmp = dict(ROAD_INFO_1[\"jiao_tong_lu_3\"], **dict_r)\n gen_file('R074', dict_tmp, 1)\n dict_tmp = dict(ROAD_INFO_1[\"jiao_tong_lu_2\"], **dict_y)\n 
gen_file('R075', dict_tmp, 1)\n dict_tmp = dict(ROAD_INFO_1[\"jiao_tong_lu_3\"], **dict_y)\n gen_file('R076', dict_tmp, 1)\n\n dict_tmp = dict(ROAD_INFO_1[\"lv_lin_lu_1\"], **dict_g)\n gen_file('R077', dict_tmp, 1)\n dict_tmp = dict(ROAD_INFO_1[\"lv_lin_lu_2\"], **dict_g)\n gen_file('R078', dict_tmp, 1)\n dict_tmp = dict(ROAD_INFO_1[\"lv_lin_lu_3\"], **dict_g)\n gen_file('R079', dict_tmp, 1)\n dict_tmp = dict(ROAD_INFO_1[\"lv_lin_lu_4\"], **dict_g)\n gen_file('R080', dict_tmp, 1)\n dict_tmp = dict(ROAD_INFO_1[\"lv_lin_lu_2\"], **dict_r)\n gen_file('R081', dict_tmp, 1)\n dict_tmp = dict(ROAD_INFO_1[\"lv_lin_lu_3\"], **dict_r)\n gen_file('R082', dict_tmp, 1)\n dict_tmp = dict(ROAD_INFO_1[\"lv_lin_lu_2\"], **dict_y)\n gen_file('R083', dict_tmp, 1)\n dict_tmp = dict(ROAD_INFO_1[\"lv_lin_lu_3\"], **dict_y)\n gen_file('R084', dict_tmp, 1)\n\n dict_jing_yuan_da_dao_g = {\"x\": 0, \"y\": 24, \"w\": 112, \"h\": 8, \"r\": 0x00, \"g\": 0xff, \"b\": 0x00}\n gen_file('R085', dict_jing_yuan_da_dao_g, 1)\n\n\n# 2号诱导屏\ndef gen_you_dao_2():\n dict_all_g_1 = {\"x\": 0, \"y\": 0, \"w\": 112, \"h\": 32, \"r\": 0x00, \"g\": 0xff, \"b\": 0x00}\n dict_all_g_2 = {\"x\": 0, \"y\": 40, \"w\": 112, \"h\": 8, \"r\": 0x00, \"g\": 0xff, \"b\": 0x00}\n gen_file('R021', dict_all_g_1, 2)\n gen_file('R022', dict_all_g_2, 2)\n\n dict_qing_ji_da_dao_2_1_g = dict(ROAD_INFO_2[\"qing_ji_da_dao_1\"], **dict_g)\n dict_qing_ji_da_dao_2_1_r = dict(ROAD_INFO_2[\"qing_ji_da_dao_1\"], **dict_r)\n dict_qing_ji_da_dao_2_1_y = dict(ROAD_INFO_2[\"qing_ji_da_dao_1\"], **dict_y)\n # gen_file('R023', dict_qing_ji_da_dao_2_1_g)\n gen_file('R035', dict_qing_ji_da_dao_2_1_g, 2)\n gen_file('R024', dict_qing_ji_da_dao_2_1_r, 2)\n gen_file('R025', dict_qing_ji_da_dao_2_1_y, 2)\n\n dict_qing_ji_da_dao_2_2_g = dict(ROAD_INFO_2[\"4004-4\"], **dict_g)\n dict_qing_ji_da_dao_2_2_r = dict(ROAD_INFO_2[\"4004-4\"], **dict_r)\n dict_qing_ji_da_dao_2_2_y = dict(ROAD_INFO_2[\"4004-4\"], **dict_y)\n gen_file('R026', dict_qing_ji_da_dao_2_2_g, 2)\n gen_file('R027', dict_qing_ji_da_dao_2_2_r, 2)\n gen_file('R028', dict_qing_ji_da_dao_2_2_y, 2)\n\n dict_qing_ji_da_dao_2_3_g = dict(ROAD_INFO_2[\"3003-4\"], **dict_g)\n dict_qing_ji_da_dao_2_3_r = dict(ROAD_INFO_2[\"3003-4\"], **dict_r)\n dict_qing_ji_da_dao_2_3_y = dict(ROAD_INFO_2[\"3003-4\"], **dict_y)\n gen_file('R029', dict_qing_ji_da_dao_2_3_g, 2)\n gen_file('R030', dict_qing_ji_da_dao_2_3_r, 2)\n gen_file('R031', dict_qing_ji_da_dao_2_3_y, 2)\n\n dict_qing_ji_da_dao_2_4_g = dict(ROAD_INFO_2[\"2002-4\"], **dict_g)\n dict_qing_ji_da_dao_2_4_r = dict(ROAD_INFO_2[\"2002-4\"], **dict_r)\n dict_qing_ji_da_dao_2_4_y = dict(ROAD_INFO_2[\"2002-4\"], **dict_y)\n gen_file('R032', dict_qing_ji_da_dao_2_4_g, 2)\n gen_file('R033', dict_qing_ji_da_dao_2_4_r, 2)\n gen_file('R036', dict_qing_ji_da_dao_2_4_y, 2)\n\n dict_tmp = dict(ROAD_INFO_2[\"jiao_tong_lu_1\"], **dict_g)\n gen_file('R101', dict_tmp, 2)\n dict_tmp = dict(ROAD_INFO_2[\"jiao_tong_lu_2\"], **dict_g)\n gen_file('R102', dict_tmp, 2)\n dict_tmp = dict(ROAD_INFO_2[\"jiao_tong_lu_3\"], **dict_g)\n gen_file('R103', dict_tmp, 2)\n dict_tmp = dict(ROAD_INFO_2[\"jiao_tong_lu_4\"], **dict_g)\n gen_file('R104', dict_tmp, 2)\n dict_tmp = dict(ROAD_INFO_2[\"jiao_tong_lu_2\"], **dict_r)\n gen_file('R105', dict_tmp, 2)\n dict_tmp = dict(ROAD_INFO_2[\"jiao_tong_lu_3\"], **dict_r)\n gen_file('R106', dict_tmp, 2)\n dict_tmp = dict(ROAD_INFO_2[\"jiao_tong_lu_2\"], **dict_y)\n gen_file('R107', dict_tmp, 2)\n dict_tmp = dict(ROAD_INFO_2[\"jiao_tong_lu_3\"], **dict_y)\n 
gen_file('R108', dict_tmp, 2)\n\n dict_tmp = dict(ROAD_INFO_2[\"yun_du_lu_1\"], **dict_g)\n gen_file('R109', dict_tmp, 2)\n dict_tmp = dict(ROAD_INFO_2[\"yun_du_lu_2\"], **dict_g)\n gen_file('R110', dict_tmp, 2)\n dict_tmp = dict(ROAD_INFO_2[\"yun_du_lu_3\"], **dict_g)\n gen_file('R111', dict_tmp, 2)\n dict_tmp = dict(ROAD_INFO_2[\"yun_du_lu_4\"], **dict_g)\n gen_file('R112', dict_tmp, 2)\n dict_tmp = dict(ROAD_INFO_2[\"yun_du_lu_2\"], **dict_r)\n gen_file('R113', dict_tmp, 2)\n dict_tmp = dict(ROAD_INFO_2[\"yun_du_lu_3\"], **dict_r)\n gen_file('R114', dict_tmp, 2)\n dict_tmp = dict(ROAD_INFO_2[\"yun_du_lu_2\"], **dict_y)\n gen_file('R115', dict_tmp, 2)\n dict_tmp = dict(ROAD_INFO_2[\"yun_du_lu_3\"], **dict_y)\n gen_file('R116', dict_tmp, 2)\n\n dict_tmp = dict(ROAD_INFO_2[\"xin_shi_da_dao_1\"], **dict_g)\n gen_file('R117', dict_tmp, 2)\n dict_tmp = dict(ROAD_INFO_2[\"xin_shi_da_dao_2\"], **dict_g)\n gen_file('R118', dict_tmp, 2)\n dict_tmp = dict(ROAD_INFO_2[\"xin_shi_da_dao_3\"], **dict_g)\n gen_file('R119', dict_tmp, 2)\n dict_tmp = dict(ROAD_INFO_2[\"xin_shi_da_dao_4\"], **dict_g)\n gen_file('R120', dict_tmp, 2)\n dict_tmp = dict(ROAD_INFO_2[\"xin_shi_da_dao_2\"], **dict_r)\n gen_file('R121', dict_tmp, 2)\n dict_tmp = dict(ROAD_INFO_2[\"xin_shi_da_dao_3\"], **dict_r)\n gen_file('R122', dict_tmp, 2)\n dict_tmp = dict(ROAD_INFO_2[\"xin_shi_da_dao_2\"], **dict_y)\n gen_file('R123', dict_tmp, 2)\n dict_tmp = dict(ROAD_INFO_2[\"xin_shi_da_dao_3\"], **dict_y)\n gen_file('R124', dict_tmp, 2)\n\n dict_jing_yuan_da_dao_g = {\"x\": 0, \"y\": 24, \"w\": 112, \"h\": 8, \"r\": 0x00, \"g\": 0xff, \"b\": 0x00}\n gen_file('R125', dict_jing_yuan_da_dao_g, 2)\n\n\n# 3号诱导屏\ndef gen_you_dao_3():\n dict_all_g_1 = {\"x\": 0, \"y\": 0, \"w\": 112, \"h\": 24, \"r\": 0x00, \"g\": 0xff, \"b\": 0x00}\n dict_all_g_2 = {\"x\": 0, \"y\": 32, \"w\": 112, \"h\": 8, \"r\": 0x00, \"g\": 0xff, \"b\": 0x00}\n gen_file('R041', dict_all_g_1, 3)\n gen_file('R042', dict_all_g_2, 3)\n\n dict_qing_ji_da_dao_3_1_g = dict(ROAD_INFO_3[\"qing_ji_da_dao_1\"], **dict_g)\n dict_qing_ji_da_dao_3_1_r = dict(ROAD_INFO_3[\"qing_ji_da_dao_1\"], **dict_r)\n dict_qing_ji_da_dao_3_1_y = dict(ROAD_INFO_3[\"qing_ji_da_dao_1\"], **dict_y)\n gen_file('R043', dict_qing_ji_da_dao_3_1_g, 3)\n gen_file('R044', dict_qing_ji_da_dao_3_1_r, 3)\n gen_file('R045', dict_qing_ji_da_dao_3_1_y, 3)\n\n dict_qing_ji_da_dao_3_2_g = dict(ROAD_INFO_3[\"1001-2\"], **dict_g)\n dict_qing_ji_da_dao_3_2_r = dict(ROAD_INFO_3[\"1001-2\"], **dict_r)\n dict_qing_ji_da_dao_3_2_y = dict(ROAD_INFO_3[\"1001-2\"], **dict_y)\n gen_file('R046', dict_qing_ji_da_dao_3_2_g, 3)\n gen_file('R047', dict_qing_ji_da_dao_3_2_r, 3)\n gen_file('R048', dict_qing_ji_da_dao_3_2_y, 3)\n\n dict_qing_ji_da_dao_3_3_g = dict(ROAD_INFO_3[\"2002-2\"], **dict_g)\n dict_qing_ji_da_dao_3_3_r = dict(ROAD_INFO_3[\"2002-2\"], **dict_r)\n dict_qing_ji_da_dao_3_3_y = dict(ROAD_INFO_3[\"2002-2\"], **dict_y)\n gen_file('R049', dict_qing_ji_da_dao_3_3_g, 3)\n gen_file('R050', dict_qing_ji_da_dao_3_3_r, 3)\n gen_file('R051', dict_qing_ji_da_dao_3_3_y, 3)\n\n dict_tmp = dict(ROAD_INFO_3[\"shen_gong_lu_1\"], **dict_g)\n gen_file('R131', dict_tmp, 3)\n dict_tmp = dict(ROAD_INFO_3[\"shen_gong_lu_2\"], **dict_g)\n gen_file('R132', dict_tmp, 3)\n dict_tmp = dict(ROAD_INFO_3[\"shen_gong_lu_3\"], **dict_g)\n gen_file('R133', dict_tmp, 3)\n dict_tmp = dict(ROAD_INFO_3[\"shen_gong_lu_4\"], **dict_g)\n gen_file('R134', dict_tmp, 3)\n dict_tmp = dict(ROAD_INFO_3[\"shen_gong_lu_2\"], **dict_r)\n gen_file('R135', dict_tmp, 
3)\n dict_tmp = dict(ROAD_INFO_3[\"shen_gong_lu_3\"], **dict_r)\n gen_file('R136', dict_tmp, 3)\n dict_tmp = dict(ROAD_INFO_3[\"shen_gong_lu_2\"], **dict_y)\n gen_file('R137', dict_tmp, 3)\n dict_tmp = dict(ROAD_INFO_3[\"shen_gong_lu_3\"], **dict_y)\n gen_file('R138', dict_tmp, 3)\n\n dict_tmp = dict(ROAD_INFO_3[\"xin_shi_da_dao_1\"], **dict_g)\n gen_file('R139', dict_tmp, 3)\n dict_tmp = dict(ROAD_INFO_3[\"xin_shi_da_dao_2\"], **dict_g)\n gen_file('R140', dict_tmp, 3)\n dict_tmp = dict(ROAD_INFO_3[\"xin_shi_da_dao_3\"], **dict_g)\n gen_file('R141', dict_tmp, 3)\n dict_tmp = dict(ROAD_INFO_3[\"xin_shi_da_dao_4\"], **dict_g)\n gen_file('R142', dict_tmp, 3)\n dict_tmp = dict(ROAD_INFO_3[\"xin_shi_da_dao_2\"], **dict_r)\n gen_file('R143', dict_tmp, 3)\n dict_tmp = dict(ROAD_INFO_3[\"xin_shi_da_dao_3\"], **dict_r)\n gen_file('R144', dict_tmp, 3)\n dict_tmp = dict(ROAD_INFO_3[\"xin_shi_da_dao_2\"], **dict_y)\n gen_file('R145', dict_tmp, 3)\n dict_tmp = dict(ROAD_INFO_3[\"xin_shi_da_dao_3\"], **dict_y)\n gen_file('R146', dict_tmp, 3)\n\n dict_ren_min_da_dao_g = {\"x\": 0, \"y\": 16, \"w\": 112, \"h\": 8, \"r\": 0x00, \"g\": 0xff, \"b\": 0x00}\n gen_file('R147', dict_ren_min_da_dao_g, 3)\n\n\nif __name__ == \"__main__\":\n gen_you_dao_1()\n gen_you_dao_2()\n gen_you_dao_3()\n","sub_path":"onbonbx/BX-6Q/gen_binary.py","file_name":"gen_binary.py","file_ext":"py","file_size_in_byte":17442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"282988264","text":"from texttable import Texttable\nimport re\n\n\nclass Table():\n\n def __init__(self, name=\"\", file_name=\"\"):\n self.name = name\n self.db_name = \"\"\n self.class_name = \"\"\n self.columns = {}\n self.relationships = []\n\n if file_name != \"\":\n self.load_from_file(file_name)\n\n # LOAD table from .BAS file\n def load_from_file(self, file_name):\n\n try:\n file_obj = open(file_name, 'r')\n lines = file_obj.readlines()\n\n # trim list\n lines = [line.strip() for line in lines]\n\n # get class name\n self.class_name = file_name[:-4]\n\n # 6: table name\n s = re.search(r\"Table: (\\w+)\", lines[6], re.I)\n self.name = s.group(1)\n\n # 7: database name\n s = re.search(r\"Database: (\\w+)\", lines[7], re.I)\n self.db_name = s.group(1)\n\n # get columns\n columns_start = lines.index(\"'\")\n columns_end = lines.index(\"'\")\n\n if columns_start is not None and columns_end is not None:\n for i in range(columns_start+1, columns_end):\n if lines[i] is not '':\n col = Column(from_code=lines[i])\n if col.is_valid:\n self.add_column(col)\n else:\n print('Error: [{}] is not a valid column code.'.format(lines[i]))\n\n\n # get Relationships\n rel_start = lines.index(\"'\")\n rel_end = lines.index(\"'\")\n\n if rel_start is not None and rel_end is not None:\n for i in range(rel_start+1, rel_end+1):\n rel = Relationship(from_code=lines[i])\n if rel.is_valid:\n self.add_rel(rel)\n # Success\n return True\n\n except:\n # print(\"Error while parsing table file! 
[{}]\".format(file_name))\n return False\n\n # add Relationship\n def add_rel(self, rel):\n self.relationships.append(rel)\n\n # add column to table\n def add_column(self, col):\n self.columns[col.field] = col\n\n def del_column(self, col_name):\n return self.columns.pop(col_name)\n\n def get_column(self, col_name):\n return self.columns[col_name]\n\n # comentario de linha: CTRL + ;\n # def get_columns_csv(self):\n # csv = \"\"\n # for col_name in self.columns:\n # csv += self.columns[col_name].to_string() + \", \"\n # return csv[:-2]\n\n def get_columns_csv(self, include_autoinc_field=False):\n s = ''\n for field in self.columns:\n if self.columns[field].auto_increment:\n if include_autoinc_field is True:\n s += self.columns[field].to_string() + ', '\n else:\n continue\n s += self.columns[field].to_string() + ', '\n return s[:-2]\n\n def get_all_columns(self):\n return self.columns\n\n def get_file_name(self):\n return self.class_name + '.bas'\n\n\n def get_sqlite_create_table_string(self, use_default_value=False):\n query = 'CREATE TABLE IF NOT EXISTS [{}] ('.format(self.name) \n col = Column()\n for col_name in self.columns:\n col = self.columns[col_name]\n query += '{} {}'.format(col.field, col.get_sql_type())\n query += ' PRIMARY KEY' if col.primary_key else ''\n query += ' ASC AUTOINCREMENT' if col.auto_increment else ''\n query += ' NOT NULL' if col.not_null else ''\n query += ' REFERENCES {} ({})'.format(col.foreign_key[0], col.foreign_key[1]) if col.foreign_key != () else ''\n query += ' DEFAULT (0)' if use_default_value else ''\n query += ', '\n query = query[:-2] + ')'\n return query\n\n def print_info(self):\n tbl = Texttable()\n tbl.set_deco(tbl.HEADER | tbl.BORDER)\n tbl.header([\"Field\", \"Type\", \"PrimaryKey\", \"AutoInc\", \"NotNull\", \"ForeignKey\"])\n for col_name in self.columns:\n col = self.columns[col_name]\n tbl.add_row([col.field, col.type, col.primary_key, col.auto_increment, col.not_null, col.foreign_key])\n\n print(\"[Table name = '{}' class_name = '{}' Columns = {}]\".format(self.name, self.class_name, len(self.columns)))\n print(tbl.draw())\n print(\"[Relationships]\")\n [print(\"> \" + item.to_string()) for item in self.relationships]\n print(\"_\" * 30)\n\n\nclass Database():\n \n def __init__(self, name='', b4j_version='6.0'):\n self.name = name\n self.path = \"\"\n self.tables = {}\n self.b4j_version = b4j_version\n \n def add_table(self, table):\n self.tables[table.name] = table\n\n def del_table(self, table_name):\n self.tables.pop(table_name)\n\n def get_table(self, table_name):\n return self.tables[table_name]\n\n def get_data_access_file_name(self):\n return self.name + 'DataAccess.bas'\n\n def get_table_cname(self, table_name):\n try:\n return self.tables[table_name].class_name\n except:\n return '{table_cname}'\n\n def get_file_name(self):\n return self.name + '.db'\n \n def print_info(self):\n print('> Database: {}'.format(self.name))\n print('> Tables [{}]'.format(len(self.tables)))\n for tbl in self.tables:\n print('>>> {}'.format(self.tables[tbl].name))\n\n\n\n\n\n\n\n\n\n\nclass Column():\n def __init__(self, field=\"\", type=\"\", auto_increment=False, not_null=False, primary_key=False, foreign_key=(), from_code=\"\"):\n self.field = field\n self.type = type\n self.auto_increment = auto_increment\n self.not_null = not_null\n self.primary_key = primary_key\n self.foreign_key = foreign_key\n\n # is valid flag\n self.is_valid = False\n\n if from_code != \"\":\n if self.parse_from_code(from_code):\n self.is_valid = True\n\n def 
get_attr_csv(self):\n if self.foreign_key != ():\n t, f = self.foreign_key\n return 'ForeignKey({0}.{1})'.format(t, f)\n return '{0}{1}{2}'.format('PrimaryKey, ' if self.primary_key else '',\n 'NotNull, ' if self.not_null else '',\n 'AutoIncrement, ' if self.auto_increment else '')[:-2]\n\n\n \n def to_string(self):\n # String with Field declaration only\n # Ex: Public [Field] As [Type]\n return \"{} As {}\".format(self.field, self.type)\n \n \n def to_full_string(self):\n # Full string with attributes\n # Ex: Public [Field] As [Type] '[AutoIncrement, NotNull, ForeignKey(Table2.Id)]\n return 'Public ' + self.to_string() + \" '[{}]\".format(self.get_attr_csv())\n\n def parse_from_code(self, code):\n\n # Expression Ex: Public Field1 As String '[AutoIncrement, NotNull, ForeignKey(Table2.Id)]\n try:\n exprObj = re.match(r\"\\w+ (\\w+) As (\\w+)\", code, re.I)\n if exprObj:\n # field / type\n self.field = exprObj.group(1)\n self.type = exprObj.group(2)\n\n # parse args\n if re.search(r\"AutoIncrement\", code, re.I):\n self.auto_increment = True\n if re.search(r\"PrimaryKey\", code, re.I):\n self.primary_key = True\n if re.search(r\"NotNull\", code, re.I):\n self.not_null = True\n if re.search(r\"ForeignKey\", code, re.I):\n # parse foreign key\n fk = re.search(r\"ForeignKey\\((\\w+)\\.(\\w+)\\)\", code, re.I)\n self.foreign_key = (fk.group(1), fk.group(2))\n return True\n else:\n return False # no match\n except:\n # error: return false\n return False\n\n # Converts to B4j type\n def get_b4j_type(self):\n sqlt = self.type.lower()\n if 'integer' in sqlt:\n return 'Int'\n elif 'blob' in sqlt:\n return 'Object'\n else:\n return self.type\n\n # Converts to SQLite Type\n def get_sql_type(self):\n b4jt = self.type.lower()\n if 'int' in b4jt:\n return 'Integer'\n elif 'boolean' in b4jt:\n return 'Integer'\n elif 'object' in b4jt:\n return 'Blob'\n else:\n return self.type\n\n\n def print_info(self):\n tbl = Texttable()\n tbl.header([\"Field\", \"Type\", \"PrimaryKey\", \"AutoInc\", \"NotNull\", \"ForeignKey\"])\n tbl.add_row([self.field, self.type, self.primary_key, self.auto_increment, self.not_null, self.foreign_key])\n print(tbl.draw())\n\n\n\n\n\n\n\n\n\n\n\n\n\nclass Relationship():\n\n def __init__(self, table1=\"\", field1=\"\", table2=\"\", field2=\"\", rel_type=\"\", from_code=\"\"):\n self.table1 = table1\n self.field1 = field1\n self.rel_type = rel_type\n self.table2 = table2\n self.field2 = field2\n\n # is_valid flag\n self.is_valid = False\n\n if from_code != \"\":\n if self.parse_from_code(from_code):\n self.is_valid = True\n\n def parse_from_code(self, code):\n r = re.search(r\"(\\w+)\\.(\\w+) \\<(\\w+)\\> (\\w+)\\.(\\w+)\", code, re.I)\n if r:\n self.table1 = r.group(1)\n self.field1 = r.group(2)\n self.rel_type = r.group(3)\n self.table2 = r.group(4)\n self.field2 = r.group(5)\n return True\n else:\n return False\n\n def to_string(self):\n return \"%s.%s <%s> %s.%s\" % (self.table1, self.field1, self.rel_type, self.table2, self.field2)\n","sub_path":"sqlm/models/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":9796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"32680463","text":"import pandas as pd\r\nfrom sklearn.tree import DecisionTreeClassifier\r\nfrom sklearn.feature_extraction.text import CountVectorizer\r\nfrom sklearn.model_selection import train_test_split\r\n\r\ndf = pd.read_csv('Dataset.csv', encoding='latin-1') #To encode it in utf-8\r\n\r\ndf_names = 
df\r\ndf_names.Type.replace({'P':0,'A':1,'L':2,'O': 3},inplace=True)\r\n\r\nXfeatures =df_names['Name']\r\ncv = CountVectorizer()\r\n\r\nX = cv.fit_transform(Xfeatures)\r\ny = df_names.Type\r\n\r\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)\r\n\r\ntree = DecisionTreeClassifier()\r\ntree.fit(X,y)\r\n\r\ndef MachineLearningAlgo(noun):\r\n\r\n type = \"\"\r\n\r\n test_name = [noun]\r\n\r\n vector = cv.transform(test_name).toarray()\r\n\r\n if tree.predict(vector) == 0:\r\n type = \"Person\"\r\n elif tree.predict(vector) == 1:\r\n type = \"Animal\"\r\n elif tree.predict(vector) == 2:\r\n type = \"Place\"\r\n elif tree.predict(vector) == 3:\r\n type = \"Object\"\r\n\r\n return type\r\n\r\n","sub_path":"machineLearning_part.py","file_name":"machineLearning_part.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"309987634","text":"from torch.utils.data import DataLoader, Dataset\nfrom torchvision import datasets, transforms\nimport sys\nsys.path.append('..')\n\n\ndef load_data(batch_size:int = 100, root_folder:str='data', dataset:str='MNIST'):\n transform = transforms.Compose([transforms.ToTensor()])\n if dataset == 'EMNIST':\n # train every letter and digit ('byclass')\n trainset = datasets.EMNIST(root=root_folder, split='byclass', train=True, download=True, transform=transform)\n testset = datasets.EMNIST(root=root_folder, split='byclass', train=False, download=True, transform=transform)\n\n elif dataset == 'MNIST':\n # train only letters ('byclass')\n trainset = datasets.EMNIST(root=root_folder, split='digits', train=True, download=True, transform=transform)\n testset = datasets.EMNIST(root=root_folder, split='digits', train=False, download=True, transform=transform)\n\n\n train_loader = DataLoader(trainset, batch_size=batch_size, shuffle=True)\n test_loader = DataLoader(trainset, batch_size=batch_size, shuffle=False)\n\n return train_loader, test_loader\n","sub_path":"utils/dataloader.py","file_name":"dataloader.py","file_ext":"py","file_size_in_byte":1085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"595578063","text":"import textwrap\n\nfrom setuptools import setup, find_packages\n\n\n# def blah(scm_version):\n# import pdb; pdb.set_trace() # XXX BREAKPOINT\n# return '1.1.1'\n\n\nwith open('README.md') as f_readme:\n readme = f_readme.read()\n\nsetup(\n name='get-changed-subprojects',\n description='Get changed subprojects between two commits in a Github monorepo',\n long_description=readme,\n license='MIT',\n # version='0.0.1',\n use_scm_version={\n 'root': '../..',\n 'relative_to': __file__,\n # 'version_scheme': blah,\n },\n packages=find_packages(exclude=['tests', 'examples']),\n install_requires=[\n ],\n setup_requires=[\n 'setuptools_scm',\n ],\n extras_require={\n 'test': [\n 'pytest',\n 'pytest-cov',\n 'pytest-flake8',\n ],\n },\n include_package_data=True,\n zip_safe=False,\n entry_points={\n 'console_scripts': [\n 'get_changed_subprojects=git.get_changed_subprojects:main'\n ]\n },\n classifiers=textwrap.dedent(\n \"\"\"\n Development Status :: 3 - Alpha\n Intended Audience :: Developers\n License :: OSI Approved :: MIT License\n Environment :: Console\n Programming Language :: Python :: 3\n Programming Language :: Python :: 3.6\n \"\"\"\n 
).strip().splitlines(),\n)\n","sub_path":"packages/get-changed-subprojects/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"299889018","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# from __future__ import print_function, division\nfrom __future__ import division\nimport functools, itertools, operator, math\n\nis_even = lambda x: (x % 2) == 0\n\ndef is_perfect_square(n):\n\t\"\"\"Returns True if n is a perfect square; False otherwise.\"\"\"\n\tif n < 0:\n\t\treturn False\n\ttest = int(n**(0.5))\n\treturn test * test == n\n\ndef is_integer(n):\n\t\"\"\"Returns True if the number n is an integer number; False otherwise.\"\"\"\n\treturn round(n) == n\n\ndef palindrome(s):\n\t\"\"\"Returns True if the string (or number) s is a palindrome.\"\"\"\n\ts = str(s)\n\tfor i in range(len(s) // 2):\n\t\tif s[i] != s[len(s)-i-1]:\n\t\t\treturn False\n\treturn True\n\ndef gcd(*numbers):\n\t\"\"\"Returns the greatest common divisor of two or more numbers.\"\"\"\n\tif len(numbers) > 2:\n\t\treturn functools.reduce(gcd, numbers)\n\ta, b = max(numbers), min(numbers)\n\tif b == 0:\n\t\treturn a\n\treturn gcd(b, a % b)\n\t\ndef lcm(*numbers):\n\t\"\"\"Returns the least common multiple of two or more numbers.\"\"\"\n\treturn functools.reduce(\n\t\tlambda a, b: (a * b) // gcd(a, b),\n\t\tnumbers\n\t\t)\n\n_FIBONACCI = [0, 1]\ndef fibonacci(n):\n\t\"\"\"Returns the nth fibonacci number.\"\"\"\n\tglobal _FIBONACCI\n\tfor i in range(len(_FIBONACCI), n + 1):\n\t\t_FIBONACCI.append(_FIBONACCI[i - 1] + _FIBONACCI[i - 2])\n\treturn _FIBONACCI[n]\n\ndef fibonacci_upto(n):\n\t\"\"\"Returns all fibonacci numbers small than n.\"\"\"\n\ti, last = 0, 0\n\twhile last < n:\n\t\tlast = fibonacci(i)\n\t\ti += 1\n\t\tif last < n:\n\t\t\tyield last\n\t\telse:\n\t\t\treturn\n\ndef fibonacci_first(n):\n\t\"\"\"Returns the first n fibonacci numbers.\"\"\"\n\tfor i in range(n):\n\t\tyield fibonacci(i)\n\n_TRIANGULAR = [0]\ndef triangular(n):\n\t\"\"\"Returns the nth triangular number.\"\"\"\n\tglobal _TRIANGULAR\n\tfor i in range(len(_TRIANGULAR), n + 1):\n\t\t_TRIANGULAR.append(_TRIANGULAR[i - 1] + i)\n\treturn _TRIANGULAR[n]\n\ndef triangulars_upto(n):\n\t\"\"\"Returns all triangular numbers small than n.\"\"\"\n\ti, last = 0, 0\n\twhile last < n:\n\t\tlast = triangular(i)\n\t\ti += 1\n\t\tif last < n:\n\t\t\tyield last\n\t\telse:\n\t\t\treturn\n\ndef triangulars_first(n):\n\t\"\"\"Returns the first n triangular numbers.\"\"\"\n\tfor i in range(n):\n\t\tyield triangular(i)\n\ndef is_triangular(n):\n\t\"\"\"Returns whether the number n is a triangular number or not.\"\"\"\n\treturn is_perfect_square(8*n + 1)\n\n_PENTAGONAL = [0]\ndef pentagonal(n):\n\t\"\"\"Returns the nth pentagonal number.\"\"\"\n\tglobal _PENTAGONAL\n\tfor i in range(len(_PENTAGONAL), n + 1):\n\t\t_PENTAGONAL.append(\n\t\t\ti * (3*i - 1) // 2\n\t\t)\n\treturn _PENTAGONAL[n]\n\ndef pentagonals_upto(n):\n\t\"\"\"Returns all pentagonal numbers small than n.\"\"\"\n\ti, last = 0, 0\n\twhile last < n:\n\t\tlast = pentagonal(i)\n\t\ti += 1\n\t\tif last < n:\n\t\t\tyield last\n\t\telse:\n\t\t\treturn\n\ndef pentagonals_first(n, start=0):\n\t\"\"\"Returns the first n pentagonal numbers.\"\"\"\n\tfor i in range(start, n):\n\t\tyield pentagonal(i)\n\ndef pentagonals(n=0):\n\t\"\"\"Yields all the pentagonal numbers, starting with the nth number.\"\"\"\n\tfor i in itertools.count(n):\n\t\tyield pentagonal(i)\n\ndef 
is_pentagonal(n):\n\t\"\"\"Returns whether the number n is a pentagonal number or not.\"\"\"\n\treturn is_integer((1 + (24*n + 1)**(0.5)) / 6)\n\n_FACTORIAL = [1, 1]\ndef factorial(n):\n\t\"\"\"Returns the factorial of n.\"\"\"\n\tglobal _FACTORIAL\n\tfor i in range(len(_FACTORIAL), n + 1):\n\t\t_FACTORIAL.append(_FACTORIAL[i - 1] * i)\n\treturn _FACTORIAL[n]\n\n_PRIMES = [2, 3, 5, 7, 11, 13]\ndef prime(n):\n\t\"\"\"Returns the nth prime number.\"\"\"\n\tglobal _PRIMES\n\tfor i in range(len(_PRIMES), n + 1):\n\t\t_PRIMES.append(next_prime(_PRIMES[-1]))\n\treturn _PRIMES[n]\n\ndef next_prime(n):\n\t\"\"\"Returns the first prime number following n.\"\"\"\n\tcandidate = n + 2\n\twhile True:\n\t\tif is_prime(candidate):\n\t\t\treturn candidate\n\t\tcandidate += 2\n\ndef is_prime(n):\n\t\"\"\"Returns whether n is a prime number or not.\"\"\"\n\tif n in _PRIMES: \n\t\treturn True\n\tupper_limit, i = math.sqrt(n), 0\n\twhile prime(i) <= upper_limit:\n\t\tif n % _PRIMES[i] == 0:\n\t\t\treturn False\n\t\ti += 1\n\treturn True\n\ndef primes_upto(n):\n\t\"\"\"Returns all prime numbers smaller than n.\"\"\"\n\ti, last = 0, 0\n\twhile last < n:\n\t\tlast = prime(i)\n\t\ti += 1\n\t\tif last < n:\n\t\t\tyield last\n\t\telse:\n\t\t\treturn\n\ndef primes_first(n):\n\t\"\"\"Returns the first n prime numbers.\"\"\"\n\tfor i in range(n):\n\t\tyield prime(i)\n\ndef eratosthenes_sieve(n):\n\t\"\"\"Returns all the prime numbers smaller than n using the Eratostenes Sieve.\"\"\"\n\tupper_limit = int(n**0.5)+1\n\tif upper_limit <= 4:\n\t\treturn [2, 3]\n\tnumbers = range(2, n)\n\tfor p in eratosthenes_sieve(upper_limit):\n\t\tnumbers = list(\n\t\t\tfilter(\n\t\t\t\tlambda i: (i == p) or (i % p != 0),\n\t\t\t\tnumbers\n\t\t\t\t)\n\t\t\t)\n\treturn numbers\n\n_FACTORS = {1: [1]}\ndef factorize(n):\n\t\"\"\"Returns the list of prime factors of the number n.\"\"\"\n\tif n in _FACTORS: return _FACTORS[n]\n\tfactors = []\n\tupper_limit = int(n ** 0.5) + 1\n\tfor p in primes_upto(upper_limit):\n\t\tif p * p > n: break\n\t\twhile n % p == 0:\n\t\t\tfactors.append(p)\n\t\t\tn = n // p\n\tif n > 1: factors.append(n)\n\t_FACTORS[n] = factors\n\treturn factors\n\ndef divisors(n):\n\t\"\"\"Returns the list of all the divisors of the number n.\"\"\"\n\tdivisors = set([1])\n\tfactors = factorize(n)\n\tfor i in range(1, len(factors)):\n\t\tcombinations = map(\n\t\t\t\tlambda i: functools.reduce(operator.mul, i),\n\t\t\t\titertools.combinations(factors, i)\n\t\t\t\t)\n\t\tfor i in combinations:\n\t\t\tdivisors.add(i)\n\treturn divisors\n\ndef collatz(n):\n\t\"\"\"Return the Collatz sequence starting with the number n.\"\"\"\n\tsequence = [n]\n\twhile n != 1:\n\t\tif is_even(n):\n\t\t\tn = n // 2\n\t\telse:\n\t\t\tn = 3 * n + 1\n\t\tsequence.append(n)\n\treturn sequence\n\ndef sum_naturals(n):\n\t\"\"\"Returns the sum of all the natural numbers up to and including n.\"\"\"\n\treturn n * (n + 1) // 2\n\n_NUMBERS = {\n\t0: 'zero', \n\t1: 'one', \n\t2: 'two', \n\t3: 'three', \n\t4: 'four', \n\t5: 'five', \n\t6: 'six', \n\t7: 'seven', \n\t8: 'eight', \n\t9: 'nine',\n\t10: 'ten', \n\t11: 'eleven', \n\t12: 'twelve', \n\t13: 'thirteen', \n\t14: 'fourteen', \n\t15: 'fifteen', \n\t16: 'sixteen', \n\t17: 'seventeen', \n\t18: 'eighteen', \n\t19: 'nineteen',\n\t20: 'twenty',\n\t30: 'thirty',\n\t40: 'forty',\n\t50: 'fifty',\n\t60: 'sixty',\n\t70: 'seventy',\n\t80: 'eighty',\n\t90: 'ninety',\n\t}\ndef to_words(number):\n\t\"\"\"Returns the number n written out in English words.\"\"\"\n\tstring = \"\"\n\tn = number\n\tthousands = n // 1000\n\tn -= 
thousands * 1000\n\thundreds = n // 100\n\tn -= hundreds * 100\n\tdecades = n // 10\n\tn -= decades * 10\n\tunits = n\n\t#print(number)\n\tif thousands:\n\t\t#print(\"THOU\")\n\t\tstring += \"%s thousand\"%_NUMBERS[thousands]\n\t\t#print(string)\n\tif hundreds:\n\t\t#print(\"HUND\")\n\t\tif thousands:\n\t\t\tstring += \" and \"\n\t\tstring += \"%s hundred\"%_NUMBERS[hundreds]\n\t\t#print(string)\n\tif decades == 1:\n\t\tunits += 10\n\t\tdecades = 0\n\tif decades:\n\t\t#print(\"DECA\")\n\t\tif hundreds or thousands:\n\t\t\tstring += \" and \"\n\t\tif decades > 1:\n\t\t\tstring += \"%s\"%_NUMBERS[decades * 10]\n\t\t#print(string)\n\tif units:\n\t\t#print(\"UNIT\")\n\t\tif not decades and (hundreds or thousands):\n\t\t\tstring += \" and \"\n\t\tif decades:\n\t\t\tstring += \"-\"\n\t\tstring += _NUMBERS[units]\n\treturn string\n\ndef is_leap_year(year):\n\t\"\"\"Returns whether the year passed as a parameter is a leap year or not.\"\"\"\n\treturn (year % 4 == 0) and (year % 100 != 0 or year % 400 == 0)\n\t\ndef year_length(year):\n\t\"\"\"Returns the length of the year passed as a parameter.\"\"\"\n\treturn 366 if is_leap_year(year) else 365\n\ndef month_length(month, year):\n\treturn _MONTHS_LENGTH[month] + (1 if month == 2 and is_leap_year(year) else 0)\n\n_MONTHS_LENGTH = [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\nWEEKDAYS = [\"Sunday\", \"Monday\", \"Tuesday\", \"Wednesday\", \"Thursday\", \"Friday\", \"Saturday\"]\n_STARTING_YEAR = 1900\ndef get_day(day, month, year):\n\t\"\"\"Given a date in the format day, month, year, returns the number of days since the 1st January 1900.\"\"\"\n\t_day = 0\n\tfor _year in range(_STARTING_YEAR, year):\n\t\t_day += year_length(_year)\n\tfor _month in range(1, month):\n\t\t_day += month_length(_month, year)\n\t\t# days += _MONTHS_LENGTH[month]\n\t\t# days += 1 if month == 2 and is_leap_year(year) else 0\n\t_day += day\n\treturn _day\n\ndef get_weekday(day, month, year):\n\t\"\"\"Returns the day of the week of the date passed as parameter (Sunday is 0, Monday is 1).\"\"\"\n\treturn get_day(day, month, year) % 7\n\ndef proper_divisors(n):\n\t\"\"\"Returns the list of proper divisors of n (numbers less than n which divide evenly into n).\"\"\"\n\treturn list(\n\t\t\tfilter(\n\t\t\t\tlambda i: i < n,\n\t\t\t\tdivisors(n)\n\t\t\t)\n\t\t)\n\ndef are_amicable(a, b):\n\t\"\"\"Returns whether the numbers a and b are amicable or not (a and b are an amicable pair if a!=b, d(a)=b and d(b)=a, where d(n) is the sum of the proper divisors of n).\"\"\"\n\treturn a != b \\\n\t\tand sum(proper_divisors(a)) == b \\\n\t\tand sum(proper_divisors(b)) == a\n\ndef is_pandigital(n):\n\t\"\"\"Returns whether n is a pandigital number or not.\n\tAlgorithm taken from http://stackoverflow.com/questions/2484892/fastest-algorithm-to-check-if-a-number-is-pandigital, with some small alterations.\"\"\"\n\tdigits, count = 0, 0\n\twhile n > 0:\n\t\ttmp = digits\n\t\tshift = (n % 10) - 1\n\t\tif shift < 0: \n\t\t\treturn False\n\t\tdigits |= 1 << shift\n\t\tif tmp == digits:\n\t\t\treturn False\n\t\tcount += 1\n\t\tn //= 10\n\treturn digits == (1 << count) - 1\n\ndef generate_pandigitals(n):\n\t\"\"\"Returns all the n-digits, n-pandigital numbers.\"\"\"\n\tdigits = [1, 2, 3, 4, 5, 6, 7, 8, 9]\n\tif n == 1:\n\t\treturn digits\n\treturn map(\n\t\tlambda i: int(\n\t\t\t''.join(\n\t\t\t\tstr(d) for d in i\n\t\t\t\t)\n\t\t\t),\n\t\titertools.permutations(digits[:n])\n\t\t)\n\ndef score_word(word):\n\treturn sum(\n\t\t\tmap(\n\t\t\t\tlambda c: 1 + ord(c) - 
ord('a'),\n\t\t\t\tword.lower()\n\t\t\t)\n\t\t)\n\nif __name__ == \"__main__\":\n\tprint(list(fibonacci_first(20)))\n\tprint(list(fibonacci_upto(100)))\n\tprint(list(triangulars_first(20)))\n\tprint(list(triangulars_upto(100)))\n\tprint(list(primes_first(20)))\n\tprint(list(primes_upto(100)))\n\tprint(factorize(12))\n\tprint(palindrome(\"ABBA\"))\n\tprint(palindrome(\"baba\"))\n\tprint(palindrome(\"amanaplanacanalpanama\"))\n\tprint(palindrome(\"atoyotasatoyota\"))\n\tprint(palindrome(\"As well as some non-palindromes.\"))\n\tprint(gcd(30, 20, 10, 5), gcd(21,6,2))\n\tprint(lcm(4,6), lcm(21, 6, 2), lcm(8,9,21))\n\tprint(gcd(8, 9, 21))\n\tprint(lcm(1,2,3,4,5,6,7,8,9,10))\n\tprint(list(primes_upto(101)))\n\t#print(eratosthenes_sieve(10000))\n\tprint(divisors(28))","sub_path":"lib.py","file_name":"lib.py","file_ext":"py","file_size_in_byte":9917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"556818210","text":"# -*- coding: utf-8 -*-\n#\n# Copyright 2018 ICON Foundation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport pytest\n\nfrom iconservice import Address\nfrom iconservice.base.address import AddressPrefix\nfrom iconservice.base.exception import InvalidParamsException\nfrom iconservice.database.db import IconScoreDatabase\nfrom iconservice.iconscore.context.context import ContextContainer\nfrom iconservice.iconscore.icon_container_db import ContainerUtil, DictDB, ArrayDB, VarDB\nfrom iconservice.iconscore.icon_score_context import IconScoreContextType, IconScoreContext\nfrom tests import create_address\n\n\n@pytest.fixture(scope=\"function\")\ndef score_db(context_db):\n return IconScoreDatabase(create_address(), context_db)\n\n\n@pytest.fixture(scope=\"function\", autouse=True)\ndef context(score_db):\n context = IconScoreContext(IconScoreContextType.DIRECT)\n context.current_address = score_db.address\n\n ContextContainer._push_context(context)\n yield context\n ContextContainer._clear_context()\n\n\nclass TestIconContainerDB:\n ADDRESS = create_address(AddressPrefix.CONTRACT)\n\n @pytest.mark.parametrize(\"args, value_type, expected_value\", [\n (0, int, 1),\n (1, int, 2),\n (2, int, 3),\n ((3, 0), int, 4),\n ((3, 1), int, 5),\n ((3, 2), int, 6),\n ((4, 0), int, 7),\n ((4, 1), int, 8),\n ((4, 2), int, 9),\n ((4, 3, 0), int, 10),\n ((4, 3, 1), int, 11),\n ((4, 3, 2), int, 12),\n (5, Address, ADDRESS),\n (6, int, 0)\n ])\n def test_nested_list(self, score_db, args, value_type, expected_value):\n test_list = [1, 2, 3, [4, 5, 6], [7, 8, 9, [10, 11, 12]], self.ADDRESS]\n ContainerUtil.put_to_db(score_db, 'test_list', test_list)\n\n if isinstance(args, tuple):\n assert ContainerUtil.get_from_db(score_db, 'test_list', *args, value_type=value_type) == expected_value\n else:\n assert ContainerUtil.get_from_db(score_db, 'test_list', args, value_type=value_type) == expected_value\n\n @pytest.mark.parametrize(\"args, value_type, expected_value\", [\n (1, str, 'a'),\n ((2, 0), str, 'a'),\n ((2, 1), str, 'b'),\n ((2, 2, 0), str, 
'c'),\n ((2, 2, 1), str, 'd'),\n ((3, 'a'), int, 1),\n (4, Address, ADDRESS),\n ])\n def test_nested_dict(self, score_db, args, value_type, expected_value):\n test_dict = {1: 'a', 2: ['a', 'b', ['c', 'd']], 3: {'a': 1}, 4: self.ADDRESS}\n ContainerUtil.put_to_db(score_db, 'test_dict', test_dict)\n\n if isinstance(args, tuple):\n assert ContainerUtil.get_from_db(score_db, 'test_dict', *args, value_type=value_type) == expected_value\n else:\n assert ContainerUtil.get_from_db(score_db, 'test_dict', args, value_type=value_type) == expected_value\n\n @pytest.mark.parametrize(\"args, value_type, expected_value\", [\n (0, int, 1),\n (1, int, 2),\n (2, int, 3),\n (3, Address, ADDRESS),\n ])\n def test_tuple(self, score_db, args, value_type, expected_value):\n test_tuple = tuple([1, 2, 3, self.ADDRESS])\n ContainerUtil.put_to_db(score_db, 'test_tuple', test_tuple)\n\n assert ContainerUtil.get_from_db(score_db, 'test_tuple', args, value_type=value_type) == expected_value\n\n @staticmethod\n def _check_the_db_prefix_format(name):\n prefix: bytes = ContainerUtil.create_db_prefix(DictDB, name)\n assert prefix == b'\\x01|' + name.encode()\n\n def test_dict_depth1(self, score_db):\n name = 'test_dict'\n test_dict = DictDB(name, score_db, value_type=int)\n self._check_the_db_prefix_format(name)\n\n test_dict['a'] = 1\n test_dict['b'] = 2\n\n test_dict['b'] += 1\n\n assert test_dict['a'] == 1\n assert test_dict['b'] == 3\n\n def test_dict_other_Key(self, score_db):\n name = 'test_dict'\n test_dict = DictDB(name, score_db, depth=2, value_type=int)\n self._check_the_db_prefix_format(name)\n\n addr1 = create_address(1)\n addr2 = create_address(0)\n test_dict['a'][addr1] = 1\n test_dict['a'][addr2] = 2\n\n assert test_dict['a'][addr1] == 1\n assert test_dict['a'][addr2] == 2\n\n def test_dict_depth2(self, score_db):\n name = 'test_dict'\n test_dict = DictDB(name, score_db, depth=3, value_type=int)\n self._check_the_db_prefix_format(name)\n\n test_dict['a']['b']['c'] = 1\n test_dict['a']['b']['d'] = 2\n test_dict['a']['b']['e'] = 3\n test_dict['a']['b']['f'] = 4\n\n assert test_dict['a']['b']['c'] == 1\n\n def test_success_array1(self, score_db):\n test_array = ArrayDB('test_array', score_db, value_type=int)\n\n range_size = 3\n\n for i in range(range_size):\n test_array.put(i)\n\n for i in range(range_size):\n assert test_array[i] == i\n\n cant_find_value = range_size\n assert (cant_find_value in test_array) is False\n assert len(test_array) == range_size\n\n for e, i in zip(test_array, range(range_size)):\n assert e == i\n\n assert test_array[-1] == range(range_size)[-1]\n\n def test_success_array2(self, score_db):\n test_array = ArrayDB('test_array', score_db, value_type=int)\n\n range_size = 3\n expect_array = []\n\n for i in range(range_size):\n expect_array.append(i)\n test_array.put(i)\n\n for index, e in enumerate(test_array):\n assert e == expect_array[index]\n\n def test_success_array3(self, score_db):\n test_array = ArrayDB('test_array', score_db, value_type=int)\n\n range_size = 3\n expect_array = []\n\n for i in range(range_size):\n expect_array.append(i)\n test_array.put(i)\n\n if 0 in test_array:\n pass\n else:\n raise Exception()\n\n if \"a\" in test_array:\n raise Exception()\n else:\n pass\n\n def test_success_array4(self, score_db):\n test_array = ArrayDB('test_array', score_db, value_type=int)\n\n test_array.put(1)\n test_array.put(2)\n\n with pytest.raises(InvalidParamsException):\n var = test_array[2]\n print(var)\n\n def test_negative_index_access_in_array_db(self, score_db):\n array = 
ArrayDB('array', score_db, value_type=int)\n\n size = 10\n for i in range(size):\n array.put(i)\n\n negative_index = -1\n for _ in range(size):\n index = size + negative_index\n assert array[index] == array[negative_index]\n negative_index -= 1\n\n @pytest.mark.parametrize(\"value_type, expected_value\", [\n (int, 10 ** 19 + 1),\n (Address, create_address(AddressPrefix.CONTRACT)),\n (Address, create_address(AddressPrefix.EOA))\n ])\n def test_var_db(self, score_db, value_type, expected_value):\n test_var = VarDB('test_var', score_db, value_type=value_type)\n assert test_var._db != score_db\n assert test_var._db._prefix == b'\\x02'\n\n test_var.set(expected_value)\n\n assert test_var.get() == expected_value\n\n @pytest.mark.parametrize(\"collection, key_or_index\", [\n ({\"dummy_key\": \"dummy_value\"}, \"not_exists_key\"),\n ([\"dummy_list\"], 3)\n ])\n @pytest.mark.parametrize(\"value_type, expected_value\", [\n (int, 0),\n (str, \"\"),\n (bytes, None),\n (Address, None)\n ])\n def test_default_value_of_container_db(self, score_db, value_type, expected_value, collection, key_or_index):\n # TEST: Check the default value of collection object (dict, list)\n ContainerUtil.put_to_db(score_db, 'test_collection', collection)\n actual_value = ContainerUtil.get_from_db(score_db, 'test_collection', key_or_index, value_type=value_type)\n\n assert actual_value == expected_value\n\n @pytest.mark.parametrize(\"value_type, expected_value\", [\n (int, 0),\n (str, \"\"),\n (bytes, None),\n (Address, None)\n ])\n def test_default_value_of_var_db(self, score_db, value_type, expected_value):\n # var_db\n test_var = VarDB('test_var', score_db, value_type=value_type)\n assert test_var.get() == expected_value\n\n def test_array_db(self, score_db):\n name = \"TEST\"\n testarray = ArrayDB(name, score_db, value_type=int)\n assert testarray._db != score_db\n assert testarray._db._prefix == ContainerUtil.create_db_prefix(ArrayDB, name)\n\n testarray.put(1)\n testarray.put(3)\n testarray.put(5)\n testarray.put(7)\n assert len(testarray) == 4\n assert testarray.pop() == 7\n assert testarray.pop() == 5\n assert len(testarray) == 2\n\n def test_array_db2(self, score_db):\n name = \"TEST\"\n testarray = ArrayDB(name, score_db, value_type=int)\n assert testarray._db != score_db\n assert testarray._db._prefix == ContainerUtil.create_db_prefix(ArrayDB, name)\n\n testarray.put(1)\n testarray.put(2)\n testarray.put(3)\n testarray.put(4)\n\n assert testarray[0] == 1\n assert testarray[1] == 2\n assert testarray[2] == 3\n assert testarray[3] == 4\n\n assert testarray[-1] == 4\n assert testarray[-2] == 3\n assert testarray[-3] == 2\n assert testarray[-4] == 1\n\n testarray[0] = 5\n testarray[1] = 6\n testarray[2] = 7\n testarray[3] = 8\n\n assert testarray[0] == 5\n assert testarray[1] == 6\n assert testarray[2] == 7\n assert testarray[3] == 8\n\n testarray[-1] = 4\n testarray[-2] = 3\n testarray[-3] = 2\n testarray[-4] = 1\n\n assert testarray[-1] == 4\n assert testarray[-2] == 3\n assert testarray[-3] == 2\n assert testarray[-4] == 1\n\n with pytest.raises(InvalidParamsException):\n testarray[5] = 1\n a = testarray[5]\n\n @pytest.mark.parametrize(\"prefix, score_db_cls, expected_prefix\", [\n ('a', ArrayDB, b'\\x00|a'),\n ('dictdb', DictDB, b'\\x01|dictdb'),\n ])\n def test_container_util(self, prefix, score_db_cls, expected_prefix):\n actual_prefix: bytes = ContainerUtil.create_db_prefix(score_db_cls, prefix)\n assert actual_prefix == expected_prefix\n\n def 
test_when_create_var_db_prefix_using_container_util_should_raise_error(self):\n with pytest.raises(InvalidParamsException):\n ContainerUtil.create_db_prefix(VarDB, 'vardb')\n","sub_path":"tests/unit_test/iconscore/test_icon_container_db.py","file_name":"test_icon_container_db.py","file_ext":"py","file_size_in_byte":10991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"208365990","text":"import trio\n\nfrom ._highlevel_open_tcp_stream import DEFAULT_DELAY\n\n__all__ = [\n \"open_ssl_over_tcp_stream\", \"open_ssl_over_tcp_listeners\",\n \"serve_ssl_over_tcp\"\n]\n\n\n# It might have been nice to take a ssl_protocols= argument here to set up\n# NPN/ALPN, but to do this we have to mutate the context object, which is OK\n# if it's one we created, but not OK if it's one that was passed in... and\n# the one major protocol using NPN/ALPN is HTTP/2, which mandates that you use\n# a specially configured SSLContext anyway! I also thought maybe we could copy\n# the given SSLContext and then mutate the copy, but it's no good:\n# copy.copy(SSLContext) seems to succeed, but the state is not transferred!\n# For example, with CPython 3.5, we have:\n# ctx = ssl.create_default_context()\n# assert ctx.check_hostname == True\n# assert copy.copy(ctx).check_hostname == False\n# So... let's punt on that for now. Hopefully we'll be getting a new Python\n# TLS API soon and can revisit this then.\nasync def open_ssl_over_tcp_stream(\n host,\n port,\n *,\n https_compatible=False,\n ssl_context=None,\n # No trailing comma b/c bpo-9232 (fixed in py36)\n happy_eyeballs_delay=DEFAULT_DELAY\n):\n \"\"\"Make a TLS-encrypted Connection to the given host and port over TCP.\n\n This is a convenience wrapper that calls :func:`open_tcp_stream` and\n wraps the result in an :class:`~trio.ssl.SSLStream`.\n\n This function does not perform the TLS handshake; you can do it\n manually by calling :meth:`~trio.ssl.SSLStream.do_handshake`, or else\n it will be performed automatically the first time you send or receive\n data.\n\n Args:\n host (bytes or str): The host to connect to. We require the server\n to have a TLS certificate valid for this hostname.\n port (int): The port to connect to.\n https_compatible (bool): Set this to True if you're connecting to a web\n server. See :class:`~trio.ssl.SSLStream` for details. Default:\n False.\n ssl_context (:class:`~ssl.SSLContext` or None): The SSL context to\n use. If None (the default), :func:`ssl.create_default_context`\n will be called to create a context.\n happy_eyeballs_delay (float): See :func:`open_tcp_stream`.\n\n Returns:\n trio.ssl.SSLStream: the encrypted connection to the server.\n\n \"\"\"\n tcp_stream = await trio.open_tcp_stream(\n host,\n port,\n happy_eyeballs_delay=happy_eyeballs_delay,\n )\n if ssl_context is None:\n ssl_context = trio.ssl.create_default_context()\n return trio.ssl.SSLStream(\n tcp_stream,\n ssl_context,\n server_hostname=host,\n https_compatible=https_compatible,\n )\n\n\nasync def open_ssl_over_tcp_listeners(\n port, ssl_context, *, host=None, https_compatible=False, backlog=None\n):\n \"\"\"Start listening for SSL/TLS-encrypted TCP connections to the given port.\n\n Args:\n port (int): The port to listen on. See :func:`open_tcp_listeners`.\n ssl_context (~ssl.SSLContext): The SSL context to use for all incoming\n connections.\n host (str, bytes, or None): The address to bind to; use ``None`` to bind\n to the wildcard address. 
See :func:`open_tcp_listeners`.\n https_compatible (bool): See :class:`~trio.ssl.SSLStream` for details.\n backlog (int or None): See :class:`~trio.ssl.SSLStream` for details.\n\n \"\"\"\n tcp_listeners = await trio.open_tcp_listeners(\n port, host=host, backlog=backlog\n )\n ssl_listeners = [\n trio.ssl.SSLListener(\n tcp_listener,\n ssl_context,\n https_compatible=https_compatible,\n ) for tcp_listener in tcp_listeners\n ]\n return ssl_listeners\n\n\nasync def serve_ssl_over_tcp(\n handler,\n port,\n ssl_context,\n *,\n host=None,\n https_compatible=False,\n backlog=None,\n handler_nursery=None,\n task_status=trio.TASK_STATUS_IGNORED\n):\n \"\"\"Listen for incoming TCP connections, and for each one start a task\n running ``handler(stream)``.\n\n This is a thin convenience wrapper around\n :func:`open_ssl_over_tcp_listeners` and :func:`serve_listeners` – see them\n for full details.\n\n .. warning::\n\n If ``handler`` raises an exception, then this function doesn't do\n anything special to catch it – so by default the exception will\n propagate out and crash your server. If you don't want this, then catch\n exceptions inside your ``handler``, or use a ``handler_nursery`` object\n that responds to exceptions in some other way.\n\n When used with ``nursery.start`` you get back the newly opened listeners.\n See the documentation for :func:`serve_tcp` for an example where this is\n useful.\n\n Args:\n handler: The handler to start for each incoming connection. Passed to\n :func:`serve_listeners`.\n\n port (int): The port to listen on. Use 0 to let the kernel pick\n an open port. Ultimately passed to :func:`open_tcp_listeners`.\n\n ssl_context (~ssl.SSLContext): The SSL context to use for all incoming\n connections. Passed to :func:`open_ssl_over_tcp_listeners`.\n\n host (str, bytes, or None): The address to bind to; use ``None`` to bind\n to the wildcard address. Ultimately passed to\n :func:`open_tcp_listeners`.\n\n https_compatible (bool): Set this to True if you want to use\n \"HTTPS-style\" TLS. See :class:`~trio.ssl.SSLStream` for details.\n\n backlog (int or None): See :class:`~trio.ssl.SSLStream` for details.\n\n handler_nursery: The nursery to start handlers in, or None to use an\n internal nursery. 
Passed to :func:`serve_listeners`.\n\n      task_status: This function can be used with ``nursery.start``.\n\n    Returns:\n      This function only returns when cancelled.\n\n    \"\"\"\n    listeners = await trio.open_ssl_over_tcp_listeners(\n        port,\n        ssl_context,\n        host=host,\n        https_compatible=https_compatible,\n        backlog=backlog\n    )\n    await trio.serve_listeners(\n        handler,\n        listeners,\n        handler_nursery=handler_nursery,\n        task_status=task_status\n    )\n","sub_path":"trio/_highlevel_ssl_helpers.py","file_name":"_highlevel_ssl_helpers.py","file_ext":"py","file_size_in_byte":6137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"317010998","text":"import discord\nfrom discord.ext import commands\n\nimport sys\nimport os\nimport psutil\n\ndef developer_only():\n    async def predicate(ctx):\n        developers = [349300676461068288]\n        return ctx.author.id in developers\n    return commands.check(predicate)\n\nclass Dev(commands.Cog):\n    def __init__(self, client_instance, bot_instance):\n        self.client = client_instance\n        self.bot = bot_instance\n\n    def is_developer(self, user_id: int):\n        return user_id in self.bot.developers\n\n    def _get_ram_usage(self):\n        memory = psutil.virtual_memory()\n        total_memory = round(memory.total / 1000000000, 2)\n        available_memory = round(memory.available / 1000000000, 2)\n        memory_used = round(total_memory - available_memory, 2)\n        # psutil exposes the used-memory percentage directly\n        memory_percent = memory.percent\n\n        return \"{0}% - {1}GB / {2}GB used ({3}GB available)\".format(memory_percent, memory_used, total_memory, available_memory)\n\n    @commands.group()\n    @developer_only()\n    async def settings(self, ctx):\n        if ctx.invoked_subcommand is None:\n            await ctx.send(\":x: Invalid settings command. Try again.\")\n\n    @settings.command()\n    @developer_only()\n    async def view(self, ctx, setting):\n        if setting.lower() != \"all\":\n            # attempt to get the value of a setting\n            try:\n                value = self.bot.settings[\"{0}\".format(ctx.guild.id)][setting]\n                await ctx.send(\"Setting '{0}' is currently set to '{1}' ({2})\".format(setting, value, type(value)))\n            except KeyError:\n                # undefined setting\n                await ctx.send(\":x: No setting '{0}' found. Try again.\".format(setting))\n        else:\n            # send settings file for the guild\n            file = discord.File(fp=self.bot.directory + \"guilds/{0}/settings.txt\".format(ctx.guild.id), filename=\"RoboWhalo_{0}_settings.txt\".format(ctx.guild.name))\n            await ctx.send(file=file)\n\n    @settings.command()\n    @developer_only()\n    async def set(self, ctx, setting, value):\n        if setting not in self.bot.settings[\"{0}\".format(ctx.guild.id)]:\n            await ctx.send(\":x: No setting '{0}' found. 
Try again.\".format(setting))\n else:\n if value in [\"True\", \"False\", \"None\"]:\n value = eval(value)\n else:\n try:\n if \".\" in value:\n value = float(value)\n else:\n value = int(value)\n except ValueError:\n # no conversion needed\n pass\n\n guild_settings = self.bot.settings[str(ctx.guild.id)]\n guild_settings[str(setting)] = value\n\n # reconfig settings\n with open(self.bot.directory + \"guilds/{0}/settings.txt\".format(ctx.guild.id), \"r\") as file:\n lines = file.readlines()\n\n with open(self.bot.directory + \"guilds/{0}/settings.txt\".format(ctx.guild.id), \"w\") as file:\n for line in lines:\n if setting not in line:\n file.write(\"{0}\".format(line))\n else:\n file.write(\"{0} {1}\\n\".format(setting, value))\n\n await ctx.send(\":white_check_mark: Setting '{0}' has been set to '{1}' ({2})\".format(setting, value, type(value)))\n\n @settings.command()\n @developer_only()\n async def reconfig(self, ctx):\n \"\"\"\n DEV ONLY - Reconfigures the bot's settings cache\n \"\"\"\n \n self.bot.settings = self.bot.config_guild_settings()\n await ctx.send(\":white_check_mark: Guild settings reconfigured.\")\n \n @commands.group()\n async def server(self, ctx):\n \"\"\"\n Bot server related commands\n \"\"\"\n\n if ctx.invoked_subcommand is None:\n await ctx.send(\":x: Invalid server command, try again.\")\n \n @server.command()\n @developer_only()\n async def kill(self, ctx):\n \"\"\"\n DEV ONLY - Forces the bot to shutdown\n \"\"\"\n\n sys.exit()\n\n @server.command()\n async def stats(self, ctx):\n \"\"\"\n Shows the bot server stats\n \"\"\"\n \n embed = discord.Embed(title=\"__**RoboWhalo Server Statistics**__\", color=discord.Color.blue())\n embed.add_field(name=\"Platform\", value=sys.platform)\n embed.add_field(name=\"Python Version\", value=sys.version)\n embed.add_field(name=\"C API Version\", value=sys.api_version)\n embed.add_field(name=\"discord.py API Version\", value=discord.__version__)\n embed.add_field(name=\"Server Uptime\", value=self.bot.get_uptime())\n embed.add_field(name=\"Total Servers\", value=len(self.client.guilds))\n embed.add_field(name=\"Total Users\", value=len(self.client.users))\n embed.add_field(name=\"Total CPU Usage\", value=str(psutil.cpu_percent(None, False)) + \"%\")\n embed.add_field(name=\"Total RAM Usage\", value=self._get_ram_usage())\n\n await ctx.send(embed=embed)\n\n @server.group()\n @developer_only()\n async def directory(self, ctx):\n \"\"\"\n Server directory related commands\n \"\"\"\n \n if ctx.invoked_subcommand is None:\n await ctx.send(\":x: Invalid server directory command. 
Try again.\")\n\n @directory.command()\n @developer_only()\n async def create(self, ctx, *, path: str):\n \"\"\"\n DEV ONLY - Creates a new directory in Ox0777 mode\n \"\"\"\n \n try:\n os.mkdir(path)\n await ctx.send(\":white_check_mark: New directory at {0} created.\".format(path))\n except os.error as err:\n await ctx.send(\":x: An error occured while trying to make the directory at '{0}' - {1}.\".format(path, err))\n\n @directory.command()\n @developer_only()\n async def config_guild(self, ctx, guildid=None):\n \"\"\"\n DEV ONLY - Creates a new guild drectory in Ox0777 mode\n \"\"\"\n \n if guildid is None:\n guildid = ctx.guild.id\n else:\n guildid = int(guildid)\n\n # check if the guild id is valid\n if self.client.get_guild(guildid) is None:\n await ctx.send(\":x: Invalid guild id, try again.\")\n try:\n os.mkdir(self.bot.directory + \"guilds/{0}\".format(ctx.guild.id))\n except os.error as err:\n await ctx.send(\":x: An error occured while trying to make the directory at '{0}' - {1}.\".format(self.bot.directory + \"guilds/{0}\".format(ctx.guild.id), err))\n return\n\n try:\n os.mkdir(self.bot.directory + \"guilds/{0}/\".format(ctx.guild.id) + \"tags\")\n except os.error as err:\n await ctx.send(\":x: An error occured while trying to make the directory at '{0}' - {1}.\".format(self.bot.directory + \"guilds/{0}/\".format(ctx.guild.id) + \"tags\", err))\n\n # copy settings file\n with open(self.bot.directory + \"guilds/BASE/settings.txt\", \"r\") as file:\n lines = file.readlines()\n\n with open(self.bot.directory + \"guilds/{0}/settings.txt\".format(ctx.guild.id), \"w\") as file:\n for line in lines:\n file.write(line)\n\n # copy filters file\n with open(self.bot.directory + \"guilds/BASE/filters.txt\", \"r\") as file:\n lines = file.readlines()\n\n with open(self.bot.directory + \"guilds/{0}/filters.txt\".format(ctx.guild.id), \"w\") as file:\n for line in lines:\n file.write(line)\n\n # reconfigure the bot's internal guild settings\n self.bot.settings = self.bot.config_guild_settings()\n\n await ctx.send(\":white_check_mark: New guild directory configured.\")\n","sub_path":"dev.py","file_name":"dev.py","file_ext":"py","file_size_in_byte":7688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"259134060","text":"#!Python2.6\r\n# -*- encoding: utf-8 -*-\r\n\r\nimport os\r\nimport re\r\nimport urllib2\r\n\r\nfrom google.appengine.ext import webapp\r\nfrom google.appengine.ext.webapp import template\r\nfrom amazon_search import AmazonRequest\r\n\r\nclass AmazonItemSearch(webapp.RequestHandler):\r\n ''' Amazon Product Advertising API を利用し、キーワード検索を行う\r\n example http://typea-mixi01.appspot.com/am_is?q=book\r\n parameters \r\n q : query keyword\r\n s : view style \r\n image : image list\r\n text : text only\r\n ul : unnumbered list\r\n image_text : image and text\r\n image_text_table : image and text table layout\r\n c : keyword encoding\r\n '''\r\n def get(self):\r\n try:\r\n keyword = self.request.GET['q']\r\n #keyword = keyword.encode('utf-8')\r\n except:\r\n keyword = 'Amazon'\r\n \r\n try:\r\n style = self.request.GET['s'] # style\r\n except:\r\n style = 'image' \r\n\r\n try:\r\n coding = self.request.GET['c'] # style\r\n except:\r\n coding = None \r\n\r\n #パラメータのデコード\r\n #@see http://www.findxfine.com/default/495.html\r\n #FireFox のアドレスバーに漢字を打つとUTF-8でないコードにエンコードされてしまう?\r\n \r\n #keyword = urllib2.unquote(keyword)\r\n if keyword == '':\r\n keyword = 'amazon'\r\n #if coding:\r\n # keyword = unicode(keyword, coding)\r\n \r\n 
amazon_request = AmazonRequest() \r\n search_index = 'Books'\r\n\r\n item_list = amazon_request.request(keyword, search_index)\r\n\r\n # retry \r\n if len(item_list) == 0:\r\n keyword = keyword.split(' ')[0]\r\n item_list = amazon_request.request(keyword, search_index)\r\n\r\n if len(item_list) == 0:\r\n m = re.match(r'(?P\\w+)', keyword)\r\n if m:\r\n keyword = m.group('kw')\r\n item_list = amazon_request.request(keyword, search_index)\r\n\r\n if len(item_list) == 0:\r\n keyword = 'Computer'\r\n item_list = amazon_request.request(keyword, search_index)\r\n \r\n context = {\r\n 'style':style,\r\n 'keyword':keyword,\r\n 'item_list':item_list\r\n }\r\n \r\n path = os.path.join(os.path.dirname(__file__), 'amazon_ads.html')\r\n self.response.out.write(template.render(path, context))\r\n\r\nclass CreateLinks(webapp.RequestHandler):\r\n '''Amazon へのリンクを作成する '''\r\n def post(self):\r\n keyword = self.request.POST['q']\r\n keyword = keyword.encode('utf-8')\r\n keyword = urllib2.unquote(keyword) \r\n \r\n style = self.request.POST['style']\r\n search_index = self.request.POST['search_index']\r\n \r\n amazon_request = AmazonRequest() \r\n item_list = amazon_request.request(keyword, search_index)\r\n links = ''\r\n if len(item_list) > 0:\r\n links_template_values = {\r\n 'style':style,\r\n 'keyword':keyword,\r\n 'item_list':item_list\r\n }\r\n links_path = os.path.join(os.path.dirname(__file__), 'amazon_ads.html')\r\n links = template.render(links_path,links_template_values)\r\n \r\n template_values = {\r\n 'keyword':keyword,\r\n 'links':links,\r\n 'search_index':search_index,\r\n 'style':style,\r\n }\r\n path = os.path.join(os.path.dirname(__file__), 'create_links.html')\r\n self.response.out.write(template.render(path,template_values))\r\n\r\n def get(self):\r\n template_values = {\r\n 'keyword':'',\r\n 'links':'',\r\n 'search_index':'Books',\r\n 'style':'ul',\r\n }\r\n path = os.path.join(os.path.dirname(__file__), 'create_links.html')\r\n self.response.out.write(template.render(path,template_values))\r\n","sub_path":"typea-mixi01/src/amazon/amazon_utils.py","file_name":"amazon_utils.py","file_ext":"py","file_size_in_byte":4188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"114540464","text":"# %%\n# ==============================================================================\n# IMPORT PACKAGES\n# ==============================================================================\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\n\ndef parser(x):\n time = pd.datetime.strptime(x, '%d-%b-%Y %H:%M:%S')\n return time\n\n\nstart = pd.datetime(2015, 5, 15, 1)\nend = pd.datetime(2016, 5, 14, 1)\ndt = 3\nrep = 10\nrep_end = start.replace(year=start.year + rep)\nslr = 0.003\n\ndf = pd.read_csv(file, parse_dates=['datetime'],\n date_parser=parser, index_col='datetime')\ndf1 = df[(df.index >= start) & (df.index < end) & (df.index.minute == 0)]\ndf_out = df1['pressure'] - np.mean(df1['pressure'])\n\ntides = df_out\n\nnew_index = pd.DatetimeIndex(start=start, end=rep_end, freq='H')\n\ntides_rep = np.tile(tides.values, rep + 1)\ntides_rep = tides_rep[:len(new_index)]\n\nslr_rep = np.linspace(0, rep * slr, num=len(tides_rep))\n\nout = tides_rep + slr_rep\n\n# %%\n# ==============================================================================\n# PARAMATERIZE AND RUN DELTA Z\n# ==============================================================================\n\n# gs = 0.03\n# ws = ((gs/1000)**2*1650*9.8)/0.018\n# rho = 700\n# 
SSC = 0.2\n# dP = 0\n# dO = 0\n# dSub = 0.002\n# z0 = 0\n# heads = tides\n# time = tides.index\n# dM = 0\n\n# C0 = np.zeros(len(heads))\n# C = np.zeros(len(heads))\n# z = np.zeros(len(heads)+1)\n# z[0:2] = z0\n# dz = np.zeros(len(heads))\n# dh = np.zeros(len(heads))\n# dt = (time[1]-time[0]).total_seconds()\n# j = 1\n# for h in heads[1:]:\n# dh[j] = (h-heads[j-1])/dt\n# C0[j] = 0\n# if h > z[j]:\n# if dh[j] > 0:\n# C0[j] = 0.69*SSC*(h-z[j])\n# C[j] = (C0[j]*(h-heads[j-1])+C[j-1]*(h-z[j])) / \\\n# (2*h-heads[j-1]-z[j]+ws/dt)\n# else:\n# C[j] = (C[j-1]*(h-z[j]))/(h-z[j]+ws/dt)\n# else:\n# C[j] = 0\n# dz[j] = (ws*C[j]/rho)*dt\n# z[j+1] = z[j] + dz[j] + dO - dP - dM/(8760/(dt/3600))\n# j = j + 1\n\n# z = z[1:]\n\n\n# %%\nstart = pd.datetime(2015, 5, 15, 1)\nend = pd.datetime(2016, 5, 14, 1)\ndt = 3\nrep = 10\n\ngs = 0.03\nws = ((gs/1000)**2*1650*9.8)/0.018\nrho = 700\nSSC = 0.2\ndP = 0\ndO = 0\ndSub = 0.002\nz0 = 0\ndM = 0\n\nindex = new_index\nheads = out\n\ncolumns = ['h', 'dh', 'C0', 'C', 'z', 'dz']\ndf = pd.DataFrame(index=index, columns=columns)\ndf[:] = 0\ndt = (index[1]-index[0]).total_seconds()\ndf.loc[:, 'h'] = tides_rep\ndf.loc[:, 'dh'] = df.loc[:, 'h'].diff()/dt\n\ndef calc_c0(h, dh, z, A, ssc):\n if (h > z and dh > 0):\n return A * ssc * (h - z)\n else:\n return 0\n\ndef calc_c(c0, h, h_min_1, dh, c_min_1, z, ws, dt):\n if (h > z and dh > 0):\n return (c0 * (h-h_min_1) + c_min_1 * (h - z)) / (2 * h - h_min_1 - z + ws / dt)\n elif (h > z and dh < 0):\n return (c_min_1 * (h - z)) / (h - z + ws / dt)\n else:\n return 0\n\ndef calc_dz(c, ws, rho, dt):\n return (ws * c / rho) * dt\n\ndef calc_z(z_min_1, dz_min_1, dO, dP):\n return z_min_1 + dz_min_1 + dO - dP\n\nfor t in df.index[1:]:\n t_min_1 = t - pd.Timedelta(hours=1)\n df.loc[t,'z'] = calc_z(df.at[t_min_1,'z'],df.at[t_min_1,'dz'],0,0)\n df.loc[t,'C0'] = calc_c0(df.at[t,'h'], df.at[t,'dh'], df.at[t,'z'], A, SSC)\n df.loc[t,'C'] = calc_c(df.at[t,'C0'], df.at[t,'h'],df.at[t_min_1,'h'], df.at[t,'dh'], df.at[t_min_1, 'C'], df.at[t,'z'], ws, dt)\n df.loc[t,'dz'] = calc_dz(df.at[t,'C'], ws, rho, dt)\n\nfor t in df.index[1:]:\n t1 = t\n t2 = df.index[t+1]\n if h > df.z[j]:\n if df.dh[j] > 0:\n df.loc[t1, 'C0'] = 0.69 * SSC * (h - df.z[j])\n df.C[j] = (df.C0[j] * (h - df.h[j - 1]) + df.C[j - 1] *\n (h - df.z[j])) / (2 * h - df.h[j - 1] - df.z[j] + ws / dt)\n else:\n df.C[j] = (df.C[j - 1] * (h - df.z[j])) / (h - df.z[j] + ws / dt)\n else:\n df.C[j] = 0\n df.dz[j] = (ws * df.C[j] / rho) * dt\n df.z[j + 1] = df.z[j] + df.dz[j] + dO - dP - dM / (8760 / (dt/3600))\n j = j + 1\n\n# %%\n","sub_path":"scripts/debug_script.py","file_name":"debug_script.py","file_ext":"py","file_size_in_byte":3989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"83596152","text":"import asyncio\nimport io\nimport math\nimport re\nimport urllib.parse\nfrom datetime import datetime\n\nimport aiohttp\nimport discord.game\nimport pytz\nfrom PIL import Image\nfrom lxml import html\nfrom pytz import timezone\n\nimport discordant.utils as utils\nfrom discordant import Discordant\n\n\n@Discordant.register_command(\"help\", [\"info\", \"h\", \"cmds\", \"commands\"],\n context=True)\nasync def _help(self, args, message, context):\n \"\"\"!help [command/section]\n displays command help and information.\"\"\"\n sections = {}\n for cmd in self._commands.values():\n if cmd.perm_func and not cmd.perm_func(self, context.author):\n continue\n if cmd.section in sections:\n sections[cmd.section].append(cmd)\n else:\n 
sections[cmd.section] = [cmd]\n cmd = utils.get_cmd(self, args) if args else None\n if cmd:\n if cmd.perm_func and not cmd.perm_func(self, context.author):\n await self.send_message(\n message.channel, \"You are not authorized to use this command.\")\n return\n await self.send_message(message.channel, cmd.help)\n return\n if args:\n if args in sections:\n sections = {args: sections[args]}\n else:\n await self.send_message(message.channel,\n \"Command could not be found.\")\n return\n msg = None\n try:\n await utils.send_long_message(self, message.author, _help_menu(\n sections))\n await self.send_message(\n message.author,\n \"type !help [command/section] to display more information \"\n \"about a certain command or section.\")\n await self.send_message(\n message.author,\n \"**command help syntax**:\\n\"\n \"[] optional argument\\n\"\n \"<> required argument\\n\"\n \"\\\\* any number of arguments\\n\"\n \"k=v kwargs style argument (each key-value pair is \"\n \"separated by space, and the key and value are separated by the\"\n \" \\\"=\\\" character).\\n\"\n \"\\\\*\\\\* any number of kwargs\")\n except discord.errors.Forbidden:\n msg = await self.send_message(\n message.channel, \"Please enable your PMs.\")\n if message.server:\n if not msg:\n msg = await self.send_message(\n message.channel, \"Check your PMs.\")\n await _delete_after(self, 5, [message, msg])\n\n\ndef _help_menu(sections):\n output = \"**commands**:\"\n for section, cmd_list in sections.items():\n tab_4 = \" \" * 4\n output += \"\\n __{}__:\\n\".format(section) + \\\n \"\\n\".join([tab_4 + \"*{}* - \".format(cmd.aliases[0]) +\n cmd.help.replace(\"\\n\", tab_4 + \"\\n\").split(\n \" - \", 1)[1] for cmd in cmd_list])\n return output\n\n\ndef _tz_args(args):\n if not args:\n return False\n split = args.split()\n len_s = len(split)\n return len_s == 1 or len_s >= 3, split\n\n\n@Discordant.register_command(\"timezone\", [\"tz\"], arg_func=_tz_args)\nasync def _convert_timezone(self, args_split, message):\n \"\"\"!timezone