diff --git "a/5240.jsonl" "b/5240.jsonl" new file mode 100644--- /dev/null +++ "b/5240.jsonl" @@ -0,0 +1,403 @@ +{"seq_id":"229125070","text":"# -*- coding: utf-8 -*-\n##############################################################################\n#\n# Author: Nicolas Bessi\n# Copyright 2014 Camptocamp SA\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see .\n#\n##############################################################################\nimport requests\nfrom urllib import quote, urlencode\nfrom urlparse import parse_qs, urljoin\nimport simplejson as json\n\n\nclass SalesForceOauth2MAnager(object):\n\n def __init__(self, backend_record):\n \"\"\"\n \"\"\"\n self.backend = backend_record\n self.base_login_url = 'https://login.salesforce.com/'\n self.authorization_url = \"services/oauth2/authorize\"\n self.token_url = \"services/oauth2/token\"\n self.redirect_uri = urljoin(self.backend.callback_url,\n \"salesforce/oauth\")\n if self.backend.sandbox:\n self.base_login_url = \"https://test.salesforce.com/\"\n\n def authorize_url(self, scope='', **kwargs):\n \"\"\"\n Returns the callback url to redirect the user after authorization\n \"\"\"\n\n oauth_params = {\n 'redirect_uri': self.redirect_uri,\n 'client_id': self.backend.consumer_key,\n 'scope': scope\n }\n oauth_params.update(kwargs)\n return \"%s%s?%s\" % (\n self.base_login_url,\n quote(self.authorization_url),\n urlencode(oauth_params)\n )\n\n def get_token(self, **kwargs):\n \"\"\"\n Requests an access token\n \"\"\"\n url = \"%s%s\" % (self.base_login_url, quote(self.token_url))\n data = {'code': self.backend.consumer_code,\n 'grant_type': 'authorization_code',\n 'redirect_uri': self.redirect_uri,\n 'client_id': self.backend.consumer_key,\n 'client_secret': self.backend.consumer_secret}\n data.update(kwargs)\n response = requests.post(url, data=data)\n\n if isinstance(response.content, basestring):\n try:\n content = json.loads(response.content)\n except ValueError:\n content = parse_qs(response.content)\n else:\n content = response.content\n return content\n\n def refresh_token(self, **kwargs):\n \"\"\"\n Requests an access token\n \"\"\"\n url = \"%s%s\" % (self.base_login_url, quote(self.token_url))\n data = {'refresh_token': self.backend.consumer_refresh_token,\n 'client_id': self.backend.consumer_key,\n 'client_secret': self.backend.consumer_secret,\n 'grant_type': 'refresh_token'}\n data.update(kwargs)\n headers = {'Content-Type': 'application/x-www-form-urlencoded'}\n response = requests.post(url, data=data, headers=headers)\n if isinstance(response.content, basestring):\n try:\n content = json.loads(response.content)\n except ValueError:\n content = parse_qs(response.content)\n else:\n content = response.content\n return content\n","sub_path":"connector_salesforce/lib/oauth2_utils.py","file_name":"oauth2_utils.py","file_ext":"py","file_size_in_byte":3731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} 
+{"seq_id":"545186216","text":"# ahora con tres numeros aleatorios\r\n\r\n\r\nfrom random import randint #importa solo una funcion de esta libreria\r\n\r\nlista=[]\r\nfor i in range(3):\r\n lista.append(randint(0,100))\r\n\r\nprint(lista)\r\n \r\nn=randint(0, 100)\r\nprint(n)\r\nadivinado=False\r\n\r\nfor i in range(5):\r\n a=int(input(\"ingrese su valor: \"))\r\n print(a)\r\n\r\n if a in lista:\r\n print(\"ganaste\")\r\n adivinado = True\r\n break #sentencia de salto para salir del ciclo for \r\n else:\r\n print(\"sigue intentando\")\r\n\r\nif not adivinado:\r\n print(\"el numero aleatorio fue: \" + str(n))\r\n \r\n \r\n","sub_path":"aleatorios4.py","file_name":"aleatorios4.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"120218240","text":"\"\"\"\nWrite a function, persistence, that takes in a positive parameter num and \nreturns its multiplicative persistence, which is the number of times you must \nmultiply the digits in num until you reach a single digit.\n\npersistence(39) # returns 3, because 3*9=27, 2*7=14, 1*4=4\n # and 4 has only one digit\n \npersistence(999) # returns 4, because 9*9*9=729, 7*2*9=126,\n # 1*2*6=12, and finally 1*2=2\n\npersistence(4) # returns 0, because 4 is already a one-digit number\n\n\"\"\"\n\n\ndef persistence(n):\n if n < 10: return 0\n p=1\n for i in str(n): p*=int(i)\n return 1 + persistence(p) \n\nprint(persistence(39))","sub_path":"Persistent Bugger.py","file_name":"Persistent Bugger.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"538671547","text":"import dash\n#import dash_core_components as dcc\nimport dash_html_components as html\nimport pandas as pd\n\n# making data frame from csv file\ndf = pd.read_csv('test.csv')\n# sorting data frame by Team and then By names\ndf.sort_values(['ssid', 'ntp', 'dBm'], axis=0, ascending=[True, True, True], inplace=True)\ndf.to_csv('max.csv', index=False)\ndf1 = pd.read_csv('max.csv')\ndf2 = df1.groupby(['ssid']).max()\ndf2.to_csv('maxgroup.csv', index=False)\n\ndf4 = pd.read_csv('maxgroup.csv')\n(x0, y0) = df4.shape\ndf5 = pd.read_csv('max.csv')\ndf6 = df5.groupby(['ssid']).max()\ncol_name=df4.columns.tolist()\ncol_name.insert(0, 'ssid')\ndf4 = df4.reindex(columns=col_name)\ndf4['ssid']= df6.index.values.tolist()\ndf4.to_csv('maxoutput.csv', index=False)\ndef generate_table(dataframe, max_rows=x0):\n return html.Table(\n # Header\n [html.Tr([html.Th(col) for col in dataframe.columns])] +\n\n # Body\n [html.Tr([\n html.Td(dataframe.iloc[i][col]) for col in dataframe.columns\n ]) for i in range(min(len(dataframe), max_rows))]\n )\n\n\nexternal_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\n\napp = dash.Dash(__name__, external_stylesheets=external_stylesheets)\n\napp.layout = html.Div(children=[\n html.H4(children='csv to read'),\n generate_table(df4)\n])\n\nif __name__ == '__main__':\n app.run_server(debug=True)","sub_path":"maxoutput.py","file_name":"maxoutput.py","file_ext":"py","file_size_in_byte":1354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"254657876","text":"# uncompyle6 version 3.6.7\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.8.2 (tags/v3.8.2:7b3ab59, Feb 25 2020, 23:03:10) [MSC v.1916 64 bit (AMD64)]\n# Embedded file name: build/bdist.linux-x86_64/egg/pyrpl/hardware_modules/scope.py\n# Compiled at: 2017-08-29 
09:44:06\nimport time\nfrom .dsp import all_inputs, dsp_addr_base, InputSelectRegister\nfrom ..acquisition_module import AcquisitionModule\nfrom ..async_utils import MainThreadTimer, PyrplFuture, sleep\nfrom ..pyrpl_utils import sorted_dict\nfrom ..attributes import *\nfrom ..modules import HardwareModule\nfrom ..pyrpl_utils import time\nfrom ..widgets.module_widgets import ScopeWidget\nlogger = logging.getLogger(name=__name__)\ndata_length = 16384\n\nclass DecimationRegister(SelectRegister):\n \"\"\"\n Careful: changing decimation changes duration and sampling_time as well\n \"\"\"\n\n def set_value(self, obj, value):\n SelectRegister.set_value(self, obj, value)\n obj.__class__.duration.value_updated(obj, obj.duration)\n obj.__class__.sampling_time.value_updated(obj, obj.sampling_time)\n\n\nclass DurationProperty(SelectProperty):\n\n def get_value(self, obj):\n return obj.sampling_time * float(obj.data_length)\n\n def validate_and_normalize(self, obj, value):\n value = float(value)\n options = self.options(obj).keys()\n try:\n return min([ opt for opt in options if opt >= value ], key=lambda x: abs(x - value))\n except ValueError:\n obj._logger.info('Selected duration is longer than physically possible with the employed hardware. Picking longest-possible value %s. ', max(options))\n return max(options)\n\n def set_value(self, obj, value):\n \"\"\"sets returns the duration of a full scope sequence the rounding\n makes sure that the actual value is longer or equal to the set value\"\"\"\n obj.sampling_time = float(value) / obj.data_length\n\n\nclass SamplingTimeProperty(SelectProperty):\n\n def get_value(self, obj):\n return 8e-09 * float(obj.decimation)\n\n def validate_and_normalize(self, obj, value):\n value = float(value)\n options = self.options(obj).keys()\n try:\n return min([ opt for opt in options if opt <= value ], key=lambda x: abs(x - value))\n except ValueError:\n obj._logger.info('Selected sampling time is shorter than physically possible with the employed hardware. Picking shortest-possible value %s. ', min(options))\n return min(options)\n\n def set_value(self, instance, value):\n \"\"\"sets or returns the time separation between two subsequent\n points of a scope trace the rounding makes sure that the actual\n value is shorter or equal to the set value\"\"\"\n instance.decimation = float(value) / 8e-09\n\n\nclass ContinuousRollingFuture(PyrplFuture):\n \"\"\"\n This Future object is the one controlling the acquisition in\n rolling_mode. 
It will never be fullfilled (done), since rolling_mode\n is always continuous, but the timer/slot mechanism to control the\n rolling_mode acquisition is encapsulated in this object.\n \"\"\"\n DELAY_ROLLING_MODE_MS = 20\n current_avg = 1\n\n def __init__(self, module):\n super(ContinuousRollingFuture, self).__init__()\n self._module = module\n self._timer = MainThreadTimer(self.DELAY_ROLLING_MODE_MS)\n self._timer.timeout.connect(self._get_rolling_curve)\n\n def _get_rolling_curve(self):\n if not self._module._is_rolling_mode_active():\n return\n if not self._module.running_state == 'running_continuous':\n return\n data_x, datas = self._module._get_rolling_curve()\n self._module._emit_signal_by_name('display_curve', [data_x,\n datas])\n self.data_avg = datas\n self.data_x = data_x\n self._timer.start()\n\n def start(self):\n self._module._start_acquisition_rolling_mode()\n self._timer.start()\n\n def pause(self):\n self._timer.stop()\n\n def _set_run_continuous(self):\n \"\"\"\n Dummy function: ContinuousRollingFuture instance is always\n \"_run_continuous\"\n \"\"\"\n pass\n\n\nclass Scope(HardwareModule, AcquisitionModule):\n addr_base = 1074790400\n name = 'scope'\n _widget_class = ScopeWidget\n _gui_attributes = [\n 'input1',\n 'input2',\n 'duration',\n 'average',\n 'trigger_source',\n 'trigger_delay',\n 'threshold',\n 'hysteresis',\n 'ch1_active',\n 'ch2_active',\n 'xy_mode']\n _setup_attributes = _gui_attributes + ['rolling_mode', 'running_state']\n data_length = data_length\n rolling_mode = BoolProperty(default=True, doc='In rolling mode, the curve is continuously acquired and translated from the right to the left of the screen while new data arrive.', call_setup=True)\n\n @property\n def inputs(self):\n return list(all_inputs(self).keys())\n\n input1 = InputSelectRegister(-addr_base + dsp_addr_base('asg0') + 0, options=all_inputs, default='in1', ignore_errors=True, doc='selects the input signal of the module')\n input2 = InputSelectRegister(-addr_base + dsp_addr_base('asg1') + 0, options=all_inputs, default='in2', ignore_errors=True, doc='selects the input signal of the module')\n _reset_writestate_machine = BoolRegister(0, 1, doc='Set to True to reset writestate machine. Automatically goes back to false.')\n _trigger_armed = BoolRegister(0, 0, doc='Set to True to arm trigger')\n _trigger_sources = sorted_dict({'off': 0, 'immediately': 1, \n 'ch1_positive_edge': 2, \n 'ch1_negative_edge': 3, \n 'ch2_positive_edge': 4, \n 'ch2_negative_edge': 5, \n 'ext_positive_edge': 6, \n 'ext_negative_edge': 7, \n 'asg0': 8, \n 'asg1': 9, \n 'dsp': 10}, sort_by_values=True)\n trigger_sources = _trigger_sources.keys()\n _trigger_source_register = SelectRegister(4, doc='Trigger source', options=_trigger_sources)\n trigger_source = SelectProperty(default='immediately', options=_trigger_sources.keys(), doc=\"Trigger source for the scope. Use 'immediately' if no synchronisation is required. Trigger_source will be ignored in rolling_mode.\", call_setup=True)\n _trigger_debounce = IntRegister(144, doc='Trigger debounce time [cycles]')\n trigger_debounce = FloatRegister(144, bits=20, norm=125000000.0, doc='Trigger debounce time [s]')\n threshold = FloatRegister(8, bits=14, norm=8192, doc='trigger threshold [volts]')\n hysteresis = FloatRegister(32, bits=14, norm=8192, doc='hysteresis for trigger [volts]')\n\n @property\n def threshold_ch1(self):\n self._logger.warning('The scope attribute \"threshold_chx\" is deprecated. 
Please use \"threshold\" instead!')\n return self.threshold\n\n @threshold_ch1.setter\n def threshold_ch1(self, v):\n self._logger.warning('The scope attribute \"threshold_chx\" is deprecated. Please use \"threshold\" instead!')\n self.threshold = v\n\n @property\n def threshold_ch2(self):\n self._logger.warning('The scope attribute \"threshold_chx\" is deprecated. Please use \"threshold\" instead!')\n return self.threshold\n\n @threshold_ch2.setter\n def threshold_ch2(self, v):\n self._logger.warning('The scope attribute \"threshold_chx\" is deprecated. Please use \"threshold\" instead!')\n self.threshold = v\n\n @property\n def hysteresis_ch1(self):\n self._logger.warning('The scope attribute \"hysteresis_chx\" is deprecated. Please use \"hysteresis\" instead!')\n return self.hysteresis\n\n @hysteresis_ch1.setter\n def hysteresis_ch1(self, v):\n self._logger.warning('The scope attribute \"hysteresis_chx\" is deprecated. Please use \"hysteresis\" instead!')\n self.hysteresis = v\n\n @property\n def hysteresis_ch2(self):\n self._logger.warning('The scope attribute \"hysteresis_chx\" is deprecated. Please use \"hysteresis\" instead!')\n return self.hysteresis\n\n @hysteresis_ch2.setter\n def hysteresis_ch2(self, v):\n self._logger.warning('The scope attribute \"hysteresis_chx\" is deprecated. Please use \"hysteresis\" instead!')\n self.hysteresis = v\n\n _trigger_delay_register = IntRegister(16, doc='number of decimated data after trigger written into memory [samples]')\n trigger_delay = FloatProperty(min=-10, max=8e-09 * 1073741824, doc=\"delay between trigger and acquisition start.\\nnegative values down to -duration are allowed for pretrigger. In trigger_source='immediately', trigger_delay is ignored.\", call_setup=True)\n _trigger_delay_running = BoolRegister(0, 2, doc='trigger delay running (register adc_dly_do)')\n _adc_we_keep = BoolRegister(0, 3, doc='Scope resets trigger automatically (adc_we_keep)')\n _adc_we_cnt = IntRegister(44, doc='Number of samles that have passed since trigger was armed (adc_we_cnt)')\n current_timestamp = LongRegister(348, bits=64, doc='An absolute counter ' + 'for the time [cycles]')\n trigger_timestamp = LongRegister(356, bits=64, doc='An absolute counter ' + 'for the trigger time [cycles]')\n _decimations = sorted_dict({2 ** n:2 ** n for n in range(0, 17)}, sort_by_values=True)\n decimations = _decimations.keys()\n decimation = DecimationRegister(20, doc='decimation factor', default=8192, options=_decimations, call_setup=True)\n sampling_times = [ 8e-09 * dec for dec in decimations ]\n sampling_time = SamplingTimeProperty(options=sampling_times)\n durations = [ st * data_length for st in sampling_times ]\n duration = DurationProperty(options=durations)\n _write_pointer_current = IntRegister(24, doc='current write pointer position [samples]')\n _write_pointer_trigger = IntRegister(28, doc='write pointer when trigger arrived [samples]')\n average = BoolRegister(40, 0, doc='Enables averaging during decimation if set to True')\n voltage_in1 = FloatRegister(340, bits=14, norm=8192, doc='in1 current value [volts]')\n voltage_in2 = FloatRegister(344, bits=14, norm=8192, doc='in2 current value [volts]')\n voltage_out1 = FloatRegister(356, bits=14, norm=8192, doc='out1 current value [volts]')\n voltage_out2 = FloatRegister(360, bits=14, norm=8192, doc='out2 current value [volts]')\n ch1_firstpoint = FloatRegister(65536, bits=14, norm=8192, doc='1 sample of ch1 data [volts]')\n ch2_firstpoint = FloatRegister(131072, bits=14, norm=8192, doc='1 sample of ch2 data 
[volts]')\n pretrig_ok = BoolRegister(364, 0, doc='True if enough data have been acquired to fill the pretrig buffer')\n ch1_active = BoolProperty(default=True, doc='should ch1 be displayed in the gui?')\n ch2_active = BoolProperty(default=True, doc='should ch2 be displayed in the gui?')\n xy_mode = BoolProperty(default=False, doc='in xy-mode, data are plotted vs the other channel (instead of time)')\n\n def _ownership_changed(self, old, new):\n \"\"\"\n If the scope was in continuous mode when slaved, it has to stop!!\n \"\"\"\n if new is not None:\n self.stop()\n return\n\n @property\n def _rawdata_ch1(self):\n \"\"\"raw data from ch1\"\"\"\n x = np.array(self._reads(65536, self.data_length), dtype=np.int16)\n x[(x >= 8192)] -= 16384\n return x\n\n @property\n def _rawdata_ch2(self):\n \"\"\"raw data from ch2\"\"\"\n x = np.array(self._reads(131072, self.data_length), dtype=np.int16)\n x[(x >= 8192)] -= 16384\n return x\n\n @property\n def _data_ch1(self):\n \"\"\" acquired (normalized) data from ch1\"\"\"\n return np.array(np.roll(self._rawdata_ch1, -(self._write_pointer_trigger + self._trigger_delay_register + 1)), dtype=np.float) / 8192\n\n @property\n def _data_ch2(self):\n \"\"\" acquired (normalized) data from ch2\"\"\"\n return np.array(np.roll(self._rawdata_ch2, -(self._write_pointer_trigger + self._trigger_delay_register + 1)), dtype=np.float) / 8192\n\n @property\n def _data_ch1_current(self):\n \"\"\" (unnormalized) data from ch1 while acquisition is still running\"\"\"\n return np.array(np.roll(self._rawdata_ch1, -(self._write_pointer_current + 1)), dtype=np.float) / 8192\n\n @property\n def _data_ch2_current(self):\n \"\"\" (unnormalized) data from ch2 while acquisition is still running\"\"\"\n return np.array(np.roll(self._rawdata_ch2, -(self._write_pointer_current + 1)), dtype=np.float) / 8192\n\n @property\n def times(self):\n duration = self.duration\n trigger_delay = self.trigger_delay\n if self.trigger_source != 'immediately':\n return np.linspace(trigger_delay - duration / 2.0, trigger_delay + duration / 2.0, self.data_length, endpoint=False)\n else:\n return np.linspace(0, duration, self.data_length, endpoint=False)\n\n def wait_for_pretrigger(self):\n \"\"\" sleeps until scope trigger is ready (buffer has enough new data)\"\"\"\n while not self.pretrig_ok:\n sleep(0.001)\n\n def curve_ready(self):\n \"\"\"\n Returns True if new data is ready for transfer\n \"\"\"\n return not self._trigger_armed and not self._trigger_delay_running and self._setup_called\n\n def _curve_acquiring(self):\n \"\"\"\n Returns True if data is in the process of being acquired, i.e.\n waiting for trigger event or for acquisition of data after\n trigger event.\n \"\"\"\n return (self._trigger_armed or self._trigger_delay_running) and self._setup_called\n\n def _get_ch(self, ch):\n if ch not in (1, 2):\n raise ValueError('channel should be 1 or 2, got ' + str(ch))\n if ch == 1:\n return self._data_ch1\n return self._data_ch2\n\n @property\n def data_x(self):\n return self.times\n\n def _get_curve(self):\n \"\"\"\n Simply pack together channel 1 and channel 2 curves in a numpy array\n \"\"\"\n return np.array((self._get_ch(1), self._get_ch(2)))\n\n def _remaining_time(self):\n \"\"\"\n :returns curve duration - ellapsed duration since last setup() call.\n \"\"\"\n return self.duration - (time() - self._last_time_setup)\n\n def _data_ready(self):\n \"\"\"\n :return: True if curve is ready in the hardware, False otherwise.\n \"\"\"\n return self.curve_ready()\n\n def _start_acquisition(self):\n 
\"\"\"\n Start acquisition of a curve in rolling_mode=False\n \"\"\"\n autosave_backup = self._autosave_active\n self._autosave_active = False\n self._setup_called = True\n self._reset_writestate_machine = True\n if self.trigger_source == 'immediately':\n self._trigger_delay_register = self.data_length\n else:\n delay = int(np.round(self.trigger_delay / self.sampling_time)) + self.data_length // 2\n if delay <= 0:\n delay = 1\n elif delay > 4294967295:\n delay = 4294967295\n self._trigger_delay_register = delay\n self._trigger_armed = True\n self._trigger_source_register = self.trigger_source\n self._autosave_active = autosave_backup\n self._last_time_setup = time()\n\n def _start_acquisition_rolling_mode(self):\n self._start_acquisition()\n self._trigger_source_register = 'off'\n self._trigger_armed = True\n\n def _rolling_mode_allowed(self):\n \"\"\"\n Only if duration larger than 0.1 s\n \"\"\"\n return self.duration > 0.1\n\n def _is_rolling_mode_active(self):\n \"\"\"\n Rolling_mode property evaluates to True and duration larger than 0.1 s\n \"\"\"\n return self.rolling_mode and self._rolling_mode_allowed()\n\n def _get_ch_no_roll(self, ch):\n if ch not in (1, 2):\n raise ValueError('channel should be 1 or 2, got ' + str(ch))\n if ch == 1:\n return self._rawdata_ch1 * 1.0 / 8192\n return self._rawdata_ch2 * 1.0 / 8192\n\n def _get_rolling_curve(self):\n datas = np.zeros((2, len(self.times)))\n wp0 = self._write_pointer_current\n times = self.times\n times -= times[(-1)]\n for ch, active in (\n (\n 0, self.ch1_active),\n (\n 1, self.ch2_active)):\n if active:\n datas[ch] = self._get_ch_no_roll(ch + 1)\n\n wp1 = self._write_pointer_current\n for index, active in [(0, self.ch1_active),\n (\n 1, self.ch2_active)]:\n if active:\n data = datas[index]\n to_discard = (wp1 - wp0) % self.data_length\n data = np.roll(data, self.data_length - wp0)[to_discard:]\n data = np.concatenate([[np.nan] * to_discard, data])\n datas[index] = data\n\n return (\n times, datas)\n\n def save_curve(self):\n \"\"\"\n Saves the curve(s) that is (are) currently displayed in the gui in\n the db_system. Also, returns the list [curve_ch1, curve_ch2]...\n \"\"\"\n d = self.setup_attributes\n curves = [None, None]\n for ch, active in [(0, self.ch1_active),\n (\n 1, self.ch2_active)]:\n if active:\n d.update({'ch': ch, 'name': self.curve_name + ' ch' + str(ch + 1)})\n curves[ch] = self._save_curve(self._run_future.data_x, self._run_future.data_avg[ch], **d)\n\n return curves\n\n def _new_run_future(self):\n if self._is_rolling_mode_active() and self.running_state == 'running_continuous':\n self._run_future.cancel()\n self._run_future = ContinuousRollingFuture(self)\n else:\n super(Scope, self)._new_run_future()","sub_path":"pycfiles/pyrple-0.3.tar/scope.py","file_name":"scope.py","file_ext":"py","file_size_in_byte":17550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"554181825","text":"# Desenvolva um programa que leia o primeiro termo e a razão de uma PA. \n# No final, mostre os 10 primeiros termos dessa progressão.\n\n# a1 = int(input(\"Qual vai ser o 1° termo? \"))\n# n = 10\n# r = int(input(\"Qual vai ser a razão? 
\"))\n# an = a1 + (n - 1) * r\n# n = 0\n# for c in range(a1, an + 1, r):\n# n = n + 1\n# print(f\"O {n} termo é: {c}\")\n\nprimeiro = int(input(\"Primeiro termo: \"))\nrazão = int(input(\"Razão: \"))\ndécimo = primeiro + (10 - 1) * razão\nfor c in range(primeiro, décimo + razão, razão):\n print(f\"{c} \", end='-> ')\nprint(\"ACABOU\")\n","sub_path":"exercicios/desafio051.py","file_name":"desafio051.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"402701863","text":"import h5py\nimport numpy as np\nimport mne\n\nfrom matplotlib.pyplot import imshow\nfrom numpy import log, mean\nfrom .util import eeg_to_montage\n\n\nclass AvgEpochsTFR:\n \"\"\"\n This class contains the PSD of a set of Epochs. It stores the data of\n the psds of each epoch. The psds are calculated with the Library mne.\n Attributes:\n ============\n picks (array[int]) : Contains the picked channels\n tfr (EpochsTFR) : Contains the EpochsTFR data computed by mne\n\n Methods:\n ============\n __init__ : Computes the EpochsTFR data\n plot_time_freq : Plot the time-frequency display\n plot_freq_ch : Plot the frequency-channel display\n plot_time_ch : Plot the time-channel display\n \"\"\"\n # ------------------------------------------------------------------------\n def __init__(self, epochs=None, freqs=None, n_cycles=None,\n method='multitaper', time_bandwidth=4., n_fft=512, width=1,\n picks=None, type='all'):\n \"\"\"\n Initialize the class with an instance of EpochsTFR corresponding\n to the method.\n \"\"\"\n self.cmap = 'jet'\n self.method = method\n\n if hasattr(epochs, 'data'):\n self.evoked = True\n else:\n self.evoked = False\n\n if epochs is not None:\n if type == 'eeg':\n epochs = epochs.copy().pick_types(meg=False, eeg=True)\n elif type == 'mag':\n epochs = epochs.copy().pick_types(meg='mag')\n elif type == 'grad':\n epochs = epochs.copy().pick_types(meg='grad')\n else:\n epochs = epochs.copy()\n self.info = epochs.info\n\n if picks is not None:\n self.picks = picks\n else:\n self.picks = list(range(0, len(epochs.info['ch_names'])))\n for bad in epochs.info['bads']:\n try:\n bad_pick = epochs.info['ch_names'].index(bad)\n self.picks.remove(bad_pick)\n except Exception as e:\n print(e)\n\n montage = eeg_to_montage(epochs)\n if montage is not None:\n # First we create variable head_pos for a correct plotting\n self.pos = montage.get_pos2d()\n scale = 1 / (self.pos.max(axis=0) - self.pos.min(axis=0))\n center = 0.5 * (self.pos.max(axis=0) + self.pos.min(axis=0))\n self.head_pos = {'scale': scale, 'center': center}\n\n # Handling of possible channels without any known coordinates\n no_coord_channel = False\n try:\n names = montage.ch_names\n indices = [names.index(epochs.info['ch_names'][i])\n for i in self.picks]\n self.pos = self.pos[indices, :]\n except Exception as e:\n print(e)\n no_coord_channel = True\n\n # If there is not as much positions as the number of Channels\n # we have to eliminate some channels from the data of topomaps\n if no_coord_channel:\n from mne.channels import read_montage\n from numpy import array\n\n index = 0\n self.pos = [] # positions\n # index in the self.data of channels with coordinates\n self.with_coord = []\n\n for i in self.picks:\n ch_name = epochs.info['ch_names'][i]\n try:\n ch_montage = read_montage(\n montage.kind, ch_names=[ch_name])\n coord = ch_montage.get_pos2d()\n self.pos.append(coord[0])\n self.with_coord.append(index)\n except Exception as e:\n print(e)\n index += 1\n 
self.pos = array(self.pos)\n\n else:\n self.with_coord = [i for i in range(len(self.picks))]\n\n else: # If there is no montage available\n self.head_pos = None\n self.with_coord = []\n\n if method == 'multitaper':\n from mne.time_frequency import tfr_multitaper\n self.params = dict(freqs=freqs, n_cycles=n_cycles,\n time_bandwidth=time_bandwidth)\n\n if self.evoked:\n self.tfr, self.itc = tfr_multitaper(\n epochs, freqs, n_cycles,\n time_bandwidth=time_bandwidth,\n picks=self.picks), None\n else:\n self.tfr, self.itc = tfr_multitaper(\n epochs, freqs, n_cycles,\n time_bandwidth=time_bandwidth,\n picks=self.picks, return_itc=True)\n\n if method == 'morlet':\n from mne.time_frequency import tfr_morlet\n self.params = dict(freqs=freqs, n_cycles=n_cycles)\n if self.evoked:\n self.tfr, self.itc = tfr_morlet(\n epochs, freqs, n_cycles,\n picks=self.picks), None\n else:\n self.tfr, self.itc = tfr_morlet(\n epochs, freqs, n_cycles,\n picks=self.picks, return_itc=True)\n\n if method == 'stockwell':\n from mne.time_frequency import tfr_stockwell\n # The stockwell function does not handle picks like the two\n # other ones ...\n picked_ch_names = [epochs.info['ch_names'][i]\n for i in self.picks]\n picked = epochs.copy().pick_channels(picked_ch_names)\n self.params = dict(fmin=freqs[0], fmax=freqs[-1], n_fft=n_fft,\n width=width)\n if self.evoked:\n self.tfr, self.itc = tfr_stockwell(\n picked, fmin=freqs[0], fmax=freqs[-1],\n n_fft=n_fft, width=width), None\n else:\n self.tfr, self.itc = tfr_stockwell(\n picked, fmin=freqs[0], fmax=freqs[-1],\n n_fft=n_fft, width=width, return_itc=True)\n else:\n # Only for initializing an empty class...\n self.tfr = None\n self.itc = None\n\n # ------------------------------------------------------------------------\n def init(self, epochs=None, freqs=None, n_cycles=None,\n method='multitaper', time_bandwidth=4., n_fft=512, width=1,\n picks=None, type='all'):\n \"\"\"Init and returns.\"\"\"\n\n self.__init__(epochs=epochs, freqs=freqs, n_cycles=n_cycles,\n method=method, time_bandwidth=time_bandwidth,\n n_fft=n_fft, width=width, picks=picks, type=type)\n return self\n\n # ------------------------------------------------------------------------\n def init_from_hdf(self, fname):\n \"\"\"Init from hdf file.\"\"\"\n channel_types = mne.io.pick.get_channel_types()\n\n # Start by initializing everything\n f = h5py.File(fname, 'r+')\n dic = f['mnepython']\n freqs = dic['key_freqs'][()]\n times = dic['key_times'][()]\n method = ''.join([chr(x) for x in dic['key_method'][()]])\n chs = dic['key_info']['key_chs']\n tfr_data = np.zeros((\n len([ch for ch in chs.keys()]), len(freqs), len(times)))\n itc_data = np.copy(tfr_data)\n names = []\n locs = []\n ch_types = []\n for i, key in enumerate(chs.keys()):\n tfr_data[i, :, :] = dic['key_data'][key]['key_tfr'][()]\n try: # Simply try to get the itc data if it exists\n itc_data[i, :, :] = dic['key_data'][key]['key_itc'][()]\n except Exception:\n pass\n ch = chs[key]\n ch_val = ch['key_kind'][()][0]\n for t, rules in channel_types.items():\n for key, vals in rules.items():\n try:\n if ch['key_' + key] not in np.array(vals):\n break\n except Exception:\n break\n else:\n ch_types.append(t)\n name = ''.join([chr(x) for x in ch['key_ch_name']])\n loc = ch['key_loc'][()][0:3]\n names.append(name)\n locs.append(loc)\n locs = np.array(locs)\n self.picks = [i for i in range(len(names))]\n montage = mne.channels.Montage(locs, names, 'custom',\n [i for i in range(len(locs))])\n # First we create variable head_pos for a correct plotting\n 
self.pos = montage.get_pos2d()\n\n scale = 1 / (self.pos.max(axis=0) - self.pos.min(axis=0))\n center = 0.5 * (self.pos.max(axis=0) + self.pos.min(axis=0))\n self.head_pos = {'scale': scale, 'center': center}\n\n # Handling of possible channels without any known coordinates\n no_coord_channel = False\n try:\n names = montage.ch_names\n indices = self.picks\n self.pos = self.pos[indices, :]\n except Exception as e:\n print(e)\n no_coord_channel = True\n\n # If there is not as much positions as the number of Channels\n # we have to eliminate some channels from the data of topomaps\n if no_coord_channel:\n from mne.channels import read_montage\n from numpy import array\n\n index = 0\n self.pos = [] # positions\n # index in the self.data of channels with coordinates\n self.with_coord = []\n\n for i in self.picks:\n ch_name = epochs.info['ch_names'][i]\n try:\n ch_montage = read_montage(\n montage.kind, ch_names=[ch_name])\n coord = ch_montage.get_pos2d()\n self.pos.append(coord[0])\n self.with_coord.append(index)\n except Exception as e:\n print(e)\n index += 1\n self.pos = array(self.pos)\n\n else:\n self.with_coord = [i for i in range(len(self.picks))]\n\n self.info = mne.create_info(names, 1, montage=montage,\n ch_types='eeg')\n # eeg is just a trick to not raise valueError...\n self.tfr = mne.time_frequency.AverageTFR(\n self.info, tfr_data, times, freqs, len(self.picks))\n if np.count_nonzero(itc_data):\n self.evoked = False\n self.itc = mne.time_frequency.AverageTFR(\n self.info, itc_data, times, freqs, len(self.picks))\n else:\n self.evoked = True\n self.itc = None\n return self\n\n # ------------------------------------------------------------------------\n def plot_time_freq(self, index_channel, ax,\n vmin=None, vmax=None, log_display=False):\n \"\"\"\n Plot the averaged epochs time-frequency plot for a given channel.\n \"\"\"\n data = self.tfr.data[index_channel, :, :]\n if log_display:\n data = 10 * log(data / mean(data))\n extent = [self.tfr.times[0], self.tfr.times[-1],\n self.tfr.freqs[0], self.tfr.freqs[-1]]\n return ax.imshow(data, extent=extent, aspect='auto',\n origin='lower', vmax=vmax, vmin=vmin, cmap=self.cmap)\n\n # ------------------------------------------------------------------------\n def plot_itc(self, index_channel, ax,\n vmin=None, vmax=None, log_display=False):\n \"\"\"\n Plot the averaged epochs itc plot for a given channel.\n \"\"\"\n data = self.itc.data[index_channel, :, :]\n if log_display:\n data = 10 * log(data / mean(data))\n extent = [self.itc.times[0], self.itc.times[-1],\n self.itc.freqs[0], self.itc.freqs[-1]]\n return ax.imshow(data, extent=extent, aspect='auto',\n origin='lower', vmax=vmax, vmin=vmin, cmap=self.cmap)\n\n # ------------------------------------------------------------------------\n def plot_freq_ch(self, time_index, ax,\n vmin=None, vmax=None, log_display=False):\n \"\"\"Plot the averaged epochs frequency-channel plot for a given time.\"\"\"\n data = self.tfr.data[:, :, time_index]\n if log_display:\n data = 10 * log(data / mean(data))\n extent = [self.tfr.freqs[0], self.tfr.freqs[-1],\n .5, len(self.picks)+.5]\n return ax.imshow(data, extent=extent, aspect='auto',\n origin='lower', vmax=vmax, vmin=vmin, cmap=self.cmap)\n\n # ------------------------------------------------------------------------\n def plot_time_ch(self, freq_index, ax,\n vmin=None, vmax=None, log_display=False):\n \"\"\"\n Plot the averaged epochs time-channel plot for a given frequency\n range.\n \"\"\"\n data = self.tfr.data[:, freq_index, :]\n if log_display:\n 
data = 10 * log(data / mean(data))\n extent = [self.tfr.times[0], self.tfr.times[-1],\n .5, len(self.picks)+.5]\n return ax.imshow(data, extent=extent, aspect='auto',\n origin='lower', vmax=vmax, vmin=vmin, cmap=self.cmap)\n\n # ------------------------------------------------------------------------\n def save_hdf5(self, path, overwrite=True):\n \"\"\"Save data as hdf5 file.\"\"\"\n from mne.externals.h5io import write_hdf5\n\n if self.evoked:\n data = [{self.info['ch_names'][i]: self.info['ch_names'][i],\n 'tfr': self.tfr.data[i, :, :]}\n for i in range(len(self.info['ch_names']))]\n else:\n data = [{self.info['ch_names'][i]: self.info['ch_names'][i],\n 'tfr': self.tfr.data[i, :, :],\n 'itc': self.itc.data[i, :, :]}\n for i in range(len(self.info['ch_names']))]\n\n out = dict(freqs=self.tfr.freqs,\n times=self.tfr.times,\n data=data,\n info=self.info,\n method=self.method,\n parameters=self.params)\n write_hdf5(path, out, title='mnepython', overwrite=overwrite)\n","sub_path":"mnelab/tfr/backend/avg_epochs_tfr.py","file_name":"avg_epochs_tfr.py","file_ext":"py","file_size_in_byte":14765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"311992659","text":"# Copyright 2016 Mirantis, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom fuelweb_test.settings import DEPLOYMENT_MODE\nfrom fuelweb_test.tests.base_test_case import TestBasic\n\n\nclass TestLoadBase(TestBasic):\n \"\"\"\n\n This class contains basic methods for different load tests scenarios.\n\n \"\"\"\n\n def prepare_load_ceph_ha(self):\n \"\"\"Prepare cluster in HA mode with ceph for load tests\n\n Scenario:\n 1. Create cluster\n 2. Add 3 nodes with controller + ceph-osd roles\n 3. Add 2 node with compute role\n 4. Deploy the cluster\n 5. 
Make snapshot\n\n Duration 70m\n Snapshot prepare_load_ceph_ha\n \"\"\"\n\n if self.env.d_env.has_snapshot(\"prepare_load_ceph_ha\"):\n return\n\n self.env.revert_snapshot(\"ready_with_5_slaves\")\n\n self.show_step(1, initialize=True)\n cluster_id = self.fuel_web.create_cluster(\n name=self.__class__.__name__,\n mode=DEPLOYMENT_MODE,\n settings={\n 'volumes_ceph': True,\n 'images_ceph': True,\n 'volumes_lvm': False,\n 'osd_pool_size': \"3\"\n }\n )\n self.show_step(2)\n self.show_step(3)\n self.fuel_web.update_nodes(\n cluster_id,\n {\n 'slave-01': ['controller', 'ceph-osd'],\n 'slave-02': ['controller', 'ceph-osd'],\n 'slave-03': ['controller', 'ceph-osd'],\n 'slave-04': ['compute'],\n 'slave-05': ['compute']\n }\n )\n\n self.show_step(4)\n self.fuel_web.deploy_cluster_wait(cluster_id)\n\n self.fuel_web.verify_network(cluster_id)\n self.fuel_web.run_ostf(\n cluster_id=cluster_id)\n\n self.show_step(5)\n self.env.make_snapshot(\"prepare_load_ceph_ha\", is_make=True)\n","sub_path":"fuelweb_test/tests/tests_strength/test_load_base.py","file_name":"test_load_base.py","file_ext":"py","file_size_in_byte":2431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"510988221","text":"import logging; logging.basicConfig(level=logging.INFO)\n\nimport asyncio, os, json, time\nfrom datetime import datetime\n\nfrom aiohttp import web\n\n# -*- coding: utf-8 -*-\nimport logging\nlogging.basicConfig(level=logging.INFO)\nimport os,json,time,asyncio\nfrom datetime import datetime\nfrom aiohttp import web\n\n# def index(request): # 原始简单的url处理函数\n# return web.Response(body=b'
<h1>Awesome</h1>
',content_type='text/html')\n\ndef init_jinja2(app, **kw): #初始化 jinja2的 env\n pass\n\nasync def logger_factory(app, handler):\n async def logger(request):\n logging.info('Request: %s %s' % (request.method, request.path))\n return (await handler(request))\n return logger\n\n# 生产 post 提交的数据\nasync def data_factory(app, handler):\n pass\n\n#将url处理函数的返回值 转换成 response 对象\nasync def response_factory(app, handler):\n pass\n return response\n\n# 将blog 评论的发布时间 转换成 多少时间以前\ndef datetime_filter(t):\n delta = int(time.time() - t)\n if delta < 60:\n return u'1分钟前'\n if delta < 3600:\n return u'%s分钟前' % (delta // 60)\n if delta < 86400:\n return u'%s小时前' % (delta // 3600)\n if delta < 604800:\n return u'%s天前' % (delta // 86400)\n dt = datetime.fromtimestamp(t)\n return u'%s年%s月%s日' % (dt.year, dt.month, dt.day)\n\nasync def init(loop):\n #db = configs.configs.db\n await orm.create_pool(loop=loop,)\n #DeprecationWarning: loop argument is deprecated\n app = web.Application(loop = loop,middlewares=[ #拦截器 一个URL在被某个函数处理前,可以经过一系列的middleware的处理。\n logger_factory, response_factory #工厂模式\n ])\n init_jinja2(app, filters=dict(datetime=datetime_filter))\n add_routes(app, 'handlers')\n add_static(app)\n\n # DeprecationWarning: Application.make_handler(...) is deprecated, use AppRunner API instead\n runner = web.AppRunner(app)\n await runner.setup()\n site = web.TCPSite(runner, '192.168.2.101', 9000)\n logging.info('server started at http://192.168.2.101:9000...')\n await site.start()\n \n #以前的写法\n # srv = await loop.create_server(app.make_handler(), '192.168.2.101', 9000)\n # logging.info('server started at http://192.168.2.101:9000...')\n # return srv\n\nloop = asyncio.get_event_loop()\nloop.run_until_complete(init(loop))\nloop.run_forever()\n\ndef index(request):\n return web.Response(body=b'
<h1>Awesome</h1>
')\n\n@asyncio.coroutine\nasync def init(loop):\n logging.info('server started at http://127.0.0.1:9000...')\n app = web.Application(loop = loop)\n app.router.add_route(\"get\",\"/\",index)\n runner=web.AppRunner(app)\n await runner.setup()\n site = web.TCPSite(runner, 'localhost', 8080)\n await site.start()\n return site\nloop = asyncio.get_event_loop()\nloop.run_until_complete(init(loop))\nloop.run_forever()","sub_path":"rubbish_coding/Test_WebAPP/www/a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":2915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"276626272","text":"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\nimport requests\nfrom requests.exceptions import HTTPError\nimport time\nimport sys\nfrom bs4 import BeautifulSoup\nimport logging\nfrom apis import clean_text\n\n\ndef get_all_text(url):\n \"\"\"Retrieves all text in paragraphs.\n\n :param str url: The URL to scrap.\n\n :rtype: str :return: Text in the URL.\n \"\"\"\n\n try:\n response = requests.get(url)\n\n # If the response was successful, no Exception will be raised\n response.raise_for_status()\n except HTTPError as http_err:\n print(f'HTTP error occurred: {http_err}') # Python 3.6\n return None\n # sys.exit()\n except Exception as err:\n print(f'Other error occurred: {err}') # Python 3.6\n return None\n # sys.exit()\n\n soup = BeautifulSoup(response.text, \"lxml\")\n\n text = \"\"\n for i in soup.find_all('p'): # soup.select\n # i.encode(\"utf-8\") # default\n # Delete citations (e.g. \"The Alhambra is a UNESCO World Heritage Site.[2]\")\n text += i.get_text() + '\\n'\n\n text = clean_text.del_nonAscii(clean_text.del_refs(text))\n return text\n\n\ndef get_text_maxChars(url, maxChars):\n \"\"\"Retrieves all text in paragraphs up to a limit of characters.\n\n :param str url: The URL to scrap. \n :param str maxChars: Maximum number of characters to return. \n :rtype: str :return: Text in the URL.\n \"\"\"\n try:\n response = requests.get(url)\n\n # If the response was successful, no Exception will be raised\n response.raise_for_status()\n except HTTPError as http_err:\n print(f'HTTP error occurred: {http_err}') # Python 3.6\n return None\n # sys.exit()\n except Exception as err:\n print(f'Other error occurred: {err}') # Python 3.6\n return None\n # sys.exit()\n\n soup = BeautifulSoup(response.text, \"lxml\")\n\n text = \"\"\n l_text = 0\n for i in soup.find_all('p'): # soup.select\n l_paragraph = len(i.text)\n if l_text + l_paragraph > maxChars:\n break\n text += i.get_text() + '\\n'\n l_text += l_paragraph + 1\n\n logging.debug(l_text)\n text = clean_text.del_nonAscii(clean_text.del_coordinates(clean_text.del_refs(text)))\n return text\n\n\ndef get_entry_text(url):\n \"\"\"Retrieves text in paragraphs at Wikipedia header.\n\n :param str url: The URL to scrap. 
\n :rtype: str :return: Text in the URL.\n \"\"\"\n try:\n response = requests.get(url)\n\n # If the response was successful, no Exception will be raised\n response.raise_for_status()\n except HTTPError as http_err:\n print(f'HTTP error occurred: {http_err}') # Python 3.6\n return None\n # sys.exit()\n except Exception as err:\n print(f'Other error occurred: {err}') # Python 3.6\n return None\n # sys.exit()\n\n soup = BeautifulSoup(response.text, \"lxml\")\n # This will get the div\n div_container = soup.find('div', class_='mw-parser-output')\n # print(div_container)\n\n text = \"\"\n parag = None\n # Get first paragraph\n for child in div_container.children:\n # print(child)\n if ((child.name == 'p') and (len(child) > 1)):\n parag = child\n # print((child.prettify()))\n break\n\n if parag is None:\n return text\n else:\n text += parag.get_text() # + '\\n'\n\n # Then search in following contiainers that are paragraphs\n for sibling in parag.next_siblings:\n if sibling.name != 'p':\n break\n # print('Next sibling:', sibling)\n if len(sibling) > 1:\n text += sibling.get_text() # + '\\n'\n\n text = clean_text.del_nonAscii(clean_text.del_coordinates(clean_text.del_refs(text)))\n return text\n\n\nif __name__ == \"__main__\":\n URL = 'https://en.wikipedia.org/wiki/Alhambra'\n\n # from google_search import google_search, google_fast_search\n # URL = google_search('Torre ifel', num_res=1, lang='es')[0]\n # URL = google_fast_search('Torre ifel', lang='es')\n\n print(f'Searching in {URL} ...')\n # print(get_all_text(URL))\n # print(get_text_maxChars(URL, 1000))\n out_text = get_entry_text(URL)\n\n print(out_text)\n\n # with open('results_es.txt', 'w') as file:\n # file.write(out_text)\n","sub_path":"telegramBot/apis/web_scrap.py","file_name":"web_scrap.py","file_ext":"py","file_size_in_byte":4266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"398996553","text":"import pygame\nfrom pygame.locals import *\nfrom rpgstage import rpgstage\nfrom rpgmap import rpgmap\nfrom rpggamestatus import rpggamestatus \nfrom rpgchar import rpgchar\nfrom tile import tileloader\n#from rpgwindow import rpgwindow\nfrom rpgevent import rpgevent\n\nimport Window.msgwindow as msgw\n\nclass App:\n\tMAINLOOPSTATUS_PLAY = 1\n\tMAINLOOPSTATUS_MSG = 2\n\tMAINLOOPSTATUS_DASH = 4\n\n\tdef __init__(self):\n\t\tself.screen = pygame.display.set_mode((320,320), 0, 32)\n\t\tpygame.display.set_caption(\"testtilemap\")\n\t\t\n\t\tmmtldr = tileloader(src = 'til.png')\n\t\tmmchldr = tileloader(src = 'char.png')\n\t\tself.gamestatus = rpggamestatus()\n\t\tself.gamestatus.createdic()\n\n\t\t#-------Load stages\n\t\t_map = rpgmap(tldr=mmtldr)\n\t\t_map.load(\"test2.tmx\")\n\t\tself.stage1 = rpgstage()\n\t\tself.stage1.add_map(_map)\n\n\t\t_ev1 = rpgevent()\n\t\tself.stage1.add_event(_ev1)\n\t\t\n#\t\tgameinfo = {'screen': self.screen, \n#\t\t\t\t\t'stage' : self.stage1}\n\t\t#-------------\n\n\t\tself.character = rpgchar(chldr = mmchldr, chpt = (3,0))\n#\t\tself.character.img = mmchldr.get((0,0)) \n\t\tself.gamestatus.dic['screen'] = self.screen\n\t\tself.gamestatus.dic['stage'] = self.stage1\n\t\n\t\tself.KeyStatus = { \"up\" : 0,\n\t\t\t\t\t\t \"down\" : 0,\n\t\t\t\t\t\t \"left\" : 0,\n\t\t\t\t\t\t \"right\" : 0, \n\t\t\t\t\t\t \"enter\" : 0}\n\n\n#\t\tself.testwin1 = rpgwindow()\n#\t\tself.testwin1.set_rect(0, 0, 100, 40)\n\n\n\n\t\t#---------- Main loop status \n\t\tself.MainLoopStatus = App.MAINLOOPSTATUS_PLAY\n\n\t\tself.msgwin = None\n\n\tdef mainloop(self):\n\t\tclock = 
pygame.time.Clock()\n\t\twhile True:\n\t\t\tfor event in pygame.event.get():\n\t\t\t\tif event.type == QUIT: self.quit()\n\t\t\t\tif event.type == KEYDOWN : self.keydown(event)\n\t\t\t\tif event.type == KEYUP : self.keyup(event)\n\t\t\t\t\t\t\t\n\t\t\tself.draw()\n\t\t\tself.update()\n\n\t\t\tpygame.display.update()\n\t\t\tclock.tick(100)\n\n\tdef update(self):\n\t\t# Update All things here\n\t\tif self.MainLoopStatus == App.MAINLOOPSTATUS_PLAY:\n\t\t\tif self.KeyStatus[\"up\"]: self.character.cmd(\"up\")\n\t\t\tif self.KeyStatus[\"down\"]: self.character.cmd(\"down\")\n\t\t\tif self.KeyStatus[\"left\"]: self.character.cmd(\"left\")\n\t\t\tif self.KeyStatus[\"right\"]: self.character.cmd(\"right\")\n\t\t\t\n\t\tif self.MainLoopStatus == App.MAINLOOPSTATUS_MSG:\n\t\t\tif self.msgwin == None :\n\t\t\t\tself.MainLoopStatus = App.MAINLOOPSTATUS_PLAY\n\t\t\t\treturn\n\t\t\tself.msgwin.handlekey(self.KeyStatus)\n\t\t\tif self.msgwin.isexit():\n\t\t\t\tself.msgwin = None\n\n\t\tif self.MainLoopStatus == App.MAINLOOPSTATUS_DASH:\n\t\t\tpass\n\n\t\tself.character.update()\n\n\tdef draw(self):\n\t\tif self.MainLoopStatus == App.MAINLOOPSTATUS_PLAY or self.MainLoopStatus == App.MAINLOOPSTATUS_MSG:\n\t\t\tself.screen.fill((125,125,125))\n\t\t\tself.stage1.draw(self.screen, (0,0))\n\t\t\tself.character.draw(self.screen)\n\t\t\tif self.MainLoopStatus == App.MAINLOOPSTATUS_MSG:\n\t\t\t\tif self.msgwin == None: return\n\t\t\t\tself.msgwin.draw(self.screen)\n\n\tdef showmsg(self, msgwin):\n\t\tself.msgwin = msgwin\n\t\tself.MainLoopStatus = App.MAINLOOPSTATUS_MSG\n\n\tdef keyup(self,event):\n\t\tprint(\"->\", event.key)\n\t\tif event.key == K_UP : self.KeyStatus[\"up\"] = False\n\t\tif event.key == K_DOWN : self.KeyStatus[\"down\"] = False\n\t\tif event.key == K_LEFT : self.KeyStatus[\"left\"] = False\n\t\tif event.key == K_RIGHT : self.KeyStatus[\"right\"] = False\n\t\tif event.key == 13 : self.KeyStatus[\"enter\"] = False\n\n\tdef keydown(self,event):\n\t\tprint(\"<-\", event.key)\n\t\tif event.key == K_UP : self.KeyStatus[\"up\"] = True\n\t\tif event.key == K_DOWN : self.KeyStatus[\"down\"] = True\n\t\tif event.key == K_LEFT : self.KeyStatus[\"left\"] = True\n\t\tif event.key == K_RIGHT : self.KeyStatus[\"right\"] = True\n\t\tif event.key == 13 : self.KeyStatus[\"enter\"] = True\n\t\t\t\n\t\tif event.key == 97:\n\t\t\tmm = msgw.msgwindow(\"Test~\")\n\t\t\tself.showmsg(mm)\n\n\tdef quit(self):\n\t\tpygame.display.quit()\n\t\tpygame.quit()\n\t\texit()\n\n\ndef main():\n\tpygame.init()\n\tpygame.font.init()\n\tapp = App()\n\tapp.mainloop()\n\n\t\nif __name__ == \"__main__\":\n\tmain()\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"127857895","text":"#-*- coding:utf-8 -*-\n\n#past.view.settings\n\nfrom flask import g, flash, request, render_template, redirect\nfrom past import app\nfrom past import config\nfrom past.model.user import User\nfrom past.utils import is_valid_email\n\nfrom .utils import require_login\n\n@app.route(\"/settings\", methods=[\"GET\", \"POST\"])\n@require_login(\"/settings\")\ndef settings():\n intros = [g.user.get_thirdparty_profile(x).get(\"intro\") for x in config.OPENID_TYPE_DICT.values()]\n intros = filter(None, intros)\n\n if request.method == \"POST\":\n email = request.form.get(\"email\")\n if email and is_valid_email(email):\n r = g.user.set_email(email)\n if r:\n flash(u'个人信息更新成功', 'tip')\n else:\n flash(u'电子邮箱已被占用了', 'error')\n 
else:\n flash(u'电子邮箱更新失败', 'error')\n return render_template(\"settings.html\", **locals())\n\n","sub_path":"past/view/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"351392377","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport logging\nfrom liquidluck.options import g, settings\nfrom liquidluck.utils import import_object, walk_dir\n\nfrom liquidluck.writers.base import load_jinja\n\n\ndef load_settings(path):\n config = {}\n execfile(path, {}, config)\n\n for key in config:\n setting = config[key]\n if isinstance(setting, dict):\n settings[key].update(setting)\n else:\n settings[key] = setting\n\n g.output_directory = os.path.abspath(settings.output)\n g.static_directory = os.path.abspath(settings.static_output)\n logging.info('Load Settings Finished')\n\n\ndef load_posts(path):\n g.source_directory = path\n readers = []\n for name in settings.readers:\n reader = settings.readers[name]\n if reader:\n readers.append(import_object(reader))\n\n def detect_reader(filepath):\n for Reader in readers:\n reader = Reader(filepath)\n if reader.support():\n return reader.run()\n return None\n\n for filepath in walk_dir(path):\n post = detect_reader(filepath)\n if not post:\n g.pure_files.append(filepath)\n elif not post.date:\n g.pure_pages.append(post)\n elif post.public:\n g.public_posts.append(post)\n else:\n g.secure_posts.append(post)\n\n g.public_posts = sorted(g.public_posts, key=lambda o: o.date, reverse=True)\n g.secure_posts = sorted(g.secure_posts, key=lambda o: o.date, reverse=True)\n\n logging.info('Load Posts Finished')\n\n\ndef write_posts():\n writers = []\n for name in settings.writers:\n writer = settings.writers[name]\n if writer:\n writers.append(import_object(writer)())\n\n load_jinja()\n\n for writer in writers:\n writer.run()\n\n\ndef build(config='settings.py'):\n load_settings(config)\n load_posts(settings.source)\n write_posts()\n","sub_path":"liquidluck/generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":1935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"87012337","text":"from catalogue import Catalogue\nfrom etatApp import EtatApp\nfrom action import ActionManager\nimport os\nfrom documentFactory import DocumentFactoryPrincipale\n\n\ncatalogue = Catalogue.getInstance()\nfactory = DocumentFactoryPrincipale.getInstance()\nfactory.loadFactoryPlugins()\n\n# Création des documents du catalogue\nmain_dir = os.path.dirname(__file__)\nfor file in os.listdir(main_dir + \"/documents\"):\n if file.endswith(\".txt\"): # On ne traite que les fichiers txt\n doc = factory.creerDocument(file)\n catalogue.ajouterDocument(doc)\n\nprint(catalogue.getDocument())\n\n# Gestion des actions\nactionManager = ActionManager.getInstance()\nactionManager.loadActionPlugins()\n\nwhile EtatApp.getInstance().getEtat():\n\n # Afficher commandes dispo\n print(\"Que voulez vous faire ?\")\n actionManager.afficherCommandesDispo()\n\n # Récupérer entrée utilisateur\n choix = input(\"Votre choix ? 
#> \")\n\n # Exécuter la commande\n actionManager.executerEntreeUtilisateur(choix)\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"451800791","text":"#-*- coding: utf-8 -*-\n\n#직전의 예제 add(a, b)\n#파라미터가 리스트인지, 튜플인지, 이런것들을 구분해 줘야함\n\n#*tuple\n#**dict\n\ndef family_info(name, *family_name, **info):\n print(\"my name is \", name)\n print(\"my family consist ..\")\n print(family_name)\n \n for name in family_name:\n print(name)\n\n print(\"=\"*20)\n\n for key in info_key():\n print(key, \":\", info[key])\n\nmy_fam = (\"아버지\", \"엄마\", \"형\", \"동생\")\nmy_dict = {\"가훈\":\"잘먹고 잘살자\", \"지역\":\"부산\"}\nfamily_info(\"홍길동\", \"아버지\", \"엄마\", \"아들\", 가훈=\"우리집 가훈\", 집=\"부산\")\n\ndef my_string(*args, seperator = \"/\"):\n return seperator, join(args)\n\nmy_string(\"빨\",\"주\",\"노\",\"초\",\"파\",\"남\",\"보\")\n","sub_path":"python/c05_var_argument.py","file_name":"c05_var_argument.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"328997062","text":"from Drinks.models import RecipeD, IngredientsD, LinkD, Category\nimport random\n\ningredients = open('data/ingredientsD.txt')\ncategory = open('data/categoriesD.txt')\ndrinks = open('data/Drinks.txt')\nlink = open('data/linkD.txt')\n\n\ndef ingredient_p(text):\n text = text.split(\";\")[0]\n aux = IngredientsD(name=text, price=random.randint(500, 3000))\n aux.save()\n\n\ndef link_p(text):\n text = text.split(\";\")\n print(text)\n recipe_aux = RecipeD.objects.filter(pk=int(text[0])+1).get()\n ingredient_aux = IngredientsD.objects.filter(pk=int(text[1])+1).get()\n recipe_aux.price += ingredient_aux.price\n recipe_aux.save()\n aux = LinkD(amount=text[2][:-1], ingredient=ingredient_aux, recipe=recipe_aux)\n aux.save()\n\n\ndef category_p(text):\n text = text.split(\";\")[0]\n aux = Category(name=text)\n aux.save()\n\n\ndef drinks_p(text):\n text = text.split(\";\")\n aux = RecipeD(name=text[1], direction=text[4][:-1], url=text[3], category=Category.objects.filter(pk=int(text[2])+1).get().name, price=0)\n aux.save()\n\n\nfor line in ingredients.readlines():\n ingredient_p(line)\n\nfor line3 in category.readlines():\n category_p(line3)\n\nfor line4 in drinks.readlines():\n drinks_p(line4)\n\nfor line2 in link.readlines():\n link_p(line2)\n\n\ningredients.close()\ncategory.close()\ndrinks.close()\nlink.close()\n","sub_path":"Dparser.py","file_name":"Dparser.py","file_ext":"py","file_size_in_byte":1328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"273852990","text":"import numpy as np\nfrom scipy.stats import ttest_ind\n\nstep_len = 1\nsample_len = 10\nline = []\ntotal_line_number = 0\nfor x2 in reversed(list(open(\"spy.txt\"))):\n if x2 != 'null':\n x1 = x2.split('\\t')\n #print(x1)\n line.append(float(x1[4]))\n total_line_number += 1\nx = np.array(line)\n#print(x)\ny_total = x.astype(float)\ny1=y_total[-sample_len:]\nstart_point = 0\ndrop_counter = 0\nsum =0\ndrop_point = []\nfor start_point in range(0,len(y_total-sample_len-1),sample_len):\n y=y_total[start_point:start_point+sample_len]\n if len(y) == sample_len:\n #print (\"y\",y)\n #print (\"y1\",y1)\n t, p = ttest_ind(y, y1, equal_var=False)\n #print (\"ttest_ind: t = %f p = %f\", t, p)\n #print(y)\n if p < 0.011 :\n drop_counter += 1\n print (\"turn point happened\",y_total[start_point+sample_len])\n 
drop_point.append(start_point+sample_len+1)\n y1 = y\nif drop_counter > 0:\n print( \"there are turnning point\", drop_counter,\"times\", drop_point )\nwith open(\"spy.txt\") as myfile:\n lines = myfile.readlines()\nfor drop_line in drop_point:\n print(lines[total_line_number-int(drop_line)])\n\n\nmyfile.close()\n__author__ = 'joezhou'\n","sub_path":"t_test_spy.py","file_name":"t_test_spy.py","file_ext":"py","file_size_in_byte":1210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"494037059","text":"import os, sys\nimport torch\nimport shutil\nimport torchvision\nimport numpy as np\nimport itertools\nimport subprocess\nimport random\nimport torch.nn as nn\nimport torch.optim as optim\nimport matplotlib.pyplot as plt\nimport torch.nn.functional as F\nimport cv2\nimport torchvision.models as models\nimport torchvision.transforms as transforms\nfrom PIL import Image\nfrom torch.autograd import Variable\nfrom torch.utils.data import DataLoader\nfrom skimage.measure import compare_psnr as psnr\nfrom skimage.measure import compare_ssim as ski_ssim\nfrom pietorch import Home\nfrom pietorch import data_convertors as Convertors\nfrom pietorch import N_modules as n_mods\nfrom pietorch.M_DuRN import EnDNet\n\n# Hyper Params\ndata_name1 = 'go_pro'\ndata_name2 = 'RESIDE'\ndata_name3 = 'rain_zhanghe' # DID_MDN\ndata_name4 = 'DIV'\nPretrained = './trainedmodel/MBN.pt'\n\nYCRCB_blur = True\nYCRCB_haze = False\nYCRCB_rain = True\nYCRCB_jpeg = True\njpeg_align_k = 16\nbch_size = 1\ngpus = 1\nm_seed_gpu = 8223752412272754\nm_seed_cpu = 8526081014239199321\n\n# Use the seeds. \nif m_seed_gpu is not None and m_seed_cpu is not None:\n if gpus == 1:\n torch.cuda.manual_seed(m_seed_gpu)\n else:\n torch.cuda.manual_seed_all(m_seed_gpu)\n torch.manual_seed(m_seed_cpu)\n print(\"Use specific seeds.\")\nelse:\n m_seed_gpu = torch.cuda.initial_seed()\n m_seed_cpu = torch.initial_seed() \n \n# Set paths\nYOUR_DATA_ROOT = ''\ntestroot1 = YOUR_DATA_ROOT+data_name1+'/test/' # GoPro dataset\ntestroot2 = YOUR_DATA_ROOT+data_name2+'/sots_indoor_test/' # RESIDE-SOTS\ntestroot3 = YOUR_DATA_ROOT+data_name3+'/test/' # DID_MDN\ntestroot4 = YOUR_DATA_ROOT+'LIVE1/' # LIVE1\n\n\n# GoPro\ntest1_list_pth = Home+'lists/'+data_name1+'/blur/test_list.txt'\ntest1_labels_pth = Home+'lists/'+data_name1+'/label/test_list.txt'\n\n# RESIDE\ntest2_list_pth = Home+'lists/'+data_name2+'_indoor/sots_test_list.txt'\n\n# Rain_zhanghe\ntest3_list_pth = Home+'lists/'+data_name3+'/testlist.txt'\n\n# Jpeg\ntest4_list_pth = Home+'lists/LIVE1/imlist.txt'\n\n\n# Set transformers\ntransform = transforms.ToTensor()\n\n# Set convertors\n# GoPro\nblur_test_cvt = Convertors.ConvertImageSet_GoPro(testroot1, test1_list_pth, test1_labels_pth,\n transform=transform, crop_size=None,\n with_data_aug=False, resize_to=None)\n# Jpeg\njpeg_test_cvt = Convertors.ConvertImageSet_JpegCompress(testroot4, test4_list_pth,\n crop_size=None, transform=transform,\n with_data_aug=False, Vars=[10], \n align_k=jpeg_align_k)\n\n# Set data_loaders\nblur_testloader = DataLoader(blur_test_cvt, batch_size=1, shuffle=False, num_workers=1)\njpeg_testloader = DataLoader(jpeg_test_cvt, batch_size=1, shuffle=False, num_workers=1) \n\n\n# Make net\ncleaner = EnDNet(img_dim=3).cuda()\ncleaner.load_state_dict(torch.load(Pretrained))\nif gpus!= 1:\n cleaner = nn.DataParallel(cleaner, device_ids=range(gpus))\nelse:\n cleaner = cleaner\ncleaner.eval()\n\nwith torch.no_grad():\n ave_psnr = 0.0\n ave_ssim = 0.0\n ct_num = 0 \n for test_iter, 
data in enumerate(jpeg_testloader):\n img_data, label_data = data \n img, img_M, img_C = img_data\n label, label_M, label_C = label_data \n \n img = Variable(img, requires_grad=False).cuda() \n img_M = Variable(img_M, requires_grad=False).cuda() \n img_C = Variable(img_C, requires_grad=False).cuda() \n \n res_C, res_M, res = cleaner(img_C, img_M, img, 'jpeg')\n res = res.data.cpu().numpy()[0]\n res[res>1] = 1\n res[res<0] = 0\n res*= 255\n res = res.astype(np.uint8)\n res = res.transpose((1,2,0))\n \n label = label.numpy()[0]\n label*= 255\n label = label.astype(np.uint8)\n label = label.transpose((1,2,0))\n \n if YCRCB_jpeg:\n res = cv2.cvtColor(res, cv2.COLOR_RGB2YCR_CB)[:,:,0]\n label = cv2.cvtColor(label, cv2.COLOR_RGB2YCR_CB)[:,:,0]\n ave_psnr+= psnr( res, label, data_range=255)\n ave_ssim+= ski_ssim(res, label, data_range=255, multichannel=False)\n ct_num+= 1\n else:\n ave_psnr+= psnr( res, label, data_range=255)\n ave_ssim+= ski_ssim(res, label, data_range=255, multichannel=True)\n ct_num+= 1\n print('psnr_jpeg: '+str(ave_psnr/float(ct_num))+'.')\n print('ssim_jpeg: '+str(ave_ssim/float(ct_num))+'.')\n\n \n ave_psnr = 0.0\n ave_ssim = 0.0\n ct_num = 0 \n for test_iter, te_info in enumerate(open(test3_list_pth).read().splitlines()):\n te_pair_pth = testroot3+te_info\n te_pair = Image.open(te_pair_pth)\n pair_w, pair_h = te_pair.size\n \n img = te_pair.crop((0, 0, pair_w/2, pair_h))\n te_label = te_pair.crop((pair_w/2, 0, pair_w, pair_h))\n \n img = np.asarray(img)\n te_label = np.asarray(te_label)\n \n img_M = cv2.resize(img, \n (int(img.shape[1]*0.5),\n int(img.shape[0]*0.5)),\n interpolation=cv2.INTER_CUBIC)\n \n img_C = cv2.resize(img_M, \n (int(img_M.shape[1]*0.5),\n int(img_M.shape[0]*0.5)),\n interpolation=cv2.INTER_CUBIC)\n \n img = transform(img).unsqueeze(0)\n img_M = transform(img_M).unsqueeze(0)\n img_C = transform(img_C).unsqueeze(0)\n \n img = Variable(img, requires_grad=False).cuda()\n img_M = Variable(img_M, requires_grad=False).cuda()\n img_C = Variable(img_C, requires_grad=False).cuda()\n \n res = cleaner(img_C, img_M, img, 'rain')\n res = res.data.cpu().numpy()[0]\n res[res>1] = 1\n res[res<0] = 0\n res*= 255\n res = res.astype(np.uint8)\n res = res.transpose((1,2,0))\n \n if YCRCB_rain:\n res = cv2.cvtColor(res, cv2.COLOR_RGB2YCR_CB)[:,:,0]\n te_label = cv2.cvtColor(te_label, cv2.COLOR_RGB2YCR_CB)[:,:,0]\n ave_psnr+= psnr( res, te_label, data_range=255)\n ave_ssim+= ski_ssim(res, te_label, data_range=255, multichannel=False)\n ct_num+= 1\n else:\n ct_num+= 1\n ave_psnr+= psnr(res, te_label, data_range=255)\n ave_ssim+= ski_ssim(res, te_label, data_range=255, multichannel=True)\n \n print('psnr_rain: '+str(ave_psnr/float(ct_num))+'.')\n print('ssim_rain: '+str(ave_ssim/float(ct_num))+'.')\n\n \n ave_psnr = 0.0\n ave_ssim = 0.0\n ct_num = 0\n for test_iter, te_info in enumerate(open(test2_list_pth).read().splitlines()):\n te_label_pth = testroot2+'labels/'+te_info.split(' ')[0] \n te_label = Image.open(te_label_pth)\n te_label = np.asarray(te_label)\n \n for v in np.arange(1, 11, 1):\n te_haze_name = te_info.split('.')[0]+'_'+str(v)+'.png'\n te_haze_pth = testroot2+'images/'+te_haze_name\n \n img = Image.open(te_haze_pth)\n img = np.asarray(img)\n img_M = cv2.resize(img, \n (int(img.shape[1]*0.5),\n int(img.shape[0]*0.5)),\n interpolation=cv2.INTER_CUBIC)\n \n img_C = cv2.resize(img_M, \n (int(img_M.shape[1]*0.5),\n int(img_M.shape[0]*0.5)),\n interpolation=cv2.INTER_CUBIC)\n \n img = transform(img).unsqueeze(0)\n img_M = transform(img_M).unsqueeze(0)\n img_C = 
transform(img_C).unsqueeze(0)\n \n img = Variable(img, requires_grad=False).cuda()\n img_M = Variable(img_M, requires_grad=False).cuda()\n img_C = Variable(img_C, requires_grad=False).cuda()\n \n res = cleaner(img_C, img_M, img, 'haze')\n res = res.data.cpu().numpy()[0]\n res[res>1] = 1\n res[res<0] = 0\n res*= 255\n res = res.astype(np.uint8)\n res = res.transpose((1,2,0))\n \n if YCRCB_haze:\n res = cv2.cvtColor(res, cv2.COLOR_RGB2YCR_CB)[:,:,0]\n te_label = cv2.cvtColor(te_label, cv2.COLOR_RGB2YCR_CB)[:,:,0]\n ave_psnr+= psnr( res, te_label, data_range=255)\n ave_ssim+= ski_ssim(res, te_label, data_range=255, multichannel=False)\n ct_num+= 1\n else:\n ave_psnr+= psnr(res, te_label, data_range=255)\n ave_ssim+= ski_ssim(res, te_label, data_range=255, multichannel=True)\n ct_num+= 1\n print('psnr_haze: '+str(ave_psnr/float(ct_num))+'.')\n print('ssim_haze: '+str(ave_ssim/float(ct_num))+'.')\n \n \n ave_psnr = 0.0\n ave_ssim = 0.0\n ct_num = 0\n for test_iter, data in enumerate(blur_testloader):\n img_data, label_data = data \n img, img_M, img_C = img_data\n label, label_M, label_C = label_data \n \n img = Variable(img, requires_grad=False).cuda() \n img_M = Variable(img_M, requires_grad=False).cuda() \n img_C = Variable(img_C, requires_grad=False).cuda() \n \n res_C, res_M, res = cleaner(img_C, img_M, img, 'blur')\n res = res.data.cpu().numpy()[0]\n res[res>1] = 1\n res[res<0] = 0\n res*= 255\n res = res.astype(np.uint8)\n res = res.transpose((1,2,0))\n \n label = label.numpy()[0]\n label*= 255\n label = label.astype(np.uint8)\n label = label.transpose((1,2,0))\n \n if YCRCB_blur:\n res = cv2.cvtColor(res, cv2.COLOR_RGB2YCR_CB)[:,:,0]\n label = cv2.cvtColor(label, cv2.COLOR_RGB2YCR_CB)[:,:,0]\n ave_psnr+= psnr( res, label, data_range=255)\n ave_ssim+= ski_ssim(res, label, data_range=255, multichannel=False)\n ct_num+= 1\n else:\n ave_psnr+= psnr( res, label, data_range=255)\n ave_ssim+= ski_ssim(res, label, data_range=255, multichannel=True)\n ct_num+= 1\n print('psnr_blur: '+str(ave_psnr/float(ct_num))+'.')\n print('ssim_blur: '+str(ave_ssim/float(ct_num))+'.') \n print('Testing done.')\n \n\n\n","sub_path":"test_MBN.py","file_name":"test_MBN.py","file_ext":"py","file_size_in_byte":10501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"349570852","text":"from GamePlatform.GameBoard import GameBoard\nfrom GamePlatform.GameRules import detect_mill, is_blocked, who_has_less_than_three\nfrom GamePlatform.Player import Player\nfrom GamePlatform.StdinController import StdinController\nfrom TournamentManager.MatchResult import MatchResult\n\nfrom GameEngine.ComputerController import ComputerController\nfrom time import sleep\n\nTURNS_FOR_DRAW = 400\nDEFAULT_STONES = 9\n\n\nclass GameManager:\n def __init__(self, renderer, tournament_players):\n self.renderer = renderer\n self.players = []\n self.tournament_players = tournament_players\n self.winner = None\n\n # Convert\n stone_type = \"black\"\n for tournament_player in tournament_players:\n player = Player(stone_type, tournament_player, DEFAULT_STONES)\n if(tournament_player.cpu == True):\n player.set_controller(ComputerController(player, complexity = tournament_player.cpu_level))\n else:\n player.set_controller(StdinController(player))\n self.players.append(player)\n stone_type = \"white\"\n\n self.board = GameBoard()\n\n self.phase = 1\n self.step_count = 0\n\n self.phase = 1\n self.step_count = 0\n\n def start_game(self):\n winner = self.game_step()\n 
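# A small, self-contained sketch of two conventions the game loop below relies on: the
# current player is chosen by step count modulo the player list, and phase 2 starts once
# every player has placed all of their stones. Player names and counts here are illustrative.
players = ["black", "white"]
for step_count in range(4):
    current_player = players[step_count % len(players)]  # alternates black, white, black, ...
    print(step_count, current_player)

stones_left = [0, 0]  # one entry per player, counted down during phase 1 placements
print(all(n == 0 for n in stones_left))  # True -> phase 2 should start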
self.renderer.render_winner(winner)\n if winner is not None:\n self.winner = winner.tournament_player\n return winner.tournament_player\n else:\n return None\n\n def game_step(self):\n is_game_over = False\n winner = None\n\n while not is_game_over:\n sleep(0.2) # For CPU Agains CPU\n if self.step_count > TURNS_FOR_DRAW:\n winner = None\n return winner\n\n current_player = self.players[self.step_count % len(self.players)]\n\n self.renderer.begin_render()\n self.renderer.render_phase(self.phase)\n self.renderer.print_player_stones(self.players)\n self.renderer.render(self.board)\n self.renderer.print_player_turn(current_player)\n\n move = current_player.controller.make_move(self.board, self.phase)\n if(move == 0):\n if(self.step_count % 2 == 0):\n current_player = self.players[(self.step_count % len(self.players))+1]\n else:\n current_player = self.players[(self.step_count % len(self.players))-1]\n return current_player\n n_mills = detect_mill(self.board, move)\n if n_mills > 0:\n self.renderer.begin_render()\n self.renderer.render_phase(self.phase)\n self.renderer.print_player_stones(self.players)\n self.renderer.render(self.board)\n self.renderer.print_player_turn(current_player)\n current_player.controller.remove_stone(self.board)\n\n other_players = [\n player for player in self.players\n if player is not current_player\n ]\n\n if self.phase == 1:\n move.player.n_stones -= 1\n if self.phase == 1 and self.should_start_phase2():\n self.phase = 2\n\n if self.phase == 2:\n if is_blocked(self.board, other_players[0]):\n winner = current_player\n return winner\n has_less_than_three = who_has_less_than_three(\n self.board, self.players)\n if (has_less_than_three is not None):\n other_players = [\n player for player in self.players\n if player is not has_less_than_three\n ]\n winner = other_players[0]\n return winner\n\n self.step_count += 1\n\n def should_start_phase2(self):\n return all(player.n_stones == 0 for player in self.players)\n","sub_path":"GamePlatform/GameManager.py","file_name":"GameManager.py","file_ext":"py","file_size_in_byte":3975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"252964017","text":"\"\"\"-----------------------------------------------------------------------------\nPURPOSE : Custom GUI for amendment reason and type selection.\n This is triggered from FValidation_cal.\nREQUESTER, DEPATMENT : Nhlanhleni Mchunu, PCG\nPROJECT : Fix the Front - CAL\nDEVELOPER : Libor Svoboda\n--------------------------------------------------------------------------------\n\nHISTORY\n================================================================================\nDate Change no Developer Description\n--------------------------------------------------------------------------------\n2018-11-13 CHG1001100033 Libor Svoboda Initial Implementation\n\"\"\"\nimport acm\nimport FUxCore\nfrom cal_config import AMEND_REASONS, AMEND_REASONS_BACKDATE, AMEND_TYPE_BACKDATE\n\n\nclass AmendReasonDialog(FUxCore.LayoutDialog):\n \n caption = ''\n \n def __init__(self):\n self._reason_selection = []\n self._type_selection = []\n \n @classmethod\n def populate_list_control(cls, lc, values):\n lc.RemoveAllItems()\n lc_root = lc.GetRootItem()\n for value in values:\n child = lc_root.AddChild()\n child.SetData(value)\n child.Label(value)\n \n @classmethod\n def select(cls, lc, index=0):\n lc_root = lc.GetRootItem()\n for child_index, child in enumerate(lc_root.Children()):\n if child_index == index:\n child.Select(True)\n else:\n 
child.Select(False)\n \n def _set_callbacks(self):\n return\n \n def HandleCreate(self, dlg, layout):\n self._fux_dlg = dlg\n self._fux_dlg.Caption(self.caption)\n self._ok_btn = layout.GetControl('ok')\n self._ok_btn.Enabled(False)\n \n self._amend_reason = layout.GetControl('amend_reason')\n self._amend_reason.EnableMultiSelect(False)\n self.populate_list_control(self._amend_reason, self._reason_selection)\n \n self._amend_type = layout.GetControl('amend_type')\n self._amend_type.EnableMultiSelect(False)\n self.populate_list_control(self._amend_type, self._type_selection)\n self._set_callbacks()\n \n def HandleApply(self):\n selected_reason = str(self._amend_reason.GetSelectedItem().GetData())\n selected_type = str(self._amend_type.GetSelectedItem().GetData())\n return {'amend_reason': selected_reason, \n 'amend_type': selected_type}\n\n\nclass AmendReasonDialogStandard(AmendReasonDialog):\n \n caption = 'Please select Amendment reason'\n \n def __init__(self):\n self._reason_selection = list(AMEND_REASONS.keys())\n self._type_selection = []\n \n def _set_callbacks(self):\n self._amend_reason.AddCallback('SelectionChanged', \n self._on_selection_changed_reason, self)\n self._amend_type.AddCallback('SelectionChanged', \n self._on_selection_changed_type, self)\n self.select(self._amend_reason)\n \n def _on_selection_changed_reason(self, _arg1, _arg2):\n selected_item = self._amend_reason.GetSelectedItem()\n if not selected_item:\n self._ok_btn.Enabled(False)\n return\n selected_reason = str(selected_item.GetData())\n type_selection = AMEND_REASONS[selected_reason]\n self.populate_list_control(self._amend_type, type_selection)\n self.select(self._amend_type)\n self._ok_btn.Enabled(True)\n\n def _on_selection_changed_type(self, _arg1, _arg2):\n selected_item = self._amend_type.GetSelectedItem()\n if not selected_item:\n self.select(self._amend_type)\n\n\nclass AmendReasonDialogBackdate(AmendReasonDialog):\n \n caption = 'Please select Amendment reason and type (Backdate)'\n \n def __init__(self):\n self._reason_selection = AMEND_REASONS_BACKDATE\n self._type_selection = AMEND_TYPE_BACKDATE\n \n def _on_selection_changed(self, _arg1, _arg2):\n selected_reason = self._amend_reason.GetSelectedItem()\n selected_type = self._amend_type.GetSelectedItem()\n if selected_reason and selected_type:\n self._ok_btn.Enabled(True)\n else:\n self._ok_btn.Enabled(False)\n \n def _set_callbacks(self):\n self._amend_reason.AddCallback('SelectionChanged', \n self._on_selection_changed, self)\n self._amend_type.AddCallback('SelectionChanged', \n self._on_selection_changed, self)\n\n\ndef create_layout():\n b = acm.FUxLayoutBuilder()\n b.BeginVertBox('None')\n b. AddLabel('amend_reason_label', 'Amendment reason')\n b. AddList('amend_reason', numlines=10, width=80)\n b. AddLabel('amend_type_label', 'Amendment type')\n b. AddList('amend_type', numlines=4, width=80)\n b. BeginHorzBox('None')\n b. AddFill()\n b. AddButton('ok', 'OK')\n b. AddButton('cancel', 'Cancel')\n b. 
EndBox()\n b.EndBox()\n return b\n\n\ndef start_dialog(shell, backdate=False):\n builder = create_layout()\n if backdate:\n amend_dialog = AmendReasonDialogBackdate()\n else:\n amend_dialog = AmendReasonDialogStandard()\n result = acm.UX().Dialogs().ShowCustomDialogModal(shell, builder, amend_dialog)\n return result\n\n","sub_path":"Python modules/cal_gui.py","file_name":"cal_gui.py","file_ext":"py","file_size_in_byte":5397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"164973906","text":"import threading\r\nimport random\r\nimport time\r\n\r\nfilosophers_num = 5\r\niters = 10\r\n\r\n\r\nclass Philosopher(threading.Thread):\r\n\r\n def __init__(self, number, left_fork, right_fork, iterations):\r\n threading.Thread.__init__(self)\r\n self.number = number\r\n self.leftFork = left_fork\r\n self.iterations = iterations\r\n self.rightFork = right_fork\r\n self.resultTime = 0\r\n\r\n def run(self):\r\n while self.iterations > 0:\r\n time.sleep(random.uniform(0, 1))\r\n print('Philosopher %d is hungry' % self.number)\r\n self.getForksAndEat()\r\n self.iterations -= 1\r\n\r\n def getForksAndEat(self):\r\n start_time = time.time()\r\n while True:\r\n self.leftFork.acquire(True)\r\n locked = self.rightFork.acquire(True)\r\n if locked: break\r\n elapsed_time = time.time() - start_time\r\n self.resultTime += elapsed_time\r\n self.eat()\r\n self.leftFork.release()\r\n self.rightFork.release()\r\n\r\n def eat(self):\r\n print('%d starts eating' % self.number)\r\n time.sleep(random.uniform(0, 1))\r\n print('%d finishes eating and leaves to think' % self.number)\r\n\r\n\r\nif __name__ == '__main__':\r\n forks = [threading.Lock() for i in range(filosophers_num)]\r\n\r\n philosophers = [Philosopher(i, forks[i % 5], forks[(i + 1) % 5], iters) for i in range(5)]\r\n random.seed(275538)\r\n for p in philosophers: p.start()\r\n result_array = []\r\n for p in philosophers:\r\n p.join()\r\n result_array.append(p.resultTime / iters)\r\n print(\"Now we're finishing.\")\r\n res_sum = 0\r\n for r in result_array:\r\n res_sum += r\r\n res_sum /= len(result_array)\r\n print(res_sum)\r\n","sub_path":"lab7/python/NaivePhilosopher.py","file_name":"NaivePhilosopher.py","file_ext":"py","file_size_in_byte":1741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"580198778","text":"# from PsyNeuLink.Components.Mechanisms.ProcessingMechanisms.Deprecated.LinearMechanism import *\nfrom PsyNeuLink.Components.Mechanisms.AdaptiveMechanisms.ControlMechanisms.EVCMechanism import EVCMechanism\nfrom PsyNeuLink.Components.Mechanisms.ProcessingMechanisms.DDM import *\nfrom PsyNeuLink.Components.Mechanisms.ProcessingMechanisms.TransferMechanism import *\nfrom PsyNeuLink.Components.Process import process\nfrom PsyNeuLink.Components.Projections.ControlProjection import ControlProjection\nfrom PsyNeuLink.Components.System import system\nfrom PsyNeuLink.Globals.Keywords import *\n\n# Preferences:\nDDM_prefs = ComponentPreferenceSet(\n prefs = {\n kpVerbosePref: PreferenceEntry(False,PreferenceLevel.INSTANCE),\n kpReportOutputPref: PreferenceEntry(True,PreferenceLevel.INSTANCE)})\n\nprocess_prefs = ComponentPreferenceSet(reportOutput_pref=PreferenceEntry(False,PreferenceLevel.INSTANCE),\n verbose_pref=PreferenceEntry(True,PreferenceLevel.INSTANCE))\n\n# Mechanisms:\nInput = TransferMechanism(name='Input',\n # params={MONITOR_FOR_CONTROL:[MonitoredOutputStatesOption.PRIMARY_OUTPUT_STATES]}\n # prefs=DDM_prefs\n # prefs={VERBOSE_PREF: 
False,\n # REPORT_OPUTPUT_PREF: True}\n )\nReward = TransferMechanism(name='Reward',\n # params={MONITOR_FOR_CONTROL:[PROBABILITY_UPPER_THRESHOLD,(RESPONSE_TIME, -1, 1)]}\n )\nDecision = DDM(function=BogaczEtAl(drift_rate=(1.0, ControlProjection(function=Linear, allocation_samples=[0.1, .5])),\n threshold=(1.0, ControlProjection(function=Linear, allocation_samples=[0.1, .5])),\n noise=(0.5),\n starting_point=(0),\n t0=0.45),\n prefs = DDM_prefs,\n name='Decision')\n\n# Processes:\nTaskExecutionProcess = process(\n default_input_value=[0],\n pathway=[(Input, 0), IDENTITY_MATRIX, (Decision, 0)],\n prefs = process_prefs,\n name = 'TaskExecutionProcess')\n\nRewardProcess = process(\n default_input_value=[0],\n pathway=[(Reward, 1)],\n prefs = process_prefs,\n name = 'RewardProcess')\n\n# System:\nmySystem = system(processes=[TaskExecutionProcess, RewardProcess],\n controller=EVCMechanism,\n enable_controller=True,\n monitor_for_control=[Reward, DDM_PROBABILITY_UPPER_THRESHOLD, (DDM_RESPONSE_TIME, -1, 1)],\n # monitor_for_control=[Input, PROBABILITY_UPPER_THRESHOLD,(RESPONSE_TIME, -1, 1)],\n # monitor_for_control=[MonitoredOutputStatesOption.ALL_OUTPUT_STATES],\n name='EVC Test System')\n\n# Show characteristics of system:\nmySystem.show()\nmySystem.controller.show()\n\n# Specify stimuli for run:\n# two ways to do so:\n\n# - as a dictionary of stimulus lists; for each entry:\n# key is name of an origin mechanism in the system\n# value is a list of its sequence of stimuli (one for each trial)\ninputList = [0.5, 0.123]\nrewardList = [20, 20]\n# stim_list_dict = {Input:[0.5, 0.123],\n# Reward:[20, 20]}\nstim_list_dict = {Input:[[0.5], [0.123]],\n Reward:[[20], [20]]}\n\n# - as a list of trials;\n# each item in the list contains the stimuli for a given trial,\n# one for each origin mechanism in the system\ntrial_list = [[0.5, 20], [0.123, 20]]\nreversed_trial_list = [[Reward, Input], [20, 0.5], [20, 0.123]]\n\n# Create printouts function (to call in run):\ndef show_trial_header():\n print(\"\\n############################ TRIAL {} ############################\".format(CentralClock.trial))\n\ndef show_results():\n\n results = sorted(zip(mySystem.terminalMechanisms.outputStateNames, mySystem.terminalMechanisms.outputStateValues))\n print('\\nRESULTS (time step {}): '.format(CentralClock.time_step))\n print ('\\tDrift rate control signal (from EVC): {}'.format(Decision.parameterStates[DRIFT_RATE].value))\n print ('\\tThreshold control signal (from EVC): {}'.format(Decision.parameterStates[THRESHOLD].value))\n import re\n for result in results:\n result_0 = re.sub('[\\[,\\],\\n]','',str(result[0]))\n result_1 = re.sub('[\\[,\\],\\n]','',str(float(result[1])))\n print(\"\\t{}: {}\".format(result_0, result_1))\n\n# Run system:\n\nmySystem.controller.reportOutputPref = False\n\n# mySystem.run(inputs=trial_list,\n# # mySystem.run(inputs=reversed_trial_list,\nmySystem.run(inputs=stim_list_dict,\n call_before_trial=show_trial_header,\n call_after_time_step=show_results\n )\n","sub_path":"Scripts/MISCELLANEOUS SCRIPTS/EVC System Laming Validation Test Script COPY.py","file_name":"EVC System Laming Validation Test Script COPY.py","file_ext":"py","file_size_in_byte":4707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"561481774","text":"import discord\nfrom discord.ext import commands\nimport asyncio\nfrom utils import permissions, database, logging\n\nclass Staff(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n\n @commands.command()\n 
@commands.guild_only()\n @permissions.trial_mod_only()\n async def move(self, ctx, channel: discord.TextChannel, amount:int = 10):\n \"\"\"Temporarily mutes all the users who recently talked in the channel\"\"\"\n\n messages = await channel.history(limit=amount).flatten()\n\n muted = []\n\n for message in messages:\n if not ctx.channel.permissions_for(message.author).manage_channels or not ctx.channel.permissions_for(message.author).manage_messages:\n muted.append(message.author)\n await message.channel.set_permissions(message.author, send_messages=False, reason='Moving channels')\n\n await ctx.send(f\":grey_exclamation: {', '.join([u.mention for u in muted])} Please move this conversation to {channel.mention}\")\n await logging.log(ctx, f':left_right_arrow: {str(ctx.author)} (`{ctx.author.id}`) moved the conversation from {ctx.channel.mention} to {channel.mention}')\n\n await asyncio.sleep(30)\n\n for m in muted:\n await message.channel.set_permissions(m, send_messages=None, reason='Done moving channels')\n\n @commands.command()\n @commands.guild_only()\n @permissions.trial_mod_only()\n async def announce(self, ctx, channel:discord.TextChannel, role:discord.Role, *, content):\n \"\"\"Sends an announcement with a role ping in a specified channel\"\"\"\n\n await role.edit(mentionable=True)\n await channel.send(f'{role.mention} {content}')\n await role.edit(mentionable=False)\n\n await ctx.send(f':white_check_mark: Announcement sent to role {role.name} in channel {channel.mention}')\n\n\n\ndef setup(bot):\n bot.add_cog(Staff(bot))\n","sub_path":"cogs/staffutils.py","file_name":"staffutils.py","file_ext":"py","file_size_in_byte":1889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"292258997","text":"# -*- coding: utf-8 -*-\nimport scrapy\nimport re\nfrom .items import ServHuuzItem\nfrom scrapy.utils.project import get_project_settings\n\nfrom pymongo import MongoClient\nfrom urllib.parse import urlparse\nfrom urllib.parse import urljoin\n\nfrom scrapy.conf import settings\n\n\nimport random\nfrom scrapy.spidermiddlewares.httperror import HttpError\nfrom twisted.internet.error import DNSLookupError\nfrom twisted.internet.error import TimeoutError\n\n\nimport logging\nfrom scrapy.utils.log import configure_logging\n\nconfigure_logging(install_root_handler=False)\nlogging.basicConfig(\n filename='mail_nav.log',\n format='%(levelname)s: %(message)s',\n level=logging.DEBUG\n)\nimport re\n\n\n\n\n\nclass SpidHuuzSpider_f(scrapy.Spider):\n name = \"mail_nav\"\n allowed_domains = []#\n start_urls = []\n\n connection = MongoClient(\n settings['MONGODB_SERVER'],\n settings['MONGODB_PORT']\n )\n db = connection[settings['MONGODB_DB']]\n collection = db[settings['MONGODB_COLLECTION']]\n\n\n current_category = 'General Contractors'\n\n person_web_sites = [x['website'] for x in collection.find() if x['email_from_web_site_flag'] is not True and x['category'] == 'General Contractors' and x['website'] !='']\n \n\n\n\n start_urls = person_web_sites\n logging.info('#####FROM BASE #### %s ########## CHECK ##### &^^ &^^& #####' % (person_web_sites))\n #logging.info('#####FROM BASE #### %s ########## CHECK ##### &^^ IIIIIIIIIII &^^& #####' % (domain_test))\n\n\n def start_requests333(self):\n def get_domain(url):\n \"\"\"Return top two domain levels from URI\"\"\"\n re_3986_enhanced = re.compile(r\"\"\"\n # Parse and capture RFC-3986 Generic URI components.\n ^ # anchor to beginning of string\n (?: (?P [^:/?#\\s]+): )? 
# capture optional scheme\n (?://(?P [^/?#\\s]*) )? # capture optional authority\n (?P [^?#\\s]*) # capture required path\n (?:\\?(?P [^#\\s]*) )? # capture optional query\n (?:\\#(?P [^\\s]*) )? # capture optional fragment\n $ # anchor to end of string\n \"\"\", re.MULTILINE | re.VERBOSE)\n re_domain = re.compile(r\"\"\"\n # Pick out top two levels of DNS domain from authority.\n (?P[^.]+\\.[A-Za-z]{2,6}) # $domain: top two domain levels.\n (?::[0-9]*)? # Optional port number.\n $ # Anchor to end of string.\n \"\"\", \n re.MULTILINE | re.VERBOSE)\n result = \"\"\n m_uri = re_3986_enhanced.match(url)\n if m_uri and m_uri.group(\"authority\"):\n auth = m_uri.group(\"authority\")\n m_domain = re_domain.search(auth)\n if m_domain and m_domain.group(\"domain\"):\n result = m_domain.group(\"domain\");\n return result\n\n\n\n\n\n\n for person_website in self.start_urls:\n domain_test = get_domain(person_website)\n if domain_test:\n self.allowed_domains.append(domain_test)\n\n for url in self.start_urls:\n self.logger.info('#####HEADER FB#### %s ########## CHECK ##### &^^ IIIIII &^^& #####' % (url))\n yield scrapy.Request(url, dont_filter=True, meta={'req_url':url},callback=self.parse, errback=self.errback_httpbin)\n\n\n\n def parse(self,response):\n self.logger.info('#####EMAIL NONE HAVE NAV### %s ########## CHECK ##### &^^ /o\\ &^^& #####' % (response.xpath('//*[contains(@class, \"men\")]/@href')))\n for x in response.xpath('//*[contains(@class, \"nav\")]'):\n self.logger.info('#####EMAIL NONE HAVE NAV### %s ########## CHECK #### \\o/ #####' % (x))\n newurl = urljoin(response.url, x.extract())\n self.logger.info('#####EMAIL NONE HAVE NAV### %s ########## CHECK ##### &^^ &^^& #####' % (newurl))\n yield scrapy.Request(url = newurl, callback=self.parse1,errback=self.errback_httpbin)\n\n\n\n def parse1(self, response):\n self.state['items_count'] = self.state.get('items_count', 0) + 1\n\n self.logger.info('###item count %s########## %s ########## CHECK ########## %s' % \n (\n self.state['items_count'],\n response.request.url,\n response.url \n ))\n\n # self.state['items_count'] = self.state.get('items_count', 0) + 1\n item = ServHuuzItem()\n self.logger.info('#####HEADER FB#### %s ########## CHECK ##### &^^ %s&^^& #####' % (response.request.url))\n #if response.request.url != response.url:\n #self.logger.info('#####HEADER FB#### %s ########## CH12341231313ECK ##### &^^ %s&^^& #####' % (response.request.url,response.headers['location']))\n\n if response.status in [301, 302] and 'Location' in response.headers:\n newurl = urljoin(request.url, response.headers['location'])\n # or \n #newurl = response.headers['location']\n self.logger.info('#####HEADER FB#### %s ########## CHECK ##### &^^ %s&^^& #####' % (newurl,request.meta))\n yield Request(url = newurl, meta = request.meta, callback=self.parse, errback=self.errback_httpbin) \n\n \n i_a=[]\n email = response.xpath(\"//*[contains(text(), '@')]\").extract()\n mailsrch = re.compile(r'[\\w\\-][\\w\\-\\.]+@[\\w\\-][\\w\\-\\.]+[a-zA-Z]{1,4}')\n for _ in mailsrch.findall(''.join(email)):\n if _ not in i_a:\n i_a.append(_)\n #item = ' '.join(i_a)\n if i_a is None:\n return\n else:\n item['email_from_facebook'] = ' '.join(i_a)\n item['email_from_facebook_flag'] = True\n # item['facebook_acc'] = re.split(r'/about/[?]ref=page_internal',response.meta['req_url'])[0]\n # item['facebook_checked'] = re.split(r'/about/[?]ref=page_internal',response.url)[0]\n #item['doc_id'] = self._id\n #print('####################################','IN Pipe spid_huuz_f !!!', 
response.url, '#############', '?!?!!?!?!??!?!?!!?')\n \n yield item\n\n\n\n\n def add_errback(self, request):\n self.logger.debug(\"add_errback: patching %r\" % request)\n\n # this is a hack to trigger a DNS error randomly\n rn = random.randint(0, 2)\n if rn == 1:\n newurl = request.url.replace('httpbin.org', 'httpbin.organisation')\n self.logger.debug(\"add_errback: patching url to %s\" % newurl)\n return request.replace(url=newurl,\n errback=self.errback_httpbin)\n\n # this is the general case: adding errback to all requests\n return request.replace(errback=self.errback_httpbin)\n\n\n\n def errback_httpbin(self, failure):\n # log all errback failures,\n # in case you want to do something special for some errors,\n # you may need the failure's type\n item = ServHuuzItem()\n self.logger.error(repr(failure))\n\n if failure.check(HttpError):\n # you can get the response\n response = failure.value.response\n # self.logger.error('HttpError on %s ####### %s', response.url,response.meta['req_url'])\n item['email_from_facebook'] = str(response)\n item['email_from_facebook_flag'] = True\n # item['facebook_acc'] = re.split(r'/about/[?]ref=page_internal',response.meta['req_url'])[0]\n item['facebook_checked'] = True\n #item['doc_id'] = self._id\n #print('####################################','IN Pipe spid_huuz_f !!!', response.url, '#############', '?!?!!?!?!??!?!?!!?')\n \n # yield item\n\n elif failure.check(DNSLookupError):\n # this is the original request\n request = failure.request\n # self.logger.error('DNSLookupError on %s#####1 %s', request.url,response.meta['req_url'])\n item['email_from_facebook'] = (request)\n item['email_from_facebook_flag'] = True\n # item['facebook_acc'] = re.split(r'/about/[?]ref=page_internal',response.meta['req_url'])[0]\n item['facebook_checked'] = True\n \n #item['doc_id'] = self._id\n #print('####################################','IN Pipe spid_huuz_f !!!', response.url, '#############', '?!?!!?!?!??!?!?!!?')\n \n # yield item\n\n elif failure.check(TimeoutError):\n request = failure.request\n # self.logger.error('TimeoutError on %s ######2 %s', request.url,response.meta['req_url'])\n item['email_from_facebook'] = (request)\n item['email_from_facebook_flag'] = True\n # item['facebook_acc'] = re.split(r'/about/[?]ref=page_internal',response.meta['req_url'])[0]\n item['facebook_checked'] = True\n #item['doc_id'] = self._id\n #print('####################################','IN Pipe spid_huuz_f !!!', response.url, '#############', '?!?!!?!?!??!?!?!!?')\n \n yield item","sub_path":"serv_huuz/spiders/mail_nav.py","file_name":"mail_nav.py","file_ext":"py","file_size_in_byte":9245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"215694301","text":"# ONLY EDIT FUNCTIONS MARKED CLEARLY FOR EDITING\n\nimport numpy as np\n\ndef question01(portfolios): # input is an array of portfolios represented as their decimal values [p1,...,pN] where N<=100\n # each has a maximum value of 2^16-1\n max = 0\n for i in range(len(portfolios)):\n p = portfolios[i]\n for j in range(i+1, len(portfolios)):\n q = portfolios[j]\n if p ^ q > max:\n max = p ^ q\n\n return max\n","sub_path":"q1.py","file_name":"q1.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"421569590","text":"# 群组聊天场景,有多个用户接收与发送消息\n# 每个群组使用有序集合来保存参加群组的用户,其中集合成员为用户名,成员的值为用户在群组内接收到的最大消息 ID\n# 用户也使用有序集合来记录自己参加的所有群组,其中集合成员为群组 ID,成员的值为用户在群组内接收到的最大消息 ID\nimport time\nimport 
json\nimport redis\nfrom lock_timeout import acquire_lock_with_timeout, release_lock\n\n\ndef create_chat(conn: redis.Redis, sender, recipients: list, message, chat_id=None):\n chat_id = chat_id or str(conn.incr('ids:chat'))\n recipients.append(sender)\n recipientsd = dict((r, 0) for r in recipients)\n pipeline = conn.pipeline(True)\n pipeline.zadd('chat:' + chat_id, **recipientsd)\n for recipient in recipients:\n pipeline.zadd('seen:' + recipient, chat_id, 0)\n pipeline.execute()\n return send_message(conn, chat_id, sender, message)\n\n\ndef send_message(conn: redis.Redis, chat_id, sender, message):\n identifier = acquire_lock_with_timeout(conn, 'chat:' + chat_id, 1)\n if not identifier:\n raise Exception(\"Couldn't obtain the lock\")\n try:\n msg_id = conn.incr('ids:msg:' + chat_id)\n now = time.time()\n packed = json.dumps({\n 'id': msg_id,\n 'ts': now,\n 'sender': sender,\n 'message': message,\n })\n conn.zadd('msgs:' + chat_id, packed, msg_id)\n finally:\n release_lock(conn, 'chat:' + chat_id, identifier)\n return chat_id\n\n\ndef fetch_pending_messages(conn: redis.Redis, recipient):\n seen = conn.zrange('seen:' + recipient, 0, -1, withscores=True)\n pipeline = conn.pipeline()\n for chat_id, seen_id in seen:\n # 找出 msgs 中从 seen_id 以来的新消息\n pipeline.zrangebyscore('msgs:' + chat_id, seen_id + 1, 'inf')\n chat_info = list(zip(seen, pipeline.execute()))\n for i, ((chat_id, seen_id), messages) in enumerate(chat_info):\n if not messages:\n continue\n messages[:] = map(json.loads, messages)\n # 读到的最新消息\n seen_id = messages[-1]['id']\n # 更新 chat 中的最新消息 id\n conn.zadd('chat:' + chat_id, recipient, seen_id)\n # 获取 chat 中所有人都阅读过的消息\n min_id = conn.zrange('chat:' + chat_id, 0, 0, withscores=True)\n # 更新 seen 中的最新消息 id\n pipeline.zadd('seen:' + recipient, chat_id, seen_id)\n if min_id:\n # 从 msgs 删除所有人都阅读过的消息\n pipeline.zremrangebyscore('msgs:' + chat_id, 0, min_id[0][1])\n # 重新格式化 chat_info\n chat_info[i] = (chat_id, messages)\n pipeline.execute()\n return chat_info\n\n\ndef join_chat(conn: redis.Redis, chat_id, user):\n # 加入群组时最新的消息 ID\n message_id = int(conn.get('ids:msg:' + chat_id))\n pipeline = conn.pipeline(True)\n pipeline.zadd('chat:' + chat_id, user, message_id)\n pipeline.zadd('seen:' + user, chat_id, message_id)\n pipeline.execute()\n\n\ndef leave_chat(conn: redis.Redis, chat_id, user):\n pipeline = conn.pipeline(True)\n pipeline.zrem('chat:' + chat_id, user)\n pipeline.zrem('seen:' + user, chat_id)\n # 查询群组剩余成员数量\n pipeline.zcard('chat:' + chat_id)\n if not pipeline.execute()[-1]:\n # 删除群组\n pipeline.delete('msgs:' + chat_id)\n pipeline.delete('ids:msg:' + chat_id)\n pipeline.execute()\n else:\n # 找出被所有成员阅读过的消息\n oldest = conn.zrange('chat:' + chat_id, 0, 0, withscores=True)\n conn.zremrangebyscore('msgs:' + chat_id, 0, oldest[0][1])\n","sub_path":"Redis/group.py","file_name":"group.py","file_ext":"py","file_size_in_byte":3688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"490623787","text":"import sys\r\nimport logging\r\nimport httplib2\r\nfrom mimetypes import guess_type\r\nfrom apiclient.discovery import build\r\nfrom apiclient.http import MediaFileUpload\r\nfrom apiclient.errors import ResumableUploadError\r\nfrom oauth2client.client import OAuth2WebServerFlow\r\nfrom oauth2client.file import Storage\r\n\r\n\r\nlogging.basicConfig(level=\"ERROR\")\r\ntoken_file = sys.path[0] + '/auth_token.txt'\r\nCLIENT_ID = '669177415122-aan128gfm671kjq5mieai6d6qfkdhus4.apps.googleusercontent.com'\r\nCLIENT_SECRET = 
'Z_3OVkwt_Dp7TrvXpuq7SFW2'\r\nOAUTH_SCOPE = 'https://www.googleapis.com/auth/drive.file'\r\nREDIRECT_URI = 'urn:ietf:wg:oauth:2.0:oob'\r\n\r\ndef file_ops(file_path):\r\n mime_type = guess_type(file_path)[0]\r\n mime_type = mime_type if mime_type else 'text/plain'\r\n file_name = file_path.split('/')[-1]\r\n return file_name, mime_type\r\n\r\ndef create_token_file(token_file):\r\n flow = OAuth2WebServerFlow(\r\n CLIENT_ID,\r\n CLIENT_SECRET,\r\n OAUTH_SCOPE,\r\n redirect_uri=REDIRECT_URI\r\n )\r\n authorize_url = flow.step1_get_authorize_url()\r\n print('Go to the following link in your browser: ' + authorize_url)\r\n code = input('Enter verification code: ').strip()\r\n credentials = flow.step2_exchange(code)\r\n storage = Storage(token_file)\r\n storage.put(credentials)\r\n return storage\r\n\r\ndef createFolder(folderName,folder_id=None):\r\n http = authorize(\"./auth_token.txt\", None)\r\n drive_service = build('drive', 'v3', http=http)\r\n body = {\r\n 'name': folderName,\r\n 'mimeType': \"application/vnd.google-apps.folder\"\r\n }\r\n if folder_id:\r\n body['parents']=[{'id':folder_id}]\r\n root_folder = drive_service.files().create(body = body).execute()\r\n return root_folder['id']\r\n\r\ndef authorize(token_file, storage):\r\n if storage is None:\r\n storage = Storage(token_file)\r\n credentials = storage.get()\r\n http = httplib2.Http()\r\n credentials.refresh(http)\r\n http = credentials.authorize(http)\r\n return http\r\n\r\n\r\ndef upload_file(file_path, file_name, mime_type,folder_id=None):\r\n drive_service = build('drive', 'v2', http=http)\r\n media_body = MediaFileUpload(file_path,mimetype=mime_type,resumable=True)\r\n body = {\r\n 'title': file_name,\r\n 'description': 'backup',\r\n 'mimeType': mime_type\r\n }\r\n if folder_id:\r\n body['parents']=[{'id':folder_id}]\r\n permissions = {\r\n 'role': 'reader',\r\n 'type': 'anyone',\r\n 'value': None,\r\n 'withLink': True\r\n }\r\n file = drive_service.files().insert(body=body, media_body=media_body).execute()\r\n drive_service.permissions().insert(fileId=file['id'], body=permissions).execute()\r\n file = drive_service.files().get(fileId=file['id']).execute()\r\n download_url = file.get('webContentLink')\r\n return download_url\r\n\r\nhttp=None\r\ndef getLink(file_path,folder_id):\r\n global http\r\n try:\r\n with open(file_path) as f: \r\n pass\r\n except IOError as e:\r\n print(e)\r\n sys.exit(1)\r\n try:\r\n with open('./auth_token.txt') as f: pass\r\n except IOError:\r\n http = authorize(token_file, create_token_file(token_file))\r\n http = authorize(\"./auth_token.txt\", None)\r\n file_name, mime_type = file_ops(file_path)\r\n try:\r\n return upload_file(file_path, file_name, mime_type,folder_id)\r\n except ResumableUploadError as e:\r\n print(\"Error occured while first upload try:\", e)\r\n print(\"Trying one more time.\")\r\n return upload_file(file_path, file_name, mime_type,folder_id)\r\n","sub_path":"dirve.py","file_name":"dirve.py","file_ext":"py","file_size_in_byte":3550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"604927438","text":"####REGLAS\n#Casilla ocupada, las siguientes en la misma condicion >2=celula muerta\n#Casilla ocupada, nro de celulas <3=celula muere superpoblacion\n#Casilla libre, =3 crelulas adyacentes = celula viva sgte turno\n\nfrom random import randint\n\ndef definircelda():\n numero = randint(0,1)\n return numero\n\ndef crearmatriz(size):\n matriz = []\n for fila in range(size):\n matriz.append([])\n for columna in range(size):\n 
matriz[fila].append(definircelda())\n return matriz\n\ndef printmatriz(matriz):\n for fila in (matriz):\n print(fila)\n\nmatriz = crearmatriz(5)\nmatriz[0][1] = 1\nprintmatriz(matriz)\n\n\nmatriz = crearmatriz(5)\ndef modifmatriz(matriz,fila,columna):\n matriz[fila][columna]=\"-\"\n return matriz\n\nmodifmatriz(matriz,2,2)\nprintmatriz(matriz)\n\ndef vidacelula(celula):\n if celula==1:\n return True\n else:\n return False\n\ndef cantvidacelula(matriz):\n size = len(matriz)\n contar = 0\n for fila in range(size):\n for columna in range(size):\n a= vidacelula(matriz[fila][columna])\n if a==True:\n contar = contar + 1\n return contar\n\nmatriz = crearmatriz(5)\nprintmatriz(matriz)\nprint(cantvidacelula(matriz),\"celulas vivas\")\n","sub_path":"juegodelavida3.py","file_name":"juegodelavida3.py","file_ext":"py","file_size_in_byte":1236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"252078437","text":"# -*- coding: utf-8 -*-\n\nimport sys\nimport os\nimport glob\nimport string\n\nfrom PIL import Image\n\n\n_, parentpath, ignorefile = sys.argv\ndirlist = os.listdir(parentpath)\nfor f_ignore in ignorefile.split(','):\n try:\n dirlist.remove(f_ignore)\n except:\n pass\nfor dirname in dirlist:\n os.chdir(parentpath + dirname + '/')\n files = glob.glob(u'*.png')\n if files:\n for f_name in files:\n try:\n original = Image.open(f_name).convert('RGBA')\n except Exception as e:\n print('ERROR> open')\n print(e)\n try:\n alphamask = original.split()[3]\n except Exception as e:\n print('ERROR> split')\n print(e)\n try:\n bgwhite = Image.new('RGB', original.size, (255,255,255))\n except Exception as e:\n print('ERROR> bg new')\n print(e)\n try:\n bgwhite.paste(original, None, alphamask)\n except Exception as e:\n print('ERROR> paste')\n print(e)\n try:\n bgwhite.save(f_name, quality=100)\n except Exception as e:\n print('ERROR> save')\n print(e)\n","sub_path":"stampwhiteback.py","file_name":"stampwhiteback.py","file_ext":"py","file_size_in_byte":1277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"547248066","text":"import datetime\nfrom astral import Astral\nimport requests\ncity_name = 'Topeka'\na = Astral()\na.solar_depression = 'civil'\ncity = a[city_name]\n\nsun = city.sun()\n\nsunrise = sun['sunrise']\nsunset = sun['sunset']\nmoon_phase = city.moon_phase()\n\nif moon_phase == 0:\n\tmoon_phasestr = \"New Moon\"\nelif moon_phase >= 1 and moon_phase < 7:\n\tmoon_phasestr = \"Waxing Crescent\"\nelif moon_phase == 7:\n\tmoon_phasestr = \"First Quarter\"\nelif moon_phase >=8 and moon_phase < 14:\n\tmoon_phasestr = \"Waxing Gibbous\"\nelif moon_phase == 14:\n\tmoon_phasestr = \"Full\"\nelif moon_phase > 14 and moon_phase <21:\n\tmoon_phasestr = \"Waning Gibbous\"\nelif moon_phase == 21:\n\tmoon_phasestr = \"Third Quarter\"\nelif moon_phase > 21:\n\tmoon_phasestr = \"Waning Crescent\"\n\n\nsunrise = sunrise.strftime(\"%I:%M %p\")\nsunset = sunset.strftime(\"%I:%M %p\")\n\n\n\nsunriseurl = \"http://localhost:3030/widgets/sunrise\"\n\nsunrisetext='{ \"auth_token\": \"YOUR_AUTH_TOKEN\", \"value\": \"' + sunrise + '\" }'\n\n\nsunseturl = \"http://localhost:3030/widgets/sunset\"\n\nsunsettext='{ \"auth_token\": \"YOUR_AUTH_TOKEN\", \"value\": \"' + sunset + '\" }'\n\nmoonurl = \"http://localhost:3030/widgets/moonphase\"\n\nmoontext='{ \"auth_token\": \"YOUR_AUTH_TOKEN\", \"value\": \"' + moon_phasestr + '\" }'\n\n\nresult=requests.post(sunseturl, data = 
sunsettext)\n\n\nresult=requests.post(sunriseurl, data = sunrisetext)\nresult=requests.post(moonurl, data = moontext)\n","sub_path":"bin/astronomy.py","file_name":"astronomy.py","file_ext":"py","file_size_in_byte":1362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"40845973","text":"#iQuant3D-termial (ZEBRA) March 2, 2020\nimport re\nimport os\nimport os.path\nimport sys\nimport glob\nimport csv\nimport xlrd\nimport shutil\nimport pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport scipy.ndimage as ndimage\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import LogNorm\nfrom statistics import mean, median,variance,stdev\nfrom PIL import Image\nfrom skimage import data\nfrom sklearn.cluster import KMeans\nfrom scipy.signal import argrelmax\nfrom scipy import fftpack\n\nclass pycolor:\n BLACK = '\\033[30m'\n RED = '\\033[31m'\n GREEN = '\\033[32m'\n YELLOW = '\\033[33m'\n BLUE = '\\033[34m'\n PURPLE = '\\033[35m'\n CYAN = '\\033[36m'\n WHITE = '\\033[37m'\n END = '\\033[0m'\n BOLD = '\\038[1m'\n UNDERLINE = '\\033[4m'\n INVISIBLE = '\\033[08m'\n REVERCE = '\\033[07m'\n\nclass iq3t():\n def __init__(self,folder,standard_element,washout=20,threshold=10000,split=5,band_width=100):\n print('[ '+pycolor.YELLOW+'Welcome'+pycolor.END+' ] iQuant3D-terminal (ZEBRA)')\n self.standard_element = standard_element\n self.washout = washout\n self.folder = os.getcwd()+'/'+folder\n self.band_width = band_width\n self.noise_cut_factor = 3\n self.elements = []\n self.threshold = threshold\n self.vmin = 0\n self.vmax = 0\n self.frag = 0\n self.split = split\n\n def get_element_list(self,filepath):\n print('[ '+pycolor.YELLOW+'Processing'+pycolor.END+' ] '+filepath)\n elements = pd.read_csv(filepath,skiprows=13,header=None,dtype='str',low_memory=False)[0:1]\n names = [str(elements[i][0]).split('|')[0].replace(' ','') for i in range(len(elements.columns))]\n self.elements = names[1:len(names)-1]\n #print(names)\n return names[1:len(names)-1]\n\n def noise_cut(self,n,data):\n y = []\n for i in range(n):y.append(0)\n for i in range(n,len(data)-n):\n if data[i-n]-data[i+n] == 0:y.append(0)\n else:y.append(data[i])\n for i in range(n):y.append(0)\n return y\n\n def time_stamp(self,filepath,standard_element):\n ts = []\n elements = pd.read_csv(filepath,skiprows=13,header=None,dtype='str',low_memory=False)[0:1]\n names = [str(elements[i][0]).split('|')[0].replace(' ','') for i in range(len(elements.columns))]\n #print(names)\n df = pd.read_csv(filepath,skiprows=15,names=names,low_memory=False)\n #print(df)\n frag = self.threshold\n pco_std = self.noise_cut(self.noise_cut_factor,df[standard_element])\n count,i,i_init,linenum = -1E5,0,0,0\n for t in pco_std:\n if t > frag:\n if i_init == 0:\n i_init = i\n count = 0\n if t < frag:\n count += 1\n if count >= self.washout:\n x = df['Time'][i_init-1 :i-self.washout-1]\n y = df[standard_element][i_init-1 :i-self.washout-1]\n if len(y) > 50:\n ts.append([x.min(),x.max()])\n i_init = 0\n linenum += 1\n count = 0\n i += 1\n width,fixed_ts = [],[]\n for i in ts:\n width.append(i[1]-i[0])\n front_anchor = []\n for i in range(len(ts)-1):\n front_anchor.append(ts[i+1][0]-ts[i][0])\n peak_span = mean(front_anchor)\n t = int(ts[0][0])\n for i in ts:\n fixed_ts.append([t-2,t+int(mean(width))+2])\n t += peak_span\n #return fixed_ts\n return ts\n\n def time_stamp_zebra(self,filepath,standard_element):\n elements = pd.read_csv(filepath,skiprows=13,header=None,dtype='str',low_memory=False)[0:1]\n names = 
[str(elements[i][0]).split('|')[0].replace(' ','') for i in range(len(elements.columns))]\n #print(names)\n df = pd.read_csv(filepath,skiprows=15,names=names,low_memory=False)\n frag = self.threshold\n times,elms,ts = [],[],[]\n state = 0\n for i in range(len(df[standard_element])):\n if state == 0 and df[standard_element][i] > frag:\n state = 1\n t,e = [],[]\n if state == 1:\n t.append(df['Time'][i])\n #print(df['Time'][i])\n e.append(df[standard_element][i])\n #times.append(df['Time'][i])\n #elms.append(df['53Cr'][i])\n if state == 1 and df[standard_element][i] < frag:\n #ts.append(pd.Series(t).mean())\n\n #elms.append(e)\n #t,e = [],[]\n #plt.plot(t,e,color='red')\n #print(t.mean())\n if len(t) > self.split:\n times.append(pd.Series(t).mean())\n #plt.plot(t,e,color='red')\n state = 0\n #print(state)\n #for i in range(len([df['53Cr'])):\n for i in range(len(times)-1):\n if i % 2 == 0:\n ts.append([times[i],times[i+1]])\n #print(times)\n #print(len(ts))\n \"\"\"\n time = df[df['53Cr'] > frag]['Time']\n elm = df[df['53Cr'] > frag]['53Cr']\n \"\"\"\n #plt.plot(df['Time'],df['53Cr'])\n #plt.scatter(times,elms)\n #plt.show()\n return ts\n\n def iq3_imaging(self,filepath,standard_element,imaging_element,time_stamp):\n elements = pd.read_csv(filepath,skiprows=13,header=None,low_memory=False)[0:1]\n names = [str(elements[i][0]).split('|')[0].replace(' ','') for i in range(len(elements.columns))]\n df = pd.read_csv(filepath,skiprows=15,names=names,low_memory=False)\n\n #peak_analysis\n target = imaging_element\n merged_line = pd.DataFrame()\n fig = plt.figure(figsize=(15,3))\n ax = fig.add_subplot(111)\n plt.rcParams['lines.linewidth'] = 0.3\n plt.plot(df['Time'],df[target],color='black',linewidth=0.3)\n linenum = 0\n for tsp in time_stamp:\n y = df.query('%d < Time < %f' % (tsp[0],tsp[1]))[target]\n merged_line['line'+str(linenum)] = pd.Series(list(y))\n ax.axvspan(tsp[0],tsp[1],color = \"lightgray\")\n linenum += 1\n\n #plt.show()\n outname = filepath.split('.')[0]+'_'+imaging_element+'_signal.pdf'\n print('[ '+pycolor.GREEN+'Generate'+pycolor.END+' ] '+outname)\n plt.savefig(outname)\n print('[ '+pycolor.BLUE+'Success'+pycolor.END+' ] '+outname)\n plt.close()\n\n outname = filepath.split('.')[0]+'_'+imaging_element+'.xlsx'\n print('[ '+pycolor.GREEN+'Generate'+pycolor.END+' ] '+outname)\n merged_line.T.to_excel(outname, sheet_name=imaging_element)\n print('[ '+pycolor.BLUE+'Success'+pycolor.END+' ] '+outname)\n backsignal = 1E4\n merged_line = merged_line + backsignal\n\n #plt.figure()\n sns.set()\n plt.style.use('dark_background')\n #grid_kws = {\"height_ratios\": (.9, .05), \"hspace\": .1}\n #fig, (ax, cbar_ax) = plt.subplots(2, gridspec_kw=grid_kws, figsize=(9,9))\n plt.figure()\n ax = plt.subplot(111)\n #GaussianBlur\n #img_raw = cv2.imread(merged_line.T, 1)\n #img = cv2.cvtColor(img_raw, cv2.COLOR_BGR2GRAY)\n\n #sns.heatmap(merged_line.T,cmap='gnuplot2',xticklabels=False,yticklabels=False,norm=LogNorm(vmin=merged_line.values.min(), vmax=merged_line.values.max()),ax=ax,cbar_ax=cbar_ax,cbar_kws={\"orientation\": \"horizontal\"})\n #sns.heatmap(merged_line.T,cmap='jet',xticklabels=False,yticklabels=False,norm=LogNorm(vmin=merged_line.values.min(), vmax=merged_line.values.max()),ax=ax,cbar=False)\n sns.heatmap(merged_line.T,cmap='jet',xticklabels=False,yticklabels=False,norm=LogNorm(),ax=ax,cbar=False,robust=True)\n #ax.set_title(imaging_element,color='white',fontsize=18, fontweight=\"bold\")\n #outname = filepath.split('.')[0]+'_'+imaging_element+'_mapping.pdf'\n #print('[ 
'+pycolor.GREEN+'Generate'+pycolor.END+' ] '+outname)\n #plt.savefig(outname)\n #print('[ '+pycolor.BLUE+'Success'+pycolor.END+' ] '+outname)\n plt.tight_layout()\n outname = filepath.split('.')[0]+'_'+imaging_element+'_mapping.png'\n print('[ '+pycolor.GREEN+'Generate'+pycolor.END+' ] '+outname)\n plt.savefig(outname)\n print('[ '+pycolor.BLUE+'Success'+pycolor.END+' ] '+outname)\n plt.style.use('default')\n plt.close('all')\n\n def iq3_imaging_rapid(self,filepath,standard_element,imaging_element,time_stamp):\n elements = pd.read_csv(filepath,skiprows=13,header=None)[0:1]\n names = [str(elements[i][0]).split('|')[0].replace(' ','') for i in range(len(elements.columns))]\n df = pd.read_csv(filepath,skiprows=15,names=names)\n #peak_analysis\n target = imaging_element\n merged_line = pd.DataFrame()\n linenum = 0\n for tsp in time_stamp:\n y = df.query('%d < Time < %f' % (tsp[0],tsp[1]))[target]\n merged_line['line'+str(linenum)] = pd.Series(list(y))\n linenum += 1\n\n merged_line = merged_line +1E5\n\n sns.set()\n plt.style.use('dark_background')\n sns.heatmap(merged_line.T,cmap='jet',xticklabels=False,yticklabels=False,norm=LogNorm(vmin=merged_line.values.min(), vmax=merged_line.values.max()),cbar=False)\n outname = filepath.split('.')[0]+'_'+imaging_element+'_mapping.png'\n print('[ '+pycolor.GREEN+'Generate'+pycolor.END+' ] '+outname)\n plt.tight_layout()\n plt.savefig(outname,facecolor=\"black\", edgecolor=\"black\")\n print('[ '+pycolor.BLUE+'Success'+pycolor.END+' ] '+outname)\n plt.close()\n\n def finishing(self):\n print('[ '+pycolor.YELLOW+'Moving '+pycolor.END+' ] *.xlsx > result')\n dirname = self.folder+'/result'\n if os.path.isdir(dirname) == False:os.mkdir(dirname)\n os.system('mv '+self.folder+'/*.xlsx '+self.folder+'/result')\n\n print('[ '+pycolor.YELLOW+'Moving '+pycolor.END+' ] *signal.pdf > signal')\n dirname = self.folder+'/signal'\n if os.path.isdir(dirname) == False:os.mkdir(dirname)\n os.system('mv '+self.folder+'/*signal.pdf '+self.folder+'/signal')\n\n #print('[ '+pycolor.GREEN+'Moving '+pycolor.END+' ] *mapping.pdf > mapping')\n #dirname = os.getcwd()+'/mapping'\n #if os.path.isdir(dirname) == False:os.mkdir(dirname)\n #os.system('mv *mapping.pdf mapping')\n\n print('[ '+pycolor.YELLOW+'Moving '+pycolor.END+' ] *mapping.png > mapping')\n dirname = self.folder+'/mapping'\n if os.path.isdir(dirname) == False:os.mkdir(dirname)\n os.system('mv '+self.folder+'/*mapping.png '+self.folder+'/mapping')\n\n def clustering(self,cnumber):\n if os.path.isdir(self.folder+'/mapping_group') == True:\n os.system('rm -rf '+self.folder+'/mapping_group')\n print('[ '+pycolor.YELLOW+'Remove'+pycolor.END+' ] data/mapping_group')\n\n for path in os.listdir(self.folder+'/mapping'):\n #path = path.split('.')[0]\n if os.path.isdir(self.folder+'/mapping_convert') == False:os.mkdir(self.folder+'/mapping_convert')\n #if os.path.isdir(self.folder+'/mapping_group') == False:os.mkdir(self.folder+'/mapping_group')\n img = Image.open(f'{self.folder}/mapping/{path}')\n img = img.convert('RGB')\n img_resize = img.resize((200, 200))\n path = path.split('.')[0]\n img_resize.save(f'{self.folder}/mapping_convert/{path}.jpg')\n feature = np.array([data.imread(f'{self.folder}/mapping_convert/{path}') for path in os.listdir(self.folder+'/mapping_convert')])\n feature = feature.reshape(len(feature), -1).astype(np.float64)\n model = KMeans(n_clusters=cnumber).fit(feature)\n labels = model.labels_\n for label, path in zip(labels, os.listdir(self.folder+'/mapping_convert')):\n 
os.makedirs(f'{self.folder}/mapping_group/{label}', exist_ok=True)\n shutil.copyfile(f\"{self.folder}/mapping/{path.replace('.jpg', '.png')}\", f\"{self.folder}/mapping_group/{label}/{path.replace('.jpg', '.png')}\")\n print('[ '+pycolor.BLUE+'Clustering'+pycolor.END+' ] '+ path + ' > ' + str(label))\n\n def multi_layer(self,element):\n with np.errstate(invalid='ignore'):\n outname = self.folder+'/'+element+'_3D.png'\n print('[ '+pycolor.GREEN+'Generate'+pycolor.END+' ] '+outname)\n datalist = sorted(glob.glob(self.folder+'/result/*'+element+'.xlsx'))\n input_book = pd.read_excel(datalist[0], index_col=0)\n vmin = input_book.values.min()\n vmax = input_book.values.max()\n plt.figure(figsize=(6*len(datalist),6))\n\n for i in range(len(datalist)):\n plt.subplot(1,len(datalist),i+1)\n input_book = pd.read_excel(datalist[i], index_col=0)\n sns.set()\n plt.style.use('dark_background')\n input_book = input_book + 5E3\n if self.frag == 0:\n self.vmin = vmax\n self.vmax = vmax\n self.frag = 1\n #sns.heatmap(input_book,cmap='jet',xticklabels=False,yticklabels=False,norm=LogNorm(vmin=self.vmin, vmax=self.vmax),cbar=False)\n sns.heatmap(input_book,cmap='jet',xticklabels=False,yticklabels=False,norm=LogNorm(),cbar=False,robust=True)\n print('[ '+pycolor.GREEN+'Sectioning'+pycolor.END+' ] '+datalist[i])\n plt.title(element+' (Layer='+datalist[i].split('/')[-1].split('_')[0]+')',color='white',fontsize=18, fontweight=\"bold\")\n plt.tight_layout()\n plt.savefig(outname)\n print('[ '+pycolor.BLUE+'Success'+pycolor.END+' ] '+outname)\n dirname = self.folder+'/multi_layer'\n if os.path.isdir(dirname) == False:os.mkdir(dirname)\n os.system('mv '+self.folder+'/*.png '+self.folder+'/multi_layer')\n print('[ '+pycolor.YELLOW+'Moving '+pycolor.END+' ] '+element+'_3D.png > multi_layer')\n plt.close('all')\n\n def normalize(self,element):\n datalist = glob.glob(self.folder+'/*.csv')\n #[self.get_element_list(filepath) for filepath in datalist]\n for target in self.elements:\n #outname = self.folder+'/mapping/'+target+'_per'+element+'.png'\n #print(outname)\n #print('[ '+pycolor.GREEN+'Generate'+pycolor.END+' ] '+outname)\n print('[ '+pycolor.GREEN+'Normalizing'+pycolor.END+' ] '+self.folder+'/mapping/*'+target+'_mapping.png')\n datalist_c = sorted(glob.glob(self.folder+'/result/*'+element+'.xlsx'))\n datalist_e = sorted(glob.glob(self.folder+'/result/*'+target+'.xlsx'))\n for i in range(len(datalist_c)):\n input_book_c = pd.read_excel(datalist_c[i], index_col=0)\n input_book_e = pd.read_excel(datalist_e[i], index_col=0)\n nimage = input_book_e/input_book_c\n outname = self.folder+'/mapping/'+datalist_c[i].split('/')[-1].split('_')[0]+'_'+target+'_per'+element+'.png'\n #outname = datalist_c[i].split('/')[-1].split('_')[0] + '_' + outname\n #print(outname)\n sns.heatmap(nimage,cmap='jet',xticklabels=False,yticklabels=False,cbar=True,robust=True)\n plt.tight_layout()\n plt.savefig(outname)\n plt.close('all')\n print('[ '+pycolor.BLUE+'Success'+pycolor.END+' ] '+outname)\n #plt.show()\n\n def finish_code(self):\n print('[ '+pycolor.YELLOW+'Shutdown'+pycolor.END+' ] Thank you for always using iQuant3D-terminal (ZEBRA).')\n\n def run(self,norm='13C'):\n with np.errstate(invalid='ignore'):\n datalist = glob.glob(self.folder+'/*.csv')\n #ts = self.time_stamp(datalist[0],self.standard_element)\n ts = self.time_stamp_zebra(datalist[0],self.standard_element)\n [[self.iq3_imaging(filepath,self.standard_element, ie, ts) for ie in self.get_element_list(filepath)] for filepath in datalist]\n self.finishing()\n 
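# A self-contained sketch of the seaborn heatmap pattern this class uses for every element
# map: a 2D intensity table rendered on a logarithmic color scale with ticks and colorbar
# suppressed. The 20x20 log-normal intensities and the output file name are made up.
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm

intensities = pd.DataFrame(np.random.lognormal(mean=8, sigma=1, size=(20, 20)))
sns.heatmap(intensities, cmap='jet', xticklabels=False, yticklabels=False, norm=LogNorm(), cbar=False)
plt.tight_layout()
plt.savefig('example_mapping.png')
plt.close('all')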
self.normalize(norm)\n\n def run_rapid(self):\n datalist = glob.glob(self.folder+'/*.csv')\n #ts = self.time_stamp(datalist[0],self.standard_element)\n ts = self.time_stamp_zebra(datalist[0],self.standard_element)\n [[self.iq3_imaging_rapid(filepath,self.standard_element, ie, ts) for ie in self.get_element_list(filepath)] for filepath in datalist]\n self.finishing()\n\n def run_test(self):\n datalist = glob.glob(self.folder+'/*.csv')\n #ts = self.time_stamp(datalist[0],self.standard_element)\n ts = self.time_stamp_zebra(datalist[0],self.standard_element)\n #print(ts)\n self.iq3_imaging(datalist[0],self.standard_element, self.standard_element, ts)\n dirname = self.folder+'/test_scan'\n if os.path.isdir(dirname) == False:os.mkdir(dirname)\n os.system('mv '+self.folder+'/*.xlsx '+self.folder+'/test_scan')\n os.system('mv '+self.folder+'/*signal.pdf '+self.folder+'/test_scan')\n os.system('mv '+self.folder+'/*mapping.png '+self.folder+'/test_scan')\n print('[ '+pycolor.YELLOW+'Checking'+pycolor.END+' ] Please check /data/test_scan folder.')\n","sub_path":"iQuant3D-terminal/iquant3d_terminal.py","file_name":"iquant3d_terminal.py","file_ext":"py","file_size_in_byte":17360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"156826864","text":"from django.core.exceptions import PermissionDenied\nfrom django.shortcuts import get_object_or_404\nfrom django.contrib.auth.decorators import login_required\nfrom debates.models import Topic\nfrom functools import wraps\n\n\ndef mod_required(topic=False):\n def methodwrap(function):\n @wraps(function)\n def wrap(request, *args, **kwargs):\n user = request.user\n if (not callable(topic)) and topic:\n topica = get_object_or_404(Topic, args[0])\n ismod = user.ismodof(topica)\n else:\n ismod = user.ismod()\n if ismod:\n return function(request, *args, **kwargs)\n else:\n raise PermissionDenied\n return login_required(wrap)\n if callable(topic):\n return methodwrap(topic)\n else:\n return methodwrap\n\n\ndef gmod_required(function):\n @wraps(function)\n def wrap(request, *args, **kwargs):\n if request.user.isgmod():\n return function(request, *args, **kwargs)\n else:\n raise PermissionDenied\n return login_required(wrap)\n\n\ndef admin_required(function):\n @wraps(function)\n def wrap(request, *args, **kwargs):\n if request.user.isadmin():\n return function(request, *args, **kwargs)\n else:\n raise PermissionDenied\n return login_required(wrap)\n","sub_path":"accounts/decorators.py","file_name":"decorators.py","file_ext":"py","file_size_in_byte":1379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"176538974","text":"import dbtext, os\n\nimport sqlite3\n\nfrom customer_data_access import CustomerDataAccess\nfrom customer_sync import CustomerSync, ConflictException\nfrom model_objects import ExternalCustomer\n\n\ndef main():\n testdbname = \"ttdb_\" + str(os.getpid()) # some temporary name not to clash with other tests\n with dbtext.Sqlite3_DBText(testdbname) as db:\n # Arrange\n db.create(sqlfile=\"empty_db.sql\")\n\n # Act\n with open(\"incoming.json\", \"r\") as f:\n externalRecord = ExternalCustomer.from_json(f.read())\n\n conn = sqlite3.connect(f\"{testdbname}.db\")\n customerSync = CustomerSync(CustomerDataAccess(conn))\n\n try:\n customerSync.syncWithDataLayer(externalRecord)\n except ConflictException as e:\n print(f\"ConflictException: {e}\")\n\n # Assert\n db.dumptables(\"csync\", \"*\", usemaxcol=\"\")\n\nif __name__ == \"__main__\":\n 
main()","sub_path":"python/test/texttest_fixture.py","file_name":"texttest_fixture.py","file_ext":"py","file_size_in_byte":913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"30961908","text":"# coding: utf-8\n# Copyright 2017 Vauxoo (https://www.vauxoo.com) \n# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl).\n\"\"\"File to inherit res_partnet to get name & address to partner with your RUC\n\"\"\"\nimport json\nfrom urllib.parse import urlencode\nimport requests\n\n#SE AÑADIO _,\nfrom odoo import _, api, fields, models\n\nfrom odoo.exceptions import Warning\n\n\n\nclass ResPartner(models.Model):\n \"\"\"Inherit res.partner to get your name & address from the xml returned by\n SUNAT\n \"\"\"\n _inherit = 'res.partner'\n\n registration_name = fields.Char('Registration Name', size=128, index=True, )\n catalog_06_id = fields.Many2one('einvoice.catalog.06','Tipo Doc.', index=True)\n state = fields.Selection([('habido','Habido'),('nhabido','No Habido')],'State')\n\n\n @api.onchange('vat')\n def onchange_vat(self): \n if self.catalog_06_id and self.catalog_06_id.code == '1':\n self.conexion_validadorDNI()\n if self.catalog_06_id and self.catalog_06_id.code == '6':\n self.conexion_validadorRUC()\n\n#=============================================VALIDADOR DE DNI================================================================================\n @api.one\n def conexion_validadorDNI(self): \n if not self.vat:\n return False\n if self.catalog_06_id and self.catalog_06_id.code == '1':\n #Valida DNI\n if self.vat and len(self.vat) != 8:\n raise Warning('El Dni debe tener 8 caracteres')\n else:\n #DECLARO LA URL DE LA RENIEC PARA HACER LA CONSULTA POR EL DNI\n try :\n dni=self.vat\n urlapi = ('http://aplicaciones007.jne.gob.pe/srop_publico/Consulta/Afiliado/GetNombresCiudadano?DNI=').strip()\n urlapi = urlapi+self.vat\n response = requests.get(urlapi)\n result = response.text\n #ASIGNANDO LOS DATOS OBTENIDOS A LA VARIABLE NOMBRE \n if result: \n data = result.split('|')\n if data[0]:\n self.name = data[0]+' '+data[1]+' '+data[2]\n self.street=\"\"\n self.city=\"\" \n else: \n raise Warning(\"Dni incorrecto\")\n except:\n raise Warning('No se pudo procesar la consulta:'\n '\\n\\n* Problemas al conectar a Reniec\\n* Los Datos son incorrectos'\n '\\n\\n Por favor, verificar los datos. 
')\n \n \n\n#==============================================VALIDADOR DE RUC===============================================================================\n @api.one\n def conexion_validadorRUC(self): \n if not self.vat:\n return False\n elif self.catalog_06_id and self.catalog_06_id.code == '6':\n # Valida RUC\n if self.vat and len(self.vat) != 11:\n raise Warning('El Ruc debe tener 11 caracteres')\n else:\n ruc=self.vat \n # param = self.env['ir.config_parameter']\n #DECLARACION DEL TOKEN\n params = {\n 'token': ('1230f56556b7d80106bc80fc2d6d545003bf18e34fd5d234dfb10bb8780ca1f7').strip(),\n }\n #DECLARACION DEL URL DE LA SUNAT\n api_url = ('http://sunat.vauxoo.com').strip()\n if not (all(params.values()) and api_url):\n return\n params.update({\n \n 'rfc': ruc \n \n })\n try:\n #SE UNE EN UNA VARIABLE EL URL Y EL TOKEN\n base_url = '%(url)s/rfc?%(params)s' % {\n 'url': api_url,\n 'params': urlencode(params)}\n client = requests.get(base_url)\n #OBTENIENDO DATOS DEL CLIENTE EN LA VARIABLE RESULT\n result = client.ok and json.loads(client.text) or {\n 'error_message': True} \n #MUESTRA UN MENSAJE POR PROBLEMAS DE CONEXION CON LA SUNAT\n except BaseException as e:\n result = {'error_message': e}\n if result.get('error_message'):\n raise Warning('No se pudo procesar la consulta:'\n '\\n\\n* Problemas al conectar a Sunat\\n* Los Datos son incorrectos'\n '\\n\\n Por favor, verificar los datos. ')\n #EL RESUL SE ALMACENA EN UN DICCIONARIO \n vals=[]\n vals.append(result)\n #SE REALIZA UN FOR LOS DATOS QUE TRAE PARA CADA CAMPO\n for r in vals: \n self.name=r['name']\n self.street=r['street']\n self.zip=r['city']\n \n @api.model\n def create_from_ui(self, partner):\n part_id = super(ResPartner, self).create_from_ui(partner)\n partner_id = self.browse(part_id)\n if partner_id.catalog_06_id.code == '1':\n partner_id.conexion_validadorDNI()\n if partner_id.catalog_06_id.code == '6':\n partner_id.conexion_validadorRUC()\n return part_id","sub_path":"odoope_ruc_validation/models/res_partner_new.py","file_name":"res_partner_new.py","file_ext":"py","file_size_in_byte":5571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"38692634","text":"#IQ Test: https://www.codewars.com/kata/552c028c030765286c00007d\n\ndef iq_test(numbers):\n numList = numbers.split()\n count = 0\n oddIndex = 0\n evenIndex = 0\n for num in numList:\n if(int(num) % 2 == 0):\n count += 1\n oddIndex = numList.index(num) + 1\n else:\n count -= 1\n evenIndex = numList.index(num) + 1\n return evenIndex if count > 0 else oddIndex\n","sub_path":"Python/IQTest.py","file_name":"IQTest.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"258477848","text":"import matplotlib as mpl\nimport matplotlib.pylab as plt\nimport numpy as np\n# pylab 은 matplotlib의 서브패키지로\n# matplotlib의 수치해석 시각화 며여을 그대로 사용할 수 있도록\n# API를 포장한 명령어 집합을 제공한다. \n\nplt.title(\"plot\") # 표 타이틀 설정 \nplt.plot([1,4,9,16],[1,2,3,4], c=\"b\",lw=5, ls=\"--\", marker = \"s\",ms =15, mec =\"g\", mew=5, mfc = \"r\")# 출력할 데이터 리스트 \n#plot c = 선색깔 , lw = 선 굵기, ls= 선 스타일, maker= 마커종류, ms = 마커크기, mec =마커 선 색깔, mew = 마커선 굵기, mfc= 마커매부 색깔 \n# 데ㅇ이터의 위치를 나타내는 기호를 마커라고 한다. 마커의 종류는 검색해 볼 것\n\nfont1 = {'family': 'NanumMyeongjo', 'size':24,'color': 'black'}\n# 각 축의 이름을 정할수도 있다. \n# 축의 이름에 대한 텍스트 설정을 정할때 위와같이 미리 딕셔너리에 넣어서 \n# fontdict이라는 파라미터로 추가한다. 
\nplt.xlabel(\"엑스 축\",fontdict = font1)\nplt.ylabel('와이 축', fontdict=font1)\nplt.show()# 시각화 명령을 실제 차트로 렌더링 \n\n\nX = np.linspace(-np.pi, np.pi, 256)\n# x 축 범위설정 -원주율 ~ 원주율을 256 단계로 쪼개서 설정 \nc = np.cos(X) # x의 범위에 대한 코사인값 \nplt.title(\"x축과 y축의 tick label 설정\",fontdict=font1)\nplt.plot(X,c)\nplt.xticks([-np.pi,-np.pi / 2,0, np.pi / 2, np.pi],\n [r'$-\\pi', r'$-\\pi/2$',r'$0$',r'$+\\pi/2$',r'$+\\pi$'])\n # $$ 사이에 latex 표기법에 의해 수식도 넣을 수 있다. \n# x축상에서 위치표시지점\nplt.yticks([-0.2,0,0.5])\n# 위에서 지정한x틱 y틱 교차지점에 그래프에 표시된다.\nplt.show()\n# 그래프 위에 그리드 표시 \nt = np.arange(0.,5.,0.2)# 시작, 끝, 스텝 \nplt.title(\"라인 플롯에서 여러개의 선 그리기\",fontdict=font1)\nplt.plot(t,t,'r--', t,0.5*t**2,'bs:', t,0.2*t**3,'g^-')\n# x축, y축 t , 붉은 점선으로 표시, x축 t y축 t제곱*0.5 파란\nplt.show()\n\nX = np.arange(2)\nY = np.random.randint(0,20,20)\nS = np.abs(np.random.randn(20))*100\nC = np.random.randint(0,20,20)\n\nscatter = plt.scatter(X,Y,s=S,c=C, label='A')\nplt.xlim(X[0]-1,X[-1]+1)\nplt.ylim(np.min(Y-1),np.max(Y+1))\n\nplt.title('scatter',pad=10)\nplt.xlabel('X axis',labelpad=10)\nplt.ylabel('Y axis',labelpad=10)\nplt.xticks(np.linspace)\n","sub_path":"1.Python_basic/matplot_practices.py","file_name":"matplot_practices.py","file_ext":"py","file_size_in_byte":2358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"527568657","text":"#!/usr/bin/python\r\n\r\n\r\nimport collections\r\nimport getopt\r\nimport sys\r\nimport traceback\r\n\r\nfrom command_management.commander import Commander\r\nfrom utils.python.logger import report_error, throw_exception, LoggerException, report_message_with_datetime\r\n\r\n\r\n# = = = = = = = = = = = = = = = = = =\r\n\r\ndef report_usage():\r\n\tthrow_exception('Usage','\\n' +\r\n 'python run_command.py -m -n \\n' +\r\n 'python run_command.py --command --config ')\r\n\r\n\r\n# = = = = = = = = = = = = = = = = = =\r\n\r\ndef parse_args(argv):\r\n try:\r\n command_file = ''\r\n config_file = ''\r\n opts, args = getopt.getopt(argv, 'm:n:', ['command=', 'config='])\r\n for opt, arg in opts:\r\n if opt in (\"-m\", \"--command\"):\r\n command_file = arg\r\n elif opt in (\"-n\", \"--config\"):\r\n config_file = arg\r\n else:\r\n report_usage()\r\n if command_file == '' or config_file == '':\r\n report_usage()\r\n args = collections.namedtuple('Args', ['command_file', 'config_file'])\r\n return args(command_file, config_file)\r\n except getopt.GetoptError:\r\n report_usage()\r\n\r\n\r\n# = = = = = = = = = = = = = = = = = =\r\n\r\ndef main(argv):\r\n try:\r\n args = parse_args(argv)\r\n commander = Commander()\r\n commander.init_commander(args.command_file, args.config_file)\r\n commander.start_procedure('main')\r\n except LoggerException as logger_exception:\r\n report_error('main', traceback.format_exc() + logger_exception.error_message)\r\n except:\r\n report_error('main', 'unexpected error!!!\\n' + traceback.format_exc())\r\n else:\r\n report_message_with_datetime('main', 'Congrats, all modules are finished with no error.')\r\n\r\n# = = = = = = = = = = = = = = = = = =\r\n\r\nif __name__ == '__main__':\r\n main(sys.argv[1:])\r\n","sub_path":"src/run_commands.py","file_name":"run_commands.py","file_ext":"py","file_size_in_byte":1922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"37789049","text":"from django.conf.urls import patterns, url\nfrom vgicollector import views\n\nurlpatterns = patterns('',\n url(r'^register/$', views.register, name='register'),\n url(r'^exist/$', views.exist),\n # 
url(r'^register/$', views.register, name='register'),\n    url(r'^login/$', views.login),\n    url(r'^test/$', views.test),\n    url(r'^collect/$', views.vgi_collection),\n    # url(r'^addcontact/$', views.add_contact),\n    # url(r'^sendvoicemsg/$',views.send_voice_msg), \n    # url(r'^receivevoicemsg/$',views.retrieve_oldest_voice_msg), \n    # url(r'^accounts/logout/$', logout),\n    # url(r'^accounts/profile/$',views.user_profile),\n    # url(r'^accounts/modify/nickname/$',views.modify_nickname),\n    # url(r'^accounts/modify/portrait/$',views.modify_portrait),\n    # url(r'^accounts/modify/facebookid/$',views.modify_facebookID),\n    # url(r'^accounts/modify/devicetoken/$',views.modify_device_token),\n    # url(r'^accounts/findfriends/via/phone/$',views.findfriends_via_phone),\n    # url(r'^accounts/findfriends/via/facebookid/$',views.findfriends_via_facebookid),\n\n    \n)","sub_path":"vgicollector/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"209955876","text":"\"\"\"\nThe file below will read the text message and call records from the csv files.\nYou will learn more about reading files in a later lesson.\n\"\"\"\nimport csv\nwith open('texts.csv', 'r') as f:\n    reader = csv.reader(f)\n    texts = list(reader)\n\nwith open('calls.csv', 'r') as f:\n    reader = csv.reader(f)\n    calls = list(reader)\n\n\n\"\"\"\n任务0:\n?通话记录最后一条记录是什么短信记录的第一条记录是什么?\n输出信息:\n\"First record of texts, texts at time