diff --git "a/2240.jsonl" "b/2240.jsonl" new file mode 100644--- /dev/null +++ "b/2240.jsonl" @@ -0,0 +1,758 @@ +{"seq_id":"584367276","text":"import sys\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtWidgets import QWidget,QGridLayout,QLCDNumber,QSlider,QApplication,QToolTip,QPushButton\n\nclass EventUi(QWidget):\n def __init__(self):\n super().__init__()\n self.initUI()\n def initUI(self):\n lcd = QLCDNumber(self)\n sld = QSlider(Qt.Horizontal,self)\n sld.setToolTip(\"左右滑动试下\")\n btn = QPushButton(\"点我有惊喜\")\n grid = QGridLayout()\n\n grid.addWidget(lcd)\n grid.addWidget(sld)\n grid.addWidget(btn)\n\n self.setLayout(grid)\n sld.valueChanged.connect(lcd.display)\n btn.clicked.connect(lcd.hide)\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n event = EventUi()\n event.setGeometry(300,300,400,300)\n event.show()\n\n sys.exit(app.exec_())","sub_path":"event.py","file_name":"event.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"444469342","text":"import json \nimport numpy as np\nimport argparse\n\nimport sys\nsys.path.insert(0,'../..')\n\nfrom utils import general_multiplication_tensor\n\ndef mzn_data_generator(N,M,P,R,dest):\n data_dict = {}\n data_dict['N'] = N\n data_dict['M'] = M\n data_dict['P'] = P\n data_dict['R'] = R\n T = general_multiplication_tensor(N,M,P)\n data_dict['Tlist'] = T.tolist()\n\n file_name = f'fmm_{N}_{M}_{P}_{R}.json'\n file_location = dest+'/'+file_name\n\n with open(file_location, \"w\") as outfile:\n json.dump(data_dict, outfile)\n\nif __name__ == \"__main__\":\n # Parse command line arguments\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument(\"-N\", \"--N\", default=1, type=int, help=\"N\")\n parser.add_argument(\"-M\", \"--M\", default=1, type=int, help=\"M\")\n parser.add_argument(\"-P\", \"--P\", default=1, type=int, help=\"P\")\n parser.add_argument(\"-R\", \"--R\", default=1, type=int, help=\"R\")\n parser.add_argument(\"-dest\", \"--dest\", help=\"datafile destination location\")\n args = vars(parser.parse_args())\n\n mzn_data_generator(args['N'],args['M'],args['P'],args['R'], args['dest'])","sub_path":"src/model/mzn/mzn_data_generator.py","file_name":"mzn_data_generator.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"613852724","text":"class User(object):\n\t__instance=None\n\tdef __init__(self,name):\n\t\tself.name=name\n\n\tdef __new__(cls,name):\n\t\tif not cls.__instance: #保证object.__new__(cls)方法只会调用一次\n\t\t\tcls.__instance=object.__new__(cls) #单例模式2\n\t\treturn cls.__instance\n\nu1=User('job')\nu2=User('jack')\nprint(u1==u2)\nprint('u1的内存地址:%s,u2的内存地址:%s'%(id(u1),id(u2)))\n\n","sub_path":"python/Day6/单例模式2.py","file_name":"单例模式2.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"603333337","text":"#!/usr/bin/env python3\n\nimport os, sys, itertools\n\nheadword_marker='Homonym Number'\nouputfolder=\"ByHeadword\"\n# filename='nzdc_export_A.txt'\n\n# Count the arguments\narguments = len(sys.argv) - 1\n# Output argument-wise\nposition = 1\nwhile (arguments >= position):\n print (\"Parameter %i: %s\" % (position, sys.argv[position]))\n \n filename = sys.argv[position]\n dictionary_letter=filename[-5]\n position = position + 1\n\n #make sure directory exists\n 
letter_output_folder=ouputfolder+'/'+dictionary_letter\n if not os.path.exists(letter_output_folder):\n os.makedirs(letter_output_folder)\n\n headword_loc=[]\n\n linecount=0\n with open(filename) as myFile:\n for num, line in enumerate(myFile,1):\n linecount=linecount+1\n if headword_marker in line:\n headword_loc.append(num)\n\n # #print headwords\n # for loc in headword_loc:\n # print(num)\n # headword = linecache.getline(filename,loc-1)\n # print(headword)\n\n #correct_headword loc\n headword_loc = [x-2 for x in headword_loc]\n headwords=len(headword_loc)\n headword_loc.append(linecount)\n\n multiple_headword_count=0\n\n # print first headword section\n for i in range(headwords-1):\n #get the headword text\n headword_text=[]\n with open(filename,'r') as f:\n for line in itertools.islice(f,headword_loc[i],headword_loc[i+1]-1):\n headword_text.append(line)\n headword = headword_text[0].replace('/',' OR ')\n headword = \" \".join(headword.split())\n headword = headword.lower()\n outputfilename=letter_output_folder+'/'+headword\n\n #handle multiple same named headwords\n if os.path.exists(outputfilename):\n multiple_headword_count=multiple_headword_count+1\n outputfilename = outputfilename + \" entry \" +str(multiple_headword_count)\n else:\n multiple_headword_count=0\n with open(outputfilename,'x') as f:\n for line in headword_text:\n f.write(line)\n print(\"wrote: \",outputfilename)\n \n\n\n","sub_path":"db files Nov 2015/splitter.py","file_name":"splitter.py","file_ext":"py","file_size_in_byte":2168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"355291439","text":"# -*- coding: utf-8 -*- \nimport tensorflow as tf\nfrom sklearn.datasets import load_iris\nimport numpy as np\nimport pandas as pd\n \n\niris=load_iris()\niris_data=iris.data\niris_target=iris.target\n\niris_target1=pd.get_dummies(iris_target).values\nprint(iris_data.shape)\n \nX=iris_data\nprint(X.shape)\n\nx=tf.placeholder(dtype=tf.float32,shape=[None,4],name=\"input\")\ny=tf.placeholder(dtype=tf.float32,shape=[None,3],name=\"output\") #三分类\n\nw=tf.get_variable(\"weight\",shape=[4,3],dtype=tf.float32,initializer=tf.truncated_normal_initializer(stddev=0.1))\nbais=tf.get_variable(\"bais\",shape=[3],dtype=tf.float32,initializer=tf.constant_initializer(0))\ny_out=tf.nn.bias_add(tf.matmul(x,w),bais)\n\nloss=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y,logits=y_out))\n \naccuracy=tf.reduce_mean(tf.cast(tf.equal(tf.arg_max(y,1),tf.arg_max(y_out,1)),tf.float32))\ntrain_step=tf.train.AdamOptimizer().minimize(loss)\n\n\nwith tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n for i in range(3001):\n sess.run(train_step,feed_dict={x:X,y:iris_target1})\n if i%500==0:\n accuracy_print=sess.run(accuracy,feed_dict={x:X,y:iris_target1})\n print(accuracy_print)\n\n ","sub_path":"Android_Tensorflow/testTF/TF_sotmax.py","file_name":"TF_sotmax.py","file_ext":"py","file_size_in_byte":1209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"352142983","text":"import json\n\nfrom django.db import models\n# Create your models here.\nfrom django.db import models\nfrom django.conf import settings\nfrom django.contrib.auth.models import User\nfrom django.utils.safestring import mark_safe\nfrom model_utils.models import TimeStampedModel\nfrom tworaven_apps.utils.json_helper import format_json_for_admin\nfrom tworaven_apps.ta2_interfaces.static_vals import STATUS_VAL_FAILED_PRECONDITION\nfrom 
tworaven_apps.utils.view_helper import get_session_key\n\n# Create your models here.\nSERVICE_TYPE_ROOK = 'ROOK SERVICE'\nSERVICE_TYPE_D3M = 'D3M SERVICE'\nSERVICE_TYPE_NOT_SPECIFIED = 'service not specified'\nSERVICE_TYPES = (SERVICE_TYPE_ROOK,\n SERVICE_TYPE_D3M,\n SERVICE_TYPE_NOT_SPECIFIED)\nSERVICE_TYPE_CHOICES = ((x, x) for x in SERVICE_TYPES)\n\nclass ServiceCallEntry(TimeStampedModel):\n\n service_type = models.CharField(choices=SERVICE_TYPE_CHOICES,\n max_length=100)\n\n call_type = models.CharField(max_length=255,\n help_text='Name of D3M call or zeligapp')\n\n session_id = models.CharField(max_length=255,\n blank=True,\n db_index=True,\n help_text='Used for grouping calls together')\n\n user = models.ForeignKey(settings.AUTH_USER_MODEL,\n on_delete=models.CASCADE,\n blank=True,\n null=True)\n\n outgoing_url = models.URLField(blank=True)\n request_msg = models.TextField(blank=True)\n\n response_msg = models.TextField(blank=True)\n status_code = models.CharField(max_length=50, blank=True)\n success = models.BooleanField(default=False)\n\n def __str__(self):\n return '%s - (%s)' % (self.call_type, self.created)\n\n class Meta:\n ordering = ('-created',)\n verbose_name = 'Service Call Request'\n verbose_name_plural = 'Service Call Requests'\n\n def request_msg_json(self):\n return format_json_for_admin(self.request_msg)\n\n def response_msg_json(self):\n return format_json_for_admin(self.response_msg)\n\n def add_error_message(self, msg, status_code='n/a'):\n \"\"\"Shortcut to populate fields for a failed response\"\"\"\n self.success = False\n self.response_msg = msg\n self.status_code = status_code\n\n def add_success_message(self, msg, status_code=200):\n \"\"\"Shortcut to populate fields for a successful response\"\"\"\n self.success = True\n self.response_msg = msg\n self.status_code = status_code\n\n @staticmethod\n def record_d3m_call():\n return settings.RECORD_D3M_SERVICE_ROUTING\n\n @staticmethod\n def get_rook_entry(request_obj, call_type, outgoing_url, request_msg):\n \"\"\"Init ServiceCallEntry object for a ROOK call\"\"\"\n assert request_obj is not None,\\\n \"request_obj cannot be None\"\n session_id = get_session_key(request_obj)\n\n user = None\n if request_obj.user.is_authenticated:\n user = request_obj.user\n\n return ServiceCallEntry(call_type=call_type,\n service_type=SERVICE_TYPE_ROOK,\n outgoing_url=outgoing_url,\n session_id=session_id,\n user=user,\n request_msg=request_msg)\n\n @staticmethod\n def get_dm3_entry(request_obj, call_type, request_msg):\n \"\"\"Init ServiceCallEntry object for a D3M call\"\"\"\n assert request_obj is not None,\\\n \"request_obj cannot be None\"\n\n session_id = get_session_key(request_obj)\n\n user = None\n if request_obj.user.is_authenticated:\n user = request_obj.user\n\n if settings.TA2_STATIC_TEST_MODE:\n outgoing_url = '(no TA2, static test mode)'\n else:\n outgoing_url = settings.TA2_TEST_SERVER_URL\n\n return ServiceCallEntry(call_type=call_type,\n service_type=SERVICE_TYPE_D3M,\n outgoing_url=outgoing_url,\n session_id=session_id,\n user=user,\n request_msg=request_msg)\n\n def save_d3m_response(self, json_dict):\n \"\"\"Save the gRPC log response\"\"\"\n if not json_dict:\n err_dict = dict(success=False, message='json response was none')\n self.add_error_message(json.dumps(err_dict), '(n/a)')\n\n if self.is_failed_failed_response(json_dict):\n self.add_error_message(json.dumps(json_dict), '(n/a)')\n else:\n self.add_success_message(json.dumps(json_dict), '(n/a)')\n self.save()\n\n\n def is_failed_failed_response(self, 
json_dict):\n \"\"\"Does this JSON indicate failure?\"\"\"\n if json_dict is None:\n return True\n\n if str(json_dict).find(STATUS_VAL_FAILED_PRECONDITION) > -1:\n return True\n\n if not hasattr(json_dict, 'items'):\n \"\"\"There is a valid gRPC response which is a list\"\"\"\n return False\n\n if 'success' in json_dict:\n if json_dict['success'] is False:\n return True\n\n # check for something like this:\n # '\"responseInfo\": { \"status\": { \"code\": \"FAILED_PRECONDITION\",'\n #\n if 'responseInfo' in json_dict:\n if 'status' in json_dict['responseInfo']:\n if 'code' in json_dict['responseInfo']['status']:\n if json_dict['responseInfo']['status']['code'] != 'OK':\n return True\n\n # check for something like this:\n # { \"status\": { \"code\": \"FAILED_PRECONDITION\",'\n #\n if 'status' in json_dict:\n if 'code' in json_dict['status']:\n if json_dict['status']['code'] != 'OK':\n return True\n\n\n return False\n","sub_path":"tworaven_apps/call_captures/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":5994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"326824937","text":"import subprocess \n\nfilename = \"cricket.txt\"\nprint('host', 'IP', 'root_pwd')\nlines = [line.rstrip('\\n') for line in open(filename)]\nfor line in lines:\n a = line.split()\n # output = call([\"slcli\", \"vs\", \"credentials\", a[0]])\n result = subprocess.run([\"slcli\", \"vs\", \"credentials\", a[0]], stdout=subprocess.PIPE)\n res = result.stdout.decode('utf-8').split()\n print(a[1], a[2], str(res[1]))\n\n","sub_path":"python code/get_credentials.py","file_name":"get_credentials.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"431220090","text":"#!/usr/bin/python\n\nimport argparse\nimport subprocess\nimport yaml\nimport json\n\ndef docker_run(config):\n for db, properties in config.items():\n if properties.get('runIntegrationTests', False):\n if 'dockerImage' in properties:\n cmd = \"docker run -d -p {port_map} --name {name} {image}:{version}\".format(\n port_map = properties['dockerPortMapping'],\n name = properties['dockerName'],\n image = properties['dockerImage'],\n version = properties['dockerImageVersion'])\n print(cmd)\n run(cmd)\n elif 'dockerName' in properties:\n cmd = \"docker start {name}\".format(name = properties['dockerName'])\n print(cmd)\n run(cmd)\n\ndef docker_rm(config):\n for db, properties in config.items():\n if properties.get('runIntegrationTests', False):\n if 'dockerImage' in properties:\n cmd = \"docker rm -f {name}\".format(name = properties['dockerName'])\n print(cmd)\n run(cmd)\n elif 'dockerName' in properties:\n cmd = \"docker stop {name}\".format(name = properties['dockerName'])\n print(cmd)\n run(cmd)\n\ndef run(cmd):\n try: \n p = subprocess.Popen(\n cmd,\n stdout = subprocess.PIPE, \n stderr = subprocess.STDOUT,\n close_fds = True,\n shell = True)\n out, err = p.communicate()\n if out:\n if (p.returncode != 0):\n print(out)\n return out\n if err:\n print(err)\n finally:\n if p is not None:\n try: p.kill()\n except: pass\n\ndef replace_hosts_in(config):\n for db, properties in config.items():\n if properties.get('runIntegrationTests', False):\n if 'dockerName' in properties:\n container_ip = get_ip_for(properties['dockerName'])\n conn_string_with_ip = properties['dockerConnectionString'].replace(\n 'DBHOST',container_ip)\n properties['dockerConnectionString'] = conn_string_with_ip\n return 
yaml.dump(config, default_flow_style=False)\n\ndef get_ip_for(docker_name):\n cmd = \"docker network inspect bridge\"\n docker_bridge_json = run(cmd)\n bridge_info = json.loads(docker_bridge_json)\n containers = bridge_info[0]['Containers']\n for id, properties in containers.items():\n if properties['Name'] == docker_name:\n return str(properties['IPv4Address'].split('/')[0])\n\ndef read_config(config_file):\n with open(config_file) as config:\n return yaml.load(config)\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"config\", help=\"YAML integration test config file\")\n parser.add_argument(\"--run\", help=\"run containers\", action=\"store_true\")\n parser.add_argument(\"--rm\", help=\"remove containers\", action=\"store_true\")\n parser.add_argument(\"--add_docker_hosts\", help=\"return config file with fixed connection strings\", action=\"store_true\")\n args = parser.parse_args()\n\n yaml_config = read_config(args.config)\n\n if args.run:\n docker_run(yaml_config)\n elif args.rm:\n docker_rm(yaml_config)\n elif args.add_docker_hosts:\n print(replace_hosts_in(yaml_config))\n","sub_path":"jdbc-adapter/integration-test-data/socker.py","file_name":"socker.py","file_ext":"py","file_size_in_byte":3393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"333230569","text":"import pytest\nimport nway.nway_matching as nway\nimport os\nfrom jinja2 import Template\nimport json\nimport numpy as np\nimport copy\nimport contextlib\nimport PIL.Image\nfrom pathlib import Path\n\n\nTEST_FILE_DIR = os.path.join(\n os.path.dirname(os.path.abspath(__file__)),\n 'test_files')\n\n\n@pytest.fixture(scope='module')\ndef table_to_prune():\n fname = os.path.join(\n TEST_FILE_DIR,\n \"test0\",\n \"table_before_pruning.json\")\n with open(fname, 'r') as f:\n j = json.load(f)\n table = np.array(j['table'])\n score = np.array(j['score'])\n yield (table, score)\n\n\n@pytest.fixture(scope='function')\ndef input_file(tmpdir):\n thistest = os.path.join(TEST_FILE_DIR, 'test0')\n myinput = os.path.join(thistest, 'input.json')\n with open(myinput, 'r') as f:\n template = Template(json.dumps(json.load(f)))\n output_dir = str(tmpdir.mkdir(\"nway_test\"))\n rendered = json.loads(\n template.render(\n output_dir=output_dir,\n test_files_dir=str(thistest)))\n rendered['log_level'] = \"DEBUG\"\n yield rendered\n\n\n@pytest.mark.parametrize(\n \"sizes, context\",\n [\n (\n [(100, 100), (100, 100), (100, 100)],\n contextlib.nullcontext()),\n (\n [(100, 100), (100, 99), (100, 100)],\n pytest.warns(\n UserWarning,\n match=r\"not all experiments have the same size.*\"))])\ndef test_nway_size_mismatch_exception(tmpdir, sizes, context):\n impaths = []\n for i, size in enumerate(sizes):\n impath = tmpdir / f\"test_{i}.png\"\n with PIL.Image.new(size=size, mode='L') as im:\n im.save(str(impath))\n impaths.append(impath)\n with context:\n nway.check_image_sizes(impaths)\n\n\n@pytest.fixture\ndef sub_experiments(tmpdir, request):\n exps = []\n for i in range(3):\n subdir = tmpdir / f\"{i}\"\n subdir.mkdir()\n avg_path = subdir / request.param.get(\"avg_fname\")\n with open(avg_path, \"w\") as fp:\n fp.write(\"content\")\n exps.append(\n {'ophys_average_intensity_projection_image': str(avg_path)})\n if request.param.get(\"max_exists\"):\n max_path = subdir / \"maxInt_a13a.png\"\n with open(max_path, \"w\") as fp:\n fp.write(\"content\")\n yield exps\n\n\n@pytest.mark.parametrize(\n \"sub_experiments, context\",\n [\n (\n {\n \"avg_fname\": 
\"avgInt_a1X.png\",\n \"max_exists\": True},\n contextlib.nullcontext()),\n (\n {\n \"avg_fname\": \"XYZ_avgInt_a1X.png\",\n \"max_exists\": True},\n pytest.raises(\n nway.NwayException,\n match=r\"flag 'substitute_max_for_avg' only.*\")),\n (\n {\n \"avg_fname\": \"avgInt_a1X.png\",\n \"max_exists\": False},\n pytest.raises(\n nway.NwayException,\n match=r\"attempted to substitute .*\")),\n ],\n indirect=['sub_experiments'])\ndef test_substitute_max_projection(sub_experiments, context):\n with context:\n exps = nway.substitute_max_projection(sub_experiments)\n for exp in exps:\n p = Path(exp['ophys_average_intensity_projection_image'])\n assert p.name == \"maxInt_a13a.png\"\n\n\ndef test_nway_exception(tmpdir, input_file):\n args = copy.deepcopy(input_file)\n args['experiment_containers']['ophys_experiments'] = \\\n [args['experiment_containers']['ophys_experiments'][0]]\n output_dir = str(tmpdir.mkdir(\"nway_exception\"))\n args['output_directory'] = output_dir\n args['assignment_solver'] = 'Blossom'\n\n nwmatch = nway.NwayMatching(input_data=args, args=[])\n with pytest.raises(nway.NwayException):\n nwmatch.run()\n\n\n@pytest.mark.parametrize('greduce', ['keepmin', 'popmax'])\ndef test_default_nway(tmpdir, input_file, greduce):\n args = copy.deepcopy(input_file)\n output_dir = str(tmpdir.mkdir(\"nway_default\"))\n args['output_json'] = os.path.join(output_dir, 'output.json')\n args['pruning_method'] = greduce\n args['include_original'] = True\n n = nway.NwayMatching(input_data=args, args=[])\n\n assert n.args['assignment_solver'] == 'Blossom'\n\n n.run()\n\n with open(n.args['output_json'], 'r') as f:\n oj = json.load(f)\n\n assert len(oj['nway_matches']) > 650\n nave = np.array([len(i) for i in oj['nway_matches']]).mean()\n assert nave > 1.5\n\n nexp = len(args['experiment_containers']['ophys_experiments'])\n\n npairs = int(nexp * (nexp - 1) / 2)\n\n assert npairs == len(oj['pairwise_results'])\n","sub_path":"tests/test_nway.py","file_name":"test_nway.py","file_ext":"py","file_size_in_byte":4784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"43965933","text":"from __future__ import print_function\r\nimport os\r\nimport pandas as pd\r\nimport json\r\nimport sys\r\n\r\ndef eprint(*args, **kwargs):\r\n print(*args, file=sys.stderr, **kwargs)\r\n\r\ninput_file = os.path.join(os.getcwd(),'Results', 'features', 'removed_filtered_all_features_v4.out')\r\nfeatures_folder = os.path.join(os.getcwd(),'Results', 'features', 'monthly_features_full2')\r\n\r\ndf = pd.read_csv(input_file, encoding='latin-1')\r\n#print(set(df['file']))\r\nfiles = set(df['file'])\r\n\r\ndates = []\r\n\r\nfor filename in files:\r\n year, month, _ = filename.split('_')\r\n dates.append((int(year), int(month)))\r\n\r\n#print(sorted(dates))\r\n\r\ndates = sorted(dates)\r\n\r\n\r\n\r\ndic = {}\r\nfor yr_,m_ in dates:\r\n filename = str(yr_)+'_'+str(m_)+'_spam_features.out'\r\n print(filename)\r\n tmp_df = pd.read_csv(os.path.join(features_folder, filename))\r\n\r\n for (yr, m) in dates:\r\n if int(m) not in [1,6]:\r\n continue \r\n print(yr,m)\r\n filename = str(yr) + '_' + str(m) + '_spam.csv'\r\n #print(*list(df[df['file']==filename]['feature']), sep = \"\\n\")\r\n top_features = list(df[df['file']==filename]['feature'])\r\n\r\n #print(f)\r\n #print(tmp_df[tmp_df['feature'].isin(top_features)]['feature'])\r\n #print('-------')\r\n #print(set(top_features) - set(tmp_df[tmp_df['feature'].isin(top_features)]['feature']))\r\n #non_existant_features = set(top_features) - 
set(tmp_df[tmp_df['feature'].isin(top_features)]['feature'])\r\n if str((yr,m)) not in dic:\r\n dic[str((yr,m))] = {}\r\n for feature in top_features:\r\n if feature not in dic[str((yr,m))]:\r\n dic[str((yr,m))][feature] = []\r\n #print(tmp_df[tmp_df['feature'] == feature])\r\n tmp = tmp_df[tmp_df['feature'] == feature]\r\n #print(tmp.shape[0])\r\n weight = 0 if (tmp.shape[0]==0) else tmp.iloc[0]['weight']\r\n dic[str((yr,m))][feature].append(weight)\r\n #break\r\n #eprint(dic)\r\n #break\r\nprint(dic)\r\n\r\njson.dump(dic, open(os.path.join(os.getcwd(),'Results', 'features', 'graph_input_filtered_removed_skipped.json'),'w'))","sub_path":"analyse_monthly_features_spam_skipped.py","file_name":"analyse_monthly_features_spam_skipped.py","file_ext":"py","file_size_in_byte":2136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"625254274","text":"from .linear_model import LinearModel\nimport numpy as np\n\nclass DifferentiableLinearModel(LinearModel):\n def __init__(self, theta0, theta1):\n super().__init__(theta0, theta1)\n\n def improve_theta1(self, dataset):\n first_error_deriv_wrt_theta1 = np.sum(\n 2 * (dataset.y - self(dataset.x)) * (-dataset.x)\n )\n second_error_derivative_wrt_theta1 = np.sum(\n 2 * (-dataset.x) * (-dataset.x)\n )\n\n delta = -(\n first_error_deriv_wrt_theta1\n / second_error_derivative_wrt_theta1\n )\n\n self.theta1 += delta\n","sub_path":"00_linear_least_squares/lib/differentiable_linear_model.py","file_name":"differentiable_linear_model.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"106576786","text":"# coding: utf-8\n\"\"\"\n flask_wtf.csrf\n ~~~~~~~~~~~~~~\n\n CSRF protection for Flask.\n\n :copyright: (c) 2013 by Hsiaoming Yang.\n\"\"\"\n\nimport hashlib\nimport os\nimport warnings\nfrom functools import wraps\n\nfrom flask import Blueprint, current_app, request, session\nfrom itsdangerous import BadData, URLSafeTimedSerializer\nfrom werkzeug.exceptions import BadRequest\nfrom werkzeug.security import safe_str_cmp\n\nfrom ._compat import FlaskWTFDeprecationWarning, string_types, urlparse\n\n__all__ = ('generate_csrf', 'validate_csrf', 'CsrfProtect')\n\n\ndef _get_secret_key(secret_key=None):\n if not secret_key:\n secret_key = current_app.config.get('WTF_CSRF_SECRET_KEY', current_app.secret_key)\n\n if not secret_key:\n raise Exception('Must provide secret_key to use CSRF.')\n\n return secret_key\n\n\ndef generate_csrf(secret_key=None, token_key='csrf_token'):\n \"\"\"Generate a CSRF token. The token is cached for a request, so multiple\n calls to this function will generate the same token.\n\n During testing, it might be useful to access the signed token in\n ``request.csrf_token`` and the raw token in ``session['csrf_token']``.\n\n :param secret_key: Used to securely sign the token. Default is\n ``WTF_CSRF_SECRET_KEY`` or ``SECRET_KEY``.\n :param token_key: key where token is stored in session for comparision.\n \"\"\"\n\n if not getattr(request, token_key, None):\n if token_key not in session:\n session[token_key] = hashlib.sha1(os.urandom(64)).hexdigest()\n\n s = URLSafeTimedSerializer(_get_secret_key(secret_key), salt='wtf-csrf-token')\n setattr(request, token_key, s.dumps(session[token_key]))\n\n return getattr(request, token_key)\n\n\ndef validate_csrf(data, secret_key=None, time_limit=None, token_key='csrf_token'):\n \"\"\"Check if the given data is a valid CSRF token. 
This compares the given\n signed token to the one stored in the session.\n\n :param data: The signed CSRF token to be checked.\n :param secret_key: Used to securely sign the token. Default is\n ``WTF_CSRF_SECRET_KEY`` or ``SECRET_KEY``.\n :param time_limit: Number of seconds that the token is valid. Default is\n ``WTF_CSRF_TIME_LIMIT`` or 3600 seconds (60 minutes).\n :param token_key: key where token is stored in session for comparision.\n \"\"\"\n\n if not data or token_key not in session:\n return False\n\n s = URLSafeTimedSerializer(_get_secret_key(secret_key), salt='wtf-csrf-token')\n\n if time_limit is None:\n time_limit = current_app.config.get('WTF_CSRF_TIME_LIMIT', 3600)\n\n try:\n token = s.loads(data, max_age=time_limit)\n except BadData:\n return False\n\n return safe_str_cmp(session[token_key], token)\n\n\nclass CsrfProtect(object):\n \"\"\"Enable CSRF protection globally for a Flask app.\n\n ::\n\n app = Flask(__name__)\n csrf = CsrfProtect(app)\n\n Checks the ``csrf_token`` field sent with forms, or the ``X-CSRFToken``\n header sent with JavaScript requests. Render the token in templates using\n ``{{ csrf_token() }}``.\n\n See the :ref:`csrf` documentation.\n \"\"\"\n\n def __init__(self, app=None):\n self._exempt_views = set()\n self._exempt_blueprints = set()\n\n if app:\n self.init_app(app)\n\n def init_app(self, app):\n app.config.setdefault('WTF_CSRF_ENABLED', True)\n app.config.setdefault('WTF_CSRF_CHECK_DEFAULT', True)\n app.config['WTF_CSRF_METHODS'] = set(app.config.get(\n 'WTF_CSRF_METHODS', ['POST', 'PUT', 'PATCH', 'DELETE']\n ))\n app.config.setdefault('WTF_CSRF_HEADERS', ['X-CSRFToken', 'X-CSRF-Token'])\n app.config.setdefault('WTF_CSRF_SSL_STRICT', True)\n\n app.jinja_env.globals['csrf_token'] = generate_csrf\n app.context_processor(lambda: {'csrf_token': generate_csrf})\n\n @app.before_request\n def _csrf_protect():\n if not app.config['WTF_CSRF_ENABLED']:\n return\n\n if not app.config['WTF_CSRF_CHECK_DEFAULT']:\n return\n\n if request.method not in app.config['WTF_CSRF_METHODS']:\n return\n\n if not request.endpoint:\n return\n\n view = app.view_functions.get(request.endpoint)\n\n if not view:\n return\n\n if request.blueprint in self._exempt_blueprints:\n return\n\n dest = '%s.%s' % (view.__module__, view.__name__)\n\n if dest in self._exempt_views:\n return\n\n self.protect()\n\n def _get_csrf_token(self):\n # find the ``csrf_token`` field in the subitted form\n # if the form had a prefix, the name will be\n # ``{prefix}-csrf_token``\n for key in request.form:\n if key.endswith('csrf_token'):\n csrf_token = request.form[key]\n\n if csrf_token:\n return csrf_token\n\n for header_name in current_app.config['WTF_CSRF_HEADERS']:\n csrf_token = request.headers.get(header_name)\n\n if csrf_token:\n return csrf_token\n\n return None\n\n def protect(self):\n if request.method not in current_app.config['WTF_CSRF_METHODS']:\n return\n\n if not validate_csrf(self._get_csrf_token()):\n self._error_response('CSRF token missing or incorrect.')\n\n if request.is_secure and current_app.config['WTF_CSRF_SSL_STRICT']:\n if not request.referrer:\n self._error_response('Referrer checking failed - no Referrer.')\n\n good_referrer = 'https://%s/' % request.host\n\n if not same_origin(request.referrer, good_referrer):\n self._error_response('Referrer checking failed - origin does not match.')\n\n request.csrf_valid = True # mark this request is csrf valid\n\n def exempt(self, view):\n \"\"\"Mark a view or blueprint to be excluded from CSRF protection.\n\n ::\n\n 
@app.route('/some-view', methods=['POST'])\n @csrf.exempt\n def some_view():\n ...\n\n ::\n\n bp = Blueprint(...)\n csrf.exempt(bp)\n\n \"\"\"\n\n if isinstance(view, Blueprint):\n self._exempt_blueprints.add(view.name)\n return view\n\n if isinstance(view, string_types):\n view_location = view\n else:\n view_location = '%s.%s' % (view.__module__, view.__name__)\n\n self._exempt_views.add(view_location)\n return view\n\n def _error_response(self, reason):\n raise CsrfError(reason)\n\n def error_handler(self, view):\n \"\"\"Register a function that will generate the response for CSRF errors.\n\n .. deprecated:: 0.14\n Use the standard Flask error system with\n ``@app.errorhandler(CsrfError)`` instead. This will be removed in\n version 1.0.\n\n The function will be passed one argument, ``reason``. By default it will\n raise a :class:`~flask_wtf.csrf.CsrfError`. ::\n\n @csrf.error_handler\n def csrf_error(reason):\n return render_template('error.html', reason=reason)\n\n Due to historical reasons, the function may either return a response\n or raise an exception with :func:`flask.abort`.\n \"\"\"\n\n warnings.warn(FlaskWTFDeprecationWarning(\n '\"@csrf.error_handler\" is deprecated. Use the standard Flask error '\n 'system with \"@app.errorhandler(CsrfError)\" instead. This will be'\n 'removed in 1.0.'\n ), stacklevel=2)\n\n @wraps(view)\n def handler(reason):\n response = current_app.make_response(view(reason))\n raise CsrfError(response.get_data(as_text=True), response=response)\n\n self._error_response = handler\n return view\n\n\nclass CsrfError(BadRequest):\n \"\"\"Raise if the client sends invalid CSRF data with the request.\n\n Generates a 400 Bad Request response with the failure reason by default.\n Customize the response by registering a handler with\n :meth:`flask.Flask.errorhandler`.\n \"\"\"\n\n description = 'CSRF token missing or incorrect.'\n\n\ndef same_origin(current_uri, compare_uri):\n current = urlparse(current_uri)\n compare = urlparse(compare_uri)\n\n return (\n current.scheme == compare.scheme\n and current.hostname == compare.hostname\n and current.port == compare.port\n )\n","sub_path":"flask_wtf/csrf.py","file_name":"csrf.py","file_ext":"py","file_size_in_byte":8437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"193559502","text":"# Copyright (c) LinkedIn Corporation. All rights reserved. Licensed under the BSD-2 Clause license.\n# See LICENSE in the project root for license information.\n\nfrom falcon import HTTP_201, HTTPError\nfrom ujson import dumps as json_dumps\nfrom ... 
import db\nfrom ...auth import debug_only\nfrom ...utils import load_json_body\n\ncolumns = {\n 'id': '`role`.`id` as `id`',\n 'name': '`role`.`name` as `name`'\n}\n\nall_columns = ', '.join(columns.values())\n\nconstraints = {\n 'id': '`role`.`id` = %s',\n 'id__eq': '`role`.`id` = %s',\n 'id__ne': '`role`.`id` != %s',\n 'id__lt': '`role`.`id` < %s',\n 'id__le': '`role`.`id` <= %s',\n 'id__gt': '`role`.`id` > %s',\n 'id__ge': '`role`.`id` >= %s',\n 'name': '`role`.`name` = %s',\n 'name__eq': '`role`.`name` = %s',\n 'name__contains': '`role`.`name` LIKE CONCAT(\"%%\", %s, \"%%\")',\n 'name__startswith': '`role`.`name` LIKE CONCAT(%s, \"%%\")',\n 'name__endswith': '`role`.`name` LIKE CONCAT(\"%%\", %s)'\n}\n\n\ndef on_get(req, resp):\n fields = req.get_param_as_list('fields', transform=columns.__getitem__)\n cols = ', '.join(fields) if fields else all_columns\n query = 'SELECT %s FROM `role`' % cols\n where_params = []\n where_vals = []\n for key in req.params:\n val = req.get_param(key)\n if key in constraints:\n where_params.append(constraints[key])\n where_vals.append(val)\n where_queries = ' AND '.join(where_params)\n if where_queries:\n query = '%s WHERE %s' % (query, where_queries)\n\n connection = db.connect()\n cursor = connection.cursor(db.DictCursor)\n cursor.execute(query, where_vals)\n data = cursor.fetchall()\n cursor.close()\n connection.close()\n resp.body = json_dumps(data)\n\n\n@debug_only\ndef on_post(req, resp):\n data = load_json_body(req)\n new_role = data['name']\n connection = db.connect()\n cursor = connection.cursor()\n try:\n cursor.execute('INSERT INTO `role` (`name`) VALUES (%s)', new_role)\n connection.commit()\n except db.IntegrityError as e:\n err_msg = str(e.args[1])\n if 'Duplicate entry' in err_msg:\n err_msg = 'role \"%s\" already existed' % new_role\n raise HTTPError('422 Unprocessable Entity', 'IntegrityError', err_msg)\n finally:\n cursor.close()\n connection.close()\n\n resp.status = HTTP_201\n","sub_path":"src/oncall/api/v0/roles.py","file_name":"roles.py","file_ext":"py","file_size_in_byte":2344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"88493231","text":"'''\n @file graph.py\n @author Marcus Edel\n\n Functions to plot graphs.\n'''\n\nimport os, sys, inspect\n\n# Import the util path, this method even works if the path contains\n# symlinks to modules.\ncmd_subfolder = os.path.realpath(os.path.abspath(os.path.join(\n os.path.split(inspect.getfile(inspect.currentframe()))[0], '../util')))\nif cmd_subfolder not in sys.path:\n sys.path.insert(0, cmd_subfolder)\n\nfrom misc import *\nfrom log import *\n\nimport matplotlib\n\nmatplotlib.use(\"Agg\")\n\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as mticker\nimport re\nimport collections\n\n\n# Use this colors to plot the graph.\ncolors = ['#3366CC', '#DC3912', '#FF9900', '#FFFF32', '#109618', '#990099',\n '#DD4477', '#AAAA11', '#22AA99']\n\n'''\nGenerate a bar chart with the specified informations.\n\n@param results - Contains the values to plot.\n@param libraries - A list that contains the names of the libraries.\n@param fileName - The filename of the line chart.\n@param bestlib - The name of the library which should be compared with the other\nlibraries.\n@param backgroundColor - The color of the image background.\n@return The dataset count, total time, failure count, timeout count,\nbest libray count, timing data.\n'''\ndef GenerateBarChart(results, libraries, fileName, bestlib=\"mlpack\",\n backgroundColor=\"#FFFFFF\", 
textColor=\"#6e6e6e\", gridColor=\"#6e6e6e\"):\n # Bar chart settings.\n lineWidth = 0.1\n barWidth = 0.15\n opacity = 0.9\n fill = True\n windowWidth = 8.1\n windowHeight = 3.3\n gridLineWidth = 0.2\n\n # Create figure and set the color.\n matplotlib.rc('axes', facecolor=backgroundColor)\n matplotlib.rcParams.update({'font.size': 8})\n fig = plt.figure(figsize=(windowWidth, windowHeight),\n facecolor=backgroundColor, dpi=100)\n plt.rc('lines', linewidth=lineWidth)\n ax = plt.subplot(1,1,1)\n\n # Set the grid style.\n ax.yaxis.grid(True, linestyle='-', linewidth=gridLineWidth, color=gridColor)\n ax.xaxis.grid(False)\n ax.spines['left'].set_visible(False)\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n ax.get_xaxis().tick_bottom()\n ax.get_yaxis().tick_left()\n ax.spines['bottom'].set_linewidth(gridLineWidth)\n\n # Data structures to set the legend and the right postions for the bar chart.\n legendIndex = []\n color = {}\n chartHandler = []\n legendNames = []\n nextBar = 0\n legendPosition = 0\n legendBegin = 0\n\n # use this variable to count the time.\n totalTime = 0\n # Use this variable to count the timeouts.\n timeouts = 0\n # Use this variable to count the failures.\n failure = 0\n\n # Use this data structures to generate the timing table and the progress bar.\n timingData = {}\n\n # Use this variable to get use the data for the right library.\n l = 0\n # Iterate through the data and plot the bar chart.\n for result in results:\n for i, data in enumerate(result):\n # The time value.\n time = data[3]\n # The name of the dataset.\n dataset = data[8]\n\n # Save the timing data for the timing table.\n if dataset in timingData:\n timingData[dataset][l] = time\n else:\n timingData[dataset] = ['-' for x in range(len(libraries))]\n timingData[dataset][l] = time\n\n # We can only plot scalar values so we jump over the other.\n if time == \"failure\":\n failure += 1\n continue\n elif str(time).count(\">\") > 0:\n timeouts += 1\n continue\n\n totalTime += time\n l += 1\n\n timingData = collections.OrderedDict(sorted(timingData.items()))\n\n tmp = timingData.copy()\n if failure > 0 or timeouts > 0:\n # Get the maximum value of the results.\n values = [item for sublist in timingData.values() for item in sublist]\n maxValue = [v if isFloat(v) else 1 for v in values]\n maxValue = max(maxValue)\n else:\n maxValue = 0\n\n for key, values in timingData.items():\n l = 0\n legendIndex.append(nextBar)\n for value in values:\n color = colors[l % len(colors)]\n\n if isFloat(value):\n plt.bar(nextBar, value, barWidth, alpha=opacity, color=color,\n fill=fill,lw=0.2)\n else:\n plt.bar(nextBar, maxValue, barWidth, alpha=opacity, color=\"gray\",\n fill=fill, lw=0.2)\n\n time = value if isFloat(value) else 10\n\n nextBar += barWidth\n l += 1\n nextBar += (4 * barWidth)\n\n # Create a proxy artist for the legend.\n handler = []\n for l, library in enumerate(libraries):\n color = colors[l % len(colors)]\n handler.append(plt.Rectangle((0, 0), 1, 1, fc=color, alpha=0.6))\n\n handler.append(plt.Rectangle((0, 0), 1, 1, fc=\"gray\", alpha=0.6))\n\n # Set the label for the x-axis.\n plt.xticks(legendIndex , list(timingData.keys()), rotation=30, ha='right')\n\n # Set the color and the font of the x-axis and y-axis label.\n ax.tick_params(axis='both', which='major', labelsize=8, labelcolor=textColor)\n ax.tick_params(axis='both', which='minor', labelsize=6, labelcolor=textColor)\n\n # Create the legend above the bar chart.\n lgd = ax.legend(handler, libraries + [\"failure/ timeout\"], 
loc='upper center',\n bbox_to_anchor=(0.5, 1.3 + (0.2 * len(libraries) / 6)), fancybox=True,\n shadow=False, ncol=6, fontsize=8)\n lgd.get_frame().set_linewidth(0)\n for label in lgd.get_texts():\n label.set_color(textColor)\n\n # Set axis labels.\n plt.ylabel(\"time [s]\", color=textColor)\n\n # Save the bar chart.\n fig.tight_layout()\n fig.savefig(fileName, bbox_extra_artists=(lgd,), bbox_inches='tight',\n facecolor=fig.get_facecolor(), edgecolor='none', format='png', dpi=100)\n plt.close()\n\n # Count the time in which bestlib is the best.\n bestLibCount = 0\n try:\n bestLibIndex = libraries.index(bestlib)\n except ValueError:\n pass\n else:\n for dataset, results in timingData.items():\n results = [v if isFloat(v) else float('Inf') for v in results]\n if bestLibIndex == results.index(min(results)):\n bestLibCount += 1\n\n return (len(timingData), totalTime, failure, timeouts, bestLibCount, timingData)\n\n'''\nGenerate a line chart with the specified informations.\n\n@param data - Contains the information for the line chart.\n@param fileName - The filename of the line chart.\n@param backgroundColor - The color of the image background.\n@param windowWidth - The width of the chart.\n@prama windowHeight - The height of the chart.\n'''\ndef GenerateSingleLineChart(data, fileName, backgroundColor=\"#FFFFFF\",\n windowWidth=8.1, windowHeight=1.3, textColor=\"#6e6e6e\", gridColor=\"#6e6e6e\"):\n\n # Generate a list of scalar values. Use the privious or next elemnt to fill\n # the gap.\n def NormalizeData(data):\n i = 0\n while len(data) != i:\n if not data[i]:\n if i > 0 and data[i - 1]:\n data[i] = data[i - 1]\n else:\n del data[i]\n i -= 1\n i += 1\n return data\n\n # Line chart settings.\n lineWidth = 1.5\n opacity = 0.9\n gridLineWidth = 0.2\n\n # Create figure and set the color.\n matplotlib.rc('axes', facecolor=backgroundColor)\n matplotlib.rcParams.update({'font.size': 8})\n fig = plt.figure(figsize=(windowWidth, windowHeight),\n facecolor=backgroundColor, dpi=100)\n plt.rc('lines', linewidth=lineWidth)\n ax = plt.subplot(1,1,1)\n\n # Set the grid style.\n ax.yaxis.grid(True, linestyle='-', linewidth=gridLineWidth, color=gridColor)\n ax.xaxis.grid(False)\n ax.spines['left'].set_visible(False)\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n ax.get_xaxis().tick_bottom()\n ax.get_yaxis().tick_left()\n ax.spines['bottom'].set_linewidth(gridLineWidth)\n\n # Set ticks for the x-axis.\n myLocator = mticker.MultipleLocator(1)\n ax.xaxis.set_major_locator(myLocator)\n\n data = NormalizeData(data)\n\n # If we only have a single value we don't want to start from zero so we\n # double the data.\n if len(data) == 1:\n data += data\n\n # Create the data for the x-axis.\n X = list(range(len(data)))\n\n # Plot the line chart.\n plt.plot(X, data, color=colors[0], alpha=opacity, linewidth=1.7)\n\n # Set the color and the font of the x-axis and y-axis labels.\n ax.tick_params(axis='both', which='major', labelsize=8, labelcolor=textColor)\n ax.tick_params(axis='both', which='minor', labelsize=6, labelcolor=textColor)\n\n # Set axis labels.\n plt.ylabel(\"time [s]\", color=textColor)\n plt.xlabel(\"benchmark build\", color=textColor)\n\n # Save the line chart.\n fig.tight_layout()\n fig.savefig(fileName, bbox_inches='tight', facecolor=fig.get_facecolor(),\n edgecolor='none', dpi=100)\n plt.close()\n\n'''\nGenerate a memory chart with the specified informations.\n\n@param massiflogFile - The massif logfile.\n@param fileName - The filename of the memory chart.\n@param 
backgroundColor - The color of the image background.\n'''\ndef CreateMassifChart(massiflogFile, fileName, backgroundColor=\"#FFFFFF\",\n textColor=\"6e6e6e\", gridColor=\"#6e6e6e\"):\n lineWidth = 1.5\n opacity = 0.9\n windowWidth = 8.1\n windowHeight = 1.3\n gridLineWidth = 0.2\n\n # Create figure and set the color.\n matplotlib.rc('axes', facecolor=backgroundColor)\n matplotlib.rcParams.update({'font.size': 8})\n fig = plt.figure(figsize=(windowWidth, windowHeight),\n facecolor=backgroundColor, dpi=100)\n plt.rc('lines', linewidth=lineWidth)\n ax = plt.subplot(1,1,1)\n\n # Set the grid style.\n ax.yaxis.grid(True, linestyle='-', linewidth=gridLineWidth, color=gridColor)\n ax.xaxis.grid(False)\n ax.spines['left'].set_visible(False)\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n ax.get_xaxis().tick_bottom()\n ax.get_yaxis().tick_left()\n ax.spines['bottom'].set_linewidth(gridLineWidth)\n\n # Read the massif logfile.\n try:\n with open(massiflogFile, \"r\") as fid:\n content = fid.read()\n except IOError as e:\n Log.Fatal(\"Exception: \" + str(e))\n return\n\n # Parse the massif logfile.\n memHeapB = [(int(i) / 1024) + 0.0001 for i in re.findall(r\"mem_heap_B=(\\d*)\", content)]\n memHeapExtraB = [(int(i) / 1024) + 0.0001 for i in re.findall(r\"mem_heap_extra_B=(\\d*)\", content)]\n memStackB = [(int(i) / 1024) + 0.0001 for i in re.findall(r\"mem_stacks_B=(\\d*)\", content)]\n\n # Plot the memory information.\n X = list(range(len(memHeapExtraB)))\n X = [x+0.0001 for x in X]\n plt.fill_between(X, memHeapExtraB, 0, color=\"#109618\", alpha=0.6)\n plt.fill_between(X, memHeapExtraB, memHeapB, color=\"#DC3912\", alpha=0.6)\n plt.fill_between(X, memHeapExtraB, memStackB, color=\"#3366CC\", alpha=0.6)\n\n # Set the color and the font of the x-axis and y-axis labels.\n ax.tick_params(axis='both', which='major', labelsize=8, labelcolor=textColor)\n ax.tick_params(axis='both', which='minor', labelsize=6, labelcolor=textColor)\n\n # Create a proxy artist, because fill_between hasn't a chart handler.\n p1 = plt.Rectangle((0, 0), 1, 1, fc=\"#109618\", alpha=0.6)\n p2 = plt.Rectangle((0, 0), 1, 1, fc=\"#DC3912\", alpha=0.6)\n p3 = plt.Rectangle((0, 0), 1, 1, fc=\"#3366CC\", alpha=0.6)\n\n # Set axis labels.\n plt.ylabel(\"memory [KB]\", color=textColor)\n plt.xlabel(\"snapshot\", color=textColor)\n\n # Create the legend above the memory chart.\n lgd = ax.legend((p1, p2, p3),\n (\"mem heap B\", \"mem heap extra B\", \"mem stacks B\"), loc='upper center',\n bbox_to_anchor=(0.5, 1.3), fancybox=True, shadow=False, ncol=8, fontsize=8)\n lgd.get_frame().set_linewidth(0)\n for label in lgd.get_texts():\n label.set_color(textColor)\n\n # Save the memory chart.\n fig.tight_layout()\n fig.savefig(fileName, bbox_extra_artists=(lgd,), bbox_inches='tight',\n facecolor=fig.get_facecolor(), edgecolor='none', format='png', dpi=100)\n plt.close()\n","sub_path":"util/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":11466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"304147214","text":"#!/home/stepsizestrategies/.local/bin/python3\n\n\n\nfrom django.conf.urls import url,include\n\n\nimport strategies.views\n\n\n\n# Uncomment the next lines to enable the admin:\n# from django.conf.urls import include\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = [\n # Examples:\n\n # Uncomment the admin/doc line below to enable admin documentation:\n # url(r'^admin/doc/', 
include('django.contrib.admindocs.urls')),\n\n # Uncomment the next line to enable the admin:\n url(r'^admin/', admin.site.urls),\n\n url(r'^$', strategies.views.index, name='index'),\n\n url(r'^login/$', strategies.views.login_view, name=\"login\"),\n\n url(r'^register/$', strategies.views.register_view, name=\"register_view\"),\n\n url(r'^logout/$', strategies.views.logout_view, name=\"logout\"),\n\n url(r'^contact/$', strategies.views.about, name=\"contact\"),\n url(r'^find/$', strategies.views.find, name=\"explore\"),\n url(r'^listing/$', strategies.views.listing, name=\"listing\"),\n url(r'^profile/$', strategies.views.profile, name=\"profile\"),\n url(r'^addProduct/$', strategies.views.addProduct, name=\"addProduct\"),\n\n\n\n\n\n\n]","sub_path":"stepsizestrategies/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"490423005","text":"from mcpi.minecraft import Minecraft\nimport time\nimport my_song_player\n\n#initialize the global variables and constant parameters\n\nhome_position = [86,22,111]\n\n\ndef fly_sing_loop(mc):\n count_fly = 0\n stayed_time = 0\n song_dict = my_song_player.get_songs()\n ser = my_song_player.test_com()\n # endless loop\n while True:\n print(\"stay_time\"+str(stayed_time))\n time.sleep(0.5)\n pos=mc.player.getTilePos()\n mc.postToChat(\"please go to home x=86 y=22 z=111 for 15s to fly\")\n mc.postToChat(\"x:\"+str(pos.x)+\"y:\"+str(pos.y)+\"z:\"+str(pos.z))\n '''\n if pos.x==home_position[0] and pos.y==home_position[1] and pos.z==home_position[2]:\n mc.postToChat(\"welcome home\")\n stayed_time = stayed_time + 1\n if stayed_time==16:\n # fly above\n mc.player.setTilePos(home_position[0],home_position[1]+30,home_position[2])\n stayed_time = 0\n # play a song\n song_name, song_content = list(song_dict.items())[count_fly%3]\n my_song_player.play_single_song(ser, song_name, song_content)\n count_fly = count_fly + 1\n # get back to home from the sky\n mc.player.setTilePos(home_position[0],home_position[1],home_position[2])\n else:\n stayed_time = 0\n '''\n if pos.x==home_position[0] and pos.y==home_position[1] and pos.z==home_position[2]:\n mc.postToChat(\"welcome home\")\n stayed_time = stayed_time + 1\n resp = ser.readline()\n rs = str(resp)\n if 'ON' in rs:\n print(\"got ON\")\n # fly above\n mc.player.setTilePos(home_position[0], home_position[1] + 30, home_position[2])\n stayed_time = 0\n # play a song\n song_name, song_content = list(song_dict.items())[count_fly % 3]\n my_song_player.play_single_song(ser, song_name, song_content)\n count_fly = count_fly + 1\n # get back to home from the sky\n mc.player.setTilePos(home_position[0], home_position[1], home_position[2])\n if 'OFF' in rs:\n print(\"got OFF\")\n else:\n stayed_time=0\n\n\ndef main():\n mc=Minecraft.create()\n # set the start position at home\n mc.player.setTilePos(home_position[0],home_position[1],home_position[2])\n fly_sing_loop(mc)\n\nif __name__ == '__main__':\n main()","sub_path":"students/ChenYizhou/gohome_sing.py","file_name":"gohome_sing.py","file_ext":"py","file_size_in_byte":2512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"645034160","text":"from flask import Flask,render_template, request, url_for, redirect, flash\r\nimport flask_login\r\nfrom flask import Blueprint\r\nfrom extensions import db, login_manager\r\nfrom models import User, Markets, MarketsPro\r\nfrom utilities import data, markets, 
markets_data, marketspro_data, get_trend, send_mail\r\nimport threading\r\n\r\n\r\n\r\n\r\n\r\nmain = Blueprint('main', __name__)\r\n\r\n@login_manager.user_loader\r\ndef user_loader(name):\r\n if name not in data:\r\n return\r\n\r\n user = User()\r\n user.id = name\r\n return user\r\n\r\n\r\n@login_manager.request_loader\r\ndef request_loader(request):\r\n name = request.form.get('name')\r\n if name not in data:\r\n return\r\n\r\n user = User()\r\n user.id = name\r\n\r\n user.is_authenticated = request.form['password'] == data[name]['password']\r\n\r\n return user\r\n\r\n\r\n\r\n@main.route('/')\r\ndef index_get():\r\n all_markets = Markets.query.order_by(Markets.name).all()\r\n return render_template('index.html',all_markets=all_markets, data=markets_data )\r\n\r\n@main.route('/pro')\r\n@flask_login.login_required\r\ndef pro_get():\r\n all_markets = MarketsPro.query.order_by(MarketsPro.name).all()\r\n return render_template('pro.html',all_markets=all_markets, data=marketspro_data)\r\n\r\n@main.route('/', methods=['POST'])\r\ndef index_post():\r\n err_msg = ''\r\n new_market = request.form.get('market')\r\n markets = {\r\n 'S&P 500' : 'SP500',\r\n 'GOLD' : 'GOLDAMGBD228NLBM',\r\n 'BITCOIN' : 'CBBTCUSD'\r\n } \r\n if new_market:\r\n existing_market = Markets.query.filter_by(name=new_market).first()\r\n if not existing_market:\r\n symbol = markets[new_market]\r\n new_trend = get_trend(symbol)\r\n data = Markets(name=new_market, symbol=symbol, trend=new_trend)\r\n db.session.add(data)\r\n db.session.commit()\r\n else:\r\n err_msg = 'MARKET ALREADY ADDED. BECOME A PRO MEMBER AND GET ACCESS TO MORE FINANCIAL INSTRUMENTS!'\r\n\r\n if err_msg:\r\n flash(err_msg)\r\n else:\r\n flash('MARKET ADDED. BECOME A PRO MEMBER AND GET ACCESS TO MORE FINANCIAL INSTRUMENTS!')\r\n return redirect(url_for('main.index_get'))\r\n\r\n@main.route('/pro', methods=['POST'])\r\n@flask_login.login_required\r\ndef pro_post():\r\n err_msg = ''\r\n new_market = request.form.get('market') \r\n if new_market:\r\n existing_market = MarketsPro.query.filter_by(name=new_market).first()\r\n if not existing_market:\r\n symbol = markets[new_market]\r\n new_trend = get_trend(symbol)\r\n data = MarketsPro(name=new_market, symbol=symbol, trend=new_trend)\r\n db.session.add(data)\r\n db.session.commit()\r\n else:\r\n err_msg = 'MARKET ALREADY ADDED'\r\n\r\n if err_msg:\r\n flash(err_msg)\r\n else:\r\n flash('MARKET ADDED')\r\n return redirect(url_for('main.pro_get'))\r\n\r\n\r\n\r\n@main.route('/delete/')\r\ndef delete(id):\r\n market = Markets.query.get_or_404(id)\r\n db.session.delete(market)\r\n db.session.commit()\r\n return redirect(url_for('main.index_get'))\r\n\r\n@main.route('/delete_pro/')\r\n@flask_login.login_required\r\ndef delete_pro(id):\r\n market = MarketsPro.query.get_or_404(id)\r\n db.session.delete(market)\r\n db.session.commit()\r\n return redirect(url_for('main.pro_get'))\r\n\r\n@main.route('/update/')\r\ndef update(id):\r\n market = Markets.query.get_or_404(id)\r\n symbol = market.symbol\r\n new_trend = get_trend(symbol)\r\n market.trend = new_trend\r\n db.session.commit()\r\n return redirect(url_for('main.index_get'))\r\n\r\n@main.route('/update_pro/')\r\n@flask_login.login_required\r\ndef update_pro(id):\r\n market = MarketsPro.query.get_or_404(id)\r\n symbol = market.symbol\r\n new_trend = get_trend(symbol)\r\n market.trend = new_trend\r\n db.session.commit()\r\n return redirect(url_for('main.pro_get'))\r\n\r\n@main.route('/login', methods=['GET', 'POST'])\r\ndef login():\r\n if request.method == 'GET':\r\n 
return render_template('login.html')\r\n\r\n try:\r\n name = request.form['name']\r\n if request.form['password'] == data[name]['password']:\r\n user = User()\r\n user.id = name\r\n flask_login.login_user(user)\r\n return redirect(url_for('main.pro_get'))\r\n flash('WRONG NAME OR PASSWORD!')\r\n return redirect(url_for('main.login'))\r\n except KeyError:\r\n flash('WRONG NAME OR PASSWORD!')\r\n return redirect(url_for('main.login'))\r\n\r\n\r\n\r\n@main.route('/logout')\r\n@flask_login.login_required\r\ndef logout():\r\n flask_login.logout_user()\r\n return render_template('logout.html')\r\n\r\n@main.route('/register', methods=['GET', 'POST'])\r\ndef register():\r\n if request.method == \"POST\":\r\n name = request.form['name']\r\n email = request.form['email']\r\n t1 = threading.Thread(target=send_mail, args=[name, email])\r\n t1.start()\r\n flash('THANK YOU. PLEASE CHECK YOUR EMAIL!')\r\n return redirect(url_for('main.login'))\r\n return render_template('register.html')\r\n\r\n\r\n@login_manager.unauthorized_handler\r\ndef unauthorized_handler():\r\n return render_template('unauthorized.html')\r\n\r\n@main.app_errorhandler(404)\r\ndef not_found(e):\r\n return render_template('404.html'), 404\r\n\r\n@main.app_errorhandler(500)\r\ndef internal_server_error(e):\r\n return render_template('500.html'), 500\r\n","sub_path":"routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":5343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"450761011","text":"# Named Entity Recognition on Medical Data (BIO Tagging)\n# Bio-Word2Vec Embeddings Source and Reference: https://github.com/ncbi-nlp/BioWordVec\n\nimport os\nimport re\nimport torch\nimport pickle\nfrom torch import nn\nfrom torch import optim\nimport torch.nn.functional as F\n\nimport numpy as np\nimport random\n\nfrom DNC.dnc import DNC_Module # Importing DNC Implementation\n\nclass task_NER():\n\n def __init__(self):\n self.name = \"NER_task_bio\"\n\n # Controller Params\n self.controller_size = 128\n self.controller_layers = 1\n\n # Head Params\n self.num_read_heads = 1\n self.num_write_heads = 1\n\n # Processor Params\n self.num_inputs = 200 # Length of Embeddings\n self.num_outputs = 7 # Class size\n\n # Memory Params\n self.memory_N = 128\n self.memory_M = 128\n\n # Training Params\n self.num_batches = -1\n self.save_batch = 5 # Saving model after every save_batch number of batches\n self.batch_size = 10\n self.num_epoch = 4\n\n # Optimizer Params\n self.adam_lr = 1e-4\n self.adam_betas = (0.9, 0.999)\n self.adam_eps = 1e-8\n\n # Handles\n self.machine = None\n self.loss = None\n self.optimizer = None\n\n # Class Dictionaries\n self.labelDict = None # Label Dictionary - Labels to Index\n self.reverseDict = None # Inverse Label Dictionary - Index to Labels\n\n # File Paths\n self.concept_path_train = \"../medical_data/train_data/concept\" # Path to train concept files\n self.text_path_train = \"../medical_data/train_data/txt\" # Path to train text summaries\n self.concept_path_test = \"../medical_data/test_data/concept\" # Path to test concept files\n self.text_path_test = \"../medical_data/test_data/txt\" # Path to test text summaries\n self.save_path = \"../medical_data/cleaned_files\" # Save path\n self.embed_dic_path = \"../medical_data/embeddings/bio_embedding_dictionary.dat\" # Word2Vec embeddings Dictionary path\n self.random_vec = \"../medical_data/embeddings/random_vec.dat\" # Path to random embedding (Used to create new vectors)\n self.model_path = 
\"../saved_models/\" # Stores Trained Models\n\n # Miscellaneous\n self.padding_symbol = np.full((self.num_inputs), 0.01) # Padding symbol embedding\n\n def get_task_name(self):\n return self.name\n\n def init_dnc(self):\n self.machine = DNC_Module(self.num_inputs, self.num_outputs, self.controller_size, self.controller_layers, self.num_read_heads, self.num_write_heads, self.memory_N, self.memory_M)\n\n def init_loss(self):\n self.loss = nn.CrossEntropyLoss(reduction = 'mean') # Cross Entropy Loss -> Softmax Activation + Cross Entropy Loss\n\n def init_optimizer(self):\n self.optimizer = optim.Adam(self.machine.parameters(), lr = self.adam_lr, betas = self.adam_betas, eps = self.adam_eps)\n\n def calc_loss(self, Y_pred, Y):\n # Y: dim -> (sequence_len x batch_size)\n # Y_pred: dim -> (sequence_len x batch_size x num_outputs)\n loss_vec = torch.empty(Y.shape[0], dtype=torch.float32)\n for i in range(Y_pred.shape[0]):\n loss_vec[i] = self.loss(Y_pred[i], Y[i])\n return torch.mean(loss_vec)\n\n def calc_cost(self, Y_pred, Y): # Calculates % Cost\n # Y: dim -> (sequence_len x batch_size)\n # Y_pred: dim -> (sequence_len x batch_size x sequence_width)\n\n '''\n Note: \n 1). For considering an prediction to be True Positive, prediction must match completely with labels entity (not partially). Else it is False Negative.\n 2). For considering a prediction to be False Positive, it must be full entity (BIII) and not completely match the label entity.\n '''\n\n # Stores correct class labels for each entity type\n class_bag = {}\n class_bag['problem'] = 0 # Total labels\n class_bag['test'] = 0 # Total labels\n class_bag['treatment'] = 0 # Total labels\n class_bag['problem_cor'] = 0 # Correctly classified labels\n class_bag['test_cor'] = 0 # Correctly classified labels\n class_bag['treatment_cor'] = 0 # Correctly classified labels\n class_bag['problem_fp'] = 0 # False positive classified labels\n class_bag['test_fp'] = 0 # False positive classified labels\n class_bag['treatment_fp'] = 0 # False positive classified labels\n \n pred_class = np.transpose(F.softmax(Y_pred, dim=2).max(2)[1].numpy()).reshape(-1) # Predicted class. 
dim -> (sequence_len*batch_size)\n        Y = np.transpose(Y.numpy()).reshape(-1)                        # Converting to NumPy Array and linearizing\n        cor_pred = (Y == pred_class).astype(np.int)                     # Comparing Prediction and Labels to find correct predictions\n\n        class_bag['word_pred_acc'] = np.divide(np.sum(cor_pred), cor_pred.size)*100.0    # % Accuracy of Correctly Predicted Words (Not Entities)\n\n        # Getting the beginning index of all the entities\n        beg_idx = list(np.where(np.in1d(Y, [0, 2, 4]))[0])\n\n        # Getting the end index of all the entities (All the Index previous of 'Other'/'Begin' and not equal to 'Other')\n        target = np.where(np.in1d(Y, [0, 2, 4, 6]))[0] - 1\n        if target[0] == -1:\n            target = target[1:]\n        end_idx = list(target[np.where(Y[target] != 6)[0]])\n        if Y[-1] != 6:\n            end_idx.append(Y.size-1)\n\n        assert len(beg_idx) == len(end_idx)     # Sanity Check\n        class_bag['total'] = len(beg_idx)       # Total number of Entities\n\n        # Counting Entities\n        sum_vec = np.cumsum(cor_pred)           # Calculates cumulative summation of predicted vector\n        for b, e in zip(beg_idx, end_idx):\n            idx_range = e-b+1                   # Entity span\n            sum_range = sum_vec[e]-sum_vec[b]+1 # Count of entity elements which are predicted correctly\n\n            lab = self.reverseDict[Y[b]][2:]    # Extracting entity type (Problem, Test or Treatment)\n            class_bag[lab] = class_bag[lab]+1   # Getting count of each entity type\n            \n            if sum_range == idx_range:          # +1 if entity is classified correctly\n                class_bag[lab+'_cor'] = class_bag[lab+'_cor']+1\n\n        # Detecting False Positives\n        # Getting the beginning index of all the entities in Predicted Results\n        beg_idx_p = list(np.where(np.in1d(pred_class, [0, 2, 4]))[0])\n        \n        for b in beg_idx_p:\n            if cor_pred[b] == 0:\n                lab = self.reverseDict[pred_class[b]][2:]\n                class_bag[lab+'_fp'] = class_bag[lab+'_fp']+1\n\n        return class_bag\n    \n    def print_word(self, token_class):  # Prints the Class name from Class number\n        word = self.reverseDict[token_class]\n        print(word + \"\\n\")\n\n    def clip_grads(self):   # Clipping gradients for stability\n        \"\"\"Gradient clipping to the range [-10, 10].\"\"\"\n        parameters = list(filter(lambda p: p.grad is not None, self.machine.parameters()))\n        for p in parameters:\n            p.grad.data.clamp_(-10, 10)\n\n    def initialize_labels(self):    # Initializing label dictionaries for Labels->IDX and IDX->Labels\n        self.labelDict = {}         # Label Dictionary - Labels to Index\n        self.reverseDict = {}       # Inverse Label Dictionary - Index to Labels\n\n        # Using BIO labelling scheme\n        self.labelDict['b-problem'] = 0     # Problem - Beginning \n        self.labelDict['i-problem'] = 1     # Problem - Inside\n        self.labelDict['b-test'] = 2        # Test - Beginning\n        self.labelDict['i-test'] = 3        # Test - Inside\n        self.labelDict['b-treatment'] = 4   # Treatment - Beginning\n        self.labelDict['i-treatment'] = 5   # Treatment - Inside\n        self.labelDict['o'] = 6             # Outside Token\n\n        # Making Inverse Label Dictionary\n        for k in self.labelDict.keys():\n            self.reverseDict[self.labelDict[k]] = k\n\n        # Saving the dictionaries into a file\n        self.save_data([self.labelDict, self.reverseDict], os.path.join(self.save_path, \"label_dicts_bio.dat\"))\n\n    def parse_concepts(self, file_path):    # Parses the concept file to extract concepts and labels\n        conceptList = []    # Stores all the Concepts in the File\n\n        f = open(file_path)         # Opening and reading a concept file\n        content = f.readlines()     # Reading all the lines in the concept file\n        f.close()                   # Closing the concept file\n\n        for x in content:           # Reading each line in the concept file\n            dic = {}\n\n            # Cleaning and extracting the entities, labels and their positions in 
the corresponding medical summaries\n            x = re.sub('\\n', ' ', x)\n            x = re.sub(r'\\ +', ' ', x)\n            x = x.strip().split('||')\n\n            temp1, label = x[0].split(' '), x[1].split('=')[1][1:-1]\n\n            temp1[0] = temp1[0][3:]\n            temp1[-3] = temp1[-3][0:-1]\n            entity = temp1[0:-2]\n\n            if len(entity) >= 1:\n                lab = ['i']*len(entity)\n                lab[0] = 'b'\n                lab = [l+\"-\"+label for l in lab]\n            else:\n                print(\"Data in File: \" + file_path + \", not in expected format..\")\n                exit()\n\n            noLab = [self.labelDict[l] for l in lab]\n            sLine, sCol = int(temp1[-2].split(\":\")[0]), int(temp1[-2].split(\":\")[1])\n            eLine, eCol = int(temp1[-1].split(\":\")[0]), int(temp1[-1].split(\":\")[1])\n            \n            '''\n            # Printing the information\n            print(\"------------------------------------------------------------\")\n            print(\"Entity: \" + str(entity))\n            print(\"Entity Label: \" + label)\n            print(\"Labels - BIO form: \" + str(lab))\n            print(\"Labels Index: \" + str(noLab))\n            print(\"Start Line: \" + str(sLine) + \", Start Column: \" + str(sCol))\n            print(\"End Line: \" + str(eLine) + \", End Column: \" + str(eCol))\n            print(\"------------------------------------------------------------\")\n            '''\n\n            # Storing the information as a dictionary\n            dic['entity'] = entity              # Entity Name (In the form of list of words)\n            dic['label'] = label                # Common Label\n            dic['BIO_labels'] = lab             # List of BIO labels for each word\n            dic['label_index'] = noLab          # Labels in the index form\n            dic['start_line'] = sLine           # Start line of the concept in the corresponding text summaries\n            dic['start_word_no'] = sCol         # Starting word number of the concept in the corresponding start line\n            dic['end_line'] = eLine             # End line of the concept in the corresponding text summaries\n            dic['end_word_no'] = eCol           # Ending word number of the concept in the corresponding end line\n\n            # Appending the concept dictionary to the list\n            conceptList.append(dic)\n\n        return conceptList      # Returning all the concepts in the current file in the form of a dictionary list\n\n    def parse_summary(self, file_path):     # Parses the Text summaries\n        file_lines = []     # Stores the lines of files in the list form\n        tags = []           # Stores corresponding labels for each word in the file (Default label: 'o' [Outside])\n        default_label = len(self.labelDict)-1      # default_label is \"6\" (Corresponding to 'Other' entity) \n        # counter = 1   # Temporary variable used during print\n\n        f = open(file_path)         # Opening and reading a concept file\n        content = f.readlines()     # Reading all the lines in the concept file\n        f.close()\n\n        for x in content:\n            x = re.sub('\\n', ' ', x)\n            x = re.sub(r'\\ +', ' ', x)\n            file_lines.append(x.strip().split(\" \"))     # Splitting the lines into a word list and appending each of them to the file list\n            tags.append([default_label]*len(file_lines[-1]))    # Assigning the default_label to all the words in a line\n            '''\n            # Printing the information\n            print(\"------------------------------------------------------------\")\n            print(\"File Lines No: \" + str(counter))\n            print(file_lines[-1])\n            print(\"\\nCorresponding labels:\")\n            print(tags[-1])\n            print(\"------------------------------------------------------------\")\n            counter += 1\n            '''\n            assert len(tags[-1]) == len(file_lines[-1]), \"Line length is not matching labels length...\"     # Sanity Check\n        return file_lines, tags\n\n    def modify_labels(self, conceptList, tags):     # Modifies the default labels of each word in text files with the true labels from the concept files\n        for e in conceptList:       # Iterating over all the dictionary elements in the Concept List\n            if e['start_line'] == 
e['end_line']:      # Checking whether concept is spanning over a single line or multiple lines in the summary\n                tags[e['start_line']-1][e['start_word_no']:e['end_word_no']+1] = e['label_index'][:]\n            else:\n                start = e['start_line']\n                end = e['end_line']\n                beg = 0\n                for i in range(start, end+1):       # Distributing labels over multiple lines in the text summaries\n                    if i == start:\n                        tags[i-1][e['start_word_no']:] = e['label_index'][0:len(tags[i-1])-e['start_word_no']]\n                        beg = len(tags[i-1])-e['start_word_no']\n                    elif i == end:\n                        tags[i-1][0:e['end_word_no']+1] = e['label_index'][beg:]\n                    else:\n                        tags[i-1][:] = e['label_index'][beg:beg+len(tags[i-1])]\n                        beg = beg+len(tags[i-1])\n        return tags\n\n    def print_data(self, file, file_lines, tags):       # Prints the given data\n        counter = 1\n\n        print(\"\\n************ Printing details of the file: \" + file + \" ************\\n\")\n        for x in file_lines:\n            print(\"------------------------------------------------------------\")\n            print(\"File Lines No: \" + str(counter))\n            print(x)\n            print(\"\\nCorresponding labels:\")\n            print([self.reverseDict[i] for i in tags[counter-1]])\n            print(\"\\nCorresponding Label Indices:\")\n            print(tags[counter-1])\n            print(\"------------------------------------------------------------\")\n            counter += 1\n\n    def save_data(self, obj_list, s_path):      # Saves the file into the binary file using Pickle\n        # Note: The 'obj_list' must be a list and none other than that\n        pickle.dump(tuple(obj_list), open(s_path,'wb'))\n\n    def acquire_data(self, task):       # Read all the concept files to get concepts and labels, process them and save them\n        data = {}   # Dictionary to store all the data objects (conceptList, file_lines, tags) each indexed by file name\n\n        if task == 'train':     # Determining the task type to assign the data path accordingly\n            t_path = self.text_path_train\n            c_path = self.concept_path_train\n        else:\n            t_path = self.text_path_test\n            c_path = self.concept_path_test\n\n        for f in os.listdir(t_path):\n            f1 = f.split('.')[0] + \".con\"\n            if os.path.isfile(os.path.join(c_path, f1)):\n                conceptList = self.parse_concepts(os.path.join(c_path, f1))     # Parsing concepts and labels from the corresponding concept file\n                file_lines, tags = self.parse_summary(os.path.join(t_path, f))  # Parses the document summaries to get the written notes\n                tags = self.modify_labels(conceptList, tags)                    # Modifies the default labels of each word with the true labels from the concept files\n                data[f1] = [conceptList, file_lines, tags]                      # Storing each object in dictionary\n                # self.print_data(f, file_lines, tags)                          # Printing the details\n        return data\n\n    def structure_data(self, data_dict):    # Structures the data in proper trainable form\n        final_line_list = []    # Stores words of all the files in separate sub-lists\n        final_tag_list = []     # Stores tags of all the files in separate sub-lists\n\n        for k in data_dict.keys():      # Extracting data from each pre-processed file in dictionary\n            file_lines = data_dict[k][1]    # Extracting story\n            tags = data_dict[k][2]          # Extracting corresponding labels\n\n            # Creating empty lists\n            temp1 = []\n            temp2 = []\n\n            # Merging all the lines in file into a single list. 
Same for corresponding labels\n for i in range(len(file_lines)):\n temp1.extend(file_lines[i])\n temp2.extend(tags[i])\n \n assert len(temp1) == len(temp2), \"Word length not matching Label length for story in \" + str(k) # Sanity Check\n\n final_line_list.append(temp1)\n final_tag_list.append(temp2)\n \n assert len(final_line_list) == len(final_tag_list), \"Number of stories not matching number of labels list\" # Sanity Check\n return final_line_list, final_tag_list\n \n def padding(self, line_list, tag_list): # Pads stories with padding symbol to make them of same length \n diff = 0\n max_len = 0\n outside_class = len(self.labelDict)-1 # Classifying padding symbol as \"outside\" term\n\n # Calculating Max Summary Length\n for i in range(len(line_list)):\n if len(line_list[i])>max_len:\n max_len = len(line_list[i])\n\n for i in range(len(line_list)):\n diff = max_len - len(line_list[i])\n line_list[i].extend([self.padding_symbol]*diff)\n tag_list[i].extend([outside_class]*diff)\n assert (len(line_list[i]) == max_len) and (len(line_list[i]) == len(tag_list[i])), \"Padding unsuccessful\" # Sanity check\n return np.asarray(line_list), np.asarray(tag_list) # Making NumPy array of size (batch_size x story_length x word size) and (batch_size x story_length x 1) respectively\n\n def embed_input(self, line_list): # Converts words to vector embeddings\n final_list = [] # Stores embedded words\n summary = None # Temp variable\n word = None # Temp variable\n temp = None # Temp variable\n\n embed_dic = pickle.load(open(self.embed_dic_path, 'rb')) # Loading word2vec dictionary using Pickle\n r_embed = pickle.load(open(self.random_vec, 'rb')) # Loading Random embedding\n\n for i in range(len(line_list)): # Iterating over all the summaries\n summary = line_list[i]\n final_list.append([]) # Reserving space for curent summary\n\n for j in range(len(summary)):\n word = summary[j].lower()\n if word in embed_dic: # Checking for existence of word in dictionary\n final_list[-1].append(embed_dic[word])\n else:\n temp = r_embed[:] # Copying the values of the list\n random.shuffle(temp) # Randomly shuffling the word embedding to make it unique\n temp = np.asarray(temp, dtype=np.float32) # Converting to NumPy array\n final_list[-1].append(temp)\n return final_list\n\n def prepare_data(self, task='train'): # Preparing all the data necessary\n line_list, tag_list = None, None\n\n '''\n line_list is the list of rows, where each row is a list of all the words in a medical summary\n Similar is the case for tag_list, except, it stores labels for each words\n '''\n\n if not os.path.exists(self.save_path):\n os.mkdir(self.save_path) # Creating a new directory if it does not exist else reading previously saved data\n \n if not os.path.exists(os.path.join(self.save_path, \"label_dicts_bio.dat\")):\n self.initialize_labels() # Initialize label to index dictionaries\n else:\n self.labelDict, self.reverseDict = pickle.load(open(os.path.join(self.save_path, \"label_dicts_bio.dat\"), 'rb')) # Loading Label dictionaries\n \n if not os.path.exists(os.path.join(self.save_path, \"object_dict_bio_\"+str(task)+\".dat\")):\n data_dict = self.acquire_data(task) # Read data from file\n line_list, tag_list = self.structure_data(data_dict) # Structures the data into proper form\n line_list = self.embed_input(line_list) # Embeds input data (words) into embeddings\n self.save_data([line_list, tag_list], os.path.join(self.save_path, \"object_dict_bio_\"+str(task)+\".dat\"))\n else:\n line_list, tag_list = 
pickle.load(open(os.path.join(self.save_path, \"object_dict_bio_\"+str(task)+\".dat\"), 'rb')) # Loading Data dictionary\n return line_list, tag_list\n\n def get_data(self, task='train'):\n line_list, tag_list = self.prepare_data(task)\n\n # Shuffling stories\n story_idx = list(range(0, len(line_list)))\n random.shuffle(story_idx)\n\n num_batch = int(len(story_idx)/self.batch_size)\n self.num_batches = num_batch\n\n # Out Data\n x_out = []\n y_out = []\n \n counter = 1\n\n for i in story_idx:\n if num_batch<=0:\n break\n\n x_out.append(line_list[i])\n y_out.append(tag_list[i])\n\n if counter % self.batch_size == 0:\n counter = 0\n \n # Padding and converting labels to one hot vectors\n x_out_pad, y_out_pad = self.padding(x_out, y_out)\n x_out_array = torch.tensor(x_out_pad.swapaxes(0, 1), dtype=torch.float32) # Converting from (batch_size x story_length x word size) to (story_length x batch_size x word size)\n y_out_array = torch.tensor(y_out_pad.swapaxes(0, 1), dtype=torch.long) # Converting from (batch_size x story_length x 1) to (story_length x batch_size x 1)\n\n x_out = []\n y_out = []\n num_batch -= 1\n\n yield (self.num_batches - num_batch), x_out_array, y_out_array\n counter += 1\n\n def train_model(self):\n # Here, the model is optimized using Cross Entropy Loss.\n loss_list = []\n seq_length = []\n last_batch = 0\n\n # self.load_model(1, 99, 13) # Loading Pre-Trained model to train further\n\n for j in range(self.num_epoch):\n for batch_num, X, Y in self.get_data(task='train'):\n self.optimizer.zero_grad() # Making old gradients zero before calculating the fresh ones\n self.machine.initialization(self.batch_size) # Initializing states\n Y_out = torch.empty((X.shape[0], X.shape[1], self.num_outputs), dtype=torch.float32) # dim: (seq_len x batch_size x num_output)\n\n # Feeding the DNC network all the data first and then predicting output\n # by giving zero vector as input and previous read states and hidden vector\n # and thus training vector this way to give outputs matching the labels\n\n embeddings = self.machine.backward_prediction(X) # Creating embeddings from data for backward calculation\n temp_size = X.shape[0]\n\n for i in range(temp_size):\n Y_out[i, :, :], _ = self.machine(X[i], embeddings[temp_size-i-1]) # Passing Embeddings from backwards\n\n loss = self.calc_loss(Y_out, Y)\n loss.backward()\n self.clip_grads()\n self.optimizer.step()\n\n class_bag = self.calc_cost(Y_out, Y)\n\n corr = class_bag['problem_cor']+class_bag['test_cor']+class_bag['treatment_cor']\n tot = class_bag['total']\n\n loss_list += [loss.item()]\n seq_length += [Y.shape[0]]\n\n if (batch_num % self.save_batch) == 0:\n self.save_model(j, batch_num)\n\n last_batch = batch_num\n print(\"Epoch: \" + str(j) + \"/\" + str(self.num_epoch) + \", Batch: \" + str(batch_num) + \"/\" + str(self.num_batches) + \", Loss: {0:.2f}, \".format(loss.item()) + \\\n \"Batch Accuracy (Entity Prediction): {0:.2f} %, \".format((float(corr)/float(tot))*100.0) + \"Batch Accuracy (Word Prediction): {0:.2f} %\".format(class_bag['word_pred_acc']))\n self.save_model(j, last_batch)\n\n def test_model(self): # Testing the model\n correct = 0\n total = 0\n result_dict = {}\n result_dict['total_problem'] = 0 # Total labels in data\n result_dict['total_test'] = 0 # Total labels in data\n result_dict['total_treatment'] = 0 # Total labels in data\n result_dict['correct_problem'] = 0 # Correctly classified labels\n result_dict['correct_test'] = 0 # Correctly classified labels\n result_dict['correct_treatment'] = 0 # Correctly 
classified labels\n result_dict['false_positive_problem'] = 0 # False Positive labels\n result_dict['false_positive_test'] = 0 # False Positive labels\n result_dict['false_positive_treatment'] = 0 # False Positive labels\n print(\"\\n\")\n\n for batch_num, X, Y in self.get_data(task='test'):\n self.machine.initialization(self.batch_size) # Initializing states\n Y_out = torch.empty((X.shape[0], X.shape[1], self.num_outputs), dtype=torch.float32) # dim: (seq_len x batch_size x num_output)\n\n # Feeding the DNC network all the data first and then predicting output\n # by giving zero vector as input and previous read states and hidden vector\n # and thus training vector this way to give outputs matching the labels\n\n embeddings = self.machine.backward_prediction(X) # Creating embeddings from data for backward calculation\n temp_size = X.shape[0]\n\n for i in range(temp_size):\n Y_out[i, :, :], _ = self.machine(X[i], embeddings[temp_size-i-1])\n\n class_bag = self.calc_cost(Y_out, Y)\n\n corr = class_bag['problem_cor']+class_bag['test_cor']+class_bag['treatment_cor']\n tot = class_bag['total']\n\n result_dict['total_problem'] = result_dict['total_problem'] + class_bag['problem']\n result_dict['total_test'] = result_dict['total_test'] + class_bag['test']\n result_dict['total_treatment'] = result_dict['total_treatment'] + class_bag['treatment']\n result_dict['correct_problem'] = result_dict['correct_problem'] + class_bag['problem_cor']\n result_dict['correct_test'] = result_dict['correct_test'] + class_bag['test_cor']\n result_dict['correct_treatment'] = result_dict['correct_treatment'] + class_bag['treatment_cor']\n result_dict['false_positive_problem'] = result_dict['false_positive_problem'] + class_bag['problem_fp']\n result_dict['false_positive_test'] = result_dict['false_positive_test'] + class_bag['test_fp']\n result_dict['false_positive_treatment'] = result_dict['false_positive_treatment'] + class_bag['treatment_fp']\n\n correct += corr\n total += tot\n print(\"Test Example \" + str(batch_num) + \"/\" + str(self.num_batches) + \" processed, Batch Accuracy: {0:.2f} %, \".format((float(corr)/float(tot))*100.0) + \"Batch Accuracy (Word Prediction): {0:.2f} %\".format(class_bag['word_pred_acc']))\n \n result_dict['accuracy'] = (float(correct)/float(total))*100.0\n result_dict = self.calc_metrics(result_dict)\n print(\"\\nOverall Entity Prediction Accuracy: {0:.2f} %\".format(result_dict['accuracy']))\n return result_dict\n\n def calc_metrics(self, result_dict): # Calculates Certain Metrices\n precision_p = float(result_dict['correct_problem'])/float(result_dict['correct_problem'] + result_dict['false_positive_problem']) # Problem Precision\n recall_p = float(result_dict['correct_problem'])/float(result_dict['total_problem']) # Problem Recall\n\n precision_ts = float(result_dict['correct_test'])/float(result_dict['correct_test'] + result_dict['false_positive_test']) # Test Precision\n recall_ts = float(result_dict['correct_test'])/float(result_dict['total_test']) # Test Recall\n\n precision_tr = float(result_dict['correct_treatment'])/float(result_dict['correct_treatment'] + result_dict['false_positive_treatment']) # Treatment Precision\n recall_tr = float(result_dict['correct_treatment'])/float(result_dict['total_treatment']) # Treatment Recall\n\n f_score_p = 2*precision_p*recall_p/(precision_p+recall_p) # Problem F1 Score\n f_score_ts = 2*precision_ts*recall_ts/(precision_ts+recall_ts) # Test F1 Score\n f_score_tr = 2*precision_tr*recall_tr/(precision_tr+recall_tr) # Treatment F1 Score\n\n 
result_dict['problem_precision'] = precision_p\n result_dict['problem_recall'] = recall_p\n result_dict['problem_f1'] = f_score_p\n result_dict['test_precision'] = precision_ts\n result_dict['test_recall'] = recall_ts\n result_dict['test_f1'] = f_score_ts\n result_dict['treatment_precision'] = precision_tr\n result_dict['treatment_recall'] = recall_tr\n result_dict['treatment_f1'] = f_score_tr\n result_dict['macro_average_f1'] = (f_score_p + f_score_ts + f_score_tr)/3.0 # Macro Average F1 Score\n\n # Micro Average F1 Score\n correct_sum = result_dict['correct_problem'] + result_dict['correct_test'] + result_dict['correct_treatment']\n fp_sum = result_dict['false_positive_problem'] + result_dict['false_positive_test'] + result_dict['false_positive_treatment']\n total_sum = result_dict['total_problem'] + result_dict['total_test'] + result_dict['total_treatment']\n \n precision_avg = float(correct_sum)/float(correct_sum + fp_sum)\n recall_avg = float(correct_sum)/float(total_sum)\n result_dict['micro_average_f1'] = 2*precision_avg*recall_avg/(precision_avg+recall_avg)\n\n return result_dict\n\n def save_model(self, curr_epoch, curr_batch):\n # Here 'start_epoch' and 'start_batch' params below are the 'epoch' and 'batch' number from which to start training after next model loading\n # Note: It is recommended to start from the 'start_epoch' and not 'start_epoch' + 'start_batch', because batches are formed randomly\n if not os.path.exists(os.path.join(self.model_path, self.name)):\n os.mkdir(os.path.join(self.model_path, self.name))\n state_dic = {'task_name': self.name, 'start_epoch': curr_epoch + 1, 'start_batch': curr_batch + 1, 'state_dict': self.machine.state_dict(), 'optimizer_dic' : self.optimizer.state_dict()}\n filename = self.model_path + self.name + \"/\" + self.name + \"_\" + str(curr_epoch) + \"_\" + str(curr_batch) + \"_saved_model.pth.tar\"\n torch.save(state_dic, filename)\n\n def load_model(self, option, epoch, batch):\n path = self.model_path + self.name + \"/\" + self.name + \"_\" + str(epoch) + \"_\" + str(batch) + \"_saved_model.pth.tar\"\n if option == 1: # Loading for training\n checkpoint = torch.load(path)\n self.machine.load_state_dict(checkpoint['state_dict'])\n self.optimizer.load_state_dict(checkpoint['optimizer_dic'])\n else: # Loading for testing\n checkpoint = torch.load(path)\n self.machine.load_state_dict(checkpoint['state_dict'])\n self.machine.eval()","sub_path":"code/dnc_code/tasks/ner_task_bio.py","file_name":"ner_task_bio.py","file_ext":"py","file_size_in_byte":32808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"602435564","text":"from .translate import toArrayOfSizes\nfrom ..blueprint.rectangle import Rectangle\nfrom ..message import catch, ERROR, WARNING, OK\nfrom .element import Element\nfrom ..rect import Rect\nfrom .color import Color\nfrom .const import *\n\nclass Form(Element):\n\tdef __init__(self, name):\n\t\tsuper(Form, self).__init__(name)\n\t\tself.type = \"form\"\n\n\t\t# Specific to the Grid element\n\t\tself.attributes = {\n\t\t\t\"rows\": \"1\",\n\t\t\t\"cols\": \"1\",\n\t\t\t\"drag-window\": False,\n\t\t\t\"background-color\": \"none\",\n\t\t\t\"padding\": \"0px\"\n\t\t}\n\n\tdef placeChildren(self, rect, n_child):\n\t\t# Reset children\n\t\tself.child_rects = []\n\n\t\t# Apply padding\n\t\tgrid_rect = rect.getPaddingRect(self.attributes[\"padding\"])\n\n\t\trows, rows_typ = catch(\n\t\t\ttoArrayOfSizes, (self.attributes[\"rows\"], grid_rect.h),\n\t\t\tERROR, self.name + \" 
.rows\")\n\n\t\tcols, cols_typ = catch(\n\t\t\ttoArrayOfSizes, (self.attributes[\"cols\"], grid_rect.w),\n\t\t\tERROR, self.name + \" .cols\")\n\n\t\tsr = grid_rect.y; sc = grid_rect.x\n\t\tfor r, row_ in enumerate(rows):\n\t\t\trow = int(row_)\n\t\t\tif rows_typ[r] == \"%\":\n\t\t\t\trow = int(row_ * grid_rect.h)\n\t\t\tfor c, col_ in enumerate(cols):\n\t\t\t\tcol = int(col_)\n\t\t\t\tif cols_typ[c] == \"%\":\n\t\t\t\t\tcol = int(col_ * grid_rect.w)\n\n\t\t\t\tself.child_rects.append(Rect(sc, sr, col, row))\n\t\t\t\tsc += col\n\t\t\tsr += row\n\t\t\tsc = grid_rect.x\n\n\tdef build(self, renderer, rect):\n\t\t# Fetch colors\n\t\tcolors = {\n\t\t\t\"background\": Color[self.attributes[\"background-color\"]]\n\t\t}\n\n\n\t\t### Bluid blueprint ###\n\t\tself._clearBlueprint()\n\n\t\tif colors[\"background\"] is not None:\n\t\t\tself._addNewPrimitive(Rectangle, renderer, rect, colors[\"background\"])\n","sub_path":"antlia/elements/form.py","file_name":"form.py","file_ext":"py","file_size_in_byte":1568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"124983794","text":"#!/usr/bin/env python3\n\n\nimport sys\nimport os.path\nfrom libsbml import readSBML\nimport rpSBML\n\n\n# Python code t get difference of two lists\n# Using set()\ndef Diff(li1, li2):\n return (list(set(li1) - set(li2)))\n\nPRINT = False\n\n\n\ndef main (args):\n \"\"\"Usage: rpUnicity \n \"\"\"\n\n\n if len(args) != 2:\n print(\"\\n\" + \"Usage: rpUnicity \" + \"\\n\")\n return 1\n\n path = args[1]\n files = []\n\n # for f_or_d in os.listdir(path):\n # if os.path.isfile(f_or_d) and '.xml' in f_or_d:\n # files.append(os.path.join(r, file))\n\n # r=root, d=directories, f = files\n for r, d, f in os.walk(path):\n for file in f:\n if '.xml' in file:\n files.append(os.path.join(r, file))\n\n d_pathways = {}\n\n for filename in files:\n\n document = readSBML(filename)\n\n if document.getNumErrors() > 0:\n printLine(\"Encountered the following SBML errors:\" )\n document.printErrors()\n return 1\n\n level = document.getLevel()\n version = document.getVersion()\n\n model = document.getModel()\n\n if model is None:\n print(\"No model present.\" )\n return 1\n\n idString = \" id: \"\n if level == 1:\n idString = \"name: \"\n id = \"(empty)\"\n if model.isSetId():\n id = model.getId()\n\n if PRINT:\n PrintInfos1(filename, level, version, idString, id, model)\n\n # Read RP Annotations\n groups = model.getPlugin('groups')\n rpsbml = rpSBML.rpSBML('test')\n\n\n # Get Reactions\n reactions = {}\n for member in groups.getGroup('rp_pathway').getListOfMembers():\n object = model.getReaction(member.getIdRef())\n reactions[member.getIdRef()] = rpsbml.readBRSYNTHAnnotation(object.getAnnotation())\n\n\n # Get Species\n species = {}\n for specie in model.getListOfSpecies():\n species[specie.getId()] = rpsbml.readBRSYNTHAnnotation(specie.getAnnotation())\n\n # print()\n # print(\"REACTIONS\")\n # print(reactions)\n # print()\n # print(\"SPECIES\")\n # print(species)\n # print()\n\n # Pathways dict\n d_reactions = {}\n\n # Select Reactions already loaded (w/o Sink one then)\n for reaction in reactions:\n\n d_reactions[reactions[reaction]['smiles']] = {}\n\n # Fill the reactants in a dedicated dict\n d_reactants = {}\n for reactant in model.getReaction(reaction).getListOfReactants():#inchikey / inchi sinon miriam sinon IDs\n # Il faut enregistrer toutes les infos (inchi, miriam, ids)\n d_reactants[species[reactant.getSpecies()]['inchikey']] = reactant.getStoichiometry()\n # Put all reactants dicts 
in reactions dict for which smiles notations are the keys\n d_reactions[reactions[reaction]['smiles']]['Reactants'] = d_reactants\n\n # Fill the products in a dedicated dict\n d_products = {}\n for product in model.getReaction(reaction).getListOfProducts():\n d_products[species[product.getSpecies()]['inchikey']] = product.getStoichiometry()\n # Put all products dicts in reactions dict for which smiles notations are the keys\n d_reactions[reactions[reaction]['smiles']]['Products'] = d_products\n\n d_pathways[filename] = d_reactions\n\n if PRINT:\n PrintInfos2(reaction, d_reactions)\n\n unique_pathways = []\n unique_files = []\n\n for file,pathway in d_pathways.items():\n if pathway not in unique_pathways:\n unique_pathways += [pathway]\n unique_files += [file]\n\n # print(files)\n # print()\n print(unique_files)\n # print()\n # print(Diff(files,unique_files))\n\n\n return 0\n\n\ndef PrintInfos1(filename, level, version, idString, id, model):\n print(\"\\n\"\n + \"File: \" + filename\n + \" (Level \" + str(level) + \", version \" + str(version) + \")\" )\n\n print(\" \"\n + idString\n + id )\n\n if model.isSetSBOTerm():\n print(\" model sboTerm: \" + model.getSBOTerm() )\n\n print(\"functionDefinitions: \" + str(model.getNumFunctionDefinitions()) )\n print(\" unitDefinitions: \" + str(model.getNumUnitDefinitions()) )\n print(\" compartmentTypes: \" + str(model.getNumCompartmentTypes()) )\n print(\" specieTypes: \" + str(model.getNumSpeciesTypes()) )\n print(\" compartments: \" + str(model.getNumCompartments()) )\n print(\" species: \" + str(model.getNumSpecies()) )\n print(\" parameters: \" + str(model.getNumParameters()) )\n print(\" initialAssignments: \" + str(model.getNumInitialAssignments()) )\n print(\" rules: \" + str(model.getNumRules()) )\n print(\" constraints: \" + str(model.getNumConstraints()) )\n print(\" reactions: \" + str(model.getNumReactions()) )\n print(\" events: \" + str(model.getNumEvents()) )\n print(\"\\n\")\n\ndef PrintInfos2(reac_name, reaction):\n print('\\033[1m' + reac_name + '\\033[0m')\n print(reaction)\n print()\n\n\nif __name__ == '__main__':\n main(sys.argv)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"50000204","text":"import os\n\nfrom dogehouse import DogeClient\nfrom dogehouse.events import (\n HandRaisedEvent, ReadyEvent, RoomJoinEvent,\n MessageEvent, UserJoinEvent, UserLeaveEvent,\n)\n\ntoken = os.getenv(\"TOKEN\", '')\nrefresh_token = os.getenv(\"REFRESH_TOKEN\", '')\n\ndoge = DogeClient(token, refresh_token)\n\n\n@doge.on_ready\nasync def make_my_room(event: ReadyEvent) -> None:\n print(f\"Successfully connected as @{event.user.username}!\")\n await doge.create_room('Hello dogehouse.py!')\n\n\n@doge.on_room_join\nasync def joined_room(event: RoomJoinEvent) -> None:\n print(\"Joined room\", event.room.name)\n\n\n@doge.on_user_join\nasync def greet_user(event: UserJoinEvent) -> None:\n await doge.send_message(f\"Hello @{event.user.username}\")\n await doge.send_message(f\"Hi, I sent you a whisper!\", whisper_to=[event.user])\n\n\n@doge.on_user_leave\nasync def user_left(event: UserLeaveEvent) -> None:\n await doge.send_message(f\"Bye @{event.user.username}\")\n\n\n@doge.on_message\nasync def echo_message(event: MessageEvent) -> None:\n msg = event.message\n print(f'@{msg.author.username} sent {msg.content}')\n\n\n@doge.on_hand_raise\nasync def accept_speaker_request(event: HandRaisedEvent) -> None:\n await 
doge.add_speaker(event.user_id)\n await doge.send_message(f'Gave speaker permissions to: {event.user_id}')\n\n\n@doge.command\nasync def echo(event: MessageEvent) -> None:\n msg = event.message\n await doge.send_message(f'@{msg.author.username} said {msg.content}')\n\ndoge.run()\n","sub_path":"examples/basic_bot.py","file_name":"basic_bot.py","file_ext":"py","file_size_in_byte":1472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"104161075","text":"import itertools\nimport sys\nimport time\nimport random\n\nfrom pyspark import SparkConf, SparkContext\nfrom pyspark.sql import SparkSession\n\n\ndef cmp(key1, key2):\n \"\"\"\n\n :param key1:\n :param key2:\n :return:\n \"\"\"\n return (key1, key2) if key1 < key2 else (key2, key1)\n\n\ndef export2File(result_array, file_path):\n \"\"\"\n export list content to a file\n :param result_array: a list of dict\n :param file_path: output file path\n :return: nothing, but a file\n \"\"\"\n with open(file_path, 'w+') as output_file:\n for id_array in result_array:\n output_file.writelines(str(id_array)[1:-1] + \"\\n\")\n output_file.close()\n\n\ndef update_dict(dict_obj, key, increment):\n \"\"\"\n update the value with the same key, rather than replace it\n :param dict_obj:\n :param key:\n :param increment:\n :return:\n \"\"\"\n old_weight = dict_obj[key]\n dict_obj[key] = float(old_weight + increment)\n return dict_obj\n\n\ndef extend_dict(dict_obj, increment_dict):\n \"\"\"\n same as list extend\n :param dict_obj:\n :param increment_dict:\n :return:\n \"\"\"\n for key, value in increment_dict.items():\n if key in dict_obj.keys():\n dict_obj = update_dict(dict_obj, key, value)\n else:\n dict_obj[key] = value\n return dict_obj\n\n\nclass GraphFrame(object):\n\n def __init__(self, vertexes, edges):\n \"\"\"\n\n :param vertexes: list of vertexes [1,2,3,4...]\n :param edges: a big dict(vertex: (list of vertex it connected)\n \"\"\"\n self.vertexes = vertexes\n self.vertex_weight_dict = dict()\n self.__init_weight_dict__()\n\n self.edges = edges\n self.__init_adjacent_matrix__(edges)\n\n # variable using for compute betweenness\n self.betweenness_result_dict = dict()\n self.betweenness_result_tuple_list = None\n\n # variable using for compute modularity\n self.best_communities = None\n\n def __init_weight_dict__(self):\n [self.vertex_weight_dict.setdefault(vertex, 1) for vertex in self.vertexes]\n\n def __init_adjacent_matrix__(self, edges):\n \"\"\"\n build a set which contain all edge pair\n :param edges: original edges (a big dict (vertex: [list of vertex it connected]))\n :return:\n \"\"\"\n self.original_edges = edges\n self.m = self._count_edges(edges)\n\n # build adjacent matrix for original edges\n edge_set = set()\n for start_node, end_nodes in edges.items():\n for end_node in end_nodes:\n edge_set.add(cmp(start_node, end_node))\n self.A_matrix = edge_set\n\n def _count_edges(self, edges):\n \"\"\"\n :param edges: a big dict(vertex: (list of vertex it connected)\n :return:\n \"\"\"\n visited = set()\n count = 0\n for start_node, end_nodes in edges.items():\n for end_node in end_nodes:\n key = cmp(start_node, end_node)\n if key not in visited:\n visited.add(key)\n count += 1\n return count\n\n def _build_tree(self, root):\n # root set in level 0 and no parent\n tree = dict()\n tree[root] = (0, list())\n\n # since BFS only visit each node once,\n # so use visited variable to save these records\n visited = set()\n\n need2visit = list()\n need2visit.append(root)\n\n while len(need2visit) > 0:\n 
parent_node = need2visit.pop(0)\n visited.add(parent_node)\n for children in self.edges[parent_node]:\n if children not in visited:\n visited.add(children)\n tree[children] = (tree[parent_node][0] + 1, [parent_node])\n need2visit.append(children)\n elif tree[parent_node][0] + 1 == tree[children][0]:\n tree[children][1].append(parent_node)\n\n return {k: v for k, v in sorted(tree.items(), key=lambda kv: -kv[1][0])}\n\n def _traverse_tree(self, tree_dict):\n \"\"\"\n traverse the tree and compute weight for each edge\n :param tree_dict: {'2GUjO7NU88cPXpoffYCU8w': (9, ['a48HhwcmjFLApZhiax41IA']), ...\n :return:\n \"\"\"\n weight_dict = self.vertex_weight_dict.copy()\n shortest_path_dict = self._find_num_of_paths(tree_dict)\n result_dict = dict()\n for key, value in tree_dict.items():\n if len(value[1]) > 0:\n denominator = sum([shortest_path_dict[parent] for parent in value[1]])\n for parent in value[1]:\n temp_key = cmp(key, parent)\n contribution = float(float(weight_dict[key]) * int(shortest_path_dict[parent]) / denominator)\n result_dict[temp_key] = contribution\n # update every parent node weight\n weight_dict = update_dict(weight_dict, parent, contribution)\n\n return result_dict\n\n def _find_num_of_paths(self, tree_dict):\n \"\"\"\n find how many the number of shortest path each node has\n :param tree_dict: {'2GUjO7NU88cPXpoffYCU8w': (9, ['a48HhwcmjFLApZhiax41IA']), ...\n :return: {'y6jsaAXFstAJkf53R4_y4Q': 1, '0FVcoJko1kfZCrJRfssfIA': 1, '2quguRdKBzul ...\n \"\"\"\n level_dict = dict()\n shortest_path_dict = dict()\n for child_node, level_parents in tree_dict.items():\n level_dict.setdefault(level_parents[0], []) \\\n .append((child_node, level_parents[1]))\n\n for level in range(0, len(level_dict.keys())):\n for (child_node, parent_node_list) in level_dict[level]:\n if len(parent_node_list) > 0:\n shortest_path_dict[child_node] = sum([shortest_path_dict[parent]\n for parent in parent_node_list])\n else:\n shortest_path_dict[child_node] = 1\n return shortest_path_dict\n\n def computeBetweenness(self):\n \"\"\"\n compute betweenness of each edge pair\n :return: list of tuple(pair, float)\n => e.g. [(('0FVcoJko1kfZCrJRfssfIA', 'bbK1mL-AyYCHZncDQ_4RgA'), 189.0), ...\n \"\"\"\n self.betweenness_result_dict = dict()\n for node in self.vertexes:\n # 1.The algorithm begins by performing a breadth-first search\n # (BFS) of the graph, starting at the vertex X in all vertexes list\n # =>{'2GUjO7NU88cPXpoffYCU8w': (9, ['a48HhwcmjFLApZhiax41IA']),\n # '6YmRpoIuiq8I19Q8dHKTHw': (9, ['a48Hh\n bfs_tree = self._build_tree(root=node)\n # 2. Label each node by the number of shortest\n # paths that reach it from the root node\n # actually, this step has been done in the first step,\n # since the len of value[1] is exactly the number of shortest path\n # 3. Calculate for each edge e, the sum over all nodes\n # Y (of the fraction) of the shortest paths from the root\n # X to Y that go through edge e\n temp_result_dict = self._traverse_tree(bfs_tree)\n\n self.betweenness_result_dict = extend_dict(self.betweenness_result_dict,\n temp_result_dict)\n\n # 4. 
Divide by 2 to get true betweenness\n self.betweenness_result_dict = \\\n dict(map(lambda kv: (kv[0], float(kv[1] / 2)),\n self.betweenness_result_dict.items()))\n\n self.betweenness_result_tuple_list = sorted(\n self.betweenness_result_dict.items(), key=lambda kv: (-kv[1], kv[0][0]))\n\n return self.betweenness_result_tuple_list\n\n def extractCommunities(self):\n \"\"\"\n extract communities from butch of edge pairs\n :return:\n \"\"\"\n max_modularity = float(\"-inf\")\n # reuse the betweenness dict\n if len(self.betweenness_result_tuple_list) > 0:\n # cut edges with highest betweenness\n self._cut_highest_btw_edge(self.betweenness_result_tuple_list)\n self.best_communities, max_modularity = self._computeModularity()\n # recompute and update self.betweenness_result_tuple_list\n self.betweenness_result_tuple_list = self.computeBetweenness()\n\n while True:\n # cut edges with highest betweenness\n self._cut_highest_btw_edge(self.betweenness_result_tuple_list)\n communities, current_modularity = self._computeModularity()\n self.betweenness_result_tuple_list = self.computeBetweenness()\n print(\"current_modularity -> \", current_modularity)\n if current_modularity < max_modularity:\n # break when elbow point shows\n break\n else:\n # when current_modularity > max_modularity happens:\n # we still need to cut the edges\n self.best_communities = communities\n max_modularity = current_modularity\n\n return sorted(self.best_communities, key=lambda item: (len(item), item))\n\n def _cut_highest_btw_edge(self, edge_btw_tuple_list):\n \"\"\"\n remove edges with highest betweenness and also update the self.edges\n :param edge_btw_tuple_list: need to be a [sorted] list, sorted by value\n :return:\n \"\"\"\n # this is the edge you need to cut\n temp_value = 0\n # if there have multiple pair have same highest bet score,\n # we cut them in one loop\n need2cut_edges_list = list()\n for item in edge_btw_tuple_list:\n if temp_value == 0: temp_value = item[1]\n if temp_value == item[1]: need2cut_edges_list.append(item[0])\n if temp_value > item[1]: break\n\n for edge_pair in need2cut_edges_list:\n if self.edges[edge_pair[0]] is not None:\n self.edges[edge_pair[0]].remove(edge_pair[1])\n\n if self.edges[edge_pair[1]] is not None:\n self.edges[edge_pair[1]].remove(edge_pair[0])\n\n def _computeModularity(self):\n \"\"\"\n compute the modularity based on communities we get\n :return: a list of communities and a float number => modularity\n \"\"\"\n\n # 1. detect communities from current edge_pairs\n communities = self._detectCommunities()\n\n # 2. 
compute modularity based on the communities\n # 2.1 count original graph's edge number => self.m\n # 2.2 build adjacent matrix => self.A_matrix\n temp_sum = 0\n for cluster in communities:\n for node_pair in itertools.combinations(list(cluster), 2):\n temp_key = cmp(node_pair[0], node_pair[1])\n k_i = len(self.edges[node_pair[0]])\n k_j = len(self.edges[node_pair[1]])\n A = 1 if temp_key in self.A_matrix else 0\n temp_sum += float(A - (k_i * k_j / (2 * self.m)))\n return communities, float(temp_sum / (2 * self.m))\n\n def _detectCommunities(self):\n \"\"\"\n detect communities based on self.edge\n basically, we randomly pick one root and find all connected node with root\n and then do the same thing on the rest of node\n :return: a list of set() which contain communities\n \"\"\"\n communities = list() # result will be return\n need2visit = list() # a stack actually\n temp_node_set = set() # using to save each communities\n visited = set() # track which node has been visited\n\n # random pick a root to detect communities\n random_root = self.vertexes[random.randint(0, len(self.vertexes) - 1)]\n temp_node_set.add(random_root)\n need2visit.append(random_root)\n # if still has some node we haven't visit, do the loop\n while len(visited) != len(self.vertexes):\n while len(need2visit) > 0:\n parent_node = need2visit.pop(0)\n temp_node_set.add(parent_node)\n visited.add(parent_node)\n for children in self.edges[parent_node]:\n if children not in visited:\n temp_node_set.add(children)\n need2visit.append(children)\n visited.add(children)\n\n communities.append(sorted(temp_node_set))\n temp_node_set = set()\n if len(self.vertexes) > len(visited):\n # pick one from rest of unvisited nodes\n need2visit.append(set(self.vertexes).difference(visited).pop())\n\n return communities\n\n\nif __name__ == '__main__':\n start = time.time()\n # define input variables\n filter_threshold = \"7\"\n input_csv_path = \"data/ub_sample_data.csv\"\n betweenness_file_path = \"out/task2_bet3.txt\"\n community_file_path = \"out/task2_com3.txt\"\n\n # filter_threshold = sys.argv[1]\n # input_csv_path = sys.argv[2]\n # betweenness_file_path = sys.argv[3]\n # community_file_path = sys.argv[4]\n\n conf = SparkConf().setMaster(\"local\") \\\n .setAppName(\"ay_hw_4_task2\") \\\n .set(\"spark.executor.memory\", \"4g\") \\\n .set(\"spark.driver.memory\", \"4g\")\n sc = SparkContext(conf=conf)\n sparkSession = SparkSession(sc)\n sc.setLogLevel(\"WARN\")\n\n # read the original json file and remove the header\n raw_data_rdd = sc.textFile(input_csv_path)\n header = raw_data_rdd.first()\n uid_bidxes_dict = raw_data_rdd.filter(lambda line: line != header) \\\n .map(lambda line: (line.split(',')[0], line.split(',')[1])) \\\n .groupByKey().mapValues(lambda bids: sorted(list(bids))) \\\n .collectAsMap()\n\n uid_pairs = list(itertools.combinations(list(uid_bidxes_dict.keys()), 2))\n\n edge_list = list()\n vertex_set = set()\n for pair in uid_pairs:\n if len(set(uid_bidxes_dict[pair[0]]).intersection(\n set(uid_bidxes_dict[pair[1]]))) >= int(filter_threshold):\n edge_list.append(tuple(pair))\n edge_list.append(tuple((pair[1], pair[0])))\n vertex_set.add(pair[0])\n vertex_set.add(pair[1])\n\n # => ['B7IvZ26ZUdL2jGbYsFVGxQ', 'jnn504CkjtfbYIwBquWmBw', 'sBqCpEUn0qYdpSF4Db\n vertexes = sc.parallelize(sorted(list(vertex_set))).collect()\n\n # => {'39FT2Ui8KUXwmUt6hnwy-g': ['0FVcoJko1kfZCrJRfssfIA', '1KQi8Ym\n edges = sc.parallelize(edge_list).groupByKey() \\\n .mapValues(lambda uidxs: sorted(list(set(uidxs)))).collectAsMap()\n\n graph_frame = 
GraphFrame(vertexes, edges)\n betweenness_result = graph_frame.computeBetweenness()\n # export your finding\n export2File(betweenness_result, betweenness_file_path)\n\n communities_result = graph_frame.extractCommunities()\n # export your finding\n export2File(communities_result, community_file_path)\n\n print(\"Duration: %d s.\" % (time.time() - start))\n","sub_path":"task2_3.py","file_name":"task2_3.py","file_ext":"py","file_size_in_byte":14812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"106648690","text":"# -*- coding: utf-8 -*-\n\n\"\"\"Signal handling for core app.\"\"\"\n\nimport logging\nfrom urllib.parse import urlparse\n\nfrom corsheaders import signals\nfrom django.conf import settings\nfrom django.db.models import Count, Q\nfrom django.db.models.signals import pre_delete\nfrom django.dispatch import Signal, receiver\nfrom rest_framework.permissions import SAFE_METHODS\n\nfrom readthedocs.oauth.models import RemoteOrganization\nfrom readthedocs.projects.models import Domain, Project\n\n\nlog = logging.getLogger(__name__)\n\nWHITELIST_URLS = [\n '/api/v2/footer_html',\n '/api/v2/search',\n '/api/v2/docsearch',\n '/api/v2/sustainability',\n]\n\nwebhook_github = Signal(providing_args=['project', 'data', 'event'])\nwebhook_gitlab = Signal(providing_args=['project', 'data', 'event'])\nwebhook_bitbucket = Signal(providing_args=['project', 'data', 'event'])\n\n\ndef decide_if_cors(sender, request, **kwargs): # pylint: disable=unused-argument\n \"\"\"\n Decide whether a request should be given CORS access.\n\n This checks that:\n * The URL is whitelisted against our CORS-allowed domains\n * The Domain exists in our database, and belongs to the project being queried.\n\n Returns True when a request should be given CORS access.\n \"\"\"\n if 'HTTP_ORIGIN' not in request.META:\n return False\n host = urlparse(request.META['HTTP_ORIGIN']).netloc.split(':')[0]\n\n # Don't do domain checking for this API for now\n if request.path_info.startswith('/api/v2/sustainability'):\n return True\n\n # Don't do domain checking for APIv2 when the Domain is known\n if request.path_info.startswith('/api/v2/') and request.method in SAFE_METHODS:\n domain = Domain.objects.filter(domain__icontains=host)\n if domain.exists():\n return True\n\n valid_url = False\n for url in WHITELIST_URLS:\n if request.path_info.startswith(url):\n valid_url = True\n break\n\n if valid_url:\n project_slug = request.GET.get('project', None)\n try:\n project = Project.objects.get(slug=project_slug)\n except Project.DoesNotExist:\n log.warning(\n 'Invalid project passed to domain. 
[%s:%s]',\n project_slug,\n host,\n )\n return False\n\n domain = Domain.objects.filter(\n Q(domain__icontains=host),\n Q(project=project) | Q(project__subprojects__child=project),\n )\n if domain.exists():\n return True\n\n return False\n\n\n@receiver(pre_delete, sender=settings.AUTH_USER_MODEL)\ndef delete_projects_and_organizations(sender, instance, *args, **kwargs):\n # Here we count the owner list from the projects that the user own\n # Then exclude the projects where there are more than one owner\n # Add annotate before filter\n # https://github.com/rtfd/readthedocs.org/pull/4577\n # https://docs.djangoproject.com/en/2.1/topics/db/aggregation/#order-of-annotate-and-filter-clauses # noqa\n projects = (\n Project.objects.annotate(num_users=Count('users')\n ).filter(users=instance.id\n ).exclude(num_users__gt=1)\n )\n\n # Here we count the users list from the organization that the user belong\n # Then exclude the organizations where there are more than one user\n oauth_organizations = (\n RemoteOrganization.objects.annotate(num_users=Count('users')\n ).filter(users=instance.id\n ).exclude(num_users__gt=1)\n )\n\n projects.delete()\n oauth_organizations.delete()\n\n\nsignals.check_request_enabled.connect(decide_if_cors)\n","sub_path":"readthedocs/core/signals.py","file_name":"signals.py","file_ext":"py","file_size_in_byte":3709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"639072789","text":"#import RPi.GPIO as GPIO\nimport time\nimport threading\nimport os\n\nclass WaterOperation(threading.Thread):\n def __init__(self):\n self.motor = 0\n self.OPEN_TERM = 0.2\n self.STOP_TERM = 3\n self.CLOSE_TERM = 0.2\n self.WATER_DONE = \"WATER_DONE\"\n threading.Thread.__init__(self)\n self.waterEvent = threading.Event()\n self.sem = threading.Semaphore(1)\n\n def run(self):\n #self.p.start(0)\n while not self.waterEvent.isSet():\n try:\n self.waterEvent.wait()\n print(\"Water 시작!\")\n #self.motor.ChangeDutyCycle(8)\n time.sleep(self.OPEN_TERM)\n #self.motor.ChangeDutyCycle(6.8)\n time.sleep(self.STOP_TERM)\n #self.motor.ChangeDutyCycle(5)\n time.sleep(self.CLOSE_TERM)\n #self.motor.stop()\n print(\"Water Complete\")\n\n if os.path.exists(\"fifo\"):\n fifo = open(\"fifo\", \"w\")\n print(\"send message to fifo.\")\n fifo.write(self.WATER_DONE)\n\n self.waterEvent.clear()\n\n except KeyboardInterrupt:\n print(\"Water operation interrupt\")\n self.waterEvent.clear()\n #self.motor.stop()\n\n def setPin(self, p):\n self.motor = p\n\n def isWaterOnUnlock(self):\n return self.waterEvent.isSet()\n\n def setWaterOperation(self):\n self.waterEvent.set()","sub_path":"main/RaspberryPi/WaterOperation.py","file_name":"WaterOperation.py","file_ext":"py","file_size_in_byte":1511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"646295983","text":"import pymysql\nimport random\nimport time\nimport datetime\nimport RPi.GPIO as gpio\nimport re\n\n\nprint(\"Initiating Climate Monitoring\")\n\ndef start():\n try:\n while True:\n temp, humidity = readprobe()\n date = datetime.datetime.now()\n location = 'Lakehouse'\n query = \"INSERT INTO temperature (datetime,location,sensor,temp,humidity) VALUES((SELECT NOW()),'\"+location+\"','RasPiAlpha',\"+str(temp)+\",\"+str(humidity)+\");\"\n conn = pymysql.connect(\n host='68.178.143.147',\n port=3306,\n user='midakbatemp',\n passwd='R!gretem030',\n database='midakbatemp'\n )\n cur = conn.cursor()\n cur.execute(query)\n cur.close()\n conn.close()\n time.sleep(600)\n 
except KeyboardInterrupt:\n        print(\"Ended.\")\n\ndef readprobe():\n    temp = []\n    humidity = []\n    for i in range(10):\n        data,datalen = testdata()\n        temp += [float(data[2])*9/5+32]\n        humidity += [float(data[0])]\n    mtemp = median(temp)\n    mhumidity = median(humidity)\n    return(mtemp,mhumidity)\n\ndef median(lyst):\n    lyst = sorted(lyst)\n    if len(lyst) < 1:\n        return None\n    print(len(lyst))\n    if len(lyst) %2 == 1:\n        return lyst[int(((len(lyst)+1)/2)-1)]\n    else:\n        return float(sum(lyst[int((len(lyst)/2)-1):int((len(lyst)/2)+1)]))/2.0\n\ndef testdata():\n    data,datalen = probereading()\n    if datalen == 5:\n        if int(data[0])+int(data[1])+int(data[2])+int(data[3]) == int(data[4]):\n            return(data,datalen)\n        else:\n            data,datalen = testdata()\n    else:\n        data,datalen = testdata()\n    return(data,datalen)\n\ndef probereading():\n    data = \"\"\n    pinno = 4\n\n    gpio.setmode(gpio.BCM)\n    gpio.setup(pinno,gpio.OUT)\n    gpio.output(pinno,gpio.HIGH)\n    time.sleep(0.02)\n    gpio.output(pinno,gpio.LOW)\n    time.sleep(0.025)\n    gpio.setup(pinno, gpio.IN, pull_up_down=gpio.PUD_UP)\n\n    for i in range(0,1000):\n        data += str(gpio.input(pinno))\n    codeddata,bitdata = decode(data)\n    return(codeddata,len(codeddata))\n\ndef decode(data):\n    newdata = []\n    item = ''\n    trip = 1\n    for i in data:\n        if i == '0':\n            if trip == 0:\n                newdata += [item]\n            trip = 1\n            item = ''\n        else:\n            trip = 0\n            item += '1'\n    newdata += [item]\n    cnewdata = ''\n    for i in newdata:\n        if len(i) > 2:\n            cnewdata += '1'\n        else:\n            cnewdata += '0'\n    cnewdata = cnewdata.lstrip(\"1\")\n    decdata = []\n    for i in range(int(len(cnewdata)/8)):\n        num = bin2dec(cnewdata[i*8:(i+1)*8])\n        decdata += [num]\n    return(decdata,cnewdata)\n\ndef bin2dec(string_num):\n    return str(int(string_num, 2))\n\nstart()\n","sub_path":"ClimateMonitor.py","file_name":"ClimateMonitor.py","file_ext":"py","file_size_in_byte":2832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"164135931","text":"\"\"\"\nIntensityID widget\n\"\"\"\n\n# Standard library modules.\n\n# Third party modules.\nfrom qtpy.QtWidgets import QComboBox\n\n# Local modules.\nfrom pyhmsa_gui.spec.condition.condition import _ConditionWidget\n\nfrom pyhmsa_measurement.spec.condition.intensityid import \\\n    IntensityID, _INTENSITY_TYPES, _INTENSITY_MEASURES\n\n\n# Globals and constants variables.\n\nclass IntensityIDWidget(_ConditionWidget):\n    def __init__(self, parent=None):\n        _ConditionWidget.__init__(self, IntensityID, parent)\n\n    def _init_ui(self):\n        print(\"TEST HUHU\")\n        # Controls\n        self._cb_type = QComboBox()\n        self._cb_type.addItems([None] + list(_INTENSITY_TYPES))\n        self._cb_measure = QComboBox()\n        self._cb_measure.addItems([None] + list(_INTENSITY_MEASURES))\n\n        # Layouts\n        layout = _ConditionWidget._init_ui(self)\n        layout.addRow(\"Type\", self._cb_type)\n        layout.addRow(\"Measure\", self._cb_measure)\n\n        # Signals\n        self._cb_type.currentIndexChanged.connect(self.edited)\n        self._cb_measure.currentIndexChanged.connect(self.edited)\n\n        return layout\n\n    def _create_parameter(self):\n        return self.CLASS(None, None)\n\n    def parameter(self, parameter=None):\n        parameter = _ConditionWidget.parameter(self, parameter)\n        parameter.type = self._cb_type.currentText()\n        parameter.measure = self._cb_measure.currentText()\n        return parameter\n\n    def setParameter(self, condition):\n        _ConditionWidget.setParameter(self, condition)\n        self._cb_type.setCurrentIndex(self._cb_type.findText(condition.type))\n        self._cb_measure.setCurrentIndex(self._cb_measure.findText(condition.measure))\n\n    def setReadOnly(self, state):\n        
_ConditionWidget.setReadOnly(self, state)\n self._cb_type.setEnabled(not state)\n self._cb_measure.setEnabled(not state)\n\n def isReadOnly(self):\n return _ConditionWidget.isReadOnly(self) and \\\n not self._cb_type.isEnabled() and \\\n not self._cb_measure.isEnabled()\n\n def hasAcceptableInput(self):\n return _ConditionWidget.hasAcceptableInput(self)\n","sub_path":"pyhmsa_gui/spec/condition/intensityid.py","file_name":"intensityid.py","file_ext":"py","file_size_in_byte":2120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"106820772","text":"#! /usr/bin/env python2\n#Attention: Additionally bedtools for Linux has to be installed on the system (sudo apt-get install bedtools)\n\nimport mysql.connector\nimport pysam\nimport pybedtools\n\n__author__ = 'Priska Lang'\n\n\nclass Assignment1:\n \"\"\"\n Provides code for working with the bam file AHG00096.chrom11.ILLUMINA.bwa.GBR.low_coverage.20120522.bam\n and the gene USH1C.\n\n This class parses the two files and calculates the following properties using the mysql.connector,\n pysam and pybedtools module:\n - sam header\n - properly paired reads of gene\n - gene reads with indels\n - calculate average coverage\n - number of mapped reads\n - gene symbols\n - regions of gene\n - numbers of exons\n For more information see: http://pysam.readthedocs.io/en/latest/api.html, https://daler.github.io/pybedtools/\n and http://bedtools.readthedocs.io/en/latest/content/tools/genomecov.html.\n This code fetches the gene coordinates and provides methods for getting the listed properties above and\n \"print_summary\" for printing the results.\n \"\"\"\n def __init__(self):\n \"\"\"\n The constructor method creates a pysam and a bedtool object from the bam file, fetches the gene USH1C and\n stores it to a dict. Furthermore it stores just the first gene from the crated dict, a list of the properly\n paired reads and the reads with indels. Additionally it counts how often they occur in the bam file. 
Also\n the mapped reads of the bam file are counted.\n \"\"\"\n ## Your gene of interest\n self.gene = \"USH1C\"\n # Read a file in BAM format:\n infileName = \"/home/brisi/PycharmProjects/medizinische_genomanalysen_2017_assignment_1/HG00096.chrom11.ILLUMINA.bwa.GBR.low_coverage.20120522.bam\"\n # Create pysam file\n if infileName.endswith(\".bam\"):\n self.samfile = pysam.AlignmentFile(infileName, \"rb\")\n # Create pybedtools file\n self.bedtoolfile = pybedtools.BedTool(infileName)\n # Create myGene and fill it with all occurrences of my gene of interest\n self.myGenes = {\"name2\":[],\"name\":[],\"chrom\":[],\"start\":[],\"end\":[],\"exon\":[]}\n self.fetch_gene_coordinates(\"hg19\", \"USH1C.TXT\")\n # Select the first gene from all occurrences\n self.myGene = {\"name2\":[],\"name\":[],\"chrom\":[],\"start\":[],\"end\":[],\"exon\":[]}\n for key in sorted(self.myGenes.keys()):\n self.myGene[key] = self.myGenes[key][0]\n self.properlyPairedReads = [] # declaration of list for properly paired reads\n self.cProperlyPairedReads = 0 # counter for counting properly paired reads\n self.cMappedReads = 0 # counter for counting mapped reads\n self.geneReadsWithIndels = [] # declaration list of gene reads with indels\n self.cGeneReadsWithIndels = 0 # counter for counting the gene reads with indels\n # determining relevant data in pysam file\n for read in self.samfile.fetch(self.myGene[\"chrom\"].strip(\"chr\"), self.myGene[\"start\"], self.myGene[\"end\"]):\n # determining the porperly paired reads\n if read.is_proper_pair:\n self.properlyPairedReads.append(read)\n self.cProperlyPairedReads += 1\n # counting the mapped reads\n if not read.is_unmapped:\n self.cMappedReads += 1\n # determining the reads with indels\n if not read.cigartuples is None and (read.cigartuples[0][0] == 1 or read.cigartuples[0][0] == 2): # insertion = 1, deletion = 2\n self.geneReadsWithIndels.append(read)\n self.cGeneReadsWithIndels += 1\n\n\n def fetch_gene_coordinates(self, genome_reference, file_name):\n \"\"\"\n Fetches the gene coordinates of genome_reference by using the mysql.connector and saves it as file_name.\n Furthermore it fills the empty dict, created within the constructor method.\n\n :param genome_reference: the name of the reference genome\n :param file_name: the name of the output file\n \"\"\"\n print(\"Connecting to UCSC to fetch data\")\n\n ## Open connection\n cnx = mysql.connector.connect(host='genome-mysql.cse.ucsc.edu', user='genomep', passwd='password',\n db=genome_reference)\n\n ## Get cursor\n cursor = cnx.cursor()\n\n ## Build query fields\n query_fields = [\"refGene.name2\",\n \"refGene.name\",\n \"refGene.chrom\",\n \"refGene.txStart\",\n \"refGene.txEnd\",\n \"refGene.strand\",\n \"refGene.exonCount\",\n \"refGene.exonStarts\",\n \"refGene.exonEnds\"]\n\n ## Build query\n query = \"SELECT DISTINCT %s from refGene\" % \",\".join(query_fields)\n\n ## Execute query\n cursor.execute(query)\n\n ## Write my gene of interest to file and create it's self.objects\n with open(file_name, \"w\") as fh:\n for row in cursor:\n if row[0] == self.gene:\n fh.write(str(row) + \"\\n\")\n self.myGenes[\"name2\"].append(row[0])\n self.myGenes[\"name\"].append(row[1])\n self.myGenes[\"chrom\"].append(row[2])\n self.myGenes[\"start\"].append(row[3])\n self.myGenes[\"end\"].append(row[4])\n self.myGenes[\"exon\"].append(row[6])\n\n ## Close cursor & connection\n cursor.close()\n cnx.close()\n\n print(\"Done fetching data\")\n\n def get_sam_header(self):\n \"\"\"\n Prints the header of the bam file.\n \"\"\"\n 
print(\"\\nheader:\")\n print(self.samfile.header[\"RG\"])\n print(self.samfile.header[\"CO\"])\n print(self.samfile.header[\"HD\"])\n # remove comments for printing the whole header object\n #for key in self.samfile.header:\n # print key\n # print self.samfile.header[key]\n #todo: embellish print format of sam header\n\n def get_properly_paired_reads_of_gene(self):\n \"\"\"\n Returns the properly pared reads of pysam file self.samfile and prints them if you remove the comment signs\n in lines 149-150: #for line in self.properlyPairedReads: and #print line. Additionally it prints the number\n of properly paired reads.\n\n :return: a list of the properly paired reads of the gene\n \"\"\"\n # remove comment signs for output on screen\n #for line in self.properlyPairedReads:\n # print line\n print(\"\\n{} Properly paired reads found.\".format(self.cProperlyPairedReads))\n return(self.properlyPairedReads)\n\n def get_gene_reads_with_indels(self):\n \"\"\"\n Returns the gene reads with indels from pysam file self.samfile and prints them if you remove the comment\n signs in lines 163-164: #for read in self.geneReadsWithIndels: and #print read. Additionally it prints the\n number of gene reads with indels.\n\n :return: a list of the gene reads with indels\n \"\"\"\n # remove comment signs for output on screen\n #for read in self.geneReadsWithIndels:\n # print read\n print(\"\\n{} gene reads with indels found.\".format(self.cGeneReadsWithIndels))\n return(self.geneReadsWithIndels)\n\n def calculate_average_coverage(self,coverageType):\n \"\"\"\n Calculates the total average coverage of the bedtool file self.bedtoolfile using the genome_coverage method\n from pybedtools. Afterwards it determines the total average coverage and the gene average coverage and\n prints them if coverageType is \"both\". If coverageType is \"gene\", it just prints the genes coverage and\n returns it. Otherwise it prints the total coverage and returns it.\n\n :param coverageType: the type of coverage that should be printed/returned. 
Possible values are \"both\",\n \"total\" and \"gene\"\n\n :return the coverage value if param coverageType is not set to \"both\"\n \"\"\"\n self.coverageValues = self.bedtoolfile.genome_coverage(bg=True)\n cbam = 0 # for counting the lines with coverage values in self.bedtoolfile\n cGene = 0 # for counting the lines with coverage values for my gene\n sumValuesBam = 0 # for summing up the coverage values of the self.bedtoolfile\n sumValuesGene = 0 # for summing up the coverage values for my gene\n for line in self.coverageValues:\n cbam += 1\n sumValuesBam += int(line[3])\n # check if the read is within my gene of interest\n if (self.myGene[\"start\"] <= int(line[1])) and (self.myGene[\"end\"] >= int(line[2])):\n sumValuesGene += int(line[3])\n cGene += 1\n if coverageType == \"both\":\n print(\"\\ntotal average coverage:\")\n print(float(sumValuesBam) / float(cbam))\n print(\"\\ngene average coverage:\")\n print(sumValuesGene / float(cGene))\n else:\n if coverageType == \"gene\":\n print(\"\\ngene average coverage:\")\n print(sumValuesGene / float(cGene))\n return(sumValuesGene / float(cGene))\n else:\n print(\"\\ntotal average coverage:\")\n print(float(sumValuesBam) / float(cbam))\n return(float(sumValuesBam) / float(cbam))\n\n def get_number_mapped_reads(self):\n \"\"\"\n Prints the number of mapped reads found in the pysam file self.samfile.\n \"\"\"\n print(\"\\nnumber of mapped reads: \")\n print(self.cMappedReads)\n\n def get_gene_symbol(self):\n \"\"\"\n Prints the gene symbol of the first gene in self.myGenes.\n \"\"\"\n print(\"\\ngene symbol:\")\n print(self.myGenes[\"name2\"][0])\n\n def get_region_of_gene(self):\n \"\"\"\n Prints the region of gene self.myGene\n\n :return: a string of gene name, start and end per row, separated with tab\n \"\"\"\n print(\"\\nregion of gene \" + self.myGene[\"name2\"] + \": \" + \"start: \" + str(self.myGene[\"start\"]) + \" end: \" + \\\n str(self.myGene[\"end\"]))\n return(self.myGene[\"name2\"] + \" \" + str(self.myGenes[\"start\"]) + \" \" + str(self.myGenes[\"end\"]) + \" \\n\")\n\n def get_number_of_exons(self):\n \"\"\"\n Prints the list of number of exons of the gene in self.myGene.\n \"\"\"\n print(\"\\nnumber of exons:\")\n print(self.myGene[\"exon\"])\n\n def print_summary(self):\n \"\"\"\n print_summary calls all methods above except the constructor method and fetch_gene_coordinates().\n\n :Example:\n\n For the given input files and not activated printing in self.get_properly_paired_reads_of_gene(),\n self.get_gene_reads_with_indels() and self.get_region_of_gene() the following is printed on the\n console:\n\n :Example:\n Assignment 1\n Connecting to UCSC to fetch data\n Done fetching data\n\n header:\n [{'LB': '2845856850', 'CN': 'WUGSC', 'DS': 'SRP001294', 'SM': 'HG00096', 'PI': '206', 'ID': 'SRR062634', 'PL': 'ILLUMINA'}, {'LB': '2845856850', 'CN': 'WUGSC', 'DS': 'SRP001294', 'SM': 'HG00096', 'PI': '206', 'ID': 'SRR062635', 'PL': 'ILLUMINA'}, {'LB': '2845856850', 'CN': 'WUGSC', 'DS': 'SRP001294', 'SM': 'HG00096', 'PI': '206', 'ID': 'SRR062641', 'PL': 'ILLUMINA'}]\n ['$known_indels_file(s) = ftp://ftp.1000genomes.ebi.ac.uk/vol1/ftp/technical/reference/phase2_mapping_resources/ALL.wgs.indels_mills_devine_hg19_leftAligned_collapsed_double_hit.indels.sites.vcf.gz', '$known_indels_file(s) .= ftp://ftp.1000genomes.ebi.ac.uk/vol1/ftp/technical/reference/phase2_mapping_resources/ALL.wgs.low_coverage_vqsr.20101123.indels.sites.vcf.gz', '$known_sites_file(s) = 
ftp://ftp.1000genomes.ebi.ac.uk/vol1/ftp/technical/reference/phase2_mapping_resources/ALL.wgs.dbsnp.build135.snps.sites.vcf.gz']\n {'SO': 'coordinate', 'VN': '1.0'}\n\n 2351 Properly paired reads found.\n\n 26 gene reads with indels found.\n\n total average coverage:\n 5.60829419857\n\n gene average coverage:\n 5.29590887959\n\n number of mapped reads:\n 2382\n\n gene symbol:\n USH1C\n\n region of gene USH1C: start: 17515441 end: 17565963\n\n number of exons:\n 21\n \"\"\"\n self.get_sam_header()\n self.get_properly_paired_reads_of_gene()\n self.get_gene_reads_with_indels()\n self.calculate_average_coverage(\"both\")\n self.get_number_mapped_reads()\n self.get_gene_symbol()\n self.get_region_of_gene()\n self.get_number_of_exons()\n\n# cue:\nif __name__ == '__main__':\n print(\"Assignment 1\")\n assignment1 = Assignment1()\n assignment1.print_summary()","sub_path":"assignment1.py","file_name":"assignment1.py","file_ext":"py","file_size_in_byte":13378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"554167306","text":"#!/usr/bin/env python3\n\n\"\"\"快速入门画图神器 Matplotlib\"\"\"\n\nimport logging\n# 导入相关模块\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nlogging.basicConfig(level=logging.INFO)\n\n# 首先通过 np.linspace 方式生成 x,它包含了 50 个元素的数组,这 50 个元素均匀的分布在 [0, 2pi] 的区间上。然后通过 np.sin(x) 生成 y。\nx=np.linspace(0,2*np.pi,50)\ny=np.sin(x)\n\n# 画一个简单的图形\ndef draw1():\n # 设置 figure\n # 你可以认为Matplotlib绘制的图形都在一个默认的 figure 中,当然了,你可以自己创建 figure,好处就是可以控制更多的参数,常见的就是控制图形的大小,这里创建一个 figure,设置大小为 (6, 3)。\n plt.figure(figsize=(6,3))\n\n # 有了 x 和 y 数据之后,我们通过 plt.plot(x, y) 来画出图形,并通过 plt.show() 来显示。\n # 绘制出图形之后,我们可以自己调整更多的样式,比如颜色、点、线\n # 比如 'y*-' ,其中 y 表示黄色,* 表示 星标的点,- 表示实线。\n plt.plot(x,y,'y*-',label='sin(x)')\n\n # 有时候,可能需要在一个图纸里绘制多个图形,这里我们同时绘制了 (x, y), (x, y * 2)两个图形。\n plt.plot(x,y*2,'m--',label='2sin(x)') \n # 蓝色b绿色g红色r青色c品红m黄色y黑色k白色w\n # 点.像素,圆o方形s三角形^\n # 直线-虚线--点线:点划线-.\n\n # 设置legend(配合label才有显示)\n plt.legend(loc='best')\n \n # 设置标题\n plt.title(\"sin(x) & 2sin(x)\")\n \n \n\n# 设置坐标轴\ndef draw2():\n draw1()\n # 通过 xlim 和 ylim 来设限定轴的范围,通过 xlabel 和 ylabel 来设置轴的名称。\n plt.xlim(0,np.pi+1)\n plt.ylim(-3,3)\n plt.xlabel('X')\n plt.ylabel('Y')\n\n# 可以通过 xticks 和 yticks 来设置轴的刻度\ndef draw3():\n draw1()\n plt.xticks((0,np.pi*0.5,np.pi,np.pi*1.5,np.pi*2))\n plt.yticks((-2.0,-1.5,-1.0,-0.5,0,0.5,1.0,1.5,2))\n\n# 添加注释\ndef draw4():\n plt.plot(x,y)\n x0=np.pi\n y0=0\n # 画出标注点\n plt.scatter(x0,y0,s=50)\n # 'sin(np.pi)=%s' % y0 代表标注的内容,可以通过字符串 %s 将 y0 的值传入字符串;\n # 参数 xycoords='data' 是说基于数据的值来选位置;\n # xytext=(+30, -30) 和 textcoords='offset points' 表示对于标注位置的描述 和 xy 偏差值,即标注位置是 xy 位置向右移动 30,向下移动30;\n # arrowprops 是对图中箭头类型和箭头弧度的设置,需要用 dict 形式传入。\n plt.annotate('sin(np.ni)=%s'%y0,xy=(np.pi,0),xycoords='data',xytext=(+30,-30),textcoords='offset points',fontsize=16,arrowprops=dict(arrowstyle='->',connectionstyle=\"arc3,rad=.2\"))\n # 也可以使用 plt.text 函数来添加注释\n # plt.text(0.5, -0.25, \"sin(np.pi) = 0\", fontdict={'size': 16, 'color': 'r'})\n\n# 使用子图\ndef draw5():\n # 有时候我们需要将多张子图展示在一起,可以使用 subplot() 实现。即在调用 plot() 函数之前需要先调用 subplot() 函数。\n ax1=plt.subplot(2,2,1) # (行,列,活跃区)\n plt.plot(x,np.sin(x),'r')\n plt.subplot(2,2,2,sharey=ax1) # 与 ax1 共享y轴\n plt.plot(x,2*np.sin(x),'g')\n ax2=plt.subplot(2,2,3)\n plt.plot(x,np.cos(x),'b')\n plt.subplot(2,2,4,sharey=ax2)\n plt.plot(x,2*np.cos(x),'y')\n # 上面的 subplot(2, 2, x) 表示将图像窗口分为 2 行 2 列。x 表示当前子图所在的活跃区。\n\n# 有时候我们需要不同大小的子图\ndef draw6():\n plt.subplot(2,1,1) # (行,列,活跃区)\n plt.plot(x,np.sin(x),'r')\n # 解释下为什么活跃区为 4,因为上一步中使用 plt.subplot(2, 
1, 1) 将整个图像窗口分为 2 行 1 列, 第1个小图占用了第1个位置, 也就是整个第1行. 这一步中使用 plt.subplot(2, 3, 4) 将整个图像窗口分为 2 行 3 列, 于是整个图像窗口的第1行就变成了3列, 也就是成了3个位置, 于是第2行的第1个位置是整个图像窗口的第4个位置。\n ax1=plt.subplot(2,3,4)\n plt.plot(x,2*np.sin(x),'g')\n plt.subplot(2,3,5,sharey=ax1)\n plt.plot(x,np.cos(x),'b')\n plt.subplot(2,3,6,sharey=ax1)\n plt.plot(x,2*np.cos(x),'y')\n\n# 散点图\ndef colorbar():\n k=500\n x=np.random.rand(k)\n y=np.random.rand(k)\n logging.info(x[:10])\n # INFO:root:[0.39439996 0.6414791 0.71569981 0.30267446 0.15495104 0.38977327 0.41445457 0.97683185 0.19737104 0.62186213]\n logging.info(y[:10])\n # INFO:root:[0.13488103 0.13076497 0.49393922 0.94226616 0.81893848 0.94828212 0.736714 0.75390657 0.74986627 0.61326751]\n size=np.random.rand(k)*50 # 生成每个点的大小\n colour=np.arctan2(x,y) # 生成每个点的颜色大小\n plt.scatter(x,y,s=size,c=colour)\n plt.colorbar() # 添加颜色栏\n # 上面我们首先生成了要绘制的数据的点x 和 y,接下来为每个数据点生成控制大小的数组 size,然后未每个数据点生成控制颜色的数组 colour。最后通过 colorbar() 来增加一个颜色栏。\n\n# 柱状图\ndef bar():\n k=10\n x=np.arange(k)\n logging.info(x)\n # INFO:root:[0 1 2 3 4 5 6 7 8 9]\n y=np.random.rand(k)\n logging.info(y)\n # INFO:root:[0.88245081 0.03883995 0.94164021 0.02237071 0.80930982 0.93510309 0.03257937 0.01406703 0.30612616 0.02356784]\n plt.bar(x,y) # 画出 x 和 y 的柱状图\n # 增加数值\n for x,y in zip(x,y):\n plt.text(x,y,'%0.2f'%y,ha='center',va='bottom')\n # 生成数据 x 和 y 之后,调用 plt.bar 函数绘制出柱状图,然后通过 plt.text 标注数值,设置参数 ha='center' 横向居中对齐,设置 va='bottom'纵向底部(顶部)对齐。\n\n# 中文乱码解决(曲线图示侠)\ndef chinese():\n # Matplotlib 有个让人恼火的问题是,默认情况下,Matplotlib 中文会乱码。\n x = ['北京', '上海', '深圳', '广州']\n y = [60000, 58000, 50000, 52000]\n # 其实只需要配置下后台字体即可\n plt.rcParams['font.sans-serif']=['SimHei'] #用来正常显示中文标签\n plt.rcParams['axes.unicode_minus']=False #用来正常显示负号\n plt.plot(x, y)\n\nif __name__=='__main__':\n # 通过dict实现switch\n sample={1:draw1,2:draw2,3:draw3,4:draw4,5:draw5,6:draw6}\n shape={1:colorbar,2:bar,3:chinese}\n\n # 修改get值调用不同方法\n # sample.get(6)()\n shape.get(3)()\n\n plt.show()\n","sub_path":"python-sample/vendor/matplotlib/guide.py","file_name":"guide.py","file_ext":"py","file_size_in_byte":6392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"464068500","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport cv2\nimport io\nimport os\nfrom google.cloud import vision\nfrom google.cloud.vision import types\nimport base64\nimport json\nfrom imageio import imread\n\ndef parse_crop_instructions(data):\n '''\n * input:\n python dict of following format\n {\n \"imageData\": \"base64\",\n \"x\": \"0\",\n \"y\": \"0\",\n \"width\": \"32\", \n \"height\": \"100\",\n \"scaleX\": \"0\",\n \"scaleY\": \"0\",\n \"rotate\": \"90\"\n }\n * output: \n cropped image\n '''\n\n image_raw = base64.decodestring(bytes(data['imageData'], 'utf-8'))\n image = imread(image_raw)\n image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n cv2.imwrite(\"scratch_encoder.jpg\", image)\n # https://stackoverflow.com/questions/12511408/accepting-json-image-file-in-python\n\n crop_x = int(data[\"x\"])\n crop_y = int(data[\"y\"])\n crop_w = int(data[\"width\"])\n crop_h = int(data[\"height\"])\n \n if crop_w == 0 or crop_h == 0:\n return image\n \n else:\n ocv_box = [crop_x, crop_y, crop_w, crop_h]\n print(ocv_box)\n cropped_image = crop_image(image, ocv_box)\n\n return cropped_image\n\nimport time\nimport urllib\nfrom urllib.parse import urlparse\nimport email.utils as eut\nimport json\nimport socket\nfrom geolite2 import geolite2\nfrom newspaper import Article\n\ndef return_rms_payload(image, lang, debug=True):\n file_name = 
\"/tmp/\"+str(time.time())+\".jpg\"\n cv2.imwrite(file_name, image)\n \n print(\"text...\")\n text_ann = detect_text(file_name, lang)\n \n print(\"reverse image search...\")\n annotations = detect_web(file_name, 5, False)\n annotations = detect_web(file_name, 5, True)\n \n output = {}\n output[\"results\"] = False\n output[\"full_matches\"] = []\n output[\"partial_matches\"] = []\n output[\"similar_images\"] = []\n output[\"best_guess\"] = []\n output[\"image_text\"] = text_ann\n \n if annotations.best_guess_labels:\n for label in annotations.best_guess_labels:\n output[\"best_guess\"].append(label.label)\n \n for page in annotations.pages_with_matching_images:\n page_url = page.url\n article = Article(page_url)\n article.download()\n article.parse()\n page_summary = article.text[:100]\n page_title = article.title\n print(\"\\nFull Matches...\")\n for image in page.full_matching_images:\n try:\n img_url = str(image.url).split(\"?\")[0]\n conn = urllib.request.urlopen(img_url, timeout=3)\n date_time = conn.headers['last-modified']\n date_time = eut.parsedate(date_time)\n unixtime = time.mktime(date_time)\n if debug:\n print(\"FULL\")\n print(img_url, end=\"\\n: \")\n print(unixtime)\n ip_a = socket.gethostbyname(urlparse(img_url).netloc)\n reader = geolite2.reader()\n country = reader.get(ip_a)['country']['names']['en']\n \n output[\"full_matches\"].append({\"page_url\": page_url, \"image_url\": img_url,\n \"page_title\": page_title, \"page_summary\": page_summary,\n \"unix_time\": unixtime, \"country\": country})\n except Exception as e:\n if debug:\n print(e, date_time)\n\n print(\"\\nPartial Matches...\")\n for image in page.partial_matching_images:\n try:\n img_url = str(image.url).split(\"?\")[0]\n conn = urllib.request.urlopen(img_url, timeout=3)\n date_time = conn.headers['last-modified']\n date_time = eut.parsedate(date_time)\n unixtime = time.mktime(date_time)\n if debug:\n print(img_url, end=\"\\n: \")\n print(unixtime)\n ip_a = socket.gethostbyname(urlparse(img_url).netloc)\n reader = geolite2.reader()\n country = reader.get(ip_a)['country']['names']['en']\n \n output[\"partial_matches\"].append({\"page_url\": page_url, \"image_url\": img_url,\n \"page_title\": page_title, \"page_summary\": page_summary,\n \"unix_time\": unixtime, \"country\": country})\n except Exception as e:\n if debug:\n print(e, date_time)\n print(\"\\nSimilar Images...\")\n for image in annotations.visually_similar_images:\n try:\n img_url = str(image.url).split(\"?\")[0]\n conn = urllib.request.urlopen(img_url, timeout=3)\n date_time = conn.headers['last-modified']\n date_time = eut.parsedate(date_time)\n unixtime = time.mktime(date_time)\n if debug:\n print(img_url, end=\"\\n: \")\n print(unixtime)\n ip_a = socket.gethostbyname(urlparse(img_url).netloc)\n reader = geolite2.reader()\n country = reader.get(ip_a)['country']['names']['en']\n \n output[\"similar_images\"].append({\"page_url\": page_url, \"image_url\": img_url,\n \"page_title\": page_title, \"page_summary\": page_summary,\n \"unix_time\": unixtime, \"country\": country})\n except Exception as e:\n if debug:\n print(e, date_time)\n \n print(len(output[\"full_matches\"])+len(output[\"partial_matches\"])+len(output[\"similar_images\"]))\n if (len(output[\"full_matches\"])+len(output[\"partial_matches\"])+len(output[\"similar_images\"])) < 1:\n output[\"results\"] = True\n output[\"errors\"] = \"No images can be found by GCP Vision API\"\n\n return json.dumps(output)\n\nimport base64\nimport json\n\n# {\n# \"imageData\": \"skdjgdfkuhskfh\",\n# 
\"x\": \"0\",\n# \"y\": \"0\",\n# \"width\": \"32\", \n# \"height\": \"100\",\n# \"scaleX\": \"0\",\n# \"scaleY\": \"0\",\n# \"rotate\": \"90\"\n# }\n\n\n## according to the order in the workflow\n\n\n# def parse_image_for_smart_crop_suggestions(json_string):\n# data = json.loads(json_string)\n# # https://stackoverflow.com/questions/2835559/parsing-values-from-a-json-file\n\n# image = base64.decodestring(data['imageData'])\n# # https://stackoverflow.com/questions/12511408/accepting-json-image-file-in-python\n \n# json_output = {}\n# json[\"x\"] = None\n# json[\"y\"] = None\n# json[\"width\"] = None\n# json[\"height\"] = None\n \n# return json.dump(json_output)\n\n\n\n# def parse_crop_instructions(json_string):\n# '''\n# input: json file of following format\n# {\n# \"imageData\": \"skdjgdfkuhskfh...in..base64\",\n# \"x\": \"0\",\n# \"y\": \"0\",\n# \"width\": \"32\", \n# \"height\": \"100\",\n# \"scaleX\": \"0\",\n# \"scaleY\": \"0\",\n# \"rotate\": \"90\"\n# }\n \n# output: json file of the following format\n# {\n# \"imageData\": \"skdjgdfkuhskfh...in..base64\",\n# }\n# '''\n \n# data = json.loads(json_string)\n# # https://stackoverflow.com/questions/2835559/parsing-values-from-a-json-file\n\n# image = base64.decodestring(data['imageData'])\n# # https://stackoverflow.com/questions/12511408/accepting-json-image-file-in-python\n\n# crop_x = data[\"x\"]\n# crop_y = data[\"y\"]\n# crop_w = data[\"width\"]\n# crop_h = data[\"height\"]\n \n# ocv_box = [crop_x, crop_y, crop_w, crop_h]\n# cropped_image = crop_image(image, ocv_box)\n \n# output_json_dict = {}\n# output_json_dict[\"imageData\"] = cropped_image\n# output_json = json.dumps(output_json)\n \n# return output_json\n \n\ndef ingest_image_disk(file_dir):\n image = cv2.imread(file_dir)\n return image\n\n\ndef crop_image(image, ocv_box):\n [x,y,w,h] = ocv_box\n cropped = image[y:y+h, x:x+w]\n return cropped\n\n\ndef hflip_image(image):\n hflip = cv2.flip(image.copy(), 0)\n return hflip\n\ndef vflip_image(image):\n vflip = cv2.flip(image.copy(), 1)\n return vflip\n\ndef plot_cv_image(image):\n plt.axis(\"off\")\n plt.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))\n plt.show()\n\ndef suggest_smart_crop(img, num_boxes, debug=False):\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n thresh_gray = int(np.average(gray))\n ret,thresh = cv2.threshold(gray,thresh_gray,255,cv2.THRESH_BINARY_INV)\n \n im2, contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\n\n img_ann = np.copy(img)\n draft_boxes = []\n boxes_size = []\n\n for contour in contours:\n x,y,w,h = cv2.boundingRect(contour)\n if w > 100 or h > 100:\n cv2.rectangle(img_ann,(x,y),(x+w,y+h),(0,0,255),3)\n draft_boxes.append([x,y,w,h])\n boxes_size.append(int(w*h))\n \n output_boxes = []\n \n boxes_size.sort()\n thres_box_size = boxes_size[-num_boxes] - 1\n \n for contour in contours:\n x,y,w,h = cv2.boundingRect(contour)\n if (int(w*h)) > thres_box_size:\n output_boxes.append([x,y,w,h])\n \n if debug:\n plt.figure(1)\n plt.subplot(1,2,1)\n plt.axis(\"off\")\n plt.title(\"Original Image\")\n plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))\n plt.subplot(1,2,2)\n plt.axis(\"off\")\n plt.title(\"Grayscale Image\")\n plt.imshow(cv2.cvtColor(gray, cv2.COLOR_GRAY2RGB))\n plt.show()\n plt.imshow(thresh)\n plt.show()\n plt.imshow(cv2.cvtColor(img_ann, cv2.COLOR_BGR2RGB))\n print(\" x,y,w,h:\\n\", output_boxes)\n print(boxes_size)\n print(\"Generating output images\")\n for i, box in enumerate(output_boxes):\n [x,y,w,h] = box\n output_img = img[y:y+h, x:x+w]\n 
plt.figure(i)\n plt.title(\"output_\"+str(i)+\".jpg; size: \" + str(boxes_size[i]))\n plt.axis(\"off\")\n plt.imshow(cv2.cvtColor(output_img, cv2.COLOR_BGR2RGB))\n plt.show()\n \n return output_boxes\n\ndef detect_web(path, max_results, return_json=True):\n \"\"\"Detects web annotations given an image.\"\"\"\n client = vision.ImageAnnotatorClient()\n\n with io.open(path, 'rb') as image_file:\n content = image_file.read()\n\n image = vision.types.Image(content=content)\n\n response = client.web_detection(image=image, max_results=max_results)\n annotations = response.web_detection\n \n if return_json:\n return annotations\n \n else:\n if annotations.best_guess_labels:\n for label in annotations.best_guess_labels:\n print('\\nBest guess label: {}'.format(label.label))\n\n if annotations.pages_with_matching_images:\n print('\\n{} Pages with matching images found:'.format(\n len(annotations.pages_with_matching_images)))\n\n for page in annotations.pages_with_matching_images:\n print('\\n\\tPage url : {}'.format(page.url))\n\n if page.full_matching_images:\n print('\\t{} Full Matches found: '.format(\n len(page.full_matching_images)))\n\n for image in page.full_matching_images:\n print('\\t\\tImage url : {}'.format(image.url))\n\n if page.partial_matching_images:\n print('\\t{} Partial Matches found: '.format(\n len(page.partial_matching_images)))\n\n for image in page.partial_matching_images:\n print('\\t\\tImage url : {}'.format(image.url))\n\n if annotations.web_entities:\n print('\\n{} Web entities found: '.format(\n len(annotations.web_entities)))\n\n for entity in annotations.web_entities:\n print('\\n\\tScore : {}'.format(entity.score))\n print(u'\\tDescription: {}'.format(entity.description))\n\n if annotations.visually_similar_images:\n print('\\n{} visually similar images found:\\n'.format(\n len(annotations.visually_similar_images)))\n\n for image in annotations.visually_similar_images:\n print('\\tImage url : {}'.format(image.url))\n \n return True\n\ndef detect_text(path, lang):\n \"\"\"Detects text in the file.\"\"\"\n client = vision.ImageAnnotatorClient()\n\n with io.open(path, 'rb') as image_file:\n content = image_file.read()\n\n image = vision.types.Image(content=content)\n image_context = types.ImageContext(language_hints=[lang])\n\n response = client.text_detection(image=image, image_context=image_context)\n \n texts = response.text_annotations\n \n output = []\n\n for text in texts:\n if len(text.description.split(\" \")) > 1:\n output.append(text.description)\n \n return output\n","sub_path":"image_processing.py","file_name":"image_processing.py","file_ext":"py","file_size_in_byte":12885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"173259186","text":"from multiprocessing import Process,Pool\nimport os\n\ndef info(title):\n print(title)\n print('module name:', __name__)\n if hasattr(os, 'getppid'): # only available on Unix\n print('parent process:', os.getppid())\n print('process id:', os.getpid())\n\ndef f(name):\n info('function f\\n\\n\\n`')\n \n print('hello', name)\n\n\ninfo('main line')\np = Process(target=f, args=('bob',))\np.start()\np.join()\n\n\n\"\"\"next program demo\"\"\"\n\ndef f1(x):\n return x*x\n\nwith Pool(5) as p:\n print (p.map(f1,[2,3,4,5]))\n print('parent process:', os.getppid())\n print('process id:', 
os.getpid())","sub_path":"Documents/LiClipse2/tests/python-tests-desktop/test_multiprocessing/test_newmulti.py","file_name":"test_newmulti.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"623641685","text":"#!/usr/bin/env python\ntry:\n from setuptools import setup\nexcept ImportError:\n from distutils.core import setup\n\nimport os\nimport sys\n\ndef read(*rnames):\n return open(os.path.join(os.path.dirname(__file__), *rnames)).read()\n\nif float('%d.%d' % sys.version_info[:2]) < 2.6 or float('%d.%d' % sys.version_info[:2]) >= 3.0:\n sys.stderr.write(\"Your Python version %d.%d.%d is not supported.\\n\" %\n sys.version_info[:3])\n sys.stderr.write(\"OAS Python SDK requires Python between 2.6 and 3.0.\\n\")\n sys.exit(1)\n\nclassifiers = [\n 'Development Status :: 4 - Beta',\n\n 'Intended Audience :: Developers',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n\n 'Operating System :: OS Independent',\n\n 'License :: OSI Approved :: GNU General Public License v2 (GPLv2)',\n\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n]\n\nsetup(\n name='oassdk',\n version='0.2.5',\n description='Python SDK for Aliyun OAS (Open Archive Service)',\n author='Aliyun OAS',\n author_email='jianyi.weng@alibaba-inc.com',\n url='http://www.aliyun.com/product/oas',\n packages=['oas', 'oas.ease'],\n license='GPL version 2',\n install_requires=[\n 'pyaml',\n 'ordereddict',\n ],\n scripts=['oascmd.py'],\n)\n","sub_path":"pypi_install_script/oassdk-0.2.5.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"423286079","text":"\nimport json\nimport requests\n\nfrom flask import Flask, request, session, g, redirect, url_for, abort, \\\n render_template, flash, jsonify\n\napp = Flask(__name__)\napp.config.from_object(__name__)\napp.config.from_envvar('settings.py', silent=True)\n\n@app.route('/leaflet_js')\ndef leaflet_js():\n d = open('/var/www/flask_mapbox/SanbornLots.json','r')\n lots = d.read()\n d.close()\n d = open('/var/www/flask_mapbox/SanbornBlocks.json','r')\n blocks = d.read()\n d.close()\n return render_template('leaflet_js.html',\n blocks=blocks,\n lots=lots\n )\n","sub_path":"john_flask/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"192887281","text":"from typing import Any, List, Optional\n\nfrom pydantic import BaseModel\n\nfrom user_similarity_model.processing.validation import UserDataInputSchema\n\n\nclass PredictionResults(BaseModel):\n errors: Optional[Any]\n version: str\n predictions: Optional[List[float]]\n\n\nclass MultipleUserDataInputs(BaseModel):\n inputs: List[UserDataInputSchema]\n\n class Config:\n schema_extra = {\n \"example\": {\n \"inputs\": [\n {\n \"user_handle\": 20,\n }\n ]\n }\n }\n","sub_path":"app/schemas/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"112155257","text":"from nltk.translate.bleu_score import sentence_bleu\nfrom nltk.translate.bleu_score import SmoothingFunction\nfrom sklearn.metrics.pairwise import cosine_similarity\n\ntrim_target=['clever', '...', 'unless', 'right', '...']\ntrim_predict=['god', 'would', 'much', '...', 
'good']\n\ntry:\n BLEUscore4 = sentence_bleu([trim_target], trim_predict, weights=(0.33, 0.33, 0.33, 0, 0),\n smoothing_function=SmoothingFunction().method7)\nexcept:\n BLEUscore4 = 0\n\nprint(BLEUscore4)","sub_path":"gen-dnc/nlp_task/report.py","file_name":"report.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"135020679","text":"n=2**14\r\ndef build(i,l,r,d) :\r\n\tif l+1!=r :\r\n\t\tmid=(l+r)>>1\r\n\t\tbuild(i<<1,l,mid,d-1)\r\n\t\tbuild(i<<1|1,mid,r,d-1)\r\n\t\tprint ( \"Mux16(a=in%d,b=in%d,sel=address[%d],out=in%d);\" % (i<<1,i<<1|1,d,i) )\r\n\telse :\r\n\t\tprint ( \"DFF16(in=out%d,out=in%d);\" % (i,i) )\r\n\r\ndef write(i,l,r,d) :\r\n\tif l+1!=r :\r\n\t\tmid=(l+r)>>1\r\n\t\tprint ( \"Mux(a=f%d,b=false,sel=address[%d],out=f%d);\" % (i,d,i<<1) )\r\n\t\tprint ( \"Mux(a=false,b=f%d,sel=address[%d],out=f%d);\" % (i,d,i<<1|1) )\r\n\t\twrite(i<<1,l,mid,d-1)\r\n\t\twrite(i<<1|1,mid,r,d-1)\r\n\telse :\r\n\t\tprint ( \"Mux16(a=in%d,b=in,sel=f%d,out=out%d);\" % (i,i,i) )\r\n\r\nimport math\r\nbuild(1,0,n,math.log(n,2)-1)\r\n\r\nprint (\"And16(a=in1,b=true,out=out);\")\r\n\r\nprint ( \"And(a=load,b=true,out=f1);\" )\r\n\r\nwrite(1,0,n,math.log(n,2)-1)\r\n","sub_path":"陈江伦/03/a/make.py","file_name":"make.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"575108569","text":"# -*- coding: utf-8 -*-\n# created by jngo\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('core', '0012_auto_20170728_1148'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='chipregionmetadata',\n name='metadata_field',\n field=models.ForeignKey(default=\"1\", on_delete=django.db.models.deletion.CASCADE, to='core.MetadataField',\n verbose_name='Metadata key'),\n preserve_default=False,\n ),\n ]","sub_path":"core/migrations/0013_chipmetadata_field_foreignkey_create.py","file_name":"0013_chipmetadata_field_foreignkey_create.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"491790015","text":"# -*- coding: utf-8 -*-\n\"\"\"\n.. module:: geek\n :platform: Unix, Windows\n :synopsis: GEneralised Elementary Kinetics\n\n.. 
moduleauthor:: geek team\n\n[---------]\n\nCopyright 2018 Laboratory of Computational Systems Biotechnology (LCSB),\nEcole Polytechnique Federale de Lausanne (EPFL), Switzerland\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\nhttp://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\n\"\"\"\n\nfrom pandas import read_csv, DataFrame\nimport random\nimport numpy as np\nfrom geek.analysis import geek_regression\n\n\nseed = 1\n\ndf = read_csv('./data/result_full_factorial_pgm.csv')\n\n# Reference concentrations\npgm = 64e-6\ng3p = 49e-6\ng2p = g3p\n\n# Define microscopic reaction rate constants:\nk1f = 1.52e5 # 1/Ms\nk1b = 10.0 # 1/s\nk2f = 22.0 # 1/s\nk2b = 3.29e5 # 1/Ms\n\n\n\nreference_concentrations = [pgm*0.5, pgm*0.5, g3p, g2p]\nconcentrations = ['enzyme_complex_concentration',\n 'enzyme_concentration',\n 'product_concentration',\n 'substrate_concentration']\n\n\n# Filter the data frame for specific condition\nthis_volume_fraction = 0.1\nthis_mu = 31.9\nthis_sigma = 0.825\n\nthis_df = df [ (df['sigma_mass'] == this_sigma) &\n (df['mu_mass'] == this_mu) &\n (df['volume_fraction'] == this_volume_fraction)]\n\n\n# Extract the GEEK parameters from Linear regression\nk1_fwd_params = geek_regression(this_df,\n concentrations,\n reference_concentrations,\n 'k1_fwd_relative',\n verbose=False)\n\nk1_bwd_params = geek_regression(this_df,\n concentrations,\n reference_concentrations,\n 'k1_bwd_relative',\n verbose=False)\nk2_fwd_params = geek_regression(this_df,\n concentrations,\n reference_concentrations,\n 'k1_fwd_relative',\n verbose=False)\n\nk2_bwd_params = geek_regression(this_df,\n concentrations,\n reference_concentrations,\n 'k2_bwd_relative',\n verbose=False)\n\n\n\nrandom.seed(seed)\n#Map to parameter dict\nparam_dict = {\n 'k_1f0': k1f,\n 'k_1b0': k1b,\n 'beta_1f': k1_fwd_params['beta_lb'] + (k1_fwd_params['beta_ub'] - k1_fwd_params['beta_lb']) * random.random(),\n 'alpha_ES_1f': k1_fwd_params['alpha_enzyme_complex_concentration_lb'] + (\n k1_fwd_params['alpha_enzyme_complex_concentration_ub'] - k1_fwd_params[\n 'alpha_enzyme_complex_concentration_lb']) * random.random(),\n 'alpha_E_1f': k1_fwd_params['alpha_enzyme_concentration_lb'] + (\n k1_fwd_params['alpha_enzyme_concentration_ub'] - k1_fwd_params[\n 'alpha_enzyme_concentration_lb']) * random.random(),\n 'alpha_P_1f': k1_fwd_params['alpha_product_concentration_lb'] + (\n k1_fwd_params['alpha_product_concentration_ub'] - k1_fwd_params[\n 'alpha_product_concentration_lb']) * random.random(),\n 'alpha_S_1f': k1_fwd_params['alpha_substrate_concentration_lb'] + (\n k1_fwd_params['alpha_substrate_concentration_ub'] - k1_fwd_params[\n 'alpha_substrate_concentration_lb']) * random.random(),\n\n 'beta_1b': k1_bwd_params['beta_lb'] + (k1_bwd_params['beta_ub'] - k1_bwd_params['beta_lb']) * random.random(),\n 'alpha_ES_1b': k1_bwd_params['alpha_enzyme_complex_concentration_lb'] + (\n k1_bwd_params['alpha_enzyme_complex_concentration_ub'] - k1_bwd_params[\n 'alpha_enzyme_complex_concentration_lb']) * random.random(),\n 'alpha_E_1b': k1_bwd_params['alpha_enzyme_concentration_lb'] + (\n 
k1_bwd_params['alpha_enzyme_concentration_ub'] - k1_bwd_params[\n 'alpha_enzyme_concentration_lb']) * random.random(),\n 'alpha_P_1b': k1_bwd_params['alpha_product_concentration_lb'] + (\n k1_bwd_params['alpha_product_concentration_ub'] - k1_bwd_params[\n 'alpha_product_concentration_lb']) * random.random(),\n 'alpha_S_1b': k1_bwd_params['alpha_substrate_concentration_lb'] + (\n k1_bwd_params['alpha_substrate_concentration_ub'] - k1_bwd_params[\n 'alpha_substrate_concentration_lb']) * random.random(),\n\n 'k_2f0': k2f,\n 'k_2b0': k2b,\n 'beta_2f': k2_fwd_params['beta_lb'] + (k2_fwd_params['beta_ub'] - k2_fwd_params['beta_lb']) * random.random(),\n 'alpha_ES_2f': k2_fwd_params['alpha_enzyme_complex_concentration_lb'] + (\n k2_fwd_params['alpha_enzyme_complex_concentration_ub'] - k2_fwd_params[\n 'alpha_enzyme_complex_concentration_lb']) * random.random(),\n 'alpha_E_2f': k2_fwd_params['alpha_enzyme_concentration_lb'] + (\n k2_fwd_params['alpha_enzyme_concentration_ub'] - k2_fwd_params[\n 'alpha_enzyme_concentration_lb']) * random.random(),\n 'alpha_P_2f': k2_fwd_params['alpha_product_concentration_lb'] + (\n k2_fwd_params['alpha_product_concentration_ub'] - k2_fwd_params[\n 'alpha_product_concentration_lb']) * random.random(),\n 'alpha_S_2f': k2_fwd_params['alpha_substrate_concentration_lb'] + (\n k2_fwd_params['alpha_substrate_concentration_ub'] - k2_fwd_params[\n 'alpha_substrate_concentration_lb']) * random.random(),\n\n 'beta_2b': k2_bwd_params['beta_lb'] + (k2_bwd_params['beta_ub'] - k2_bwd_params['beta_lb']) * random.random(),\n 'alpha_ES_2b': k1_bwd_params['alpha_enzyme_complex_concentration_lb'] + (\n k2_bwd_params['alpha_enzyme_complex_concentration_ub'] - k2_bwd_params[\n 'alpha_enzyme_complex_concentration_lb']) * random.random(),\n 'alpha_E_2b': k2_bwd_params['alpha_enzyme_concentration_lb'] + (\n k2_bwd_params['alpha_enzyme_concentration_ub'] - k2_bwd_params[\n 'alpha_enzyme_concentration_lb']) * random.random(),\n 'alpha_P_2b': k2_bwd_params['alpha_product_concentration_lb'] + (\n k2_bwd_params['alpha_product_concentration_ub'] - k2_bwd_params[\n 'alpha_product_concentration_lb']) * random.random(),\n 'alpha_S_2b': k2_bwd_params['alpha_substrate_concentration_lb'] + (\n k2_bwd_params['alpha_substrate_concentration_ub'] - k2_bwd_params[\n 'alpha_substrate_concentration_lb']) * random.random(),\n\n 'ES0': reference_concentrations[0],\n 'E0': reference_concentrations[1],\n 'P0': reference_concentrations[2],\n 'S0': reference_concentrations[2],\n}\n\n\"\"\"\nDeclare ODE-Problem\n\"\"\"\nfrom sympy import symbols\nfrom sympy import exp as sym_exp\n\n# Variables\nES, E, P, S = symbols(['ES', 'E', 'P', 'S'])\nvariables = [ES, E, P, S]\n# Parameters\nk_1f0, k_1b0, k_2f0, k_2b0, = symbols(['k_1f0', 'k_1b0', 'k_2f0','k_2b0'] )\n\n# Define symbols for the GEEK parameters\nbeta_1f, beta_1b, beta_2f, beta_2b , = symbols(['beta_1f', 'beta_1b', 'beta_2f', 'beta_2b' ] )\nalpha_ES_1b,alpha_ES_1f,alpha_ES_2b,alpha_ES_2f, = symbols(['alpha_ES_1f', 'alpha_ES_1b','alpha_ES_2b','alpha_ES_2f'])\nalpha_E_1b, alpha_E_1f, alpha_E_2b, alpha_E_2f, = symbols(['alpha_E_1f', 'alpha_E_1b','alpha_E_2b','alpha_E_2f'])\nalpha_P_1f, alpha_P_1b, alpha_P_2f, alpha_P_2b, = symbols(['alpha_P_1f', 'alpha_P_1b','alpha_P_2f','alpha_P_2b'])\nalpha_S_1f, alpha_S_1b, alpha_S_2f, alpha_S_2b, = symbols(['alpha_S_1f', 'alpha_S_1b','alpha_S_2f','alpha_S_2b'])\n\nES0,E0,P0, S0 = symbols(['ES0', 'E0', 'P0', 'S0'])\n\node_params = [k_1f0, k_1b0, k_2f0, k_2b0,\n beta_1f, beta_1b, beta_2f, beta_2b ,\n 
alpha_ES_1b,alpha_ES_1f,alpha_ES_2b,alpha_ES_2f,\n alpha_E_1b, alpha_E_1f, alpha_E_2b, alpha_E_2f,\n alpha_P_1f, alpha_P_1b, alpha_P_2f, alpha_P_2b,\n alpha_S_1f, alpha_S_1b, alpha_S_2f, alpha_S_2b,\n ES0, E0, P0, S0]\n# Reactions\n\ngeek_reactions = {\n 'r_1f': k_1f0 * S * E * sym_exp(beta_1f) * (ES / ES0) ** alpha_ES_1f * (E / E0) ** alpha_E_1f * ( P / P0) ** alpha_P_1f * ( S / S0) ** alpha_S_1f,\n 'r_1b': k_1b0 * ES * sym_exp(beta_1b) * (ES / ES0) ** alpha_ES_1b * (E / E0) ** alpha_E_1b * ( P / P0 ) ** alpha_P_1b * ( S / S0) ** alpha_S_1b,\n 'r_2f': k_2f0 * ES * sym_exp(beta_2f) * (ES / ES0) ** alpha_ES_2f * (E / E0) ** alpha_E_2f * (P / P0) ** alpha_P_2f * (S / S0) ** alpha_S_2f,\n 'r_2b': k_2b0 * P * E * sym_exp(beta_2b) * (ES / ES0) ** alpha_ES_2b * (E / E0) ** alpha_E_2b * (P / P0) ** alpha_P_2b * (S / S0) ** alpha_S_2b,\n}\n\n#Expressions\n\nexpressions = {\n ES: geek_reactions['r_1f'] + geek_reactions ['r_2b']- geek_reactions['r_1b'] - geek_reactions['r_2f'],\n E: -(geek_reactions['r_1f'] + geek_reactions ['r_2b']- geek_reactions['r_1b'] - geek_reactions['r_2f']),\n S: -geek_reactions['r_1f'] + geek_reactions['r_1b'],\n P: geek_reactions['r_2f'] - geek_reactions['r_2b'],\n}\n\nfrom geek.analysis.ode_function import OdeFun\nfun = OdeFun(variables,ode_params,expressions)\n\nfrom scipy.integrate import ode\nr = ode(fun).set_integrator('vode', method='bdf')\n\neps = 1e-3\ny0 = [pgm * eps,\n pgm * (1. - eps),\n g3p * eps,\n g3p * (1. - eps),\n ]\n\nt0 = 0.0\n\nr.set_initial_value(y0, t0).set_f_params(param_dict)\ndata = []\n\nt_max = 10.0\nwhile r.successful() and r.t < t_max:\n data.append( np.append(r.t + t_max/1000.0,\n r.integrate(r.t + t_max/1000.0 )) )\ndata = np.array(data)\n\ndf = DataFrame(data=data, columns = ['time', 'ES', 'E', 'P', 'S'])","sub_path":"test/test_geek_ode.py","file_name":"test_geek_ode.py","file_ext":"py","file_size_in_byte":9885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"578000550","text":"import os\nimport time\nimport errno\nfrom tekton.lang.exceptions import IOException\n\nInfinite = 0xffffffff\n\n\nclass LockFile(object):\n def __init__(self, lockDir, name):\n \"\"\"Initialize a LockFile instance\"\"\"\n self.file = 0\n self.name = name\n self.lockDir = lockDir\n\n def __del__(self):\n self.release()\n\n def set_directory(self, lockDir):\n \"\"\"Set lockfile directory\"\"\"\n if self.is_locked():\n raise IOException(\n \"The lockfile must be released before changing directory\")\n\n self.lockDir = lockDir\n\n def set_name(self, name):\n \"\"\"Set lockfile name\"\"\"\n if self.is_locked():\n raise IOException(\n \"The lockfile must be released before changing name\")\n\n self.name = name\n\n def set_filepath(self, filePath):\n \"\"\"Set lockfile full path (including filename)\"\"\"\n if self.is_locked():\n raise IOException(\n \"The lockfile must be released before changing name/directory\")\n\n self.lockDir = os.path.dirname(filePath)\n self.name = os.path.basename(filePath)\n\n def is_locked(self, lockDir=None, name=None):\n \"\"\"Check is file is locked\"\"\"\n if self.file != 0:\n return True\n\n if isinstance(lockDir, basestring) and isinstance(name, basestring):\n return os.access(self.filename(), os.F_OK)\n else:\n return False\n\n def is_owner(self, name):\n return self.is_locked() and name == self.name\n\n def filename(self):\n \"\"\"Normalize name of lockfile\"\"\"\n name = self.name.replace(\"\\\\\", \"\")\n name = name.replace(\"/\", \"\")\n name = name.replace(\":\", \"\")\n name = 
name.replace(\"*\", \"\")\n name = name.replace(\"?\", \"\")\n name = name.replace(\"\\\"\", \"\")\n name = name.replace(\"'\", \"\")\n name = name.replace(\"<\", \"\")\n name = name.replace(\">\", \"\")\n name = name.replace(\"|\", \"\")\n\n return os.path.join(self.lockDir, \"lockfile_\" + name + \".lock\")\n\n def acquire(self, timeoutMs=Infinite):\n \"\"\"Try to acquire the lock\"\"\"\n if not self.lockDir:\n raise IOException(\"Lock file directory not specified\")\n\n if not os.path.exists(self.lockDir):\n raise IOException(\"Lock file directory %s does not exist\" %\n self.lockDir)\n\n if timeoutMs == Infinite:\n retry = Infinite\n else:\n retry = timeoutMs / 10\n\n fileName = self.filename()\n errorCode = 0\n fd = -1\n\n try:\n fd = os.open(fileName, os.O_CREAT | os.O_EXCL)\n except OSError as e:\n errorCode = e.errno\n\n while fd == -1:\n if retry <= 0:\n break\n if (errorCode != errno.EEXIST and errorCode != errno.EMFILE):\n break\n\n time.sleep(0.01)\n errorCode = 0\n\n try:\n fd = os.open(fileName, os.O_CREAT | os.O_EXCL)\n except OSError as e:\n errorCode = e.errno\n\n retry -= 1\n if retry != Infinite and retry == 0:\n break\n\n if fd != -1:\n os.close(fd)\n self.file = 1\n\n return (self.file != 0)\n\n def release(self):\n \"\"\"Release the lock\"\"\"\n if self.file != 0:\n os.unlink(self.filename())\n self.file = 0\n\n\nif __name__ == \"__main__\":\n import tempfile\n filePath = os.path.join(tempfile.gettempdir(), \"feh.txt\")\n lock = LockFile(os.path.dirname(filePath), os.path.basename(\"feh.txt\"))\n\n lock.set_filepath(filePath)\n if not lock.is_locked():\n lock.acquire()\n\n feh = open(filePath, \"a+\")\n import datetime\n feh.write(\"%s\\n\" % str((datetime.datetime.now())))\n feh.close()\n lock.release()\n","sub_path":"libraries/tekton/io/lockfile.py","file_name":"lockfile.py","file_ext":"py","file_size_in_byte":3848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"27279209","text":"import argparse\nimport os\nimport random\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.optim as optim\nimport torch.utils.data \n\nimport torchvision.datasets as dset \nimport torchvision.transforms as transforms \nimport torchvision.utils as vutils\n\nfrom dataloader import load_train_dataset\nfrom generator import Generator, Discriminator\nfrom utils import weights_init\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--dataset', required=True, help='cifar10 | lsun | imagenet | folder | lfw | fake')\nparser.add_argument('--dataroot', required=True, help='path to dataset')\nparser.add_argument('--workers', type=int, help='number of data loading workers', default=2)\nparser.add_argument('--batchSize', type=int, default=64, help='input batch size')\nparser.add_argument('--imageSize', type=int, default=64, help='the height / width of the input image to network')\nparser.add_argument('--nz', type=int, default=100, help='size of the latent z vector')\nparser.add_argument('--ngf', type=int, default=64)\nparser.add_argument('--ndf', type=int, default=64)\nparser.add_argument('--niter', type=int, default=25, help='number of epochs to train for')\nparser.add_argument('--lr', type=float, default=0.0002, help='learning rate, default=0.0002')\nparser.add_argument('--beta1', type=float, default=0.5, help='beta1 for adam. 
default=0.5')\nparser.add_argument('--cuda', action='store_true', help='enables cuda')\nparser.add_argument('--ngpu', type=int, default=1, help='number of GPUs to use')\nparser.add_argument('--netG', default='', help=\"path to netG (to continue training)\")\nparser.add_argument('--netD', default='', help=\"path to netD (to continue training)\")\nparser.add_argument('--outf', default='.', help='folder to output images and model checkpoints')\nparser.add_argument('--manualSeed', type=int, help='manual seed')\n\nopt = parser.parse_args()\nprint(opt)\n\ntry:\n os.mkdir(opt.outf)\nexcept OSError:\n pass\n\nif opt.manualSeed is None:\n opt.manualSeed = random.randint(1, 10000)\nprint(\"Random Seed: \", opt.manualSeed)\nrandom.seed(opt.manualSeed)\ntorch.manual_seed(opt.manualSeed)\n\ncudnn.benchmark = True\n\n\n# Decide which device we want to run on\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n\ndata_dir = '/Users/sueliu/projects/anom-detect/data/tar_dir/vae_train'\nloader = load_train_dataset(128, data_dir, batch_size=64)\n\n\n\n# Create the generator\nngpu = 0\nnetG = Generator(ngpu).to(device)\nnetG.apply(weights_init)\n\nprint(netG)\n\nnetD = Discriminator(ngpu).to(device)\nnetD.apply(weights_init)\nif opt.netD != '':\n netD.load_state_dict()","sub_path":"dcgan/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"330536067","text":"import cv2\nimport numpy as np\n\nimg = cv2.imread('./images/IMG.jpg')\nnum_rows, num_cols = img.shape[:2]\t# row, column, channel\n\nimg_scaled = cv2.resize(img, None, fx=1.2, fy=1.2, interpolation = cv2.INTER_LINEAR)\ncv2.imshow('Scaling - Linear Interpolation', img_scaled)\ncv2.waitKey()\nimg_scaled = cv2.resize(img, None, fx=1.2, fy=1.2, interpolation = cv2.INTER_CUBIC)\ncv2.imshow('Scaling - Cubic Interpolation', img_scaled)\ncv2.waitKey()\nimg_scaled = cv2.resize(img, (450, 400), interpolation = cv2.INTER_AREA)\ncv2.imshow('Scaling - Skewed Size', img_scaled)\ncv2.waitKey()","sub_path":"Chapter01/scaling.py","file_name":"scaling.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"357320008","text":"from django.shortcuts import render\nfrom django.conf import settings\nimport django_filters\nfrom rest_framework import viewsets, filters\nfrom rest_framework.decorators import detail_route, list_route\nfrom rest_framework.views import APIView\nimport os\nfrom rest_framework.response import Response\nfrom datetime import datetime\nfrom .models import *\nfrom rest_framework.reverse import reverse\nfrom .serializer import RegisteredStgSerializer, SMSMessageOTPSerializer\nfrom rest_framework.renderers import JSONRenderer\nfrom django.core.files.storage import FileSystemStorage\nimport json\nimport datetime\nimport requests\nfrom django.core.serializers.json import DjangoJSONEncoder\nfrom decimal import Decimal\nfrom django.http import JsonResponse\nfrom django.core import serializers\nfrom time import sleep\n\nfrom django.forms.models import model_to_dict\nfrom django.core.serializers.json import DjangoJSONEncoder\n#from django.core import serializers\nfrom django.core.serializers import serialize\nfrom functools import reduce\nfrom random import randint\nfrom django.db import DatabaseError, transaction\nimport http.client, urllib.request, urllib.parse, urllib.error, base64\n#import cognitive_face as CF\nimport 
pyodbc\n#from kombu import Connection\nfrom kombu import Connection, Exchange, Producer, Queue\nfrom bson.json_util import dumps\nimport pymongo\nfrom requests_oauthlib import OAuth1Session\n\nfrom pymongo import MongoClient\n\ndef GetAzureEmotion(url):\n url = 'https://southeastasia.api.cognitive.microsoft.com/face/v1.0/detect?returnFaceId=true&returnFaceLandmarks=false&returnFaceAttributes=age'\n headers = {'Content-Type': 'application/json','Ocp-Apim-Subscription-Key':'2c223b68e95840d48ad1e6d580332d6c'}\n r = requests.post(url, headers=headers, params={\"url\": \"http://mis.digital:7779/PaySlip/Fair/2019-01-12-19-14-10-618_my_image.jpg\"})\n print(r.text)\n\n\n\n # KEY = '2c223b68e95840d48ad1e6d580332d6c' # Replace with a valid Subscription Key here.\n # CF.Key.set(KEY)\n #\n # BASE_URL = 'https://southeastasia.api.cognitive.microsoft.com/face/v1.0/detect?returnFaceId=true&returnFaceLandmarks=false&returnFaceAttributes=age' # Replace with your regional Base URL\n # CF.BaseUrl.set(BASE_URL)\n #\n # img_url = 'http://mis.digital:7779/PaySlip/Fair/2019-01-12-19-14-10-618_my_image.jpg'\n # result = CF.face.detect(img_url)\n # print(result)\n # headers = {\n # # Request headers\n # 'Content-Type': 'application/json',\n # 'Ocp-Apim-Subscription-Key': '5e96252de9e340ed9225c2366d55a9ad',\n # }\n #\n # params = urllib.parse.urlencode({\n # 'returnFaceId': 'true',\n # 'returnFaceLandmarks': 'false',\n # 'returnFaceAttributes': '{string}',\n # })\n # try:\n # conn = http.client.HTTPSConnection('westus.api.cognitive.microsoft.com')\n # conn.request(\"POST\", \"/face/v1.0/detect?%s\" % params, url, headers)\n # response = conn.getresponse()\n # data = response.read()\n # print(data)\n # conn.close()\n # except Exception as e:\n # print(str(e))\n\n\n# Create your views here.\nclass Location(viewsets.ModelViewSet):\n\n queryset = District.objects.all()\n role_class = RegisteredStgSerializer\n\n # Endpoint to receive image from mobile app\n\n def create(self, request):\n # userId = request.POST.get('userId')\n # password = request.POST.get('password')\n # # userId = request.POST['userId']\n # #password = request.POST['password']\n # print ('userId ' + str(userId) + ' password ' + str(password))\n # checkUser = UserManager.objects.filter(UserId = userId, Password = password).values('UserId','UserName','RoleId').using('MotorConstructionEquipment')\n # print ('checkUser ---- > ' +str(len(checkUser)))\n\n # if len(checkUser) > 0 :\n # data = json.dumps(list(checkUser))\n # response = {'StatusCode': '200', 'StatusMessage': str(data)}\n # return Response(response,content_type=\"application/json\")\n # else:\n # response = {'StatusCode': '203', 'StatusMessage': 'UserId/Password Error'}\n # return Response(response,content_type=\"application/json\")\n\n response = {'StatusCode': '200', 'StatusMessage': 'resend'}\n return Response(response,content_type=\"application/json\")\n\n\n def list(self, request):\n queryset = District.objects.all().values('Id', 'DistrictName').using('YamahaBooking')\n data = json.dumps(list(queryset))\n\n response = {'StatusCode': '200', 'StatusMessage': str(data)}\n print(response)\n return Response(response,content_type=\"application/json\")\n\nclass FormValidation(viewsets.ModelViewSet):\n queryset = District.objects.all()\n role_class = RegisteredStgSerializer\n\n\n def create(self, request):\n response = {'StatusCode': '200', 'StatusMessage': 'resend'}\n return Response(response,content_type=\"application/json\")\n\n def list(self, request):\n fieldName = 
request.GET['fieldName'].strip()\n print('filed - ' + fieldName)\n if str(fieldName) == 'Mobile':\n mobile = request.GET['mobile'].strip()\n print('mobile - ' + mobile)\n qmobileQuerySet = RegisteredUser.objects.filter(Mobile = str(mobile)).using('YamahaBooking')\n if len(list(qmobileQuerySet)) > 0:\n response = {'StatusCode': '200', 'StatusMessage': '0'}\n print(response)\n return Response(response,content_type=\"application/json\")\n else :\n response = {'StatusCode': '200', 'StatusMessage': '1'}\n print(response)\n return Response(response, content_type=\"application/json\")\n\n if str(fieldName) == 'Email':\n email = request.GET['email'].strip()\n print('email - ' + email)\n qmobileQuerySet = RegisteredUser.objects.filter(Email = str(email)).using('YamahaBooking')\n if len(list(qmobileQuerySet)) > 0:\n response = {'StatusCode': '200', 'StatusMessage': '0'}\n print(response)\n return Response(response,content_type=\"application/json\")\n else :\n response = {'StatusCode': '200', 'StatusMessage': '1'}\n print(response)\n return Response(response, content_type=\"application/json\")\n\n # queryset = District.objects.all().values('Id', 'DistrictName').using('YamahaBooking')\n # data = json.dumps(list(queryset))\n\n response = {'StatusCode': '200', 'StatusMessage': 'NotFound'}\n print(response)\n return Response(response,content_type=\"application/json\")\n\n\nclass UserRegistration(viewsets.ModelViewSet):\n queryset = District.objects.all()\n #role_class = RegisteredStgSerializer\n sms_class = SMSMessageOTPSerializer\n\n def create(self, request):\n userName = request.POST.get('name')\n mobile = request.POST.get('mobile')\n district = request.POST.get('district')\n email = request.POST.get('email')\n remarks = request.POST.get('remarks')\n\n entryTime = datetime.datetime.now()\n Disct = District.objects.filter(pk=int(district)).using('YamahaBooking')[0]\n regUser = RegisteredUser(UserName=str(userName), Mobile=str(mobile), Email= str(email), IsUsed ='N', Status ='1', EntryDate=entryTime, Remark=remarks, DistrictId=Disct)\n regUser.save(using='YamahaBooking')\n\n otpm = random_with_N_digits(4)\n smsMessage = SMSMessage( OtpCode=str(otpm), Email= str(email), IsUsed ='N', EntryDate=entryTime)\n print(str(smsMessage))\n smsMessage.save(using='YamahaBooking')\n text = 'Please use this confirmation code for registration.' + str(otpm)\n SendSMS(mobile, text)\n #SendSMS('01920250777', text)\n\n response = {'StatusCode': '200', 'StatusMessage': 'success'}\n return Response(response,content_type=\"application/json\")\n\nclass ForgetPassword(viewsets.ModelViewSet):\n queryset = District.objects.all()\n role_class = RegisteredStgSerializer\n\n def list(self, request):\n mobile = request.GET.get('mobile').strip()\n redObj = RegisteredUser.objects.filter(Mobile=str(mobile)).using('YamahaBooking')\n if len(list(redObj))>0:\n user = YamahaUser.objects.filter(RegsUserId=redObj[0]).values('UserId','Password').using('YamahaBooking')[0]\n\n text = 'UserId : ' + user['UserId']+\" Passwors : \" + user['Password']\n SendSMS(mobile, text)\n #SendSMS('01920250777', text)\n response = {'StatusCode': '200', 'StatusMessage': 'success'}\n return Response(response, content_type=\"application/json\")\n else:\n response = {'StatusCode': '202', 'StatusMessage': 'Register Please. 
This System Can Not found your mobile Number.'}\n return Response(response, content_type=\"application/json\")\n\n\n\n\n\nclass OTPCheck(viewsets.ModelViewSet):\n queryset = District.objects.all()\n role_class = RegisteredStgSerializer\n\n def create(self, request):\n email = request.POST.get('email')\n otp = request.POST.get('otp')\n sms = SMSMessage.objects.filter(OtpCode=str(otp), Email=str(email), IsUsed='N').using('YamahaBooking')\n if len(list(sms)) > 0:\n SMSMessage.objects.filter(OtpCode=str(otp), Email=str(email), IsUsed='N').using('YamahaBooking').update(IsUsed='Y')\n response = {'StatusCode': '200', 'StatusMessage': 'success'}\n return Response(response, content_type=\"application/json\")\n else :\n response = {'StatusCode': '202', 'StatusMessage': 'faile'}\n return Response(response, content_type=\"application/json\")\n\nclass LoginCheck(viewsets.ModelViewSet):\n queryset = District.objects.all()\n role_class = RegisteredStgSerializer\n def create(self, request):\n email = request.POST.get('email')\n password = request.POST.get('password')\n\n userCheck = YamahaUser.objects.filter(UserId=str(email), Password=str(password), Status = 'Y').using('YamahaBooking')\n print(\"userCheck \" + str(userCheck ))\n\n if len(list(userCheck)) > 0:\n response = {'StatusCode': '200', 'StatusMessage': 'success'}\n return Response(response, content_type=\"application/json\")\n else :\n RegisteredUser.objects.filter(Email=str(email), IsUsed='N').using('YamahaBooking').update(IsUsed='Y')\n regUser = RegisteredUser.objects.filter(Email=str(email)).values('UserName').using('YamahaBooking')\n rUser = RegisteredUser.objects.filter(Email=str(email)).all().using('YamahaBooking')\n YamahaUser(UserId=str(email), UserName=regUser[0]['UserName'], Password=str(password), IsAdmin='0', Status = 'Y', RegsUserId=rUser[0]).save(using='YamahaBooking')\n response = {'StatusCode': '200', 'StatusMessage': 'success'}\n return Response(response, content_type=\"application/json\")\n\n response = {'StatusCode': '202', 'StatusMessage': 'fail'}\n return Response(response, content_type=\"application/json\")\n\nclass DepositBank(viewsets.ModelViewSet):\n queryset = District.objects.all()\n role_class = RegisteredStgSerializer\n\n def list(self, request):\n #fieldName = request.GET['fieldName'].strip()\n print('All Deposite Bank Information - ')\n queryset = DepositBankInfo.objects.all().values('Id', 'BankAccountName', 'AccountNo', 'BranchName').using('YamahaBooking')\n data = json.dumps(list(queryset))\n\n response = {'StatusCode': '200', 'StatusMessage': str(data)}\n print(response)\n return Response(response, content_type=\"application/json\")\n\n\nclass AllProductInfo(viewsets.ModelViewSet):\n queryset = District.objects.all()\n role_class = RegisteredStgSerializer\n\n def list(self, request):\n #fieldName = request.GET['fieldName'].strip()\n print('All Product Information - ')\n queryset = Product.objects.filter(Stock__gt=0, Status='1').values('Id', 'ProductName', 'ProductPrice', 'MinBookingPrice', 'ProductImage1').using('YamahaBooking')\n if len(list(queryset))> 0:\n data = json.dumps(list(queryset), cls=DjangoJSONEncoder)\n #data = json.dumps(list(queryset))\n response = {'StatusCode': '200', 'StatusMessage': str(data)}\n print(response)\n return Response(response, content_type=\"application/json\")\n else :\n response = {'StatusCode': '202', 'StatusMessage': 'No Item For Sales'}\n print(response)\n return Response(response, content_type=\"application/json\")\n\nclass BookingSave(viewsets.ModelViewSet):\n queryset = 
District.objects.all()\n role_class = RegisteredStgSerializer\n\n def create(self, request):\n UserName = request.POST.get('UserName')\n AccountName = request.POST.get('AccountName')\n AccountNo = request.POST.get('AccountNo')\n depositBakinfo = request.POST.get('DepositBankInfo')\n dealerPoint = request.POST.get('DealerPoint')\n BookingMoney = request.POST.get('BookingMoney')\n product = request.POST.get('Product')\n TermsCondition = request.POST.get('TermsCondition')\n Remarks = request.POST.get('Remarks')\n\n dt = str(datetime.datetime.now())\n _datetime = datetime.datetime.now()\n datetime_str = _datetime.strftime(\"%Y-%m-%d-%H-%M-%S\")\n fs = FileSystemStorage(location=settings.MEDIA_URL)\n productImage1 = request.FILES['uploaded_file']\n\n PaySlipDoc = datetime_str + \"-\" + productImage1.name\n fs.save(datetime_str+\"-\"+productImage1.name, productImage1)\n user = YamahaUser.objects.filter(UserId = str(UserName)).using('YamahaBooking')[0]\n product = Product.objects.filter(pk=int(product)).using('YamahaBooking')[0]\n dealerLocation = DealerLocation.objects.filter(pk=int(dealerPoint)).using('YamahaBooking')[0]\n checkUserBooking = Booking.objects.filter(UserId=user, ProductId=product, BookingStatus='Pending').using('YamahaBooking')\n if len(list(checkUserBooking)) > 0:\n response = {'StatusCode': '203', 'StatusMessage': 'DB transaction Fail.'}\n else :\n sTime = datetime.datetime.now()\n depositBankBranch = DepositBankInfo.objects.filter(pk=int(depositBakinfo)).values('BranchName').using('YamahaBooking')[0]\n depositBank = DepositBankInfo.objects.filter(pk=int(depositBakinfo)).using('YamahaBooking')[0]\n\n try:\n with transaction.atomic():\n booking = Booking(UserId=user, IsAgree='Y', ProductId=product, BookingStatus=str('Pending'), DepositAmount=Decimal(BookingMoney) ,TermsCondition=TermsCondition, Remarks=str(Remarks), EntryDate=sTime, DealerPoint=dealerLocation)\n booking.save(using='YamahaBooking')\n BookingPaySlip(BankAccountName=str(AccountName) , AccountNo=str(AccountNo), BranchName=depositBankBranch['BranchName'] ,DepositBank=depositBank , EntryDate=sTime, PaySlipDoc=str(PaySlipDoc), PayAmount=Decimal(BookingMoney), Booking=booking).save(using='YamahaBooking')\n response = {'StatusCode': '200', 'StatusMessage': 'success'}\n except DatabaseError:\n response = {'StatusCode': '202', 'StatusMessage': 'DB transaction Fail.'}\n\n print(response)\n return Response(response, content_type=\"application/json\")\n\nclass PiImageSave(viewsets.ModelViewSet):\n queryset = District.objects.all()\n role_class = RegisteredStgSerializer\n\n def create(self, request):\n dt = str(datetime.datetime.now())\n _datetime = datetime.datetime.now()\n datetime_str = _datetime.strftime(\"%Y-%m-%d-%H-%M-%S\")\n fs = FileSystemStorage(location=settings.MEDIA_URL+'Fair/')\n productImage1 = request.FILES['uploaded_file']\n\n fs.save(datetime_str + \"-\" + productImage1.name, productImage1)\n #imageurl=\"http://mis.digital:7779/PaySlip/Fair/\"+productImage1.name\n #GetAzureEmotion(productImage1)\n #imagename = datetime_str + \"-\" + productImage1.name\n # url = 'https://southeastasia.api.cognitive.microsoft.com/face/v1.0/detect?returnFaceId=true&returnFaceLandmarks=false&returnFaceAttributes=age'\n # headers = {'Content-Type': 'application/json', 'Ocp-Apim-Subscription-Key': '2c223b68e95840d48ad1e6d580332d6c'}\n # r = requests.post(url, headers=headers,\n # params={\"url\": \"http://mis.digital:7779/PaySlip/Fair/2019-01-12-19-14-10-618_my_image.jpg\"})\n # print(r.text)\n\n\n # sleep(5)\n # headers = 
{'Content-Type': 'application/octet-stream',\n # 'Ocp-Apim-Subscription-Key': '2c223b68e95840d48ad1e6d580332d6c'}\n # face_api_url = 'https://southeastasia.api.cognitive.microsoft.com/face/v1.0/detect?returnFaceId=true&returnFaceLandmarks=false&returnFaceAttributes=age'\n #\n # data = open('D:\\\\pyspace\\\\YamahaBookingAPI\\\\PaySlip\\\\Fair\\\\'+imagename, 'rb')\n # r = requests.post(face_api_url, headers=headers, data=data)\n # print(r.text)\n response = {'StatusCode': '200', 'StatuasMessage': 'Success'}\n return Response(response, content_type=\"application/json\")\n\n def list(self, request):\n\n query = RegisteredUser.objects.exclude(UserName = 'Minhaz').values('UserName','Mobile','UserName').using('YamahaBooking')\n #print(\"--Query Size--\" + str(len(list(query))))\n #print(\"--Query--\" + str(query))\n userList = LocTrackUserManager.objects.filter(Id__gte=15).values('UserName', 'Mobile').using('LocationTracker')\n\n #text = 'UserId : ' + user['UserId'] + \" Passwors : \" + user['Password']\n #SendSMS('01755676604', sms)\n for item in userList:\n #print('--'+str(item))\n sms = 'Dearest ' +str(item['UserName'])+', Download the app: http://dashboard.acigroup.info/GetData/aci-connect.apk in your android phone internet browser and then install it allowing the access of your location using your username(staff ID), password(staff ID). After successful installation and login, please restart your android phone.'\n #print('--' + sms)\n #SendSMS(str(item['Password']), sms)\n SendSMS(str(item['Mobile']), sms)\n\n\n # entryDate = datetime.datetime.now()\n # for item in query:\n # sendSMSText = SendSMSText(Mobile=str(item['Mobile']),Name=str(item['UserName']), SmsText=sms, EntryDate=entryDate)\n # sendSMSText.save(using='YamahaBooking')\n # SendSMS(str(item['Mobile']), sms)\n # #SendSMSText(str(item['Mobile']), sms)\n\n response = {'StatusCode': '200', 'StatuasMessage': 'Success'}\n return Response(response, content_type=\"application/json\")\n\n\nclass AccountDetail(viewsets.ModelViewSet):\n queryset = District.objects.all()\n role_class = RegisteredStgSerializer\n def list(self, request):\n UserName = request.GET.get('UserName').strip()\n\n userDetail = YamahaUser.objects.filter(UserId=str(UserName)).values('UserId', 'UserName', 'Password', 'RegsUserId__Mobile', 'RegsUserId__Remark', 'RegsUserId__DistrictId__Id', 'RegsUserId__DistrictId__DistrictName').using('YamahaBooking')\n data = json.dumps(list(userDetail))\n print(data)\n response = {'StatusCode': '200', 'StatusMessage': data}\n return Response(response, content_type=\"application/json\")\n\n\nclass AllBooking(viewsets.ModelViewSet):\n queryset = District.objects.all()\n role_class = RegisteredStgSerializer\n\n def list(self, request):\n UserName = request.GET.get('UserName').strip()\n user = YamahaUser.objects.filter(UserId=str(UserName)).using('YamahaBooking')[0]\n bookingList = Booking.objects.filter(UserId=user).values('Id', 'ProductId__ProductName', 'ProductId__ProductColor','BookingStatus', 'ProductId__ProductPrice', 'ProductId__MinBookingPrice', 'ProductId__ProductImage1', 'EntryDate', 'DealerPoint__DLRPoint').using('YamahaBooking')\n\n bookingList = bookingList.extra(select={'datestr': \"to_char(EntryDate, 'YYYY-MM-DD HH24:MI:SS')\"})\n adminList = list(bookingList)\n print(\"--------->\"+str(user))\n adminTemp = []\n for item in adminList:\n item['EntryDate'] = str(item['EntryDate'].strftime('%Y-%m-%d %H:%M'))\n #item['ProductId__ProductPrice'] = str(item['ProductId__ProductPrice'])\n adminTemp.append(item)\n #data = 
json.dumps(list(adminTemp))\n data = json.dumps(list(adminTemp), cls=DjangoJSONEncoder)\n print(data)\n response = {'StatusCode': '200', 'StatusMessage': data}\n return Response(response, content_type=\"application/json\")\n\nclass AllProductForMessage(viewsets.ModelViewSet):\n queryset = District.objects.all()\n role_class = RegisteredStgSerializer\n def list(self, request):\n adminTemp = Product.objects.all().values('Id', 'ProductName', 'ProductColor').using('YamahaBooking')\n data = json.dumps(list(adminTemp), cls=DjangoJSONEncoder)\n print(data)\n response = {'StatusCode': '200', 'StatusMessage': data}\n return Response(response, content_type=\"application/json\")\n\nclass BookingQueryCheck(viewsets.ModelViewSet):\n queryset = District.objects.all()\n role_class = RegisteredStgSerializer\n def list(self,request):\n userName = request.GET.get('user').strip()\n product = request.GET.get('product').strip()\n bookingdate = request.GET.get('bookingdate').strip()\n prdct = Product.objects.filter(pk=int(product)).using('YamahaBooking')[0]\n user = YamahaUser.objects.filter(UserId=str(userName)).using('YamahaBooking')[0]\n\n sDate = datetime.datetime(int(bookingdate.split('-')[0]), int(bookingdate.split('-')[1]),int(bookingdate.split('-')[2]), 0, 0, 0, 000)\n eDate = datetime.datetime(int(bookingdate.split('-')[0]), int(bookingdate.split('-')[1]), int(bookingdate.split('-')[2]), 23, 59, 59, 000)\n print(userName + \" \" + product + \" \" + bookingdate)\n bquery = Booking.objects.filter(UserId=user, ProductId=prdct, EntryDate__range=(sDate, eDate)).using('YamahaBooking')\n if len(list(bquery))> 0:\n response = {'StatusCode': '200', 'StatusMessage': 'OK'}\n print(\"response \" + str(response))\n return Response(response, content_type=\"application/json\")\n else :\n response = {'StatusCode': '200', 'StatusMessage': 'FAIL'}\n print(\"response \" + str(response))\n return Response(response, content_type=\"application/json\")\n\n\nclass MessageSave(viewsets.ModelViewSet):\n queryset = District.objects.all()\n role_class = RegisteredStgSerializer\n\n def create(self,request):\n userName = request.POST.get('user').strip()\n product = request.POST.get('Product').strip()\n bookingdate = request.POST.get('BookingDate').strip()\n messageDetails = request.POST.get('BookingMessage').strip()\n\n print(\"messageDetails \" + messageDetails + \" bookingdate \" + bookingdate + \" product \" + product)\n\n sDate = datetime.datetime(int(bookingdate.split('-')[0]), int(bookingdate.split('-')[1]),\n int(bookingdate.split('-')[2]), 0, 0, 0, 000)\n eDate = datetime.datetime(int(bookingdate.split('-')[0]), int(bookingdate.split('-')[1]),\n int(bookingdate.split('-')[2]), 23, 59, 59, 000)\n\n prdct = Product.objects.filter(pk=int(product)).using('YamahaBooking')[0]\n fromUser = YamahaUser.objects.filter(UserId=str(userName)).using('YamahaBooking')[0]\n toUser = YamahaUser.objects.filter(pk=1).using('YamahaBooking')[0]\n entryDate = datetime.datetime.now()\n bquery = Booking.objects.filter(UserId=fromUser, ProductId=prdct, EntryDate__range=(sDate, eDate)).using(\n 'YamahaBooking')\n\n inbox = Inbox(From=fromUser, To=toUser, BookingId=bquery[0], EntryDate=entryDate)\n inbox.save(using='YamahaBooking')\n InboxDetail(Message=str(messageDetails), InboxId=inbox, EntryBy=fromUser, EntryDate=entryDate).save(using='YamahaBooking')\n\n response = {'StatusCode': '200', 'StatusMessage': 'OK'}\n print(\"response \" + str(response))\n return Response(response, content_type=\"application/json\")\n\nclass 
UserAccountUpdate(viewsets.ModelViewSet):\n queryset = District.objects.all()\n role_class = RegisteredStgSerializer\n\n def create(self, request):\n username = request.POST.get('username').strip()\n mobile = request.POST.get('mobile').strip()\n email = request.POST.get('email').strip()\n password = request.POST.get('password').strip()\n changepassword = request.POST.get('changepassword').strip()\n district = request.POST.get('district').strip()\n remarks = request.POST.get('remarks').strip()\n Disct = District.objects.filter(pk=int(district)).using('YamahaBooking')[0]\n RegisteredUser.objects.filter(Email=str(email)).using('YamahaBooking').update(UserName=str(username),Mobile=str(mobile),Remark=str(remarks), DistrictId=Disct)\n if changepassword != '':\n password = changepassword\n\n YamahaUser.objects.filter(UserId=str(email)).using('YamahaBooking').update(UserName=str(username), Password=password)\n\n response = {'StatusCode': '200', 'StatusMessage': 'OK'}\n print(\"response \" + str(response))\n return Response(response, content_type=\"application/json\")\n\nclass BookingEdit(viewsets.ModelViewSet):\n queryset = District.objects.all()\n role_class = RegisteredStgSerializer\n\n\n def list(self, request):\n bookingId = request.GET.get('bookingId')\n bookQuery = Booking.objects.filter(pk=int(bookingId)).values('ProductId__Id','DepositAmount', 'TermsCondition', 'Remarks', 'DealerPoint__Id').using('YamahaBooking')\n bookQ = Booking.objects.filter(pk=int(bookingId)).using('YamahaBooking')[0]\n bookDetailQue = BookingPaySlip.objects.filter(Booking=bookQ).values('BankAccountName', 'AccountNo', 'DepositBank__Id', 'PaySlipDoc', 'PayAmount').using('YamahaBooking')\n bookingData = json.dumps(list(bookQuery), cls=DjangoJSONEncoder)\n bookDetailData = json.dumps(list(bookDetailQue), cls=DjangoJSONEncoder)\n print(\"bookDetailData \" + str(bookDetailData))\n response = {'StatusCode': '200', 'Booking': bookingData, 'BookingPaySlip': bookDetailData}\n print(\"response \" + str(response))\n return Response(response, content_type=\"application/json\")\n\nclass InboxInfoDetail(viewsets.ModelViewSet):\n queryset = District.objects.all()\n role_class = RegisteredStgSerializer\n def list(self, request):\n userName = request.GET.get('userName')\n UserId = YamahaUser.objects.filter(UserId=str(userName)).using('YamahaBooking')[0]\n bookingList = Booking.objects.filter(UserId=UserId).using('YamahaBooking')\n inboxList = Inbox.objects.filter(BookingId__in=bookingList).using('YamahaBooking')\n queryInboxList = Inbox.objects.filter(BookingId__in=bookingList).order_by('-Id').values('Id','BookingId__ProductId__ProductName','BookingId__ProductId__ProductColor', 'BookingId__ProductId__ProductPrice','EntryDate','From__UserId','To__UserId').using('YamahaBooking')\n queryInboxList = queryInboxList.extra(select={'datestr': \"to_char(EntryDate, 'YYYY-MM-DD HH24:MI:SS')\"})\n myList = list(queryInboxList)\n\n queryInboxDetail = InboxDetail.objects.filter(InboxId__in = inboxList).values('Message','InboxId__Id',).using('YamahaBooking')\n inBoxDetalList = list(queryInboxDetail)\n temp = []\n for item in myList:\n item['EntryDate'] = str(item['EntryDate'].strftime('%Y-%m-%d %H:%M'))\n for ik in inBoxDetalList:\n if item['Id'] == ik['InboxId__Id']:\n item['Message'] = ik['Message']\n\n temp.append(item)\n\n data = json.dumps(list(temp), cls=DjangoJSONEncoder)\n\n response = {'StatusCode': '200', 'Inbox': data}\n print(\"response \" + str(response))\n return Response(response, content_type=\"application/json\")\n\nclass 
BookingUpdate(viewsets.ModelViewSet):\n queryset = District.objects.all()\n role_class = RegisteredStgSerializer\n\n def create(self, request):\n BookingId = request.POST.get('BookingId')\n UserName = request.POST.get('UserName')\n AccountName = request.POST.get('AccountName')\n AccountNo = request.POST.get('AccountNo')\n depositBakinfo = request.POST.get('DepositBankInfo')\n BookingMoney = request.POST.get('BookingMoney')\n product = request.POST.get('Product')\n dealerPoint = request.POST.get('DealerPoint')\n Remarks = request.POST.get('Remarks')\n IsFile = request.POST.get('IsFile')\n dt = str(datetime.datetime.now())\n _datetime = datetime.datetime.now()\n productImage1 = ''\n if IsFile=='1':\n datetime_str = _datetime.strftime(\"%Y-%m-%d-%H-%M-%S\")\n fs = FileSystemStorage(location=settings.MEDIA_URL)\n productImage1 = request.FILES['uploaded_file']\n\n PaySlipDoc = datetime_str + \"-\" + productImage1.name\n fs.save(datetime_str + \"-\" + productImage1.name, productImage1)\n\n\n\n product = Product.objects.filter(pk=int(product)).using('YamahaBooking')[0]\n sTime = datetime.datetime.now()\n\n dealPoint = DealerLocation.objects.filter(pk=int(dealerPoint)).using('YamahaBooking')[0]\n depositBankBranch = DepositBankInfo.objects.filter(pk=int(depositBakinfo)).values('BranchName').using('YamahaBooking')[0]\n depositBank = DepositBankInfo.objects.filter(pk=int(depositBakinfo)).using('YamahaBooking')[0]\n booking = Booking.objects.filter(pk=int(BookingId)).using('YamahaBooking')[0]\n Booking.objects.filter(pk = int(BookingId)).using('YamahaBooking').update(ProductId=product, DepositAmount=Decimal(BookingMoney), Remarks=Remarks, DealerPoint=dealPoint)\n\n if IsFile == '1':\n BookingPaySlip.objects.filter(Booking=booking).using('YamahaBooking').update(\n BankAccountName=str(AccountName), AccountNo=str(AccountNo),\n BranchName=depositBankBranch['BranchName'], DepositBank=depositBank, EntryDate=sTime,\n PaySlipDoc=str(PaySlipDoc), PayAmount=Decimal(BookingMoney))\n else:\n BookingPaySlip.objects.filter(Booking=booking).using('YamahaBooking').update(\n BankAccountName=str(AccountName), AccountNo=str(AccountNo),\n BranchName=depositBankBranch['BranchName'], DepositBank=depositBank, EntryDate=sTime, PayAmount=Decimal(BookingMoney))\n\n\n response = {'StatusCode': '200', 'StatusMessage': 'OK'}\n print(\"response \" + str(response))\n return Response(response, content_type=\"application/json\")\n\nclass DownloadPaySlip(viewsets.ModelViewSet):\n queryset = District.objects.all()\n role_class = RegisteredStgSerializer\n\n def list(self, request):\n bookingId = request.GET.get('BookingId').strip()\n UserName = request.GET.get('UserName').strip()\n\n yUser = YamahaUser.objects.filter(UserId=str(UserName)).using('YamahaBooking')\n if len(list(yUser)) > 0:\n book = Booking.objects.filter(Id = int(bookingId)).using('YamahaBooking')[0]\n bookQuery = Booking.objects.filter(Id=int(bookingId)).values('UserId__UserName',\n 'UserId__RegsUserId__Mobile',\n 'EntryDate', 'ProductId__ProductName',\n 'ProductId__ProductPrice',\n 'ProductId__ProductColor',\n 'DepositAmount').using('YamahaBooking')\n bookQuery = bookQuery.extra(select={'datestr': \"to_char(EntryDate, 'YYYY-MM-DD HH24:MI:SS')\"})\n bookQueryList = list(bookQuery)\n tempBooking = []\n for item in bookQueryList:\n item['EntryDate'] = str(item['EntryDate'].strftime('%Y-%m-%d %H:%M'))\n tempBooking.append(item)\n\n bookingData = json.dumps(list(tempBooking), cls=DjangoJSONEncoder)\n\n deliveryQuery = 
DeliveryPoint.objects.filter(BookingId=book).values('DeliveryDate',\n 'DealerLocation__DLRPoint',\n 'DealerLocation__NameOfDealer',\n 'DealerLocation__OwnerContactNo',\n 'DealerLocation__FullLocation',\n 'DealerLocation__DistrictId__DistrictName').using(\n 'YamahaBooking')\n deliveryQuery = deliveryQuery.extra(select={'datestr': \"to_char(DeliveryDate, 'YYYY-MM-DD HH24:MI:SS')\"})\n deliveryQueryList = list(deliveryQuery)\n tempDelivery = []\n\n for item in deliveryQueryList:\n item['DeliveryDate'] = str(item['DeliveryDate'].strftime('%Y-%m-%d %H:%M'))\n tempDelivery.append(item)\n\n deliveryData = json.dumps(list(tempDelivery), cls=DjangoJSONEncoder)\n\n response = {'StatusCode': '200', 'booking': bookingData, 'delivery': deliveryData}\n print(\"response \" + str(response))\n return Response(response, content_type=\"application/json\")\n else:\n response = {'StatusCode': '401', 'booking':\"FAIL\"}\n print(\"response \" + str(response))\n return Response(response, content_type=\"application/json\")\n\nclass StocknRemainingdays(viewsets.ModelViewSet):\n queryset = District.objects.all()\n role_class = RegisteredStgSerializer\n\n def list(self, request):\n queryset = Product.objects.filter(Stock__gt=0, Status='1').values('Id', 'ProductName', 'LastBookingDate','Stock').using('YamahaBooking')\n if len(list(queryset)) > 0:\n deliveryQuery = queryset.extra(select={'datestr': \"to_char(LastBookingDate, 'YYYY-MM-DD HH24:MI:SS')\"})\n deliveryQueryList = list(deliveryQuery)\n tempDelivery = []\n #sTime = datetime.datetime.now() tempDelivery.append(item)\n today = datetime.datetime.today()\n _datetime = datetime.datetime.now()\n #d1 = today.strftime(\"%Y-%m-%d\")\n for item in deliveryQueryList:\n item['LastBookingDate'] = str(item['LastBookingDate'].strftime('%Y-%m-%d %H:%M'))\n\n d2 = datetime.datetime.strptime(item['LastBookingDate'], \"%Y-%m-%d %H:%M\")\n diff = abs((d2 - today).days)\n item['RemainingDay'] = diff\n tempDelivery.append(item)\n\n data = json.dumps(list(tempDelivery), cls=DjangoJSONEncoder)\n # data = json.dumps(list(queryset))\n response = {'StatusCode': '200', 'StatusMessage': str(data)}\n print(response)\n return Response(response, content_type=\"application/json\")\n else:\n response = {'StatusCode': '202', 'StatusMessage': 'No Item For Sales'}\n print(response)\n return Response(response, content_type=\"application/json\")\n\n\nclass AllDealerLocation(viewsets.ModelViewSet):\n queryset = District.objects.all()\n role_class = RegisteredStgSerializer\n def list(self, request):\n dealQuery = DealerLocation.objects.all().values('Id','DLRPoint','DistrictId__DistrictName').using(\n 'YamahaBooking')\n data = json.dumps(list(dealQuery), cls=DjangoJSONEncoder)\n response = {'StatusCode': '200', 'StatusMessage': str(data)}\n print(response)\n return Response(response, content_type=\"application/json\")\n\nclass NotificationControll(viewsets.ModelViewSet):\n queryset = District.objects.all()\n role_class = RegisteredStgSerializer\n def list(self, request):\n userName = request.GET.get('UserName')\n user = YamahaUser.objects.filter(UserId=str(userName)).using('YamahaBooking')[0]\n bookQuery = Booking.objects.filter(UserId=user, BookingStatus='Pending').using('YamahaBooking')\n msg = \"\"\n if len(list(bookQuery)) > 0:\n msg = \"0\"\n else :\n msg = \"1\"\n #data = json.dumps(list(dealQuery), cls=DjangoJSONEncoder)\n response = {'StatusCode': '200', 'StatusMessage': msg}\n print(response)\n return Response(response, content_type=\"application/json\")\n\nclass 
LocationTrackerLoginCheck(viewsets.ModelViewSet):\n queryset = District.objects.all()\n role_class = RegisteredStgSerializer\n\n def create(self, request):\n userId = request.POST.get('UserId')\n password = request.POST.get('Password')\n print('--->' + str(userId) + \"---->\"+password)\n\n checkUser = LocTrackUserManager.objects.filter(UserId=userId, Password=password, IsActive='Y').values('UserId', 'UserName').using('LocationTracker')\n print('checkUser ---- > ' + str(len(checkUser)))\n\n if len(checkUser) > 0:\n sTime = datetime.datetime.now()\n #LocTrackUserManager.objects.filter(UserId=userId, Password=password, IsActive='Y').using('LocationTracker').update(IsUsed='Y', EditDate=sTime)\n #data = json.dumps(list(checkUser))\n msg = \"1\"\n response = {'StatusCode': '200', 'StatusMessage': msg}\n print(response)\n print(response)\n return Response(response, content_type=\"application/json\")\n else:\n msg = \"2\"\n response = {'StatusCode': '200', 'StatusMessage': msg}\n return Response(response, content_type=\"application/json\")\n\n msg = \"3\"\n #data = json.dumps(list(dealQuery), cls=DjangoJSONEncoder)\n response = {'StatusCode': '200', 'StatusMessage': msg}\n print(response)\n return Response(response, content_type=\"application/json\")\n\nclass UserPathByDate(viewsets.ModelViewSet):\n queryset = District.objects.all()\n role_class = RegisteredStgSerializer\n\n def list(self, request):\n userId = request.GET.get('UserId')\n srartDate = request.GET.get('StartDateTime')\n endDate = request.GET.get('EndDateTime')\n #print(\"====\"+ str(userId))\n connection = pymongo.MongoClient('mongodb://admin:admin@192.168.101.175:27017')\n\n sDate = str(srartDate).split(\" \")[0]\n sTime = str(srartDate).split(\" \")[1]\n mStartDate = str(sDate).split(\"-\")[0]+\"/\"+str(sDate).split(\"-\")[1]+\"/\"+str(sDate).split(\"-\")[2] +\" \"+ sTime\n\n eDate = str(endDate).split(\" \")[0]\n eTime = str(endDate).split(\" \")[1]\n mEndDate = str(eDate).split(\"-\")[0] + \"/\" + str(eDate).split(\"-\")[1] + \"/\" + str(eDate).split(\"-\")[2] + \" \" + eTime\n\n database = connection['MisConnect']\n collection = database['TestMongo']\n\n #myquery = {\"UserId\":userId, \"EntryTime\":{\"$gte\": \"2019-08-28 00:00:00.000\",\"$lt\":\"2019-08-28 23:59:59.000\"}, \"Latitude\":{\"$gte\": \"0.0\"}}\n myquery = {\"UserId\": userId, \"Mobile\": {\"$gte\": mStartDate, \"$lt\": mEndDate},\n \"Latitude\": {\"$gte\": \"0.0\"}}\n #print(\"==SQL==\"+str(myquery))\n cursor = collection.find(myquery,{\"_id\": 0,\"UserId\":1,\"Mobile\":1,\"Latitude\":1,\"Longitude\":1,\"EntryTime\":1}).sort('EntryTime',-1)\n\n #print(dumps(cursor))\n ms = dumps(cursor)\n #for document in cursor:\n #print(document)\n msg = \"1\"\n # data = json.dumps(list(dealQuery), cls=DjangoJSONEncoder)\n response = {'StatusCode': '200', 'StatusMessage': str(ms)}\n #print(response)\n return Response(response, content_type=\"application/json\")\n\n\nclass RecoveryDataRecive(viewsets.ModelViewSet):\n queryset = District.objects.all()\n role_class = RegisteredStgSerializer\n\n def list(self, request):\n #RecoveryData = request.POST.get('RecoveryData')\n #print('--->' + str(RecoveryData))\n received_json_data = json.loads(request.POST['RecoveryData'])\n\n #print('--received_json_data->' + str(received_json_data))\n #print('--received_json_data->' + str(received_json_data))\n msg = \"1\"\n # data = json.dumps(list(dealQuery), cls=DjangoJSONEncoder)\n response = {'StatusCode': '200', 'StatusMessage': msg}\n #print(response)\n return Response(response, 
content_type=\"application/json\")\n\n def create(self, request):\n #recoverydata = request.POST.get('RecoveryData')\n #received_json_data = json.loads(request.POST['RecoveryData'])\n\n #print('--RecoveryData->' + str(request.data.get('RecoveryData')))\n #print('--ProjectionData->' + str(request.data.get('ProjectionData')))\n\n #json_array = json.load(request.data.get('RecoveryData'))\n json_array = json.loads(request.data.get('RecoveryData'))\n projection_array = json.loads(request.data.get('ProjectionData'))\n capture_array = json.loads(request.data.get('CaptureData'))\n release_array = json.loads(request.data.get('ReleaseData'))\n #print('---------Recovary----' + str(json_array))\n #print('---------Recovary----' + str(json_array))\n #print('---------capture_array----' + str(capture_array))\n #print('---------release_array----' + str(release_array))\n store_list = []\n\n cnxn = pyodbc.connect('DRIVER={SQL Server};SERVER=192.168.100.25;DATABASE=MotorBrInvoiceMirror;UID=sa;PWD=dataport')\n cursor = cnxn.cursor()\n for item in json_array:\n stuffId = item['stuffId']\n mrn = item['mrn']\n customerCode = item['customerCode']\n created_at = item['created_at']+'.123'\n collectiondate = item['collectiondate']\n amount = item['amount']\n supportedby = item['supportedby']\n remarks = item['remarks']\n colorstatus = item['colorstatus']\n latitude = item['latitude']\n longitude = item['longitude']\n LogName = \"COLLECTION_SYNC\"\n query = ''\n\n\n if str(collectiondate) == '':\n query = \"INSERT INTO CollMasterNew(Amount, CustomerCode, mrn, CreateDate, StuffId, SupportedBy, Remarks, ColorStatus, Latitude, Longitude) VALUES(\" + str(\n amount) + \", '\" + customerCode + \"', '\" + str(\n mrn) + \"', '\" + created_at + \"', '\" + stuffId + \"','\"+supportedby+\"','\"+remarks+\"',\"+colorstatus+\",\"+latitude+\",\"+longitude+\")\"\n else:\n query = \"INSERT INTO CollMasterNew(Amount, CustomerCode, mrn, CollectionDate, CreateDate, StuffId, SupportedBy, Remarks, ColorStatus, Latitude, Longitude) VALUES(\" + str(\n amount) + \", '\" + customerCode + \"', '\" + str(\n mrn) + \"', '\" + collectiondate + \"', '\" + created_at + \"', '\" + stuffId + \"','\"+supportedby+\"','\"+remarks+\"',\"+colorstatus+\",\"+latitude+\",\"+longitude+\")\"\n\n #query = \"INSERT INTO CollMasterNew(Amount, CustomerCode, mrn, CollectionDate, CreateDate, StuffId) VALUES(\" + str(amount) + \", '\" + customerCode + \"', '\" + str(mrn) + \"', '\" + collectiondate + \"', '\" + created_at + \"', '\" + stuffId + \"')\"\n #print(query)\n query2 = \"INSERT INTO HitLog(LogName,CustomerCode, AppUserID) VALUES('\" + LogName + \"','\" + customerCode + \"','\" + stuffId + \"')\"\n #print(\"----D--\" + query2)\n cursor.execute(query)\n cursor.execute(query2)\n\n for item in projection_array:\n StaffID = item['staffid']\n ProjectionDate = item['projectiondate']\n CustomerMobile = item['customermobile']\n CustomerCode = item['customercode']\n CreateDate = item['created_at']\n ProjectionAmount = item['amount']\n LogName = \"PROJECTION_SYNC\"\n query = \"INSERT INTO ProjectionMaster(StaffID, CustomerCode, ProjectionAmount, ProjectionDate, CustomerMobile, CreateDate) VALUES('\" + str(StaffID) + \"', '\" + CustomerCode + \"', '\" + ProjectionAmount + \"', '\" + ProjectionDate + \"', '\" + CustomerMobile + \"', '\" + CreateDate + \"')\"\n #query2 = \"INSERT INTO HitLog (LogName, CustomerCode) VALUES('\" + LogName + \"','\" + CustomerCode + \"')\"\n query2 = \"INSERT INTO HitLog(LogName,CustomerCode, AppUserID) VALUES('\" + LogName + \"','\" + 
CustomerCode + \"','\" + StaffID + \"')\"\n #print(\"---A---\"+query2)\n cursor.execute(query)\n cursor.execute(query2)\n customerCode = ''\n for item in capture_array:\n staffID = item['staffid']\n customerCode = item['customercode']\n customerName = item['customername']\n captureDate = item['capturedate']\n captureLocation = item['capturelocation']\n capturetractormodel = item['capturetractormodel']\n captureother = item['captureother']\n LogName = \"CAPTURE_SYNC\"\n #query2 = \"INSERT INTO HitLog (LogName, CustomerCode) VALUES('\" + LogName + \"','\"+customerCode+\"')\"\n query2 = \"INSERT INTO HitLog(LogName,CustomerCode, AppUserID) VALUES('\" + LogName + \"','\" + customerCode + \"','\" + staffID + \"')\"\n #print(\"--B----\" + query2)\n if captureother == '':\n captureother = ''\n\n query = \"INSERT INTO CaptureMaster(StaffID, CustomerCode, CustomerName, CaptureDate, CaptureLocation, CaptureTractorModel, CaptureOther) VALUES('\" + str(\n staffID) + \"', '\" + customerCode + \"', '\" + customerName + \"', '\" + captureDate + \"', '\" + captureLocation + \"', '\" + capturetractormodel + \"', '\" + captureother + \"')\"\n # print(query)\n cursor.execute(query)\n cursor.execute(query2)\n\n for item in release_array:\n staffID = item['staffid']\n customerCode = item['customercode']\n customerName = item['customername']\n releaseDate = item['releasedate']\n amount = item['amount']\n LogName = \"RELEASE_SYNC\"\n query = \"INSERT INTO ReleaseMaster(StaffID, CustomerCode, CustomerName, ReleaseDate, Amount) VALUES('\" + str(\n staffID) + \"', '\" + customerCode + \"', '\" + customerName + \"', '\" + releaseDate + \"', '\" + amount + \"')\"\n # print(query)\n #query2 = \"INSERT INTO HitLog(LogName,CustomerCode) VALUES('\" + LogName + \"','\" + customerCode + \"')\"\n query2 = \"INSERT INTO HitLog(LogName,CustomerCode, AppUserID) VALUES('\" + LogName + \"','\" + customerCode + \"','\" + staffID + \"')\"\n #print(\"--C----\" + query2)\n cursor.execute(query)\n cursor.execute(query2)\n\n cnxn.commit()\n cnxn.close()\n\n msg = \"1\"\n response = {'StatusCode': '200', 'StatusMessage': msg}\n print(response)\n return Response(response, content_type=\"application/json\")\n \nclass RecoveryOfficerLogin(viewsets.ModelViewSet):\n queryset = District.objects.all()\n role_class = RegisteredStgSerializer\n def create(self, request):\n username = request.POST.get('username')\n password = request.POST.get('password')\n cnxn = pyodbc.connect('DRIVER={SQL Server};SERVER=192.168.100.62;DATABASE=RecoveryOfficer;UID=sa;PWD=dataport')\n cursor = cnxn.cursor()\n sql = \"SELECT COUNT(*) as c FROM UserManager WHERE UserName='\"+str(username)+\"' and Password='\"+str(password)+\"';\"\n cursor.execute(sql) # calls PROCEDURE named LOG_MESSAGE which resides in MY_UTIL Package\n results = cursor.fetchall()\n rcount = ''\n for re in results:\n rcount = re[0]\n\n cursor.close()\n cnxn.commit()\n cnxn.close()\n\n msg=''\n if rcount>0:\n msg = \"1\"\n else:\n msg = \"2\"\n response = {'StatusCode': '200', 'StatusMessage': msg}\n print(response)\n\n return Response(response, content_type=\"application/json\")\n\nclass MotorService(viewsets.ModelViewSet):\n queryset = District.objects.all()\n role_class = RegisteredStgSerializer\n def create(self, request):\n try:\n period = request.POST.get('dateinput')\n cnxn = pyodbc.connect('DRIVER={SQL Server};SERVER=192.168.100.62;DATABASE=ServiceTrack;UID=sa;PWD=dataport')\n cursor = cnxn.cursor()\n cursor.execute(\"Exec all_service_details '\" + period +\"'\")\n results = 
cursor.fetchall()\n item = {}\n tempDelivery = []\n for row in results:\n item = {}\n\n item['ServiceDetailsId'] = row[0]\n item['StaffId'] = row[1]\n item['TechnicianName'] = row[2]\n item['CustomerName'] = (row[3].encode(\"utf-8\").decode(\"utf-8\", \"ignore\"))\n item['Mobile'] = row[4]\n\n item['HoursProvided'] = row[5]\n item['DateOfInstallation'] = row[6]\n item['ServiceDemandDate'] = row[7]\n item['ServiceStartDate'] = row[8]\n item['ServiceEndDate'] = row[9]\n\n item['VisitDate'] = row[10]\n item['ServiceType'] = row[11]\n tempDelivery.append(item)\n \n data = json.dumps(list(tempDelivery), cls=DjangoJSONEncoder)\n cursor.close()\n cnxn.commit()\n cnxn.close()\n print(period)\n response = {'StatusCode': '200', 'StatusMessage': data}\n return Response(response, content_type=\"application/json\")\n except Exception as e:\n return Response({'StatusCode': '400', 'StatusMessage': str(e)}, content_type=\"application/json\")\n\n \nclass RecoveryOfficerCollection(viewsets.ModelViewSet):\n queryset = District.objects.all()\n role_class = RegisteredStgSerializer\n\n def create(self, request):\n try:\n json_array = json.loads(request.data.get('RecoveryData'))\n projection_array = json.loads(request.data.get('ProjectionData'))\n\n print('----0000--RecovaryData ---'+str(json_array))\n #print('----0000--XXXX ---' + str(projection_array))\n cnxn = pyodbc.connect('DRIVER={SQL Server};SERVER=192.168.100.62;DATABASE=RecoveryOfficer;UID=sa;PWD=dataport')\n cursor = cnxn.cursor()\n for item in json_array:\n #print(\"--------Collections------------\")\n mobileId = item['id']\n stuffId = item['stuffId']\n customerCode = item['customerCode']\n collectedamount = item['collectedAmount']\n mrn = item['mrn']\n nextcommitmentdate = item['nextCommitmentDate']\n created_at = item['created_at']\n supportedby = item['supportedBy']\n remarks = item['remarks']\n #colorstatus = item['colorstatus']\n latitude = item['latitude']\n longitude = item['longitude']\n\n query = \"INSERT INTO Collections (UserId, MobileId, CustomerCode, CollectedAmount, Mrno, NextCommitmentDate, SupportedBy, Remarks, Latitude, Longitude, MobileCreated_at) VALUES ('\" + str(\n stuffId) + \"','\" + str(mobileId) + \"','\" + str( customerCode) + \"','\" + str(collectedamount) + \"','\" \\\n + str(mrn) + \"','\"+ nextcommitmentdate +\"','\"+ str(supportedby) +\"','\" + str(remarks) + \"','\" \\\n + str(latitude) + \"','\" + str(longitude) + \"','\"+created_at+\"')\"\n\n #print(\" --- SQL --- \"+ query)\n cursor.execute(query)\n\n for item in projection_array:\n #print(\"--------YES----------------\")\n mobileId = item['id']\n userId = item['staffid']\n customercode = item['customercode']\n dueamount = item['dueamount']\n overdueage = item['overdueage']\n support = item['support']\n mobileno = item['mobileno']\n actiontaken = item['actiontaken']\n latitude = item['latitude']\n longitude = item['longitude']\n created_at = item['created_at']\n\n #print(\"----\"+str(mobileId)+\"---\"+str(customercode))\n\n query = \"INSERT INTO Projection(UserId, MobileId, CustomerCode, Dueamount, OverDueAge, Support, MobileNo, ActionTaken, Latitude, Longitude, MobileCreated_at) VALUES('\" + str(\n userId) + \"', '\" + str(mobileId) + \"', '\" + str(customercode) + \"', '\" + str(dueamount) + \"', '\" + str(overdueage) + \"', '\" + str(support)\\\n +\"','\"+str(mobileno)+\"','\"+str(actiontaken)+\"','\"+ str(latitude)+\"','\"+str(longitude)+\"','\"+ str(created_at) +\"')\"\n #print(\"000000AAA0000--------SQL \" + query)\n cursor.execute(query)\n\n cnxn.commit()\n 
cnxn.close()\n\n msg = \"1\"\n response = {'StatusCode': '200', 'StatusMessage': msg}\n print(response)\n\n return Response(response, content_type=\"application/json\")\n except Exception as ex:\n return Response({'StatusCode': '500', 'StatusMessage': 'Exception Occured. Details: ' + str(ex)})\n\n\n\n\n\nclass MyAppCustomer(viewsets.ModelViewSet):\n queryset = District.objects.all()\n role_class = RegisteredStgSerializer\n\n def create(self, request):\n userId = request.POST.get('UserId')\n import re\n #print('--UserId--'+ str(userId))\n cnxn = pyodbc.connect(\n 'DRIVER={SQL Server};SERVER=192.168.100.25;DATABASE=MotorBrInvoiceMirror;UID=sa;PWD=dataport')\n cursor = cnxn.cursor()\n today = datetime.datetime.today()\n mToday = re.split(\" \", str(today))[0]\n\n fDate = re.split(\"-\", str(mToday))[0]+'-'+re.split(\"-\", str(mToday))[1]+'-'+'01'\n #fDate = '2019-10-01'\n\n #print('---' + fDate)\n\n sqlst = \"SELECT aum.UserName, cas.Code, cas.CustomerName, ForMonth, '0' AS SyncStatus FROM AppUserManager aum INNER JOIN Territory TT ON aum.TerritoryCode = TT.TTYCode INNER JOIN CreditAnalysisStg cas ON cas.Territory LIKE TT.TTYName WHERE (cas.ForMonth = '\"+fDate+\"') AND aum.UserName = '\"+str(userId)+\"'\"\n\n #print('--'+str(sqlst))\n\n cursor.execute(sqlst)\n results = cursor.fetchall()\n tempDelivery = []\n # sTime = datetime.datetime.now() tempDelivery.append(item)\n item = {}\n for row in results:\n item = {}\n\n item['userName'] = row[0]\n item['code'] = row[1]\n item['customerName'] = row[2]\n item['forMonth'] = row[3]\n item['syncStatus'] = row[4]\n #item['syncDate'] = row[5]\n tempDelivery.append(item)\n #print('--'+str(userName)+'--'+str(code)+'--'+str(customerName)+'--'+str(forMonth)+'--'+str(syncStatus)+'--'+str(syncDate))\n\n data = json.dumps(list(tempDelivery), cls=DjangoJSONEncoder)\n msg = ''\n response = {'StatusCode': '200', 'StatusMessage': str(data)}\n print(response)\n return Response(response, content_type=\"application/json\")\n\nclass Myrabbitmq(viewsets.ModelViewSet):\n queryset = District.objects.all()\n role_class = RegisteredStgSerializer\n\n def create(self, request):\n userId = request.POST.get('userId')\n doctorType = request.POST.get('doctorType')\n doctorCode = request.POST.get('doctorCode')\n institution = request.POST.get('institution')\n productCode = request.POST.get('productCode')\n prescriptionDate = request.POST.get('prescriptionDate')\n mobileNo = request.POST.get('mobileNo')\n IMEI = request.POST.get('IMEI')\n latitude = request.POST.get('latitude')\n longitude = request.POST.get('longitude')\n competitorBrandCode = request.POST.get('competitorBrandCode')\n\n image = request.FILES.get('prescription_image')\n #image = request.FILES.get('image')\n _datetime = datetime.datetime.now()\n datetime_str = _datetime.strftime(\"%Y-%m-%d-%H-%M-%S-%f\")\n\n fs = FileSystemStorage(location=settings.MEDIA_URL + 'rximage/')\n iName = str.replace(image.name,':','-')\n iName = str.replace(iName,' ','-')\n print('----'+iName)\n #fs.save(datetime_str + \"-\" + image.name, image)\n #fs.save(datetime_str + \"-\" + iName, image)\n fs.save(iName, image)\n\n print('UserId->'+str(userId) + \"doctorType->\"+str(doctorType) + \"->\"+ str(doctorCode) + \"->\"+str(institution)+\"->\"+ str(productCode)+\"->\"+str(latitude))\n\n data = {\"userId\": str(userId), \"doctorType\": str(doctorType), \"doctorCode\": str(doctorCode), \"institution\": str(institution),\n \"productCode\": str(productCode), \"prescriptionDate\": str(prescriptionDate), \"mobileNo\": str(mobileNo), \"IMEI\": 
str(IMEI),\n \"latitude\": str(latitude), \"longitude\": str(longitude), \"competitorBrandCode\": str(competitorBrandCode),\"imgname\":str(iName)}\n\n json_data = json.dumps(data)\n\n\n\n rabbit_url = 'amqp://admin:admin@192.168.101.175:5672/'\n\n conn = Connection(rabbit_url)\n\n channel = conn.channel()\n\n exchange = Exchange('test', type = 'direct')\n\n producer = Producer(exchange=exchange, channel=channel, routing_key='BOB')\n\n queue = Queue(name='rximage', exchange = exchange, routing_key ='BOB')\n queue.maybe_bind(conn)\n queue.declare()\n\n producer.publish(str(json_data))\n\n response = {'StatusCode': '200', 'StatusMessage': 'OK'}\n return Response(response, content_type=\"application/json\")\n\nclass MayaDataProvider(viewsets.ModelViewSet):\n queryset = District.objects.all()\n role_class = RegisteredStgSerializer\n\n def list(self, request):\n #RecoveryData = request.POST.get('RecoveryData')\n #print('--->' + str(RecoveryData))\n #received_json_data = json.loads(request.POST['RecoveryData'])\n\n twitter = OAuth1Session('1cb615ef0b50b640324bb7e614551f7b',\n client_secret='edf2e5500bd06bdcf0a48182236917af',\n resource_owner_key='bfddd465b6ea96755e5b170c7dc5adec',\n resource_owner_secret='d4b2655c7355faa644dd9dcf20948b57')\n url = 'http://staging.banglameds.com.bd/api/rest/productlist?name=napa&page='\n r = twitter.get(url)\n print(\"=====\"+str(r.content))\n msg = \"1\"\n # data = json.dumps(list(dealQuery), cls=DjangoJSONEncoder)\n response = {'StatusCode': '200', 'StatusMessage': msg}\n print(response)\n return Response(response, content_type=\"application/json\")\n\n\n\ndef SendSMS(number, text):\n data = {'smstext': text,\n 'number': number}\n r = requests.post(url='http://192.168.100.8/fifaabecab/Authenticate/sendSMS', data=data)\n return\n\ndef random_with_N_digits(n):\n range_start = 10**(n-1)\n range_end = (10**n)-1\n return randint(range_start, range_end)\n\t\n\t\nclass PumpCollection(viewsets.ModelViewSet):\n queryset = District.objects.all()\n role_class = RegisteredStgSerializer\n\n def create(self, request):\n try:\n json_array = json.loads(request.data.get('RecoveryData'))\n\t \t\t\n projection_array = json.loads(request.data.get('ProjectionData'))\n\n print('----0000--RecovaryData ---'+str(json_array))\n #print('----0000--XXXX ---' + str(projection_array))\n cnxn = pyodbc.connect('DRIVER={SQL Server};SERVER=192.168.100.62;DATABASE=PumpTrack;UID=sa;PWD=dataport')\n cursor = cnxn.cursor()\n for item in json_array:\n #print(\"--------Collections------------\")\n id = item[\"id\"]\n address = item['address']\n attendate = item['attendate']\n complaindate = item['complaindate']\n staffid = item['staffid']\n customerCode = item['customercode']\n customername = item['customername']\n mobileno = item['mobileno']\n model = item['model']\n mrno = item['mrno']\n portfolio = item['portfolio']\n servicecharge = item['servicecharge']\n created_at = item['created_at']\n NoOfServiceTime = \"1\"\n NatureOfComplain = \"1\"\n ReasonFault=\"1\"\n ActionTaken = \"1\"\n SpareParts = \"1\"\n SPartsQuantity = \"1\"\n Warranty = \"1\"\n WarrantyCardNo = \"1\"\n Status = \"1\"\n\n #colorstatus = item['colorstatus']\n latitude = item['latitude']\n longitude = item['longitude']\n\n query = \"INSERT INTO ServiceMaster ( CustomerCode , CustomerName, Address , MobileNumber ,ServicePortfolio,ModelName, \" \\\n \"PurchaseDate, NoOfServiceTime,ComplainDate,AttendDate,NatureOfComplain,ReasonFault,ActionTaken,SpareParts ,\" \\\n \" SPartsQuantity, Warranty, WarrantyCardNo, ServiceCharge, MRNo 
,Status , EntryDate,EditDate,EntryBy,\" \\\n \" MobileInsertID ,Latitude , Longitude) VALUES ('\" + str(\n customerCode) + \"','\" + str(customername) + \"','\" + str( address) + \"','\" + str(mobileno) + \"','\" \\\n + str(portfolio) + \"','\"+ str(model) +\"','\"+ str(created_at) +\"','\" + str(NoOfServiceTime) + \"','\" \\\n + str(NatureOfComplain) + \"','\" + str(ReasonFault) + \"','\" + str(ActionTaken) + \"','\" + str(SpareParts) + \"','\" \\\n + str(SPartsQuantity) + \"','\" + str(Warranty) + \"','\" + str(WarrantyCardNo) + \"','\" + str(servicecharge) + \"','\" \\\n + str(mrno) + \"','\" + str(Status) + \"','\" + str(created_at) + \"','\" + str(created_at) + \"','\" \\\n + str(staffid) + \"','\" + str(id) + \"','\" + str(latitude) + \"','\" + str(longitude) +\"')\"\n\n #print(\" --- SQL --- \"+ query)\n cursor.execute(query)\n\n for item in projection_array:\n #print(\"--------YES----------------\")\n mobileId = item['id']\n userId = item['staffid']\n customercode = item['customercode']\n dueamount = item['dueamount']\n overdueage = item['overdueage']\n support = item['support']\n mobileno = item['mobileno']\n actiontaken = item['actiontaken']\n latitude = item['latitude']\n longitude = item['longitude']\n created_at = item['created_at']\n\n #print(\"----\"+str(mobileId)+\"---\"+str(customercode))\n\n query = \"INSERT INTO Projection(UserId, MobileId, CustomerCode, Dueamount, OverDueAge, Support, MobileNo, ActionTaken, Latitude, Longitude, MobileCreated_at) VALUES('\" + str(\n userId) + \"', '\" + str(mobileId) + \"', '\" + str(customercode) + \"', '\" + str(dueamount) + \"', '\" + str(overdueage) + \"', '\" + str(support)\\\n +\"','\"+str(mobileno)+\"','\"+str(actiontaken)+\"','\"+ str(latitude)+\"','\"+str(longitude)+\"','\"+ str(created_at) +\"')\"\n #print(\"000000AAA0000--------SQL \" + query)\n cursor.execute(query)\n\n cnxn.commit()\n cnxn.close()\n\t \n msg = \"1\"\n response = {'StatusCode': '200', 'StatusMessage': msg}\n print(response)\n\n return Response(response, content_type=\"application/json\")\n except Exception as ex:\n return Response({'StatusCode': '500', 'StatusMessage': 'Exception Occured. 
Details: ' + str(ex)})\n","sub_path":"YamahaBookingApp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":64032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"621245858","text":"# -*- coding: utf-8 -*-\nfrom odoo import fields, models,api, _\nfrom odoo.exceptions import UserError\nfrom datetime import datetime\n\nclass SaleorderMakeOrder(models.TransientModel):\n\t_name='sale.order.make.order'\n\n\tselected_file = fields.Many2one('glass.pdf.file','Archivo a incluir',domain=\"[('is_used','=',False),('is_editable','=',True),('sale_id','=',sale_id)]\")\n\tfile_crokis = fields.Binary('Croquis',related=\"selected_file.pdf_file\")\n\tfile_name = fields.Char('Nombre del archivo',related=\"selected_file.pdf_name\")\n\tdestinity_order = fields.Selection([('local','En la ciudad'),('external','En otra ciudad')],u'Lugar de entrega',default=\"local\")\n\tsend2partner=fields.Boolean('Entregar en ubicacion del cliente',default=False)\n\tin_obra = fields.Boolean('Entregar en Obra')\n\tobra_text = fields.Char(u'Descripción de Obra')\n\tsale_id = fields.Many2one('sale.order','Orden de venta')\n\tis_used = fields.Boolean('Usado',related=\"selected_file.is_used\")\n\tis_editable = fields.Boolean('Es editable',related=\"selected_file.is_editable\")\n\tcomercial_area=fields.Selection([('distribucion',u'Distribución'),('obra','Obra'),('proyecto','Proyecto')],u'Área Comercial',default=\"distribucion\" )\n\n\n\t@api.onchange('selected_file')\n\tdef onchange_selected_file(self):\n\t\tif self.selected_file:\n\t\t\tres = {}\n\t\t\tids = self.env['glass.pdf.file'].search([('sale_id','=',self.sale_id.id)]).ids\n\t\t\tres['domain'] = {'selected_file':[('id','in',ids)]}\n\t\t\treturn res\n\n\t@api.multi\n\tdef create_production(self):\n\t\tself.ensure_one()\n\t\tvals={\n\t\t\t\t#'file_crokis':self.file_crokis,\n\t\t\t\t'selected_file':self.selected_file,\n\t\t\t\t'file_name':self.file_name,\n\t\t\t\t'destinity_order':self.destinity_order,\n\t\t\t\t'send2partner':self.send2partner,\n\t\t\t\t'in_obra':self.in_obra,\n\t\t\t\t'obra_text':self.obra_text,\n\t\t\t\t'comercial_area':self.comercial_area,\n\t\t\t\t'croquis_path':self.selected_file.path_pdf,\n\t\t\t}\n\t\tneworder = self.sale_id.makeproduction(vals)\n\t\tself.selected_file.write(\n\t\t\t\t{\n\t\t\t\t\t'is_editable':False,\n\t\t\t\t\t'is_used':True,\n\t\t\t\t\t'op_id':neworder.id,\n\t\t\t\t})\n\t\tmodule = __name__.split('addons.')[1].split('.')[0]\n\t\tview = self.env.ref('%s.view_glass_order_tree' % module)\n\t\tidact=False\n\t\treturn {\n\t\t\t'name':neworder.name,\n\t\t\t'type': 'ir.actions.act_window',\n\t\t\t'res_id':neworder.id,\n\t\t\t'res_model': 'glass.order',\n\t\t\t'view_mode': 'form',\n\t\t\t'view_type': 'form',\n\t\t}\n\t\t# ctx = self._context.copy()\n\t\t# ctx.update({'domain':[('sale_order_id','=',self._context['active_id'])]})\t\t\n\t\t# data = {\n\t\t# \t\t'name':u'Órdenes de producción',\n\t\t# \t\t'view_type':'form',\n\t\t# \t\t'view_mode':'tree,form',\n\t\t# \t\t'res_model':'glass.order',\n\t\t# \t\t'type':'ir.actions.act_window',\n\t\t# \t\t'domain':[('sale_order_id','=',self._context['active_id'])]\n\t\t# }\n\t\t\n\t\t# return data\t\t\n\n\t@api.one\n\tdef savecroquis(self):\n\t\tif self.file_crokis:\n\t\t\tself.is_editable = False\n\t\t\tself.production_id.sketch = self.file_crokis\n\t\treturn True\n\n\t@api.model\n\tdef default_get(self,fields):\n\t\tres = super(SaleorderMakeOrder,self).default_get(fields)\n\t\tsale_id = self._context['active_id']\n\t\tsaleact = 
self.env['sale.order'].browse(sale_id)\n\t\tres.update({\n\t\t\t'sale_id':sale_id,\n\t\t\t'selected_file':saleact.files_ids[0].id if saleact.files_ids else False,\n\t\t\t})\n\t\treturn res","sub_path":"Modulos/glass_production_order/wizard/sale_order_makeorder.py","file_name":"sale_order_makeorder.py","file_ext":"py","file_size_in_byte":3185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"654491003","text":"import logging\nfrom pyAitu import executor, Bot, Dispatcher\nfrom pyAitu.models import Message, Options, Form, Header, FormClosed, ValidationRule, Submit, FormAction, MediaPicker\nfrom pyAitu.models.constants.option_media_type import PHOTO, VIDEO\n\nAPI_TOKEN = 'YOUR_API_TOKEN'\n\nbot = Bot(token=API_TOKEN)\ndp = Dispatcher(bot)\n\nlogging.basicConfig(level=logging.INFO)\n\n\n@dp.message_handler()\nasync def send_ui(message: Message):\n header = Header(\n _type=\"toolbar\",\n title=\"Title\",\n options=Options(\n closeable=True\n )\n )\n image_picker = MediaPicker(\n content_id=\"image_picker1\",\n title=\"Image media picker title\",\n options=Options(\n media_type=PHOTO,\n height=20,\n width=80,\n max_count=1\n ),\n validations_rules=[ValidationRule(type=\"required\", value=\"1\", error=\"Выберите хотя бы 1 файл\")]\n )\n video_picker = MediaPicker(\n content_id=\"video_picker1\",\n title=\"Video media picker title\",\n options=Options(\n should_open_editor=False,\n media_type=VIDEO,\n height=30,\n width=80,\n max_count=2\n ),\n validations_rules=[ValidationRule(type=\"required\", value=\"1\", error=\"Выберите хотя бы 1 файл\")]\n )\n submit = Submit(\n content_id=\"submit_id\",\n title=\"Send\",\n form_action=FormAction(\n action=\"submit_form\"\n )\n )\n form = Form(\n _id=\"media_picker_form\",\n header=header,\n content=[image_picker, video_picker, submit],\n options=Options(fullscreen=True)\n )\n await bot.send_form(message.chat.id, form=form)\n\n\n@dp.form_closed_handler()\nasync def get_form_closed(fc: FormClosed):\n await bot.send_message(fc.chat.id, \"form closed\")\n\n\nif __name__ == '__main__':\n executor.start_polling(dp)\n","sub_path":"examples/media_picker_bot.py","file_name":"media_picker_bot.py","file_ext":"py","file_size_in_byte":1914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"97657583","text":"#! 
/usr/bin/env python3\n\nimport airsim\nimport rospy\nfrom std_msgs.msg import Float32\nfrom std_msgs.msg import Float64MultiArray\nfrom std_msgs.msg import MultiArrayLayout\nfrom std_msgs.msg import MultiArrayDimension\n\nimport numpy as np\nfrom pynput.keyboard import Key,Listener\n\nclient = airsim.MultirotorClient()\nclient.confirmConnection()\ncontrol_pub = rospy.Publisher('controls/to_loc_by_vel', Float64MultiArray, queue_size=1)\n# Initializing ROS node\nrospy.init_node(\"keyboard_controller\")\nglobal shift\nshift = 2\n\ndef publish(vals):\n command = Float64MultiArray()\n layout = MultiArrayLayout()\n dimension = MultiArrayDimension()\n dimension.label = \"keyboard_control\"\n dimension.size = 4\n dimension.stride = 4\n layout.data_offset = 0\n layout.dim = [dimension]\n command.layout = layout\n command.data = vals\n control_pub.publish(command)\n \ndef move(direction):\n global shift\n try:\n position = client.getMultirotorState().kinematics_estimated.position\n #target_pos = np.array([position.x_val,position.y_val,position.z_val])\n target_pos = np.array([0,0,0])\n #print(\"{0}\".format(direction))\n direction = direction.char\n if direction == 'e': # up\n target_pos = np.array([0,0,-1*shift])\n elif direction == 'q': # down\n target_pos = np.array([0,0,shift])\n elif direction == 'w': # forward\n target_pos = np.array([shift,0,0])\n elif direction == 'a': # left\n target_pos = np.array([0,-1*shift,0])\n elif direction == 's': # back\n target_pos = np.array([-1*shift,0,0])\n elif direction == 'd': # right\n target_pos = np.array([0,shift,0])\n \n publish(target_pos.tolist()+[5])\n except:\n pass\n \n \n# Collect all event until released\nwith Listener(on_press = move) as listener:\n listener.join()\n\n","sub_path":"src/keyboard_control.py","file_name":"keyboard_control.py","file_ext":"py","file_size_in_byte":1896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"602086382","text":"# -*- coding: utf-8 -*-\n\n\n#***********************************************************************************************\n#*** External imports.\n#***********************************************************************************************\nimport sys\nimport os\nimport re\nimport subprocess\n\n\n#***********************************************************************************************\n#*** Module classes and definitions.\n#***********************************************************************************************\n\ndef getCurrentIP():\n if 'linux' in sys.platform:\n ip_addr=subprocess.Popen(\"/sbin/ifconfig\",stdout=subprocess.PIPE).stdout\n else:\n ip_addr=subprocess.Popen(\"ipconfig\",stdout=subprocess.PIPE).stdout\n data=ip_addr.read().strip().split(\"\\n\")\n ip_addr.close()\n# \n pattern=re.compile(r\"(10)\\.(0)\\.(\\d{1,2}|1\\d{2}|2[0-4]\\d|25[0-4])\\.(\\d{1,3})\")\n C_ADDRESS=pattern.findall(str(data)) \n if C_ADDRESS:\n return '.'.join( C_ADDRESS[0] )\n else :\n pattern=re.compile(r\"(30)\\.(0)\\.(\\d{1,2}|1\\d{2}|2[0-4]\\d|25[0-4])\\.(\\d{1,3})\")\n C_ADDRESS=pattern.findall(str(data)) \n return '.'.join( C_ADDRESS[0] )\n\ndef location( ):\n theIP = getCurrentIP()\n if '30' == theIP.split('.')[0]:\n return False\n else :\n return True\n\n\n\nenv_username = os.getenv('USERNAME') if location() else os.getenv( 'USER' )\n\nclass iPipelineInit(object):\n\n def initialize(self): \n if 'linux' in sys.platform: \n self.userName = re.search('d\\d{5}' , env_username ).group()\n else :\n self.userName = 'idea' \n 
self.currProjectName = \"\"\n self.currentUser = \"default\"\n if location():\n superUserList = ['d10021', 'd10230', 'd10060', 'd10165' , 'd10058' , 'd10068' , 'd10166' , 'd10218' ]\n else :\n superUserList = ['d11017' ]\n self.isSuperUser = True if self.userName in superUserList else False\n if self.currProjectName == \"\":\n self.currOpenType = \"\"\n self.currOpenCategory = \"\"\n self.currOpenVersion = 0\n self.currOpenLevel1 = \"\"\n self.currOpenLevel2 = \"\"\n self.currOpenLevel3 = \"\"\n self.currOpenTab = 0\n self.currOpenProjectName = \"\"\n self.currProjectPath = \"\"\n self.libPath = \"\"\n self.shotPath = \"\"\n self.scriptsPath = \"\"\n self.rendersPath = \"\"\n self.particlesPath = \"\"\n self.texturesPath = \"\"\n self.archivePath = \"\"\n self.deletePath = \"\"\n self.workshopFormat = \"\"\n self.masterFormat = \"\"\n self.workshopName = \"\"\n self.masterName = \"\"\n else:\n pass\n\n def reset(self):\n self.currProjectName = \"\"\n self.initialize()\n \n ","sub_path":"Core/iPipelineInit.py","file_name":"iPipelineInit.py","file_ext":"py","file_size_in_byte":2889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"114035192","text":"import pandas as pd\nfrom bs4 import BeautifulSoup\nimport re\nfrom nltk.corpus import stopwords\nimport nltk.data\nimport numpy as np\nfrom wordcloud import WordCloud\nfrom matplotlib import pyplot as plt\nfrom nltk.stem import SnowballStemmer\nfrom nltk.stem import WordNetLemmatizer\nimport sys\nimport os\nfrom nltk.tokenize import WordPunctTokenizer\nfrom collections import defaultdict\nimport nltk\nimport pickle\nimport chardet\n\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.text import text_to_word_sequence\nfrom keras.utils.np_utils import to_categorical\nfrom nltk import tokenize\n\nclass Datasets():\n lemma = WordNetLemmatizer()\n\n def get_sent_detector(self):\n\n sent_detector = nltk.data.load('tokenizers/punkt/english.pickle')\n return sent_detector\n\n\n #get datasets from the csv--------\n #2:\n def get_train_data(self):\n\n train_data = pd.read_csv(\"/Users/xiaoyiwen/Desktop/Word2vec_Sentiment_Analysis/MasterProject/data_Preprocessing/Datasets/kaggle_data/labeledTrainData.tsv\",header = 0, delimiter='\\t', quoting=3)\n return train_data\n\n #3:\n def get_test_data(self):\n\n test_data = pd.read_csv(\"/Users/xiaoyiwen/Desktop/Word2vec_Sentiment_Analysis/MasterProject/data_Preprocessing/Datasets/kaggle_data/testData.tsv\",header=0, delimiter='\\t',quoting=3)\n return test_data\n\n #4:\n def get_unlabeled_data(self):\n\n\n unlabeled_data = pd.read_csv(\"/Users/xiaoyiwen/Desktop/Word2vec_Sentiment_Analysis/MasterProject/data_Preprocessing/Datasets/kaggle_data/unlabeledTrainData.tsv\", header=0, delimiter='\\t', quoting=3)\n return unlabeled_data\n def get_polarity_data(self):\n polarity = pd.read_csv(\"/Users/xiaoyiwen/Desktop/MasterProject/MasterProject/data_Preprocessing/Datasets/kaggle_data/polarity.tsv\", header=0, delimiter='\\t', quoting=3)\n return polarity\n #5:\n # get all the review\n def get_clean_review_lists(self,review_datasets):\n clean_review_datasets = []\n for review in review_datasets:\n clean_review_datasets.append(Datasets.clean_text(review))\n return clean_review_datasets\n\n\n #clean the datasets------------\n #6���\n # preprocess the text and split each review to a list of tokens\n def clean_text(review):\n # 1: remove the tags\n review = BeautifulSoup(review).get_text()\n # 2.remove the non alpha\n text = re.sub(\"[^a-zA-Z]\", \" 
\", review)\n # 3.remove split the tokens\n lowercase = text.lower().split()\n # 4:remove the stopwords\n stops = set(stopwords.words(\"English\"))\n words = [w for w in lowercase if not w in stops]\n\n return words\n\n def clean_text_without_filter_stopwords(review):\n # 1: remove the tags\n review = BeautifulSoup(review).get_text()\n # 2.remove the non alpha\n text = re.sub(\"[^a-zA-Z]\", \" \", review)\n # 3.remove split the tokens\n words = text.lower().split()\n\n return words\n\n\n def LDA_preprocessing( self, review,do_stem = False):\n # 1: remove the tags\n review = BeautifulSoup(review).get_text()\n # 2.remove the non alpha\n text = re.sub(\"[^a-zA-Z]\", \" \", review)\n # 3.remove split the tokens\n lowercase = text.lower().split()\n # 4:remove the stopwords\n stops = set(stopwords.words(\"English\"))\n words = [w for w in lowercase if not w in stops]\n if(do_stem ==True):\n words = [Datasets.lemma.lemmatize(word) for word in words]\n return words\n\n #7:\n #split each review into sentences and clean the sentences -> to a list\n def transfer_review_to_sentences(review, sent_detector):\n\n #split the review to a list\n sentences = sent_detector.tokenize(review.strip())\n\n sentencs_list = []\n for sentence in sentences:\n if(len(sentence)>0):\n words = Datasets.clean_text_without_filter_stopwords(sentence)\n sentencs_list.append(words)\n\n return sentencs_list\n\n #8:\n def transfer_datasets_to_sentences(self,datasets,sent_detector):\n\n all_sentences = []\n\n for review in datasets:\n all_sentences += Datasets.transfer_review_to_sentences(review,sent_detector)\n\n return all_sentences\n\n\n\n\n #9:get averaged vector review\n def return_averaged_vector_review(dimension, review, model):\n\n review_vector = np.zeros((dimension,), dtype=\"float32\")\n\n vocab = set(model.wv.index2word)\n\n num_words = 0\n\n for word in review:\n if (word in vocab):\n num_words += 1\n review_vector = np.add(review_vector, model[word])\n\n # divide the result\n review_vector = np.divide(review_vector, num_words)\n\n return review_vector\n\n\n #10:\n def return_total_vector(self, reviews, model, dimension):\n\n # should be 25000(train reviews) * 300(dimension)\n matrix = np.zeros((len(reviews), dimension), dtype=\"float32\")\n\n count = 0\n\n for review in reviews:\n\n if (count % 1000 == 0):\n print(\"Review %d of %d\" % (count, len(reviews)))\n\n matrix[count] = Datasets.return_averaged_vector_review(dimension, review, model)\n count = count + 1\n\n return matrix\n\n\n #11:\n # plot word cloud\n def plot_word_cloud(self, terms):\n text = terms.index\n text = ' '.join(list(text))\n\n # lower the max fontsize\n wordcloud = WordCloud(max_font_size=40).generate(text)\n plt.figure(figsize=(25, 25))\n plt.imshow(wordcloud, interpolation=\"bilinear\")\n plt.axis(\"off\")\n plt.show()\n\n\n\n\n # #11: build up a vocab\n # def build_up_vocab(self,dataset,vocab_path):\n # #\n # if(os.path.exists(vocab_path)):\n # vocab = open(vocab_path,'rb')\n # vocab = pickle.load(vocab)\n # print(\"vocab successfully loaded!\")\n #\n # else:\n # word_frequent = defaultdict(int)\n #\n # for review in dataset:\n # words = Datasets.clean_text_without_filter_stopwords(review)\n # for word in words:\n # word_frequent[word] +=1\n # print(\"loaded finished\")\n #\n # #create vocab\n # vocab = {}\n # i = 1\n # vocab['UNKNOW_TOKEN'] = 0\n # for word, freq in word_frequent.items():\n # if freq>5:\n # vocab[word] =i\n # i +=1\n #\n # #save the vocab\n # with open(vocab_path,'wb') as file:\n # pickle.dump(vocab,file)\n # 
print(len(vocab))\n # print(\"vocab save finished\")\n #\n # return vocab\n\n\n\n#word2vec get the tokens for trainig\n def get_normalized_data(self,dataset_name):\n reviews = []\n review_sentences = []\n review_tokens = []\n\n\n data_main = Datasets()\n if(dataset_name ==\"train\"):\n train_data = data_main.get_train_data()\n data = train_data\n if(dataset_name ==\"test\"):\n test_data = data_main.get_test_data()\n data = test_data\n if(dataset_name ==\"unlabeled\"):\n unlabeled_data = data_main.get_unlabeled_data()\n data = unlabeled_data\n if (dataset_name == \"polarity\"):\n data = data_main.get_polarity_data()\n\n print(\"here\")\n # clean the test dataset\n for review in data[\"review\"]:\n text = BeautifulSoup(review)\n cleaned_text = text.get_text().encode('ascii','ignore')\n cleaned_string = cleaned_text.decode('utf-8')\n cleaned_review = data_main.clean_text_to_text(cleaned_string)\n reviews.append(cleaned_review)\n sentence = tokenize.sent_tokenize(cleaned_review)\n # number of the review\n review_sentences.append(sentence)\n\n for s in sentence:\n if(len(s)>0):\n tokens = text_to_word_sequence(s)\n #filter out non-alpha\n tokens = [token for token in tokens if token.isalpha()]\n #filter out those short letters\n tokens = [t for t in tokens if len(t)>1]\n review_tokens.append(tokens)\n\n\n return reviews, review_sentences, review_tokens\n\n def clean_text_to_text(self,review):\n #remove the the',\",\\\n review = re.sub(r\"\\\\\", \"\", review)\n review = re.sub(r\"\\'\", \"\", review)\n review = re.sub(r\"\\\"\", \"\", review)\n #return the lower case\n text = review.strip().lower()\n\n return text","sub_path":"get_data.py","file_name":"get_data.py","file_ext":"py","file_size_in_byte":8408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"357298776","text":"from __future__ import division\nimport numpy as np\nimport copy\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.init as init\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\nfrom state_encoders.wrappers import FrameStack\nfrom state_encoders.depth_nngrid import NNGrid as senc_NNGrid\nfrom action_decoders.depth_nngrid import NNGrid as adec_NNGrid\n\nfrom .convlstm_layer import ConvLSTM\n\nfrom common.utils import norm_col_init, weights_init, weights_init_mlp\n\n# Early-fusion Conv1D + LSTM\n# All frames stacks, passed to 1D convnet then LSTM\nclass ActorCritic(torch.nn.Module):\n def __init__(self, observation_space, action_space, n_frames, args):\n super(ActorCritic, self).__init__()\n\n # State preprocessing\n self.senc_nngrid = senc_NNGrid(args)\n self.frame_stack = FrameStack(n_frames)\n\n # Action postprocessing\n self.adec_nngrid = adec_NNGrid(action_space, args)\n\n self.observation_space = observation_space\n self.action_space = action_space\n\n self.input_size = self.senc_nngrid.observation_space.shape\n self.output_size = int(np.prod(self.action_space.shape))\n\n _s = [32, 64, 128, 128]\n self.convlstm1 = ConvLSTM(self.frame_stack.n_frames*self.input_size[0], 32, 4, stride=1, padding=1)\n self.convlstm2 = ConvLSTM( 32, 64, 3, stride=1, padding=1)\n self.convlstm3 = ConvLSTM( 64, 128, 3, stride=1, padding=1)\n self.convlstm4 = ConvLSTM(128, 128, 3, stride=1, padding=1)\n self.convlstm = [\n self.convlstm1,\n self.convlstm2,\n self.convlstm3,\n self.convlstm4,\n ]\n _is = (n_frames*self.input_size[0],)+self.input_size[1:]\n self.convh0 = []\n self.convc0 = []\n self.memsizes = []\n for i in range(len(self.convlstm)):\n _is = 
self.convlstm[i]._spatial_size_output_given_input((1,)+_is)\n _is = (_s[i],)+_is\n self.memsizes.append(copy.deepcopy(_is))\n self.convh0.append(nn.Parameter(torch.zeros((1,)+self.memsizes[i])))\n self.convc0.append(nn.Parameter(torch.zeros((1,)+self.memsizes[i])))\n self._convh0_module = nn.ParameterList(self.convh0)\n self._convc0_module = nn.ParameterList(self.convc0)\n \n self.critic_linear = nn.Conv2d(128, 2, 3, stride=1, padding=1)\n self.actor_linear = nn.Conv2d(128, 2, 3, stride=1, padding=1)\n self.actor_linear2 = nn.Conv2d(128, 2, 3, stride=1, padding=1)\n\n self.actor_linear.weight.data = norm_col_init(\n self.actor_linear.weight.data, 0.01)\n self.actor_linear.bias.data.fill_(0)\n self.actor_linear2.weight.data = norm_col_init(\n self.actor_linear2.weight.data, 0.01)\n self.actor_linear2.bias.data.fill_(0)\n self.critic_linear.weight.data = norm_col_init(\n self.critic_linear.weight.data, 1.0)\n self.critic_linear.bias.data.fill_(0)\n\n self.train()\n\n def _convlstmforward(self, x, convhx, convcx):\n last_convhx = x\n for i in range(len(self.convlstm)):\n convhx[i], convcx[i] = self.convlstm[i](last_convhx, (convhx[i], convcx[i]))\n last_convhx = convhx[i]\n return convhx, convcx\n\n def forward(self, inputs):\n ob, info, (convhx, convcx, frames) = inputs\n\n # Get the grid state from vectorized input\n x = self.senc_nngrid((ob, info))\n\n # Stack it\n x, frames = self.frame_stack((x, frames))\n\n # Resize to correct dims for convnet\n batch_size = x.size(0)\n x = x.view(batch_size,\n self.frame_stack.n_frames*self.input_size[0],\n self.input_size[1], self.input_size[2])\n convhx, convcx = self._convlstmforward(x, convhx, convcx)\n x = convhx[-1]\n\n # Compute action mean, action var and value grid\n critic_out = self.critic_linear(x)\n actor_out = F.softsign(self.actor_linear(x))\n actor_out2 = self.actor_linear2(x)\n\n # Extract motor-specific values from action grid\n critic_out = self.adec_nngrid((critic_out, info)).mean(-1, keepdim=True)\n actor_out = self.adec_nngrid((actor_out, info))\n actor_out2 = self.adec_nngrid((actor_out2, info))\n return critic_out, actor_out, actor_out2, (convhx, convcx, frames)\n\n def initialize_memory(self):\n #print(np.sum([torch.norm(ch0).item() for ch0 in self.convh0]),\n # np.sum([torch.norm(cc0).item() for cc0 in self.convc0]))\n use_gpu = next(self.parameters()).is_cuda\n return (\n #self.convh0,\n #self.convc0,\n # DO NOT REMOVE BELOW CODE \n # Below code is needed to fix a strange bug in graph backprop\n # TODO(eparisot): debug this further (low priority, might be pytorch..)\n #[ch0 for ch0 in self.convh0],\n #[cc0 for cc0 in self.convc0],\n [Variable(torch.zeros(ch0.size()).cuda()) if use_gpu else Variable(torch.zeros(ch0.size())) for ch0 in self.convh0],\n [Variable(torch.zeros(cc0.size()).cuda()) if use_gpu else Variable(torch.zeros(cc0.size())) for cc0 in self.convc0],\n self.frame_stack.initialize_memory())\n\n def reinitialize_memory(self, old_memory):\n old_convhx, old_convcx, old_frames = old_memory\n return (\n [Variable(chx.data) for chx in old_convhx],\n [Variable(ccx.data) for ccx in old_convcx],\n self.frame_stack.reinitialize_memory(old_frames))\n","sub_path":"models/depthgridconvlstm.py","file_name":"depthgridconvlstm.py","file_ext":"py","file_size_in_byte":5535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"341261390","text":"def render_html(offer):\n html = \"
    \"\n for o in offer:\n html += (\n \"
  • \"\n \"{name} ({price})\"\n \"
    \"\n '{url}'\n \"
  • \"\n ).format(\n **o.__dict__\n )\n html += \"
\"\n return html\n","sub_path":"renderer.py","file_name":"renderer.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"166436094","text":"#coding:utf-8\n\n# 2016/11/21作成。\n# エリアクラスを作成する。\nimport numpy as np\nimport D\nimport D2\nimport convert\nimport subroutine\nimport Using_jit\nimport Budget_at_Global_Ocean\n\ndef GetLabel(i):\n\t# i=0:South, i=1:North, i=2:West, i=3:East\n\tif i == 0:\n\t\treturn 'South'\n\telif i == 1:\n\t\treturn 'North'\n\telif i == 2:\n\t\treturn 'West'\n\telif i == 3:\n\t\treturn 'East'\n\telse:\n\t\traise ValueError('your i is not valid! i must be 0~3!')\n\n\n\nclass EACH_SECTION(object):\n\tdef __init__(self, South, North, West, East):\n\t\tself.South = South\n\t\tself.North = North\n\t\tself.West = West\n\t\tself.East = East\n\n\tdef GetSD(self, i):\n\t\t# Get_a_SectionData. みやすさのために関数名を短くした。\n\t\t# i=0:South, i=1:North, i=2:West, i=3:East\n\t\tif i == 0:\n\t\t\treturn self.South\n\t\telif i == 1:\n\t\t\treturn self.North\n\t\telif i == 2:\n\t\t\treturn self.West\n\t\telif i == 3:\n\t\t\treturn self.East\n\t\telse:\n\t\t\traise ValueError('your i is not valid! i must be 0~3!')\n\n\nclass AREA(object):\n\tdef __init__(self, ID, AreaName, slat, nlat, wlon, elon):\n\t\tself.ID = ID\n\t\tself.AreaName = AreaName\n\n\t\t# 領域の端の「グリッドポイント」の緯度・経度。\n\t\tself.slat = slat\n\t\tself.nlat = nlat\n\t\tself.wlon = wlon\n\t\tself.elon = elon\n\t\tself.xn = int(elon - wlon + 1)\n\t\tself.yn = int(nlat - slat + 1)\n\n\t\t# 領域の端の緯度・経度。\n\t\tself.S_lim = slat - 0.5\n\t\tself.N_lim = nlat + 0.5\n\t\tself.W_lim = wlon - 0.5\n\t\tself.E_lim = elon + 0.5\n\n\t\tself.EWdist = subroutine.dist_on_sphere([self.W_lim, 0.5 * (self.S_lim + self.N_lim)], [self.E_lim, 0.5 * (self.S_lim + self.N_lim)])\n\t\tself.NSdist = subroutine.dist_on_sphere([0.0, self.S_lim], [0.0, self.N_lim])\n\t\tself.Square = self.EWdist * self.NSdist # Areaの面積\n\n\n\tdef Get_data_of_area(self, data, var, product_n):\n\t\t# 2次元のデータは勿論、3次元のデータもこの関数を使ってtrimmingできます。\n\t\tif var == 'u' or var == 'v':\n\t\t\tif data.ndim == 3:\n\t\t\t\tdata = convert.convert_UVgrid_value_to_Sgrid_value_3D(data)\n\t\t\telif data.ndim == 2:\n\t\t\t\tdata = convert.convert_UVgrid_value_to_Sgrid_value_2D(data)\n\n\t\txgrid, ygrid, _ = D.get_grid_value('t', product_n)\n\t\tmx = np.where(xgrid == self.wlon)[0][0]\n\t\tnx = np.where(xgrid == self.elon)[0][0] + 1\n\t\tmy = np.where(ygrid == self.slat)[0][0]\n\t\tny = np.where(ygrid == self.nlat)[0][0] + 1\n\n\t\txgrid = xgrid[mx:nx]\n\t\tygrid = ygrid[my:ny]\n\t\tdata = data[my:ny, mx:nx]\n\t\treturn data, xgrid, ygrid\n\n\tdef Get_data_of_AreaSection_from_data(self, data, var, product_n):\n\t\t# 例えばwlon=10.5, elon=19.5だったとしたら、\n\t\t# Data_wは経度10.0度、Data_e=は経度20度に於けるデータを取得することとします。\n\t\txgrid, ygrid, _ = D.get_grid_value('t', product_n)\n\t\tif var == 'u' or var == 'v':\n\t\t\tif data.ndim == 3:\n\t\t\t\tdata = convert.convert_UVgrid_value_to_Sgrid_value_3D(data)\n\t\t\telif data.ndim == 2:\n\t\t\t\tdata = convert.convert_UVgrid_value_to_Sgrid_value_2D(data)\n\n\t\tmx = np.where(xgrid == self.wlon)[0][0]\n\t\tnx = np.where(xgrid == self.elon)[0][0] + 1\n\t\tmy = np.where(ygrid == self.slat)[0][0]\n\t\tny = np.where(ygrid == self.nlat)[0][0] + 1\n\t\tData_S = 0.5 * (data[my - 1, mx:nx] + data[my, mx:nx])\n\t\tData_N = 0.5 * (data[ny - 1, mx:nx] + data[ny, mx:nx])\n\t\tData_W = 0.5 * (data[my:ny, mx - 1] + data[my:ny, mx])\n\t\tData_E = 0.5 * (data[my:ny, nx - 1] + data[my:ny, nx])\n\t\tData = 
EACH_SECTION(Data_S, Data_N, Data_W, Data_E)\n\t\treturn Data\n\n\tdef cal_Mass_Budget(self, year, month, depth = 10, product_n = 3):\n\t\t# その領域における体積収支を計算する。\n\t\t# 注意! x,y,z軸方向のグリッドの大きさはすべて等しいと仮定して計算しております。\n\t\t# さらに、領域は南北に長くなく、EWdistで南北両側面の断面積を計算していいこととしております。\n\t\tu = D.get_data(year, month, 'u', 0, product_n)[:, :, :depth]\n\t\tv = D.get_data(year, month, 'v', 0, product_n)[:, :, :depth]\n\t\tw = D.get_data(year, month, 'w', depth, product_n)\n\t\tssh = D2.a_Data_of_ym(D2.D2Data[33], year, month, product_n).Data()\n\t\tssh_Sec = self.Get_data_of_AreaSection_from_data(ssh, 'ht', product_n)\n\t\t_, _, zgrid = D.get_grid_value('w', product_n)\n\t\tu = self.Get_data_of_AreaSection_from_data(u, 'u', product_n)\n\t\tv = self.Get_data_of_AreaSection_from_data(v, 'v', product_n)\n\t\tw, _, _ = self.Get_data_of_area(w, 'w', product_n)\n\n\t\t# 海面力学高度の分だけ、各断面の高さに下駄を履かせてやる\n\t\tWest = np.average(u.West) * (zgrid[depth - 1] + np.average(ssh_Sec.West)) * self.NSdist\n\t\tEast = np.average(u.East) * (zgrid[depth - 1] + np.average(ssh_Sec.East)) * self.NSdist\n\t\tNorth = np.average(v.North) * (zgrid[depth - 1] + np.average(ssh_Sec.North)) * self.EWdist\n\t\tSouth = np.average(v.South) * (zgrid[depth - 1] + np.average(ssh_Sec.South)) * self.EWdist\n\t\tBottom = self.Square * np.average(w)\n\t\treturn South, North, West, East, Bottom\n\n\n\tdef cal_Salinity_Transport_Budget(self, year, month, depth = 10, product_n = 3):\n\t\t# その領域における塩分輸送量収支を計算する。\n\t\t# 注意! x,y,z軸方向のグリッドの大きさはすべて等しいと仮定して計算しております。\n\t\t# さらに、領域は南北に長くなく、EWdistで南北両側面の断面積を計算していいこととしております。\n\t\trho0 = 1024.0\n\t\tdS = Budget_at_Global_Ocean.cal_dSdt(year, month, product_n)[:, :, :depth]\n\t\tsff = D.get_data(year, month, 'sff', 1, product_n)\n\t\ts = D.get_data(year, month, 's', 0, product_n)[:, :, :depth]\n\t\tu = D.get_data(year, month, 'u', 0, product_n)[:, :, :depth]\n\t\tv = D.get_data(year, month, 'v', 0, product_n)[:, :, :depth]\n\t\tssh = D2.a_Data_of_ym(D2.D2Data[33], year, month, product_n).Data()\n\t\tssh_Sec = self.Get_data_of_AreaSection_from_data(ssh, 'ht', product_n)\n\t\tssh_Area, _, _ = self.Get_data_of_area(ssh, 'ht', product_n)\n\t\ts_bottom = s[:, :, depth - 1]\n\t\ts_surface = s[:, :, 0]\n\t\tw = D.get_data(year, month, 'w', depth, product_n)\n\t\t_, _, zgrid = D.get_grid_value('w', product_n)\n\t\tZonTsp = Using_jit.cal_Salt_Transport(s, u, 0, product_n)\n\t\tMerTsp = Using_jit.cal_Salt_Transport(s, v, 1, product_n)\n\t\tZonTsp = self.Get_data_of_AreaSection_from_data(ZonTsp, 's', product_n)\n\t\tMerTsp = self.Get_data_of_AreaSection_from_data(MerTsp, 's', product_n)\n\n\t\t# 海面力学高度の分だけ、各断面の高さに下駄を履かせてやる\n\t\tWest = np.sum(ZonTsp.West) * (zgrid[depth - 1] + np.average(ssh_Sec.West)) / zgrid[depth - 1]\n\t\tEast = np.sum(ZonTsp.East) * (zgrid[depth - 1] + np.average(ssh_Sec.East)) / zgrid[depth - 1]\n\t\tNorth = np.sum(MerTsp.North) * (zgrid[depth - 1] + np.average(ssh_Sec.North)) / zgrid[depth - 1]\n\t\tSouth = np.sum(MerTsp.South) * (zgrid[depth - 1] + np.average(ssh_Sec.South)) / zgrid[depth - 1]\n\n\t\tw, _, _ = self.Get_data_of_area(w, 'w', product_n)\n\t\ts_bottom, _, _ = self.Get_data_of_area(s_bottom, 's', product_n)\n\t\tBottom = self.Square * np.average(1e-3 * rho0 * w * s_bottom)\n\n\t\tsff, _, _ = self.Get_data_of_area(sff, 'sff', product_n)\n\t\ts_surface, _, _ = self.Get_data_of_area(s_surface, 's', product_n)\n\t\tSurface = self.Square * np.average(1e-3 * rho0 * sff * s_surface) / (60 * 60 * 24 * 30.0)\n\n\t\t# 海面力学高度の分だけ、水柱の高さに下駄を履かせてやる\n\t\tdS, _, _ = self.Get_data_of_area(dS, 's', 
product_n)\n\t\tChange = 1e-3 * rho0 * np.average(dS) * (zgrid[depth - 1] + np.average(ssh_Area)) * self.Square / (60 * 60 * 24 * 30.0)\n\n\t\treturn South, North, West, East, Bottom, Surface, Change\n\n\n\n\tdef draw_area(self, plt, xlim = [40, 110], ylim = [ - 20, 30], interval = 10, color = 'red', Map = 0, lonlabel = 1, latlabel = 1):\n\t\tif Map == 0:\n\t\t\tm = subroutine.General_map(slat = ylim[0], nlat = ylim[1], wlon = xlim[0], elon = xlim[1])\n\t\t\tm = subroutine.add_lonlat_line_on_map(m, interval = interval, lonlabel = lonlabel, latlabel = latlabel)\n\n\t\t# 例えば、[0.5, 4.5, 0.5, 9.5]っていうふうにslat等を定義していたら、[0,0],[0,10],[5,0][5,10]を結ぶ四角形を描く。\n\t\tM = int(self.N_lim - self.S_lim) + 1\n\t\tN = int(self.E_lim - self.W_lim) + 1\n\t\ta = np.arange(M) + self.S_lim\n\t\tb1 = np.ones(M) * self.W_lim\n\t\tb2 = np.ones(M) * self.E_lim\n\t\tc = np.arange(N) + self.W_lim\n\t\td1 = np.ones(N) * self.S_lim\n\t\td2 = np.ones(N) * self.N_lim\n\t\tplt.plot(b1, a, color = color)\n\t\tplt.plot(b2, a, color = color)\n\t\tplt.plot(c, d1, color = color)\n\t\tplt.plot(c, d2, color = color)\n\t\treturn plt\n\n\n\nArea = [0] * 30\nArea[0] = AREA(0\t, 'IO', -19.5, 29.5, 40.5, 109.5)\nArea[1] = AREA(1\t, 'eDMI', -9.5, -0.5, 90.5, 109.5)\nArea[2] = AREA(2\t, 'wDMI', -9.5, 9.5, 50.5, 69.5)\nArea[3] = AREA(3\t, 'EEIO', -3.5, 3.5, 90.5, 99.5)\nArea[4] = AREA(4\t, 'BoB', 10.5, 25.5, 80.5, 99.5)\nArea[5] = AREA(5\t, 'SEAS', 4.5, 7.5, 62.5, 71.5)\nArea[6] = AREA(6\t, 'Ayeyarwady', 12.5, 15.5, 94.5, 97.5)\nArea[7] = AREA(7\t, 'head_of_Sumatera', 3.5, 8.5, 91.5, 97.5)\nArea[8] = AREA(8\t, 'West_of_Ayeyarwady', 11.5, 15.5, 90.5, 94.5)\nArea[9] = AREA(9\t, 'North_IO', 0.5, 29.5, 45.5, 99.5)\nArea[10] = AREA(10, 'CIO', 0.5, 2.5, 73.5, 79.5)\nArea[11] = AREA(11, 'South_of_Srilanka', 3.5, 5.5, 77.5, 82.5)\nArea[12] = AREA(12, 'North_AS', 12.5, 24.5, 58.5, 71.5)\nArea[13] = AREA(13, 'East_AS', 10.5, 14.5, 67.5, 74.5)\nArea[14] = AREA(14, 'West_BoB', 10.5, 19.5, 80.5, 89.5)\nArea[15] = AREA(15, 'SouthWestern_BoB', 6.5, 9.5, 80.5, 89.5)\nArea[16] = AREA(16, 'NorthWestern_BoB', 15.5, 19.5, 85.5, 89.5)\nArea[17] = AREA(17, 'CentralWestern_BoB', 13.5, 16.5, 80.5, 83.5)\nArea[18] = AREA(18, 'BoB_for_Aquarius', 7.5, 23.5, 79.5, 95.5)\nArea[19] = AREA(19, 'SEAS_for_Aquarius', 4.5, 13.5, 64.5, 76.5)\nArea[20] = AREA(20, 'EEIO_for_Aquarius', -5.5, 5.5, 79.5, 100.5)\nArea[21] = AREA(21, 'Mouth_of_GB', 20.5, 21.5, 88.5, 91.5)\nArea[22] = AREA(22, 'South_of_Indian_Subcontinent', 2.5, 11.5, 65.5, 89.5)\nArea[23] = AREA(23, 'East_of_BoB', 10.5, 22.5, 90.5, 98.5)\n","sub_path":"Area.py","file_name":"Area.py","file_ext":"py","file_size_in_byte":9744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"365819938","text":"import datetime\nimport os, random, json\nfrom django.conf import settings\nfrom django.core.cache import cache\nfrom django.core.paginator import Paginator, PageNotAnInteger, EmptyPage\nfrom django.db.models import Q\nfrom django.shortcuts import render, get_object_or_404\nfrom django.views.decorators.cache import cache_page\n\nfrom basketapp.models import Basket\nfrom .models import Product, ProductCategory\n\n\ndef get_links_menu():\n if settings.LOW_CACHE:\n key = 'links_menu'\n links_menu = cache.get(key)\n if links_menu is None:\n links_menu = ProductCategory.objects.filter(is_active=True)\n cache.set(key, links_menu)\n return links_menu\n else:\n return ProductCategory.objects.filter(is_active=True)\n\n\n\ndef get_hot_product():\n products_list = 
Product.objects.all()\n return random.sample(list(products_list), 1)[0]\n\ndef get_same_products(hot_product):\n return Product.objects.filter(category__pk=hot_product.category.pk).exclude(pk=hot_product.pk)[:3]\n\ndef main(request):\n title = 'Главная'\n # products = Product.objects.all()[:3]\n # products = Product.objects.filter(is_active=True, category__is_active=True)[:3]\n products = Product.objects.filter(is_active=True, category__is_active=True).select_related('category')[:3]\n content = {'title': title,\n 'products': products,\n }\n return render(request, 'mainapp/index.html', content)\n\ndef get_category(pk):\n if settings.LOW_CACHE:\n key = f'category_{pk}'\n category = cache.get(key)\n if category is None:\n category = get_object_or_404(ProductCategory, pk=pk)\n cache.set(key, category)\n return category\n else:\n return get_object_or_404(ProductCategory, pk)\n\n@cache_page(3600)\ndef products(request, pk=None, page=1):\n title = 'продукты'\n # links_menu = ProductCategory.objects.filter(is_active=True)\n links_menu = get_links_menu()\n if pk is not None:\n if pk == 0:\n category = {'name': 'все', 'pk': 0}\n products = Product.objects.filter(is_active=True).order_by('price')\n else:\n category = get_category(pk)\n products = Product.objects.filter(category__pk=pk).order_by('price')\n # products = Product.objects.filter(Q(category__pk=1) | Q(category__pk=2))\n paginator = Paginator(products, 2)\n try:\n products_paginator = paginator.page(page)\n except PageNotAnInteger:\n products_paginator = paginator.page(1)\n except EmptyPage:\n products_paginator = paginator.page(paginator.num_pages)\n\n\n content = {\n 'title': title,\n 'links_menu': links_menu,\n 'category': category,\n 'products': products_paginator,\n }\n return render(request, 'mainapp/products_list.html', content)\n\n hot_product = get_hot_product()\n same_products = get_same_products(hot_product)\n\n content = {'title': title,\n 'links_menu': links_menu,\n 'same_products': same_products,\n 'hot_product': hot_product,\n }\n return render(request, 'mainapp/products.html', content)\n\n\ndef product(request, pk):\n title = 'продукты'\n\n content = {\n 'title': title,\n 'links_menu': ProductCategory.objects.all(),\n 'product': get_object_or_404(Product, pk=pk),\n }\n\n return render(request, 'mainapp/product.html', content)\n\n\ndef contact(request):\n title = 'О нас'\n visit_date = datetime.datetime.now()\n locations = []\n file_path = os.path.join(settings.BASE_DIR, 'contacts.json')\n with open((file_path), encoding='utf-8') as file_contacts:\n locations = json.load(file_contacts)\n content = {'title': title,\n 'visit_date': visit_date,\n 'locations': locations,\n }\n return render(request, 'mainapp/contact.html', content)\n\ndef products_all(request):\n links_menu = [\n {'href': 'products_all', 'name': 'все'},\n {'href': 'products_home', 'name': 'дом'},\n {'href': 'products_office', 'name': 'офис'},\n {'href': 'products_modern', 'name': 'модерн'},\n {'href': 'products_classic', 'name': 'классика'},\n ]\n content = {\n 'title': 'Продукты',\n 'links_menu': links_menu\n };\n return render(request, 'mainapp/products.html', content)\n\ndef products_home(request):\n links_menu = [\n {'href': 'products_all', 'name': 'все'},\n {'href': 'products_home', 'name': 'дом'},\n {'href': 'products_office', 'name': 'офис'},\n {'href': 'products_modern', 'name': 'модерн'},\n {'href': 'products_classic', 'name': 'классика'},\n ]\n content = {\n 'title': 'Продукты',\n 'links_menu': links_menu\n };\n return render(request, 
'mainapp/products.html', content)\n\n","sub_path":"mainapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"445795405","text":"import random\nimport numpy as np\nimport copy\nimport torch.nn as nn\nimport torch\nfrom collections import deque\nfrom Data.PrioritizedMemory import PrioritizedMemory\nfrom Utils import init_weights\n\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n\nclass Agent:\n\n def __init__(self,\n model,\n optim,\n eps=1,\n gamma=.99,\n eps_decay=1e-6,\n min_eps=.1,\n ddqn=True,\n using_priority=True,\n num_atoms=51,\n v_max=10,\n v_min=-10):\n \"\"\"\n Agent for Rainbow (can configure to use different parts individually)\n\n :param model: Network\n :param optim: Optimizer\n :param eps: Epsilon for epsilon-greedy\n :param gamma: Discount factor\n :param eps_decay: Epsilon decay\n :param min_eps: Minimum epsilon\n :param ddqn: Use of double deep q learning\n :param using_priority: Use priority replay\n :param num_atoms: Number of atoms for categorical DQN (put 0 if not using categorical DQN)\n :param v_max: Max value in the categorical DQN output\n :param v_min: Min value in the categorical DQN output\n \"\"\"\n\n self.network = model\n self.network.apply(init_weights)\n\n self.target_network = copy.deepcopy(model).to(device)\n\n self.optim = optim\n\n self.gamma = gamma\n self.eps = eps\n self.eps_decay = eps_decay\n self.min_eps = min_eps\n\n self.memory_limit = 70000\n\n if using_priority:\n self.memory = PrioritizedMemory(self.memory_limit)\n else:\n self.memory = deque()\n\n self.objective = nn.SmoothL1Loss().to(device) # Huber loss to clip grads\n\n self.ddqn = ddqn\n self.using_priority = using_priority\n\n self.num_atoms = num_atoms\n if self.num_atoms > 0:\n self.v_max = v_max\n self.v_min = v_min\n self.delta_z = float((v_max - v_min) / (num_atoms - 1))\n self.z = torch.linspace(v_min, v_max, num_atoms).to(device)\n\n def act(self, state):\n\n if self.network.sigma <= 0: # if not using noisy nets\n if self.eps > self.min_eps:\n self.eps -= self.eps_decay # linear decay\n\n if np.random.rand() <= self.eps:\n return random.randrange(self.network.num_actions)\n\n action_values = self.network(state)\n\n if self.num_atoms > 0:\n return (action_values * self.z).sum(2).argmax(1).item()\n\n return torch.argmax(action_values).detach().item()\n\n def store_mem(self, mem):\n\n if self.using_priority:\n self.memory.add(mem)\n else:\n if len(self.memory) >= self.memory_limit:\n self.memory.popleft()\n self.memory.append(mem)\n\n def get_sample(self, bs):\n\n if self.using_priority:\n batch, is_weights, tree_indices = self.memory.sample(bs)\n is_weights = torch.tensor(is_weights).type(torch.FloatTensor).unsqueeze(1).to(device)\n else:\n batch = random.sample(self.memory, bs)\n\n states = []\n actions = []\n next_states = []\n rewards = []\n dones = []\n\n for sample in batch:\n\n states.append(sample[0]) # states already tensors\n actions.append(torch.tensor(sample[1]).clone().to(device).detach())\n next_states.append(sample[2]) # next state is already a tensor\n rewards.append(torch.tensor(sample[3]))\n dones.append(sample[4])\n\n states = torch.stack(tuple(states))\n actions = torch.stack(tuple(actions)).unsqueeze(1).to(device)\n next_states = torch.stack(tuple(next_states))\n rewards = torch.stack(tuple(rewards)).unsqueeze(1).type(torch.FloatTensor).to(device)\n\n dones = np.array(dones).astype(int)\n dones = 
torch.from_numpy(np.vstack(dones)).type(torch.FloatTensor).to(device)\n\n if self.using_priority:\n return states, actions, next_states, rewards, dones, is_weights, tree_indices\n else:\n return states, actions, next_states, rewards, dones\n\n def replay(self, bs, num_step_return):\n\n if self.network.sigma > 0:\n self.network.resample_noise()\n self.target_network.resample_noise() # resample noise once per update\n\n if self.num_atoms > 0 and self.using_priority:\n self.rainbow_replay(bs, num_step_return)\n elif self.num_atoms > 0:\n self.categorical_replay(bs, num_step_return)\n elif self.using_priority:\n self.prioritized_replay(bs, num_step_return)\n else:\n if self.ddqn:\n self.replay_ddqn(bs, num_step_return)\n else:\n self.replay_dqn(bs, num_step_return)\n\n def replay_dqn(self, bs, num_step_return):\n \"\"\"\n Experience replay with vanilla deep q-learning\n :param bs: mini-batch size\n :param num_step_return: Number of steps for reward\n \"\"\"\n\n states, actions, next_states, rewards, dones = self.get_sample(bs)\n\n self.network.train()\n\n preds, targets, _ = self.get_td_error_non_ddqn(states, actions, next_states, rewards, dones, num_step_return)\n\n loss = self.objective(preds, targets)\n self.optim.zero_grad()\n\n loss.backward()\n self.optim.step()\n\n def get_td_error(self, states, actions, next_states, rewards, dones, num_step_return, ddqn=True):\n\n if self.num_atoms > 0:\n return self.get_td_error_cat(states, actions, next_states, rewards, dones, num_step_return, 1, ddqn)\n\n elif ddqn:\n return self.get_td_error_non_cat(states, actions, next_states, rewards, dones, num_step_return)\n else:\n return self.get_td_error_non_ddqn(states, actions, next_states, rewards, dones, num_step_return)\n\n def get_td_error_non_ddqn(self, states, actions, next_states, rewards, dones, num_step_return):\n\n predicted_qs = self.network(states).gather(1, actions) # get actions used in training run\n\n target_actions = self.target_network(next_states).detach()\n target_actions = target_actions.max(1)[0].unsqueeze(1) # max over Q'\n\n targets = rewards + (self.gamma ** num_step_return) * target_actions * (1 - dones) # if done only reward\n\n error = targets - predicted_qs\n error = torch.abs(error.detach().cpu())\n\n return predicted_qs, targets, error\n\n def get_td_error_non_cat(self, states, actions, next_states, rewards, dones, num_step_return):\n \"\"\"\n Get double DQN TD error for non-categorical DQN\n \"\"\"\n\n preds = self.network(states).gather(1, actions)\n\n next_max_actions = self.network(next_states).detach().argmax(1).unsqueeze(1)\n next_qs = self.target_network(next_states).gather(1, next_max_actions).detach()\n\n targets = rewards + (self.gamma ** num_step_return) * next_qs * (1 - dones)\n\n error = targets - preds\n error = torch.abs(error.detach()).cpu()\n\n return preds, targets, error\n\n def get_td_error_cat(self, states, actions, next_states, rewards, dones, num_step_return, bs, ddqn=True):\n \"\"\"\n Get TD error for categorical DQN\n \"\"\"\n\n pred_p = self.network(states)\n actions = actions.unsqueeze(1).expand(bs, 1, self.num_atoms)\n pred_p = pred_p.gather(1, actions).squeeze(1)\n\n m = torch.zeros(bs, self.num_atoms).type(torch.FloatTensor)\n offset = torch.linspace(0, (bs - 1) * self.num_atoms, bs).unsqueeze(1).expand(bs, self.num_atoms)\n\n next_p = self.target_network(next_states).detach() # [bs, |A|, N]\n\n if ddqn:\n next_p_on = self.network(next_states).detach() # next p for online network\n next_q_on = (self.z.expand_as(next_p_on) * next_p_on).sum(2)\n 
next_a_on = next_q_on.argmax(1)\n next_a_on = next_a_on.unsqueeze(1).unsqueeze(1).expand(bs, 1, self.num_atoms).type(torch.FloatTensor)\n argmax_q = next_p.gather(1, next_a_on.long()).squeeze(1)\n\n else:\n next_q = (self.z.expand_as(next_p) * next_p).sum(2) # [bs, |A|]\n\n next_a = next_q.argmax(1) # [bs, 1] a* = argmax_a Q(x+1, a)\n next_a = next_a.unsqueeze(1).unsqueeze(1).expand(bs, 1, self.num_atoms).type(torch.FloatTensor)\n # [bs, 1, N]\n\n argmax_q = next_p.gather(1, next_a.long()).squeeze(1) # row of p corresponding to a*\n\n # Projection of T_z onto the support z\n T_z = rewards + (self.gamma ** num_step_return) * self.z.unsqueeze(0) * (1-dones)\n T_z = T_z.clamp_(self.v_min, self.v_max)\n # [bs, num_atoms]\n\n b = (T_z - self.v_min) / self.delta_z\n\n l = b.floor()\n u = b.ceil()\n\n m.view(-1).index_add_(0, (l + offset).view(-1).long(), (argmax_q * (u.float() - b)).view(-1))\n # ml = ml + p(s_t+n, a*)(u - b)\n m.view(-1).index_add_(0, (u + offset).view(-1).long(), (argmax_q * (b - l.float())).view(-1))\n # mu = mu + p(s_t+n, a*)(b - l)\n\n error = torch.abs(m - pred_p).mean(1).unsqueeze(1)\n error = error.detach().cpu()\n\n return pred_p, m, error\n\n def replay_ddqn(self, bs, num_step_return):\n \"\"\"\n Experience replay with double deep q-learning\n :param bs: mini-batch size\n :param num_step_return: Number of steps for reward\n \"\"\"\n\n states, actions, next_states, rewards, dones = self.get_sample(bs)\n self.network.train()\n\n preds, targets, _ = self.get_td_error_non_cat(states, actions, next_states, rewards, dones, num_step_return)\n\n loss = self.objective(preds, targets)\n self.optim.zero_grad()\n loss.backward()\n self.optim.step()\n\n def prioritized_replay(self, bs, num_step_return):\n \"\"\"\n Prioritized Experience Replay on double DQN\n :param bs: mini-batch size\n :param num_step_return: Number of steps for reward\n \"\"\"\n\n states, actions, next_states, rewards, dones, is_weights, tree_indices = self.get_sample(bs)\n\n self.network.train()\n\n preds, targets, errors = self.get_td_error_non_cat(states, actions, next_states, rewards, dones, num_step_return)\n\n errors = np.array(errors)\n\n for i in range(bs):\n self.memory.update(errors[i][0], tree_indices[i])\n\n loss = (self.objective(preds, targets) * is_weights).mean()\n\n self.optim.zero_grad()\n loss.backward()\n self.optim.step()\n\n def categorical_replay(self, bs, num_step_return):\n \"\"\"\n C51 Replay\n :param bs: batch size\n :param num_step_return: number of steps for multi-step learning\n \"\"\"\n\n self.network.train()\n states, actions, next_states, rewards, dones = self.get_sample(bs)\n\n preds, targets, _ = self.get_td_error_cat(states, actions, next_states, rewards, dones, num_step_return, bs)\n\n loss = -(targets.to(device) * torch.log(preds)).sum(-1).mean().to(device) # cross-entropy loss ()\n\n self.optim.zero_grad()\n loss.backward()\n self.optim.step()\n\n def rainbow_replay(self, bs, num_step_return):\n \"\"\"\n C51 Replay with prioritized experience replay and use of noisy nets\n :param bs: batch size\n :param num_step_return: number of steps for multi-step learning\n \"\"\"\n\n self.network.train()\n states, actions, next_states, rewards, dones, is_weights, tree_indices = self.get_sample(bs)\n\n preds, targets, errors = self.get_td_error_cat(states, actions, next_states, rewards, dones, num_step_return, bs)\n\n for i in range(bs):\n self.memory.update(errors[i][0], tree_indices[i])\n\n loss = -(targets.to(device) * torch.log(preds)).sum(-1).mean().to(device) # cross-entropy loss 
()\n loss = (loss * is_weights).mean()\n\n self.optim.zero_grad()\n loss.backward()\n self.optim.step()\n","sub_path":"Agent.py","file_name":"Agent.py","file_ext":"py","file_size_in_byte":12065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"457496238","text":"import FWCore.ParameterSet.Config as cms\n\nfeatures = cms.EDAnalyzer(\n \"IDFeatures\",\n checkFromB = cms.bool(True),\n drMax = cms.double(0.02),\n fakesMultiplier = cms.double(0.5),\n # AOD and MINIAOD\n rho = cms.InputTag('fixedGridRhoFastjetAll'),\n beamspot = cms.InputTag(\"offlineBeamSpot\"),\n gsfTracks = cms.InputTag(\"lowPtGsfEleGsfTracks\"),\n MVASeedUnbiased = cms.InputTag(\"lowPtGsfElectronSeedValueMaps:unbiased\"),\n MVASeedPtbiased = cms.InputTag(\"lowPtGsfElectronSeedValueMaps:ptbiased\"),\n MVAIDLowPt = cms.InputTag('lowPtGsfElectronID'),\n MVAIDV2 = cms.InputTag('electronMVAValueMapProducer:ElectronMVAEstimatorRun2Fall17IsoV2Values'),\n # AOD only\n egammaGsfTracks = cms.InputTag(\"electronGsfTracks\"),\n electrons = cms.InputTag(\"lowPtGsfElectrons\"),\n egammaElectrons = cms.InputTag(\"gedGsfElectrons\"),\n genParticles = cms.InputTag(\"genParticles\"),\n # MINIAOD only\n egammaGsfTracks_mAOD = cms.InputTag(\"reducedEgamma:reducedGsfTracks\"),\n electrons_mAOD = cms.InputTag(\"slimmedLowPtElectrons\"),\n egammaElectrons_mAOD = cms.InputTag(\"slimmedElectrons\"),\n prunedGenParticles = cms.InputTag(\"prunedGenParticles\"),\n packedGenParticles = cms.InputTag(\"packedGenParticles\"),\n )\n\n #convVtxFitProb = cms.InputTag('electronMVAVariableHelper:convVtxFitProb'),\n","sub_path":"python/IDFeatures_cfi.py","file_name":"IDFeatures_cfi.py","file_ext":"py","file_size_in_byte":1324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"398531068","text":"from flask import Flask, render_template, request, session, url_for, redirect\nimport pymysql.cursors\nfrom . 
import routes\nimport datetime\n\nconn = pymysql.connect(host='localhost',\n user='root',\n password='@Ygzzbhctyc1996',\n unix_socket='/Applications/XAMPP/xamppfiles/var/mysql/mysql.sock',\n db='Air_Reservation_System',\n charset='utf8mb4',\n cursorclass=pymysql.cursors.DictCursor)\n\ntoday = datetime.date.today()\n\n@routes.route('/', methods=['GET'])\ndef home_get():\n return render_template('index.html', msg=None)\n\n\n@routes.route('/', methods=['POST'])\ndef home_post():\n cursor = conn.cursor()\n data1 = None\n data2 = None\n data_list = [request.form.get('souceairport'), request.form.get('destinationairport'), \\\n request.form.get('soucecity'), request.form.get('destinationcity')]\n data_tuple = [x if (x != '' and x != ' ') else None for x in data_list]\n data_tuple = tuple(data_tuple)\n\n msg = {'search': None, 'status': None}\n if data_tuple != (None, None, None, None):\n query = \"CREATE VIEW flight_D AS\\\n (SELECT airline_name, flight_num, departure_airport, departure_time, arrival_airport, arrival_time, price, airport.airport_city as departure_city\\\n FROM flight JOIN airport \\\n WHERE departure_airport = airport_name AND status = 'upcoming');\\\n CREATE VIEW flight_with_city AS\\\n (SELECT airline_name, flight_num, departure_airport, departure_time, arrival_airport, arrival_time, price, departure_city, airport.airport_city as arrival_city\\\n FROM flight_D JOIN airport \\\n WHERE arrival_airport = airport_name);\"\n cursor.execute(query)\n date = request.form.get('date')\n if not date:\n ls = list(data_tuple)\n ls.insert(4, today)\n data_tuple = tuple(ls)\n query = \"SELECT airline_name as airline, flight_num as flightnum, departure_time as departure, arrival_time as arrival, price\\\n FROM flight_with_city \\\n WHERE departure_airport = IFNULL(%s, departure_airport) AND arrival_airport = IFNULL(%s, arrival_airport) \\\n AND departure_city = IFNULL(%s, departure_city) AND arrival_city = IFNULL(%s, arrival_city) AND DATE(arrival_time)>=%s\"\n cursor.execute(query, data_tuple)\n data1 = cursor.fetchall()\n query = \"DROP VIEW flight_with_city;DROP VIEW flight_D\"\n cursor.execute(query)\n if not data1:\n msg['search'] = \"No flight found.\"\n return render_template('index.html', msg=msg)\n else:\n query = \"CREATE VIEW temp as (SELECT *\\\n FROM flight_with_city \\\n WHERE departure_airport = IFNULL(%s, departure_airport) AND arrival_airport = IFNULL(%s, arrival_airport) \\\n AND departure_city = IFNULL(%s, departure_city) AND arrival_city = IFNULL(%s, arrival_city))\"\n cursor.execute(query, data_tuple)\n query = \"SELECT airline_name as airline, flight_num as flightnum, departure_time as departure, arrival_time as arrival, price \\\n FROM temp WHERE DATE(departure_time) >= %s\"\n cursor.execute(query, date)\n data1 = cursor.fetchall()\n query = \"DROP VIEW flight_with_city;DROP VIEW temp;DROP VIEW flight_D\"\n cursor.execute(query)\n if not data1:\n msg['search'] = 'No data found.'\n return render_template('index.html', msg=msg)\n else:\n msg['search'] = 'Please enter more info.'\n\n\n flightnum = request.form.get('flight')\n depart = request.form.get('departure')\n arrival = request.form.get('arrival')\n if not(flightnum or depart or arrival):\n return render_template('index.html', data1=data1, msg=msg)\n else:\n msg['search'] = None\n if ((arrival == \" \") or (arrival == \"\") or arrival==None ) and ((depart == \" \") or (depart == \"\") or depart==None):\n msg['status'] = \"Please enter depart/arrival time.\"\n return render_template('index.html', msg=msg)\n else:\n if 
flightnum == \" \" or flightnum == \"\":\n flightnum = None\n if (arrival != \" \") and (arrival != \"\") and (arrival!=None):\n query = \"SELECT flight_num as flightnum2, status\\\n FROM flight\\\n WHERE flight_num = IFNULL(%s, flight_num) AND DATE(arrival_time)=%s \"\n cursor.execute(query, (flightnum, arrival))\n data2 = cursor.fetchall()\n if not data2:\n msg['status'] = 'No such flight.'\n return render_template('index.html', msg=msg)\n else:\n query = \"SELECT flight_num as flightnum2, status\\\n FROM flight\\\n WHERE flight_num = IFNULL(%s, flight_num) AND DATE(departure_time) = %s\"\n cursor.execute(query, (flightnum, depart))\n data2 = cursor.fetchall()\n if not data2:\n msg['status'] = 'No such flight.'\n return render_template('index.html', msg=msg)\n return render_template('index.html', data2=data2, msg=None)","sub_path":"code/routes/home.py","file_name":"home.py","file_ext":"py","file_size_in_byte":5480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"384333801","text":"import pygame\r\nfrom SUPERMARIO静止画面 import tools, setup, stuff, brick, box, enemy, coin\r\nfrom SUPERMARIO静止画面 import info\r\nfrom.import constant as c\r\nfrom SUPERMARIO静止画面 import player_2\r\nimport json\r\nimport os\r\n\r\n\r\nclass Level:\r\n def start(self, game_info):\r\n self.game_info = game_info\r\n self.finished = False\r\n self.next_state = 'game_over'\r\n self.setup_background()\r\n self.info = info.Info('level', game_info)\r\n self.load_map_data()\r\n self.set_position()\r\n self.setup_player() # 关卡中玩家初始化\r\n self.setup_ground_items()\r\n self.setup_bricks_and_boxes()\r\n self.setup_enemies()\r\n self.setup_checkpoints()\r\n\r\n def setup_background(self):\r\n self.name = 'background_items'\r\n self.BG = pygame.image.load('D:\\\\game\\\\Mario\\\\resources\\\\graphics\\\\level_1.png')\r\n w, h = self.BG.get_size()\r\n self.BG = pygame.transform.scale(self.BG, (int(w*c.BG_SCALE), int(h*c.BG_SCALE)))\r\n self.viewport = tools.load_image('D:\\\\game\\\\Mario\\\\resources\\\\graphics\\\\title_screen.png', 1, 60, 176, 88,\r\n (255, 0, 220),c.BG_SCALE)\r\n self.BG_rect = self.BG.get_rect()\r\n self.game_window = setup.SCREEN.get_rect()\r\n self.bg = pygame.Surface((self.BG_rect.width, self.BG_rect.height)) # 创建一个大小和游戏背景图一致的黑色对象\r\n\r\n def load_map_data(self):\r\n file_name = 'level_1.json'\r\n file_path = os.path.join('D:\\game\\Mario\\source\\data\\maps', file_name)\r\n with open(file_path) as f:\r\n self.map_data = json.load(f)\r\n\r\n def set_position(self):\r\n self.position = []\r\n for data in self.map_data['maps']:\r\n self.position.append((data['start_x'], data['end_x'], data['player_x'], data['player_y']))\r\n self.start_x, self.end_x, self.player_x, self.player_y = self.position[0]\r\n\r\n\r\n def setup_player(self): # 定义方法\r\n self.player = player_2.Player('mario')\r\n self.player.rect.x = self.player_x\r\n self.player.rect.bottom = self.player_y\r\n\r\n def setup_ground_items(self):\r\n self.ground_items_group = pygame.sprite.Group()\r\n for name in ['ground', 'pipe', 'step']:\r\n for item in self.map_data[name]:\r\n self.ground_items_group.add(stuff.Item(item['x'], item['y'], item['width'], item['height'], name))\r\n\r\n def setup_bricks_and_boxes(self):\r\n self.brick_group = pygame.sprite.Group()\r\n self.box_group = pygame.sprite.Group()\r\n\r\n if 'brick' in self.map_data:\r\n for brick_data in self.map_data['brick']:\r\n x,y = brick_data['x'],brick_data['y']\r\n brick_type = brick_data['type']\r\n if 'brick_num' in brick_data:\r\n 
# TODO batch bricks\r\n pass\r\n else:\r\n self.brick_group.add(brick.Brick(x, y, brick_type))\r\n\r\n if 'box' in self.map_data:\r\n for box_data in self.map_data['box']:\r\n x, y = box_data['x'], box_data['y']\r\n box_type = box_data['type']\r\n self.box_group.add(box.Box(x, y, box_type))\r\n\r\n def setup_enemies(self):\r\n self.dying_group = pygame.sprite.Group()\r\n self.enemy_group = pygame.sprite.Group()\r\n # 创建存放敌人的空字典,键:组数;值:存放所有野怪的精灵组\r\n self.enemy_group_dict = {}\r\n for enemy_group_data in self.map_data['enemy']:\r\n group = pygame.sprite.Group()\r\n for enemy_group_id, enemy_list in enemy_group_data.items():\r\n for enemy_data in enemy_list:\r\n group.add(enemy.create_enemy(enemy_data))\r\n self.enemy_group_dict[enemy_group_id] = group\r\n\r\n def setup_checkpoints(self):\r\n self.checkpoint_group = pygame.sprite.Group()\r\n for item in self.map_data['checkpoint']:\r\n x, y, w, h, checkpoint_type = item['x'], item['y'], item['width'], item['height'], item['type']\r\n enemy_groupid = item.get('enemy_groupid')\r\n check_point = stuff.Checkpoint(x, y, w, h, checkpoint_type, enemy_groupid)\r\n self.checkpoint_group.add(check_point)\r\n\r\n # def setup_coin(self):\r\n # self.coin_group = pygame.sprite.Group()\r\n # for coin_data in self.map_data['coin']:\r\n # x, y = coin_data['x'], coin_data['y']\r\n # self.coin_group.add(coin.Levelcoin(x, y))\r\n\r\n def update(self, surface, keys):\r\n self.current_time = pygame.time.get_ticks()\r\n self.player.update(keys)\r\n if self.player.dead:\r\n if self.current_time - self.player.death_timer > 3000:\r\n self.finished = True\r\n self.update_game_info()\r\n\r\n else:\r\n self.update_player_position()\r\n self.check_checkpoint()\r\n self.check_if_go_die()\r\n self.update_window_position()\r\n self.brick_group.update()\r\n self.box_group.update()\r\n self.enemy_group.update(self)\r\n self.dying_group.update(self)\r\n # self.coin_group.update()\r\n self.draw(surface)\r\n\r\n def check_if_go_die(self):\r\n if self.player.rect.y > c.SCREEN_H:\r\n self.player.go_die()\r\n\r\n def update_player_position(self): # 利用当前玩家速度更新位置的方法\r\n self.player.rect.x += self.player.x_vel\r\n if self.player.rect.x < self.start_x:\r\n self.player.rect.x = self.start_x\r\n elif self.player.rect.right > self.end_x:\r\n self.player.rect.right = self.end_x\r\n\r\n # x方向\r\n self.check_x_collision()\r\n self.player.rect.y += self.player.y_vel\r\n # y方向\r\n if not self.player.dead:\r\n self.player.rect.y += self.player.y_vel\r\n self.check_y_collision()\r\n\r\n def check_x_collision(self):\r\n check_group = pygame.sprite.Group(self.ground_items_group, self.brick_group, self.box_group)\r\n ground_item = pygame.sprite.spritecollideany(self.player, check_group)\r\n if ground_item:\r\n self.adjust_player_x(ground_item)\r\n\r\n enemy = pygame.sprite.spritecollideany(self.player, self.enemy_group)\r\n if enemy:\r\n # self.player.go_die()\r\n pass\r\n\r\n # coin = pygame.sprite.spritecollideany(self.player, self.coin_group)\r\n # if coin:\r\n # coin.kill()\r\n # self.game_info['coin'] += 1\r\n\r\n def check_y_collision(self):\r\n check_group = pygame.sprite.Group(self.ground_items_group, self.brick_group, self.box_group)\r\n ground_item = pygame.sprite.spritecollideany(self.player, check_group)\r\n\r\n if ground_item:\r\n self.adjust_player_y(ground_item)\r\n self.adjust_box_position(ground_item)\r\n\r\n enemy = pygame.sprite.spritecollideany(self.player, self.enemy_group)\r\n if enemy:\r\n self.enemy_group.remove(enemy)\r\n self.dying_group.add(enemy)\r\n # 从下往上顶敌人\r\n if 
self.player.y_vel < 0:\r\n how = 'bumped'\r\n # 从上往下压敌人\r\n else:\r\n how = 'trampled'\r\n self.player.state = 'jump'\r\n self.player.rect.bottom = enemy.rect.top\r\n self.player.y_vel = self.player.jump_vel*0.8\r\n enemy.go_die(how)\r\n\r\n self.check_will_fall(self.player)\r\n\r\n # coin = pygame.sprite.spritecollideany(self.player, self.coin_group)\r\n # if coin:\r\n # coin.kill()\r\n # self.game_info['coin'] += 1\r\n\r\n def adjust_player_x(self, sprite):\r\n if self.player.rect.x < sprite.rect.x:\r\n self.player.rect.right = sprite.rect.left\r\n else:\r\n self.player.rect.left = sprite.rect.right\r\n self.player.x_vel = 0\r\n\r\n def adjust_player_y(self, sprite):\r\n # 从下往上\r\n if self.player.rect.bottom >= sprite.rect.bottom:\r\n self.player.y_vel = 7\r\n self.player.rect.top = sprite.rect.bottom \r\n self.player.state = 'fall'\r\n\r\n # 从上往下\r\n else:\r\n self.player.y_vel = 0\r\n self.player.rect.bottom = sprite.rect.top\r\n self.player.state = 'walk'\r\n\r\n def adjust_box_position(self, group_sprite):\r\n if group_sprite.name == 'box':\r\n group_sprite.y_vel = -2\r\n group_sprite.y_accel = 0.25\r\n group_sprite.frame_index = 3\r\n\r\n\r\n def check_will_fall(self, sprite):\r\n sprite.rect.y += 1\r\n check_group = pygame.sprite.Group(self.ground_items_group, self.brick_group, self.box_group)\r\n m = pygame.sprite.spritecollideany(sprite, check_group)\r\n if not m and sprite.state != 'jump':\r\n sprite.state = 'fall'\r\n sprite.rect.y -=1\r\n\r\n def update_window_position(self): # 窗口滑动代替主角移动\r\n third = self.game_window.x + self.game_window.width*1/3\r\n if self.player.rect.right > third and self.player.x_vel > 0 and self.game_window.right < self.end_x:\r\n self.game_window.x += self.player.x_vel\r\n self.start_x = self.game_window.x\r\n\r\n def draw(self, surface):\r\n self.bg.blit(self.BG, self.game_window, self.game_window) # (目标图层,目标图层的左上角放到原图层的位置,特定部分)\r\n self.bg.blit(self.player.image, self.player.rect)\r\n self.brick_group.draw(self.bg)\r\n self.box_group.draw(self.bg)\r\n self.enemy_group.draw(self.bg)\r\n self.dying_group.draw(self.bg)\r\n # self.coin_group.draw(self.bg)\r\n\r\n surface.blit(self.bg, (0, 0),self.game_window)\r\n\r\n self.info.start(self.game_info)\r\n self.info.update()\r\n self.info.draw(surface)\r\n\r\n def update_game_info(self):\r\n if self.player.dead:\r\n self.game_info['lives'] -= 1\r\n if self.game_info['lives'] == 0:\r\n self.next_state = 'game_over'\r\n else:\r\n self.next_state = 'load_screen'\r\n\r\n def check_checkpoint(self):\r\n checkpoint = pygame.sprite.spritecollideany(self.player, self.checkpoint_group)\r\n if checkpoint:\r\n if checkpoint.checkpoint_type == 0:\r\n self.enemy_group.add(self.enemy_group_dict[str(checkpoint.enemy_groupid)])\r\n checkpoint.kill()","sub_path":"level.py","file_name":"level.py","file_ext":"py","file_size_in_byte":10504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"335276509","text":"\"\"\"\n uriparse is a standards compliant module for parsing an manipulating URIs\n\"\"\"\n\nimport re\nimport recordtype\n\n_URI_COMPONENTS = ('scheme', 'authority', 'path', 'query', 'fragment')\n\nclass SplitResultsContainer(recordtype.recordtype('SplitResultsContainer', _URI_COMPONENTS)):\n \"\"\"\n This class acts as a container for the results of the regex based URI componentization.\n \"\"\"\n\n # RFC 3986 Appendix B\n # https://goo.gl/WVNwU3\n RE = re.compile(br\"\"\"\n (?:([^:/?#]+):)? # scheme\n (?://([^/?#]*))? # authority\n ([^?#]*) # path\n (?:\\?([^#]*))? 
# query\n (?:\\#(.*))? # fragment\n \"\"\", flags=re.VERBOSE)\n\n # RFC 3986 2.2 gen-delims\n # https://goo.gl/AuU5xb\n COLON, SLASH, QUEST, HASH, LBRACKET, RBRACKET, AT = (\n u':', u'/', u'?', u'#', u'[', u']', u'@'\n )\n\n # RFC 3.0 Components\n # https://goo.gl/BLVqii\n SCHEME, AUTH, USER, HOST, PORT, PATH, QUERY, FRAG = (\n 'scheme', 'authority', 'user', 'host', 'port', 'path', 'query', 'fragment'\n )\n\n EMPTY, EQL, DIGITS, AMP = '', '=', b'0123456789', u'&'\n\n @property\n def userinfo(self):\n if self.authority is not None:\n userinfo, delim, _ = self.authority.rpartition(self.AT)\n if delim:\n return userinfo\n else:\n return None\n\n @property\n def host(self):\n authority = self.authority\n if authority is None:\n return None\n _, _, hostinfo = authority.rpartition(self.AT)\n host, _, port = hostinfo.rpartition(self.COLON)\n if port.lstrip(self.DIGITS):\n return hostinfo\n else:\n return host\n\n @property\n def port(self):\n authority = self.authority\n if authority is None:\n return None\n _, present, port = authority.rpartition(self.COLON)\n if present and not port.lstrip(self.DIGITS):\n return port\n else:\n return None\n\n def _geturi(self):\n \"\"\"return full uri based on values stored in SplitResultsContainer properties\"\"\"\n scheme, authority, path, query, fragment = self\n result = []\n if scheme is not None:\n result.append(self.EMPTY.join([scheme, self.COLON]))\n if authority is not None:\n result.append(self.EMPTY.join([self.SLASH , self.SLASH, authority]))\n if path is not None:\n result.append(path)\n if query is not None:\n result.append(self.EMPTY.join([self.QUEST, query]))\n if fragment is not None:\n result.append(self.EMPTY.join([self.HASH, fragment]))\n return self.EMPTY.join(result)\n\n def getquery(self):\n params = {}\n if self.query:\n if self.AMP in self.query:\n for query in self.query.split(self.AMP):\n name, _, value = query.rpartition(self.EQL)\n params[name] = value\n else:\n name, _, value = self.query.rpartition(self.EQL)\n params[name] = value\n else:\n return None\n return params\n\n def appendquery(self, params):\n if params:\n if isinstance(params, dict):\n result = []\n for key, value in params.items():\n result.append(self.EQL.join([key, value]))\n if self.query:\n setattr(self, self.QUERY, self.AMP.join([self.query, self.AMP.join(result)]))\n else:\n setattr(self, self.QUERY, self.AMP.join(result))\n else:\n raise TypeError('argument must be a dict')\n return self._geturi()\n\n def appendpath(self, path):\n if path:\n if isinstance(path, list):\n if self.path.endswith(self.SLASH):\n setattr(self, self.PATH, self.EMPTY.join([self.path, self.SLASH.join(path)]))\n else:\n setattr(self, self.PATH, self.SLASH.join([self.path, self.SLASH.join(path)]))\n else:\n raise TypeError('argument must be a list')\n return self._geturi()\n\n def update(self, attribute, value):\n if attribute not in [self.SCHEME, self.AUTH, self.PATH, self.QUERY, self.FRAG]:\n AttributeError('{} attribute is not supported'.format(attribute))\n\n setattr(self, attribute, value)\n return self._geturi()\n\ndef splituri(uristring):\n \"\"\"\n This factory returns an instance of SplitResultsContainer that contains a 5 part tuple\n of each top level URI component as well as properties for authority sub components\n :///?#\n \"\"\"\n return SplitResultsContainer(*SplitResultsContainer.RE.match(uristring).groups())\n\ndef unsplituri(parts):\n \"\"\"\n This recomposes individual components back into a valid URI\n RFC 3986 5.3 https://goo.gl/kLYVDw\n \"\"\"\n scheme, authority, 
path, query, fragment = parts\n return SplitResultsContainer(scheme, authority, path, query, fragment)._geturi()\n","sub_path":"uriparse/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":5116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"414449963","text":"import w3lib.url\nimport w3lib.html\n\nfrom lxml import etree\nimport re, sys\nfrom hashlib import md5\n\nfrom feedgenerator import Rss201rev2Feed, Enclosure\nimport datetime\n\nimport MySQLdb\nfrom settings import DATABASES, DOWNLOADER_USER_AGENT\n\nurl_hash_regexp = re.compile('(#.*)?$')\n\nPOST_TIME_DISTANCE = 15 # minutes, RSS Feed Reader skip same titles created in 10 min interval\n\nFIELD_IDS = {'title': 1, 'description': 2, 'link': 3}\n\ndef save_post(conn, created, feed_id, post_fields):\n cur = conn.cursor()\n cur.execute(\"\"\"insert into frontend_post (md5sum, created, feed_id)\n values (%s, %s, %s)\"\"\", (post_fields['md5'], created, feed_id))\n print(cur._last_executed)\n\n post_id = conn.insert_id()\n for key in ['title', 'description', 'title_link']:\n if key in post_fields:\n #import pdb;pdb.set_trace()\n cur.execute(\"\"\"insert into frontend_postfield (field_id, post_id, `text`)\n values (%s, %s, %s)\"\"\", (FIELD_IDS[key], post_id, post_fields[key].encode('utf-8')))\n print(cur._last_executed)\n\ndef fill_time(feed_id, items):\n if not items:\n return []\n for item in items:\n #create md5\n h = md5('')\n for key in ['title', 'description', 'link']:\n if key in item:\n h.update(item[key].encode('utf-8')) \n item['md5'] = h.hexdigest()\n\n #fetch dates from db\n fetched_dates = {}\n db = get_conn()\n with db:\n quoted_hashes = ','.join([\"'%s'\" % (i['md5']) for i in items])\n\n cur = db.cursor()\n cur.execute(\"\"\"select p.md5sum, p.created, p.id\n from frontend_post p\n where p.md5sum in (%s)\n and p.feed_id=%s\"\"\" % (quoted_hashes, feed_id,))\n rows = cur.fetchall()\n print(cur._last_executed)\n for row in rows:\n md5hash = row[0]\n created = row[1]\n post_id = row[2]\n fetched_dates[md5hash] = created\n cur_time = datetime.datetime.utcnow()\n new_posts = []\n for item in items:\n if item['md5'] in fetched_dates:\n item['time'] = fetched_dates[item['md5']]\n else:\n item['time'] = cur_time\n save_post(db, cur_time, feed_id, item)\n cur_time -= datetime.timedelta(minutes=POST_TIME_DISTANCE)\n\n\ndef decode(text, encoding): # it's strange but true\n if isinstance(text, unicode):\n return text\n else:\n return text.decode(encoding)\n\ndef element_to_unicode(element, encoding):\n if isinstance(element, basestring): # attribute\n return decode(element, encoding)\n\n s = [decode(element.text, encoding)] if element.text else []\n for sub_element in element:\n s.append(decode(etree.tostring(sub_element), encoding))\n return u''.join(s)\n\ndef _build_link(html, doc_url, url):\n base_url = w3lib.html.get_base_url(html, doc_url)\n return w3lib.url.urljoin_rfc(base_url, url).decode('utf-8')\n\ndef buildFeed(response, feed_config):\n response.selector.remove_namespaces()\n\n selector = response.selector\n tree = selector.root.getroottree()\n # get data from html \n items = []\n #import pdb;pdb.set_trace()\n for node in selector.xpath(feed_config['xpath']):\n item = {}\n required_count = 0\n required_found = 0\n for field_name in ['title', 'description', 'link']:\n if field_name in feed_config['fields']:\n if feed_config['required'][field_name]:\n required_count += 1\n\n extracted = node.xpath(feed_config['fields'][field_name]).extract()\n if 
extracted:\n item[field_name] = u''.join(extracted)\n if feed_config['required'][field_name]:\n required_found += 1\n if field_name == 'link':\n item['link'] = _build_link(response.body_as_unicode(), feed_config['uri'], item[field_name])\n\n if required_count == required_found:\n items.append(item)\n\n title = selector.xpath('//title/text()').extract()\n\n #build feed\n feed = Rss201rev2Feed(\n title = title[0] if title else 'Polite Pol: ' + feed_config['uri'],\n link=feed_config['uri'],\n description=\"Generated by PolitePol.com.\\n\"+\\\n \"Source page url: \" + feed_config['uri'],\n language=\"en\",\n )\n\n fill_time(feed_config['id'], items)\n\n for item in items:\n title = item['title'] if 'title' in item else ''\n desc = item['description'] if 'description' in item else ''\n time = item['time'] if 'time' in item else datetime.datetime.utcnow()\n if 'link' in item:\n link = item['link']\n else:\n link = url_hash_regexp.sub('#' + md5((title+desc).encode('utf-8')).hexdigest(), feed_config['uri'])\n feed.add_item(\n title = title,\n link = link,\n unique_id = link,\n description = desc,\n #enclosure=Enclosure(fields[4], \"32000\", \"image/jpeg\") if 4 in fields else None, #\"Image\"\n pubdate = time\n )\n return feed.writeString('utf-8')\n\ndef getFeedData(request, feed_id):\n # get url, xpathes\n feed = {}\n db = get_conn()\n with db:\n cur = db.cursor()\n cur.execute(\"\"\"select f.uri, f.xpath, fi.name, ff.xpath, fi.required from frontend_feed f\n right join frontend_feedfield ff on ff.feed_id=f.id\n left join frontend_field fi on fi.id=ff.field_id\n where f.id=%s\"\"\", (feed_id,))\n rows = cur.fetchall()\n\n for row in rows:\n if not feed:\n feed['id'] = feed_id\n feed['uri'] = row[0]\n feed['xpath'] = row[1]\n feed['fields'] = {}\n feed['required'] = {}\n feed['fields'][row[2]] = row[3]\n feed['required'][row[2]] = row[4]\n\n if feed:\n return [feed['uri'], feed]\n else:\n return 'Feed generator error: config of feed is empty'\n\ndef get_conn():\n creds = DATABASES['default']\n db = MySQLdb.connect(host=creds['HOST'], port=int(creds['PORT']), user=creds['USER'], passwd=creds['PASSWORD'], db=creds['NAME'], init_command='SET NAMES utf8mb4')\n db.autocommit(True)\n return db\n","sub_path":"feed.py","file_name":"feed.py","file_ext":"py","file_size_in_byte":6348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"11880385","text":"#!env/bin/python\n# -*- coding: utf-8 -*-\n\nimport telepot\nimport time\nimport ConfigParser\nfrom cobe.brain import Brain\n\nconfig = ConfigParser.ConfigParser()\nconfig.read(\"bot.cfg\")\n\ndef handle(msg):\n content_type, chat_type, chat_id = telepot.glance(msg)\n if content_type == 'text':\n brain = Brain(config.get('Brain', 'path') + str(chat_id) + \".brain\")\n brain.learn(msg['text'])\n if 'reply_to_message' in msg and msg['reply_to_message']['from']['username'] == \"Braulio_bot\":\n bot.sendMessage(chat_id,brain.reply(msg['text']),reply_to_message_id=msg['message_id'])\n elif 'braulio' in msg['text'].lower():\n bot.sendMessage(chat_id,brain.reply(msg['text']).replace(\"Braulio\",msg['from']['first_name']))\n\ntoken = config.get('General', 'token')\nbot = telepot.Bot(token)\nbot.message_loop(handle)\n\nprint('Listening')\n\nwhile 1:\n time.sleep(10)\n\n","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"279095017","text":"import os\nimport dnacauldron as 
dc\n\nthis_directory = os.path.dirname(os.path.realpath(__file__))\nparts_and_oligos_folder = os.path.join(this_directory, \"parts_and_oligos\")\nassembly_plan_path = os.path.join(this_directory, \"assembly_plan.csv\")\nflawed_plan_path = os.path.join(this_directory, \"assembly_plan_flawed.csv\")\n\n\ndef test_BASIC_assembly():\n repo = dc.SequenceRepository()\n repo.import_records(folder=parts_and_oligos_folder)\n plan = dc.AssemblyPlan.from_spreadsheet(\n path=assembly_plan_path, assembly_class=\"from_spreadsheet\"\n )\n simulation = plan.simulate(repo)\n stats = simulation.compute_stats()\n assert stats[\"valid_assemblies\"] == 10\n assert stats[\"errored_assemblies\"] == 0\n\n\ndef test_BASIC_assembly_flawed():\n repo = dc.SequenceRepository()\n repo.import_records(folder=parts_and_oligos_folder)\n plan = dc.AssemblyPlan.from_spreadsheet(\n path=flawed_plan_path, assembly_class=\"from_spreadsheet\"\n )\n simulation = plan.simulate(repo)\n stats = simulation.compute_stats()\n assert stats[\"errored_assemblies\"] == 3\n assert stats[\"cancelled_assemblies\"] == 1\n assert stats[\"valid_assemblies\"] == 8\n simulation.write_report(\"@memory\")\n","sub_path":"tests/test_BASIC_assembly.py/test_BASIC_assembly.py","file_name":"test_BASIC_assembly.py","file_ext":"py","file_size_in_byte":1210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"505224058","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\n\t# Paramètres et sauvegarde des figures\ndef createPlot(axis = False, xScaling = False, yScaling = False, xLabel = False, yLabel = False,\n\t\t\t originLines = False, grid = False, invert = False, legend = False, title = False, file = False):\n\tif axis: plt.axis(axis)\n\tif xScaling: plt.xscale(xScaling)\n\tif yScaling: plt.yscale(yScaling)\n\tif xLabel: plt.xlabel(xLabel)\n\tif yLabel: plt.ylabel(yLabel)\n\tif originLines:\n\t\tplt.axhline(y = 0, color = \"k\")\n\t\tplt.axvline(x = 0, color = \"k\")\n\tif grid:\n\t\tplt.minorticks_on()\n\t\tplt.grid(True, \"minor\", color = \"grey\", linestyle = \"--\", linewidth = 0.25)\n\t\tplt.grid(True, \"major\", color = \"black\", linestyle = \"--\", linewidth = 0.5)\n\tif invert:\n\t\tif ((invert == \"xAxis\") or (invert == \"both\")):\n\t\t\tplt.gca().invert_xaxis()\n\t\tif ((invert == \"yAxis\") or (invert == \"both\")):\n\t\t\tplt.gca().invert_yaxis()\n\tif legend: plt.legend()\n\tif title: plt.title(title)\n\tif file: plt.savefig(file)\n\ndef neutronInteraction(absorptionProb, scatterProb, lam, plateDepth, neutronAmount):\n\tsumReflected, sumAbsorbed, sumTransmitted = 0, 0, 0\n\n\tfor i in range(0, neutronAmount):\n\t\ttheta, x = 0.0, 0.0\n\n\t\twhile True:\n\t\t\tr = np.random.random()\n\t\t\tl = -lam*np.log(r)\n\t\t\tx += l*np.cos(theta)\n\n\t\t\t\t# Si (x < 0), le neutron est réfléchi\n\t\t\tif (x < 0):\n\t\t\t\tsumReflected += 1\n\t\t\t\tbreak\n\t\t\t\t# Si (x > d), le neutron a traversé la plaque\n\t\t\telif (x > plateDepth):\n\t\t\t\tsumTransmitted += 1\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tr = np.random.random()\n\t\t\t\t\t# Si (r < pA), le neutron est absorbé\n\t\t\t\tif (r < absorptionProb):\n\t\t\t\t\tsumAbsorbed += 1\n\t\t\t\t\tbreak\n\t\t\t\t\t# Si (pA <= r < (pA + pS)), le neutron est dispersé dans la plaque, mais n'est pas absorbé\n\t\t\t\telif (r < (absorptionProb + scatterProb)):\n\t\t\t\t\ttheta = np.arccos(1 - 2*r)\n\t\t\t\t\tcontinue\n\t\t\t\t\t# Si (r >= (pA + pS)), le neutron est toujours dans la plaque, mais n'est ni absorbé, ni 
dispersé\n\t\t\t\telse:\n\t\t\t\t\tcontinue\n\n\t\t# Taux de neutrons réfléchis/absorbés/transmis\n\treflectionRate = sumReflected/neutronAmount\n\tabsorptionRate = sumAbsorbed/neutronAmount\n\ttransmissionRate = sumTransmitted/neutronAmount\n\n\t\t# Incertitudes sur les taux neutrons réfléchis/absorbés/transmis\n\tsigmaReflectionRate = rateUncertainty(reflectionRate, neutronAmount)\n\tsigmaAbsorptionRate = rateUncertainty(absorptionRate, neutronAmount)\n\tsigmaTransmissionRate = rateUncertainty(transmissionRate, neutronAmount)\n\n\t\t# Incertitudes sur la somme des neutrons réfléchis/absorbés/transmis\n\tsigmaReflectionSum = sumUncertainty(sumReflected, neutronAmount)\n\tsigmaAbsorptionSum = sumUncertainty(sumAbsorbed, neutronAmount)\n\tsigmaTransmissionSum = sumUncertainty(sumTransmitted, neutronAmount)\n\n\trates = [reflectionRate, absorptionRate, transmissionRate]\n\trateSigmas = [sigmaReflectionRate, sigmaAbsorptionRate, sigmaTransmissionRate]\n\tsumSigmas = [sigmaReflectionSum, sigmaAbsorptionSum, sigmaTransmissionSum]\n\n\treturn rates, rateSigmas, sumSigmas\n\n\t# Formules d'incertitudes\nrateUncertainty = lambda X, N : np.sqrt(X*(1 - X)/(N - 1))\nsumUncertainty = lambda X, N : np.sqrt(X*(N - X)/(N - 1))\n\n\t# Initialisations des valeurs\npA = 0.1\ndeltaPS = 0.1\npS = np.arange(0, (1 - pA) + deltaPS, deltaPS)\nlam = 0.4\nd = 10\nneutronAmount = 100000\nsimulationRounds = pS.size\n\n\t# Initialisations des tableaux\nR, A, T = np.zeros(simulationRounds), np.zeros(simulationRounds), np.zeros(simulationRounds)\nsigR, sigA, sigT = np.zeros(simulationRounds), np.zeros(simulationRounds), np.zeros(simulationRounds)\n\n\t# Boucle de simulations\nfor i in range(0, simulationRounds):\n\trates, rateSigmas, sumSigmas = neutronInteraction(pA, pS[i], lam, d, neutronAmount)\n\n\tR[i] = rates[0]\n\tA[i] = rates[1]\n\tT[i] = rates[2]\n\n\tsigR[i] = rateSigmas[0]\n\tsigA[i] = rateSigmas[1]\n\tsigT[i] = rateSigmas[2]\n\nsigR, sigA, sigT = np.max(sigR), np.max(sigA), np.max(sigT)\n\n\t# Dimensions de la figure (width, height)\nfigsize = (12, 8)\n\t# Création de la 1re figure et des axes\nfig, axes = plt.subplots(1, 1, figsize = figsize)\n\naxes.plot(pS, R, color = \"r\", label = f\"Réflectance selon pS, ±{sigR:.4f}\")\naxes.plot(pS, A, color = \"g\", label = f\"Absorbance selon pS, ±{sigA:.4f}\")\naxes.plot(pS, T, color = \"b\", label = f\"Transmission selon pS, ±{sigT:.4f}\")\n\n\ncreatePlot(xLabel = \"pS\", yLabel = \"Coefficients\", originLines = True, grid = True, legend = True,\n\t\t title = \"Interactions des neutrons sur un matériaux selon sa propriété de dispersion\\n\"\n\t\t\t\t f\"avec pA = {pA}, λ = {lam} et d = {d}\", file = \"graphique4.png\")\nplt.show()\n","sub_path":"source/tp/6/question4.py","file_name":"question4.py","file_ext":"py","file_size_in_byte":4475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"7388133","text":"import collections\nimport copy\nimport os\nimport os.path as osp\nimport time\n\nimport chainer\nimport fcn\nimport numpy as np\nimport pandas\nimport skimage.io\nimport skimage.util\nimport tqdm\n\nimport grasp_fusion_lib\n\n\nclass FCNTrainer(object):\n\n \"\"\"Training class for FCN models.\n\n Parameters\n ----------\n device: int\n GPU id, negative values represents use of CPU.\n model: chainer.Chain\n NN model.\n optimizer: chainer.Optimizer\n Optimizer.\n iter_train: chainer.Iterator\n Dataset itarator for training dataset.\n iter_valid: chainer.Iterator\n Dataset itarator for validation dataset.\n out: 
str\n Log output directory.\n max_iter: int\n Max iteration to stop training iterations.\n interval_validate: None or int\n If None, validation is never run. (default: 4000)\n \"\"\"\n\n def __init__(\n self,\n device,\n model,\n optimizer,\n iter_train,\n iter_valid,\n out,\n max_iter,\n interval_validate=4000,\n interval_save=None,\n ):\n self.device = device\n self.model = model\n self.optimizer = optimizer\n self.iter_train = iter_train\n self.iter_valid = iter_valid\n self.out = out\n self.epoch = 0\n self.iteration = 0\n self.max_iter = max_iter\n self.interval_validate = interval_validate\n self.interval_save = interval_save\n self.log_headers = [\n 'epoch',\n 'iteration',\n 'elapsed_time',\n 'train/loss',\n 'train/lbl_cls/loss',\n 'train/lbl_cls/acc_cls',\n 'train/lbl_cls/mean_iu',\n 'train/lbl_suc/loss',\n 'train/lbl_suc/acc_cls',\n 'train/lbl_suc/mean_iu',\n 'valid/loss',\n 'valid/lbl_cls/loss',\n 'valid/lbl_cls/acc_cls',\n 'valid/lbl_cls/mean_iu',\n 'valid/lbl_suc/loss',\n 'valid/lbl_suc/acc_cls',\n 'valid/lbl_suc/mean_iu',\n ]\n if not osp.exists(self.out):\n os.makedirs(self.out)\n with open(osp.join(self.out, 'log.csv'), 'w') as f:\n f.write(','.join(self.log_headers) + '\\n')\n\n def validate(self, n_viz=9):\n \"\"\"Validate current model using validation dataset.\n\n Parameters\n ----------\n n_viz: int\n Number fo visualization.\n\n Returns\n -------\n log: dict\n Log values.\n \"\"\"\n iter_valid = copy.copy(self.iter_valid)\n losses = []\n lbl_cls_trues, lbl_cls_preds = [], []\n lbl_suc_trues, lbl_suc_preds = [], []\n vizs = []\n dataset = iter_valid.dataset\n desc = 'valid [iteration=%08d]' % self.iteration\n for batch in tqdm.tqdm(iter_valid, desc=desc, total=len(dataset),\n ncols=80, leave=False):\n img, lbl_cls_true, lbl_suc_true = zip(*batch)\n batch = map(fcn.datasets.transform_lsvrc2012_vgg16, batch)\n with chainer.no_backprop_mode(), \\\n chainer.using_config('train', False):\n in_vars = fcn.utils.batch_to_vars(batch, device=self.device)\n loss_cls, loss_suc = self.model(*in_vars)\n loss = loss_cls + 100 * loss_suc\n losses.append({'__sum__': float(loss.data),\n 'lbl_cls': float(loss_cls.data),\n 'lbl_suc': float(loss_suc.data)})\n score_cls = self.model.score_cls\n lbl_cls_pred = chainer.functions.argmax(score_cls, axis=1)\n lbl_cls_pred = chainer.cuda.to_cpu(lbl_cls_pred.data)\n score_suc = self.model.score_suc\n lbl_suc_pred = chainer.functions.argmax(score_suc, axis=1)\n lbl_suc_pred = chainer.cuda.to_cpu(lbl_suc_pred.data)\n hmp_suc_pred = chainer.functions.softmax(score_suc)\n hmp_suc_pred = chainer.cuda.to_cpu(hmp_suc_pred.data)[:, 1, :, :]\n for im, lct, lcp, lst, lsp, hsp in \\\n zip(img, lbl_cls_true, lbl_cls_pred,\n lbl_suc_true, lbl_suc_pred, hmp_suc_pred):\n lbl_cls_trues.append(lct)\n lbl_cls_preds.append(lcp)\n lbl_suc_trues.append(lst)\n lbl_suc_preds.append(lsp)\n if len(vizs) < n_viz:\n viz_cls = fcn.utils.visualize_segmentation(\n lbl_pred=lcp, lbl_true=lct,\n img=im, n_class=self.model.n_class)\n viz_suc = fcn.utils.visualize_segmentation(\n lbl_pred=lsp, lbl_true=lst,\n img=im, n_class=self.model.n_class)\n hst = np.zeros_like(hsp)\n hst.fill(0.5)\n hst[lst == 1] = 1\n viz_suc = np.hstack([\n viz_suc,\n np.vstack([\n grasp_fusion_lib.image.colorize_heatmap(hst),\n grasp_fusion_lib.image.colorize_heatmap(hsp),\n ]),\n ])\n viz = np.hstack((viz_cls, viz_suc))\n vizs.append(viz)\n # save visualization\n out_viz = osp.join(self.out, 'visualizations_valid',\n 'iter%08d.jpg' % self.iteration)\n if not osp.exists(osp.dirname(out_viz)):\n 
os.makedirs(osp.dirname(out_viz))\n viz = fcn.utils.get_tile_image(vizs)\n skimage.io.imsave(out_viz, viz)\n # generate log\n acc_lbl_cls = fcn.utils.label_accuracy_score(\n lbl_cls_trues, lbl_cls_preds, self.model.n_class)\n acc_lbl_suc = fcn.utils.label_accuracy_score(\n lbl_suc_trues, lbl_suc_preds, 2)\n loss = pandas.DataFrame(losses).mean()\n log = {\n 'valid/loss': loss['__sum__'],\n 'valid/lbl_cls/loss': loss['lbl_cls'],\n 'valid/lbl_cls/acc': acc_lbl_cls[0],\n 'valid/lbl_cls/acc_cls': acc_lbl_cls[1],\n 'valid/lbl_cls/mean_iu': acc_lbl_cls[2],\n 'valid/lbl_cls/fwavacc': acc_lbl_cls[3],\n 'valid/lbl_suc/loss': loss['lbl_suc'],\n 'valid/lbl_suc/acc': acc_lbl_suc[0],\n 'valid/lbl_suc/acc_cls': acc_lbl_suc[1],\n 'valid/lbl_suc/mean_iu': acc_lbl_suc[2],\n 'valid/lbl_suc/fwavacc': acc_lbl_suc[3],\n }\n # finalize\n return log\n\n def save_model(self):\n out_model_dir = osp.join(self.out, 'models')\n if not osp.exists(out_model_dir):\n os.makedirs(out_model_dir)\n out_model = osp.join(\n out_model_dir, '%s_iter%08d.npz' %\n (self.model.__class__.__name__, self.iteration))\n chainer.serializers.save_npz(out_model, self.model)\n\n def train(self):\n \"\"\"Train the network using the training dataset.\n\n Parameters\n ----------\n None\n\n Returns\n -------\n None\n \"\"\"\n stamp_start = time.time()\n for iteration, batch in tqdm.tqdm(enumerate(self.iter_train),\n desc='train', total=self.max_iter,\n ncols=80):\n self.epoch = self.iter_train.epoch\n self.iteration = iteration\n\n ############\n # validate #\n ############\n\n if self.interval_validate and \\\n self.iteration % self.interval_validate == 0:\n log = collections.defaultdict(str)\n log_valid = self.validate()\n log.update(log_valid)\n log['epoch'] = self.iter_train.epoch\n log['iteration'] = iteration\n log['elapsed_time'] = time.time() - stamp_start\n with open(osp.join(self.out, 'log.csv'), 'a') as f:\n f.write(','.join(str(log[h]) for h in self.log_headers) +\n '\\n')\n self.save_model()\n\n #########\n # train #\n #########\n\n batch = map(fcn.datasets.transform_lsvrc2012_vgg16, batch)\n in_vars = fcn.utils.batch_to_vars(batch, device=self.device)\n self.model.zerograds()\n loss_cls, loss_suc = self.model(*in_vars)\n loss = loss_cls + 100 * loss_suc\n\n if loss_cls is not None and loss_suc is not None:\n loss.backward()\n self.optimizer.update()\n\n _, lbl_cls_true, lbl_suc_true = zip(*batch)\n lbl_cls_pred = chainer.functions.argmax(\n self.model.score_cls, axis=1)\n lbl_cls_pred = chainer.cuda.to_cpu(lbl_cls_pred.data)\n lbl_suc_pred = chainer.functions.argmax(\n self.model.score_suc, axis=1)\n lbl_suc_pred = chainer.cuda.to_cpu(lbl_suc_pred.data)\n acc_lbl_cls = fcn.utils.label_accuracy_score(\n lbl_cls_true, lbl_cls_pred, self.model.n_class)\n acc_lbl_suc = fcn.utils.label_accuracy_score(\n lbl_suc_true, lbl_suc_pred, 2)\n log = collections.defaultdict(str)\n log_train = {\n 'train/loss': float(loss.data),\n 'train/lbl_cls/loss': float(loss_cls.data),\n 'train/lbl_cls/acc': acc_lbl_cls[0],\n 'train/lbl_cls/acc_cls': acc_lbl_cls[1],\n 'train/lbl_cls/mean_iu': acc_lbl_cls[2],\n 'train/lbl_cls/fwavacc': acc_lbl_cls[3],\n 'train/lbl_suc/loss': float(loss_suc.data),\n 'train/lbl_suc/acc': acc_lbl_suc[0],\n 'train/lbl_suc/acc_cls': acc_lbl_suc[1],\n 'train/lbl_suc/mean_iu': acc_lbl_suc[2],\n 'train/lbl_suc/fwavacc': acc_lbl_suc[3],\n }\n log['epoch'] = self.iter_train.epoch\n log['iteration'] = iteration\n log['elapsed_time'] = time.time() - stamp_start\n log.update(log_train)\n with open(osp.join(self.out, 'log.csv'), 'a') as 
f:\n f.write(','.join(str(log[h]) for h in self.log_headers) +\n '\\n')\n\n if self.interval_save and \\\n self.iteration != 0 and \\\n self.iteration % self.interval_save == 0:\n self.save_model()\n\n if iteration >= self.max_iter:\n self.save_model()\n break\n","sub_path":"demos/grasp_prediction_arc2017/grasp_prediction_arc2017_lib/contrib/grasp_prediction_arc2017/trainers/fcn_trainer.py","file_name":"fcn_trainer.py","file_ext":"py","file_size_in_byte":10656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"363858038","text":"import os;\r\nfrom finviz.screener import Screener\r\n\r\ndef get_screener_name():\r\n return 'greenPlus15'\r\n\r\ndef dump_to_csv(dirName):\r\n\r\n if not os.path.exists(dirName):\r\n os.makedirs(dirName)\r\n\r\n\t#http://freetraderchat.com/blog/top-5-finviz-scans/\r\n\t#https://finviz.com/screener.ashx?v=111&f=sh_curvol_o200,sh_price_u10,ta_change_u,ta_changeopen_u,ta_perf_dup,ta_perf2_d15o&ft=4\r\n\r\n filters = ['sh_curvol_o200','sh_price_u10','ta_change_u','ta_changeopen_u','ta_perf_dup','ta_perf2_d15o'] # Shows companies in NASDAQ which are in the S&P500\r\n stock_list = Screener(filters=filters, order='ticker') \r\n\r\n\r\n print((stock_list))\r\n stock_list.to_csv(dirName,get_screener_name())\r\n\r\n#dump_to_csv('screenerOutput')","sub_path":"screeners/greenPlus15.py","file_name":"greenPlus15.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"203869429","text":"import os\nimport numpy as np\nfrom bert4keras.backend import keras, search_layer, K\nfrom bert4keras.tokenizers import Tokenizer\nfrom bert4keras.models import build_transformer_model\nfrom bert4keras.optimizers import Adam\nfrom bert4keras.snippets import sequence_padding, DataGenerator\nfrom keras.layers import Lambda, Dense\nfrom keras.utils import to_categorical\n\nnum_classes = 3\nmaxlen = 128\nbatch_size = 32\n\n# BERT base\nconfig_path = './chinese_L-12_H-768_A-12/bert_config.json'\ncheckpoint_path = './chinese_L-12_H-768_A-12/bert_model.ckpt'\ndict_path = './chinese_L-12_H-768_A-12/vocab.txt'\n\ntexts = []\nlabels_index = {u'健康': 0, u'教育': 1, u'财经': 2}\nlabels = []\nTEXT_PATH = '../preprocessing'\nfor name in os.listdir(TEXT_PATH):\n if name.split('.')[-1] == 'txt':\n class_name = name.split('.')[0]\n fpath = os.path.join(TEXT_PATH, name)\n with open(fpath, encoding='utf-8') as f:\n for l in f.readlines():\n texts.append(l.split(' '))\n labels.append(labels_index[class_name])\n print(fpath)\n print(class_name)\n\nfrom sklearn.model_selection import train_test_split\n\nX_train, X_test, y_train, y_test = train_test_split(texts, labels, test_size=0.2, random_state=42)\ntrain_data = [(X_train[i], y_train[i]) for i in range(len(y_train))]\ntest_data = [(X_test[i], y_test[i]) for i in range(len(y_test))]\n\n# 建立分词器\ntokenizer = Tokenizer(dict_path, do_lower_case=True)\n\n\nclass data_generator(DataGenerator):\n \"\"\"数据生成器\"\"\"\n\n def __iter__(self, random=False):\n batch_token_ids, batch_segment_ids, batch_labels = [], [], []\n for is_end, (text, label) in self.sample(random):\n token_ids, segment_ids = tokenizer.encode(text, maxlen=maxlen)\n batch_token_ids.append(token_ids)\n batch_segment_ids.append(segment_ids)\n batch_labels.append([label])\n if len(batch_token_ids) == self.batch_size or is_end:\n batch_token_ids = sequence_padding(batch_token_ids)\n batch_segment_ids = sequence_padding(batch_segment_ids)\n batch_labels = 
sequence_padding(batch_labels)\n yield [batch_token_ids, batch_segment_ids], batch_labels\n batch_token_ids, batch_segment_ids, batch_labels = [], [], []\n\n\n# 转换数据集\ntrain_generator = data_generator(train_data, batch_size)\ntest_generator = data_generator(test_data, batch_size)\n\nfrom keras.callbacks import TensorBoard, EarlyStopping, ModelCheckpoint\nfrom keras import backend as K\n\n\ndef evaluate(data):\n total, right, true_positives, possible_positives, predicted_positives = 0., 0., 0., 0., 0.\n for x_true, y_true in data:\n y_pred = model.predict(x_true).argmax(axis=1)\n y_true = y_true[:, 0]\n right += (y_true == y_pred).sum()\n true_positives += K.sum(K.round(K.clip(y_true * y_pred, 0, 1))).numpy()\n possible_positives += K.sum(K.round(K.clip(y_true, 0, 1))).numpy()\n predicted_positives += K.sum(K.round(K.clip(y_pred, 0, 1))).numpy()\n total += len(y_true)\n accuracy = right / total\n recall = true_positives / (possible_positives + K.epsilon())\n precision = true_positives / (predicted_positives + K.epsilon())\n f1_score = 2 * ((precision * recall) / (precision + recall + K.epsilon()))\n return accuracy, recall, precision, f1_score\n\n\nclass Evaluator(keras.callbacks.Callback):\n \"\"\"评估与保存\"\"\"\n def __init__(self):\n self.best_val_acc = 0.\n\n def on_epoch_end(self, epoch, logs=None):\n val_acc, val_recall, val_precision, val_f1 = evaluate(test_generator)\n if val_acc > self.best_val_acc:\n self.best_val_acc = val_acc\n print(u'val_acc: %.5f, best_val_acc: %.5f\\n' % (val_acc, self.best_val_acc))\n print(u'val_recall: %.5f\\n' % val_recall)\n print(u'val_precision: %.5f\\n' % val_precision)\n print(u'val_f1: %.5f\\n' % val_f1)\n\n\n# 加载预训练模型\nbert = build_transformer_model(\n config_path=config_path,\n checkpoint_path=checkpoint_path,\n return_keras_model=False,\n)\n\noutput = Lambda(lambda x: x[:, 0])(bert.model.output)\noutput = Dense(num_classes, activation='softmax', kernel_initializer=bert.initializer)(output)\n\nmodel = keras.models.Model(bert.model.input, output)\nmodel.summary()\n\nmodel.compile(\n loss='sparse_categorical_crossentropy',\n optimizer=Adam(2e-5),\n metrics=['sparse_categorical_accuracy'],\n)\n\nevaluator = Evaluator()\n\nmy_callbacks = [\n EarlyStopping(patience=5),\n TensorBoard(log_dir='./logs'),\n evaluator,\n ModelCheckpoint('BERT.h5', monitor='val_acc', save_best_only=True, mode='auto'),\n]\n\nmodel.fit(\n train_generator.forfit(),\n steps_per_epoch=len(train_generator),\n epochs=50,\n callbacks=my_callbacks\n)\n","sub_path":"method/BERT.py","file_name":"BERT.py","file_ext":"py","file_size_in_byte":4791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"406895074","text":"\"\"\"empty message\n\nRevision ID: bd668186fe19\nRevises: 465745427642\nCreate Date: 2018-11-10 21:21:15.501980\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import postgresql\n\n# revision identifiers, used by Alembic.\nrevision = 'bd668186fe19'\ndown_revision = '465745427642'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.add_column('users', sa.Column('bread_count', sa.Integer(), nullable=True))\n op.add_column('users', sa.Column('fish_count', sa.Integer(), nullable=True))\n op.add_column('users', sa.Column('streak', sa.Integer(), nullable=True))\n op.drop_column('users', 'num_of_neg_recipes')\n op.drop_column('users', 'num_of_pos_recipes')\n op.drop_column('users', 'is_fitted')\n op.drop_column('users', 'eval_dish_ids')\n op.drop_column('users', 'num_of_pos_dishes')\n op.drop_column('users', 'y')\n op.drop_column('users', 'X')\n op.drop_column('users', 'prev_dish')\n op.drop_column('users', 'clf')\n op.drop_column('users', 'num_of_dishes')\n op.drop_column('users', 'num_of_neg_dishes')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('users', sa.Column('num_of_neg_dishes', sa.INTEGER(), autoincrement=False, nullable=True))\n op.add_column('users', sa.Column('num_of_dishes', sa.INTEGER(), autoincrement=False, nullable=True))\n op.add_column('users', sa.Column('clf', postgresql.BYTEA(), autoincrement=False, nullable=True))\n op.add_column('users', sa.Column('prev_dish', postgresql.BYTEA(), autoincrement=False, nullable=True))\n op.add_column('users', sa.Column('X', postgresql.BYTEA(), autoincrement=False, nullable=True))\n op.add_column('users', sa.Column('y', postgresql.BYTEA(), autoincrement=False, nullable=True))\n op.add_column('users', sa.Column('num_of_pos_dishes', sa.INTEGER(), autoincrement=False, nullable=True))\n op.add_column('users', sa.Column('eval_dish_ids', postgresql.BYTEA(), autoincrement=False, nullable=True))\n op.add_column('users', sa.Column('is_fitted', sa.INTEGER(), autoincrement=False, nullable=True))\n op.add_column('users', sa.Column('num_of_pos_recipes', sa.INTEGER(), autoincrement=False, nullable=True))\n op.add_column('users', sa.Column('num_of_neg_recipes', sa.INTEGER(), autoincrement=False, nullable=True))\n op.drop_column('users', 'streak')\n op.drop_column('users', 'fish_count')\n op.drop_column('users', 'bread_count')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/bd668186fe19_.py","file_name":"bd668186fe19_.py","file_ext":"py","file_size_in_byte":2572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"366184238","text":"'''\nExtractors that operate primarily or exclusively on Text stimuli.\n'''\n\nfrom featurex.stimuli.text import TextStim, ComplexTextStim\nfrom featurex.extractors import Extractor\nfrom featurex.support.exceptions import FeatureXError\nfrom featurex.support.decorators import requires_nltk_corpus\nfrom featurex.datasets.text import fetch_dictionary, datasets\nimport numpy as np\nfrom featurex.core import Value, Event\nimport pandas as pd\nfrom six import string_types\nfrom collections import defaultdict\n\n# Optional dependencies\ntry:\n import nltk\nexcept ImportError:\n pass\n\nclass TextExtractor(Extractor):\n\n ''' Base Text Extractor class; all subclasses can only be applied to text.\n '''\n target = TextStim\n\n\nclass ComplexTextExtractor(Extractor):\n\n ''' Base ComplexTextStim Extractor class; all subclasses can only be\n applied to ComplexTextStim instance.\n '''\n target = ComplexTextStim\n\n\nclass DictionaryExtractor(TextExtractor):\n\n ''' A generic dictionary-based extractor that supports extraction of\n arbitrary features contained in a lookup table.\n Args:\n dictionary (str, DataFrame): The dictionary containing the feature\n values. 
Either a string giving the path to the dictionary file,\n or a pandas DF. Format must be tab-delimited, with the first column\n containing the text key used for lookup. Subsequent columns each\n represent a single feature that can be used in extraction.\n variables (list): Optional subset of columns to keep from the\n dictionary.\n missing: Value to insert if no lookup value is found for a text token.\n Defaults to numpy's NaN.\n '''\n\n def __init__(self, dictionary, variables=None, missing=np.nan):\n if isinstance(dictionary, string_types):\n dictionary = pd.read_csv(dictionary, sep='\\t', index_col=0)\n self.data = dictionary\n self.variables = variables\n if variables is not None:\n self.data = self.data[variables]\n # Set up response when key is missing\n self.missing = missing\n super(DictionaryExtractor, self).__init__()\n\n def _extract(self, stim):\n if stim.text not in self.data.index:\n vals = pd.Series(self.missing, self.variables)\n else:\n vals = self.data.loc[stim.text].fillna(self.missing)\n return Value(stim, self, vals.to_dict())\n\n\nclass PredefinedDictionaryExtractor(DictionaryExtractor):\n\n def __init__(self, variables, missing=np.nan, case_sensitive=True):\n\n if isinstance(variables, (list, tuple)):\n _vars = {}\n for v in variables:\n v = v.split('/')\n if v[0] not in _vars:\n _vars[v[0]] = []\n if len(v) == 2:\n _vars[v[0]].append(v[1])\n variables = _vars\n\n dicts = []\n for k, v in variables.items():\n d = fetch_dictionary(k)\n if not case_sensitive:\n d.index = d.index.str.lower()\n if v:\n d = d[v]\n d.columns = ['%s_%s' % (k, c) for c in d.columns]\n dicts.append(d)\n\n dictionary = pd.concat(dicts, axis=1, join='outer')\n super(PredefinedDictionaryExtractor, self).__init__(dictionary, missing=missing)\n\n\nclass LengthExtractor(TextExtractor):\n\n ''' Extracts the length of the text in characters. '''\n\n def _extract(self, stim):\n return Value(stim, self, {'text_length': len(stim.text)})\n\n\nclass NumUniqueWordsExtractor(TextExtractor):\n\n ''' Extracts the number of unique words used in the text. '''\n\n @requires_nltk_corpus\n def _extract(self, stim, tokenizer=None):\n text = stim.text\n if tokenizer is None:\n try:\n import nltk\n return len(nltk.word_tokenize(text))\n except:\n return len(text.split())\n else:\n return Value(stim, self,\n {'num_unique_words': tokenizer.tokenize(text)})\n\n\nclass PartOfSpeechExtractor(ComplexTextExtractor):\n\n ''' Tags parts of speech in text with nltk. '''\n\n @requires_nltk_corpus\n def _extract(self, stim):\n words = [w.text for w in stim]\n pos = nltk.pos_tag(words)\n if len(words) != len(pos):\n raise FeatureXError(\n \"The number of words in the ComplexTextStim does not match \"\n \"the number of tagged words returned by nltk's part-of-speech\"\n \" tagger.\")\n events = []\n for i, w in enumerate(stim):\n value = Value(stim, self, {'part_of_speech': pos[i][1]})\n event = Event(onset=w.onset, duration=w.duration, values=[value])\n events.append(event)\n return events\n\n\n# class BasicStatsExtractorCollection(TransformerCollection):\n\n# ''' A collection of basic text statistics. 
Just a prototype; needs work.\n# '''\n\n# target = TextStim\n\n# def __init__(self, statistics=None):\n\n# all_stats = {'lengthextractor', 'numuniquewordsextractor'}\n# if statistics is not None:\n# statistics = set([s.lower() for s in statistics]) & all_stats\n# else:\n# statistics = all_stats\n\n# super(BasicStatsExtractorCollection, self).__init__(statistics)\n","sub_path":"featurex/extractors/text.py","file_name":"text.py","file_ext":"py","file_size_in_byte":5328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"124045752","text":"# http://www.reddit.com/r/dailyprogrammer/comments/1rdtky/111113_challenge_142_easy_falling_sand/\nsandInput = \"\"\"5\n.....\n # \n# \n \n .\"\"\"\n\nimport re\ndef fall(sand):\n columns = zip(*reversed(sand.split('\\n')[1:]))\n columns = [\n re.subn('( +)(\\.+)', r'\\2\\1', ''.join(x))[0]\n for x in columns]\n print('\\n'.join(reversed([''.join(x) for x in zip(*columns)])))\n","sub_path":"python/sand.py","file_name":"sand.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"584237124","text":"import numpy as np\n\n\n# Select elements at (1,0), (0,1), (1,2) and (0,0)\nprint(my_2d_array[[1, 0, 1, 0],[0, 1, 2, 0]])\n\n# Select a subset of the rows and columns\nprint(my_2d_array[[1, 0, 1, 0]][:,[0,1,2,0]])\n\n# Look up info on `mean` with `np.lookfor()`\n#Use lookfor() to do a keyword search on docstrings.\nprint(np.lookfor(\"mean\"))\n\n# Get info on data types with `np.info()`\nnp.info(np.ndarray.dtype)\n\n#you switch around the shape of the array\n# Print `my_2d_array`\nprint(my_2d_array)\n\n# Transpose `my_2d_array`\nprint(np.transpose(my_2d_array))\n\n# Or use `T` to transpose `my_2d_array`\nprint(my_2d_array.T)\n\n\n\"\"\"\n\nThe number of dimensions needs to be the same if you want to concatenate two arrays with np.concatenate(). \nAs such, if you want to concatenate an array with my_array, which is 1-D, you’ll need to make sure that the\n second array that you have, is also 1-D.\nWith np.vstack(), you effortlessly combine my_array with my_2d_array. You just have to make sure that, as you’re \nstacking the arrays row-wise, that the number of columns in both arrays is the same. As such, you could also add\n an array with shape (2,4) or (3,4) to my_2d_array, \nas long as the number of columns matches. Stated differently, the arrays must have the same shape along all but \nthe first axis. The same holds also for when you want to use np.r[].\nFor np.hstack(), you have to make sure that the number of dimensions is the same and that the number of rows in\n both arrays is the same. That means that you could stack arrays such as (2,3) or (2,4) to my_2d_array, which \n itself as a shape of (2,4). 
Anything is possible as long as you make sure that the number of rows matches.\n This function is still supported by NumPy, but you should prefer np.concatenate() or np.stack().\n\"\"\"\n# Concatentate `my_array` and `x`\nprint(np.concatenate((my_array,x)))\n\n# Stack arrays row-wise\nprint(np.vstack((my_array, my_2d_array)))\n\n# Stack arrays row-wise\nprint(np.r_[my_resized_array, my_2d_array])\n\n# Stack arrays horizontally\nprint(np.hstack((my_resized_array, my_2d_array)))\n\n# Stack arrays column-wise\nprint(np.column_stack((my_resized_array, my_2d_array)))\n\n# Stack arrays column-wise\nprint(np.c_[my_resized_array, my_2d_array])\n\n\n\n\n# Transposing Array\ni = np.transpose(b) #Permute array dimensions\ni.T #Permute array dimensions\n\n# Changing Array Shape\n\nb.ravel() #Flatten the array\ng.reshape(3,-2) #Reshape, but don’t change data\n\n# Adding/Removing Elements\n\nh.resize((2,6)) #Return a new array with shape (2,6)\nnp.append(h,g) #Append items to an array\nnp.insert(a, 1, 5) #Insert items in an array\nnp.delete(a,[1]) #Delete items from an array\n\n# Combining Arrays\nnp.concatenate((a,d),axis=0) #Concatenate arrays\n\nnp.vstack((a,b)) #Stack arrays vertically (row-wise)\n\nnp.r_[e,f] #Stack arrays vertically (row-wise)\n\nnp.hstack((e,f)) # Stack arrays horizontally (column-wise)\n\nnp.column_stack((a,d)) #Create stacked column-wise arrays\n\nnp.c_[a,d] #Create stacked column-wise arrays\n\n\n# Splitting Arrays\n\nnp.hsplit(a,3) #Split the array horizontally at the 3rd index\n\nnp.vsplit(c,2) #Split the array vertically at the 2nd index\n","sub_path":"numpyassignments/numpystackfilesandmaipulation.py","file_name":"numpystackfilesandmaipulation.py","file_ext":"py","file_size_in_byte":3113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"342915325","text":"#!/usr/bin/env python\n\nfrom __future__ import print_function, division #for python2 users\nimport rospy\nimport numpy as np\n\nfrom geometry_msgs.msg import Twist, Vector3, Pose, Point, Quaternion\nfrom sensor_msgs.msg import LaserScan\nfrom std_msgs.msg import Header, ColorRGBA\nfrom visualization_msgs.msg import Marker\nimport atexit\nimport emergency_stop\n\n#import create_marker\n\n\"\"\"\nOh christ\n\nCHARLIE: LET GO OF VISUALIZATION, JUST TRY TO GET THE OTHER PARTS WORKING\n\n\n\"\"\"\n\nclass WallFollowing(object):\n def __init__(self):\n rospy.init_node('wall_following')\n # subscribe to laser scans\n rospy.Subscriber('/scan', LaserScan, self.process_scan)\n # publish velocities\n self.pubVel = rospy.Publisher('/cmd_vel', Twist, queue_size=10)\n # publish markers\n self.pubMark = rospy.Publisher('/markers', Marker, queue_size=10)\n # array to hold distances from laser scanner\n self.distances = np.ones(362)\n self.linearVel = 0.0 # start moving\n self.angularVel = 0.0\n self.front_distance = 1.0 # approx directly in front of neato\n self.left_front_distance = 1.0 # approx 315 deg\n self.left_back_distance = 1.0 # approx 225 deg\n self.right_front_distance = 1.0 # approx 315 deg\n self.right_back_distance = 1.0 # approx 225 deg\n\n self.left_front_angle = 360 # QUESTION\n self.left_back_angle = 0 # QUESTION\n # ARROW MARKERS\n self.front_marker = Marker()\n self.left_front_marker = Marker()\n self.left_back_marker = Marker()\n # Emergency stops\n self.estop = emergency_stop.EmergencyStopNode()\n atexit.register(self.exit_handler)\n\n def process_scan(self, m):\n for x in range(len(self.distances)-1):\n if m.ranges[x] != 0.0:\n self.distances[x] = 
m.ranges[x]\n\n def find_max_distance(self, distances, start, end):\n # angles is array, start is first index, end is last index\n section = distances[start:end]\n max_distance = max(section)\n if max_distance > 15:\n max_distance = 0\n return max_distance\n\n def check_obstacle(self):\n if self.front_distance < .5:\n self.angularVel = 0.5 # turns left by default. this may need changing\n self.linearVel = 0.0\n\n def make_horizontal(self):\n if left_back_distance > 0 and left_front_distance > 0:\n self.left_front_angle = np.degrees(np.arctan(self.left_back_distance/self.left_front_distance))\n self.left_back_angle = np.degrees(np.arctan(self.left_front_distance/self.left_back_distance))\n angle_difference = self.left_front_angle-self.left_back_angle\n if self.left_front_distance < .5 or self.left_back_distance < .5:\n # MAY NEED TO CHANGE PROPORTION\n self.angularVel = 0.3*angle_difference # turns left if positive, right if negative\n else:\n self.angularVel = 0.0\n else:\n angularVel = 0.0\n \n\n def visualize_points(self,deg,distance):\n deg = np.radians(deg)\n scale = [.1,.1,.1]\n color = [255,105,180,100]\n point = [distance*np.cos(deg),distance*np.sin(deg),0]\n return scale, color, point\n\n def visualize_arrow(self, deg, distance, id):\n #if distance > 0:\n header = Header(stamp=rospy.Time.now(), frame_id=\"base_link\")\n type = 2\n #color = ColorRGBA(255,105,180,100)\n color = ColorRGBA(1,1,0,1)\n #point = Point((distance*np.cos(deg)),(distance*np.sin(deg)),0)\n x = distance*np.cos(deg)\n y = distance*np.sin(deg)\n point = Point(x,y,0)\n #scale = Vector3(.01,.05,distance) # arrow?\n scale = Vector3(.1,.1,.1) # sphere\n lifetime = rospy.Time(1,0)\n marker = Marker(pose=Pose(position=point), header=header, type=type, color=color, scale=scale, lifetime=lifetime, id=id)\n self.pubMark.publish(marker)\n #return marker\n\n\n def run(self):\n r = rospy.Rate(10)\n while not rospy.is_shutdown():\n # have to do this from 355-360 and 0-5 so it's a little weird:\n self.front_distance = (self.find_max_distance(self.distances, 355, 360)+self.find_max_distance(self.distances,0,5))/2\n self.left_front_distance = self.find_max_distance(self.distances,310,320)\n self.left_back_distance = self.find_max_distance(self.distances,220,230)\n self.right_front_distance = self.find_max_distance(self.distances,40,50)\n self.right_back_distance = self.find_max_distance(self.distances,130,140)\n # check for something in front\n self.check_obstacle()\n # orient appropriately\n #self.make_horizontal()\n # Publish action\n self.pubVel.publish(Twist(linear=Vector3(x=self.linearVel), angular=Vector3(z=self.angularVel)))\n # ARROW MARKERS\n self.front_marker = self.visualize_arrow(0, self.front_distance, 1)\n self.left_front_maker = self.visualize_arrow(315, self.left_front_distance, 2)\n self.left_back_marker = self.visualize_arrow(225, self.left_back_distance, 3)\n self.right_front_marker = self.visualize_arrow(45, self.right_front_distance, 4)\n self.right_back_marker = self.visualize_arrow(135, self.right_back_distance, 5)\n \"\"\"self.pubMark.publish(self.front_marker)\n self.pubMark.publish(self.left_front_marker)\n self.pubMark.publish(self.left_back_marker)\n self.pubMark.publish(self.right_front_marker)\n self.pubMark.publish(self.right_back_marker)\"\"\"\n\n\n # Troubleshooting\n #print(\"front: \" + str(self.front_distance) + \" left front: \" + str(self.left_front_distance) + \" left back: \" + str(self.left_back_distance))\n #print(\"right front: \" + str(self.right_front_distance) + \" right back: \" + 
str(self.right_back_distance))\n\n\n # THIS COULD BE THE END OF THE CODE\n \"\"\"self.front_marker = self.visualize_arrow(0, self.front_distance)\n self.left_front_distance = self.visualize_arrow(315, self.left_front_distance)\n self.left_back_distance = self.visualize_arrow(225, self.left_back_distance)\n # PUBLISH ARROWS\n self.pubMark.publish(self.front_marker)\n self.pubMark.publish(self.left_front_marker)\n self.pubMark.publish(self.left_back_distance)\"\"\"\n #Bump stop\n #self.estop.run()\n\n def exit_handler(self):\n # emergency exit\n self.linearVel = 0.0\n self.angularVel = 0.0\n print(\"thank you for shopping with wall_following\")\n\nif __name__ == '__main__':\n node = WallFollowing()\n node.run()\n ","sub_path":"warmup_project/scripts/Scrap Code/old_code.py","file_name":"old_code.py","file_ext":"py","file_size_in_byte":6793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"242548806","text":"import socket\n\nHOST = '127.0.0.1'\nPORT = 1234\nLIMIT = 10\n\nwith socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n\t\n\ts.bind((HOST, PORT))\n\ts.listen(LIMIT)\n\twhile True:\n\n\t\tconn, addr = s.accept()\n\t\t#далее работает с этим клиентом, \n\t\t#остальные клиенты ждут в очереди = LIMIT\n\t\t# клиенту LIMIT+1 сервер откажет в соединении\n\t\t\n\t\twith conn:\n\t\t\tprint('Connected by', addr)\n\t\t\twhile True:\n\n\t\t\t\tdata = conn.recv(1024)\n\t\t\t\tprint(data.decode())\n\n\t\t\t\tif not data: break\n\t\t\t\t\n\t\t\t\t#conn.sendall(data) # echo-сервер отправляет те же данные что получил\n\t\t\t\t\n\t\t\t\tprint('Your answer is: ')\n\n\t\t\t\tdata2 = bytes(input(), 'utf-8')\n\t\t\t\tconn.sendall(data2)\n","sub_path":"tcp/tcp_server_127_0_0_1_1234.py","file_name":"tcp_server_127_0_0_1_1234.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"173758340","text":"import os\nfrom datautil.dataloader import load_data\nfrom collections import Counter\nfrom config.Const import *\n\n\ndef build_vocab(path, min_count):\n wd_counter = Counter()\n token_lst = load_data(path)\n for tokens in token_lst:\n wd_counter.update(tokens)\n return WordVocab(wd_counter, min_count)\n\n\nclass WordVocab(object):\n def __init__(self, wd_counter, min_count=5):\n super(WordVocab, self).__init__()\n self.PAD = 0\n self.UNK = 1\n self.BOS = 2\n self.EOS = 3\n\n self._wd2freq = dict(filter(lambda x: x[1] >= min_count, wd_counter.items()))\n # self._wd2freq = dict((wd, freq) for wd, freq in wd_counter.items() if freq >= min_count)\n\n self._wd2idx = {\n PAD: self.PAD,\n UNK: self.UNK,\n BOS: self.BOS,\n EOS: self.EOS\n }\n\n for wd in self._wd2freq.keys():\n if wd not in self._wd2idx:\n self._wd2idx[wd] = len(self._wd2idx)\n\n self._idx2wd = dict((idx, wd) for wd, idx in self._wd2idx.items())\n\n print(f'vocab size: {self.vocab_size}')\n\n def word2index(self, wds):\n if isinstance(wds, list):\n return [self._wd2idx.get(wd, self.UNK) for wd in wds]\n else:\n return self._wd2idx.get(wds, self.UNK)\n\n def index2word(self, idxs):\n if isinstance(idxs, list):\n return [self._idx2wd.get(i) for i in idxs]\n else:\n return self._idx2wd.get(idxs)\n\n @property\n def vocab_size(self):\n return len(self._wd2idx)\n\n # 保存词频表\n def save_freq_vocab(self, path):\n assert os.path.exists(path)\n with open(path, 'a', encoding='utf-8') as fw:\n for wd, freq in self._wd2freq.items():\n fw.write(f'{wd} {freq}')\n 
fw.write('\\n')\n","sub_path":"vocab/Vocab.py","file_name":"Vocab.py","file_ext":"py","file_size_in_byte":1885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"559481949","text":"# Given a string s of '(' , ')' and lowercase English characters. \n\n# Your task is to remove the minimum number of parentheses ( '(' or ')', in any positions ) so that the resulting parentheses string is valid and return any valid string.\n\n# Formally, a parentheses string is valid if and only if:\n\n# It is the empty string, contains only lowercase characters, or\n# It can be written as AB (A concatenated with B), where A and B are valid strings, or\n# It can be written as (A), where A is a valid string.\n \n\n# Example 1:\n\n# Input: s = \"lee(t(c)o)de)\"\n# Output: \"lee(t(c)o)de\"\n# Explanation: \"lee(t(co)de)\" , \"lee(t(c)ode)\" would also be accepted.\n# Example 2:\n\n# Input: s = \"a)b(c)d\"\n# Output: \"ab(c)d\"\n# Example 3:\n\n# Input: s = \"))((\"\n# Output: \"\"\n# Explanation: An empty string is also valid.\n# Example 4:\n\n# Input: s = \"(a(b(c)d)\"\n# Output: \"a(b(c)d)\"\n \n\n# Constraints:\n\n# 1 <= s.length <= 10^5\n# s[i] is one of '(' , ')' and lowercase English letters.\n\nclass Solution:\n def minRemoveToMakeValid(self, s: str) -> str:\n left = 0\n right = 0\n proper_right = 0\n \n for c in s:\n if c is '(': left += 1\n if c is ')': right += 1\n if c is ')' and proper_right < left: proper_right += 1\n \n \n if proper_right is right and left is right: return s\n \n left = 0\n right = 0\n \n result = ''\n \n for c in s:\n if c is '(' and left < proper_right: \n result += c\n left += 1\n if c is ')' and right < left and right < proper_right: \n result += c\n right += 1\n \n if c is not '(' and c is not ')':\n result += c\n \n return result","sub_path":"january 2021/parenthesisProblem.py","file_name":"parenthesisProblem.py","file_ext":"py","file_size_in_byte":1787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"144727186","text":"import tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\nimport os\n\n#from torch.autograd import Variable\n\nfrom tensorflow.examples.tutorials.mnist import input_data\n\n\nmnist = input_data.read_data_sets('../../MNIST_data', one_hot=True)\nmb_size = 16\nz_dim = 100\nX_dim = mnist.train.images.shape[1]\ny_dim = mnist.train.labels.shape[1]\nh_dim = 128\nc = 0\nlr = 1e-3\n\n\ndef plot(samples):\n fig = plt.figure(figsize=(4, 4))\n gs = gridspec.GridSpec(4, 4)\n gs.update(wspace=0.05, hspace=0.05)\n\n for i, sample in enumerate(samples):\n ax = plt.subplot(gs[i])\n plt.axis('off')\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n ax.set_aspect('equal')\n plt.imshow(sample.reshape(28, 28), cmap='Greys_r')\n\n return fig\n\n#-----------------TMC----------------------------\n\nclass Node:\n def __init__(self,t, parent = None,left_child = None,right_child = None,name = 0):\n self.parent = parent\n self.left_child = left_child\n self.right_child = right_child\n self.t = t\n self.z = None\n self.name = name\n\n def set_t(self,t):\n self.t = t\n def set_z(self,z):\n self.z = z\n \n def set_mean(self,mean):\n self.mean = mean\n def set_var(self,var):\n self.var = var\n\nclass TMC:\n def __init__(self, N = 64,a = 1, b = 5, z_dim = 100):\n\n self.N = N\n self.z_dim = z_dim\n self.leaves = [] #total leaf nodes unchanged\n left = []\n \n for i in range(N):\n self.leaves.append(Node(t=1,name = str(i)))\n 
\n #left.append(Node(t=1,name = str(i)))\n left = [x for x in self.leaves]\n #bottom up merge to build tree\n \n for j in range(N-1):\n '''\n print(\"names\")\n for n in left:\n print(n.name)\n '''\n total = len(left)\n num1 = np.random.randint(low=0,high = total)\n node1 = left[num1]\n left.remove(node1)\n\n total = len(left)\n num2 = np.random.randint(low=0,high = total)\n node2 = left[num2]\n left.remove(node2)\n\n new_node = Node(name= \"[\"+node1.name+\"]\" +\",\" + \"[\"+node2.name+\"]\", t=None,left_child = node1,right_child = node2)\n left.append(new_node)\n \n \n \n self.root = left[0]\n print(self.root.left_child.name,self.root.right_child.name)\n self.root.set_t(0)\n self.root.set_z(np.random.normal(0,1,z_dim))\n\t\t\n layer = [self.root]\n new_layer = []\n \n print(\"built\")\n \n #top down layer traversal to assign t and z\n while True:\n #print(\"iter\")\n for node in layer:\n #print(\"node\",node.name,node.left_child.name,node.right_child.name)\n\t\t\t\t# put non-leaf node to new_layer and check next time\n \n if node.left_child != None:\n beta_v = np.random.beta(a, b, size=1)[0]\n #print(beta_v)\n node.left_child.set_t(node.t + beta_v*(1-node.t))\n #print(\"node.z\",node.z,\"node.left_child.t\",node.left_child.t,\"node.t\",node.t)\n #print(np.ones(z_dim)*(node.left_child.t - node.t))\n \n node.left_child.set_z(np.random.normal(node.z, np.ones(z_dim)*(node.left_child.t - node.t),z_dim)) \n node.left_child.set_mean(node.z)\n node.left_child.set_var(node.left_child.t - node.t)\n \n new_layer.append(node.left_child)\n\n if node.right_child != None:\n beta_v = np.random.beta(a, b, size=1)\n\n node.right_child.set_t(node.t + beta_v*(1-node.t))\n \n node.right_child.set_z(np.random.normal(node.z,node.right_child.t - node.t,z_dim))\n node.right_child.set_mean(node.z)\n node.right_child.set_var(node.right_child.t - node.t)\n\t\t\t\t\t\n new_layer.append(node.right_child)\n '''\n if node.left_child == None and node.right_child ==None:\n self.leaves.append(node)\n # print(\"leaf\")\n '''\n if len(new_layer) == 0:\n break\n \n layer = [x for x in new_layer]\n new_layer = []\n\n\n def sample(self,N = 64):\n '''\n self.root.set_z(np.random.normal(0,1,z_dim))\n\n layer = [self.root]\n new_layer = []\n leaves = []\n #layer level traversal\n while True:\n for node in layer:\n if node.left_child != None:\n \n node.left_child.set_z(np.random.normal(node.z,node.left_child.t - node.t))\n\n new_layer.append(node.left_child)\n\n elif node.right_child != None:\n\n node.right_child.set_z(np.random.normal(node.z,node.right_child.t - node.t))\n\n new_layer.append(node.right_child)\n\t\t\t\t\n\t\t\t\telse:\n\t\t\t\t\t#leaf node\n\t\t\t\t\tpass\n\t\t\t\t\t#leaves.append(ndoe)\n if len(new_layer) == 0:\n break\n layer = [x for x in new_layer]\n new_layer = []\n'''\n\n #print(len(self.leaves))\n latent = np.zeros((N,self.z_dim))\n \n means = np.zeros((N,self.z_dim))\n vars = np.zeros((N,self.z_dim))\n \n for i in range(N):\n #latent[i] = self.leaves[i].z #latent = [0. ,0. , ...] 
每个leaf 对应一个数据点 \n latent[i] = np.random.normal(self.leaves[i].mean,self.leaves[i].var,self.z_dim)\n \n means[i] = self.leaves[i].mean\n vars[i] = self.leaves[i].var\n \n return latent,means,vars #latent = (batch*dim_z)\n\n\n\ndef KL(q_x,p_x):\n\n #kl = 0 \n #for i in range(N):\n # qlog q - qlog p\n p_x = tf.convert_to_tensor(p_x, dtype=tf.float32)\n \n \n \n kl= tf.reduce_sum(q*tf.log(q+0.000000001), 1) - tf.reduce_sum(q*tf.log(p+0.000000001), 1)\n\n #kl = kl/N\n\n return kl\n\n\ndef KL_q_tmc(q_means,q_vars,tmc_means,tmc_vars):\n tmc_means = tf.convert_to_tensor(tmc_means, dtype=tf.float32)\n tmc_vars = tf.convert_to_tensor(tmc_vars, dtype=tf.float32)\n \n kl_loss_p_and_tmc = tf.reduce_sum(tf.log(tmc_vars / tf.exp(z_logvar)) + ( (tf.exp(z_logvar))**2+ (z_mu-tmc_means)**2 )/ (2* (tmc_vars)**2) -0.5, 1)\n \n return kl_loss_p_and_tmc\n#下面传进去一个 sample了N次的batch p,q\n\n\ndef sample_from_prior():\n pass\n\n#----------------------------------------------------------------\n\ndef xavier_init(size):\n in_dim = size[0]\n xavier_stddev = 1. / tf.sqrt(in_dim / 2.)\n return tf.random_normal(shape=size, stddev=xavier_stddev)\n\n\n# =============================== Q(z|X) ======================================\n\nX = tf.placeholder(tf.float32, shape=[None, X_dim])\nz = tf.placeholder(tf.float32, shape=[None, z_dim])\n\nQ_W1 = tf.Variable(xavier_init([X_dim, h_dim]))\nQ_b1 = tf.Variable(tf.zeros(shape=[h_dim]))\n\nQ_W2_mu = tf.Variable(xavier_init([h_dim, z_dim]))\nQ_b2_mu = tf.Variable(tf.zeros(shape=[z_dim]))\n\nQ_W2_sigma = tf.Variable(xavier_init([h_dim, z_dim]))\nQ_b2_sigma = tf.Variable(tf.zeros(shape=[z_dim]))\n\n\ndef Q(X):\n h = tf.nn.relu(tf.matmul(X, Q_W1) + Q_b1)\n z_mu = tf.matmul(h, Q_W2_mu) + Q_b2_mu\n z_logvar = tf.matmul(h, Q_W2_sigma) + Q_b2_sigma\n return z_mu, z_logvar\n\n\ndef sample_z(mu, log_var):\n eps = tf.random_normal(shape=tf.shape(mu))\n return mu + tf.exp(log_var / 2) * eps\n\n\n# =============================== P(X|z) ======================================\n\nP_W1 = tf.Variable(xavier_init([z_dim, h_dim]))\nP_b1 = tf.Variable(tf.zeros(shape=[h_dim]))\n\nP_W2 = tf.Variable(xavier_init([h_dim, X_dim]))\nP_b2 = tf.Variable(tf.zeros(shape=[X_dim]))\n\n\ndef P(z):\n h = tf.nn.relu(tf.matmul(z, P_W1) + P_b1)\n logits = tf.matmul(h, P_W2) + P_b2\n prob = tf.nn.sigmoid(logits)\n return prob, logits\n\n\n# =============================== TRAINING ====================================\n\nz_mu, z_logvar = Q(X)\n\nz_sample = sample_z(z_mu, z_logvar)\n\n#-------tmc-------------------\ntmc = TMC(N = mb_size,a=2,b=2,z_dim = z_dim)\ntmc_latent, tmc_means, tmc_vars = tmc.sample(N = mb_size)\n\n\n#-----------------------------\n\n_, logits = P(z_sample)\n\n# Sampling from random z\nX_samples, _ = P(z)\n\n# E[log P(X|z)]\nrecon_loss = tf.reduce_mean( tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=X), 1) )\n# D_KL(Q(z|X) || P(z)); calculate in closed form as both dist. are Gaussian\n\n#多个 图片的平均loss\n#每个 图片 sample N 次和 q中sample的结果求 KL\n#-----------KL loss between q(z|x) and tmc----------------\n#kl_loss_p_and_tmc = KL(z_sample,tmc_latent) #(batch,1)\n\n#kl_loss_p_and_tmc = 0.5 * tf.reduce_sum(tf.log(tmc_vars / tf.exp(z_logvar)) + ( (tf.exp(z_logvar))**2+ (z_mu-tmc_means)**2 )/ (2* (tmc_vars)**2), 1)\nkl_loss_p_and_tmc = tf.reduce_mean( KL_q_tmc(z_mu,tf.exp(z_logvar),tmc_means,tmc_vars) * 0.001 )\n#-----------------------------------------------------------\n\n#kl_loss = 0.5 * tf.reduce_sum(tf.exp(z_logvar) + z_mu**2 - 1. 
- z_logvar, 1)\n\n\n# VAE loss\nvae_loss = recon_loss + kl_loss_p_and_tmc\n\nsolver = tf.train.AdamOptimizer().minimize(vae_loss)\n\nsess = tf.Session()\nsess.run(tf.global_variables_initializer())\n\nif not os.path.exists('out/'):\n os.makedirs('out/')\n\ni = 0\n\nfor it in range(1000000):\n X_mb, _ = mnist.train.next_batch(mb_size)\n\n _, loss ,kl_loss_p_and_tmc_,z_sample_,z_mu_, z_logvar_,recon_loss_ = sess.run([solver, vae_loss, kl_loss_p_and_tmc,z_sample,z_mu, z_logvar,recon_loss], feed_dict={X: X_mb})\n\n if it % 1000 == 0:\n print('Iter: {}'.format(it))\n print('Loss: {:.4}'. format(loss))\n print('kl_loss_p_and_tmc:',kl_loss_p_and_tmc_)\n print('recon_loss',recon_loss_)\n #print(\"z_sample\",z_sample_)\n #print(\"tmc_latent\",tmc_latent_)\n \n #print(\"z_mu\",z_mu_)\n #print(\"z_logvar\",z_logvar_)\n print()\n \n #sampled_prior = np.random.randn(16, z_dim)\n sampled_prior,_,_ = tmc.sample(N=16) #(16,z_dim)\n print(sampled_prior)\n samples = sess.run(X_samples, feed_dict={z: sampled_prior})\n\n fig = plot(samples)\n plt.savefig('out/{}.png'.format(str(i).zfill(3)), bbox_inches='tight')\n i += 1\n plt.close(fig)\n","sub_path":"tmc_vae.py","file_name":"tmc_vae.py","file_ext":"py","file_size_in_byte":10486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"560208056","text":"from keras.models import Sequential\nfrom keras.layers import Convolution2D, MaxPooling2D\nfrom keras.layers import Activation, Dropout, Flatten, Dense\nfrom keras.optimizers import SGD, Adam\n\ndef build_model():\n model_name = 'encoder2'\n\n model = Sequential()\n model.add(Convolution2D(32, 3, 3, input_shape=(1,128,128), init='he_normal', border_mode='same'))\n model.add(Activation('relu'))\n\n model.add(Convolution2D(32, 3, 3, init='he_normal', border_mode='same'))\n model.add(Activation('relu'))\n\n model.add(Convolution2D(32, 3, 3, init='he_normal', border_mode='same'))\n model.add(Activation('relu'))\n\n model.add(Convolution2D(32, 3, 3, init='he_normal', border_mode='same'))\n model.add(Activation('relu'))\n\n model.add(Convolution2D(32, 3, 3, init='he_normal', border_mode='same'))\n model.add(Activation('relu'))\n\n model.add(Convolution2D(1, 1, 1, init='he_normal'))\n model.add(Activation('sigmoid'))\n\n sgd = SGD(lr=0.01, decay=1e-6, momentum=0, nesterov=True)\n model.compile(loss='mean_squared_error',\n optimizer=sgd,\n metrics=['accuracy']\n )\n\n return model, model_name\n","sub_path":"src/pymodels/encoder2.py","file_name":"encoder2.py","file_ext":"py","file_size_in_byte":1170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"158569502","text":"import argparse\nimport json\nimport editdistance\nimport pickle\nimport numpy as np\nfrom tqdm import tqdm\n\n\n\"\"\"\nThis script computes the Levenshtein and Jaccard distances between the\nraw matched and unmatched terms. 
For each term it finds the minimum\ndistance in the other set.\n\"\"\"\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--infile\", type=str, required=True,\n help=\"JSON file containing matches.\")\n parser.add_argument(\"--outfile\", type=str, required=True,\n help=\"Where to save the matching summary.\")\n return parser.parse_args()\n\n\ndef main(infile, outfile):\n matches = json.load(open(infile))\n # We don't (as of now) compute distances between sets of normalized terms.\n matched, unmatched = get_matched_unmatched_raw_terms(matches)\n print(\"Computing Edit Distances\")\n edits_match, edits_unmatch = min_distances(matched, unmatched,\n editdistance.eval)\n pickle.dump(edits_match, open(f\"{outfile}.edit_dist_match.pickle\", \"wb\"))\n pickle.dump(edits_unmatch, open(f\"{outfile}.edit_dist_unmatch.pickle\", \"wb\")) # noqa\n print(\"Computing Jaccard Distances\")\n jaccs_match, jaccs_unmatch = min_distances(matched, unmatched, jaccard)\n pickle.dump(jaccs_match, open(f\"{outfile}.jaccard_dist_match.pickle\", \"wb\")) # noqa\n pickle.dump(jaccs_unmatch, open(f\"{outfile}.jaccard_dist_unmatch.pickle\", \"wb\")) # noqa\n summarize(edits_match, outfile, write_mode='w',\n name=\"Edit Distance (matched -> unmatched)\")\n summarize(edits_unmatch, outfile, write_mode='a',\n name=\"Edit Distance (unmatched -> matched)\")\n summarize(jaccs_match, outfile, write_mode='a',\n name=\"Jaccard Distance (matched -> unmatched)\")\n summarize(jaccs_unmatch, outfile, write_mode='a',\n name=\"Jaccard Distance (unmatched -> matched)\")\n\n\ndef get_matched_unmatched_raw_terms(matches):\n matched_terms = set()\n unmatched_terms = set()\n for cui in matches:\n for aui in matches[cui]:\n match = matches[cui][aui]\n if len(match[\"umls_cuis\"]) > 0:\n matched_terms.add(match[\"term\"].lower())\n else:\n unmatched_terms.add(match[\"term\"].lower())\n return matched_terms, unmatched_terms\n\n\ndef min_distances(matched, unmatched, distance_func):\n \"\"\"\n Returns two lists of numbers:\n 1. The minimum distance of each matched term to the unmatched terms.\n 2. 
The minimum distance of each unmatched term to the matched terms.\n where distance is measured by the supplied distance function.\n \"\"\"\n matched_distances = [np.inf] * len(matched)\n unmatched_distances = [np.inf] * len(unmatched)\n for (m, matched_term) in tqdm(list(enumerate(matched))):\n for (u, unmatched_term) in enumerate(unmatched):\n dist = distance_func(matched_term, unmatched_term)\n if dist < matched_distances[m]:\n matched_distances[m] = dist\n if dist < unmatched_distances[u]:\n unmatched_distances[m] = dist\n return matched_distances, unmatched_distances\n\n\ndef jaccard(term1, term2):\n st1 = set(term1)\n st2 = set(term2)\n num = len(st1.intersection(st2))\n denom = len(st1.union(st2))\n return 1 - (num / denom)\n\n\ndef summarize(distances, outfile, write_mode='w', name=\"\"):\n q1, q2, q3 = np.percentile(distances, [25, 50, 75])\n minimum = min(distances)\n maximum = max(distances)\n with open(outfile, write_mode) as outF:\n outF.write(name + '\\n')\n outF.write(f\"min, max: {minimum:.2f}, {maximum:.2f}\\n\")\n outF.write(f\"quartiles: {q1:.2f}, {q2:.2f}, {q3:.2f}\\n\\n\")\n\n\nif __name__ == \"__main__\":\n args = parse_args()\n main(args.infile, args.outfile)\n","sub_path":"coverage/src/distance.py","file_name":"distance.py","file_ext":"py","file_size_in_byte":3817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"472331301","text":"from dolfin import *\nfrom dolfin_adjoint import *\n\ndef ice_sheet(model):\n assert model[\"Steady\"] is True\n assert model[\"GlenExponent\"] == 3 # need to do more analysis for the depth-averaged viscosity, cf. email from Perego, viscosity.py\n\n Z = model[\"VelocityFunctionSpace\"]\n z = Function(Z, name=\"Velocity\")\n\n (u, v) = split(z)\n (u_test, v_test) = split(TestFunction(Z))\n\n betasq = model[\"FrictionCoefficient\"]\n rho = model[\"IceDensity\"]\n g = model[\"Gravity\"]\n s = model[\"Surface\"]\n b = model[\"Bathymetry\"]\n H = s - b\n bcs = model[\"BoundaryConditions\"]\n\n mu = viscosity(z, model)\n F = (inner(2*mu*eps1(z), grad(u_test))*dx\n + inner(betasq*u, u_test)*dx\n + inner(rho*g*H*grad(s)[0], u_test)*dx\n + inner(2*mu*eps2(z), grad(v_test))*dx\n + inner(betasq*v, v_test)*dx\n + inner(rho*g*H*grad(s)[1], v_test)*dx)\n\n solver_parameters = {}\n solver_parameters[\"linear_solver\"] = \"mumps\"\n solver_parameters[\"newton_solver\"] = {}\n solver_parameters[\"newton_solver\"][\"relaxation_parameter\"] = 1.0\n solver_parameters[\"newton_solver\"][\"maximum_iterations\"] = 100\n\n if isinstance(model[\"InitialGuess\"], Function):\n z.assign(model[\"InitialGuess\"])\n elif isinstance(model[\"InitialGuess\"], float): # have a viscosity to use to compute the initial guess\n mu_linearised = Constant(model[\"InitialGuess\"])\n # Replace mu -> mu_linearised -- dolfin.replace doesn't work, unfortunately\n F_linearised = (inner(2*mu_linearised*eps1(z), grad(u_test))*dx\n + inner(betasq*u, u_test)*dx\n + inner(rho*g*H*grad(s)[0], u_test)*dx\n + inner(2*mu_linearised*eps2(z), grad(v_test))*dx\n + inner(betasq*v, v_test)*dx\n + inner(rho*g*H*grad(s)[1], v_test)*dx)\n solve(F_linearised == 0, z, bcs=bcs, J=derivative(F_linearised, z), solver_parameters=solver_parameters, annotate=False)\n\n solve(F == 0, z, bcs=bcs, J=derivative(F, z), solver_parameters=solver_parameters)\n return z\n\n# There's probably a nice description of these in terms of vector or tensor calculus.\n# But let's play it safe and do it as they've written in the paper!\n\ndef eps(z, i, j):\n return 
0.5*(grad(z[i])[j])\n\ndef eps1(z):\n return as_vector((2*eps(z, 0, 0) + eps(z, 1, 1), eps(z, 0, 1)))\n\ndef eps2(z):\n return as_vector((eps(z, 1, 0), eps(z, 0, 0) + 2*eps(z, 1, 1)))\n\ndef eps_e(z):\n return sqrt(0.5*(eps(z, 0, 0)**2 + eps(z, 0, 1)**2 + eps(z, 1, 0)**2 + eps(z, 1, 1)**2))\n\ndef viscosity(z, model):\n A = model[\"IceFlowParameter\"]\n n = model[\"GlenExponent\"]\n return 0.5 * Constant(A)**(-1.0/n) * eps_e(z)**(1.0/n - 1)\n","sub_path":"l1l2/ice_sheet.py","file_name":"ice_sheet.py","file_ext":"py","file_size_in_byte":2592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"7833860","text":"\n__all__ = ['TrigElectron']\n\nfrom Gaugi import EDM\nfrom Gaugi import StatusCode\nfrom Gaugi import stdvector_to_list\nimport numpy as np\nfrom kepler.core import Dataframe as DataframeEnum\n\n\nclass TrigElectron(EDM):\n\n __eventBranches = {\n 'Electron_v1':\n [\n 'trig_L2_el_trackAlgID',\n 'trig_L2_el_pt',\n 'trig_L2_el_caloEta',\n 'trig_L2_el_eta',\n 'trig_L2_el_phi',\n 'trig_L2_el_charge',\n 'trig_L2_el_nTRTHits',\n 'trig_L2_el_nTRTHiThresholdHits',\n 'trig_L2_el_etOverPt',\n 'trig_L2_el_trkClusDeta',\n 'trig_L2_el_trkClusDphi',\n\n ]\n }\n\n def __init__(self):\n EDM.__init__(self)\n\n def initialize(self):\n \"\"\"\n Initialize all branches\n \"\"\"\n if self._dataframe is DataframeEnum.Electron_v1:\n self.link( self.__eventBranches[\"Electron_v1\"] )\n return StatusCode.SUCCESS\n else:\n self._logger.warning( \"Can not initialize the FastElectron object. Dataframe not available.\" )\n return StatusCode.FAILURE\n\n\n\n def pt(self):\n \"\"\"\n Retrieve the pt information from Physval or SkimmedNtuple\n \"\"\"\n if self._dataframe is DataframeEnum.Electron_v1:\n return self._event.trig_L2_el_pt[self.getPos()]\n else:\n self._logger.warning(\"Impossible to retrieve the value of pt. Unknow dataframe\")\n\n\n def eta(self):\n \"\"\"\n Retrieve the eta information from Physval or SkimmedNtuple\n \"\"\"\n if self._dataframe is DataframeEnum.Electron_v1:\n return self._event.trig_L2_el_eta[self.getPos()]\n else:\n self._logger.warning(\"Impossible to retrieve the value of eta. Unknow dataframe\")\n\n def phi(self):\n \"\"\"\n Retrieve the phi information from Physval or SkimmedNtuple\n \"\"\"\n if self._dataframe is DataframeEnum.Electron_v1:\n return self._event.trig_L2_el_phi[self.getPos()]\n else:\n self._logger.warning(\"Impossible to retrieve the value of phi. Unknow dataframe\")\n\n def charge(self):\n \"\"\"\n Retrieve the charge information from Physval or SkimmedNtuple\n \"\"\"\n if self._dataframe is DataframeEnum.Electron_v1:\n return self._event.trig_L2_el_charge[self.getPos()]\n else:\n self._logger.warning(\"Impossible to retrieve the value of charge. Unknow dataframe\")\n\n def caloEta(self):\n \"\"\"\n Retrieve the caloEta information from Physval or SkimmedNtuple\n \"\"\"\n if self._dataframe is DataframeEnum.Electron_v1:\n return self._event.trig_L2_el_caloEta[self.getPos()]\n else:\n self._logger.warning(\"Impossible to retrieve the value of caloEta. Unknow dataframe\")\n\n def numberOfTRTHits(self):\n \"\"\"\n Retrieve the number of TRT hits information from Physval or SkimmedNtuple\n \"\"\"\n if self._dataframe is DataframeEnum.Electron_v1:\n return self._event.trig_L2_el_nTRTHits[self.getPos()]\n else:\n self._logger.warning(\"Impossible to retrieve the value of nTRTHits. 
Unknow dataframe\")\n\n def numberOfTRTHiThresholdHits(self):\n \"\"\"\n Retrieve the number of TRT high thresholdhits information from Physval or SkimmedNtuple\n \"\"\"\n if self._dataframe is DataframeEnum.Electron_v1:\n return self._event.trig_L2_el_nTRTHiThresholdHits[self.getPos()]\n else:\n self._logger.warning(\"Impossible to retrieve the value of nTRTHiThrehsoldHits. Unknow dataframe\")\n\n\n def etOverPt(self):\n \"\"\"\n Retrieve the et/pt information from Physval or SkimmedNtuple\n \"\"\"\n if self._dataframe is DataframeEnum.Electron_v1:\n return self._event.trig_L2_el_etOverPt[self.getPos()]\n else:\n self._logger.warning(\"Impossible to retrieve the value of et/pt. Unknow dataframe\")\n\n def trkClusDeta(self):\n \"\"\"\n Retrieve the trkClusDeta information from Physval or SkimmedNtuple\n \"\"\"\n if self._dataframe is DataframeEnum.Electron_v1:\n return self._event.trig_L2_el_trkClusDeta[self.getPos()]\n else:\n self._logger.warning(\"Impossible to retrieve the value of trkClusDeta. Unknow dataframe\")\n\n def trkClusDphi(self):\n \"\"\"\n Retrieve the trkClusDphi information from Physval or SkimmedNtuple\n \"\"\"\n if self._dataframe is DataframeEnum.Electron_v1:\n return self._event.trig_L2_el_trkClusDphi[self.getPos()]\n else:\n self._logger.warning(\"Impossible to retrieve the value of trkClusDphi. Unknow dataframe\")\n\n\n def size(self):\n return self._event.trig_L2_el_pt.size()\n\n\n\n def setToBeClosestThanCluster( self ):\n idx = 0; minDeltaR = 999\n for trk in self:\n dR = self.deltaR( 0.0, 0.0, trk.trkClusDeta(), trk.trkClusDphi() )\n if dR < minDeltaR:\n minDeltaR = dR\n idx = self.getPos()\n self.setPos(idx)\n\n\n def deltaR( self, eta1, phi1, eta2, phi2 ):\n deta = abs( eta1 - eta2 )\n dphi = abs( phi1 - phi2 ) if abs(phi1 - phi2) < np.pi else (2*np.pi-abs(phi1-phi2))\n return np.sqrt( deta*deta + dphi*dphi )\n\n\n\n\n\n\n\n","sub_path":"kepler/events/TrigElectron.py","file_name":"TrigElectron.py","file_ext":"py","file_size_in_byte":5457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"69373631","text":"from django.contrib import admin\r\nfrom django.urls import path\r\nfrom django.http import HttpResponse\r\nfrom . 
import views\r\n\r\n\r\nurlpatterns = [\r\n path('', views.races),\r\n path('races//drivers', views.drivers, name=\"race\"),\r\n path('races/', views.races, name=\"races\"),\r\n path('home/', views.home, name=\"home\"),\r\n path('registration/create', views.createReg, name=\"registration_cr\"),\r\n path('registration//change', views.changeReg, name=\"registration_ch\"),\r\n path('registration//delete', views.deleteReg, name=\"registration_dl\"),\r\n path('car/create', views.createCar, name=\"car_cr\"),\r\n path('car//change', views.changeCar, name=\"car_ch\"),\r\n path('register', views.regPage, name=\"register\"),\r\n path('login', views.loginPage, name=\"login\"),\r\n path('logout', views.logoutUser, name=\"logout\"),\r\n path('driver_cr', views.regDriver, name=\"driver_cr\"),\r\n path('race//registrate', views.raceReg, name=\"race_reg\"),\r\n path('race//comment', views.writeComment, name=\"comment\"),\r\n path('race/create', views.createRace, name=\"race_cr\"),\r\n path('race//change', views.changeRace, name=\"race_ch\"),\r\n path('race//results', views.results, name=\"results\"),\r\n path('race//comments', views.comments, name=\"comments\"),\r\n path('race//delete', views.delRace, name=\"race_dl\"),\r\n]\r\n","sub_path":"students/K33402/Dubina Sergey/laboratory_works/lab2/races_project/races/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"359212466","text":"import numpy as np\r\n\r\n\r\nclass NeuralNetwork(object):\r\n\r\n def __init__(self):\r\n self.a1 = None\r\n self.z1 = None\r\n self.a2 = None\r\n self.y = None\r\n self.w1 = np.array([[0.8,0.4,0.3],[0.2,0.9,0.5]])\r\n self.w2 = np.array([[0.3], [0.5], [0.9]])\r\n\r\n @staticmethod\r\n def sigmoid(z):\r\n return 1 / (1 + np.exp(-z))\r\n\r\n @staticmethod\r\n def sigmoid_prime(z):\r\n return np.exp(-z) / ((1 + np.exp(-z)) ** 2)\r\n\r\n def forward(self, x): # x should by a (1, 1) dimension array\r\n self.a1 = np.dot(x, self.w1)\r\n self.z1 = self.sigmoid(self.a1)\r\n\r\n self.a2 = np.dot(self.z1, self.w2)\r\n self.z2 = self.sigmoid(self.a2)\r\n\r\n self.y = self.z2\r\n return self.y\r\n\r\n def margin_error(self, x, y):\r\n return y - self.forward(x)\r\n\r\n def delta_output_sum(self, x, y):\r\n return self.sigmoid_prime(NN.a2) * (self.margin_error(x, y))\r\n\r\n def delta_hidden_sum_a1(self, x, y):\r\n sigp_a1 = self.sigmoid_prime(self.a1)\r\n quotient_dos_w2 = self.delta_output_sum(x, y) / self.w2\r\n delta_hidden_sum = np.dot(quotient_dos_w2, sigp_a1)\r\n return np.array([[delta_hidden_sum[0][0]], [delta_hidden_sum[1][1]], [delta_hidden_sum[2][2]]])\r\n\r\n def delta_weights(self, x, y):\r\n dw2 = self.delta_output_sum(x, y) / self.z1\r\n dw1 = self.delta_hidden_sum_a1(x, y) / x\r\n\r\n\r\n return dw1, dw2\r\n\r\n def train(self, x, y, iterations=1000):\r\n epochs = 0\r\n\r\n for i in range(iterations):\r\n dw1, dw2 = self.delta_weights(x, y)\r\n dw1 = dw1.ravel().reshape(2, 3)\r\n dw2 = dw2.ravel().reshape(3, 1)\r\n self.w1 += dw1\r\n self.w2 += dw2\r\n epochs += 1\r\n\r\n return epochs\r\n\r\n# Test code\r\nx = np.array([[1, 1]])\r\ny = np.array([[0.89]])\r\nNN = NeuralNetwork()\r\nprint(NN.forward(x))\r\nprint(NN.train(x,y))\r\nprint(NN.forward(x))\r\n\r\n","sub_path":"NeuralNetworkModel.py","file_name":"NeuralNetworkModel.py","file_ext":"py","file_size_in_byte":1931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"566671322","text":"#\n# 
2206번: 벽 부수고 이동하기\n# https://www.acmicpc.net/problem/2206\n# Version: Python 3.9.7\n#\n# Created by WhiteHyun on 2021/12/23.\n#\n\n\nfrom sys import stdin\n\nread = stdin.readline\n\nif __name__ == \"__main__\":\n row, col = map(int, read().split())\n escape_map = [list(map(int, list(read().rstrip()))) for _ in range(row)]\n\n # 3차원 리스트, 행과 열, 그리고 부순 횟수에 따른 방문\n visited = [[[False] * col for _ in range(row)] for _ in range(2)]\n\n bfs = [(0, 0, 1)] # (0, 0)부터 시작 부술 기회는 1번\n visited[1][0][0] = True\n\n # == BFS ==\n\n dx = (-1, 1, 0, 0) # 상 하\n dy = (0, 0, -1, 1) # 좌 우\n count = 1 # 최소 방문 횟수\n\n while bfs:\n\n next_bfs = []\n for x, y, chance in bfs:\n if x == row - 1 and y == col - 1:\n break\n for i in range(4): # 상 하 좌 우\n temp_x = x + dx[i]\n temp_y = y + dy[i]\n\n # 외부벽일 때\n if temp_x < 0 or temp_y < 0 or temp_x > row - 1 or temp_y > col - 1:\n continue\n # 부술 기회가 있으며 벽을 맞닥뜨렸을 때\n if (\n chance\n and escape_map[temp_x][temp_y] == 1\n and not visited[chance - 1][temp_x][temp_y]\n ):\n next_bfs.append((temp_x, temp_y, chance - 1))\n visited[chance - 1][temp_x][temp_y] = True\n # 그냥 지나갈 수 있는 통로인 경우\n elif (\n escape_map[temp_x][temp_y] == 0\n and not visited[chance][temp_x][temp_y]\n ):\n next_bfs.append((temp_x, temp_y, chance))\n visited[chance][temp_x][temp_y] = True\n else:\n bfs = next_bfs\n count += 1\n continue\n break\n\n else:\n print(-1)\n exit(0)\n print(count)\n","sub_path":"boj/gold4/2206.py","file_name":"2206.py","file_ext":"py","file_size_in_byte":2007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"558026552","text":"'''\nfix-mpd-logs-missing-yr.py\n\nThis utility script is used to fix/correct MPD log files that were created without having a proper\ntimestamp format in their log line timestamps. Such log files are missing the year value in the \ntimestamps, making the logs hard to work with for use with playstats. 
\n\nThis script fixes that and replaces the timestamp in each line with one that is formatted like \nthat of the default format used in syslog logs.\n\n'''\n\nimport argparse\nimport datetime\n\n# Do setup processing so that this script can import all the needed modules from the \"mlu\" package.\n# This is necessary because these scripts are not located in the root directory of the project, but\n# instead in the 'scripts' folder.\nimport envsetup\nenvsetup.PreparePythonProjectEnvironment()\n\nfrom com.nwrobel import mypycommons\nimport com.nwrobel.mypycommons.file\nimport com.nwrobel.mypycommons.time\n\nimport mlu.mpd.logs\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"logFilepath\", \n help=\"Absolute filepath of the MPD log to fix the timestamps for, by adding the year to each timestamp.\",\n type=str)\n\n parser.add_argument(\"startYear\", \n help=\"The year of the timestamp of the first line in this log file (when log was first written to)\",\n type=str)\n\n args = parser.parse_args()\n\n rawLogFileLines = mypycommons.file.readFile(args.logFilepath)\n rawLogFileLines = [logLine.replace('\\n', '') for logLine in rawLogFileLines]\n fixedRawLogFileLines = []\n\n firstLineTimestamp = mlu.mpd.logs.getTimestampFromRawLogLineAddYear(logLine=rawLogFileLines[0], year=args.startYear)\n firstLineSyslogFmtTime = mlu.mpd.logs.formatTimestampToSyslogFormat(firstLineTimestamp)\n\n logTextPart = rawLogFileLines[0].split(' : ')[1]\n fixedRawLogFileLines.append(firstLineSyslogFmtTime + \" \" + logTextPart)\n del rawLogFileLines[0]\n\n for logLine in rawLogFileLines:\n lineTimestamp = mlu.mpd.logs.getTimestampFromRawLogLineAddYear(logLine=logLine, year=args.startYear)\n\n if (firstLineTimestamp > lineTimestamp):\n nextYear = str(int(args.startYear) + 1)\n lineTimestamp = mlu.mpd.logs.getTimestampFromRawLogLineAddYear(logLine=logLine, year=nextYear)\n\n if (firstLineTimestamp > lineTimestamp):\n raise RuntimeError(\"Logic error: this log line timestamp is incorrect - it is older than the oldest log line in the file\")\n\n lineSyslogFmtTime = mlu.mpd.logs.formatTimestampToSyslogFormat(lineTimestamp)\n logTextPart = logLine.split(' : ')[1]\n fixedRawLogFileLines.append(lineSyslogFmtTime + \" \" + logTextPart)\n\n newLogFileName = \"{} {}\".format(\"[YR+FIX]\", mypycommons.file.GetFilename(args.logFilepath))\n newLogFilepath = mypycommons.file.JoinPaths(mypycommons.file.getParentDirectory(args.logFilepath), newLogFileName)\n mypycommons.file.writeToFile(filepath=newLogFilepath, content=fixedRawLogFileLines)\n","sub_path":"scripts/fix-mpd-logs-missing-yr.py","file_name":"fix-mpd-logs-missing-yr.py","file_ext":"py","file_size_in_byte":2946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"624599194","text":"#!/usr/bin/python3\n\nA = 1\n\ndef outer():\n A = 2\n\n def inner():\n print(f\"Inside inner(): A = {A}\") # What's the output and why?\n A = 3\n\n inner()\n print(f\"After inner(): A = {A}\") # What's the output and why?\n\n\nouter()\nprint(f\"After outer(): A = {A}\") # What's the output and why?\n","sub_path":"Python/questions/namespace-all.py","file_name":"namespace-all.py","file_ext":"py","file_size_in_byte":312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"597055580","text":"from ConfigParser import *\nfrom StringIO import StringIO\n\n\nclass WZConfigParser(ConfigParser):\n \"\"\"\n Extended ConfigParser\n Add handling space comments started 
from spaces.\n Save ini files on ini :)\n \"\"\"\n header = None\n\n def optionxform(self, optionstr):\n \"\"\"default emplementation makes it lowercase\"\"\"\n return optionstr\n\n # def pre_read(self):\n def load(self, path):\n with open(path) as f:\n lines = f.readlines()\n is_header = True\n self.header = []\n new_lines = []\n for original_line in lines:\n line = original_line.strip()\n if line:\n\n if is_header:\n if line.startswith(';'):\n self.header.append(original_line)\n else:\n is_header = False\n new_lines.append(line)\n\n fp = StringIO('\\n'.join(new_lines))\n self.readfp(fp)\n\n def save(self, fp):\n if self.header:\n for line in self.header:\n fp.write(line)\n fp.write('\\n')\n self.write(fp)","sub_path":"libs/config_parser.py","file_name":"config_parser.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"535906523","text":"import csv\nfrom os import name\nimport matplotlib.pyplot as plt\nfrom datetime import datetime\n\n#Extract information for the first graph\nopen_file = open(\"sitka_weather_2018_simple.csv\", \"r\")\ncsv_file_1 = csv.reader(open_file, delimiter=\",\")\n\nheader_row = next(csv_file_1)\n#Get indexes for values\nfor index, column_header in enumerate(header_row):\n if column_header == \"TMAX\":\n high_index_1 = index\n elif column_header == \"TMIN\":\n low_index_1 = index\n elif column_header == \"DATE\":\n date_index_1 = index\n elif column_header == \"NAME\":\n name_index_1 = index\n\nhighs_1 = []\nlows_1 = []\ndates_1 = []\n\n\n\nfor rec in csv_file_1:\n try:\n date = datetime.strptime(rec[date_index_1], \"%Y-%m-%d\")\n high = int(rec[high_index_1])\n low = int(rec[low_index_1])\n graph_title_1 = rec[name_index_1]\n except ValueError:\n print(f\"Missing data for {date}\")\n else:\n highs_1.append(high)\n lows_1.append(low)\n dates_1.append(date)\nopen_file.close()\n\n#Extract information for the second graph\nopen_file = open(\"death_valley_2018_simple.csv\", \"r\")\ncsv_file_2 = csv.reader(open_file, delimiter=\",\")\n\nheader_row = next(csv_file_2)\n#Get indexes for values\nfor index, column_header in enumerate(header_row):\n if column_header == \"TMAX\":\n high_index_2 = index\n elif column_header == \"TMIN\":\n low_index_2 = index\n elif column_header == \"DATE\":\n date_index_2 = index\n elif column_header == \"NAME\":\n name_index_2 = index\n\nhighs_2 = []\nlows_2 = []\ndates_2 = []\n\n\n\nfor rec in csv_file_2:\n try:\n date = datetime.strptime(rec[date_index_2], \"%Y-%m-%d\")\n high = int(rec[high_index_2])\n low = int(rec[low_index_2])\n graph_title_2 = rec[name_index_2]\n except ValueError:\n print(f\"Missing data for {date}\")\n else:\n highs_2.append(high)\n lows_2.append(low)\n dates_2.append(date)\nopen_file.close()\n\n\n\nplt.subplot(2, 1, 1)\nplt.plot(dates_1, highs_1, c=\"red\")\nplt.plot(dates_1, lows_1, c = \"blue\")\nplt.fill_between(dates_1, highs_1, lows_1, facecolor = 'blue', alpha = 0.1)\nplt.title(graph_title_1)\n\nplt.subplot(2, 1, 2)\nplt.plot(dates_2, highs_2, c= \"red\")\nplt.plot(dates_2, lows_2, c=\"blue\")\nplt.fill_between(dates_2, highs_2, lows_2, facecolor = \"blue\", alpha = 0.1)\nplt.title(graph_title_2)\n\nplt.suptitle(f\"Temperature comparison between {graph_title_1} and {graph_title_2}\")\n\nplt.show()\n","sub_path":"sitka5.py","file_name":"sitka5.py","file_ext":"py","file_size_in_byte":2453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"143430389","text":"import 
inspect\nimport sys\n\nimport six\n\nfrom dagster.check import CheckError\nfrom dagster.core.errors import DagsterInvalidDefinitionError\nfrom dagster.seven import funcsigs, is_module_available\n\nfrom .input import InputDefinition\nfrom .output import OutputDefinition\n\n\ndef _infer_input_description_from_docstring(fn):\n if not is_module_available(\"docstring_parser\"):\n return {}\n\n from docstring_parser import parse\n\n docstring = parse(fn.__doc__)\n return {p.arg_name: p.description for p in docstring.params}\n\n\ndef _infer_output_description_from_docstring(fn):\n if not is_module_available(\"docstring_parser\"):\n return\n from docstring_parser import parse\n\n docstring = parse(fn.__doc__)\n if docstring.returns is None:\n return\n\n return docstring.returns.description\n\n\ndef infer_output_definitions(decorator_name, solid_name, fn):\n signature = funcsigs.signature(fn)\n try:\n description = _infer_output_description_from_docstring(fn)\n return [\n OutputDefinition()\n if signature.return_annotation is funcsigs.Signature.empty\n else OutputDefinition(signature.return_annotation, description=description)\n ]\n\n except CheckError as type_error:\n six.raise_from(\n DagsterInvalidDefinitionError(\n \"Error inferring Dagster type for return type \"\n '\"{type_annotation}\" from {decorator} \"{solid}\". '\n \"Correct the issue or explicitly pass definitions to {decorator}.\".format(\n decorator=decorator_name,\n solid=solid_name,\n type_annotation=signature.return_annotation,\n )\n ),\n type_error,\n )\n\n\ndef has_explicit_return_type(fn):\n signature = funcsigs.signature(fn)\n return not signature.return_annotation is funcsigs.Signature.empty\n\n\ndef _input_param_type(type_annotation):\n if sys.version_info.major >= 3 and type_annotation is not inspect.Parameter.empty:\n return type_annotation\n return None\n\n\ndef infer_input_definitions_for_lambda_solid(solid_name, fn):\n signature = funcsigs.signature(fn)\n params = list(signature.parameters.values())\n descriptions = _infer_input_description_from_docstring(fn)\n defs = _infer_inputs_from_params(params, \"@lambda_solid\", solid_name, descriptions=descriptions)\n return defs\n\n\ndef _infer_inputs_from_params(params, decorator_name, solid_name, descriptions=None):\n descriptions = descriptions or {}\n input_defs = []\n for param in params:\n try:\n if param.default is not funcsigs.Parameter.empty:\n input_def = InputDefinition(\n param.name,\n _input_param_type(param.annotation),\n default_value=param.default,\n description=descriptions.get(param.name),\n )\n else:\n input_def = InputDefinition(\n param.name,\n _input_param_type(param.annotation),\n description=descriptions.get(param.name),\n )\n\n input_defs.append(input_def)\n\n except CheckError as type_error:\n six.raise_from(\n DagsterInvalidDefinitionError(\n \"Error inferring Dagster type for input name {param} typed as \"\n '\"{type_annotation}\" from {decorator} \"{solid}\". 
'\n \"Correct the issue or explicitly pass definitions to {decorator}.\".format(\n decorator=decorator_name,\n solid=solid_name,\n param=param.name,\n type_annotation=param.annotation,\n )\n ),\n type_error,\n )\n\n return input_defs\n\n\ndef infer_input_definitions_for_composite_solid(solid_name, fn):\n signature = funcsigs.signature(fn)\n params = list(signature.parameters.values())\n descriptions = _infer_input_description_from_docstring(fn)\n defs = _infer_inputs_from_params(\n params, \"@composite_solid\", solid_name, descriptions=descriptions\n )\n return defs\n\n\ndef infer_input_definitions_for_solid(solid_name, fn):\n signature = funcsigs.signature(fn)\n params = list(signature.parameters.values())\n descriptions = _infer_input_description_from_docstring(fn)\n defs = _infer_inputs_from_params(params[1:], \"@solid\", solid_name, descriptions=descriptions)\n return defs\n","sub_path":"python_modules/dagster/dagster/core/definitions/inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":4527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"385319653","text":"from utils import CE, crop_image, dice_loss\nfrom sklearn.metrics import confusion_matrix\nimport os\nos.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"2,3\"\nimport ConvNet\nimport dataloader\nimport torch.optim as optim\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport utils\nimport visdom\nfrom dataloader import SegData\nfrom torchvision import transforms\nfrom torch.utils.data import DataLoader\nimport matplotlib.pyplot as plt\n\n\ndef transform(img):\n max_value = img.max()\n img_tensor = torch.from_numpy(img.astype(np.int32))\n img_tensor = img_tensor.float() / max_value\n return img_tensor\n\ntrain_root = '/home/srk1995/pub/db/Dicom_Image_Unet_pseudo/Train/'\ntest_root = '/home/srk1995/pub/db/Dicom_Image_Unet_pseudo/Test/'\nPATH = './saved/'\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\nvis = visdom.Visdom()\n\ntrain_batch_num = 1\n\nalpha = 0\nmse = torch.nn.MSELoss()\n\nproj_pix = [256, 256]\n\n\n\n# train_win = vis.line(Y=torch.randn(1), X=np.array([5]), opts=dict(title=\"Train\"))\n# test_win = vis.line(Y=torch.randn(1), X=np.array([5]), opts=dict(title=\"Test\"))\nloss_win = None\ntrain_drr_win = None\ntest_drr_win = None\ntrain_xray_win = None\ntest_xray_win = None\n\ntransfroms_ = transforms.Compose([\n transforms.ToTensor(),\n # transforms.Resize((64, 64))\n])\ntrain_dataset = SegData(train_root, transform=transfroms_)\ntest_dataset = SegData(test_root, transform=transfroms_)\ntrainloader = DataLoader(train_dataset, batch_size=train_batch_num, shuffle=True, num_workers=0)\ntestloader = DataLoader(test_dataset, batch_size=train_batch_num, shuffle=False, num_workers=0)\n\nnet = ConvNet.layer6Net(1, 20, 6)\nnet = net.cuda()\nnet = nn.DataParallel(net)\n\ncriterion = torch.nn.MSELoss()\noptimizer = optim.Adam(net.parameters(), lr=1e-3, weight_decay=1e-4)\n# train_scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=300)\n\nbest_loss = np.inf\n\n\n\ndef test(net, loader, optimizer):\n test_tre = 0.0\n num = 0\n net.eval()\n\n for i, data in enumerate(loader, 0):\n # inputs and labels.\n inputs = data[0]\n inputs_X = data[1]\n inputs, inputs_X, labels= inputs.cuda(), inputs_X.cuda(), data[2].cuda()\n # Set the gradient to be 0.\n optimizer.zero_grad()\n\n # Feed forward\n outputs = net(inputs, inputs_X)\n\n drr = 
utils.DRR_generation(data[0].view(1, inputs.shape[2], inputs.shape[3], inputs.shape[4]), outputs,\n train_batch_num).view((1, proj_pix[0], proj_pix[1]))\n tre = utils.TRE(data[0].view(1, inputs.shape[2], inputs.shape[3], inputs.shape[4]), labels, outputs, train_batch_num)\n\n plt.imsave(\"./images/test/alpha1e-2/xray\"+str(i)+\".png\", inputs_X.view((256, 256)).cpu().numpy())\n plt.imsave(\"./images/test/alpha1e-2/drr\" + str(i) + \".png\", drr.view((256, 256)).cpu().numpy())\n\n test_tre += tre.item()\n num += data[0].size(0)\n\n return test_tre / num, drr\n\n\nif __name__ == \"__main__\":\n env = \"seg_6layer_alpha_1e-2_lr_1e-2\"\n # vis.close(env=env)\n ck = torch.load(\"./saved/BEST\"+env[3:] + \".pth\")\n net.load_state_dict(ck['state_dict'])\n optimizer.load_state_dict(ck['optimizer'])\n best_loss = ck['best_loss']\n\n for epoch in range(1):\n test_tre, test_drr = test(net, testloader, optimizer)\n\n print('Target Registration Error: %.3f' % (test_tre))\n\n\n\n print('Finished Training')","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"348108189","text":"# Give an array of integers:\n# arr = [100, -101, 200, -3, 1000]\n# Find out the biggest sum of 2 integer\n# And return the INDEX of those integer\n\narr = [100, -101, 200, -3, 1000]\nmax = 0\nnew = []\nfor i in range(0, len(arr) - 1):\n for j in range(i + 1, len(arr)):\n if (arr[i]+arr[j] > max):\n max = arr[i]+arr[j]\n new.append(i)\n new.append(j)\n\nprint(new[-2:])\n\n\n\n","sub_path":"Corgi_golden_age.py","file_name":"Corgi_golden_age.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"506668772","text":"import database_helper\nimport random\nimport config\n\n\nclass Animal:\n\n def __init__(self, species, age, name, gender, weight):\n self.species = species\n self.age = age\n self.name = name\n self.gender = gender\n self.weight = weight\n self._get_animal_stats()\n self.is_dead = False\n self.is_pregnant = False\n self.relax_period = age\n self.gestination_period = 0\n\n # Loads the common stats for the species\n def _get_animal_stats(self):\n animals_database = database_helper.read_database(config.DATABASE)\n if self.species in animals_database:\n self.species_info = animals_database[self.species]\n else:\n return False\n\n def _chance_of_dying(self):\n return self.age / self.species_info['life_expectancy']\n\n def try_die(self):\n if self.is_dead:\n # If the animal is dead it cannot die!\n return False\n if random.random() < self._chance_of_dying():\n self.is_dead = True\n return self.is_dead\n\n def eat(self):\n food_per_kg = self.species_info['food_weight_ratio']\n eaten_food = self.weight * food_per_kg\n food_cost = config.FOODS_PRIZE[self.species_info['food_type']]\n cost = eaten_food * food_cost\n return int(cost)\n\n def grow(self, months):\n self.age += months\n if self.weight < self.species_info['average_weight']:\n adding_weight = self.species_info['weight_age_ration'] * months\n self.weight += adding_weight\n if self.is_pregnant is False:\n self.relax_period += months\n elif self.is_pregnant is True:\n self.relax_period = 0\n\n def get_pregnant(self):\n self.is_pregnant = True\n self.gestination_period = 0\n self.relax_period = 
0\n","sub_path":"week4/zoo/animal.py","file_name":"animal.py","file_ext":"py","file_size_in_byte":1838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"559129088","text":"\"\"\"\nA simple bubble tea store flask app.\nata is stored in a SQLite database that looks something like the following:\n\n+---------+-----------------+-----------+-------+---------+-------------+--------------+--------+---------------+---------+------------+\n| Name | Street | City | State | Zipcode | Store Hours | Phonenumber | Rating | Menu | Review | signed_on |\n+=========+=================+===========+=======+=========+=============+==============+========+===============+=========+============+\n| Bubble |232 SW 122th Ave | Beaverton | OR | 98006 | M-Sa: 10-9 | 503-232-1212 | 4 |Mango Bubble...| Awesome | 2012-05-28 |\n+---------+-----------------+-----------+-------+---------+-------------+--------------+--------+---------------+---------+------------+\n\nThis can be created with the following SQL (see bottom of this file):\n\n create table bubbleteaStore (name text, staddr text, city text, state text, zipcode text, storehours text, phonenumber text,rating text, menu, review, signed_on date);\n\n\"\"\"\nfrom datetime import date\nfrom .Model import Model\nimport sqlite3\nDB_FILE = 'entries.db' # file for our Database\n\nclass model(Model):\n def __init__(self):\n # Make sure our database exists\n connection = sqlite3.connect(DB_FILE)\n cursor = connection.cursor()\n try:\n cursor.execute(\"select count(rowid) from bubbleteaStore\")\n except sqlite3.OperationalError:\n cursor.execute(\"create table bubbleteaStore (name text, staddr text, city text, state text, zipcode text, storehours text, phonenumber text,rating text, menu, review, signed_on date)\")\n cursor.close()\n\n def select(self):\n \"\"\"\n Gets all rows from the database\n Each row contains: name, staddr, city, state, zipcode, storehours, phonenumber, rating, menu, review, date\n :return: List of lists containing all rows of database\n \"\"\"\n connection = sqlite3.connect(DB_FILE)\n cursor = connection.cursor()\n cursor.execute(\"SELECT * FROM bubbleteaStore\")\n return cursor.fetchall()\n\n def insert(self, name, staddr, city, state, zipcode, storehours, phonenumber, rating, menu, review):\n \"\"\"\n Inserts entry into database\n :param name: String\n :param staddr: String\n :param city: String\n :param state: String\n :param zipcode: String\n :param storehours: String\n :param phonenumber: String\n :param rating: String\n :param menu: String\n :param review: String\n :return: True\n :raises: Database errors on connection and insertion\n \"\"\"\n params = {'name':name, 'staddr':staddr,'city':city, \n 'state':state, 'zipcode':zipcode, 'storehours':storehours, 'phonenumber':phonenumber, \n 'rating':rating, 'menu':menu, 'review':review, 'date':date.today()}\n connection = sqlite3.connect(DB_FILE)\n cursor = connection.cursor()\n cursor.execute(\"insert into bubbleteaStore (name, staddr, city, state, zipcode, storehours, phonenumber, rating, menu, review, signed_on) VALUES (:name, :staddr, :city, :state, :zipcode, :storehours, :phonenumber, :rating, :menu, :review, :date)\", params)\n\n connection.commit()\n cursor.close()\n return True\n","sub_path":"hw3/gbmodel/model_sqlite3.py","file_name":"model_sqlite3.py","file_ext":"py","file_size_in_byte":3335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"349208278","text":"import os\r\nimport 
sys\r\nimport numpy as np\r\nimport scipy.io\r\nimport scipy.misc\r\nimport cv2\r\nimport tensorflow as tf\r\nimport h5py as h5\r\nimport pandas as pd \r\nfrom keras import backend as K\r\n\r\nf = h5.File('trained_model-11-19.h5', 'r')\r\n\r\nfile_path = 'cac.csv'\r\n# Constants for the image input and output.\r\n# Output folder for the images.\r\nOUTPUT_DIR = 'output/'\r\n# Style image \r\nSTYLE_IMAGE = 'images/StarryNight.jpg'\r\n# Content image to use.\r\nCONTENT_IMAGE = 'images/hongkong.jpg'\r\n# Image dimensions constants. \r\n#ls = cv2.imread(CONTENT_IMAGE)\r\n#print(ls.shape)\r\nIMAGE_WIDTH = 800\r\nIMAGE_HEIGHT = 600\r\nCOLOR_CHANNELS = 3\r\n# Noise ratio. Percentage of weight of the noise for intermixing with the\r\n# content image.\r\nNOISE_RATIO = 0.6\r\n# Number of iterations to run.\r\nITERATIONS = 20\r\n# Constant to put more emphasis on content loss.\r\nBETA = 5\r\n# Constant to put more emphasis on style loss.\r\nALPHA = 100\r\nVGG_MODEL = 'imagenet-vgg-verydeep-19.mat'\r\n# The mean to subtract from the input to the VGG model. This is the mean that\r\n# when the VGG was used to train. Minor changes to this will make a lot of\r\n# difference to the performance of model.\r\nMEAN_VALUES = np.array([123.68, 116.779, 103.939]).reshape((1,1,1,3))\r\n\r\ndef generate_noise_image(content_image, noise_ratio = NOISE_RATIO):\r\n \"\"\"\r\n Returns a noise image intermixed with the content image at a certain ratio.\r\n \"\"\"\r\n noise_image = np.random.uniform(\r\n -20, 20,\r\n (1, IMAGE_HEIGHT, IMAGE_WIDTH, COLOR_CHANNELS)).astype('float32')\r\n # White noise image from the content representation. Take a weighted average\r\n # of the values\r\n input_image = noise_image * noise_ratio + content_image * (1 - noise_ratio)\r\n return input_image\r\n''' def crop_center(img,cropx,cropy):\r\n y,x,z = img.shape\r\n startx = x//2-(cropx//2)\r\n starty = y//2-(cropy//2) \r\n return img[starty:starty+cropy,startx:startx+cropx]\r\n '''\r\nK.set_image_data_format('channels_last')\r\n\r\n\r\n\r\n\r\n# def CapsNet(input_shape, n_class, routings):\r\n# \"\"\"\r\n# A Capsule Network on MNIST.\r\n# :param input_shape: data shape, 3d, [width, height, channels]\r\n# :param n_class: number of classes\r\n# :param routings: number of routing iterations\r\n# :return: Two Keras Models, the first one used for training, and the second one for evaluation.\r\n# `eval_model` can also be used for training.\r\n# \"\"\"\r\n# x = layers.Input(shape=input_shape)\r\n\r\n# # Layer 1: Just a conventional Conv2D layer\r\n# conv1 = layers.Conv2D(filters=256, kernel_size=9, strides=1, padding='valid', activation='relu', name='conv1')(x)\r\n\r\n# # Layer 2: Conv2D layer with `squash` activation, then reshape to [None, num_capsule, dim_capsule]\r\n# primarycaps = PrimaryCap(conv1, dim_capsule=8, n_channels=32, kernel_size=9, strides=2, padding='valid')\r\n\r\n# # Layer 3: Capsule layer. Routing algorithm works here.\r\n# digitcaps = CapsuleLayer(num_capsule=n_class, dim_capsule=16, routings=routings,\r\n# name='digitcaps')(primarycaps)\r\n\r\n# # Layer 4: This is an auxiliary layer to replace each capsule with its length. Just to match the true label's shape.\r\n# # If using tensorflow, this will not be necessary. :)\r\n# out_caps = Length(name='capsnet')(digitcaps)\r\n\r\n# # Decoder network.\r\n# y = layers.Input(shape=(n_class,))\r\n# masked_by_y = Mask()([digitcaps, y]) # The true label is used to mask the output of capsule layer. For training\r\n# masked = Mask()(digitcaps) # Mask using the capsule with maximal length. 
For prediction\r\n\r\n# # Shared Decoder model in training and prediction\r\n# decoder = models.Sequential(name='decoder')\r\n# decoder.add(layers.Dense(512, activation='relu', input_dim=16*n_class))\r\n# decoder.add(layers.Dense(1024, activation='relu'))\r\n# decoder.add(layers.Dense(np.prod(input_shape), activation='sigmoid'))\r\n# decoder.add(layers.Reshape(target_shape=input_shape, name='out_recon'))\r\n\r\n \r\n\r\n\r\n\r\ndef load_image(path):\r\n image = scipy.misc.imread(path)\r\n # Resize the image for convnet input, there is no change but just\r\n # add an extra dimension.\r\n #image = crop_center(image, 400, 400)\r\n image = np.reshape(image, ((1,) + image.shape))\r\n # Input to the VGG model expects the mean to be subtracted.\r\n image = image - MEAN_VALUES\r\n return image\r\n\r\ndef multiplier(image, file_path):\r\n # import pandas as pd\r\n df = pd.read_csv(read_csv)\r\n mat = df.iloc[1:, 1:]\r\n mat_val = mat.values\r\n image = np.einsum('ij,jkl->ikl',mat_val,image)\r\n \r\n\r\ndef save_image(path, image):\r\n # Output should add back the mean.\r\n image = image + MEAN_VALUES\r\n # Get rid of the first useless dimension, what remains is the image.\r\n image = image[0]\r\n image = np.clip(image, 0, 255).astype('uint8')\r\n scipy.misc.imsave(path, image)\r\n\r\ndef load_vgg_model(path):\r\n \r\n\r\n vgg = scipy.io.loadmat(path)\r\n\r\n vgg_layers = vgg['layers']\r\n def _weights(layer, expected_layer_name):\r\n \"\"\"\r\n Return the weights and bias from the VGG model for a given layer.\r\n \"\"\"\r\n W = vgg_layers[0][layer][0][0][0][0][0]\r\n b = vgg_layers[0][layer][0][0][0][0][1]\r\n layer_name = vgg_layers[0][layer][0][0][-2]\r\n assert layer_name == expected_layer_name\r\n return W, b\r\n\r\n def _relu(conv2d_layer):\r\n \"\"\"\r\n Return the RELU function wrapped over a TensorFlow layer. 
Expects a\r\n Conv2d layer input.\r\n \"\"\"\r\n return tf.nn.relu(conv2d_layer)\r\n\r\n def _conv2d(prev_layer, layer, layer_name):\r\n \"\"\"\r\n Return the Conv2D layer using the weights, biases from the VGG\r\n model at 'layer'.\r\n \"\"\"\r\n W, b = _weights(layer, layer_name)\r\n W = tf.constant(W)\r\n b = tf.constant(np.reshape(b, (b.size)))\r\n return tf.nn.conv2d(\r\n prev_layer, filter=W, strides=[1, 1, 1, 1], padding='SAME') + b\r\n\r\n def _conv2d_relu(prev_layer, layer, layer_name):\r\n \"\"\"\r\n Return the Conv2D + RELU layer using the weights, biases from the VGG\r\n model at 'layer'.\r\n \"\"\"\r\n return _relu(_conv2d(prev_layer, layer, layer_name))\r\n\r\n def _avgpool(prev_layer):\r\n \"\"\"\r\n Return the AveragePooling layer.\r\n \"\"\"\r\n return tf.nn.avg_pool(prev_layer, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\r\n\r\n def caps_call(caps_layers,shape=[800,600,3]):\r\n\r\n # Constructs the graph model.\r\n graph=[]\r\n graph['input'] = tf.Variable(np.zeros((1, IMAGE_HEIGHT, IMAGE_WIDTH, COLOR_CHANNELS)), dtype = 'float32')\r\n graph['caps'] = _caps_relu(graph['input'],0 , 'caps' )\r\n graph['conv1_1'] = _conv2d_relu(graph['caps'], 1, 'conv1_1')\r\n graph['conv1_2'] = _conv2d_relu(graph['conv1_1'], 2, 'conv1_2')\r\n graph['avgpool1'] = _avgpool(graph['conv1_2'])\r\n graph['conv2_1'] = _conv2d_relu(graph['avgpool1'], 5, 'conv2_1')\r\n graph['conv2_2'] = _conv2d_relu(graph['conv2_1'], 7, 'conv2_2')\r\n graph['avgpool2'] = _avgpool(graph['conv2_2'])\r\n graph['conv3_1'] = _conv2d_relu(graph['avgpool2'], 10, 'conv3_1')\r\n graph['conv3_2'] = _conv2d_relu(graph['conv3_1'], 12, 'conv3_2')\r\n graph['conv3_3'] = _conv2d_relu(graph['conv3_2'], 14, 'conv3_3')\r\n graph['conv3_4'] = _conv2d_relu(graph['conv3_3'], 16, 'conv3_4')\r\n graph['avgpool3'] = _avgpool(graph['conv3_4'])\r\n graph['conv4_1'] = _conv2d_relu(graph['avgpool3'], 19, 'conv4_1')\r\n graph['conv4_2'] = _conv2d_relu(graph['conv4_1'], 21, 'conv4_2')\r\n graph['conv4_3'] = _conv2d_relu(graph['conv4_2'], 23, 'conv4_3')\r\n graph['conv4_4'] = _conv2d_relu(graph['conv4_3'], 25, 'conv4_4')\r\n graph['avgpool4'] = _avgpool(graph['conv4_4'])\r\n graph['conv5_1'] = _conv2d_relu(graph['avgpool4'], 28, 'conv5_1')\r\n graph['conv5_2'] = _conv2d_relu(graph['conv5_1'], 30, 'conv5_2')\r\n graph['conv5_3'] = _conv2d_relu(graph['conv5_2'], 32, 'conv5_3')\r\n graph['conv5_4'] = _conv2d_relu(graph['conv5_3'], 34, 'conv5_4')\r\n graph['avgpool5'] = _avgpool(graph['conv5_4'])\r\n return graph\r\n\r\ndef content_loss_func(sess, model):\r\n \"\"\"\r\n Content loss function as defined in the paper.\r\n \"\"\"\r\n def _content_loss(p, x):\r\n # N is the number of filters (at layer l).\r\n N = p.shape[3]\r\n # M is the height times the width of the feature map (at layer l).\r\n M = p.shape[1] * p.shape[2]\r\n\r\n return (1 / (4 * N * M)) * tf.reduce_sum(tf.pow(x - p, 2))\r\n return _content_loss(sess.run(model['conv4_2']), model['conv4_2'])\r\n\r\ndef style_loss_func(sess, model):\r\n \"\"\"\r\n Style loss function as defined in the paper.\r\n \"\"\"\r\n def _gram_matrix(F, N, M):\r\n \"\"\"\r\n The gram matrix G.\r\n \"\"\"\r\n Ft = tf.reshape(F, (M, N))\r\n return tf.matmul(tf.transpose(Ft), Ft)\r\n\r\n def _style_loss(a, x):\r\n \"\"\"\r\n The style loss calculation.\r\n \"\"\"\r\n # N is the number of filters (at layer l).\r\n N = a.shape[3]\r\n # M is the height times the width of the feature map (at layer l).\r\n M = a.shape[1] * a.shape[2]\r\n # A is the style representation of the original image (at layer 
l).\r\n A = _gram_matrix(a, N, M)\r\n # G is the style representation of the generated image (at layer l).\r\n G = _gram_matrix(x, N, M)\r\n result = (1 / (4 * N**2 * M**2)) * tf.reduce_sum(tf.pow(G - A, 2))\r\n return result\r\n\r\n # Layers to use. We will use these layers as advised in the paper.\r\n # To have softer features, increase the weight of the higher layers\r\n # (conv5_1) and decrease the weight of the lower layers (conv1_1).\r\n # To have harder features, decrease the weight of the higher layers\r\n # (conv5_1) and increase the weight of the lower layers (conv1_1).\r\n layers = [\r\n ('conv1_1', 0.5),\r\n ('conv2_1', 1.0),\r\n ('conv3_1', 1.5),\r\n ('conv4_1', 3.0),\r\n ('conv5_1', 4.0),\r\n ]\r\n\r\n E = [_style_loss(sess.run(model[layer_name]), model[layer_name]) for layer_name, _ in layers]\r\n W = [w for _, w in layers]\r\n loss = sum([W[l] * E[l] for l in range(len(layers))])\r\n return loss\r\n\r\n\r\nif __name__ == '__main__':\r\n with tf.Session() as sess:\r\n # Load the images.\r\n content_image = load_image(CONTENT_IMAGE)\r\n style_image = load_image(STYLE_IMAGE)\r\n # Load the model.\r\n model = load_vgg_model(VGG_MODEL)\r\n\r\n # Generate the white noise and content presentation mixed image\r\n # which will be the basis for the algorithm to \"paint\".\r\n input_image = generate_noise_image(content_image)\r\n\r\n sess.run(tf.initialize_all_variables())\r\n # Construct content_loss using content_image.\r\n sess.run(model['input'].assign(content_image))\r\n content_loss = content_loss_func(sess, model)\r\n\r\n # Construct style_loss using style_image.\r\n sess.run(model['input'].assign(style_image))\r\n style_loss = style_loss_func(sess, model)\r\n\r\n # Instantiate equation 7 of the paper.\r\n total_loss = BETA * content_loss + ALPHA * style_loss\r\n # The content is built from one layer, while the style is from five\r\n # layers. Then we minimize the total_loss, which is the equation 7.\r\n optimizer = tf.train.AdamOptimizer(2.0)\r\n train_step = optimizer.minimize(total_loss)\r\n\r\n sess.run(tf.initialize_all_variables())\r\n sess.run(model['input'].assign(input_image))\r\n for it in range(ITERATIONS):\r\n sess.run(train_step)\r\n\r\n mixed_image = sess.run(model['input'])\r\n print('Iteration %d' % (it))\r\n print('sum : ', sess.run(tf.reduce_sum(mixed_image)))\r\n print('cost: ', sess.run(total_loss))\r\n\r\n if not os.path.exists(OUTPUT_DIR):\r\n os.mkdir(OUTPUT_DIR)\r\n\r\n filename = 'output/%d.png' % (it)\r\n save_image(filename, mixed_image)","sub_path":"solve.py","file_name":"solve.py","file_ext":"py","file_size_in_byte":12056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"323845884","text":"# 测试pandas是否安装成功\nfrom pandas import DataFrame, Series\nimport pandas as pd\nimport numpy as np\n\nrecords = [{'name': 'dan', 'age': 18}, {'name': 'star', 'age': 20}, {'name': 'rui', 'age': 20}]\nframe = DataFrame(records)\nprint(frame)\n\n# strip函数练习,去除两端的空格或者字符\nstring1 = \"___Remove unwanted from this string. 
\\t\\t \\n +++$$$\"\nprint(\"Output#1: string1: {}\".format(string1))\n\nstring1_lstrip = string1.lstrip()\nprint(\"Output#2: lstrip: {}\".format(string1_lstrip))\n\nstring1_rstrip = string1.rstrip()\nprint(\"Output#3: rstrip: {}\".format(string1_rstrip))\n\nstring1_strip = string1.strip('_+$')\nprint(\"Output#4: strip: {}\".format(string1_strip))\n","sub_path":"test_2/01_python_string_strip.py","file_name":"01_python_string_strip.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"637910044","text":"# DAI2.py -- new version of Dummy Device DAI.py, modified by tsaiwn@cs.nctu.edu.tw\n# you can get from here: https://goo.gl/6jtP41 ; Search dummy + iottalk for other files\nimport time, DAN, requests, random \nimport threading, sys\n\n# ServerURL = 'http://Your_server_IP_or_DomainName:9999' #with no secure connection\n# ServerURL = 'http://192.168.20.101:9999' #with no secure connection\nServerURL = 'https://3.iottalk.tw' #with SSL secure connection\n# ServerURL = 'https://Your_DomainName' #with SSL connection (IP can not be used with https)\nReg_addr = None #if None, Reg_addr = MAC address\n\nmac_addr = 'C860008BD249_9795654898900' # put here for easy to modify;; the mac_addr in DAN.py is NOT used\n# Copy DAI.py to DAI2.py and then modify the above mac_addr, then you can have two dummy devices\nReg_addr = mac_addr # Otherwise, the mac addr generated in DAN.py will always be the same !\n\nDAN.profile['dm_name']='Dummy_Device' # you can change this but should also add the DM in server\nDAN.profile['df_list']=['Dummy_Sensor', 'Dummy_Control']\nDAN.profile['d_name']= '0516097_device_T' # None for autoNaming\nDAN.device_registration_with_retry(ServerURL, Reg_addr)\n\n# global gotInput, theInput\ngotInput=False\ntheInput=\"haha\"\nallDead=False\n\n\n\nwhile True:\n try:\n f = open('CoolTerm Capture 2020-07-01 20-31-43','r')\n lines = f.read().splitlines()\n score = int(lines[-1]) \n DAN.push ('Dummy_Sensor', score, score)\n time.sleep(0.5)\n\n except Exception as e:\n print(e)\n \nprint(\"Bye ! 
--------------\", flush=True)\nsys.exit( );","sub_path":"DAI.py","file_name":"DAI.py","file_ext":"py","file_size_in_byte":1577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"1645568","text":"import os\nimport glob\nimport time\n\n#http://www.innovadomotics.com/mn-tuto/mn-mod/mn-rp/11-raspberry-pi-ds18b20.html\n\nos.system('modprobe w1-gpio')\nos.system('modprobe w1-therm')\n\n_direccion = '/sys/bus/w1/devices/'\ndispositivo_folder = glob.glob(_direccion + '28*')[0]\ndispositivo_pad = dispositivo_folder + '/w1_slave'\n\ndef leer_temperatura():\n f = open(dispositivo_pad, 'r')\n lineas = f.readlines()\n f.close()\n return lineas\n\ndef determinar_valores():\n lineas = leer_temperatura()\n while lineas[0].strip()[-3:] != 'YES':\n time.sleep(0.2)\n lineas = leer_temperatura()\n igual_pos = lineas[1].find('t=')\n if igual_pos != -1:\n temp_string = lineas[1][igual_pos+2:]\n temp_c = float(temp_string) / 1000.0\n temp_f = temp_c * 9.0 / 5.0 + 32.0\n return temp_c, temp_f\nwhile True:\n print(\"centigrados,fahrenheit\")\n print(determinar_valores())\n time.sleep(1)\n","sub_path":"Temp.py","file_name":"Temp.py","file_ext":"py","file_size_in_byte":915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"370395303","text":"'''\nauthor: HAK\ntime : 11:00 PM, 05/11/2017\n'''\n\nimport os, time\nimport sys\nfrom Config import ConfigPaths\nfrom shutil import copy\nimport csv\nimport ntpath\nimport subprocess\nfrom subprocess import DEVNULL\nfrom colorama import Fore\nheaders = ['File Name', 'Timestamp']\n\n#Get process id (Get the path for file check if its doe copying or downloading)\n#--------------------------------------------------------------------------------------------\nprint('Process id is',str(os.getpid()))\nfilepath = sys.argv[1]\nfileObj = None\n#--------------------------------------------------------------------------------------------\n\n\n#Directories paths to put files\n#--------------------------------------------------------------------------------------------\npaths_list = [x[1] for x in ConfigPaths.config.items('hak.paths')]\n#--------------------------------------------------------------------------------------------\n\n\n#Csvs paths to put files\n#--------------------------------------------------------------------------------------------\ncsv_list = [x[1] for x in ConfigPaths.config.items('hak.csv')]\n#--------------------------------------------------------------------------------------------\n\n\n#Check for each directory existence\n#--------------------------------------------------------------------------------------------\nfor paths in paths_list:\n if not os.path.isdir(paths):\n print(Fore.RED,'Provided directory for some variable not exist '+paths,Fore.RESET)\n sys.exit(0)\n#--------------------------------------------------------------------------------------------\n\n\n#Update csv for files added\n#--------------------------------------------------------------------------------------------\ndef update_csv(path, timeOfFile, pathName):\n csvFile = None\n while True:\n try:\n csvFile = open(path, 'a')\n if csvFile:\n csvFile = open(path, 'a')\n writercsv = csv.DictWriter(csvFile, delimiter=',', lineterminator='\\n', fieldnames=headers)\n writercsv.writerow({'File Name': str(pathName),\n 'Timestamp': timeOfFile})\n print(Fore.GREEN,'CSV file not locked', csvFile.name, Fore.RESET)\n except OSError:\n print('csv locked', csvFile.name)\n finally:\n if csvFile:\n 
print(Fore.GREEN,'CSV file written and closed', csvFile.name, Fore.RESET)\n csvFile.close()\n break\n time.sleep(2)\n# --------------------------------------------------------------------------------------------\n\n\n# Keep running until a process releases file\n#---------------------------------------------------------------------------------------------\nif os.path.exists(filepath):\n while True:\n try:\n fileObj = open(filepath, 'a')\n print('trying to open file',filepath)\n if fileObj:\n print(Fore.GREEN,'file not locked',filepath,Fore.RESET)\n except OSError:\n print(Fore.GREEN,'file is locked',filepath,Fore.RESET)\n finally:\n if fileObj:\n fileObj.close()\n\n # Convert file from .js to .json and get file creation time\n newname = filepath.replace('.js', '.json') if '.json' not in filepath else filepath\n filectime = time.ctime(os.path.getmtime(filepath))\n os.rename(filepath, newname)\n\n # Start a new process based on file type (name)\n if 'SENSOR' in newname:\n copy(newname, paths_list[3])\n update_csv(csv_list[3], filectime, paths_list[3] + '/' + ntpath.basename(newname))\n subprocess.Popen(['python3', 'sensorcsvProcessing.py', paths_list[3] + '/' + ntpath.basename(newname)])\n elif 'LOG' in newname:\n copy(newname, paths_list[2])\n update_csv(csv_list[2], filectime, paths_list[2] + '/' + ntpath.basename(newname))\n elif 'ERRORS' in newname:\n copy(newname, paths_list[0])\n update_csv(csv_list[0], filectime, paths_list[0] + '/' + ntpath.basename(newname))\n elif ntpath.basename(newname).startswith('METER'):\n copy(newname, paths_list[4])\n update_csv(csv_list[4], filectime, paths_list[4] + '/' + ntpath.basename(newname))\n subprocess.Popen(['python3', 'metercsvProcessing.py', paths_list[4]+'/'+ntpath.basename(newname)])\n elif ntpath.basename(newname).startswith('INVERTER'):\n copy(newname, paths_list[1])\n update_csv(csv_list[1], filectime, paths_list[1] + '/' + ntpath.basename(newname))\n subprocess.Popen(['python3', 'invertercsvProcessing.py', paths_list[1]+'/'+ntpath.basename(newname)])\n os.remove(newname)\n break\n time.sleep(2)\n sys.exit(1)\nelse:\n print('file path not exist', filepath)\n sys.exit(0)\n#----------------------------------------------------------------------------------------------","sub_path":"checkFilerelease.py","file_name":"checkFilerelease.py","file_ext":"py","file_size_in_byte":5128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"77499132","text":"from django.test import TestCase\nfrom django.test import Client\nfrom django.urls import resolve\nfrom .views import index, about_me, landing_page_content, message_table, message_post\nfrom .views import response as views_response\nfrom lab_1.views import mhs_name\nfrom .models import Message\nfrom .forms import Message_Form\n\nimport os\nimport environ\n\n# Create your tests here.\nclient_main = Client()\nroot = environ.Path(__file__) - 3 # three folder back (/a/b/c/ - 3 = /)\nenv = environ.Env(DEBUG=(bool, False),)\nenviron.Env.read_env('.env')\n\ndef setUpModule():\n\tprint(\"\\nTesting Lab 4\")\n\tclient_main.post('/custom_auth/login/', {\"username\": env(\"SSO_USERNAME\"), \"password\": env(\"SSO_PASSWORD\")})\n\nclass Lab4UnitTest(TestCase):\n\tdef test_lab_4_url_is_exist(self):\n\t\tresponse = client_main.get('/lab-4/')\n\t\tself.assertEqual(response.status_code, 200)\n\n\tdef test_lab_4_has_navbar(self):\n\t\tresponse = client_main.get('/lab-4/')\n\t\thtml_response = response.content.decode('utf8')\n\t\tself.assertIn('