diff --git "a/2551.jsonl" "b/2551.jsonl" new file mode 100644--- /dev/null +++ "b/2551.jsonl" @@ -0,0 +1,778 @@ +{"seq_id":"436858022","text":"\"\"\"\nBefore you can use this, you have to do the following:\n\nwget http://nlp.stanford.edu/software/stanford-corenlp-full-2017-06-09.zip\nunzip stanford-corenlp-full-2017-06-09.zip\n\ncd stanford-corenlp-full-2017-06-09\njava -mx5g -cp \"*\" edu.stanford.nlp.pipeline.StanfordCoreNLPServer -timeout 10000\n\npip install pycorenlp\n\nMUST BE RUN WITH PYTHON 3\n\"\"\"\n\nfrom pycorenlp import StanfordCoreNLP\nimport numpy as np\nimport csv\nimport glob\nimport json\n\ndef analyze_context(context):\n \"\"\"Perform stanford sentiment analysis on context of a citation\"\"\"\n nlp = StanfordCoreNLP('http://localhost:9000')\n res = nlp.annotate(context,\n properties={\n 'annotators': 'sentiment',\n 'outputFormat': 'json',\n 'timeout': 10000,\n })\n print(type(res))\n sentences =''\n try:\n sentences=res['sentences']\n except Exception as e:\n print(e)\n if type(res) is str:\n try:\n res=json.loads(res)\n sentences = res['sentences']\n except Exception as e:\n print(res)\n print(e)\n sentenceValues = []\n #print(json.dumps(res,indent=4))\n try:\n for s in sentences:\n sentenceValues.append(int(s['sentimentValue']))\n \"\"\"print (\"%d: '%s': %s %s\" % (\n s[\"index\"],\n \" \".join([t[\"word\"] for t in s[\"tokens\"]]),\n s[\"sentimentValue\"], s[\"sentiment\"]))\n \"\"\"\n average = np.mean(sentenceValues)\n except Exception as e:\n print(e)\n return average\n\n\ndef write_new_csv(doi):\n \"\"\"Write sentiment analysis to csv\"\"\"\n reader = csv.DictReader(open(\"{}.csv\".format(doi), 'r'))\n cols = ['intext', 'full', 'context', 'doi', 'title', 'sentiment']\n fh = open(\"{}_with_sentiment.csv\".format(doi), 'w')\n writer = csv.DictWriter(fh, fieldnames=cols, delimiter=',')\n writer.writeheader()\n for row in reader:\n row['sentiment'] = analyze_context(row['context'])\n writer.writerow(row)\n\n\ndef write_all_csvs(path):\n \"\"\"Get sentiment analysis for all csvs in directory\"\"\"\n files = glob.glob('{}/*csv'.format(path))\n for file in files:\n write_new_csv(file.split(\".csv\")[0])\n\n\nwrite_all_csvs('/home/benglick/extract')\n","sub_path":"sentiment_analysis.py","file_name":"sentiment_analysis.py","file_ext":"py","file_size_in_byte":2322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"11207480","text":"# import SparkConf, SparkContext\nfrom pyspark import SparkConf, SparkContext\n\n# Set master node as the local machine. 
We call this application \"WordCount\"\nconf = SparkConf().setMaster(\"local\").setAppName(\"WordCount\")\n# Assign Spack context object to sc\nsc = SparkContext.getOrCreate(conf)\n\n# ---------------------- Implement your codes ----------------------------\n# read data from Book.txt\n# splits up each line of text \n# count how many times each unique values occur\n\n# -------------------------------------------------------------------------\n\n# print results\nfor word, count in wordCounts.items():\n cleanWord = word.encode('ascii', 'ignore')\n if (cleanWord):\n print(cleanWord.decode() + \" \" + str(count))\n","sub_path":"study/project-jupyter/exam-wordcount/word-count.py","file_name":"word-count.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"60234997","text":"import datetime\n\nctime = datetime.datetime.now()\ncurrent_time = \" \".join(map(str, [ctime.hour, ctime.minute, ctime.second]))\nprint(current_time)\n\n\nhr, mins, sec = input(\"Enter time in h m s :\").split()\nhr = int(hr)\nmins = int(mins)\nsec = int(sec)\ntime_for_bread = \" \".join(map(str, [hr, mins, sec]))\n\nprint(time_for_bread)\n\n\naddTime = \" \".join(map(str, [ctime.hour + hr, ctime.minute + mins, ctime.second + sec]))\nprint(addTime)\n","sub_path":"simple.py","file_name":"simple.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"401421111","text":"#import requests\nimport formulas\nimport csv\nimport requests\n\ndef get_focal_point(location_list, r1):\n \n # get lat and long for each address\n '''\n for address in address_list:\n response = requests.get(\"http://dev.virtualearth.net/REST/v1/Locations/\" + address,\n params={\"include\":\"queryParse\",\n \"key\":\"AvQOaBs2cYn6OAWmZ9tEAvGuJGfJusGnLSyHnD9g7USe35x69PmSiyk_51Htk3Z0\"})\n data = response.json()\n lat = data['resourceSets'][0]['resources'][0]['point']['coordinates'][0]\n lng = data['resourceSets'][0]['resources'][0]['point']['coordinates'][1]\n \n # add to location_list\n location_list.append(address, lat, lng)\n\n print(str(lat) + \", \" + str(lng))\n '''\n #list of location set around every point in tuple form (location, set)\n local_set_list =[]\n # iterate through location_list to create sets for each location\n for location in location_list:\n lat1 = location[0]\n lon1 = location[1]\n local_set = set()\n for other_location in location_list:\n lat2 = other_location[0]\n lon2 = other_location[1]\n dist = formulas.haversine(lat1, lon1, lat2, lon2)\n if dist <= float(r1):\n local_set.add(other_location)\n local_set_list.append((location, local_set))\n \n #local set around focal point of locations in (address, lat, long)\n local_set = set()\n # focal point\n focal_point = None\n #iterate through dictionary to get largest set and focal point\n for loc, self_set in local_set_list:\n if (len(self_set) > len(local_set)):\n local_set = self_set\n focal_point = loc\n \n return (focal_point, local_set)\n\ndef create_remote_set(focal_point, location_list, r2):\n #remote set around focal point of locations in (address, lat, long)\n remote_set = set()\n\n lat1 = focal_point[0]\n lon1 = focal_point[1]\n for loc in location_list:\n lat2 = loc[0]\n lon2 = loc[1]\n dist = formulas.haversine(lat1, lon1, lat2, lon2)\n if dist > float(r2):\n remote_set.add(loc)\n return remote_set\n\ndef generate_geo_relationship(country1, other_center):\n #this is for using the bing reverse 
geocode api\n coord2 = str(other_center[0]) +\",\"+ str(other_center[1])\n response2 = requests.get(\"http://dev.virtualearth.net/REST/v1/Locations/\" + coord2,\n params={\"key\":\"AjhzSUKjNFFV0ckKVCV64tSLhw_EWSlN6LP9UPiWdEJDRMZn3Vm17HtoSclZZfO_ \",\n })\n data2 = response2.json()\n #get the country data\n \n try:\n country2 = str(data2['resourceSets'][0]['resources'][0]['address']['countryRegion'])\n except:\n country2 = \"N/A\"\n \n if country1 == country2:\n if country1 == \"N/A\":\n return (country1, country2, \"N/A\")\n else: \n return (country1, country2, \"domestic\")\n else:\n return (country1, country2, \"cross border\")\n\n \ndef output_each_patent(ungrouped, patent, r1, r2):\n #get the local locations\n (local_center, local_set) = get_focal_point(ungrouped, r1)\n #get the remote locations\n remote_set = create_remote_set(local_center, ungrouped, r2)\n #find the locations that are not local and not remote\n inbetween = set(ungrouped) - remote_set - local_set\n \n #row to write\n row = []\n row.append(patent)\n row.append(len(ungrouped))\n row.append(len(local_set))\n row.append(len(remote_set))\n \n \n # get local country\n coord1 = str(local_center[0]) +\",\"+ str(local_center[1])\n response1 = requests.get(\"http://dev.virtualearth.net/REST/v1/Locations/\" + coord1,\n params={\"key\":\"AjhzSUKjNFFV0ckKVCV64tSLhw_EWSlN6LP9UPiWdEJDRMZn3Vm17HtoSclZZfO_ \",\n })\n data1 = response1.json()\n #get the country data\n try:\n country1 = str(data1['resourceSets'][0]['resources'][0]['address']['countryRegion'])\n except:\n country1 = \"N/A\"\n \n \n #list of sets of remote groups\n remote_groups = []\n \n while len(remote_set) > 0:\n #get largest remote group, add to remote groups and remove from set of ungrouped remotes\n remote_group = get_focal_point(remote_set, r1)\n remote_groups.append(remote_group)\n remote_set -= remote_group[1]\n \n row.append(1 + len(remote_groups)) # number of clusters\n row.append(r1) # local radius\n row.append(r2) # remote radius\n with open('outputs/output.csv', 'a', newline=\"\\n\", encoding='latin-1') as out_file: \n csv_writer = csv.writer(out_file, delimiter=',')\n '''\n header = [\"patent_id\", \"number_of_inventors\", \"number_of_local_inventors\", \"number_of_remote_inventors\", \"number_of_clusters (local+remote)\", \"radius_local\", \n \"radius_remote\", \"local_cluster\", \"nonlocal_cluster\", \"remote_cluster\"]\n csv_writer.writerow(header)\n '''\n # header = [\"group_classification\", \"locations\", \"point_lat\", \"point_lng\", \"country\", \"geographical_relationship\", \"haversine_distance_to_local\"]\n \n \n # convert local_set from a set of tuples to a list of strings\n local_set_string = []\n for (lat, lon, id) in local_set:\n coord = '(' + str(lat) + ',' + str(lon) +')'\n local_set_string.append(coord)\n # dict for local_cluster\n local_cluster_dict = {'number_of_inventors_in_cluster': len(local_set),\n 'locations': '; '.join(local_set_string),\n 'center_lat': local_center[0],\n 'center_lng': local_center[1],\n 'country': country1,\n 'geographical_relationship:': 'domestic',\n 'haversine_distance_to_local': 'N/A'}\n row.append(local_cluster_dict)\n \n # convert inbetween set from a set of tuples to a list of strings\n inbetween_set_string = []\n for (lat, lon, id) in inbetween:\n coord = '(' + str(lat) + ',' + str(lon) +')'\n inbetween_set_string.append(coord)\n \n # dict for nonlocal_cluster\n nonlocal_cluster_dict = {'number_of_inventors_in_cluster': len(inbetween),\n 'locations': '; '.join(inbetween_set_string),\n 'center_lat': 
'N/A',\n 'center_lng': 'N/A',\n 'country': 'N/A',\n 'geographical_relationship:': 'N/A',\n 'haversine_distance_to_local': 'N/A'}\n row.append(nonlocal_cluster_dict)\n \n \n # sort remote groups by distance away from local focal point\n remote_group_list = []\n for remote_group in remote_groups:\n (coordinates, group) = remote_group\n dist = formulas.haversine(local_center[0], local_center[1], coordinates[0], coordinates[1])\n remote_group_list.append((coordinates, group, dist))\n remote_group_list.sort(key=lambda tup: tup[2]) # sorts in place\n \n for remote_group in remote_group_list:\n (coordinates, group, dist) = remote_group\n # convert remote_group from a set of tuples to a list of strings\n remote_group_string = []\n for (lat, lon, id) in group:\n coord = '(' + str(lat) + ',' + str(lon) +')'\n remote_group_string.append(coord)\n (c1, c2, rel) = generate_geo_relationship(country1, coordinates)\n # dict for remote_cluster\n remote_cluster_dict = {'number_of_inventors_in_cluster': len(group),\n 'locations': '; '.join(remote_group_string),\n 'center_lat': coordinates[0],\n 'center_lng': coordinates[1],\n 'country': c2,\n 'geographical_relationship:': rel,\n 'haversine_distance_to_local': formulas.haversine(local_center[0], local_center[1], coordinates[0], coordinates[1])}\n row.append(remote_cluster_dict)\n csv_writer.writerow(row)\n \nif __name__ == '__main__':\n \n # write header\n with open('outputs/output.csv', 'w', newline=\"\\n\", encoding='latin-1') as out_file: \n csv_writer = csv.writer(out_file, delimiter=',')\n header = [\"patent_id\", \"number_of_inventors\", \"number_of_local_inventors\", \"number_of_remote_inventors\", \"number_of_clusters (local+remote)\", \"radius_local\", \n \"radius_remote\", \"local_cluster\", \"nonlocal_cluster\", \"remote_cluster\"]\n csv_writer.writerow(header)\n # process radii\n r1 = 0\n r2 = 0\n \n with open('inputs/arguments.csv', encoding='utf-8-sig') as csvfile:\n reader = csv.DictReader(csvfile, delimiter=',')\n \n #create the list of ungrouped addresses\n for row in reader:\n r1 = row['r1']\n r2 = row['r2']\n \n # process patent records\n with open('inputs/input100_blank.csv', encoding='utf-8-sig') as csvfile:\n reader = csv.DictReader(csvfile, delimiter=',')\n \n #create the set of ungrouped addresses\n ungrouped = []\n total_length = sum(1 for row in reader)\n \n \n \n for row in reader:\n if (reader.line_num == 1):\n patent = row['patent_id']\n lat = row['inventor_add_lat']\n lng = row['inventor_add_lon']\n inventor_id = row['inventor_id']\n ungrouped.append((lat, lng, inventor_id))\n elif (reader.line_num == total_length):\n #process last row here\n if row['patent_id'] == patent: \n try:\n patent = row['patent_id']\n lat = row['inventor_add_lat']\n lng = row['inventor_add_lon']\n inventor_id = row['inventor_id']\n ungrouped.append((lat, lng, inventor_id))\n \n print(row)\n output_each_patent(ungrouped, patent, r1, r2)\n except ValueError:\n print(ValueError)\n print(int(row['inventor_add_lat'][0]))\n print(row['inventor_add_lon'])\n print(type(lng))\n \n else:\n \n patent = row['patent_id']\n ungrouped = []\n lat = row['inventor_add_lat']\n lng = row['inventor_add_lon']\n inventor_id = row['inventor_id']\n ungrouped.append((lat, lng, inventor_id))\n output_each_patent(ungrouped, patent, r1, r2)\n \n else: \n if row['patent_id'] == patent:\n \n patent = row['patent_id']\n lat = row['inventor_add_lat'] \n lng = row['inventor_add_lon']\n inventor_id = row['inventor_id']\n ungrouped.append((lat, lng, inventor_id))\n \n else:\n 
output_each_patent(ungrouped, patent, r1, r2)\n \n patent = row['patent_id']\n lat = row['inventor_add_lat']\n lng = row['inventor_add_lon']\n inventor_id = row['inventor_id']\n ungrouped = []\n ungrouped.append((lat, lng, inventor_id))\n \n \n \n \n \n \n \n ","sub_path":"LocationCalculatorGeneral/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":11900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"631452550","text":"#python 3.6\r\n\r\n\"\"\"\r\nhttps://codejam.withgoogle.com/2018/challenges/00000000000000cb/dashboard/00000000000079cc\r\n\"\"\"\r\n\r\n\r\n\"\"\"\r\nnatural posn:\r\n cube turned45 degrees along z axis\r\nequation:\r\n a= 2**0.5*cos(tx)+sin(tx)\r\n \r\n the diagonal plane along xz plane in natural position(as mentioned above) projects a shadow of root2\r\n this plane when rotates about x axis by theta projects root2*cos(tx)\r\n \r\n 2 triangles of area 0.5,0.5 project shadows when the cube tilts along the x axis and (0.5+0.5)*sin(tx)\r\n\r\n thus eqn of a=...\r\n \r\nconvert tx into coordinates required:\r\n \r\n 3 centers to return: (.5,0,0), (0,.5,0), (0,0,.5) make lists for ease\r\n \r\n rotate about y axis by 45 degrees\r\n y axis by 45 degrees\r\n just x, z coordinates will change\r\n thus:\r\n T=45\r\n x,y,z go to (xcosT-zsinT), y, (xsinT+zcosT)\r\n \r\n rotate about x axis by tx degrees, tx unknown found by solving eq1\r\n \r\n T=solved from eqn\r\n x,y,z go to x, (ycosT-zsinT), (ysinT+zcosT)\r\n \r\n\r\n\"\"\"\r\nimport math\r\n\r\ndef dist(v):\r\n return (v[0]**2+v[1]**2+v[2]**2)**.5\r\n\r\ndef angle(v1,v2):\r\n #a dot b= |a|*|b|*cosTheta dot product\r\n v1dotv2=v1[0]*v2[0]+v1[1]*v2[1]+v1[2]*v2[2]\r\n dv1=dist(v1)\r\n dv2=dist(v2)\r\n return math.acos(v1dotv2/dv1/dv2) \r\n\r\n\r\ndef isDistHalf(v):\r\n d=dist(v)\r\n return d<.5+10**(-6) and d>.5-10**(-6)\r\n\r\n\r\ndef isAngle90(v1,v2):\r\n piby2=math.pi/2\r\n ang=angle(v1,v2)\r\n delta=10**(-6)\r\n return ang>piby2-delta and ang\n'''\n\nfrom sage.all import *\n\n# Functions #\n\ndef iterate_N(alpha_num, chars_of_coeffs, gauss_sums, q):\n iterate = reduce(lambda x, y: x * y, \n [chars_of_coeffs[i][alpha_num[i]] * gauss_sums[i][alpha_num[i]] \n for i in xrange(len(alpha_num))], 1)\n return iterate/q\n","sub_path":"utils/N.py","file_name":"N.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"44453920","text":"import caffe\nfrom caffe import layers as L, params as P, to_proto\nfrom caffe.coord_map import crop # crop(net.upscore, net.data) automatically calculate the axis and offset\n\n# def bn_relu_conv(bottom, num_output, pad=0, kernel_size=3, stride=1):\n# \tbn = L.BatchNorm(bottom, in_place=False, use_global_stats=use_global_stats, param=[dict(lr_mult=0), dict(lr_mult=0), dict(lr_mult=0)])\n# \tscale = L.Scale(bn, in_place=True, axis=1, filler=dict(type='constant', value=1), bias_term=1, bias_filler=dict(type='constant', value=0))\n# \trelu = L.ReLU(scale, in_place=True, engine=engine)\n# \tconv = L.Convolution(relu, num_output=num_output, pad=pad, kernel_size=kernel_size, stride=stride,\n# \t\tparam=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],\t\t\n# \t\tweight_filler=dict(type='msra'), bias_filler=dict(type='constant', value=0),\n# \t\tengine=engine)\n# \treturn bn, scale, relu, conv\n\ndef conv_bn(bottom, num_output, pad=0, kernel_size=3, stride=1):\n\tconv = L.Convolution(bottom, num_output=num_output, pad=pad, 
kernel_size=kernel_size, stride=stride,\n\t\tparam=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],\t\t\n\t\tweight_filler=dict(type='msra'), bias_filler=dict(type='constant', value=0),\n\t\tengine=engine)\n\tbn = L.BatchNorm(conv, in_place=True, use_global_stats=use_global_stats, param=[dict(lr_mult=0), dict(lr_mult=0), dict(lr_mult=0)])\n\tscale = L.Scale(bn, in_place=True, axis=1, filler=dict(type='constant', value=1), bias_term=1, bias_filler=dict(type='constant', value=0))\n\treturn conv, bn, scale\n\ndef conv_bn_relu(bottom, num_output, pad=0, kernel_size=3, stride=1):\n\tconv = L.Convolution(bottom, num_output=num_output, pad=pad, kernel_size=kernel_size, stride=stride,\n\t\tparam=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],\t\t\n\t\tweight_filler=dict(type='msra'), bias_filler=dict(type='constant', value=0),\n\t\tengine=engine)\n\tbn = L.BatchNorm(conv, in_place=True, use_global_stats=use_global_stats, param=[dict(lr_mult=0), dict(lr_mult=0), dict(lr_mult=0)])\n\tscale = L.Scale(bn, in_place=True, axis=1, filler=dict(type='constant', value=1), bias_term=1, bias_filler=dict(type='constant', value=0))\n\trelu = L.ReLU(scale, in_place=True, engine=engine)\n\treturn conv, bn, scale, relu\n\ndef deconv_bn_relu(bottom, num_output, pad=0, kernel_size=3, stride=1):\n\tdeconv = L.Deconvolution(bottom, param=[dict(lr_mult=1, decay_mult=1)],\n\t\tconvolution_param=dict(num_output=num_output, pad=pad, kernel_size=kernel_size, stride=stride,\n\t\t\tweight_filler=dict(type='msra'), bias_term=0,\n\t\t\tengine=engine))\n\tbn = L.BatchNorm(deconv, in_place=True, use_global_stats=use_global_stats, param=[dict(lr_mult=0), dict(lr_mult=0), dict(lr_mult=0)])\n\tscale = L.Scale(bn, in_place=True, axis=1, filler=dict(type='constant', value=1), bias_term=1, bias_filler=dict(type='constant', value=0))\n\trelu = L.ReLU(scale, in_place=True, engine=engine)\n\treturn deconv, bn, scale, relu\n\ndef add_layer(bottom1, bottom2, num_output):\n\tconv, bn, scale = conv_bn(bottom1, num_output=num_output, pad=0, kernel_size=1, stride=1)\n\teltw = L.Eltwise(conv, bottom2, eltwise_param=dict(operation=1))\n\trule = L.ReLU(eltw, in_place=True, engine=engine)\n\treturn conv, bn, scale, eltw, rule\n\n# def add_relu(bottom1, bottom2):\n# \teltw = L.Eltwise(bottom1, bottom2, eltwise_param=dict(operation=1))\n# \trule = L.ReLU(eltw, in_place=True, engine=engine)\n# \treturn eltw, rule\n\n# def conv1_conv2_add_bn_relu(bottom1, bottom2, num_output, pad=0, kernel_size=3, stride=1):\n# \tconv1 = L.Convolution(bottom1, num_output=num_output, pad=pad, kernel_size=kernel_size, stride=stride,\n# \t\tparam=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],\t\t\n# \t\tweight_filler=dict(type='msra'), bias_filler=dict(type='constant', value=0),\n# \t\tengine=engine)\n# \tconv2 = L.Convolution(bottom2, num_output=num_output, pad=0, kernel_size=1, stride=1,\n# \t\tparam=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],\t\t\n# \t\tweight_filler=dict(type='msra'), bias_filler=dict(type='constant', value=0),\n# \t\tengine=engine)\n# \teltw = L.Eltwise(conv1, conv2, eltwise_param=dict(operation=1))\n# \tbn = L.BatchNorm(eltw, in_place=True, use_global_stats=use_global_stats, param=[dict(lr_mult=0), dict(lr_mult=0), dict(lr_mult=0)])\n# \tscale = L.Scale(bn, in_place=True, axis=1, filler=dict(type='constant', value=1), bias_term=1, bias_filler=dict(type='constant', value=0))\n# \trelu = L.ReLU(scale, in_place=True, engine=engine)\n\t\n# \treturn conv1, conv2, eltw, bn, scale, 
relu\n\ndef pool_conv_bn_relu_concat(bottom, num_output):\n\tpool = L.Pooling(bottom, pool=P.Pooling.MAX, pad=0, kernel_size=2, stride=2, engine=engine)\n\tconv, bn, scale, relu = conv_bn_relu(bottom, num_output, pad=1, kernel_size=3, stride=2)\n\tconcat = L.Concat(pool, conv, axis=1)\n\n\treturn pool, conv, bn, scale, relu, concat\n\n# def max_pool(bottom, pad=0, kernel_size=2, stride=2):\n# \treturn L.Pooling(bottom, pool=P.Pooling.MAX, pad=pad, kernel_size=kernel_size, stride=stride, engine=engine)\n\ndef incept_2(bottom, num_output):\n\t# branch 1\n\tconv_b1_a, bn_b1_a, scale_b1_a = conv_bn(bottom, num_output, pad=1, kernel_size=3, stride=1)\n\t# add\n\teltw = L.Eltwise(bottom, conv_b1_a, eltwise_param=dict(operation=1))\n\trule = L.ReLU(eltw, in_place=True, engine=engine)\n\n\treturn conv_b1_a, bn_b1_a, scale_b1_a, \\\n\teltw, rule\n\ndef incept_3(bottom, num_output):\n\t# branch 1\n\tconv_b1_a, bn_b1_a, scale_b1_a = conv_bn(bottom, num_output, pad=1, kernel_size=3, stride=1)\n\t# branch 2\n\tconv_b2_a, bn_b2_a, scale_b2_a, relu_b2_a = conv_bn_relu(bottom, num_output, pad=1, kernel_size=3, stride=1)\n\tconv_b2_b, bn_b2_b, scale_b2_b = conv_bn(relu_b2_a, num_output, pad=1, kernel_size=3, stride=1)\n\t# add\n\teltw = L.Eltwise(bottom, conv_b1_a, conv_b2_b, eltwise_param=dict(operation=1))\n\trule = L.ReLU(eltw, in_place=True, engine=engine)\n\n\treturn conv_b1_a, bn_b1_a, scale_b1_a, \\\n\tconv_b2_a, bn_b2_a, scale_b2_a, relu_b2_a, conv_b2_b, bn_b2_b, scale_b2_b, \\\n\teltw, rule\n\ndef incept_4(bottom, num_output):\n\t# branch 1\n\tconv_b1_a, bn_b1_a, scale_b1_a = conv_bn(bottom, num_output, pad=1, kernel_size=3, stride=1)\n\t# branch 2\n\tconv_b2_a, bn_b2_a, scale_b2_a, relu_b2_a = conv_bn_relu(bottom, num_output, pad=1, kernel_size=3, stride=1)\n\tconv_b2_b, bn_b2_b, scale_b2_b = conv_bn(relu_b2_a, num_output, pad=1, kernel_size=3, stride=1)\n\t# branch 3\n\tconv_b3_a, bn_b3_a, scale_b3_a, relu_b3_a = conv_bn_relu(bottom, num_output, pad=1, kernel_size=3, stride=1)\n\tconv_b3_b, bn_b3_b, scale_b3_b, relu_b3_b = conv_bn_relu(relu_b3_a, num_output, pad=1, kernel_size=3, stride=1)\n\tconv_b3_c, bn_b3_c, scale_b3_c = conv_bn(relu_b3_b, num_output, pad=1, kernel_size=3, stride=1)\n\t\n\t# add\n\teltw = L.Eltwise(bottom, conv_b1_a, conv_b2_b, conv_b3_c, eltwise_param=dict(operation=1))\n\trelu = L.ReLU(eltw, in_place=True, engine=engine)\n\n\treturn conv_b1_a, bn_b1_a, scale_b1_a, \\\n\tconv_b2_a, bn_b2_a, scale_b2_a, relu_b2_a, conv_b2_b, bn_b2_b, scale_b2_b, \\\n\tconv_b3_a, bn_b3_a, scale_b3_a, relu_b3_a, conv_b3_b, bn_b3_b, scale_b3_b, relu_b3_b, conv_b3_c, bn_b3_c, scale_b3_c, \\\n\teltw, relu\n\ndef incept_3_decode(bottom, num_output):\n\t# branch 1\n\tconv_b1_a, bn_b1_a, scale_b1_a = conv_bn(bottom, num_output, pad=0, kernel_size=1, stride=1)\n\t# branch 2\n\tconv_b2_a, bn_b2_a, scale_b2_a, relu_b2_a = conv_bn_relu(bottom, num_output, pad=1, kernel_size=3, stride=1)\n\tconv_b2_b, bn_b2_b, scale_b2_b = conv_bn(relu_b2_a, num_output, pad=1, kernel_size=3, stride=1)\n\t# add\n\teltw = L.Eltwise(conv_b1_a, conv_b2_b, eltwise_param=dict(operation=1))\n\trule = L.ReLU(eltw, in_place=True, engine=engine)\n\n\treturn conv_b1_a, bn_b1_a, scale_b1_a, \\\n\tconv_b2_a, bn_b2_a, scale_b2_a, relu_b2_a, conv_b2_b, bn_b2_b, scale_b2_b, \\\n\teltw, rule\n\ndef incept_4_decode(bottom, num_output):\n\t# branch 1\n\tconv_b1_a, bn_b1_a, scale_b1_a = conv_bn(bottom, num_output, pad=0, kernel_size=1, stride=1)\n\t# branch 2\n\tconv_b2_a, bn_b2_a, scale_b2_a, relu_b2_a = conv_bn_relu(bottom, num_output, 
pad=1, kernel_size=3, stride=1)\n\tconv_b2_b, bn_b2_b, scale_b2_b = conv_bn(relu_b2_a, num_output, pad=1, kernel_size=3, stride=1)\n\t# branch 3\n\tconv_b3_a, bn_b3_a, scale_b3_a, relu_b3_a = conv_bn_relu(bottom, num_output, pad=1, kernel_size=3, stride=1)\n\tconv_b3_b, bn_b3_b, scale_b3_b, relu_b3_b = conv_bn_relu(relu_b3_a, num_output, pad=1, kernel_size=3, stride=1)\n\tconv_b3_c, bn_b3_c, scale_b3_c = conv_bn(relu_b3_b, num_output, pad=1, kernel_size=3, stride=1)\n\t\n\t# add\n\teltw = L.Eltwise(conv_b1_a, conv_b2_b, conv_b3_c, eltwise_param=dict(operation=1))\n\trelu = L.ReLU(eltw, in_place=True, engine=engine)\n\n\treturn conv_b1_a, bn_b1_a, scale_b1_a, \\\n\tconv_b2_a, bn_b2_a, scale_b2_a, relu_b2_a, conv_b2_b, bn_b2_b, scale_b2_b, \\\n\tconv_b3_a, bn_b3_a, scale_b3_a, relu_b3_a, conv_b3_b, bn_b3_b, scale_b3_b, relu_b3_b, conv_b3_c, bn_b3_c, scale_b3_c, \\\n\teltw, relu\n\n################################\ndef uvinet_2d_bn_weighted(dim_data, dim_label, num_class, phase='train'):\n\tnet = caffe.NetSpec()\n\t############ d0 ############\n\tnet.data = L.Input(input_param=dict(shape=dict(dim=dim_data)))\n\tif phase == \"train\":\n\t\tnet.label = L.Input(input_param=dict(shape=dict(dim=dim_label)))\n\t\tnet.label_weight = L.Input(input_param=dict(shape=dict(dim=dim_label)))\t\n\tnet.d0b_conv, net.d0b_bn, net.d0b_scale, net.d0b_relu = conv_bn_relu(net.data, 64, pad=1, kernel_size=3, stride=1)\n\tnet.d0c_conv, net.d0c_bn, net.d0c_scale, net.d0c_relu = conv_bn_relu(net.d0b_relu, 64, pad=1, kernel_size=3, stride=1)\n\t############ d1 ############\n\t# net.d1a_pool = max_pool(net.d0c_conv, pad=0, kernel_size=2, stride=2)\n\tnet.d1a_pool, net.d1a_conv, net.d1a_bn, net.d1a_scale, net.d1a_relu, net.d1a_concat = pool_conv_bn_relu_concat(net.d0c_relu, 64)\n\tnet.d1_conv_b1_a, net.d1_bn_b1_a, net.d1_scale_b1_a, \\\n\tnet.d1_conv_b2_a, net.d1_bn_b2_a, net.d1_scale_b2_a, net.d1_relu_b2_a, net.d1_conv_b2_b, net.d1_bn_b2_b, net.d1_scale_b2_b, \\\n\tnet.d1_eltw, net.d1_relu = incept_3(net.d1a_concat, 128)\n\t\n\t############ d2 ############\n\t# net.d2a_pool = max_pool(net.d1d_relu, pad=0, kernel_size=2, stride=2)\n\tnet.d2a_pool, net.d2a_conv, net.d2a_bn, net.d2a_scale, net.d2a_relu, net.d2a_concat = pool_conv_bn_relu_concat(net.d1_relu, 128)\n\tnet.d2_conv_b1_a, net.d2_bn_b1_a, net.d2_scale_b1_a, \\\n\tnet.d2_conv_b2_a, net.d2_bn_b2_a, net.d2_scale_b2_a, net.d2_relu_b2_a, net.d2_conv_b2_b, net.d2_bn_b2_b, net.d2_scale_b2_b, \\\n\tnet.d2_conv_b3_a, net.d2_bn_b3_a, net.d2_scale_b3_a, net.d2_relu_b3_a, net.d2_conv_b3_b, net.d2_bn_b3_b, net.d2_scale_b3_b, net.d2_relu_b3_b, net.d2_conv_b3_c, net.d2_bn_b3_c, net.d2_scale_b3_c, \\\n\tnet.d2_eltw, net.d2_relu = incept_4(net.d2a_concat, 256)\n\t\n\t############ d3 ############\n\t# net.d3a_pool = max_pool(net.d2d_relu, pad=0, kernel_size=2, stride=2)\n\tnet.d3a_pool, net.d3a_conv, net.d3a_bn, net.d3a_scale, net.d3a_relu, net.d3a_concat = pool_conv_bn_relu_concat(net.d2_relu, 256)\n\tnet.d3_conv_b1_a, net.d3_bn_b1_a, net.d3_scale_b1_a, \\\n\tnet.d3_conv_b2_a, net.d3_bn_b2_a, net.d3_scale_b2_a, net.d3_relu_b2_a, net.d3_conv_b2_b, net.d3_bn_b2_b, net.d3_scale_b2_b, \\\n\tnet.d3_conv_b3_a, net.d3_bn_b3_a, net.d3_scale_b3_a, net.d3_relu_b3_a, net.d3_conv_b3_b, net.d3_bn_b3_b, net.d3_scale_b3_b, net.d3_relu_b3_b, net.d3_conv_b3_c, net.d3_bn_b3_c, net.d3_scale_b3_c, \\\n\tnet.d3_eltw, net.d3_relu = incept_4(net.d3a_concat, 512)\n\t\n\t############ d4 ############\n\t# net.d4a_pool = max_pool(net.d3d_relu, pad=0, kernel_size=2, stride=2)\n\tnet.d4a_pool, net.d4a_conv, 
net.d4a_bn, net.d4a_scale, net.d4a_relu, net.d4a_concat = pool_conv_bn_relu_concat(net.d3_relu, 512)\n\tnet.d4_conv_b1_a, net.d4_bn_b1_a, net.d4_scale_b1_a, \\\n\tnet.d4_conv_b2_a, net.d4_bn_b2_a, net.d4_scale_b2_a, net.d4_relu_b2_a, net.d4_conv_b2_b, net.d4_bn_b2_b, net.d4_scale_b2_b, \\\n\tnet.d4_conv_b3_a, net.d4_bn_b3_a, net.d4_scale_b3_a, net.d4_relu_b3_a, net.d4_conv_b3_b, net.d4_bn_b3_b, net.d4_scale_b3_b, net.d4_relu_b3_b, net.d4_conv_b3_c, net.d4_bn_b3_c, net.d4_scale_b3_c, \\\n\tnet.d4_eltw, net.d4_relu = incept_4(net.d4a_concat, 1024)\n\t\n\t############ u3 ############\n\t### a ### First Deconvolution\n\tnet.u3a_dconv, net.u3a_bn, net.u3a_scale, net.u3a_relu = deconv_bn_relu(net.d4_relu, 512, pad=0, kernel_size=2, stride=2)\n\tnet.u3b_concat = L.Concat(net.u3a_relu, net.d3_relu, axis=1)\n\tnet.u3_conv_b1_a, net.u3_bn_b1_a, net.u3_scale_b1_a, \\\n\tnet.u3_conv_b2_a, net.u3_bn_b2_a, net.u3_scale_b2_a, net.u3_relu_b2_a, net.u3_conv_b2_b, net.u3_bn_b2_b, net.u3_scale_b2_b, \\\n\tnet.u3_conv_b3_a, net.u3_bn_b3_a, net.u3_scale_b3_a, net.u3_relu_b3_a, net.u3_conv_b3_b, net.u3_bn_b3_b, net.u3_scale_b3_b, net.u3_relu_b3_b, net.u3_conv_b3_c, net.u3_bn_b3_c, net.u3_scale_b3_c, \\\n\tnet.u3_eltw, net.u3_relu = incept_4_decode(net.u3b_concat, 512)\n\n\t############ u2 ############\n\t### a ### Second Deconvolution\n\tnet.u2a_dconv, net.u2a_bn, net.u2a_scale, net.u2a_relu = deconv_bn_relu(net.u3_relu, 256, pad=0, kernel_size=2, stride=2)\n\tnet.u2b_concat = L.Concat(net.u2a_relu, net.d2_relu, axis=1)\n\tnet.u2_conv_b1_a, net.u2_bn_b1_a, net.u2_scale_b1_a, \\\n\tnet.u2_conv_b2_a, net.u2_bn_b2_a, net.u2_scale_b2_a, net.u2_relu_b2_a, net.u2_conv_b2_b, net.u2_bn_b2_b, net.u2_scale_b2_b, \\\n\tnet.u2_conv_b3_a, net.u2_bn_b3_a, net.u2_scale_b3_a, net.u2_relu_b3_a, net.u2_conv_b3_b, net.u2_bn_b3_b, net.u2_scale_b3_b, net.u2_relu_b3_b, net.u2_conv_b3_c, net.u2_bn_b3_c, net.u2_scale_b3_c, \\\n\tnet.u2_eltw, net.u2_relu = incept_4_decode(net.u2b_concat, 256)\n\n\t############ u1 ############\n\t### a ### Third Deconvolution\n\tnet.u1a_dconv, net.u1a_bn, net.u1a_scale, net.u1a_relu = deconv_bn_relu(net.u2_relu, 128, pad=0, kernel_size=2, stride=2)\n\tnet.u1b_concat = L.Concat(net.u1a_relu, net.d1_relu, axis=1)\n\tnet.u1_conv_b1_a, net.u1_bn_b1_a, net.u1_scale_b1_a, \\\n\tnet.u1_conv_b2_a, net.u1_bn_b2_a, net.u1_scale_b2_a, net.u1_relu_b2_a, net.u1_conv_b2_b, net.u1_bn_b2_b, net.u1_scale_b2_b, \\\n\tnet.u1_eltw, net.u1_relu = incept_3_decode(net.u1b_concat, 128)\n\n\t############ u0 ############\n\t### a ### Fourth Deconvolution\n\tnet.u0a_dconv, net.u0a_bn, net.u0a_scale, net.u0a_relu = deconv_bn_relu(net.u1_relu, 64, pad=0, kernel_size=2, stride=2)\n\tnet.u0b_concat = L.Concat(net.u0a_relu, net.d0c_relu, axis=1)\n\tnet.u0_conv_b1_a, net.u0_bn_b1_a, net.u0_scale_b1_a, \\\n\tnet.u0_conv_b2_a, net.u0_bn_b2_a, net.u0_scale_b2_a, net.u0_relu_b2_a, net.u0_conv_b2_b, net.u0_bn_b2_b, net.u0_scale_b2_b, \\\n\tnet.u0_eltw, net.u0_relu = incept_3_decode(net.u0b_concat, 64)\n\n\t############ score ###########\n\tnet.score4 = L.Convolution(net.d4_relu,\n\t\tparam=[dict(lr_mult=10, decay_mult=1), dict(lr_mult=20, decay_mult=0)],\t\n\t\tnum_output=num_class, pad=0, kernel_size=1, stride=1, \n\t\tweight_filler=dict(type='msra'), bias_filler=dict(type='constant', value=0), engine=engine)\n\tnet.upscore4 = L.Deconvolution(net.score4,\n\t\tparam=[dict(lr_mult=10, decay_mult=1)],\n\t\tconvolution_param=dict(num_output=num_class, pad=8, kernel_size=32, stride=16,\n\t\t\tweight_filler=dict(type='bilinear'), bias_term=0, 
engine=engine))\n\tif phase == \"train\":\n\t\tnet.loss4 = L.WeightedSoftmaxWithLoss(net.upscore4, net.label, net.label_weight,\n\t\t# net.loss4 = L.SoftmaxWithLoss(net.upscore4, net.label,\n\t\t\tphase=0,\n\t\t\tloss_weight=0.0625,\n\t\t\tloss_param=dict(ignore_label=ignore_label))\n\n\tnet.score3 = L.Convolution(net.u3_relu,\n\t\tparam=[dict(lr_mult=10, decay_mult=1), dict(lr_mult=20, decay_mult=0)],\t\n\t\tnum_output=num_class, pad=0, kernel_size=1, stride=1, \n\t\tweight_filler=dict(type='msra'), bias_filler=dict(type='constant', value=0), engine=engine)\n\tnet.upscore3 = L.Deconvolution(net.score3,\n\t\tparam=[dict(lr_mult=10, decay_mult=1)],\n\t\tconvolution_param=dict(num_output=num_class, pad=4, kernel_size=16, stride=8,\n\t\t\tweight_filler=dict(type='bilinear'), bias_term=0, engine=engine))\n\tif phase == \"train\":\n\t\tnet.loss3 = L.WeightedSoftmaxWithLoss(net.upscore3, net.label, net.label_weight,\n\t\t# net.loss3 = L.SoftmaxWithLoss(net.upscore3, net.label,\n\t\t\tphase=0,\n\t\t\tloss_weight=0.0625,\n\t\t\tloss_param=dict(ignore_label=ignore_label))\n\n\t### loss 2\n\tnet.score2 = L.Convolution(net.u2_relu,\n\t\tparam=[dict(lr_mult=10, decay_mult=1), dict(lr_mult=20, decay_mult=0)],\t\n\t\tnum_output=num_class, pad=0, kernel_size=1, stride=1, \n\t\tweight_filler=dict(type='msra'), bias_filler=dict(type='constant', value=0), engine=engine)\n\tnet.upscore2 = L.Deconvolution(net.score2,\n\t\tparam=[dict(lr_mult=10, decay_mult=1)],\n\t\tconvolution_param=dict(num_output=num_class, pad=2, kernel_size=8, stride=4,\n\t\t\tweight_filler=dict(type='bilinear'), bias_term=0, engine=engine))\n\tif phase == \"train\":\n\t\tnet.loss2 = L.WeightedSoftmaxWithLoss(net.upscore2, net.label, net.label_weight,\n\t\t# net.loss2 = L.SoftmaxWithLoss(net.upscore2, net.label,\n\t\t\tphase=0,\n\t\t\tloss_weight=0.125,\n\t\t\tloss_param=dict(ignore_label=ignore_label))\n\n\t### loss 1\n\tnet.score1 = L.Convolution(net.u1_relu,\n\t\tparam=[dict(lr_mult=10, decay_mult=1), dict(lr_mult=20, decay_mult=0)],\t\n\t\tnum_output=num_class, pad=0, kernel_size=1, stride=1, \n\t\tweight_filler=dict(type='msra'), bias_filler=dict(type='constant', value=0), engine=engine)\n\tnet.upscore1 = L.Deconvolution(net.score1,\n\t\tparam=[dict(lr_mult=10, decay_mult=1)],\n\t\tconvolution_param=dict(num_output=num_class, pad=1, kernel_size=4, stride=2,\n\t\t\tweight_filler=dict(type='bilinear'), bias_term=0, engine=engine))\n\tif phase == \"train\":\n\t\tnet.loss1 = L.WeightedSoftmaxWithLoss(net.upscore1, net.label, net.label_weight,\n\t\t# net.loss1 = L.SoftmaxWithLoss(net.upscore1, net.label,\n\t\t\tphase=0,\n\t\t\tloss_weight=0.25,\n\t\t\tloss_param=dict(ignore_label=ignore_label))\n\n\t### loss 0\n\tnet.score = L.Convolution(net.u0_relu,\n\t\tparam=[dict(lr_mult=10, decay_mult=1), dict(lr_mult=20, decay_mult=0)],\t\n\t\tnum_output=num_class, pad=0, kernel_size=1, stride=1, \n\t\tweight_filler=dict(type='msra'), bias_filler=dict(type='constant', value=0), engine=engine)\n\tif phase == \"train\":\n\t\tnet.loss = L.WeightedSoftmaxWithLoss(net.score, net.label, net.label_weight,\n\t\t# net.loss = L.SoftmaxWithLoss(net.score, net.label,\n\t\t\tphase=0,\n\t\t\tloss_weight=1,\n\t\t\tloss_param=dict(ignore_label=ignore_label))\n\telse:\n\t\tnet.prob = L.Softmax(net.score, axis=1)\n\treturn net.to_proto()\n\n\ndef make_uvinet(net, dim_data, dim_label, num_class, prototxt_train, prototxt_test):\n\t# register net\n\t__nets = ['uvinet_2d_bn_weighted']\n\tassert net in __nets, 'Unknown net: {}'.format(net)\n\tglobal use_global_stats, 
engine, ignore_label\n\tengine = 2\n\tignore_label = 255\n\n\tif net == 'uvinet_2d_bn_weighted':\n\t\tuse_global_stats = 0\n\t\ttrain_net = uvinet_2d_bn_weighted(dim_data, dim_label, num_class, phase='train')\n\t\tuse_global_stats = 1\n\t\tdim_data[0] = 1\n\t\ttest_net = uvinet_2d_bn_weighted(dim_data, dim_label, num_class, phase='test')\n\twith open(prototxt_train, 'w') as f:\n\t\tf.write(str(train_net))\n\twith open(prototxt_test, 'w') as f:\n\t\tf.write(str(test_net))\n","sub_path":"tools/uvinet.py","file_name":"uvinet.py","file_ext":"py","file_size_in_byte":18518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"550944580","text":"from .base import BaseSatellite\n\n\nclass GF2Satellite(BaseSatellite):\n \"\"\"\n 高分二号遥感卫星\n \"\"\"\n\n def exists(self, added_name):\n for name in self.names:\n if added_name in name:\n return True\n return False\n\n def pre_parse(self, tar):\n \"\"\"\n 预解析\n \"\"\"\n self.names = tar.getnames()\n if self.sensor_id.startswith('PMS'):\n added_name = 'MSS1' if self.sensor_id == 'PMS1' else 'MSS2'\n if not self.exists(added_name):\n # MSS 不存在时,使用 PAN\n added_name = 'PAN1' if self.sensor_id == 'PMS1' else 'PAN2'\n if not self.exists(added_name):\n print(\"MSS 和 PAN 都不存在\")\n return\n self.image_name = \"{}-{}.jpg\".format(self.base_name, added_name)\n self.xml_name = \"{}-{}.xml\".format(self.base_name, added_name)\n else:\n print('Pre Parse None')\n return\n member_image = tar.getmember(self.image_name)\n # image 为解压后的图片文件\n self.image = tar.extractfile(member_image).read()\n member_xml = tar.getmember(self.xml_name)\n # data 为解压并解析后的字典数据\n self.data = self.xmltodict(\n tar.extractfile(member_xml).read())\n self.parse()\n\n def parse(self):\n \"\"\"\n 解析\n \"\"\"\n self.meta = self.parse_method_one()\n","sub_path":"src/satellite_parser/gf2.py","file_name":"gf2.py","file_ext":"py","file_size_in_byte":1477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"316134563","text":"\"\"\"6 2\n3 3 3 2 1 1\n3 3 3 2 2 1\n3 3 3 3 3 2\n2 2 3 2 2 2\n2 2 3 2 2 2\n2 2 2 2 2 2\"\"\"\ndef sol(N,X, row):\n flag = True\n visited = []\n former = row[0]\n for i in range(N-1):\n if abs(row[i+1] - row[i]) > 1:\n return False\n elif row[i] == row[i + 1] - 1:\n prev = row[i]\n for j in range(i - X+1, i+1):\n if j < 0 or j >= N:\n return False\n\n if row[j] != prev or j in visited:\n return False\n visited.append(j)\n elif row[i] == row[i + 1] + 1:\n prev = row[i+1]\n for j in range(i + 1, i + X+1):\n if j < 0 or j >= N:\n return False\n if row[j] != prev or j in visited:\n return False\n visited.append(j)\n return True\n\n\nfor test_case in range(1, int(input())+1):\n N, X = map(int, input().split())\n board1 = []\n board2 = [[] for i in range(N)]\n for i in range(N):\n row = list(map(int,input().split()))\n board1.append(row)\n for j in range(N):\n board2[j].append(row[j])\n board = board1 + board2\n answer = 0\n for row in board:\n if sol(N,X,row):\n answer+=1\n print(\"#{} {}\".format(test_case, answer))","sub_path":"SW Expert Academy/활주로 건설.py","file_name":"활주로 건설.py","file_ext":"py","file_size_in_byte":1302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"554068713","text":"# -*- coding: utf-8 -*-\nimport KSR\nimport re\n# Per SIP request initial checks\ndef ksr_route_reqinit(self):\n if KSR.is_INVITE():\n KSR.xlog.xinfo(\"REQINIT. 
Check request $ru from $fu:$si\")\n\n if KSR.is_method(\"PUBLISH|SUBSCRIBE\"):\n # Drop unsupported methods\n KSR.sl.sl_send_reply(404, \"Pool is closed due to aids.\")\n return -255\n\n if KSR.is_OPTIONS() and re.match(\"friendly-scanner|sipcli|VaxSIPUserAgent\", KSR.pv.getw(\"$ua\")):\n # silent drop for scanners - uncomment next line if want to reply\n # KSR.sl.sl_send_reply(200, \"OK\")\n KSR.sl.sl_send_reply(503, \"There is no money, but you hang in there. Best wishes! Cheers!\")\n return -255\n\n if KSR.maxfwd.process_maxfwd(10) < 0:\n KSR.sl.sl_send_reply(483, \"Too Many Hops\")\n return -255\n\n if KSR.is_OPTIONS():\n KSR.sl.sl_send_reply(200, \"Keepalive\")\n return -255\n \n if (int(KSR.siputils.is_request()) > 0) and (int(KSR.textops.has_body()) < 0) and (int(KSR.hdr.is_present(\"Content-Length\")) < 0):\n KSR.xlog.xwarn(\"Malformed SIP message from $si:$sp - unpresent Body and no Content-Length header. User agent:$ua - Append hdr\")\n KSR.hdr.append(\"Content-Length: 0\\r\\n\")\n KSR.textopsx.msg_apply_changes()\n \n if \"null\" in KSR.pv.getw(\"$ct\"):\n KSR.xlog.xalert(\"Null in contact:{} Patch it\".format(KSR.pv.getw(\"$ct\")))\n KSR.hdr.remove(\"Contact\")\n KSR.hdr.append(\"Contact: sip:{}@{}:{}\\r\\n\".format(KSR.pv.getw(\"$fU\"), KSR.pv.getw(\"$si\"), KSR.pv.getw(\"$sp\")))\n KSR.textopsx.msg_apply_changes()\n KSR.xlog.xalert(\"Contact now:{}\".format(KSR.pv.getw(\"$ct\")))\n\n if KSR.sanity.sanity_check(1511, 7) < 0:\n KSR.xlog.xerr(\"Malformed SIP message from $si:$sp\")\n return -255\n\n if not KSR.pv.is_null(\"$au\") and re.match(\"(\\=)|(\\-\\-)|(')|(\\#)|(\\%27)|(\\%24)\", KSR.pv.getw(\"$au\")):\n KSR.xlog.xalert(\"SQL Injection in authorization username from IP:$si:$sp - $au\")\n KSR.sl.sl_send_reply(503, \"There is no money, but you hang in there. Best wishes! Cheers!\")\n return -255\n\n if KSR.is_INVITE() and re.match(\"(\\=)|(\\-\\-)|(')|(\\#)|(\\%27)|(\\%24)\", KSR.pv.getw(\"$ru\")):\n KSR.xlog.xalert(\"SQL Injection in RURI in INVITE from IP:$si:$sp - $ru\")\n KSR.sl.sl_send_reply(503, \"There is no money, but you hang in there. Best wishes! 
Cheers!\")\n return -255\n return 1\n","sub_path":"kamailio/python/ksr_route_reqinit.py","file_name":"ksr_route_reqinit.py","file_ext":"py","file_size_in_byte":2413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"336768297","text":"from Depend import *\nfrom BackendFiles.AcademicGraph import AcademicGraph\nfrom Commons import *\n\nclass AcademicApi(Resource):\n\n def __init__(self):\n self.AcadObj = AcademicGraph()\n\n def post(self):\n data = json.loads(request.data.decode())\n self.comparison = []\n subjectlist = data[\"subjectList\"]\n institutelist = data[\"instituteList\"]\n gradelist = data[\"gradeList\"]\n dateFrom = data[\"DateFrom\"]\n dateTo = data[\"DateTo\"]\n if 'filtertype' in data:\n filterType = data['filtertype']\n\n if filterType == 'Branch':\n institutelist = ConvertName(institutelist)\n\n if \"comparisonList\" in data:\n self.comparison = data[\"comparisonList\"]\n\n\n if data[\"flag\"] == 1: # {\"subjectList\":[\"Geography\", \"Visual Arts\", \"Economics\", \"English\"],\"instituteList\":[\"Makhaza\",\"Nyanga\"],\"gradeList\":[\"G11\",\"G10\", \"G09\"],\"DateFrom\":\"2016-07-24\",\"DateTo\":\"2017-09-29\",\"flag\":1}\n return self.AcadObj.GetAvgAllMarks(subjectList=subjectlist,instituteList=institutelist,gradeList=gradelist,dateFrom=dateFrom,dateTo=dateTo,comparisonList=self.comparison, filterType=filterType)\n\n elif data[\"flag\"] == 2:\n return self.AcadObj.GetPercentOfStudentsPass(subjectList=subjectlist,instituteList=institutelist,gradeList=gradelist,dateFrom=dateFrom,dateTo=dateTo,comparisonList=self.comparison, filterType=filterType)\n\n elif data[\"flag\"] == 3:\n return self.AcadObj.GetNumberOfDistinctions(subjectList=subjectlist,instituteList=institutelist,gradeList=gradelist,dateFrom=dateFrom,dateTo=dateTo,comparisonList=self.comparison, filterType=filterType)\n\n elif data[\"flag\"] == 4:\n return self.AcadObj.GetNumberTakingSubjects(subjectList=subjectlist,instituteList=institutelist,gradeList=gradelist,dateFrom=dateFrom,dateTo=dateTo,comparisonList=self.comparison, filterType=filterType)\n\n elif data[\"flag\"] == 5: # {\"subjectList\":[\"Geography\", \"Visual Arts\", \"Economics\", \"English\"],\"instituteList\":[\"Makhaza\",\"Nyanga\"],\"gradeList\":[\"G11\",\"G10\", \"G09\"],\"DateFrom\":\"2016-07-24\",\"DateTo\":\"2017-09-29\",\"flag\":5}\n return self.AcadObj.GetTrendGraph(gradeList=gradelist, instituteList=institutelist,\n subjectList=subjectlist, dateFrom=dateFrom, dateTo=dateTo)\n\n elif data[\"flag\"] == 6: # {\"subjectList\":[\"Geography\", \"Visual Arts\", \"Economics\", \"English\"],\"instituteList\":[\"Makhaza\",\"Nyanga\"],\"gradeList\":[\"G11\",\"G10\", \"G09\"],\"DateFrom\":\"2016-07-24\",\"DateTo\":\"2017-09-29\",\"flag\":5}\n return self.AcadObj.GetSubjectsByBranchGrade(gradeList=gradelist, instituteList=institutelist,filterType=filterType, dateFrom=dateFrom, dateTo=dateTo)\n\n\n\n\n\n\n","sub_path":"DashBoardAPI v3.3(Last)/Apis/AcademicApi.py","file_name":"AcademicApi.py","file_ext":"py","file_size_in_byte":2760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"614686012","text":"import os\nfrom db import *\nimport xml.etree.ElementTree as etree\n\n\nclass Parser:\n bot_url = \"\"\n\n def __init__(self, bot_url):\n super().__init__()\n self.bot_url = bot_url\n self.results = []\n\n def parse_line(self, line):\n if line.__contains__('Date:'):\n if not line.__contains__('Article Date:'):\n pass\n if 
line.__contains__('Prospect:'):\n # print(line)\n line = line.strip()\n if len(line) > 10:\n self.results.append(line[10:])\n\n def parse_results(self):\n os.chdir(self.bot_url)\n if os.path.isfile('log/result.log'):\n with open('log/result.log', 'rb') as f:\n for cp in ('cp1252', 'cp850'):\n for line in f:\n try:\n line = line.decode(cp)\n self.parse_line(line)\n pass\n except UnicodeDecodeError as e:\n print(e)\n if len(self.results) > 0:\n print(\"NUMBER OF RESULTS: \" + str(len(self.results)))\n print(\"PROSPECT LIST: \" + str(self.results))\n try:\n db = DB()\n the_id = self.get_source_id()\n db.row_insert(the_id, self.results)\n except Exception as e:\n print(e)\n\n def get_source_id(self):\n the_id = -1\n os.chdir(self.bot_url)\n if os.path.isfile('Settings.config'):\n tree = etree.parse('Settings.config')\n root = tree.getroot()\n for child in root:\n key = child.get('key')\n if key == \"DebugDataSourceId\":\n the_id = child.get('value')\n return the_id\n\n\nif __name__ == '__main__':\n p = Parser(\"C:/Users/ted/bot\")\n p.get_source_id()\n # p.parse_results()\n","sub_path":"parse.py","file_name":"parse.py","file_ext":"py","file_size_in_byte":1909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"90044460","text":"# Send a text message using Twilio sample code. \n# \n\nfrom twilio.rest import TwilioRestClient \n \n# put your own credentials here \nACCOUNT_SID = \"ACf65c410082029acd8a6a665bec20e9c5\" \nAUTH_TOKEN = \"2d119fb3cf48f30dd1baee04afd390d8\" \n \nclient = TwilioRestClient(ACCOUNT_SID, AUTH_TOKEN) \n \nclient.messages.create(\n to=\"+12817988559\", \n from_=\"+18326482798\", \n body=\"This is the ship that made the Kessel Run in fourteen parsecs?\", \n media_url=\"https://c1.staticflickr.com/3/2899/14341091933_1e92e62d12_b.jpg\", \n)","sub_path":"send_text_message.py","file_name":"send_text_message.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"284289894","text":"#!/usr/bin/env python\nimport pandas as pd\nimport os\nimport sys\nimport sys\nimport os\nimport pandas as pd\nimport datetime\nimport getpass\nimport uuid\nimport argparse\nimport glob\n\"\"\"\nEvery python wrapper is supposed to be similar, since they are using the same convention.\n\nThe only thing need to be changed is the guess_input function and the argparser function.\n\nlook for ## CHANGE THE FUNCTION HERE FOR DIFFERENT WRAPPER\n\nvariable inherents from utils:\nmyData\nmyPars\nmyPipelines\n\"\"\"\ndef my_args():\n\tusername = getpass.getuser()\n\taddon_string = str(uuid.uuid4()).split(\"-\")[-1]\n\tmainParser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter,description=\"Parse Vartrix output\")\n\n\tmainParser.add_argument('-f',\"--sample_list\", help=\"input sample list\",required=True)\n\tmainParser.add_argument('-ref', help=\"ref\",required=True,type=int)\n\tmainParser.add_argument('-alt', help=\"alt\",required=True,type=int)\n\t# mainParser.add_argument(\"--header\", help=\"read with header\",action='store_true')\n\tmainParser.add_argument('-o',\"--output\", help=\"output table name\",default=username+\"_\"+str(datetime.date.today())+\"_vartrix.tsv\")\n\tmainParser.add_argument('-min',\"--min_read_count\", help=\"total reads cover the variant\",default=3,type=int)\n\t\n\t##------- add parameters above ---------------------\n\targs = mainParser.parse_args()\t\n\treturn args\n\t\ndef 
make_boxplot(plot_df,pDict):\n\n\timport matplotlib\n\tmatplotlib.use('agg')\n\timport matplotlib.pyplot as plt\n\t\n\timport seaborn as sns\n\n\tsns.set_style(pDict['sns_style'])\n\tif pDict['log2']:\n\t\tplot_df = plot_df.transform(lambda x:np.log2(x+1))\n\t# plot_df['value'] = list1+list2\n\t# plot_df['variable'] = [\"Positive (n=%s)\"%(n1)]*len(list1)+[\"Negative (n=%s)\"%(n2)]*len(list2)\n\n\n\tplt.figure()\n\tax=sns.boxplot(x=\"variable\",y='value',data=plot_df,linewidth=3,palette=\"husl\")\n\tfor patch in ax.artists:\n\t\tr, g, bb, _ = patch.get_facecolor()\n\t\tpatch.set_facecolor((r, g, bb, .8))\n\t\n\n\tplt.xticks(rotation=90)\n\tplt.ylabel(pDict['ylabel'])\n\tplt.xlabel(\"\")\n\tplt.savefig(\"%s.pdf\"%(pDict['output']), bbox_inches='tight')\n\n\ndef parse_file(f,min_read,ref,alt):\n\tfile=\"{0}_results/{0}/outs/vartrix.matrix.var_coverage.out\".format(f)\n\tdf = pd.read_csv(file,sep=\" \",header=None,skiprows=3)\n\t\n\ttotal_cells = pd.read_csv(file,sep=\" \",header=None,skiprows=2,nrows=1)[1].tolist()[0]\n\tsum_df = df.groupby(1)[2].sum()\n\t# sum_df = sum_df[sum_df>=5]\n\tsum_df = sum_df[sum_df>=100]\n\tdf = df[df[1].isin(sum_df.index)]\n\tdf_alt = df[df[0]==alt]\n\tdf_alt = df_alt.set_index(1)\n\tdf_ref = df[df[0]==ref]\n\tdf_ref = df_ref.set_index(1)\n\t\n\tcells_ref = df_ref[df_ref[2]>=min_read].index.tolist()\n\tcells_alt = df_alt[df_alt[2]>=min_read].index.tolist()\n\tref_alt = set(cells_ref).intersection(cells_alt)\n\tref_ref = set(cells_ref) - set(ref_alt)\n\talt_alt = set(cells_alt) - set(ref_alt)\n\treturn len(ref_ref),len(ref_alt),len(alt_alt),total_cells - len(ref_ref)-len(ref_alt)-len(alt_alt)\n\ndef parse_file2(f,min_read,ref,alt):\n\tfile=\"{0}_results/{0}/outs/vartrix.matrix.var_coverage.out\".format(f)\n\tdf = pd.read_csv(file,sep=\" \",header=None,skiprows=3)\n\tsum_df = df.groupby(1)[2].sum()\n\tsum_df = sum_df[sum_df>=min_read]\n\tdf = df[df[1].isin(sum_df.index)]\n\tdf_alt = df[df[0]==alt]\n\tdf_alt = df_alt.set_index(1)\n\tdf_alt[3] = df_alt[2]/sum_df\n\tdf_ref = df[df[0]==ref]\n\tdf_ref = df_ref.set_index(1)\n\tdf_ref[3] = df_ref[2]/sum_df\n\treturn df_alt[3].tolist(),df_ref[3].tolist()\n\ndef main():\n\n\targs = my_args()\n\tsample_list = pd.read_csv(args.sample_list,header=None)[0].tolist()\n\tgenotype = []\n\t\n\tfor s in sample_list:\n\t\tgenotype.append(parse_file(s,args.min_read_count,args.ref,args.alt))\n\tdf = pd.DataFrame(genotype)\n\tdf.columns = ['ref/ref','ref/alt','alt/alt','No Call']\n\tdf.index = sample_list\n\tdf.to_csv(args.output,sep=\"\\t\")\n\t\nif __name__ == \"__main__\":\n\tmain()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"bin/vartrix_allele_frequency_table.py","file_name":"vartrix_allele_frequency_table.py","file_ext":"py","file_size_in_byte":3830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"195426679","text":"import streamlit as st\nimport pandas as pd\nimport joblib\nfrom sklearn.ensemble import RandomForestClassifier\n\nst.write(\"\"\"\n# Credit eligibility check working on Large dataset\nThis app predicts the **Eligibility**!\n\"\"\")\n\nst.sidebar.header('User Input Parameters')\n\n\ndef user_input_features():\n age = st.sidebar.slider('age', 18, 95, 32)\n balance = st.sidebar.slider('balance', -8019.00, 102127.00, 0.00)\n duration = st.sidebar.slider('duration', 0, 4918, 2280)\n \n pdays = st.sidebar.slider('pdays', -1, 530, -1)\n previous = st.sidebar.slider('previous', 0.0, 275.0, 200.0)\n 
poutcome = st.sidebar.slider('poutcome', 0.0, 3.0, 2.0)\n \n \n job = st.sidebar.radio(label='job',options=[\"unemployed\", \"student\", \"unknown\", \"retired\",\"housemaid\",\"blue-collar\", \n \"technician\",\"services\", \"admin.\",\"self-employed\",\"management\",\"entrepreneur\" ])\n \n education = st.sidebar.radio(label=\"education\", options=[\"primary\", \"unknown\", \"secondary\", \"tertiary\"]) \n \n default = st.sidebar.radio(label=\"default\", options=[\"no\", \"yes\"])\n \n housing = st.sidebar.radio(label=\"housing\", options=[\"no\", \"yes\"])\n \n data = {'age': age,\n 'balance': balance,\n 'duration': duration,\n 'previous': previous,\n 'poutcome': poutcome,\n 'default': default,\n 'job': job,\n 'education': education,\n 'pdays': pdays,\n 'housing': housing\n }\n features = pd.DataFrame(data, index=[0])\n return features\n\ndf = user_input_features()\n\n\n\n\ndf[\"education\"] = df[\"education\"].astype('category')\ndf[\"default\"] = df[\"default\"].astype('category')\ndf[\"housing\"] = df[\"housing\"].astype('category')\n\ndf[\"job\"] = df[\"job\"].astype('category')\n\ndf['jobcat'] = df['job'].apply(lambda x: ['unemployed', 'student', 'unknown', 'retired','housemaid','blue-collar', 'technician','services', 'admin.','self-employed','management','entrepreneur' ].index(x))\ndf['educationcat'] = df['education'].apply(lambda x: ['primary', 'unknown', 'secondary', 'tertiary'].index(x))\ndf['defaultcat'] = df['default'].apply(lambda x: ['no', 'yes'].index(x))\ndf['housingcat'] = df['housing'].apply(lambda x: ['no', 'yes'].index(x))\n\n\n\n\n\n\ndel df['job']\ndel df['education']\ndel df['default']\ndel df['housing']\n\n\n\nst.subheader('User Input parameters ')\n\n\n\n\ndf['jobcat']= df['jobcat'].cat.codes\ndf['educationcat']= df['educationcat'].cat.codes\ndf['housingcat']= df['housingcat'].cat.codes\ndf['defaultcat']= df['defaultcat'].cat.codes\n\npredictors = list(list(df.columns))\n#df = df[predictors].values\ndf = df.reindex(columns= ['age', 'balance', 'duration', 'pdays', 'previous', 'poutcome', 'jobcat', 'educationcat', 'defaultcat', 'housingcat'])\n\n\nst.write(df)\n\n#$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$mno scale belowbelow$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$\n\nst.header('Predictions')\n# Random Forest Prediction\n# Reads in saved regression model\nloaded_LinR_clf = joblib.load(open('VK_linR_Model.pkl', 'rb'))\n\nLinR_prediction = loaded_LinR_clf.predict(df)\nst.subheader('LinearRegression')\nst.write(LinR_prediction)\n\n\n# Linear regression\n# Reads in saved regression model\nloaded_XGB_clf = joblib.load(open('VK_xgb_Model.pkl', 'rb'))\n\nxgb_prediction = loaded_XGB_clf.predict(df)\n\nst.subheader('XGBClassifier')\nst.write(xgb_prediction)\n\n\n# RIDGE regression\n# Reads in saved regression model\nloaded_LogR_clf = joblib.load(open('VK_logR_Model.pkl', 'rb'))\n\nLogR_prediction = loaded_LogR_clf.predict(df)\nst.subheader('LogisticRegression')\nst.write(LogR_prediction)\n\n\n\n# LASSO regression\n# Reads in saved regression model\nloaded_DTR_clf = joblib.load(open('VK_DTR_Model.pkl', 'rb'))\n\nDTR_prediction = loaded_DTR_clf.predict(df)\nst.subheader('DecisionTreeRegressor')\nst.write(DTR_prediction)\n\n\n\n\n\n# Linear regression\n# Reads in saved regression model\nloaded_DTC_clf = joblib.load(open('VK_DTC_Model.pkl', 'rb'))\n\ndtc_prediction = loaded_DTC_clf.predict(df)\n\nst.subheader('DecisionTreeClassifier')\nst.write(dtc_prediction)\n\n\n\n# Linear regression\n# Reads in saved regression model\nloaded_RF_clf = 
joblib.load(open('VK_RF_Model.pkl', 'rb'))\n\nrf_prediction = loaded_RF_clf.predict(df)\n\nst.subheader('RandomForestRegressor')\nst.write(rf_prediction)\n\n\n# # Linear regression\n# # Reads in saved regression model\n# loaded_SVM_clf = joblib.load(open('VK_svm_Model.pkl', 'rb'))\n\n# svm_prediction = loaded_SVM_clf.predict(df)\n\n# st.subheader('SVM')\n# st.write(svm_prediction)\n\n\n# Linear regression\n# Reads in saved regression model\nloaded_ADB_clf = joblib.load(open('VK_ADB_Model.pkl', 'rb'))\n\nadb_prediction = loaded_ADB_clf.predict(df)\n\nst.subheader('ADBClassifier')\nst.write(adb_prediction)\n\n\n\n\n\n\n\n\n\n","sub_path":"Customer-Credit-Check-large/credit-large.py","file_name":"credit-large.py","file_ext":"py","file_size_in_byte":4686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"262190615","text":"#compiles fortran code, computes error between real and numerical solutions, and graphs the data\nimport os\nimport sys\nimport numpy as np\nimport math\nimport matplotlib.pyplot as plt\n#checks to see if already compiled, if not it will make, if already compiled it will make clean and then make\ndef make():\n\tdoesExecutableExist = os.path.exists('../fortran/main.exe')\n\tif not doesExecutableExist:\n\t\tos.chdir('../fortran')\n\t\tmakeIt = 'make'\n\t\tos.system(makeIt)\n\telse:\n\t\tos.chdir('../fortran')\n\t\tmakeCleanMake = 'make clean && make'\n\t\tos.system(makeCleanMake)\n\n#computes error between real solution and numerical solution\ndef error(arr):\n eFinal = 0\n eArray = []\n for i in range(len(arr)):\n #Uses provided formula to calculate error\n eArray.append(abs((-math.sqrt(2*np.log(float(arr[i][0])**2 + 1) + 4)) - float(arr[i][1])))\n eFinal += eArray[i]\n eFinal = eFinal/len(arr)\n return eFinal, eArray\n#graphs the provided data and saved as file\ndef graph(numerical,number):\n t = np.asarray([])\n y = np.asarray([])\n t2 = np.asarray([])\n save = number\n actual = np.asarray([])\n #gets and stores t and y values from numerical array\n for i in range(number):\n t = np.append(round(float(numerical[i][0]),2),t)\n y = np.append(round(float(numerical[i][1]),2),y)\n #computes actual values using provided formula\n for i in range(number):\n actual = np.append(actual,(-math.sqrt(2*np.log(float(numerical[i][0])**2 + 1) + 4)))\n eF, eA = error(numerical)\n #plots numerical solutions\n plt.plot(t,y,'r.:')\n number = number - 1\n while number >= 0:\n t2 = np.append(round(float(numerical[number][0]),2),t2)\n number = number - 1\n #plots real solutions\n plt.plot(t2,actual,'b.-')\n #labels\n plt.title(eF)\n plt.xlabel('t axis')\n plt.ylabel('y axis')\n plt.grid()\n #saves graph as a file\n plt.savefig('result_' + str(save) +'.png')\n\n \nif __name__ == '__main__':\n #calls make function\n make()\n #intialize lists\n fread8,fread16,fread32,fread64 = [],[],[],[]\n #read files and store as lists in the format t,y\n #closes files\n f=open( '../fortran/output_08.dat','r')\n for line in f:\n line = line.split()\n fread8.append(line)\n f.close()\n \n f=open( '../fortran/output_16.dat','r')\n for line in f:\n line = line.split()\n fread16.append(line)\n f.close()\n\n f=open( '../fortran/output_32.dat','r')\n for line in f:\n line = line.split()\n fread32.append(line)\n f.close()\n\n f=open( '../fortran/output_64.dat','r')\n for line in f:\n line = line.split()\n fread64.append(line)\n f.close()\n #convert lists into numpy arrays\n array8 = np.asarray(fread8)\n array16 = np.asarray(fread16)\n array32= np.asarray(fread32)\n 
array64= np.asarray(fread64)\n #calls graph function\n graph(array8,8)\n graph(array16,16)\n graph(array32,32)\n graph(array64,64)\n\n\n\n","sub_path":"Programs/IVP_Ord_Diff_Eq/pyRun/pyRun.py","file_name":"pyRun.py","file_ext":"py","file_size_in_byte":2970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"462655383","text":"#!/usr/bin/python\r\n\r\nimport datetime\r\n\r\nfrom selenium import webdriver\r\n\r\n\r\nclass Webtests():\r\n \"\"\"A simple web class\"\"\"\r\n\r\n def __init__(self):\r\n '''method takes address from external file'''\r\n self.URL = []\r\n current_time = datetime.datetime.now().strftime('%Y %m %d %H:%M:%S')\r\n with open('log.txt', 'w') as ff:\r\n ff.write('Test was started at {}'.format(current_time + '\\n'))\r\n\r\n with open(\"list_of_sites.txt\") as f:\r\n num = len(f.readlines())\r\n f1 = open(\"list_of_sites.txt\",'r')\r\n for i in f1.readlines():\r\n self.URL.append(i.replace('\\n',''))\r\n\r\n def open_site(self):\r\n '''method opens site from list of sites'''\r\n url = self.URL\r\n browser = webdriver.Chrome()\r\n for address in url:\r\n browser.get('http://'+address)\r\n self.make_log(address)\r\n\r\n browser.close()\r\n\r\n def make_log(self, address):\r\n '''method writes log file'''\r\n with open('log.txt', 'a') as f:\r\n my_time = datetime.datetime.now().strftime('%H:%M:%S')\r\n f.write('{0} was opened at {1} \\n'.format(address, my_time))\r\n\r\n\r\n\r\n\r\n\r\ntest_1 = Webtests()\r\n\r\ntest_1.open_site()\r\n\r\n\r\n","sub_path":"test_01.py","file_name":"test_01.py","file_ext":"py","file_size_in_byte":1231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"280123200","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # Convert CIFAR-10 Python Pickle file to image file\n# \n# Extract image and label and write it into destination directories
\n# It will extract 60,000 images from the pickle files.\n# Each csv file will contain the image file name and its label; there will be 6 csv files in total.\n# Images will be written to the directory in PNG format.
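\n#\n# A minimal illustrative sketch (added here; not part of the original script) of\n# the CIFAR-10 row layout that writeToImage() below relies on: each 3072-byte row\n# is channel-first (1024 R, then 1024 G, then 1024 B values), and each channel is\n# a row-major 32x32 plane:\n#\n#   import numpy as np\n#   flat = np.zeros(3072, dtype=np.uint8)   # stand-in for one data row\n#   R = flat[0:1024].reshape(32, 32)\n#   G = flat[1024:2048].reshape(32, 32)\n#   B = flat[2048:3072].reshape(32, 32)\n#   rgb = np.dstack((R, G, B))              # (32, 32, 3) RGB image array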
\n# \n\nSRC_DIR = 'D:/Users/kim jung hoo/Desktop/김정현/학교/강의/3학년 2학기/01.전공/인공지능/Term_Project/originData'\nDES_DIR = 'D:/Users/kim jung hoo/Desktop/김정현/학교/강의/3학년 2학기/01.전공/인공지능/Term_Project/images'\n\n\n# clean up destination dir first\nimport os\nimport shutil\n\nfor the_file in os.listdir(DES_DIR):\n file_path = os.path.join(DES_DIR, the_file)\n try:\n if os.path.isfile(file_path):\n os.unlink(file_path)\n except Exception as e:\n print(e)\n\nprint('cleansing destination directory has been done')\n\n\n# In[33]:\n\nimport _pickle\nimport numpy as np\nfrom PIL import Image\n\nPICKLE_FILES = ['data_batch_1', 'data_batch_2', 'data_batch_3', 'data_batch_4', 'data_batch_5', 'test_batch']\n\n\ndef unpickle(file):\n import _pickle\n with open(file, 'rb') as fo:\n dict = _pickle.load(fo, encoding='latin1')\n data = dict.get('data')\n labels = dict.get('labels') # narray\n filenames = dict.get('filenames')\n size = len(data)\n return size, data, labels, filenames\n\n\ndef writeToImage(desDir, flat_image, label, filename):\n R = flat_image[0:1024].reshape((32, 32))\n G = flat_image[1024:2048].reshape((32, 32))\n B = flat_image[2048:3072].reshape((32, 32))\n img_array = np.dstack((R, G, B))\n img = Image.fromarray(img_array,mode='RGB')\n img.save(desDir+'/'+filename,'PNG')\n\n\ndef pickleToImage(desDir, picklefile, pickefile):\n size, data, labels, filenames = unpickle(picklefile)\n metafilename = desDir+'/'+pickefile+'.csv'\n metafile = open(metafilename,'w')\n print('File :'+pickefile+'.csv')\n print('Writing :'+str(size))\n for i in range(0,size):\n writeToImage(desDir, data[i], labels[i], filenames[i])\n metafile.write(filenames[i]+','+str(labels[i])+'\\n')\n metafile.close()\n\n\nmetafilename = DES_DIR+'/meta'\n\nfor picklefile in PICKLE_FILES:\n print ('processing ' + picklefile)\n pickleToImage(DES_DIR, SRC_DIR+'/'+picklefile, picklefile)\n\n\n\n\n\n","sub_path":"CIFAR-10 Pickle files to image file.py","file_name":"CIFAR-10 Pickle files to image file.py","file_ext":"py","file_size_in_byte":2363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"361348965","text":"#!/usr/bin/env python3\n'''\nCreated by Conan Albrecht \nApache open source license.\nNovember, 2017\n'''\n##################################################\n### Unique id generator. 
Similar to uuid1() but \n### also includes the process id.\n###\n### Note that upping the counter requires a global lock.\n###\n### The bit assignment:\n### \n### 52 bits for nanoseconds since epoch (really it can use unlimited bits because on left side of the number, but 52 bits gets us to ~2100)\n### 16 bits for counter\n### 48 bits for machine id\n### 24 bits for process id\n### ========\n### 140 bits total, or 35 hex characters \n### \n### Maximum number is 1.39e42\n###\n\nimport uuid\nimport time as time \nimport os\nimport random\nimport threading\nimport math\nimport collections\n \n \n# initial values/constants\nlastnow = 0\ncounterstart = random.getrandbits(16) - 1\ncountermax = math.pow(2, 16) - 1\ncounter = counterstart\n# returns a 48 bit number\nmachineid = uuid.getnode()\n# linux is usually 16 bits\nprocessid = os.getpid()\n\n# the main data structure\nUID = collections.namedtuple('UID', ( 'time', 'counter', 'machine', 'process' ))\n\n# binary size of each number\n# and binary positions for shifting\n# and for splitting hex and binary (from right side)\nsize = UID(52, 16, 48, 24)\n_shift = []\nfor i in reversed(range(len(size))):\n _shift.append(sum(size[i:]))\nshift = UID(*reversed(_shift))\nhsplit = UID(*(int(s/-4) for s in shift))\nbsplit = UID(*(s*-1 for s in shift))\n\n######################################\n### Main API\n\ndef ruid():\n '''\n Creates a \"raw\" unique id. The result is a\n UID namedtuple with four parts:\n \n time\n counter\n machine\n process\n \n All other functions in this module just format\n the id created in this function.\n '''\n global lastnow, counter_start, counter\n # update the nanoseconds and counter\n with threading.RLock():\n now = int(time.time())#int(time.time() * 1e6)\n counter += 1\n if counter >= countermax:\n counter = 0\n while now == lastnow and counter == counterstart:\n time.sleep(.001) # wait a millisecond and try again\n now = int(time.time())#int(time.time() * 1e6)\n lastnow = now\n # return the named tuple\n return UID(now, counter, machineid, processid)\n \n\ndef iuid(raw=None):\n '''\n Creates a unique id as an int. \n\n If provided, raw should be a UID named tuple\n (usually from a call to ruid).\n ''' \n if raw is None:\n raw = ruid()\n return (raw.time << shift.counter) + \\\n (raw.counter << shift.machine) + \\\n (raw.machine << shift.process) + \\\n (raw.process)\n \n\ndef uid(raw=None, sep=None):\n '''\n Creates a unique id as a hex string. \n\n If provided, raw should be a UID named tuple\n (usually from a call to ruid).\n \n Use sep='-' to separate the parts by dashes.\n '''\n if raw is None:\n raw = ruid()\n # hex version\n if sep is None:\n return '{:0x}'.format(iuid(raw))\n # pretty version\n n = uid(raw)\n return sep.join((\n n[:hsplit.counter], \n n[hsplit.counter: hsplit.machine], \n n[hsplit.machine: hsplit.process], \n n[hsplit.process:],\n ))\n \n \ndef buid(raw=None, sep=None):\n '''\n Creates a unique id as a binary string. \n\n If provided, raw should be a UID named tuple\n (usually from a call to ruid).\n \n Use sep='-' to separate the parts by dashes.\n '''\n if raw is None:\n raw = ruid()\n # hex version\n if sep is None:\n return '{:0b}'.format(iuid(raw))\n # pretty version\n n = buid(raw)\n return sep.join((\n n[:bsplit.counter], \n n[bsplit.counter: bsplit.machine], \n n[bsplit.machine: bsplit.process], \n n[bsplit.process:],\n ))\n \n\ndef wuid(raw=None, leading='u'):\n '''\n Creates a unique id as a web-compliant id\n for use in HTML ids. 
This is the same as \n a hex id, but it has a leading `u` to ensure\n an alphabetical character comes first, per \n the standard.\n\n If provided, raw should be a UID named tuple\n (usually from a call to ruid).\n\n Use sep='-' to separate the parts by dashes.\n '''\n if raw is None:\n raw = ruid()\n return '{}{}'.format(leading, uid(raw))\n \n\ndef iunpack(n):\n '''\n Unpacks the given integer number\n into a UID namedtuple.\n '''\n # format of these is (mask & n) >> shifted\n return UID(\n n >> shift.counter,\n ((((1 << size.counter) - 1) << shift.machine) & n) >> shift.machine,\n ((((1 << size.machine) - 1) << shift.process) & n) >> shift.process,\n ((1 << shift.process) - 1) & n,\n )\n \n \ndef unpack(hex_n):\n '''\n Unpacks the given hex number string\n into a UID namedtuple.\n \n To unpack a web id, use\n unpack(myid[1:])\n to remove the leading character.\n '''\n return iunpack(int(hex_n, 16))\n \n \n \n \n###################################################\n### Unit tests for this module:\n###\n### python3 uid.py\n###\n\nimport unittest\n\nclass Tester(unittest.TestCase):\n\n def test_ruid(self):\n u = ruid()\n u2 = ruid()\n self.assertEqual(u.machine, u2.machine)\n self.assertEqual(u.process, u2.process)\n\n def test_int_hex_binary(self):\n u = ruid()\n n = iuid(u)\n h = uid(u)\n b = buid(u)\n self.assertEqual(n, int(h, 16))\n self.assertEqual(n, int(b, 2))\n \n def test_int_hex_binary(self):\n u = ruid()\n n = iuid(u)\n h = uid(u)\n b = buid(u)\n self.assertEqual(n, int(h, 16))\n self.assertEqual(n, int(b, 2))\n \n def test_pretty(self):\n u = ruid()\n # hex\n h = uid(u)\n p = uid(u, '-')\n self.assertEqual(h, p.replace('-', ''))\n # binary\n b = buid(u)\n p = buid(u, '-')\n self.assertEqual(b, p.replace('-', ''))\n\n def test_unpack(self):\n # one test\n u = ruid()\n self.assertEqual(u, unpack(uid(u)))\n self.assertEqual(u, iunpack(iuid(u)))\n # other direction with int\n n = iuid()\n self.assertEqual(n, iuid(iunpack(n)))\n # other direction with hex\n h = uid()\n self.assertEqual(h, uid(unpack(h)))\n \nif __name__ == '__main__':\n unittest.main()\n ","sub_path":"django_mako_plus/uid.py","file_name":"uid.py","file_ext":"py","file_size_in_byte":6403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"558447435","text":"import numpy as np\nimport pandas as pd\nfrom bs4 import BeautifulSoup\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport re\nimport nltk\nimport json\n\n#nltk.download('stopwords')\nfrom nltk.corpus import stopwords\n\nfrom nltk.stem import SnowballStemmer,PorterStemmer\nfrom nltk.tokenize import TweetTokenizer\n\nfrom sklearn.svm import LinearSVC\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer, TfidfVectorizer \nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.svm import SVC\nfrom sklearn.model_selection import train_test_split, StratifiedKFold\nfrom sklearn.pipeline import make_pipeline, Pipeline\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.metrics import make_scorer, accuracy_score, f1_score\nfrom sklearn.metrics import roc_curve, auc\nfrom sklearn.metrics import confusion_matrix, roc_auc_score, recall_score, precision_score\n\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences\n\n#convert train data to DataFrame\ndef load_training_data_to_pandas(filename = 'data/train.jsonl'):\n X = []\n Y = []\n fhand = open(filename,encoding='utf8')\n for line in fhand:\n data = 
json.loads(line)\n\n fullTweet = data['response']\n X.append(fullTweet)\n Y.append(data['label'])\n\n dfdata = pd.DataFrame({'Tweets': X,'Labels': Y}) \n\n dfdata.to_csv(r'data/dataPandas1.csv',index=False)\n \n#Convert test data to DataFrame\ndef load_test_data_to_pandas(filename = 'data/test.jsonl'):\n tid = []\n X = []\n Y = []\n fhand = open(filename,encoding='utf8')\n for line in fhand:\n data = json.loads(line)\n tid.append(data['id'])\n\n fullTweet = data['response']\n X.append(fullTweet)\n\n dftestdata = pd.DataFrame({'ID': tid,'Tweets': X})\n \n dftestdata.to_csv(r'data/dftestdata1.csv',index=False)\n \nload_training_data_to_pandas()\nload_test_data_to_pandas()\n\ndata = pd.read_csv(\"data/dataPandas1.csv\")\ndata.isnull().values.any()\ndata_clean = data.copy()\ndata_clean['Labels'] = data_clean['Labels'].apply(lambda x: 1 if x=='SARCASM' else 0)\ndata_clean['text_clean'] = data_clean['Tweets'].apply(lambda x: BeautifulSoup(x, \"lxml\").text)\n\ndata_clean = data_clean.loc[:, ['text_clean', 'Labels']]\ndata_clean.head()\n\ntrain, test = train_test_split(data_clean, test_size=0.2, random_state=42)\nX_train = train['text_clean'].values\nX_test = test['text_clean'].values\ny_train = train['Labels']\ny_test = test['Labels']\n\n#tokenize the data\ndef tokenize(text): \n tknzr = TweetTokenizer()\n return tknzr.tokenize(text)\n\nen_stopwords = set(stopwords.words(\"english\")) \n\nvectorizer = CountVectorizer(\n analyzer = 'word',\n tokenizer = tokenize,\n lowercase = True,\n ngram_range=(1, 1),\n stop_words = en_stopwords)\n \n#cross validation and grid search to find good hyperparameters for our SVM model\nkfolds = StratifiedKFold(n_splits=5, shuffle=True, random_state=1)\n\nnp.random.seed(1)\n\npipeline_svm = make_pipeline(vectorizer,SVC(probability=True, kernel=\"linear\", class_weight=\"balanced\"))\n\ngrid_svm = GridSearchCV(pipeline_svm,\n param_grid = {'svc__C': [0.01, 0.1, 1]}, \n cv = kfolds,\n scoring=\"roc_auc\",\n verbose=1, \n n_jobs=-1) \n\ngrid_svm.fit(X_train, y_train)\n\nmodel = grid_svm.best_estimator_\n\nX_valTokens = twittertestdata['Tweets'].values\n\n#Predict model on test data set\nvalidation = model.predict(X_valTokens)\ntwittertestdata['Predict'] = validation\ntwittertestdata['PLabel'] = np.where(twittertestdata['Predict'] > 0.5, \"SARCASM\", \"NOT_SARCASM\")\ntwittertestdata.head()\ntwittertestdata.to_csv('answer_SVM.txt', columns = [\"ID\" , \"PLabel\"] , header = False , index = False)\n","sub_path":"SVM.py","file_name":"SVM.py","file_ext":"py","file_size_in_byte":3764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"304814723","text":"def event_trimm(inputfile):\n \"\"\"\n Trimming of the data from mseed file and saving in form of acsii\n Data path, data files characteristics are specified in the inputfile\n\n :param inputfile: all the inputs are defined in the inputfile\n :return:\n \"\"\"\n\n import Inputread\n import os\n import Catalog\n from obspy.core import UTCDateTime\n from obspy import read\n from pylab import plot, show\n\n # check the existence of the inputfile\n if not os.path.isfile(inputfile):\n raise FileExistsError('input file called ' + inputfile + 'does not exist')\n\n # loading of the inputfile\n inps = Inputread.inpread(inputfile, header_lines = 1)\n\n # check the existence of dbtable\n if not os.path.isfile(inps['dbtable']):\n raise ValueError('Database table does not exist')\n\n # check the existence of datafile\n if not os.path.isfile(inps['datafile']):\n raise 
ValueError('Datafile does not exist')\n\n # check the existence of dbtable\n if not os.path.exists(inps['datapath']):\n raise IsADirectoryError('Data directory does not exist')\n\n # check the existence of dbtable\n if not os.path.exists(inps['outpath']):\n raise IsADirectoryError('Outpath directory does not exist')\n\n # check the station file existence\n if not os.path.isfile(inps['stafile']):\n raise FileExistsError('Sta file called does not exist')\n\n # check the station file existence\n if not os.path.isfile(inps['phasefile']):\n raise FileExistsError('Phase file called does not exist')\n\n # reading the station file\n sta = []\n with open(inps['stafile'], encoding='utf-8') as file:\n for line in file:\n read_line = line.rstrip()\n read_line = read_line.split()\n sta.append(read_line[0])\n\n # reading the data files list\n datafiles = []\n datapaths = []\n with open(inps['datafile'], encoding='utf-8') as file:\n for line in file:\n read_line = line.rstrip()\n read_line = read_line.split()\n datafiles.append(read_line[0])\n datapaths.append(read_line[1])\n\n # setting the components\n comp = list(inps['comp'])\n\n # reading of the phase file and inserting the values to the catalog and pick classes\n cat = Catalog.EventCatalog()\n Ptab = Catalog.PickTab()\n Stab = Catalog.PickTab()\n\n Ptab.set_header(sta)\n Ptab.set_phase('P')\n Stab.set_header(sta)\n Stab.set_phase('S')\n\n frst = 1\n\n with open(inps['phasefile'], encoding='utf-8') as file:\n for line in file:\n read_line = line.rstrip()\n read_line = read_line.split()\n\n if read_line[0] == '#':\n # t0\n t = UTCDateTime(int(read_line[1]), int(read_line[2]), int(read_line[3]), int(read_line[4]),\n int(read_line[5]), float(read_line[6]))\n\n # ID\n ID = int(read_line[14])\n\n # inserting information into a CatalogItem class\n event = Catalog.CatalogItem(id=int(read_line[14]), t0=t.timestamp, lat=float(read_line[7]),\n lon=float(read_line[8]), depth=float(read_line[9]),\n mag=float(read_line[10]))\n\n # adding event to a catalog\n cat.add_event(event)\n\n # inserting pick classes to a pick tab\n if frst != 1:\n Ptab.add_pick(P)\n Stab.add_pick(S)\n\n frst = 2\n\n # creating pick classes for P and S\n P = Catalog.PickItem()\n S = Catalog.PickItem()\n\n P.set_header(pattern=sta)\n P.set_phase(phase='P')\n S.set_header(pattern=sta)\n S.set_phase(phase='S')\n\n elif read_line[0] != '#':\n statemp = read_line[0]\n ttemp = t + float(read_line[1])\n phasetemp = read_line[3]\n\n if phasetemp == 'P':\n P.add_pick(id=ID, pick_phase=phasetemp, t=ttemp.timestamp, sta=statemp)\n else:\n S.add_pick(id=ID, pick_phase=phasetemp, t=ttemp.timestamp, sta=statemp)\n\n # getting necessary catalog information\n ids = cat.get_param('id')\n origts = cat.get_param('t0')\n\n # cycle through components\n for c in comp:\n # cycle through stations\n for s in sta:\n\n p = 1\n # cycle through catalog - creating the data file names\n fnmsstart = []\n fnmssend = []\n idsnew = []\n origtnew = []\n\n # getting the file names - ISLAND\n for i in list(range(0,cat.number_events())):\n nm = get_island_filename(UTCDateTime(origts[i]), datafiles, datapaths, c, s)\n\n if nm:\n nm2 = get_island_filename(UTCDateTime(origts[i]) + inps['lenght'], datafiles, datapaths, c, s)\n\n if nm2:\n fnmsstart.append(nm)\n fnmssend.append(nm2)\n\n origtnew.append(origts[i])\n idsnew.append(ids[i])\n\n\n # cycle through unique filenames\n fnuniq = set(fnmsstart)\n\n for fnameload in fnuniq:\n # reading the datafile\n st = read(fnameload)\n #print(fnameload + ' read')\n\n # trimming the data\n 
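# Note (added): each event window is trimmed directly when it starts and ends in\n            # the currently loaded file; when it spans two files, the streams are\n            # concatenated, sorted and merged (method=1) first. obspy_seis_trimm() is then\n            # called with st rather than the merged stn, which looks like an oversight.\n            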
j = 0\n for fname in fnmsstart:\n\n if fname == fnameload:\n\n if fname == fnmssend[j]:\n\n stout = obspy_seis_trimm(st, UTCDateTime(origtnew[j]),\n UTCDateTime(origtnew[j]) + inps['lenght'])\n\n elif fname != fnmssend[j]:\n st2 = read(fnmssend[j])\n\n stn = st + st2\n\n stn.sort(['starttime'])\n stn.merge(method=1)\n\n stout = obspy_seis_trimm(st, UTCDateTime(origtnew[j]),\n UTCDateTime(origtnew[j]) + inps['lenght'])\n\n # write into a mseed\n if stout:\n\n outname = str(idsnew[j]) + '_' + s + '_HH' + c + '.mseed'\n stout.write(os.path.join(inps['outpath'], outname), format='MSEED')\n\n print(outname)\n print('Done ' + str(p) + '/' + str(len(idsnew)))\n p += 1\n\n j += 1\n\n\ndef obspy_seis_trimm(st, t1, t2):\n \"\"\"\n Trimming the seismogram sequence from stream file\n :param tracefile:\n :param d1:\n :param d2:\n :return:\n \"\"\"\n\n from obspy.core import UTCDateTime\n from obspy import Trace, Stream\n\n # checking, if the start time and endtime are within the seismogram range\n if t1 > st[0].stats.endtime:\n # raise ValueError('Start time is out of the seismogram range')\n st = None\n return st\n\n if t1 < st[0].stats.starttime:\n # raise ValueError('Start time is out of the seismogram range')\n st = None\n return st\n\n if t2 > st[0].stats.endtime:\n # raise ValueError('End time is out of the seismogram range')\n st = None\n return st\n\n if t2 < st[0].stats.starttime:\n # raise ValueError('End time is out of the seismogram range')\n st = None\n return st\n\n if t2 < t1:\n # raise ValueError('Start time must be before the end time')\n st = None\n return st\n\n # searching for the indicies\n t1ind = round((t1.timestamp - st[0].stats.starttime.timestamp) * st[0].stats.sampling_rate)\n t2ind = round((t2.timestamp - st[0].stats.starttime.timestamp) * st[0].stats.sampling_rate)\n\n # trimming the data\n data = st[0].data[t1ind:t2ind]\n\n # preparing data for stream creation\n stats = {'starttime':st[0].stats.starttime + t1ind * st[0].stats.delta}\n stats.update({'sampling_rate': st[0].stats.sampling_rate})\n stats.update({'station': st[0].stats.station})\n stats.update({'npts': len(data)})\n\n # tvorba streamu\n st = Stream([Trace(data=data, header=stats)])\n\n # write as mseed ASCII file (encoding=0)\n # st.write('outfile.ascii', format ='SLIST')\n # st.write('pokusoutput.mseed', format='MSEED')\n\n return st\n\n\ndef get_island_filename(t, datafiles, datapaths, comp, sta):\n \"\"\"\n Getting the filename for time t, component comp and station sta. FIlename is selected from datafiles\n STAYYMMDD00000 0or1. EorH Hcomp\n AHS141203000001.EHZ\n :param t:\n :param datafiles:\n :param comp:\n :param sta:\n :return:\n \"\"\"\n\n from obspy.core import UTCDateTime\n import re\n import os\n\n # central part of the filename\n p1 = str(t.year)\n p1 = p1[-2:]\n p2 = str(t.month + 1000)\n p2 = p2[-2:]\n p3 = str(t.day + 1000)\n p3 = p3[-2:]\n\n # forming of the regular expression\n pattern = re.compile(r'^' + sta + p1 + p2 + p3 + '00000' + '(0|1)' + '.' 
+ '(E|H)' + 'H' + comp + '$')\n\n # cycle through datafiles\n fname = None\n for fnm in datafiles:\n a = pattern.search(fnm)\n if a:\n fname = a.group()\n ind = datafiles.index(fname)\n fname = os.path.join(datapaths[ind], fname)\n break\n\n # returun\n return fname\n\n\ndef fdsn_download(inputfile):\n \"\"\"\n Read the catalog and create catalog class\n :return:\n \"\"\"\n\n import Inputread\n import os\n import Catalog\n from obspy.core import UTCDateTime\n from obspy import read\n\n # check the existence of the inputfile\n if not os.path.isfile(inputfile):\n raise FileExistsError('input file called ' + inputfile + 'does not exist')\n\n # loading of the inputfile\n inps = Inputread.inpread(inputfile, header_lines=1)\n\n # check the station file existence\n if not os.path.isfile(inps['stafile']):\n raise FileExistsError('Sta file called ' + inps['stafile'] + ' does not exist')\n\n # check the cat file existence\n if not os.path.isfile(inps['catfile']):\n raise FileExistsError('Cat file called ' + inps['catfile'] + ' does not exist')\n\n if not os.path.exists(inps['outpath']):\n raise IsADirectoryError('Outpath directory does not exist')\n\n # setting the components\n comp = list(inps['comp'])\n\n # setting the waveform duration\n dur = inps['duration']\n\n # start the time counter\n start = UTCDateTime.now()\n\n # reading the station file\n sta = []\n with open(inps['stafile'], encoding='utf-8') as file:\n for line in file:\n read_line = line.rstrip()\n read_line = read_line.split()\n sta.append(read_line[0])\n\n # web adress template for FDSN server\n # 'http://eida.bgr.de/fdsnws/dataselect/1/query?station=FBE??&channel=HHZ&starttime=2014-01-01T12:00:00&endtime=2014-01-01T12:10:00'\n templ1 = 'http://eida.bgr.de/fdsnws/dataselect/1/query?station='\n templ2 = '&channel=HH'\n templ3 = '&starttime='\n templ4 = '&endtime='\n\n # length of the catalog - nr of events and stations\n nev = file_len(inps['catfile'])\n\n # CYCLES through catalog, station and components\n catfile = open(inps['catfile'], encoding='utf-8', mode='r')\n datalist = open(inps['datalist'], encoding='utf-8', mode='a')\n\n # CYCLE throug catalog lines\n ix = 0\n\n for line in catfile:\n\n ix = ix + 1\n\n print(str(ix) + ' / ' + str(nev) + '\\n')\n\n read_line = line.rstrip()\n read_line = read_line.split()\n\n t0 = UTCDateTime(int(read_line[0]),int(read_line[1]),int(read_line[2]),int(read_line[3]),int(read_line[4]),int(float(read_line[5])))\n t1 = t0 + dur\n\n # cycle through stations\n for s in sta:\n\n # cycle through components\n for c in comp:\n\n # print(str(read_line[10] + ' ' + s + ' HH' + c + '\\n'))\n\n webname = templ1 + s + templ2 + c + templ3 + t0.isoformat() + templ4 + t1.isoformat()\n\n try:\n\n # reading the data\n st = read(webname)\n\n # saving the data\n outname = str(read_line[10]) + '_' + s + '_HH' + c + '.mseed'\n st.write(os.path.join(inps['outpath'], outname), format='MSEED')\n\n # update the datalist\n datalist.write(outname + ' ' + s + ' ' + c + '\\n')\n\n print(outname + ' ' + 'saved\\n')\n\n except:\n print( str(read_line[10]) + '_' + s + '_HH' + c + '.mseed' + ' No fucking way :(\\n')\n\n catfile.close()\n datalist.close()\n\n # stop the time counter and print the time\n stoptime = UTCDateTime.now()\n\n durtime = UTCDateTime(stoptime - start)\n\n print('Data downloaded in {0:2d} hours, {1:2d} minutes and {2:2d} seconds'.format(durtime.hour, durtime.minute,\n durtime.second))\n\n\ndef file_len(fname):\n with open(fname) as f:\n for i, l in enumerate(f):\n pass\n return i + 1\n\n\ndef 
files_list(outname,path):\n \"\"\"\n Get the txt list of all the files within the path folder\n :param path: '/home/jebacka/kokoty_na_snehu'\n :param outname: 'outname.txt'\n :return:\n \"\"\"\n\n import os\n\n if not os.path.exists(path):\n raise IsADirectoryError('Outpath directory does not exist')\n\n # list of files inside the path directory\n datafiles = os.listdir(path)\n\n # write into file\n fid = open(outname, mode='w', encoding='utf-8')\n\n for line in datafiles:\n fid.write(line + ' ' + path + '\\n')\n\n fid.close()\n\n\ndef get_stats(fileslist, catfile):\n \"\"\"\n Get the statistics of the data availability - number of recordings for one station\n :param fileslist: '/home/pojebsa/kakam/z/vesela.txt'\n :param fileslist: '/home/pojebsa/kakam/z/catfile.txt'\n :return:\n \"\"\"\n\n import os\n\n # check the cat file existence\n if not os.path.isfile(catfile):\n raise FileExistsError('Cat file called ' + catfile + ' does not exist')\n\n # check the cat file existence\n if not os.path.isfile(fileslist):\n raise FileExistsError('Fileslist file called ' + fileslist + ' does not exist')\n\n\ndef pick_data(inputfile):\n \"\"\"\n Picking routine aplicable for west bohemia data - for further cross-correlation\n :param inputfile:\n :return:\n \"\"\"\n\n from obspy.core import UTCDateTime\n import Inputread\n import os\n import Catalog\n from obspy import read\n from obspy.signal.trigger import ar_pick\n import matplotlib.pyplot as plt\n from obspy.core import Stream\n\n # check the existence of the inputfile\n if not os.path.isfile(inputfile):\n raise FileExistsError('input file called ' + inputfile + 'does not exist')\n\n # loading of the inputfile\n inps = Inputread.inpread(inputfile, header_lines=1)\n\n # check the station file existence\n if not os.path.isfile(inps['stafile']):\n raise FileExistsError('Sta file called ' + inps['stafile'] + ' does not exist')\n\n # check the cat file existence\n if not os.path.isfile(inps['catfile']):\n raise FileExistsError('Cat file called ' + inps['catfile'] + ' does not exist')\n\n # check the cat file existence\n if not os.path.isfile(inps['datafiles']):\n raise FileExistsError('Data files tab called ' + inps['datafilea'] + ' does not exist')\n\n if not os.path.exists(inps['datapath']):\n raise IsADirectoryError('Datapath directory does not exist')\n\n if not os.path.exists(inps['outpath']):\n raise IsADirectoryError('Outpath directory does not exist')\n\n\n # start the time counter\n start = UTCDateTime.now()\n\n # reading the station file\n sta, stalat, stalon = [], [], []\n with open(inps['stafile'], encoding='utf-8') as file:\n for line in file:\n read_line = line.rstrip()\n read_line = read_line.split()\n sta.append(read_line[0])\n stalat.append(read_line[1])\n stalon.append(read_line[2])\n\n # reading the catalog\n cat = Catalog.EventCatalog()\n\n with open(inps['catfile'], encoding='utf-8') as file:\n for line in file:\n read_line = line.rstrip()\n read_line = read_line.split()\n t = UTCDateTime(int(read_line[0]), int(read_line[1]), int(read_line[2]), int(read_line[3]),\n int(read_line[4]), float(read_line[5])).timestamp\n\n event = Catalog.CatalogItem(id=int(read_line[-1]), t0=t, lat=float(read_line[6]),\n lon=float(read_line[7]), depth=float(read_line[8]),\n mag=float(read_line[9]))\n\n # add event into the catalog\n cat.add_event(event)\n\n # reading the datafiles\n datafiles = []\n with open(inps['datafiles'], encoding='utf-8') as file:\n for line in file:\n read_line = line.rstrip()\n read_line = read_line.split()\n 
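# Added note: only the first whitespace-separated token on each line (the data\n            # file name) is kept; any additional columns are ignored.\n            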
datafiles.append(read_line[0])\n\n # cycle through event items\n ID = cat.get_param('id')\n\n # constants for picking\n flow, fhigh = 4, 25\n stap, ltap = 0.1, 1.0\n stas, ltas = 0.2, 2\n arp, ars = 3, 8\n varwp, varws = 0.1, 0.2\n\n for id in ID:\n\n # cycle through stations\n for s in sta:\n\n fnames = []\n\n fnames.append(os.path.join(inps['datapath'], get_wb_filename(id, s, 'HHZ')))\n fnames.append(os.path.join(inps['datapath'], get_wb_filename(id, s, 'HHN')))\n fnames.append(os.path.join(inps['datapath'], get_wb_filename(id, s, 'HHE')))\n\n # existence control\n if os.path.isfile(fnames[0]) and os.path.isfile(fnames[1]) and os.path.isfile(fnames[2]):\n\n tr = read(fnames[0])\n tr += read(fnames[1])\n tr += read(fnames[2])\n\n\n # sampling frequency\n dt = []\n dt.append(tr[0].stats.sampling_rate)\n dt.append(tr[1].stats.sampling_rate)\n dt.append(tr[2].stats.sampling_rate)\n\n # check if the sampling frequencies are the same\n if not dt[0] == dt[1] == dt[2]:\n raise ValueError('Sampling frequencies are not the same')\n else:\n df = dt[0]\n\n # pickin via az picker\n p_pick, s_pick = ar_pick(tr[0].data, tr[1].data, tr[2].data, df, flow, fhigh, ltap, stap, ltas, stas, arp, ars, varwp,\n varws)\n\n # Plotting the results\n ax = plt.subplot(111)\n plt.plot(tr[1].data, 'k')\n ymin, ymax = ax.get_ylim()\n plt.vlines(p_pick*tr[0].stats.sampling_rate , ymin, ymax, color='r', linewidth=2)\n plt.vlines(s_pick*tr[0].stats.sampling_rate, ymin, ymax, color='b', linewidth=2)\n plt.axis('tight')\n plt.savefig(os.path.join(inps['outpath'], str(id) + '_' + s + '.png'), dpi=200)\n # plt.show()\n plt.close()\n\n # deleting the traces\n del tr\n\n # deleting the filenames\n del fnames\n\n\n\n\n\n\n\n\n\ndef get_wb_filename(id,sta,comp):\n \"\"\"\n Generate the filename of West Bohemian trigger\n :param id: int\n :param sta: str\n :param comp: str\n :return:\n \"\"\"\n\n fname = str(int(id)) + '_' + sta + '_' + comp + '.mseed'\n\n return fname\n\n\npick_data('/home/bachura/Documents/VpVs/2011/picker.inp')\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"Waveform.py","file_name":"Waveform.py","file_ext":"py","file_size_in_byte":19806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"51272701","text":"# -*- coding: utf-8 -*-\n##########################################################################\n#\n# Copyright (c) 2016-Present Webkul Software Pvt. Ltd. 
()\n# Author : www.webkul.com\n#\n##########################################################################\n\nfrom openerp import models, fields, api, _\nfrom openerp import SUPERUSER_ID\nfrom openerp.exceptions import except_orm, Warning, RedirectWarning\n\n\nclass ProductTemplate(models.Model):\n _inherit = 'product.template'\n _description = \"Marketplace Product Template\"\n\n @api.model\n def _set_seller_id(self):\n user_obj = self.env['res.users'].sudo().browse(self._uid)\n if user_obj.partner_id and user_obj.partner_id.seller:\n return user_obj.partner_id.id\n return self.env['res.partner']\n\n @api.model\n def _get_pending_qty_request(self):\n for obj in self:\n mp_stock_obj = self.env[\"marketplace.stock\"].search(\n [('product_temp_id', '=', obj.id), ('state', '=', 'requested')])\n if mp_stock_obj:\n obj.pending_qty_request = True\n else:\n obj.pending_qty_request = False\n\n status = fields.Selection([('draft', 'Draft'), ('pending', 'Pending'), (\n 'approved', 'Approved'), ('rejected', 'Rejected')], \"Marketplace Status\", default=\"draft\")\n qty = fields.Float(string=\"Initial Quantity\",\n help=\"Initial quantity of the product which you want to update in warehouse for inventory purpose.\")\n template_id = fields.Many2one(\n \"product.template\", string=\"Product Template Id\")\n marketplace_seller_id = fields.Many2one(\n \"res.partner\", string=\"Seller\", default=_set_seller_id)\n color = fields.Integer('Color Index')\n pending_qty_request = fields.Boolean(\n string=\"Request Pending\", compute=_get_pending_qty_request)\n\n @api.multi\n def toggle_website_published(self):\n \"\"\" Inverse the value of the field ``website_published`` on the records in ``self``. \"\"\"\n for record in self:\n record.website_published = not record.website_published\n\n @api.multi\n def auto_approve(self):\n for obj in self:\n if obj.marketplace_seller_id.auto_product_approve:\n obj.write({\"status\": \"pending\"})\n obj.sudo().approved()\n else:\n obj.write({\"status\": \"pending\"})\n return True\n\n @api.multi\n def approved(self):\n for obj in self:\n if not obj.marketplace_seller_id:\n raise Warning(\n _(\"Marketplace seller id is not assign to this product.\"))\n if obj.marketplace_seller_id.state == \"approved\":\n obj.write({\"sale_ok\": True})\n obj.set_initial_qty()\n obj.write({\"status\": \"approved\"})\n template = self.pool.get('mail.template')\n config_setting_obj = self.env[\n 'marketplace.config.settings'].get_default_values()\n if config_setting_obj[\"enable_notify_admin_on_product_approve_reject\"] and config_setting_obj.has_key(\"notify_admin_on_product_approve_reject\") and config_setting_obj[\"notify_admin_on_product_approve_reject\"]:\n # Notify to admin by admin when product approved\n temp_id = config_setting_obj[\n \"notify_admin_on_product_approve_reject\"]\n x = template.send_mail(\n self._cr, self._uid, temp_id, obj.id, True)\n if config_setting_obj[\"enable_notify_seller_on_product_approve_reject\"] and config_setting_obj.has_key(\"notify_seller_on_product_approve_reject\") and config_setting_obj[\"notify_seller_on_product_approve_reject\"]:\n # Notify to Seller by admin when product approved\n temp_id2 = config_setting_obj[\n \"notify_seller_on_product_approve_reject\"]\n x2 = template.send_mail(\n self._cr, self._uid, temp_id2, obj.id, True)\n else:\n raise Warning(\n _(\"Marketplace seller of this product is not approved.\"))\n return True\n\n @api.multi\n def reject(self):\n for product_obj in self:\n if product_obj.status in (\"draft\", \"pending\", 
\"approved\") and product_obj.marketplace_seller_id:\n product_obj.write(\n {\"sale_ok\": False, \"website_published\": False, \"status\": \"rejected\"})\n template = self.pool.get('mail.template')\n config_setting_obj = self.env[\n 'marketplace.config.settings'].get_default_values()\n if config_setting_obj[\"enable_notify_admin_on_product_approve_reject\"] and config_setting_obj.has_key(\"notify_admin_on_product_approve_reject\") and config_setting_obj[\"notify_admin_on_product_approve_reject\"]:\n # Notify to admin by admin when product rejected\n temp_id = config_setting_obj[\n \"notify_admin_on_product_approve_reject\"]\n x = template.send_mail(\n self._cr, self._uid, temp_id, product_obj.id, True)\n if config_setting_obj[\"enable_notify_seller_on_product_approve_reject\"] and config_setting_obj.has_key(\"notify_seller_on_product_approve_reject\") and config_setting_obj[\"notify_seller_on_product_approve_reject\"]:\n # Notify to Seller by admin when product rejected\n temp_id2 = config_setting_obj[\n \"notify_seller_on_product_approve_reject\"]\n x2 = template.send_mail(\n self._cr, self._uid, temp_id2, product_obj.id, True)\n\n # Called in server action\n @api.multi\n def approve_product(self):\n for product_obj in self:\n if product_obj.status == \"pending\" and product_obj.marketplace_seller_id:\n product_obj.signal_workflow(\"approve\")\n\n # Called in server action\n @api.multi\n def reject_product(self):\n for product_obj in self:\n if product_obj.status in (\"draft\", \"pending\", \"approved\") and product_obj.marketplace_seller_id:\n product_obj.signal_workflow(\"rejected\")\n\n @api.model\n def create(self, vals):\n ''' Set default false to sale_ok and purchase_ok for seller product'''\n user = self.env[\"res.users\"].sudo().browse(self._uid)\n if user:\n if user.sudo().partner_id.seller:\n vals[\"sale_ok\"] = False\n vals[\"type\"] = \"product\"\n is_purchase_install = self.env['ir.module.module'].sudo().search([('name', 'in', ['purchase']), ('state', 'in', [\n 'to install', 'installed', 'to upgrade'])])\n if is_purchase_install:\n vals[\"purchase_ok\"] = False\n config_setting_obj = self.env[\n 'marketplace.config.settings'].get_default_values()\n if config_setting_obj['internal_categ']:\n vals[\"categ_id\"] = config_setting_obj['internal_categ']\n product_template = super(ProductTemplate, self).create(vals)\n return product_template\n\n @api.multi\n def set_initial_qty(self):\n for template_obj in self:\n if len(self) == 1:\n if template_obj.qty < 0:\n raise Warning(_('Initial Quantity can not be negative'))\n if not template_obj.marketplace_seller_id.location_id:\n raise Warning(_(\"Seller has no location/warehouse.\"))\n if template_obj.qty > 0:\n vals = {\n 'product_id': template_obj.product_variant_ids[0].id,\n 'product_temp_id': template_obj.id,\n 'new_quantity': template_obj.qty,\n 'location_id': template_obj.marketplace_seller_id.location_id.id or False, # Phase 2\n 'note': _(\"Initial Quantity.\"),\n 'state': \"requested\",\n }\n mp_product_stock = self.env['marketplace.stock'].create(vals)\n mp_product_stock.auto_approve()\n\n def disable_seller_all_products(self, seller_id):\n if seller_id:\n product_objs = self.search(\n [(\"marketplace_seller_id\", \"=\", seller_id)])\n product_objs.reject()\n","sub_path":"odoo_marketplace 0/models/marketplace_product.py","file_name":"marketplace_product.py","file_ext":"py","file_size_in_byte":8449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"453221588","text":"import 
requests\nfrom re import findall\nfrom termcolor import colored\nfrom configparser import RawConfigParser\n\n\ndef init(domain):\n\tC99 = []\n\n\tprint(colored(\"[*]-Searching C99...\", \"yellow\"))\n\n\tparser = RawConfigParser()\n\tparser.read(\"config.ini\")\n\tC99_API_KEY = parser.get(\"C99\", \"C99_API_KEY\")\n\n\tif C99_API_KEY == \"\":\n\t\tprint(\" \\__\", colored(\"No C99 API key configured\", \"red\"))\n\t\treturn []\n\n\telse:\n\t\theaders = {\"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:88.0) Gecko/20100101 Firefox/88.0\"}\n\t\turl = \"https://api.c99.nl/subdomainfinder?key={0}&domain={1}&json\".format(C99_API_KEY, domain)\n\n\t\ttry:\n\t\t\tresponse = requests.get(url, headers=headers)\n\t\t\tresults = findall(\"([\\w\\d][\\w\\d\\-\\.]*\\.{0})\".format(domain.replace(\".\", \"\\.\")), response.text)\n\t\t\t\n\t\t\tif results:\n\t\t\t\tC99.extend(results)\n\t\t\t\tC99 = set(C99)\n\n\t\t\tprint(\" \\__ {0}: {1}\".format(colored(\"Subdomains found\", \"cyan\"), colored(len(C99), \"yellow\")))\n\t\t\treturn C99\n\n\t\texcept requests.exceptions.RequestException as err:\n\t\t\tprint(\" \\__\", colored(err, \"red\"))\n\t\t\treturn []\n\n\t\texcept requests.exceptions.HTTPError as errh:\n\t\t\tprint(\" \\__\", colored(errh, \"red\"))\n\t\t\treturn []\n\n\t\texcept requests.exceptions.ConnectionError as errc:\n\t\t\tprint(\" \\__\", colored(errc, \"red\"))\n\t\t\treturn []\n\n\t\texcept requests.exceptions.Timeout as errt:\n\t\t\tprint(\" \\__\", colored(errt, \"red\"))\n\t\t\treturn []\n\t\t\n\t\texcept Exception:\n\t\t\tprint(\" \\__\", colored(\"Something went wrong!\", \"red\"))\n\t\t\treturn []","sub_path":"collectors/C99.py","file_name":"C99.py","file_ext":"py","file_size_in_byte":1450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"203385613","text":"from unittest import TestCase\n\nfrom cloudshell.networking.cisco.iosxr.command_templates import cisco_ios_xr_cmd_templates\n\n\nclass TestCiscoIOSXRCmdTemplates(TestCase):\n\n def setUp(self):\n self.path = \"ftp://admin:password@10.10.10.10/CloudShell/config\"\n self.firmware_file_name = \"name\"\n\n def test_install_commit(self):\n output = \"install commit\"\n output_admin = \"admin install commit\"\n commit = cisco_ios_xr_cmd_templates.INSTALL_COMMIT.get_command(admin=None)\n commit_admin = cisco_ios_xr_cmd_templates.INSTALL_COMMIT.get_command(admin=\"\")\n\n self.assertEqual(output, commit.get('command'))\n self.assertEqual(output_admin, commit_admin.get('command'))\n\n def test_install_activate(self):\n packages_new = \"pkg1 pkg2\"\n packages_old = \"disk0:pkg1 disk0:pkg2\"\n output = \"install activate {}\".format(packages_new)\n output_admin = \"admin install activate {} synchronous\".format(packages_old)\n activate = cisco_ios_xr_cmd_templates.INSTALL_ACTIVATE.get_command(admin=None, feature_names=packages_new,\n sync=None)\n activate_admin = cisco_ios_xr_cmd_templates.INSTALL_ACTIVATE.get_command(admin=\"\", feature_names=packages_old,\n sync=\"\")\n\n self.assertEqual(output, activate.get('command'))\n self.assertEqual(output_admin, activate_admin.get('command'))\n\n def test_install_add_source(self):\n file_extension = \"tar\"\n result1 = \"install add source {} {}\".format(self.path, self.firmware_file_name)\n result2 = \"admin install add source {} {} synchronous\".format(self.path, self.firmware_file_name)\n result3 = \"admin install add source {} {} {} synchronous\".format(self.path, file_extension,\n self.firmware_file_name)\n 
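# Added note: the three calls below exercise INSTALL_ADD_SRC with the optional\n        # admin/sync/file_extension arguments absent (None) versus present (empty\n        # string, 'tar'), which should yield the plain, admin-synchronous, and\n        # extension-qualified command strings expected in result1-result3 above.\n        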
install_add_source_1 = cisco_ios_xr_cmd_templates.INSTALL_ADD_SRC.get_command(path=self.path,\n file_extension=None,\n file_name=self.firmware_file_name,\n admin=None,\n sync=None)\n install_add_source_2 = cisco_ios_xr_cmd_templates.INSTALL_ADD_SRC.get_command(path=self.path,\n file_extension=None,\n file_name=self.firmware_file_name,\n admin=\"\",\n sync=\"\")\n install_add_source_3 = cisco_ios_xr_cmd_templates.INSTALL_ADD_SRC.get_command(path=self.path,\n file_extension=file_extension,\n file_name=self.firmware_file_name,\n admin=\"\",\n sync=\"\")\n\n self.assertEqual(result1, install_add_source_1.get('command'))\n self.assertEqual(result2, install_add_source_2.get('command'))\n self.assertEqual(result3, install_add_source_3.get('command'))\n","sub_path":"package/tests/networking/cisco/iosxr/command_templates/test_iosxr_cmd_templates.py","file_name":"test_iosxr_cmd_templates.py","file_ext":"py","file_size_in_byte":3772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"212945444","text":"import tcod\r\nfrom dice import roll_dice\r\n\r\nfrom random import randint\r\nfrom game_messages import Message\r\nfrom renderer.render_functions import draw_animation\r\n\r\n\r\ndef heal(*args, **kwargs):\r\n entity = args[0]\r\n amount = kwargs.get('amount')\r\n\r\n results = []\r\n\r\n if entity.__Fighter.hp == entity.__Fighter.max_hp:\r\n results.append({'consumed': False, 'message': Message('You are already at full health', tcod.yellow)})\r\n else:\r\n entity.__Fighter.heal(amount)\r\n results.append({'consumed': True, 'message': Message('Your wounds start to feel better!', tcod.green)})\r\n\r\n return results\r\n\r\ndef read(*args, **kwargs):\r\n entity = args[0]\r\n about = kwargs.get('about')\r\n #content = kwargs.get('content')\r\n\r\n results = []\r\n\r\n results.append({'used': True,\r\n 'message': Message(F'You open the book and read... This is a book {about}.', tcod.white)})\r\n\r\n return results\r\n\r\ndef talisman(*args, **kwargs):\r\n results = []\r\n\r\n chance = randint(1,9)\r\n if chance == 1:\r\n talk = \"askes you how are things going.\"\r\n elif chance == 2:\r\n talk = \"tells you that she and Mary knew each other.\"\r\n elif chance == 3:\r\n talk = \"says she think that you should hurry.\"\r\n elif chance == 4:\r\n talk = \"tells you that death is nothing compared to the meaningless of life.\"\r\n elif chance == 5:\r\n talk = \"smiles breifly.\"\r\n elif chance == 6:\r\n talk = \"worries about Mary.\"\r\n elif chance == 7:\r\n talk = \"assures you how time travel is completely possible even within the theory of relativity.\"\r\n elif chance == 8:\r\n talk = \"laughs softly.\"\r\n elif chance == 9:\r\n talk = \"tells you she could really have a walk, but is trapped in this talisman.\"\r\n\r\n results.append({'used': True,\r\n 'message': Message(F'You look at the talisman. 
The devil of the talisman {talk}', tcod.lighter_purple)})\r\n return results\r\n\r\ndef cast_spell(*args, **kwargs):\r\n results = []\r\n camera = kwargs.get('camera')\r\n con = kwargs.get('console')\r\n screen_width = kwargs.get('screen_width')\r\n screen_height = kwargs.get('screen_height')\r\n fov_map = kwargs.get('fov_map')\r\n\r\n caster = args[0]\r\n entities = kwargs.get('entities')\r\n damage = roll_dice(kwargs.get('damage'))\r\n maximum_range = kwargs.get('maximum_range')\r\n\r\n target = None\r\n closest_distance = maximum_range + 1\r\n\r\n for entity in entities:\r\n if entity._Fighter and entity != caster and fov_map.fov[entity.y, entity.x]:\r\n distance = caster.distance_to(entity)\r\n\r\n if distance < closest_distance:\r\n target = entity\r\n closest_distance = distance\r\n\r\n if target:\r\n draw_animation(con, camera, screen_width, screen_height, target.x, target.y, 'flash')\r\n tcod.console_blit(con, 0, 0, screen_width, screen_height, 0, 0, 0)\r\n tcod.console_flush(keep_aspect=True)\r\n\r\n results.append({'consumed': True, 'target': target,\r\n 'message': Message(F'A crackling stream of energy hits {target.name} for {damage} hit points.')})\r\n results.extend(target._Fighter.take_damage(damage))\r\n else:\r\n results.append({'consumed': False, 'target': None, 'message': Message('No enemy is close enough to strike.', tcod.red)})\r\n\r\n return results\r\n\r\ndef cast_fireball(*args, **kwargs):\r\n results = []\r\n camera = kwargs.get('camera')\r\n con = kwargs.get('console')\r\n screen_width = kwargs.get('screen_width')\r\n screen_height = kwargs.get('screen_height')\r\n fov_map = kwargs.get('fov_map')\r\n\r\n entities = kwargs.get('entities')\r\n damage_dice = kwargs.get('damage')\r\n radius = kwargs.get('radius')\r\n r = int((radius-1) / 2)\r\n target_x = kwargs.get('target_x') - camera.x\r\n target_y = kwargs.get('target_y') - camera.y\r\n\r\n if not fov_map.fov[target_y, target_x]:\r\n results.append({'consumed': False,\r\n 'message': Message('You cannot target a tile outside your field of view.', tcod.yellow)})\r\n return results\r\n\r\n for x in range(target_x - r, target_x + r + 1):\r\n for y in range(target_y - r, target_y + r + 1):\r\n draw_animation(con, camera, screen_width, screen_height, x,y, 'explosion')\r\n\r\n tcod.console_blit(con, 0, 0, screen_width, screen_height, 0, 0, 0)\r\n tcod.console_flush(keep_aspect=True)\r\n results.append({'consumed': True,\r\n 'message': Message(F'The flaming sphere explodes!', tcod.orange)})\r\n for entity in entities:\r\n if entity.distance(target_x, target_y) <= radius and entity._Fighter:\r\n damage = roll_dice(damage_dice)\r\n results.append({'message': Message(F'{entity.name} gets blasted for {damage} hit points.', tcod.orange)})\r\n results.extend(entity._Fighter.take_damage(damage))\r\n return results","sub_path":"item_functions.py","file_name":"item_functions.py","file_ext":"py","file_size_in_byte":4911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"111124237","text":"import numpy as np\nimport regreg.api as rr\n#from selection.bayesian.cisEQTLS.tests.CV_lambda import tuning_parameter_glmnet\n# from rpy2.robjects.packages import importr\n# from rpy2 import robjects\n# glmnet = importr('glmnet')\n#import rpy2.robjects.numpy2ri\n#rpy2.robjects.numpy2ri.activate()\nimport numpy as np\nimport regreg.api as rr\nfrom selection.tests.instance import gaussian_instance\n\n\n# def tuning_parameter_glmnet(X, y):\n# robjects.r('''\n# glmnet_cv = function(X,y, 
lam_seq=NA){\n# y = as.matrix(y)\n# X = as.matrix(X)\n# if (is.na(lam_seq)){\n# G_CV = cv.glmnet(X, y, standardize=FALSE, intercept=FALSE)\n# }\n# else {\n# G_CV = cv.glmnet(X, y, standardize=FALSE, intercept=FALSE, lambda=lam_seq)\n# }\n# lam_1SE = G_CV$lambda.1se\n# lam_minCV = G_CV$lambda.min\n# n = nrow(X)\n# lam_minCV = lam_minCV*n\n# lam_1SE = lam_1SE*n\n# lam_seq = G_CV$lambda*n\n# result = list(lam_minCV=lam_minCV, lam_1SE=lam_1SE, lam_seq = lam_seq, CV_err=G_CV$cvm, SD=G_CV$cvsd)\n# return(result)\n# }''')\n#\n# r_glmnet_cv = robjects.globalenv['glmnet_cv']\n# n, p = X.shape\n# r_X = robjects.r.matrix(X, nrow=n, ncol=p)\n# r_y = robjects.r.matrix(y, nrow=n, ncol=1)\n# result = r_glmnet_cv(r_X, r_y)\n# lam_minCV = result[0][0]\n# lam_1SE = result[1][0]\n# return lam_minCV, lam_1SE\n\n\ndef selection(X, y, random_Z, randomization_scale=1, sigma=None, method=\"theoretical\"):\n n, p = X.shape\n loss = rr.glm.gaussian(X,y)\n epsilon = 1. / np.sqrt(n)\n lam_frac = 1.\n if sigma is None:\n sigma = 1.\n if method == \"theoretical\":\n lam = 1. * sigma * lam_frac * np.mean(np.fabs(np.dot(X.T, np.random.standard_normal((n, 10000)))).max(0))\n # elif method == \"cross-validation\":\n # lam = tuning_parameter_glmnet(X, y)[1]\n # print(lam)\n\n W = np.ones(p)*lam\n penalty = rr.group_lasso(np.arange(p), weights = dict(zip(np.arange(p), W)), lagrange=1.)\n\n # initial solution\n\n problem = rr.simple_problem(loss, penalty)\n random_term = rr.identity_quadratic(epsilon, 0, -randomization_scale * random_Z, 0)\n solve_args = {'tol': 1.e-10, 'min_its': 100, 'max_its': 500}\n\n\n solve_args = {'tol': 1.e-10, 'min_its': 100, 'max_its': 500}\n initial_soln = problem.solve(random_term, **solve_args)\n active = (initial_soln != 0)\n if np.sum(active) == 0:\n return None\n initial_grad = loss.smooth_objective(initial_soln, mode='grad')\n betaE = initial_soln[active]\n subgradient = -(initial_grad+epsilon*initial_soln-randomization_scale*random_Z)\n cube = subgradient[~active]/lam\n return lam, epsilon, active, betaE, cube, initial_soln\n\n#creating instance X,y,beta: for a single X, sampling lots of y\n\nclass instance(object):\n\n def __init__(self, n, p, s, snr=5, sigma=1., rho=0, random_signs=False, scale =True, center=True):\n (self.n, self.p, self.s,\n self.snr,\n self.sigma,\n self.rho) = (n, p, s,\n snr,\n sigma,\n rho)\n\n self.X = (np.sqrt(1 - self.rho) * np.random.standard_normal((self.n, self.p)) +\n np.sqrt(self.rho) * np.random.standard_normal(self.n)[:, None])\n if center:\n self.X -= self.X.mean(0)[None, :]\n if scale:\n self.X /= (self.X.std(0)[None, :] * np.sqrt(self.n))\n\n self.beta = np.zeros(p)\n self.beta[:self.s] = self.snr\n if random_signs:\n self.beta[:self.s] *= (2 * np.random.binomial(1, 0.5, size=(s,)) - 1.)\n self.active = np.zeros(p, np.bool)\n self.active[:self.s] = True\n\n def _noise(self):\n return np.random.standard_normal(self.n)\n\n def generate_response(self):\n\n Y = (self.X.dot(self.beta) + self._noise()) * self.sigma\n return self.X, Y, self.beta * self.sigma, np.nonzero(self.active)[0], self.sigma\n\n","sub_path":"selection/reduced_optimization/initial_soln.py","file_name":"initial_soln.py","file_ext":"py","file_size_in_byte":3979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"103929831","text":"import datetime\r\nimport os\r\nimport time\r\nimport shutil\r\ni =0\r\nl = ''\r\nnow = datetime.datetime.now()\r\ntime_now = now.strftime(\"%Y%m%d%H%M%S\")\r\nend_date=''\r\n\r\n\r\nprint('choose the time when 
youre exploid start works')\r\nprint('Year-(1) ' + 'Month-(2)' + ' Day = (3) ' + 'Hours-(4)' + ' Minutes-(5)' + ' Seconds-(6)' )\r\nfor i in range(1,7):\r\n print((i))\r\n y = str(input())\r\n l = end_date + y\r\n end_date = l\r\n i+=1\r\n#Ask which actions uses wanna make \r\n\r\ndef ask(): \r\n print('you should choose one of the two exploide\\'s mode')\r\n print('if you want to delete just one file push ' + '1')\r\n print('if you want to delete all files push ' + '2')\r\n return input()\r\n \r\n# if condition equal 1\r\ndef First(n):\r\n print('Enter a file name' + ' Example main.txt, index.html')\r\n i = os.path.abspath(n)\r\n os.remove(i)\r\n print('Deleting was completely done :)')\r\n \r\nb= '' \r\n#if condition equal 2 \r\ndef second():\r\n global b \r\n \r\n l = os.getcwd()\r\n path = os.path.join(os.path.abspath(os.path.dirname(__file__)), l)\r\n shutil.rmtree(path)\r\n \r\n \r\n print('All Files Was Completely deleted :)') \r\n\r\n\r\nf=ask()\r\n\r\nif int(f) == 1:\r\n print('Enter a file name' + ' Example main.txt, index.html')\r\n m = input() \r\n \r\n#Loops which is checking meaning of time_now \r\nwhile int(time_now) != int(end_date)+1:\r\n now = datetime.datetime.now()\r\n time_now = now.strftime(\"%Y%m%d%H%M%S\")\r\n\r\n \r\n# Main condtion \r\nif int(time_now) >= int(end_date):\r\n if int(f) == 1:\r\n First(m)\r\n elif int(f) == 2:\r\n second()\r\ntime.sleep(200)\r\n\r\n\r\n\r\n\r\n","sub_path":"Exploit.py","file_name":"Exploit.py","file_ext":"py","file_size_in_byte":1616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"125799120","text":"from cyaron import *\ncards = ['', 'A', '2', '3', '4', '5', '6', '7', '8', '9', '10', 'J', 'Q', 'K', 'joker', 'JOKER']\nidx = [0, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3]\nfor i in range(1, 11):\n test_data = IO(file_prefix=\"poker\", data_id=i)\n if idx[i] == 1:\n test_data.input_writeln(cards[randint(1, 15)])\n test_data.input_writeln(cards[randint(1, 15)])\n elif idx[i] == 2:\n la = randint(1, 15)\n lb = 15 - la\n vis = [False for i in range(16)]\n s = ''\n for j in range(la):\n x = randint(1, 15)\n while vis[x]: x = randint(1, 15)\n vis[x] = True\n s += cards[x]\n if j != la - 1: s += ' '\n test_data.input_writeln(s)\n s = ''\n for j in range(lb):\n x = randint(1, 15)\n while vis[x]: x = randint(1, 15)\n vis[x] = True\n s += cards[x]\n if j != lb - 1: s += ' '\n test_data.input_writeln(s)\n else:\n maxn1, maxn2 = [4 for j in range(16)], [4 for j in range(16)]\n maxn1[14], maxn1[15], maxn2[14], maxn2[15] = 1, 1, 1, 1\n la = randint(1, 54)\n lb = randint(1, 54)\n s = ''\n for j in range(la):\n x = randint(1, 15)\n while maxn1[x] == 0: x = randint(1, 15)\n maxn1[x] -= 1\n s += cards[x]\n if j != la - 1: s += ' '\n test_data.input_writeln(s)\n s = ''\n for j in range(lb):\n x = randint(1, 15)\n while maxn2[x] == 0: x = randint(1, 15)\n maxn2[x] -= 1\n s += cards[x]\n if j != lb - 1: s += ' '\n test_data.input_writeln(s)\n test_data.output_gen(\"./std\")","sub_path":"luogu/U125854/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":1693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"639382297","text":"\"\"\"\nQ101\nSymmetric Tree\nEasy\n\n[iterative solution]\n\nGiven a binary tree, check whether it is a mirror of\nitself (ie, symmetric around its center).\n\nFor example, this binary tree [1,2,2,3,4,4,3] is symmetric:\n\n 1\n / \\\n 2 2\n / \\ / \\\n3 4 4 3\n\nBut the following [1,2,2,null,3,null,3] is not:\n\n 1\n / \\\n 
2 2\n \\ \\\n 3 3\n\n\nNote:\nBonus points if you could solve it both recursively and iteratively.\n\"\"\"\n\n\n# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\nclass Solution:\n def isSymmetric(self, root: TreeNode) -> bool:\n\n # find the max depth\n def find_depth(root):\n if root is None:\n return 0\n else:\n return max(find_depth(root.left), find_depth(root.right)) + 1\n\n depth = find_depth(root)\n\n if depth == 0 or depth == 1:\n return True\n\n left_nodes = [root.left]\n right_nodes = [root.right]\n\n for i in range(depth):\n left_next = []\n right_next = []\n\n for left, right in zip(left_nodes, right_nodes):\n\n if (left is None and right is not None) or (\n left is not None and right is None):\n return False\n\n elif left is None and right is None:\n left_next.append(None)\n left_next.append(None)\n right_next.append(None)\n right_next.append(None)\n\n else:\n if left.val != right.val:\n return False\n else:\n left_next.append(left.left)\n left_next.append(left.right)\n right_next.append(right.right)\n right_next.append(right.left)\n\n right_nodes = right_next\n left_nodes = left_next\n\n return True\n\n\na1 = TreeNode(1)\na2 = TreeNode(2)\na3 = TreeNode(1)\n\na2.left = a1\na2.right = a3\n\nsol = Solution()\nprint(sol.isSymmetric(a2))\n\n\n","sub_path":"Q101.py","file_name":"Q101.py","file_ext":"py","file_size_in_byte":2112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"117109643","text":"import sys\nimport random\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtGui import *\nimport weapon\n\n\nclass AutoStarDialog(QDialog):\n def __init__(self):\n super().__init__()\n self.setupUI()\n self.nowYouSeeMe=False\n self.goal = 0\n \n\n def setupUI(self):\n self.setGeometry(760, 380, 300, 100)\n self.setWindowTitle(\"자동 강화\")\n\n label1 = QLabel(\"원하는 스타포스『10~22』 : \\n 『23성 이상은 위험』\\n\\n파괴 1회 : 사용 메소 +20억\")\n\n self.lineEdit1 = QLineEdit()\n self.pushButton1= QPushButton(\"Start\")\n self.pushButton1.clicked.connect(self.pushButtonClicked)\n\n self.chBox = QCheckBox('Now you see me (20ms)',self)\n\n layout = QGridLayout()\n layout.addWidget(label1, 0, 0)\n layout.addWidget(self.lineEdit1, 0, 1)\n layout.addWidget(self.pushButton1, 0, 2)\n layout.addWidget(self.chBox,1,2)\n\n self.setLayout(layout)\n\n def pushButtonClicked(self):\n if(10 <= int(self.lineEdit1.text()) <= 25):\n self.goal = self.lineEdit1.text()\n self.nowYouSeeMe = self.chBox.isChecked()\n print(self.nowYouSeeMe)\n self.close()\n else:\n msg = QMessageBox.warning(self,'ERROR','잘못된 입력입니다.')\n \nclass MyWindow(QMainWindow):\n # 강화에 필요한 메소\n mesoRate=[321000, 641000, 961000, 1281000, 1601000,\n 1921000, 2241000, 2561000, 2881000, 3201000,\n 12966500, 16400100, 20356300, 24865300, 29956500,\n 71316500, 83999600, 98016700, 113422300, 130270000,\n 148612400, 168501500, 189988600, 213124000, 237957700]\n\n # 강화 성공 확률\n successRateNoStar=[0.95, 0.9, 0.85, 0.85, 0.8,\n 0.75, 0.7, 0.65, 0.6, 0.55,\n 0.5, 0.45, 0.4, 0.35, 0.3,\n 0.3, 0.3, 0.3, 0.3, 0.3,\n 0.3, 0.3, 0.03, 0.02, 0.01]\n\n # 파괴확률\n destroyRate=[0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0,\n 0, 0, 0.006, 0.013, 0.014,\n 0.021, 0.021, 0.021, 0.028, 0.028,\n 0.07, 0.07, 0.2, 0.3, 0.4]\n\n mesoTotal=0\n star=0\n failRepeatCount = 0\n\n def __init__(self):\n super().__init__()\n self.setGeometry(800, 300, 220, 403)\n self.setFixedSize(220, 403)\n self.setWindowTitle(\"starforce\")\n # 
self.setWindowIcon(QIcon(\"starIcon.png\"))\n\n def autoStar_clicked():\n dlg = AutoStarDialog()\n dlg.exec_()\n self.goal = int(dlg.goal)\n nowYouSeeme = dlg.nowYouSeeMe\n print('star goal :',self.goal)\n \n if(nowYouSeeme == False):\n while(self.star < self.goal):\n btnStar_clicked()\n else:\n self.timer = QTimer(self)\n self.timer.start(20)\n self.timer.timeout.connect(autoStar)\n \n def autoStar():\n if(self.star >= self.goal):\n self.timer.stop()\n else:\n btnStar_clicked()\n\n def btnStar_clicked():\n print(\"강화하기 클릭\")\n \n rate = random.random()\n\n if(self.failRepeatCount==2):\n print(\"\\n\\n찬스타임 발동!! 강화 성공!!!\")\n self.star+=1\n self.failRepeatCount=0\n\n elif(rate <= self.successRateNoStar[self.star]):\n print(\"\\n\\n강화 성공!!\")\n self.star+=1\n self.failRepeatCount=0\n\n elif(rate <= self.successRateNoStar[self.star]+self.destroyRate[self.star]):\n print(\"\\n\\n장비가 파괴되었습니다!!!!!!!!\")\n self.star=12\n self.mesoTotal+=2000000000 # 파괴시 아이템값 20억메소 \n\n else:\n if(self.star>10):\n if(self.star != 15 and self.star != 20):\n print(\"\\n\\n강화 실패!! ( 스타포스 하락 )\")\n self.star-=1\n self.failRepeatCount+=1\n else:\n print(\"\\n\\n강화 실패!! ( 스타포스 유지 )\")\n else:\n print(\"\\n\\n강화 실패!! ( 스타포스 유지 )\")\n \n self.mesoTotal += self.mesoRate[self.star]\n\n labelItemName.setText(\"아케인셰이드 투핸드소드 ★\"+str(self.star))\n labelMesoTotal.setText(\"총 사용 메소 : \"+format(self.mesoTotal, \",\"))\n\n if(self.star >= 20):\n labelItemImage.setStyleSheet(\"color: green\")\n elif(self.star >= 15):\n labelItemImage.setStyleSheet(\"color: rgb(150,100,0)\")\n elif(self.star >= 10):\n labelItemImage.setStyleSheet(\"color: rgb(200,0,255)\")\n elif(self.star >= 5):\n labelItemImage.setStyleSheet(\"color: rgb(0,0,200)\")\n\n def btnStarReset_clicked():\n print(\"강화 초기회 클릭\")\n self.star = 0\n labelItemName.setText(\"아케인셰이드 투핸드소드 ★\"+str(self.star))\n \n def btnMesoReset_clicked():\n print(\"메소 초기회 클릭\")\n self.mesoTotal=0\n labelMesoTotal.setText(\"총 사용 메소 : \"+format(self.mesoTotal, \",\"))\n \n def btnQuit_clicked():\n print(\"자동강화 클릭\")\n autoStar_clicked()\n\n \n labelItemName = QLabel(\"아케인셰이드 투핸드소드 ★\"+str(self.star),self)\n labelItemName.setGeometry(5, 2, 300, 20)\n labelItemName.show()\n\n labelItemImage = QLabel(weapon.twoHandSword,self)\n labelItemImage.setGeometry(10,15,200,250)\n my_font = QFont(\"Lucida Console\", 1)\n labelItemImage.setFont(my_font)\n\n labelMesoTotal = QLabel(\"총 사용 메소 : \"+str(self.mesoTotal), self)\n labelMesoTotal.setGeometry(8, 219, 300, 100)\n\n btnStar = QPushButton(\"강화하기\", self)\n btnStar.setGeometry(5, 280, 210, 38)\n btnStar.clicked.connect(btnStar_clicked)\n\n btnStarReset = QPushButton(\"강화 초기화\", self)\n btnStarReset.setGeometry(5, 320, 103, 38)\n btnStarReset.clicked.connect(btnStarReset_clicked)\n \n\n btnMesoReset = QPushButton(\"메소 초기화\", self)\n btnMesoReset.setGeometry(112, 320, 103, 38)\n btnMesoReset.clicked.connect(btnMesoReset_clicked)\n\n btnQuit = QPushButton(\"자동강화\", self)\n btnQuit.setGeometry(5, 360, 210, 38)\n btnQuit.clicked.connect(btnQuit_clicked)\n \n self.show()\n\n def closeEvent(self, event):\n reply = QMessageBox.question(self, 'Message', 'Are you sure to quit?',\n QMessageBox.Yes | QMessageBox.No, QMessageBox.No)\n\n if reply == QMessageBox.Yes:\n event.accept()\n else:\n event.ignore()\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n ex = MyWindow()\n 
sys.exit(app.exec_())\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"232319519","text":"import couchdb\nfrom process_tweet import get_sentiment\nimport time\nimport sys\n\ndb_name = 'aus_analytics'\n\ncouch = couchdb.Server('http://115.146.93.17:5984')\ndb = couch[db_name]\n\n\nprint('started')\nsys.stdout.flush()\noptions = {'limit': 50000}\n\nwhile True:\n count = 0\n for emp_match in db.view('helper_p/no_sent', **options):\n if count % 100 == 0:\n print(\"Processed %d tweets\" % count)\n sys.stdout.flush()\n emp_match.value['sentiment'] = get_sentiment(emp_match.value['text'])\n if count % 10 == 0:\n print(\"Tagged with: %s\" % emp_match.value['sentiment'])\n sys.stdout.flush()\n db.save(emp_match.value)\n retry = False\n while True:\n try:\n if retry:\n couch = couchdb.Server('http://115.146.93.17:5984')\n db = couch[db_name]\n db.save(emp_match.value)\n break\n except couchdb.http.ServerError as e:\n print(\"Server error %s. Sleeping for 60 seconds\" % e)\n sys.stdout.flush()\n time.sleep(60)\n retry = True\n count += 1\n if count == 0:\n break\nprint(\"Finished\")\n","sub_path":"存档/欣然/past example/tag_existing.py","file_name":"tag_existing.py","file_ext":"py","file_size_in_byte":1223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"209802397","text":"# 分布式进程\n# 指导网址: https://blog.csdn.net/Solo95/article/details/78913709\n#task_master.py\nimport random, time, queue\nfrom multiprocessing.managers import BaseManager\nfrom multiprocessing import freeze_support\n\n#send queue 发送队列\ntask_queue = queue.Queue()\n#receiver queue 接收队列\nresult_queue = queue.Queue()\n\nclass QueueManager(BaseManager):\n pass\n\n#注册2个queue到网络上 使用callable和匿名函数关联了Queue对象\n'''仅适用Linux Windows下callable不能使用lambda表达式赋值\nQueueManager.register('get_task_queue', callable=lambda: task_queue)\nQueueManager.register('get_result_queue', callable=lambda: result_queue)\n'''\ndef return_task_queue():\n global task_queue\n return task_queue\ndef return_result_queue():\n global result_queue\n return result_queue\n\ndef runf():\n QueueManager.register('get_task_queue', callable=return_task_queue)\n QueueManager.register('get_result_queue', callable=return_result_queue)\n #绑定端口5000,设置验证密码'abc'\n manager = QueueManager(address=('127.0.0.1', 5000), authkey=b'abc') \n #Linux下address留空等于本机 Windows下不能留空 127.0.0.0即本机的地址\n #启动Queue\n manager.start()\n #通过网络获取Queue对象\n task = manager.get_task_queue()\n result = manager.get_result_queue()\n #开启示例任务\n for i in range(10):\n n = random.randint(0, 10000)\n print('Put task %d to run...' 
%n)\n task.put(n)\n #读取任务结果\n print('Try to get results...')\n for i in range(10):\n r = result.get(timeout=10)\n print('Results: %s' %r)\n manager.shutdown()\n print('master has been shoutdown')\n\nif __name__ == '__main__':\n freeze_support()\n runf()","sub_path":"Python/PAT/distributedp-master.py","file_name":"distributedp-master.py","file_ext":"py","file_size_in_byte":1757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"382706966","text":"import cmd\nfrom .graph import rgraph, rtree\nfrom .drawer import Drawer\nfrom .simulate import Simulator\nimport pyparsing as pp\n\n\ndef int_arg(flags):\n return (pp.Suppress(pp.oneOf(flags)) +\n pp.Word(pp.nums)).setParseAction(lambda t: int(t[0]))\n\n\nclass GraphShell(cmd.Cmd):\n prompt = '(G) '\n\n def do_random(self, arg):\n \"\"\"Runs a simulation with a totally random graph.\n -n or --nodes: number of nodes\n -e or --edges: number of edges\n Example: random --nodes 9 --edges 13\"\"\"\n pnodes = int_arg('-n --nodes')\n pedges = int_arg('-e --edges')\n try:\n N, E = (pnodes + pedges).parseString(arg)\n except pp.ParseException:\n print('*** Could not parse arguments:', arg)\n return\n g = rgraph(range(N), E)\n Drawer(g).run()\n\n def do_tree(self, arg):\n \"\"\"Runs a simulation with a random tree graph.\n -c or --maxchildren: maximum number of children a node can have.\n -d or --maxdepth: maximum distance from leaves to root.\n Example: tree --maxchildren 3 --maxdepth 2\n \"\"\"\n pchildren = int_arg('-c --maxchildren')\n pdepth = int_arg('-d --maxdepth')\n try:\n max_children, depth = (pchildren + pdepth).parseString(arg)\n except pp.ParseException:\n print('*** Could not parse arguments:', arg)\n return\n g = rtree(max_children, depth)\n Drawer(g).run()\n\n def do_test(self, arg):\n g = rgraph(range(10), 15)\n s = Simulator(g)\n Drawer(simulator=s).run()\n Drawer(simulator=s).run()\n\n def do_quit(self, arg):\n \"\"\"Close the windows and quit.\n Example: quit\"\"\"\n print('Bye!')\n return True\n","sub_path":"src/shell.py","file_name":"shell.py","file_ext":"py","file_size_in_byte":1755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"76944219","text":"from flask import Flask\nimport config\nfrom flask_restful import Api, Resource, fields, marshal_with\nfrom exts import db\nfrom models import User, Article, Tag\n\napp = Flask(__name__) # type:Flask\napp.config.from_object(config)\n\n# 使用Restful api\napi = Api(app)\ndb.init_app(app)\n\n\n# 准备数据,插入到数据当中\n@app.route('/')\ndef index():\n # 插入数据到数据库中\n user = User(username='xag', email='xingag66@gmail.com')\n article = Article(title='title1', content='content1')\n article.author = user\n tag1 = Tag(name='前端')\n tag2 = Tag(name='Python')\n article.tags.append(tag1)\n article.tags.append(tag2)\n db.session.add(article)\n db.session.commit()\n return '首页!'\n\n\n# restful API\n# http://127.0.0.1:5000/article/1/\nclass ArticleView(Resource):\n # 返回的数据\n # 2.1 重命名\n # 使用attribute 关键字。前面是展示给用户看的,后面attribute指向的是模型的真正字段\n\n # 2.2 默认值\n # 使用default 关键字,如果数据在模型中不存在,就显示默认值\n\n resource_fields = {\n 'article_title': fields.String(attribute='title'),\n 'content': fields.String,\n # 这里需要进行嵌套,才能正确解析author\n 'author': fields.Nested({\n 'username': fields.String,\n 'email': fields.String\n }),\n 'tags': fields.List(fields.Nested({\n 'id': fields.Integer,\n 'name': fields.String\n })),\n 'read_count': fields.Integer(default=80)\n }\n\n @marshal_with(resource_fields)\n def get(self, article_id):\n article 
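The distributed-process master sketch above registers `get_task_queue`/`get_result_queue` on port 5000 with authkey `b'abc'`; its natural counterpart is a worker that connects to the same manager and drains the task queue. A minimal sketch of such a `task_worker.py` (the squaring workload is illustrative, not from the sample):

```python
# task_worker.py -- hypothetical client counterpart to the master above
import time
import queue
from multiprocessing.managers import BaseManager

class QueueManager(BaseManager):
    pass

# Only the names are registered here; the callables live on the master side.
QueueManager.register('get_task_queue')
QueueManager.register('get_result_queue')

manager = QueueManager(address=('127.0.0.1', 5000), authkey=b'abc')
manager.connect()

task = manager.get_task_queue()
result = manager.get_result_queue()

while True:
    try:
        n = task.get(timeout=1)
        print('run task %d * %d...' % (n, n))
        time.sleep(1)
        result.put('%d * %d = %d' % (n, n, n * n))
    except queue.Empty:
        print('task queue is empty.')
        break
print('worker exit.')
```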
= Article.query.get(article_id)\n return article\n\n\napi.add_resource(ArticleView, '/article//', endpoint='article')\n\nif __name__ == '__main__':\n app.run(debug=True)\n\n# ===============================================================================\n\n# 2.重命名属性、返回值默认值、返回值复杂结构\n# 2.1 重命名属性\n# 2.2 返回值的默认值\n# 2.3 返回复杂结构【对象和列表】\n\n# ===============================================================================\n","sub_path":"Python/Flask/2.Flask 进阶/6.WTForms、上传文件、Cookie+Session、CSRF、Flask上下文、钩子函数、信号机制、Restful API/8、Restful API/3. restfulDemo 标准化使用restful api/restfulDemo2.py","file_name":"restfulDemo2.py","file_ext":"py","file_size_in_byte":2142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"565544509","text":"# coding: utf-8\n\n\nimport os\nimport json\nimport time\nimport inspect\n\nimport numpy as np\nimport random\nimport torch\nimport torchvision\nfrom torchvision.ops import misc as misc_nn_ops\nfrom torch.utils.data import DataLoader\n\ntry:\n import custom_dataset\nexcept ModuleNotFoundError as e:\n import sys\n sys.path += ['src']\n\nfrom custom_dataset import collate_fn, UnifiedTextifier, VisualSelectionDataset\n\n\nclass TrainConfig(object):\n \n name_space = 'lcfp_selector_onecommon_exp_0'\n \n # Randomness\n rseed = 0 # None\n cudnn_deterministic = True\n \n # Task definition\n provide_image = True\n coco_image_directories = ['data/img/val2014', 'data/img/train2014']\n dataset_list = {\n 'train': [\n {'task_name':'onecommon.selection', 'path':'onecommon_data/converted/train.json'},\n #{'task_name':'guesswhat.selection', 'path':'data/guesswhat.train.jsonl', \n # 'success_only':True, 'coco_image_directories':coco_image_directories},\n ],\n 'valid': [\n {'task_name':'onecommon.selection', 'path':'onecommon_data/converted/valid.json'},\n #{'task_name':'guesswhat.selection', 'path':'data/guesswhat.valid.jsonl', \n # 'success_only':True, 'coco_image_directories':coco_image_directories},\n ],\n }\n \n # Model definition\n textifier_dict = 'cache/onecommon_textifier_train_u5.json'\n textifier_use_more_than = 5\n train_textifier_ratio_force_unk = 0\n train_optional_info_ratio_force_zero = 0.01\n model_net_args = {\n 'image_shape': (224, 224),\n 'use_bbox': True,\n 'resnet_name': 'resnet50',\n 'used_pyramids': ('l4', 'l3', 'l2', 'l1', 'l0'),\n 'rnn_type': 'gru',\n 'n_rnn_directions': 1,\n 'n_rnn_layers': 1,\n 'dim_token': 256,\n 'dim_lang': 1024,\n 'dim_feature': 256,\n 'dim_object_optional_info': 256,\n 'dim_mlp': 1024,\n 'pooling_method': 'mean',\n 'dropout_ratio_lang': 0,\n 'dropout_ratio_feat': 0,\n 'dropout_ratio_mlp': 0,\n 'coordinate_noise': 0,\n }\n \n # Optimization\n n_workers = 1\n device_name = 'cuda:0'\n n_epoch = 5\n minibatch_size = 32\n optimizer_name = 'Adam'\n optimizer_args = {'lr':5e-4, 'eps':1e-9, 'weight_decay':0, 'betas':(0.9, 0.999)}\n optimizer_scheduler = None \n # optimizer_scheduler = {'name': 'StepLR', 'step_size':2, 'gamma':0.5}\n \n # Others\n models_path = 'models'\n cache_path = 'cache'\n weight_name_template = 'weight_ep%d'\n history_name = 'history.txt'\n \n def __init__(self):\n self.model_dir = os.path.join(self.models_path, self.name_space)\n self.weight_path_template = os.path.join(self.model_dir, self.weight_name_template)\n self.history_path = os.path.join(self.model_dir, self.history_name)\n \n @property\n def device(self):\n return torch.device(self.device_name)\n \n\nclass SummaryHolder(object):\n \n formatter = {\n float: lambda x: '%.3f'%x,\n None: lambda x: str(x),\n } \n\n def 
__setattr__(self, n, v):\n if not hasattr(self, '_name_list'):\n super(SummaryHolder, self).__setattr__('_name_list', [])\n self._name_list.append(n)\n super(SummaryHolder, self).__setattr__(n, v)\n \n def to_str(self, *name_list, prefix='', no_name=False):\n cells = []\n for n in (name_list or self._name_list):\n v = getattr(self, n)\n v = self.formatter.get(type(v), self.formatter[None])(v)\n cells.append(v if no_name else '%s=%s'%(prefix+n, v))\n return ' '.join(cells)\n\n\ndef run_dataset(config, net, opt, data_loader, i_epoch):\n \n start_time = time.time()\n n_samples = 0\n loss_sum = 0\n n_corrects = 0\n \n def summarize():\n _div = lambda x, y: x / y if y != 0 else 0.\n sh = SummaryHolder()\n sh.s = time.time() - start_time\n sh.n = n_samples\n sh.loss = _div(loss_sum, n_samples)\n sh.acc = _div(n_corrects, n_samples)\n return sh\n \n for i_mb, mb in enumerate(data_loader):\n mb = {k:v.to(config.device) if hasattr(v, 'to') else v for k, v in mb.items()}\n ups = net(mb['image'], mb['tokens'], mb['n_tokens'], mb['object_bboxes'], mb['object_optional_info'])\n loss, is_correct = net.calc_loss(ups, mb['n_objects'], mb['ground_truth_id'])\n \n if net.training:\n opt.zero_grad()\n loss.backward()\n opt.step()\n \n n_samples += mb['size']\n loss_sum += (loss.item() * mb['size'])\n n_corrects += is_correct.sum().item()\n \n if (i_mb + 1) % 100 == 0:\n print(i_epoch, summarize().to_str())\n \n return summarize()\n\n\ndef set_random_seed(seed):\n np.random.seed(seed)\n random.seed(seed)\n torch.manual_seed(seed)\n\n\ndef train(config):\n \n # Reproductivity\n if config.rseed is not None:\n set_random_seed(config.rseed)\n torch.backends.cudnn.deterministic = config.cudnn_deterministic\n \n # Initialization on Environment\n if not os.path.exists(config.cache_path):\n os.makedirs(config.cache_path)\n \n if not os.path.exists(config.model_dir):\n os.makedirs(config.model_dir) \n print('model_dir', config.model_dir)\n \n with open(config.history_path, 'w') as f:\n f.write('')\n \n # Tokenizer\n print('#', 'initializing textifiers', '...')\n textifier = UnifiedTextifier.load_or_make(config.textifier_dict,\n config.dataset_list['train'], config.textifier_use_more_than,\n )\n print('textifier vocab size', textifier.get_len())\n \n # Dataset & Data Loader\n print('#', 'loading datasets', '...')\n datasets = {}\n for key, dataset_list in config.dataset_list.items():\n is_train = key == 'train'\n datasets[key] = dataset = VisualSelectionDataset(\n dataset_list=dataset_list,\n textifier=textifier,\n provide_image=config.provide_image,\n image_shape=config.model_net_args['image_shape'],\n dim_object_optional_info=config.model_net_args['dim_object_optional_info'],\n ratio_force_unk=config.train_textifier_ratio_force_unk if is_train else 0,\n ratio_force_zero=config.train_optional_info_ratio_force_zero if is_train else 0,\n )\n print('len dataset', key, len(dataset))\n \n # Model\n # Some modifications will be required to use multi GPU\n print('#', 'constructing a model', '...')\n model_net = SelectorNet(textifier.get_len(), **config.model_net_args)\n model_net.to(config.device)\n model_net.device = config.device\n\n # Optimizer\n opt = getattr(torch.optim, config.optimizer_name)(\n filter(lambda x: x.requires_grad, model_net.parameters()), \n **config.optimizer_args\n )\n scheduler = None\n if config.optimizer_scheduler is not None:\n s_args = config.optimizer_scheduler.copy()\n name = s_args.pop('name')\n scheduler = getattr(torch.optim.lr_scheduler, name)(opt, **s_args)\n \n # Training loop\n print('#', 
'training loop starts')\n print('n_epoch', config.n_epoch)\n for i_epoch in range(config.n_epoch):\n # a setter for worker random number generator's seed\n def worker_init_fn(_id):\n if config.rseed is not None:\n seed = config.rseed + (config.n_workers + 1) * i_epoch + _id\n random.seed(seed)\n np.random.seed(seed)\n \n # training\n data_loader = DataLoader(\n datasets['train'], \n batch_size=config.minibatch_size, \n num_workers=config.n_workers, collate_fn=collate_fn, shuffle=True,\n worker_init_fn=worker_init_fn,\n )\n model_net.train()\n train_summary = run_dataset(config, model_net, opt, data_loader, i_epoch)\n if scheduler is not None:\n scheduler.step()\n \n # validation\n data_loader = DataLoader(\n datasets['valid'], \n batch_size=config.minibatch_size, \n num_workers=config.n_workers, collate_fn=collate_fn, shuffle=False,\n )\n model_net.eval()\n with torch.no_grad():\n valid_summary = run_dataset(config, model_net, opt, data_loader, i_epoch)\n \n # Save states\n weight_path = config.weight_path_template%(i_epoch)\n checkpoint = {\n 'model_net_state_dict': model_net.state_dict(),\n }\n torch.save(checkpoint, weight_path)\n \n # Save history\n with open(config.history_path, 'a') as f:\n f.write(' '.join([\n str(i_epoch),\n train_summary.to_str('loss', 'acc', no_name=True),\n valid_summary.to_str('loss', 'acc', no_name=True),\n ]) + '\\n')\n \n print(' '.join([\n 'ep=%d'%(i_epoch),\n train_summary.to_str(prefix='t_'),\n valid_summary.to_str(prefix='v_'),\n ]))\n\n\ndef positional_encode(images):\n \n try:\n device = images.get_device()\n except:\n device = -1\n if device < 0:\n device = torch.device('cpu')\n \n n, c, h, w = images.size()\n x_coordinate = torch.linspace(-1, 1, w).view(1, 1, 1, w).expand(n, 1, h, w).to(device)\n y_coordinate = torch.linspace(-1, 1, h).view(1, 1, h, 1).expand(n, 1, h, w).to(device)\n images = torch.cat([images, x_coordinate, y_coordinate], 1)\n return images\n\n\nclass FilmBlock(torch.nn.Module):\n \n def __init__(self, in_channel, out_channel, ksize):\n super(FilmBlock, self).__init__()\n self.conv1 = torch.nn.Conv2d(in_channel + 2, out_channel, 1, 1, 0)\n self.conv2 = torch.nn.Conv2d(out_channel, out_channel, ksize, 1, (ksize - 1)//2)\n self.batch_norm = torch.nn.BatchNorm2d(out_channel)\n self.relu = torch.nn.ReLU(inplace=True)\n\n def forward(self, x, c):\n x = positional_encode(x)\n x = self.relu(self.conv1(x))\n residual = x\n beta = c[:, 0].unsqueeze(2).unsqueeze(3).expand_as(x)\n gamma = c[:, 1].unsqueeze(2).unsqueeze(3).expand_as(x)\n x = self.batch_norm(self.conv2(x))\n x = self.relu(x * beta + gamma)\n x = x + residual\n return x\n\n\nclass ImageNormalizer(torch.nn.Module):\n \n def __init__(self, mean, std):\n super(ImageNormalizer, self).__init__()\n self.mean = torch.nn.parameter.Parameter(torch.as_tensor(mean)[None, :, None, None], requires_grad=False)\n self.std = torch.nn.parameter.Parameter(torch.as_tensor(std)[None, :, None, None], requires_grad=False)\n\n def forward(self, x):\n return x.sub_(self.mean).div_(self.std)\n\n\nclass SelectorNet(torch.nn.Module):\n \n # Information about pre-trained resnet\n all_pyramids = ('l4', 'l3', 'l2', 'l1', 'l0')\n defined_dim_pyramids = {\n 'resnet50': {'l4':2048, 'l3':1024, 'l2':512, 'l1':256, 'l0':64},\n 'resnet34': {'l4':512, 'l3':256, 'l2':128, 'l1':64, 'l0':64},\n 'resnet18': {'l4':512, 'l3':256, 'l2':128, 'l1':64, 'l0':64},\n }\n sf_pyramids = {'l4':2, 'l3':2, 'l2':2, 'l1':2, 'l0':1}\n last_scale_factor = 2\n \n # Values from torchvision documentation\n image_normalization_mean = [0.485, 
0.456, 0.406]\n image_normalization_std = [0.229, 0.224, 0.225]\n \n def __init__(self, n_tokens,\n image_shape=(224, 224),\n use_bbox=True,\n resnet_name='resnet50',\n used_pyramids=('l4', 'l3', 'l2', 'l1', 'l0'),\n rnn_type='gru',\n n_rnn_directions=1,\n n_rnn_layers=1,\n dim_token=256,\n dim_lang=1024,\n dim_feature=256,\n dim_object_optional_info=512,\n dim_mlp=1024,\n pooling_method='mean',\n dropout_ratio_lang=0,\n dropout_ratio_feat=0,\n dropout_ratio_mlp=0,\n coordinate_noise=0,\n ):\n super(SelectorNet, self).__init__()\n \n # set arguments as sttributes\n local_dict = locals()\n for a in inspect.getfullargspec(self.__init__).args:\n (a == 'self') or setattr(self, a, local_dict[a])\n del local_dict\n \n self.n_tokens, self.n_sub_tokens = self.n_tokens['main'], self.n_tokens['sub']\n self.device = torch.device('cpu')\n self.dropout_lang = torch.nn.Dropout(self.dropout_ratio_lang)\n self.dropout_feat = torch.nn.Dropout(self.dropout_ratio_feat)\n self.n_used_pyramids = len(self.used_pyramids)\n \n if self.pooling_method == 'mean':\n self.pooling = lambda x: x.mean(axis=(-1, -2))\n else:\n self.pooling = lambda x: x.max(dim=-1)[0].max(dim=-1)[0]\n \n # Language encoder\n self.embed_token = torch.nn.Embedding(self.n_tokens, self.dim_token, padding_idx=0)\n \n rnn_module = torch.nn.LSTM if self.rnn_type=='lstm' else torch.nn.GRU\n self.rnn_lang = rnn_module(\n input_size=self.dim_token,\n hidden_size=self.dim_lang // self.n_rnn_directions,\n bidirectional=(self.n_rnn_directions == 2),\n num_layers=self.n_rnn_layers,\n dropout=self.dropout_ratio_lang if self.n_rnn_layers > 1 else 0,\n bias=True,\n batch_first=True,\n )\n \n # Image feature extracter\n self.image_normalizer = ImageNormalizer(\n mean=self.image_normalization_mean,\n std=self.image_normalization_std,\n )\n self.resnet = getattr(torchvision.models, self.resnet_name)(\n pretrained=True,\n norm_layer=misc_nn_ops.FrozenBatchNorm2d\n )\n self.resnet.requires_grad_(False)\n self.dim_pyramids = self.defined_dim_pyramids[self.resnet_name]\n \n # Vision-language interaction\n self.film_fc = torch.nn.Linear(self.dim_lang, self.n_used_pyramids * 2 * self.dim_feature)\n self.film_blocks = torch.nn.ModuleDict({\n k: FilmBlock(d, self.dim_feature, 3) for k, d in self.dim_pyramids.items()\n })\n self.upsamplers = torch.nn.ModuleDict({\n k: torch.nn.Upsample(scale_factor=sf, mode='nearest') if sf != 1 else \n torch.nn.Identity() for k, sf in self.sf_pyramids.items()\n })\n \n # Optional Information\n self.embed_optional_info = torch.nn.Embedding(\n self.n_sub_tokens, self.dim_object_optional_info, padding_idx=0)\n \n # Probability\n dim_pre_mlp = self.dim_feature + self.dim_object_optional_info\n self.to_logit = torch.nn.Sequential(\n torch.nn.Dropout(self.dropout_ratio_mlp),\n torch.nn.Linear(dim_pre_mlp, self.dim_mlp),\n torch.nn.ReLU(inplace=True),\n torch.nn.Linear(self.dim_mlp, 1),\n )\n \n def forward(self, images, tokens, n_tokens, obj_bboxes, obj_optional_info):\n \"\"\"returns unnormalized probability maps\"\"\"\n \n images = self.image_normalizer(images)\n image_feats = self._forward_resnet(images)\n \n h_lang = self._forward_lang(tokens, n_tokens)\n \n feat_all = self._forward_fuse(h_lang, image_feats)\n output = self._forward_candidates(feat_all, obj_bboxes, obj_optional_info)\n return output\n \n def _forward_resnet(self, x):\n \n resnet = self.resnet\n x = resnet.conv1(x)\n x = resnet.bn1(x)\n x = l0 = resnet.relu(x)\n x = resnet.maxpool(x)\n x = l1 = resnet.layer1(x)\n x = l2 = resnet.layer2(x)\n x = l3 = resnet.layer3(x)\n 
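`_forward_resnet` above re-walks the torchvision ResNet by hand to collect the l0–l4 pyramid. As an aside, newer torchvision (>= 0.11) can produce the same dict through its feature-extraction utility; a sketch, assuming the node names of the stock resnet50 graph:

```python
import torch
import torchvision
from torchvision.models.feature_extraction import create_feature_extractor

resnet = torchvision.models.resnet50(pretrained=True).eval()
extractor = create_feature_extractor(
    resnet,
    return_nodes={'relu': 'l0', 'layer1': 'l1', 'layer2': 'l2',
                  'layer3': 'l3', 'layer4': 'l4'},
)
with torch.no_grad():
    feats = extractor(torch.randn(1, 3, 224, 224))
# feats maps 'l0'..'l4' to tensors with 64/256/512/1024/2048 channels,
# matching defined_dim_pyramids['resnet50'] above.
```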
x = l4 = resnet.layer4(x)\n \n return {'l4':l4, 'l3':l3, 'l2':l2, 'l1':l1, 'l0':l0}\n \n def _forward_lang(self, tokens, n_tokens):\n \n embs = self.embed_token(tokens)\n embs = self.dropout_lang(embs)\n packed_embs = torch.nn.utils.rnn.pack_padded_sequence(\n embs, n_tokens, batch_first=True, enforce_sorted=False)\n _, _h = self.rnn_lang(packed_embs)\n h_lang = _h[0] if isinstance(_h, tuple) else _h\n h_lang = h_lang.permute(1, 0, 2).view(h_lang.size(1), -1)\n return h_lang\n \n def _forward_fuse(self, h_lang, image_feats):\n \n betagamma = self.film_fc(h_lang).view(-1, self.n_used_pyramids, 2, self.dim_feature)\n \n batch_size, _, h, w = image_feats[self.all_pyramids[0]].size()\n feat_all = torch.zeros((batch_size, self.dim_feature, h, w), device=self.device)\n \n n_used_film = 0\n for k in self.all_pyramids:\n if k in self.used_pyramids:\n feat_local = self.film_blocks[k](image_feats[k], betagamma[:, n_used_film])\n feat_local = self.dropout_feat(feat_local)\n feat_all += feat_local\n n_used_film += 1\n feat_all = self.upsamplers[k](feat_all)\n \n return feat_all\n \n def _forward_candidates(self, feat_all, obj_bboxes, obj_optional_info):\n \n if self.use_bbox:\n obj_feats = self._forward_map_select_bbox(feat_all, obj_bboxes)\n else:\n obj_centers = 0.5*(obj_bboxes[:,:,:2] + obj_bboxes[:,:,2:])\n obj_feats = self._forward_map_select_center(feat_all, obj_centers)\n \n obj_optional_info = self.embed_optional_info(obj_optional_info)\n \n f = torch.cat((obj_feats, obj_optional_info), axis=2)\n y = self.to_logit(f)[:, :, 0]\n return y\n \n def _forward_map_select_bbox(self, feat_all, obj_bboxes):\n \n if self.training and self.coordinate_noise > 0:\n obj_bboxes += torch.randn_like(obj_bboxes)*(self.coordinate_noise*self.image_shape[0])\n \n batch_size, feat_dim, h, w = feat_all.size()\n n_objs = obj_bboxes.size(1)\n \n # truncating coordinates into integer \n obj_bboxes /= self.last_scale_factor\n for i, u, f in [(0, w, True), (1, h, True), (2, w, False), (3, h, False)]:\n obj_bboxes[..., i].clamp_(0, u)\n if f:\n obj_bboxes[..., i] = obj_bboxes[..., i].floor()\n else:\n obj_bboxes[..., i] = obj_bboxes[..., i].ceil()\n obj_bboxes = torch.as_tensor(obj_bboxes, dtype=np.int, device=self.device)\n \n # selecting boxes\n obj_feats = []\n z = torch.zeros((feat_dim,), device=self.device, dtype=torch.float32)\n for ib in range(batch_size):\n feats = []\n batch_map = feat_all[ib]\n for io in range(n_objs):\n x_min, y_min, x_max, y_max = obj_bboxes[ib, io]\n bbox = batch_map[:, y_min:y_max, x_min:x_max]\n if bbox.nelement() == 0:\n feats.append(z)\n else:\n feats.append(self.pooling(bbox))\n obj_feats.append(torch.stack(feats))\n obj_feats = torch.stack(obj_feats)\n \n return obj_feats\n \n def _forward_map_select_center(self, feat_all, obj_centers):\n \n if self.training and self.coordinate_noise > 0:\n obj_centers += torch.randn_like(obj_centers)*(self.coordinate_noise*self.image_shape[0])\n \n batch_size, _, h, w = feat_all.size()\n n_obj = obj_centers.size(1)\n \n # truncating coordinates into integer\n obj_centers /= self.last_scale_factor\n obj_centers = obj_centers.round()\n for i, u in [(0, w), (1, h)]:\n obj_centers[..., i] = obj_centers[..., i].clamp(0, u - 1)\n obj_centers = torch.as_tensor(obj_centers, dtype=np.int, device=self.device)\n \n # selecting points\n ib = torch.arange(batch_size, device=self.device).view(-1, 1).repeat(1, n_obj).flatten()\n ix = obj_centers[:, :, 0].flatten()\n iy = obj_centers[:, :, 1].flatten()\n obj_feats = feat_all.permute(0, 2, 3, 1)[(ib, iy, 
ix)].view(batch_size, n_obj, -1)\n \n return obj_feats\n \n def _forward_map_all(self, feat_all):\n raise NotImplementedError()\n \n def calc_loss(self, ups, n_objs, target_obj_ids, with_is_correct=True):\n \n batch_size, n_max = ups.size()\n arange = torch.arange(0, n_max, device=self.device)[None]\n mask = arange < n_objs[:, None]\n label_mask = arange == target_obj_ids[:, None]\n \n ups_filled = ups.masked_fill(~mask, float('-inf'))\n y = ups_filled.logsumexp(axis=1) - (ups * label_mask).sum(axis=1)\n y = y.sum(axis=0) / batch_size\n \n if not with_is_correct:\n return y\n \n pred_ids = ups_filled.detach().argmax(axis=1)\n is_correct = pred_ids == target_obj_ids\n \n return y, is_correct\n\n\nif __name__ == '__main__':\n config = TrainConfig()\n train(config)\n","sub_path":"src/lcfp_selector_onecommon_exp_0.py","file_name":"lcfp_selector_onecommon_exp_0.py","file_ext":"py","file_size_in_byte":20803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"502184855","text":"from __future__ import print_function\nfrom ortools.linear_solver import pywraplp\n\n\ndef main():\n\n # Create the linear solver with the GLOP backend.\n solver = pywraplp.Solver('simple_lp_program',\n pywraplp.Solver.GLOP_LINEAR_PROGRAMMING)\n\n # Create the variables v1 and v2\n v1 = solver.NumVar(0, 1, 'v1')\n v2 = solver.NumVar(0, 2, 'v2')\n\n print(\"These are the number of the variables used = \", solver.NumVariables())\n\n # The constraint 0 <= x + y <= 2 which represents an equation\n ct = solver.Constraint(0, 2, 'ct')\n ct.SetCoefficient(v1, 1)\n ct.SetCoefficient(v2, 1)\n\n print('Number of constraints =', solver.NumConstraints())\n \n\n # Create the objective function, 3 * x + y.\n # This function needs to be maximized\n objective = solver.Objective()\n objective.SetCoefficient(v1, 3)\n objective.SetCoefficient(v1, 1)\n objective.SetMaximization()\n\n solver.Solve()\n\n print('Solution:')\n print('Objective value =', objective.Value())\n print('x =', x.solution_value())\n print('y =', y.solution_value())\n\n\nif __name__ == '__main__':\n main()","sub_path":"CSProg1.py","file_name":"CSProg1.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"228283555","text":"# -*- encoding: utf-8 -*-\n\nfrom __future__ import absolute_import\nimport subprocess\n\nimport setuptools\n\nfrom setuptools import Extension\nfrom setuptools.command.install import install\n\nfrom pip.download import PipSession\nfrom pip.req import parse_requirements\n\nfrom conf import SCRIPT_INSTALL_REQS\nfrom scripts import download_binaries\n\nhave_cython = False\ntry:\n from Cython.Distutils import build_ext as _build_ext\n\n have_cython = True\nexcept ImportError:\n from distutils.command.build_ext import build_ext as _build_ext\n\nC_UTILS_BASE = 'autosklearn/c_utils/competition_c_functions'\nC_UTILS_LIB = 'autosklearn.c_utils.competition_c_functions'\n\nclass Download(install):\n\n def run(self):\n subprocess.call([\"pip install -r requirements.txt --no-clean\"], shell=True)\n download_binaries()\n subprocess.call(['bash', SCRIPT_INSTALL_REQS])\n install.do_egg_install(self)\n\n\nif have_cython:\n c_utils = Extension(C_UTILS_LIB, ['%s.pyx' % C_UTILS_BASE])\nelse:\n c_utils = Extension(C_UTILS_LIB, ['%s.c' % C_UTILS_BASE])\n\n\nsetuptools.setup(\n name='AutoSklearn',\n description='Code to participate in the AutoML 2015 challenge.',\n version='0.0.2dev',\n ext_modules=[c_utils],\n 
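The GLOP sample above (CSProg1.py) has two slips: the objective sets a coefficient on `v1` twice (the second call was presumably meant for `v2`), and the final prints reference `x`/`y`, which are never defined. A corrected sketch using the same legacy solver constructor:

```python
from ortools.linear_solver import pywraplp

solver = pywraplp.Solver('simple_lp_program',
                         pywraplp.Solver.GLOP_LINEAR_PROGRAMMING)
v1 = solver.NumVar(0, 1, 'v1')
v2 = solver.NumVar(0, 2, 'v2')

ct = solver.Constraint(0, 2, 'ct')   # 0 <= v1 + v2 <= 2
ct.SetCoefficient(v1, 1)
ct.SetCoefficient(v2, 1)

objective = solver.Objective()       # maximize 3*v1 + v2
objective.SetCoefficient(v1, 3)
objective.SetCoefficient(v2, 1)      # was mistakenly set on v1 in the sample
objective.SetMaximization()

solver.Solve()
print('Objective value =', objective.Value())
print('v1 =', v1.solution_value())   # the sample printed x/y, which don't exist
print('v2 =', v2.solution_value())
```

With these fixes the solver maximizes 3·v1 + v2 subject to 0 ≤ v1 + v2 ≤ 2, giving v1 = 1, v2 = 1 and an objective of 4.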
packages=setuptools.find_packages(exclude=['test']),\n install_requires=[str(ir.req) for ir in\n parse_requirements('requirements.txt',\n session=PipSession())],\n test_suite='nose.collector',\n cmdclass={'install': Download, 'build_ext': _build_ext},\n scripts=['scripts/autosklearn'],\n include_package_data=True,\n author='Matthias Feurer',\n author_email='feurerm@informatik.uni-freiburg.de',\n license='BSD',\n platforms=['Linux'],\n classifiers=[],\n url='www.automl.org')\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"606846573","text":"from Baseline import Baseline\r\nfrom ScreenMatcher import ScreenMatcher\r\nfrom MatchCleaner import MatchCleaner\r\nfrom Clicker import Clicker\r\nimport random\r\n\r\nbase = Baseline();\r\n#nature_rune_check = ScreenMatcher(\"templates/items/nature_rune.png\");\r\n#astral_rune_check = ScreenMatcher(\"templates/items/astral_rune.png\");\r\n#coin_check = ScreenMatcher(\"templates/items/cash_stack\");\r\nprint('Start in north edge bank with inventory of nature runes, astral runes, money, and logs.');\r\nprint('How many logs do you have: ')\r\n#print('If you do not the bot will not stop');\r\nnum_logs = input()\r\nbase.compass();\r\n\r\nsm3 = ScreenMatcher(\"templates/interface/backpack.png\");\r\nmc3 = MatchCleaner(sm3.getPositions());\r\ncl3 = Clicker();\r\ncl3.LC(mc3.getCenters()[0], 0.5);\r\n\r\n\r\nsm3.setTemplate(\"templates/items/teak_log.png\")\r\nsm3.setThresh(0.7);\r\nmc3.setRawPos(sm3.getPositions())\r\n\r\n\r\n\r\nsm = ScreenMatcher(\"templates/interface/lunar_book.png\");\r\nmc = MatchCleaner(sm.getPositions());\r\ncl = Clicker();\r\ncl.LC(mc.getCenters()[0], 1.2);\r\nGPPerPlank = 2147-1624\r\n\r\nplanks_made = 0\r\nwhile planks_made < num_logs:\r\n sm2 = ScreenMatcher(\"templates/spells/plank_make.png\");\r\n sm2.setThresh(0.7);\r\n mc2 = MatchCleaner(sm2.getPositions());\r\n style = random.randint(0,2)\r\n j=0;\r\n m=0;\r\n n=0;\r\n for i in range(25):\r\n cl.LC(mc2.getCenters()[0], .4);\r\n\r\n if(style==0):\r\n cl.LC(mc3.getCenters()[i], 0.15);\r\n if (style==1):\r\n cl.LC(mc3.getCenters()[24-i], 0.15);\r\n if (style==2):\r\n if i<7:\r\n cl.LC(mc3.getCenters()[i*4], 0.15);\r\n if 7<=i and i<13:\r\n cl.LC(mc3.getCenters()[3+j*4], 0.15);\r\n j+=1\r\n if 13<=i and i<19:\r\n cl.LC(mc3.getCenters()[2+m*4], 0.15);\r\n m+=1\r\n if 19<=i and i<25:\r\n cl.LC(mc3.getCenters()[1+n*4], 0.15);\r\n n+=1\r\n planks_made += 1;\r\n print(str(GPPerPlank*planks_made/1000)+'K profit made so far')\r\n\r\n sm.setTemplate(\"templates/world/edge_bank.png\");\r\n sm.setThresh(0.7);\r\n mc.setRawPos(sm.getPositions())\r\n cl.LC(mc.getCenters()[0], 1);\r\n\r\n sm.setTemplate(\"templates/items/teak_plank.png\");\r\n mc.setRawPos(sm.getPositions())\r\n cl.RC(mc.getCenters()[3], 0.5);\r\n\r\n sm.setTemplate(\"templates/interface/all.png\");\r\n mc.setRawPos(sm.getPositions())\r\n cl.LC(mc.getCenters()[0], 0.5);\r\n\r\n sm.setTemplate(\"templates/items/teak_log_bank.png\");\r\n mc.setRawPos(sm.getPositions());\r\n cl.RC(mc.getCenters()[0], 0.5);\r\n\r\n sm.setTemplate(\"templates/interface/all.png\");\r\n mc.setRawPos(sm.getPositions());\r\n cl.LC(mc.getCenters()[0], 0.5);\r\n\r\n sm.setTemplate(\"templates/interface/close_bank.png\");\r\n mc.setRawPos(sm.getPositions());\r\n cl.LC(mc.getCenters()[0], 
0.5);\r\n","sub_path":"gui_bot_builder/OldSchoolRunescapeBots/PlankMakeTeak.py","file_name":"PlankMakeTeak.py","file_ext":"py","file_size_in_byte":2830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"613642135","text":"# data collection and analyzing module \n\nfrom manager import *\nimport time\nfrom scipy.spatial import distance\nimport statistics\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom data_acq import *\nfrom init import *\n\n\ndef thrh_comp(Y):\n ''' Used for Dynamic Threshold calculation and therein carries scattered energy info'''\n #percen_thr=0.05 # 5% of max energy holds - defined in init.py\n return np.mean(np.sort(abs(Y))[-int(len(Y)*percen_thr):-1])\n \n\ndef fft_block(Xdata, isplot, issave, fname='data/AxisX_pass.png'):\n #Fs = 2048.0 # sampling rate - defined in init.py\n Ts = 1.0/Fs # sampling interval\n t = np.arange(0,len(Xdata)/Fs,Ts) # time vector\n y = Xdata - np.mean(Xdata)\n n = len(y) # length of the signal\n k = np.arange(n)\n T = n/Fs\n frq = k/T # two sides frequency range \n frq = frq[range(int(n/2))] # one side frequency range\n Y = np.fft.fft(y)/n # fft computing and normalization\n Y = Y[range(int(n/2))] \n thrh=thrh_comp(Y)\n if isplot:\n fig, ax = plt.subplots(2, 1)\n ax[0].plot(t,y)\n ax[0].set_xlabel('Time')\n ax[0].set_ylabel('Amplitude')\n ax[1].plot(frq,abs(Y),'b',frq,thrh+abs(Y)*0,'r') # plotting the spectrum\n ax[1].vlines([230, 240 ], 0, np.max(abs(Y)), colors='g')\n ax[1].vlines([ 470, 480 ], 0, np.max(abs(Y)), colors='g')\n ax[1].vlines([ 710, 720 ], 0, np.max(abs(Y)), colors='g')\n ax[1].vlines([ 565, 630 ], 0, np.max(abs(Y)), colors='g')\n ax[1].set_xlabel('Freq (Hz)')\n ax[1].set_ylabel('|Y(freq)|')\n ax[0].grid(True)\n ax[1].grid(True)\n if issave:\n plt.savefig(fname) \n plt.show()\n return thrh*10000 # 1000 - imperical normalization factor \n\ndef fft_main():\n data = acq_data()\n datapool=[ data.AxisX.to_numpy(),\n data.AxisY.to_numpy(),\n data.AxisZ.to_numpy()]\n Ax_thrh=[]\n for cnt, Xdata in enumerate(datapool):\n Ax_thrh.append(fft_block(Xdata, isplot, issave, fname='data/Axis'+str(cnt)+'.png'))\n return Ax_thrh\n\n\ndef vib_dsp():\n current = fft_main()\n d = distance.euclidean(current, Axes_Threshold)\n print(\"Euclidean distance: \",d)\n std = statistics.stdev([abs(j-i) for i,j in zip(current , Axes_Threshold)])\n print(\"Standard Deviation of sample is % s \" \n % (std))\n if d > max_eucl or std*100 > deviation_percentage:\n return True\n return False\n \n","sub_path":"dataAnalyzer.py","file_name":"dataAnalyzer.py","file_ext":"py","file_size_in_byte":2455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"371127237","text":"the_count = [1, 2, 3, 4, 5]\r\nfruits = ['apples', 'oranges', 'pears', 'apricots', 'bananas']\r\nchange = [1, 'pennies', 2, 'dimes', 3, 'quarters', 4, 'nickels']\r\n\r\n# this first kind of 'for-loop' goes through a list\r\nfor number in the_count:\r\n print(\"This is count %d\" % number)\r\n\r\n# same as above (lazy comments)\r\nfor fruit in fruits:\r\n print(\"A fruit of type: %s\" % fruit)\r\n\r\n# also, we can go through mixed lists too\r\n# notice we have to use %r since we don't know what data type it may be\r\nfor i in change:\r\n print(\"I got %r\" % i)\r\n\r\n# we can also build lists. First, start with an empty list\r\nelements = []\r\n\r\n# then 'fill it in.' 
This time we're using a range function to do 6 counts\r\nfor i in range(0, 6):\r\n print(\"Adding %d to the list.\" % i)\r\n # append to the list; it's a function a list understands\r\n elements.append(i)\r\n\r\n# Now, we can print them out too\r\nfor i in elements:\r\n print(\"Element was %d\" % i)\r\n \r\n","sub_path":"ex32.py","file_name":"ex32.py","file_ext":"py","file_size_in_byte":942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"57783694","text":"import json\r\nimport workouts\r\nimport os\r\nfrom collections import defaultdict\r\n\r\n# Set paths\r\nAPP_PATH = os.path.dirname(os.path.abspath(__file__))\r\n# Data directory path\r\nDATA_PATH = os.path.join(APP_PATH, \"data\")\r\n# .json file path\r\nWORKOUTS_FILE = os.path.join(DATA_PATH, \"workouts.json\")\r\n\r\n\r\ndef load_json_data(file_name):\r\n \"\"\"Loads workouts data from .json file.\"\"\"\r\n with open(file_name, \"r\") as file:\r\n data = json.load(file)\r\n return data\r\n\r\n\r\ndef build_workouts_instances(plans_names):\r\n \"\"\"Initiates instances for each workout name in plans_names\"\"\"\r\n\r\n workouts_plans = list(plans_names.copy())\r\n for plan in workouts_plans:\r\n\r\n workout = workouts.WorkoutPlan(plan)\r\n\r\n try:\r\n exercises = WORKOUTS_DATA[workout.name].get(\"exercises\")\r\n workout.exercises = [workouts.Exercise(exercise) for exercise in exercises]\r\n try:\r\n for exercise in workout.exercises:\r\n exercise.details = exercises[exercise.name][\"details\"]\r\n except KeyError:\r\n pass\r\n except KeyError:\r\n pass\r\n\r\n try:\r\n if WORKOUTS_DATA[workout.name].get(\"trainings\"):\r\n trainings = WORKOUTS_DATA[workout.name].get(\"trainings\")\r\n workout.trainings = []\r\n for training in trainings:\r\n try:\r\n training_exercises = [\r\n workouts.Exercise(exercise)\r\n for exercise in trainings[training][\"exercises\"]\r\n ]\r\n\r\n for exercise in training_exercises:\r\n if trainings[training][\"exercises\"][exercise.name].get(\r\n \"details\"\r\n ):\r\n exercise.details = trainings[training][\"exercises\"][\r\n exercise.name\r\n ].get(\"details\")\r\n\r\n workout.trainings.append(\r\n workouts.Training(training, training_exercises)\r\n )\r\n except KeyError:\r\n workout.trainings.append(workouts.Training(training))\r\n except KeyError:\r\n pass\r\n workouts_instances.append(workout)\r\n return\r\n\r\n\r\ndef create_plan():\r\n \"\"\"Initiates building new WorkoutPlan instance with name taken from the user.\"\"\"\r\n new_plan_name = \"\"\r\n while not new_plan_name:\r\n new_plan_name = input(\"Enter new plan name: \")\r\n\r\n if plan_exists(new_plan_name):\r\n return print(f\"Plan {new_plan_name} already exists\")\r\n\r\n else:\r\n build_workouts_instances([new_plan_name])\r\n print(f\"New plan {new_plan_name} has been created\")\r\n return\r\n\r\n\r\ndef create_training(plan_instance):\r\n \"\"\"Creates training instance\"\"\"\r\n plan = plan_instance\r\n training_name = \"\"\r\n while not training_name:\r\n training_name = input(\"Enter name for new training: \")\r\n\r\n if plan:\r\n plan.add_training(training_name)\r\n else:\r\n print(\"There is something wrong. Cant't add new training.\")\r\n\r\n\r\ndef create_exercise(workout_instance=None, training_instance=None):\r\n workout = workout_instance\r\n training = training_instance\r\n\r\n if training and workout:\r\n print(\r\n f\"1. Add exercise from {workout.name} exercises\" \r\n \"\\n2. 
Add new exercise\"\r\n )\r\n user_choice = input(\"Chose an option: \")\r\n\r\n if user_choice == \"1\":\r\n if workout.exercises:\r\n for i in range(len(workout.exercises)):\r\n print(f\"{i + 1}. {workout.exercises[i].name}\")\r\n else:\r\n print(\"No workout exercises.\")\r\n\r\n correct_input = False\r\n while not correct_input:\r\n user_choice = input(\"Chose an option: \")\r\n try:\r\n user_choice = int(user_choice)\r\n if user_choice in range(len(workout.exercises) + 1):\r\n correct_input = True\r\n except ValueError:\r\n print(\"Wrong input.\")\r\n continue\r\n else:\r\n print(\"Wrong input.\")\r\n\r\n exercise = workout.exercises[user_choice - 1]\r\n training.add_exercise_instance(exercise)\r\n\r\n elif user_choice == \"2\":\r\n new_ex_name = input(\"Enter name for new exercise: \")\r\n training.add_exercise(new_ex_name)\r\n\r\n else:\r\n print(\"Wrong input. Try again\")\r\n create_exercise(workout, training)\r\n\r\n\r\ndef plan_exists(plan_name):\r\n \"\"\"Checks if plan of given name already exists in data (.json) file.\"\"\"\r\n return plan_name in [plan.name for plan in workouts_instances]\r\n\r\n\r\ndef gather_current_data():\r\n current_data = defaultdict(dict)\r\n\r\n # add workout plans\r\n for workout in workouts_instances:\r\n current_data[workout.name] = {}\r\n\r\n # add trainings\r\n if workout.trainings:\r\n current_data[workout.name][\"trainings\"] = {\r\n training.name: {\r\n \"exercises\": {\r\n exercise.name: exercise.details\r\n for exercise in training.exercises\r\n if exercise.details\r\n }\r\n }\r\n if training.exercises else {}\r\n for training in workout.trainings\r\n }\r\n\r\n # add exercises\r\n if workout.exercises:\r\n current_data[workout.name][\"exercises\"] = {\r\n exercise.name: {\"details\": exercise.details} if exercise.details else {}\r\n for exercise in workout.exercises\r\n }\r\n\r\n exercises = current_data[workout.name][\"exercises\"]\r\n\r\n for exercise in workout.exercises:\r\n exercises[exercise.name] = {}\r\n\r\n # add exercise details\r\n if exercise.details:\r\n exercises[exercise.name][\"details\"] = exercise.details\r\n\r\n return current_data\r\n\r\n\r\ndef save_session():\r\n \"\"\"Exports current data to .json file.\"\"\"\r\n data_to_save = gather_current_data()\r\n\r\n with open(WORKOUTS_FILE, \"w\") as json_file:\r\n json.dump(data_to_save, json_file, indent=4)\r\n\r\n\r\nWORKOUTS_DATA = load_json_data(WORKOUTS_FILE)\r\nPLANS = [plan for plan in WORKOUTS_DATA.keys()]\r\n\r\n# Lists to store workouts and exercises instances\r\nworkouts_instances = []\r\n# exercises_instances = []\r\n\r\nbuild_workouts_instances(PLANS)\r\n","sub_path":"service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":6627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"36606484","text":"# https://www.acmicpc.net/problem/1753\nimport heapq\nfrom collections import deque\ntestSource = \\\n\"\"\"5 6\n1\n5 1 1\n1 3 3\n1 2 2\n2 3 4\n2 4 5\n3 4 6\"\"\"\n\ndef queueTest():\n\tqueue = []\n\tD = []\n\tfor i in range(9,-1,-1):\n\t\tD.append([i,90+(i*10)])\n\t\theapq.heappush(queue, D[9-i])\n\tfor i in range(9):\n\t\tj = heapq.heappop(queue)\n\t\tprint(j)\n\n\ndef dijkstra(vCnt,start,path):\n\tMAX_INT = 987654321\n\tvisited = [MAX_INT] * (vCnt)\n\tvisited[start-1] = 0\n\tq = deque()\n\tq.append((start-1, 0))\n\twhile q:\n\t\tu, d = q.popleft()\n\t\tfor v, w in path[u]:\n\t\t\tif d + w < visited[v]:\n\t\t\t\tvisited[v] = d + w\n\t\t\t\tq.append((v, d+w))\n\tsolution = []\n\tfor i in 
visited:\n\t\tif i == MAX_INT:\n\t\t\t# print(\"INF\")\n\t\t\tsolution.append(\"INF\")\n\t\telse:\n\t\t\t# print(i)\n\t\t\tsolution.append(str(i))\n\treturn solution\n\ndef dijkstra2(vCnt,start,path): # time error\n\tMAX_INT = 987654321\n\tvisited = [0] * vCnt\n\tqueue = []\n\tD = []\n\tfor i in range(vCnt):\n\t\tif i == start-1:\n\t\t\tD.append([0, i])\n\t\telse:\t\n\t\t\tD.append([MAX_INT, i])\n\t\theapq.heappush(queue, D[i])\n\twhile queue:\n\t\td,u = heapq.heappop(queue)\n\t\tvisited[u] = 1\n\t\tfor v,w in path[u]:\n\t\t\tif visited[v] == 0 and (D[v][0] > D[u][0] + w ):\n\t\t\t\tD[v][0] = D[u][0] + w\n\t\theapq.heapify(queue)\n\n\tsolution = []\n\tfor i in D:\n\t\tif i[0] == MAX_INT:\n\t\t\t# print(\"INF\")\n\t\t\tsolution.append(\"INF\")\n\t\telse:\n\t\t\t# print(i[0])\n\t\t\tsolution.append(str(i[0]))\n\treturn solution\n\ndef run():\n\tvCnt, eCnt = list(map(int,input().split()))\n\tstart = int(input())\n\tpath = [[] for i in range(vCnt)]\n\tfor i in range(eCnt):\n\t\tu,v,w = map(int,input().split())\n\t\tpath[u-1].append((v-1,w))\n\tprint(\"\\n\".join(dijkstra(vCnt,start,path)))\n\n\ndef testRun(testSource):\n\tsource = testSource.split(\"\\n\")\n\tvCnt, eCnt = list(map(int,source.pop(0).split()))\n\tstart = int(source.pop(0))\n\tpath = [[] for i in range(vCnt)]\n\tfor i in source:\n\t\tu,v,w = map(int,i.split())\n\t\tpath[u-1].append((v-1,w))\n\tprint(\"\\n\".join(dijkstra(vCnt,start,path)))\n\ndef testCompareTwoMethod(testSource):\n\tsource = testSource.split(\"\\n\")\n\tvCnt, eCnt = list(map(int,source.pop(0).split()))\n\tstart = int(source.pop(0))\n\tpath = [[] for i in range(vCnt)]\n\tfor i in source:\n\t\tu,v,w = map(int,i.split())\n\t\tpath[u-1].append((v-1,w))\n\tprint(dijkstra(vCnt,start,path) == dijkstra2(vCnt,start,path))\n\t\n\ndef randomCompare():\n\ttime = 1\n\tfor i in range(time):\n\t\ttestSource = makeRandomSource()\n\t\ttestCompareTwoMethod(testSource)\n\ndef makeRandomSource():\n\timport random\n\tvCnt = random.randint(1,20000)\n\teCnt = random.randint(1,300000)\n\tstart = random.randint(1,vCnt)\n\tsource = str(vCnt) + \" \" + str(eCnt) + \"\\n\" + str(start)\n\tfor i in range(eCnt):\n\t\tu,v,w = random.randint(1,vCnt), random.randint(1,vCnt), random.randint(1,10)\n\t\twhile u==v:\n\t\t\tv = random.randint(1,vCnt)\n\t\tsource += \"\\n\" + str(u) + \" \" + str(v) + \" \" + str(w)\n\treturn source\n\ndef pprint(path):\n\tfor i in path:\n\t\tprint(i)\n\tprint()\n\t\n# queueTest()\n# testRun(testSource)\nrun()\n# randomCompare()","sub_path":"037_SHORTEST_PATH/jeemyeong-1753.py","file_name":"jeemyeong-1753.py","file_ext":"py","file_size_in_byte":2858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"329649975","text":"# Copyright 2020 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Callable, List, Optional, Sequence, Union\n\nimport torch\nimport torch.distributed as dist\n\nfrom monai.metrics import compute_confusion_matrix\nfrom monai.utils import Average, exact_version, 
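The first `dijkstra` above is really SPFA (Bellman-Ford driven by a FIFO deque), and `dijkstra2` re-heapifies the entire queue after every relaxation, which is what makes it time out. For comparison, a standard lazy-deletion heap version over the same `path` adjacency list, returning the same 'INF'/distance strings:

```python
import heapq

def dijkstra_heap(vCnt, start, path):
    INF = float('inf')
    dist = [INF] * vCnt
    dist[start - 1] = 0
    pq = [(0, start - 1)]
    while pq:
        d, u = heapq.heappop(pq)
        if d > dist[u]:
            continue  # stale entry left behind by an earlier relaxation
        for v, w in path[u]:
            if d + w < dist[v]:
                dist[v] = d + w
                heapq.heappush(pq, (dist[v], v))
    return ['INF' if d == INF else str(d) for d in dist]
```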
optional_import\n\nfrom .utils import all_gather\n\nMetric, _ = optional_import(\"ignite.metrics\", \"0.4.2\", exact_version, \"Metric\")\nreinit__is_reduced, _ = optional_import(\"ignite.metrics.metric\", \"0.4.2\", exact_version, \"reinit__is_reduced\")\n\n\nclass ConfusionMatrix(Metric): # type: ignore[valid-type, misc] # due to optional_import\n \"\"\"\n Compute confusion matrix related metrics. This function supports to calculate all metrics\n mentioned in: `Confusion matrix `_.\n accumulating predictions and the ground-truth during an epoch and applying `compute_confusion_matrix`.\n\n Args:\n to_onehot_y: whether to convert `y` into the one-hot format. Defaults to False.\n activation: [``\"sigmoid\"``, ``\"softmax\"``]\n Activation method, if specified, an activation function will be employed for `y_pred`.\n Defaults to None.\n The parameter can also be a callable function, for example:\n ``activation = lambda x: torch.log_softmax(x)``.\n bin_mode: [``\"threshold\"``, ``\"mutually_exclusive\"``]\n Binarization method, if specified, a binarization manipulation will be employed\n for `y_pred`.\n\n - ``\"threshold\"``, a single threshold or a sequence of thresholds should be set.\n - ``\"mutually_exclusive\"``, `y_pred` will be converted by a combination of `argmax` and `to_onehot`.\n bin_threshold: the threshold for binarization, can be a single value or a sequence of\n values that each one of the value represents a threshold for a class.\n metric_name: [``\"sensitivity\"``, ``\"specificity\"``, ``\"precision\"``, ``\"negative predictive value\"``,\n ``\"miss rate\"``, ``\"fall out\"``, ``\"false discovery rate\"``, ``\"false omission rate\"``,\n ``\"prevalence threshold\"``, ``\"threat score\"``, ``\"accuracy\"``, ``\"balanced accuracy\"``,\n ``\"f1 score\"``, ``\"matthews correlation coefficient\"``, ``\"fowlkes mallows index\"``,\n ``\"informedness\"``, ``\"markedness\"``]\n Some of the metrics have multiple aliases (as shown in the wikipedia page aforementioned),\n and you can also input those names instead.\n average: [``\"macro\"``, ``\"weighted\"``, ``\"micro\"``, ``\"none\"``]\n Type of averaging performed if not binary classification.\n Defaults to ``\"macro\"``.\n\n - ``\"macro\"``: calculate metrics for each label, and find their unweighted mean.\n This does not take label imbalance into account.\n - ``\"weighted\"``: calculate metrics for each label, and find their average,\n weighted by support (the number of true instances for each label).\n - ``\"micro\"``: calculate metrics globally by considering each element of the label\n indicator matrix as a label.\n - ``\"none\"``: the scores for each class are returned.\n zero_division: the value to return when there is a zero division, for example, when all\n predictions and labels are negative. Defaults to 0.\n output_transform: a callable that is used to transform the\n :class:`~ignite.engine.Engine` `process_function` output into the\n form expected by the metric. 
This can be useful if, for example, you have a multi-output model and\n you want to compute the metric with respect to one of the outputs.\n device: device specification in case of distributed computation usage.\n\n \"\"\"\n\n def __init__(\n self,\n to_onehot_y: bool = False,\n activation: Optional[Union[str, Callable]] = None,\n bin_mode: Optional[str] = \"threshold\",\n bin_threshold: Union[float, Sequence[float]] = 0.5,\n metric_name: str = \"hit_rate\",\n average: Union[Average, str] = Average.MACRO,\n zero_division: int = 0,\n output_transform: Callable = lambda x: x,\n device: Optional[torch.device] = None,\n ) -> None:\n super().__init__(output_transform, device=device)\n self.to_onehot_y = to_onehot_y\n self.activation = activation\n self.bin_mode = bin_mode\n self.bin_threshold = bin_threshold\n self.metric_name = metric_name\n self.average: Average = Average(average)\n self.zero_division = zero_division\n\n @reinit__is_reduced\n def reset(self) -> None:\n self._predictions: List[torch.Tensor] = []\n self._targets: List[torch.Tensor] = []\n\n @reinit__is_reduced\n def update(self, output: Sequence[torch.Tensor]) -> None:\n \"\"\"\n Args:\n output: sequence with contents [y_pred, y].\n\n Raises:\n ValueError: When ``output`` length is not 2.\n ValueError: When ``y_pred`` dimension is not one of [1, 2].\n ValueError: When ``y`` dimension is not one of [1, 2].\n\n \"\"\"\n if len(output) != 2:\n raise ValueError(f\"output must have length 2, got {len(output)}.\")\n y_pred, y = output\n if y_pred.ndimension() not in (1, 2):\n raise ValueError(\"Predictions should be of shape (batch_size, n_classes) or (batch_size, ).\")\n if y.ndimension() not in (1, 2):\n raise ValueError(\"Targets should be of shape (batch_size, n_classes) or (batch_size, ).\")\n\n self._predictions.append(y_pred.clone())\n self._targets.append(y.clone())\n\n def compute(self):\n _prediction_tensor = torch.cat(self._predictions, dim=0)\n _target_tensor = torch.cat(self._targets, dim=0)\n\n if dist.is_available() and dist.is_initialized() and not self._is_reduced:\n _prediction_tensor = all_gather(_prediction_tensor)\n _target_tensor = all_gather(_target_tensor)\n self._is_reduced = True\n\n return compute_confusion_matrix(\n y_pred=_prediction_tensor,\n y=_target_tensor,\n to_onehot_y=self.to_onehot_y,\n activation=self.activation,\n bin_mode=self.bin_mode,\n bin_threshold=self.bin_threshold,\n metric_name=self.metric_name,\n average=self.average,\n zero_division=self.zero_division,\n )\n","sub_path":"monai/handlers/confusion_matrix.py","file_name":"confusion_matrix.py","file_ext":"py","file_size_in_byte":6974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"35093845","text":"from rest_framework import serializers\n\nfrom users.models import User\nfrom users.roles import Roles\n\n\nclass TokenObtainSerializer(serializers.Serializer):\n email = serializers.EmailField()\n confirmation_code = serializers.CharField()\n\n\nclass EmailSerializer(serializers.Serializer):\n email = serializers.EmailField()\n\n\nclass UserSerializer(serializers.ModelSerializer):\n class Meta:\n model = User\n fields = (\n 'first_name',\n 'last_name',\n 'username',\n 'bio',\n 'email',\n 'role',\n )\n\n def update(self, user, validated_data):\n request = self.context.get('request')\n if request:\n request_user = request.user\n if (request_user.role != Roles.ADMIN\n or not request_user.is_superuser\n or not request_user.is_staff):\n validated_data.pop('role', None)\n user = 
super().update(user, validated_data)\n return user\n","sub_path":"users/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"400168614","text":"from BinaryTree import *\n\ndef lowestCommonAncestor(self, root: 'TreeNode', p: 'TreeNode', q: 'TreeNode') -> 'TreeNode':\n def dfs(root, path, node):\n if root is None: return\n path.append(root)\n if root.val == node.val:\n return path\n path_left = dfs(root.left, path, node)\n if path_left: return path_left\n path_right = dfs(root.right, path, node)\n if path_right: return path_right\n path.pop()\n\n path_p = dfs(root, [], p)\n path_q = dfs(root, [], q)\n n = min(len(path_p), len(path_q)) \n i = 0\n while i < n and path_p[i].val == path_q[i].val:\n i += 1\n return path_p[i - 1]\n\ndef lowestCommonAncestor(self, root: 'TreeNode', p: 'TreeNode', q: 'TreeNode') -> 'TreeNode':\n if root is None: return None\n if root.val == p.val or root.val == q.val: return root\n \n lca_left = lowestCommonAncestor(root.left, p, q)\n lca_right = lowestCommonAncestor(root.right, p, q)\n\n if lca_left and lca_right: return root\n return lca_left if lca_left else lca_right\n\nif __name__ == \"__main__\":\n root = listtoTreeNode([3, 5, 1, 6, 2, 0, 8, None, None, 7, 4])\n prettyPrintTree(root)\n","sub_path":"lowest_common_ancestor_of_a_binary_tree_236.py","file_name":"lowest_common_ancestor_of_a_binary_tree_236.py","file_ext":"py","file_size_in_byte":1175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"460401754","text":"\nfrom django.urls import path\n\nfrom . import views\n\napp_name = 'users'\nurlpatterns = [\n path('register_login/', views.register_login, name='register_login'),\n path('register_user/', views.register_user, name='register_user'),\n path('login_user/', views.login_user, name='login_user'),\n path('logout_user/', views.logout_user, name='logout_user'),\n path('protected/', views.protected, name='protected'),\n path('protected2/', views.protected2, name='protected2')\n]\n\n","sub_path":"Code/matthew/django/mysite/users/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"644411162","text":"import unittest\n\nfrom chum import create_app, db\nfrom chum.models import User, BucketList, BucketListItem\n\n\nclass BaseTestCase(unittest.TestCase):\n \"\"\"\n Base test class for all tests\n \"\"\"\n\n def setUp(self):\n self.app = create_app('testing')\n self.context = self.app.app_context()\n self.context.push()\n db.drop_all()\n db.create_all()\n self.client = self.app.test_client()\n\n self.user = User(\n name='Larry',\n username='larry',\n email='larry@example.org'\n )\n self.user.set_password('password')\n\n self.user2 = User(\n name='Larry Wachira',\n username='larry2',\n email='larry2@example.org'\n )\n self.user2.set_password('password2')\n\n self.user3 = User(\n name='Larry Wachira Muchiri',\n username='larry3',\n email='larry3@example.org'\n )\n self.user3.set_password('password3')\n\n self.user_bucket_list = BucketList(\n name='travel',\n user=self.user\n )\n\n self.user_bucket_list2 = BucketList(\n name='fun activities',\n user=self.user\n )\n\n self.user2_bucket_list = BucketList(\n name='food',\n user=self.user2\n )\n\n self.user3_bucket_list = BucketList(\n name='travel',\n user=self.user3\n )\n\n self.user_bucket_list_item = 
BucketListItem(\n            name='Mombasa',\n            bucket_list=self.user_bucket_list\n        )\n\n        self.user_bucket_list_item2 = BucketListItem(\n            name='Mara',\n            description='See a lot of lions!',\n            bucket_list=self.user_bucket_list\n        )\n\n        self.user2_bucket_list_item = BucketListItem(\n            name='Paris',\n            bucket_list=self.user2_bucket_list\n        )\n\n        self.user3_bucket_list_item = BucketListItem(\n            name='italy',\n            description='Pasta.',\n            bucket_list=self.user3_bucket_list\n        )\n\n        db.session.add_all([\n            self.user,\n            self.user2,\n            self.user3,\n            self.user_bucket_list,\n            self.user_bucket_list2,\n            self.user2_bucket_list,\n            self.user3_bucket_list,\n            self.user_bucket_list_item,\n            self.user_bucket_list_item2,\n            self.user2_bucket_list_item,\n            self.user3_bucket_list_item\n        ])\n        db.session.commit()\n\n        user = User.query.filter_by(username='larry').first()\n        user2 = User.query.filter_by(username='larry2').first()\n        user3 = User.query.filter_by(username='larry3').first()\n\n        self.user_token = user.generate_auth_token()\n        self.user2_token = user2.generate_auth_token()\n        self.user3_token = user3.generate_auth_token()\n\n    def tearDown(self):\n        db.session.remove()\n        db.drop_all()\n        self.context.pop()\n","sub_path":"chum/api/tests/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"116290116","text":"# -*- coding: utf-8 -*-\n\nimport logging\nimport os\nfrom scrapy.pipelines.files import FilesPipeline\nfrom credit_downloader import settings\n\n\nclass CreditDownloaderPipeline(FilesPipeline):\n    logger = logging.getLogger()\n    storage = os.path.normpath(settings.FILES_STORE)\n\n    # Overriding FilesPipeline.item_completed\n    def item_completed(self, results, item, info):\n        '''Categorize the downloaded files and update the item status.\n        Files are stored under credit_downloader/downloads/<source>/<category>/<filename>.'''\n\n        # move file\n        source, category = item['source'], item['category']\n        for result in [x for ok, x in results if ok]:\n            target_path = self.get_target_path(source, category, result)\n            dirname = os.path.dirname(target_path)\n            tmp_path = os.path.join(self.get_project_dirname(), self.storage, result['path'])\n\n            result['path'] = target_path\n            item['files'].append(result)\n\n            os.makedirs(dirname, exist_ok=True)\n\n            try:\n                os.rename(tmp_path, target_path)\n            except OSError:\n                self.logger.error('Unable to move files from %s to %s', tmp_path, target_path)\n                item['status'] = 'move_failed'\n\n        # Update Item json to save download status and paths\n        if self.FILES_RESULT_FIELD in item.fields:\n            item[self.FILES_RESULT_FIELD] = [x for ok, x in results if ok]\n        if item['files'] == []:\n            if item['status'] not in ['move_failed', 'missing']:\n                item['status'] = 'download_failed'\n        else:\n            if item['status'] not in ['move_failed', 'download_failed', 'missing']:\n                item['status'] = 'success'\n        item.pop('file_urls', None)\n        return item\n\n    def get_target_path(self, src, cat, result):\n        name = result['url'].split('/')[-1]\n        return os.path.join(self.get_project_dirname(), self.storage, src, cat, name)\n\n    def get_project_dirname(self):\n        '''Get the absolute path to the project.'''\n        return os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n","sub_path":"credit_downloader/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":2097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"556098912","text":"#!/usr/bin/env python\n# vim : fileencoding=UTF-8 :\n\nfrom __future__ import absolute_import, division, unicode_literals\n\nimport logging\nimport multiprocessing as mp\nimport random\nimport re\nimport time\nimport unittest\nfrom io import StringIO\n\ntry:\n    from unittest import mock\nexcept ImportError:\n    mock = None\n\nfrom multiprocessing_logging import install_mp_handler, MultiProcessingHandler\n\n\nclass InstallHandlersTest(unittest.TestCase):\n\n    def setUp(self):\n        self.handler = logging.NullHandler()\n        self.logger = logging.Logger('test-logger')\n        self.logger.addHandler(self.handler)\n\n    def _assert_result(self):\n        wrapper_handler, = self.logger.handlers\n        self.assertIsInstance(wrapper_handler, MultiProcessingHandler)\n        self.assertIs(wrapper_handler.sub_handler, self.handler)\n\n    def test_when_no_logger_is_specified_then_it_uses_the_root_logger(self):\n        if not mock:\n            self.skipTest('unittest.mock is not available')\n\n        with mock.patch('logging.getLogger') as getLogger:\n            getLogger.return_value = self.logger\n\n            install_mp_handler()\n\n            getLogger.assert_called_once_with()\n\n            wrapper_handler, = self.logger.handlers\n            self.assertIsInstance(wrapper_handler, MultiProcessingHandler)\n            self.assertIs(wrapper_handler.sub_handler, self.handler)\n\n    def test_when_a_logger_is_passed_then_it_does_not_change_the_root_logger(self):\n        if not mock:\n            self.skipTest('unittest.mock is not available')\n\n        with mock.patch('logging.getLogger') as getLogger:\n            install_mp_handler(self.logger)\n\n        self.assertEqual(0, getLogger.call_count)\n\n    def test_when_a_logger_is_passed_then_it_wraps_all_handlers(self):\n        install_mp_handler(self.logger)\n\n        wrapper_handler, = self.logger.handlers\n        self.assertIsInstance(wrapper_handler, MultiProcessingHandler)\n        self.assertIs(wrapper_handler.sub_handler, self.handler)\n\n\nclass WhenMultipleProcessesLogRecords(unittest.TestCase):\n\n    def test_then_records_should_not_be_garbled(self):\n
stream = StringIO()\n subject = MultiProcessingHandler(\n 'mp-handler', logging.StreamHandler(stream=stream))\n logger = logging.Logger('root')\n logger.addHandler(subject)\n\n def worker(wid, logger):\n logger.info(\"Worker %d started.\", wid)\n\n time.sleep(random.random())\n\n logger.info(\"Worker %d finished processing.\", wid)\n\n logger.info(\"Starting workers...\")\n procs = [mp.Process(target=worker, args=(wid, logger)) for wid in range(100)]\n for proc in procs:\n proc.daemon = True\n proc.start()\n\n logger.info(\"Workers started.\")\n time.sleep(1)\n\n for proc in procs:\n proc.join(timeout=1)\n logger.info(\"Workers done.\")\n\n time.sleep(0.5) # For log records to propagate.\n\n subject.sub_handler.flush()\n subject.close()\n stream.seek(0)\n lines = stream.readlines()\n self.assertIn(\"Starting workers...\\n\", lines)\n self.assertIn(\"Workers done.\\n\", lines)\n\n valid_line = re.compile(\n r\"(?:Starting workers...)\"\n r\"|(?:Worker \\d+ started\\.)\"\n r\"|(?:Workers started\\.)\"\n r\"|(?:Worker \\d+ finished processing\\.)\"\n r\"|(?:Workers done.)\"\n )\n for line in lines[1:-1]:\n self.assertTrue(re.match(valid_line, line))\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"Application1/Tool/multiprocessing-logging/test_multiprocessing_logging.py","file_name":"test_multiprocessing_logging.py","file_ext":"py","file_size_in_byte":3532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"520084656","text":"\"\"\"\n\nAuthor: Cat Tran\nCreated on: 09/27/2017\n\nA collections of common filters that are commonly used in lipidomics research.\n\"\"\"\nimport pandas as pd\n\n\ndef set_baseline_to_value(dataframe=None, columns=None, rows=None, threshold=100, set_value=None):\n \"\"\"\n This function will take a DataFrame and check to see which value is under a threshold. If a value is under a threshold,\n it will set that value equal to set_value parameter. The columns, and rows in which values are checked can be specified.\n :param dataframe: The pandas DataFrame that hold values that we want to set a baseline.\n :param columns: Columns that contains values to be checked.\n :param rows: Rows that contains values to be checked\n :param threshold: The threshold under which values will be set to set_value\n :param set_value: The value to set to for values that is under or equal to threshold.\n :return: A new DataFrame that contains modified values.\n \"\"\"\n # Raise error when there's no dataframe\n if dataframe is None:\n raise ValueError(\"A dataframe parameter is required\")\n # use all dataframe columns if no columns parameter specified\n if columns is None:\n columns = list(dataframe)\n # use all rows if no rows parameter specified, currently all rows are considered\n if rows is None:\n pass\n # if no set_value is passed, default to the threshold value\n if set_value is None:\n set_value = threshold\n\n new_df = dataframe.copy()\n new_df = new_df.where(new_df >= threshold, set_value, raise_on_error=False)\n return new_df\n\n\ndef neutral_loss_filter(dataframe=None, nl_column='neutral_loss', threshold=0):\n \"\"\"\n This method accepts a dataframe that contains a neutral loss column as parameter; it will return a new dataframe that\n only contains rows where neutral loss value is greater or equal to the threshold. 
The original Dataframe is unmodified.\n :param dataframe: A pandas Dataframe contains a neutral loss column.\n :param nl_column: A string name for the neutral loss column.\n :param threshold: A float value for the threshold.\n :return:\n \"\"\"\n if dataframe is None:\n raise ValueError(\"The 'dataframe' parameter containing a neutral loss column must be specified!\")\n\n new_df = dataframe.copy() # make a copy to avoid modifying the original Dataframe\n new_df = new_df[new_df[nl_column] >= threshold] # will potentially throw column not found or key-error error\n return new_df\n\n\ndef low_average_filter(dataframe=None, columns=None, threshold=100):\n \"\"\"\n This method accepts a dataframe in which row values are numeric intensities and calculate the average of each row. If\n the average of each row is not greater or equal than the threshold, it will filter out those rows, and return a new\n dataframe. The original dataframe will be unmodified.\n :param dataframe: A pandas Dataframe that contains numeric values to be filtered.\n ;:param columns: A list of subset of columns that this method will perform filtering. If the default is None, all columns\n will be considered.\n :param threshold: A float value in which the row averages will be compared against.\n :return: A new pandas Dataframe.\n \"\"\"\n if dataframe is None:\n raise ValueError(\"The 'dataframe' parameter containing numeric values must be specified!\")\n\n new_df = dataframe.copy()\n # If the default value of columns is None, all columns are considered\n if columns is None:\n new_df = new_df.loc[new_df.mean(axis=1) >= threshold]\n else:\n new_df = new_df.loc[new_df[columns].mean(axis=1) >= threshold]\n return new_df\n\n\ndef average_and_max_filter(dataframe=None, columns=None, avg_threshold=100, max_threshold=300):\n \"\"\"\n This method accepts a pandas Dataframe that contains numeric values in its rows. It will perform 2 tests, an average\n test and a max test. The average test is where the average for each row is calculated, if this average is not greater\n or equal to the avg_threshold, that row fails the test. The max test is where the max value of each row is located, if\n the max value of each row is not greater or equal to the max_threshold, that row fails the test. If a row fails BOTH\n tests, it will be filtered out. The returned dataframe only contains the rows that do not fail BOTH test, failing one\n test but passing the other is fine. 
The original dataframe will be unmodified.\n :param dataframe: A pandas Dataframe that contains numeric values.\n :param columns: A list of columns name in which the tests will be performed.\n :param avg_threshold: A threshold for the average test.\n :param max_threshold: A threshold for the max test.\n :return: A new pandas Dataframe.\n \"\"\"\n if dataframe is None:\n raise ValueError(\"The 'dataframe' parameter containing numeric value to be filtered must be specified!\")\n\n new_df = dataframe.copy()\n # if columns is not specified, all columns in the dataframe are considered\n if columns is None:\n new_df = new_df.loc[(new_df.mean(axis=1) >= avg_threshold) | (new_df.max(axis=1) >= max_threshold)]\n else:\n new_df = new_df.loc[(new_df[columns].mean(axis=1) >= avg_threshold) | (new_df[columns].max(axis=1) >= max_threshold)]\n return new_df\n\n\ndef low_average_group_filter(dataframe=None, groups=None, threshold=100):\n \"\"\"\n For each row, this method will go through each group of columns in the dataframe, if all of average of each group is\n below the threshold, that row will be removed for the returned dataframe. So as long as there is at least 1 group's\n average surpasses the threshold, that entire row will be kept. The original dataframe is un-modified.\n :param dataframe: See low_average_filter().\n :param groups: A list of lists of group columns this method will check.\n :param threshold: See low_average_filter().\n :return: A new dataframe.\n \"\"\"\n # This wrapper actually requires a group of column names\n if dataframe is None or groups is None:\n raise ValueError(\"The 'group' parameter must be specified!\")\n new_df = dataframe.copy()\n temp = new_df.apply(lambda row: groups_average_boolean(row=row, groups=groups, threshold=threshold), axis=1)\n return new_df.loc[temp]\n\n\ndef average_and_max_group_filter(dataframe=None, group=None, avg_threshold=100, max_threshold=300):\n \"\"\"\n This is basically a wrapper for average_max_filter()\n :param dataframe: See average_max_filter\n :param group: A list of group columns that this method will consider.\n :param avg_threshold: See average_max_filter\n :param max_threshold: See average_max_filter\n :return: A new dataframe.\n \"\"\"\n if group is None:\n raise ValueError(\"The 'group' parameter must be specified!\")\n return average_and_max_filter(dataframe=dataframe, columns=group, avg_threshold=avg_threshold, max_threshold=max_threshold)\n\n\ndef child_peaks_filter(dataframe=None, child_peaks=[(1, 10)], neu_loss_col='neutral_loss'):\n \"\"\"\n This method will filter out any rows that have neutral loss value falls between the 'child_peaks' parameter pair. As\n result, it will only return a new dataframe that contains only rows with neutral loss values that do not fall between\n child_peaks parameter pair/s. 
The original dataframe is un-modified.\n :param dataframe: An original dataframe that need child peaks filtered.\n :param child_peaks: A list of tuples that contains the range within which neutral loss values are filtered out.\n :param neu_loss_col: A string column name where the neutral loss values can be found.\n :return: A new dataframe\n \"\"\"\n if dataframe is None:\n raise ValueError(\"The 'dataframe' parameter must be specified!\")\n new_df = dataframe.copy()\n # perform child peak filter for each of the (min,max) child peaks in the 'child_peaks' parameters\n for pair in child_peaks:\n # filter, and concatenate rows that have neutral loss above the max and rows that have neutral loss below the min\n new_df = pd.concat([new_df[new_df[neu_loss_col] > max(pair)], new_df[new_df[neu_loss_col] < min(pair)]])\n return new_df\n\n\n\ndef group_quantile_filter(dataframe=None, group=None, quantile=0.3, threshold=100):\n \"\"\"\n This method will look at a group of columns in the dataframe and perform a quantile test. For each row in that group,\n if there is at least the quantile (fyi, 0.3 = 30%) number of the cells that is above the threshold, it will keep that row in the new dataframe.\n Otherwise, it will filter that row out. The original dataframe will be unmodified.\n :param dataframe: A pandas Dataframe.\n :param group: A list of columns names that we want to perform the test against.\n :param quantile: A quantile (on the scale of [0, 1]).\n :param threshold: A float value for which the quantile is compared against.\n :return: A new dataframe.\n \"\"\"\n if dataframe is None or group is None:\n raise ValueError(\"Both 'dataframe' and 'groups' parameters must be specified!\")\n if (quantile < 0 or quantile > 1):\n raise ValueError(\"The 'quantile' parameter must be within the range of [0, 1] (quantile = 0 or 1 is acceptable)!\")\n new_df = dataframe.copy()\n new_df = new_df.loc[ (new_df[group] >= threshold).apply(lambda row: (row==True).sum()/len(row) >= quantile, axis=1) ]\n return new_df\n\n\n#--------------------------------------------------- helper methods----------------------------------------------//\n\ndef groups_average_boolean(row=None, groups=None, threshold=100):\n \"\"\"\n A helper method for 'low_group_average_filter()'\n :param row:\n :return:\n \"\"\"\n kept = False\n for group in groups:\n kept = row[group].mean() >= threshold\n if kept:\n return kept\n return kept","sub_path":"api/filters.py","file_name":"filters.py","file_ext":"py","file_size_in_byte":9815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"133306388","text":"#!/usr/bin/env python\n# coding: utf-8\n\n\nimport argparse\nfrom niddk_covid_sicr import data\nfrom pathlib import Path\n\n# Parse all the command-line arguments\nparser = argparse.ArgumentParser(description='Get data to use for fitting')\nparser.add_argument('-dp', '--data-path', default='./data',\n help='Path for storing data')\nparser.add_argument('-s', '--sources', default=['jhu', 'canada', 'brazil'],\n nargs='+', help='Data sources to use.')\nparser.add_argument('-fi', '--filter', default=0, type=int,\n help='Whether or not to filter based on data thresholds')\nparser.add_argument('-fn', '--fix-negatives', default=0, type=int,\n help=(\"Whether or not to fix negative values \"\n \"in the daily data or not\"))\nparser.add_argument('-nm', '--negify-missing', default=1, type=int,\n help=(\"Whether or not to set putative missing values, i.e.\"\n \"where that value is 0 for all days, to -1 in 
the \"\n                          \"daily change (i.e. a column of all 0's for cumulative \"\n                          \"recovered becomes a column of all -1's for new \"\n                          \"recovered\"))\nparser.add_argument('-ror', '--remove-old-rois', default=1, type=int,\n                    help=(\"Remove rois that no longer report adequate data from data_path if present \"))\n\nargs = parser.parse_args()\n\n# Create the data path\ndata_path = Path(args.data_path)\ndata_path.mkdir(parents=True, exist_ok=True)\nassert data_path.exists(), \"%s is not a valid data path\" % data_path.resolve()\n\n\ndef get_scraper(name):\n    func_name = 'get_%s' % name.replace('-', '_')\n    try:\n        f = getattr(data, func_name)\n    except AttributeError:\n        raise Exception(\"No function named %s in the data.py module\"\n                        % func_name)\n    return f\n\n\nfor source in args.sources:\n    print(\"Getting data from %s...\" % source)\n    f = get_scraper(source)\n    f(data_path, filter_=args.filter)\n\nif args.fix_negatives:\n    print(\"Fixing negative values in daily data...\")\n    data.fix_negatives(data_path)\n\nif args.negify_missing:\n    print(\"Replacing missing columns with -1 in daily data...\")\n    data.negify_missing(data_path)\n\nif args.remove_old_rois:\n    print(\"Removing old regions that no longer report data:\")\n    data.remove_old_rois(data_path)\n\nprint(\"Data now available at %s\" % data_path.resolve())\n","sub_path":"scripts/get-data.py","file_name":"get-data.py","file_ext":"py","file_size_in_byte":2438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"17589636","text":"import importlib\nimport logging\nimport sys\nimport os\nimport time\n\ndef add_folder(folder):\n    logging.info('\\n')\n    logging.info(''.center(80, '#'))\n    logging.info('##{}##'.format(folder.center(76)))\n    logging.info(''.center(80, '#'))\n    module = importlib.import_module('{}.pack'.format(folder))\n    module.main()\n\ndef setup_logger():\n    filename = 'run_{}.sh'.format(time.time())\n    if len(sys.argv) > 1:\n        foldername = sys.argv[1]\n    else:\n        foldername = 'build'\n    if not os.path.isdir(foldername):\n        os.mkdir(foldername)\n    with open('{}/{}'.format(foldername, filename), 'w'):\n        pass\n    rootLogger = logging.getLogger()\n    rootLogger.setLevel(logging.DEBUG)\n\n    console = logging.StreamHandler(sys.stdout)\n    console.setLevel(logging.DEBUG)\n    rootLogger.addHandler(console)\n    file = logging.FileHandler('{}/{}'.format(foldername, filename))\n    file.setLevel(logging.DEBUG)\n    rootLogger.addHandler(file)\n\nsetup_logger()\n","sub_path":"toolkit.py","file_name":"toolkit.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"503525556","text":"import openpyxl\n\nimport functions  # external module expected to provide clas()\n\nwb = openpyxl.load_workbook('bd.xlsx')\nsheet = wb.active\nkol = sheet.max_row\n\ndef User(peer_id, text):\n    # openpyxl rows are 1-indexed; compare cell values, not cell objects\n    for i in range(1, kol + 1):\n        if sheet['A' + str(i)].value == peer_id:\n            return sheet['B' + str(i)].value\n    # unknown peer: classify the text and append a new row\n    cls = functions.clas(text).strip()\n    sheet['A' + str(kol + 1)] = peer_id\n    sheet['B' + str(kol + 1)] = cls\n    wb.save('bd.xlsx')\n    return cls\n","sub_path":"sql.py","file_name":"sql.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"105521735","text":"#-*- coding:utf-8 -*-\n\nimport os\n\nsource_path = '/home/xjyu/kgduan/plate_tfmtcnn/prepare_data/plate_list'\ntarget_path = '/home/xjyu/kgduan/plate_tfmtcnn/prepare_data/plate_list/coord_with_color/'\ntxt_files = os.listdir(source_path)\n\n# filenames of interest, listed once instead of a long chain of equality tests\n# (a duplicated 'data_augmentation_night_list.txt' entry has been collapsed)\nWANTED_FILES = (\n    '100.txt', '101.txt', '102.txt', '103.txt', '104.txt', '105.txt',\n    '106.txt', '107.txt', '108.txt', '109.txt',\n    'data_augmentation_night_list.txt', 'data_augmentation_night_list_2.txt',\n    'data_augmentation_police_list.txt',\n    'night_100.txt', 'night_101.txt', 'night_list.txt', 'night_list_2.txt',\n)\n\nfor txt in txt_files:\n    if txt in WANTED_FILES:\n        txt_path = os.path.join(source_path, txt)\n        new_txt_name = txt[:-4] + '_with_color.txt'\n        new_txt_path = target_path + new_txt_name\n        with open(new_txt_path, 'w') as nf:\n            with open(txt_path, 'r') as f:\n                for line in f:\n                    line = line.strip('\\n')\n                    if not line:\n                        break\n                    # append a numeric color label: blue -> 1, yellow -> 2, white -> 0\n                    if '蓝' in line:\n                        nf.write(line + ' 1\\n')\n                    if '黄' in line:\n                        nf.write(line + ' 2\\n')\n                    if '白' in line:\n                        nf.write(line + ' 0\\n')\n","sub_path":"plate_tfmtcnn_tools/get_plate_color.py","file_name":"get_plate_color.py","file_ext":"py","file_size_in_byte":1661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"550397582","text":"import unittest\nfrom unittest.mock import patch\nimport sys\nimport os\nfrom scipy import stats\nsys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nfrom utils.train import prepare_samples, train, get_param_distribution, log_uniform\nfrom utils.db.db import Notice, NoticeType, Attachment, Model, now_minus_two\nfrom utils.db.db_utils import get_db_url, session_scope, insert_updated_nightly_file, \\\n    DataAccessLayer, clear_data, object_as_dict, fetch_notice_type_id, \\\n    insert_model, insert_notice_types, retrain_check, \\\n    get_validation_count, get_trained_count, \\\n    get_validated_untrained_count, fetch_validated_attachments, \\\n    fetch_last_score\n\nclass TrainTestCase(unittest.TestCase):\n    def setUp(self):\n        self.attachments = [\n            {\n                'text':\"this is a test of automagic.\",\n                'target':1\n            },\n            {\n                'text':\"this is a test of automagic.\",\n                'target':1\n            },\n            {\n                'text':\"this is another test.\",\n                'target':1\n            },\n            {\n                'text':\"this is another test. 
\",\n 'target':1\n },\n {\n 'text':'this is another test',\n 'target':1\n },{\n 'text':'this is another test',\n 'target':1\n },{\n 'text':'this is another test',\n 'target':0\n },{\n 'text':'this is another test',\n 'target':0\n },{\n 'text':'this is another test',\n 'target':0\n },{\n 'text':'this is another test',\n 'target':0\n },{\n 'text':'this is another test',\n 'target':0\n },{\n 'text':'this is another test',\n 'target':1\n },\n {\n 'text':\"this is a test of automagic.\",\n 'target':1\n },\n {\n 'text':\"this is a test of automagic.\",\n 'target':1\n },\n {\n 'text':\"this is a test of automagic.\",\n 'target':1\n },\n {\n 'text':\"this is a test of the grid search.\",\n 'target':0\n },\n {\n 'text':\"this is a test of the grid search.\",\n 'target':0\n },{\n 'text':\"this is a test of the grid search.\",\n 'target':0\n },{\n 'text':\"this is a test of the grid search.\",\n 'target':0\n },{\n 'text':\"this is a test of the grid search.\",\n 'target':0\n },{\n 'text':\"this is a test of the grid search.\",\n 'target':0\n },{\n 'text':\"this is a test of the grid search.\",\n 'target':0\n }\n ]\n\n def tearDown(self):\n self.attachments = None\n\n def test_prepare_samples(self):\n X, _ = prepare_samples(self.attachments)\n result = len(X)\n expected = 22\n self.assertEqual(result, expected)\n\n @patch('utils.train.get_param_distribution')\n def test_train(self, param_dist_mock):\n param_dist = {\n \"vectorizer__ngram_range\":[(1,1), (1,2)],\n \"vectorizer__min_df\":stats.randint(1,3),\n \"vectorizer__max_df\":stats.uniform(.95,.3),\n \"vectorizer__sublinear_tf\":[True, False],\n \"select__k\":['all'],\n \"clf__alpha\": log_uniform(-5,2),\n \"clf__penalty\": ['l2','l1','elasticnet'],\n \"clf__loss\": ['hinge', 'log', 'modified_huber', 'squared_hinge', 'perceptron'],\n }\n param_dist_mock.return_value = param_dist\n X, y = prepare_samples(self.attachments)\n try:\n _, _, _, _ = train(X, \n y,\n n_iter_search = 10,\n score = \"accuracy\")\n except:\n self.fail(\"train() raised an exception!\")\n \n\nif __name__ == '__main__':\n unittest.main()","sub_path":"tests/train_test.py","file_name":"train_test.py","file_ext":"py","file_size_in_byte":4342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"602698934","text":"import unittest\nfrom typing import List, Dict, Set\n\nclass Graph(object):\n def __init__(self, n: int, adj_list:Dict[int, Set[int]]):\n self._n = n\n self.adj_list = adj_list\n\n def size(self)->int:\n return self._n\n\n def dfs(self, start_vertex)->List[int]:\n visited = [False] * (self._n + 1)\n path = []\n self._dfs(start_vertex, visited, path)\n return path\n\n def get_connected_components(self)->List[List[int]]:\n visited = [False] * (self._n + 1)\n components = []\n for vertex in range(1, self._n + 1):\n if not visited[vertex]:\n path = []\n self._dfs(vertex, visited, path)\n components.append(path)\n\n return components\n\n def _dfs(self, vertex: int, visited:List[int], path: List[int])->None:\n if visited[vertex]:\n return\n \n visited[vertex] = True\n path.append(vertex)\n neighbours = self._adj_list.get(vertex)\n if neighbours is not None:\n for neighbour in neighbours:\n self._dfs(neighbour, visited, path)\n\n def bfs(self, start_vertex, end_vertex = None)->List[int]:\n visited = [False] * (self._n + 1)\n path = []\n queue = [start_vertex]\n vertex = start_vertex\n while len(queue) > 0 and vertex != end_vertex:\n vertex = queue.pop(0)\n if not visited[vertex]:\n visited[vertex] = True\n path.append(vertex)\n 
neighbours = self._adj_list.get(vertex)\n if neighbours is not None:\n for neighbour in neighbours:\n queue.append(neighbour)\n return path\n\n def shortest_path(self, start_vertex: int, end_vertex: int)->List[int]:\n if start_vertex is None or end_vertex is None:\n raise ValueError(\"invalid input\")\n visited = [False] * (self._n + 1)\n visited[start_vertex] = True\n prev = [None] * (self._n + 1)\n queue = [start_vertex]\n vertex = start_vertex\n found = False\n while len(queue) > 0 and not found:\n vertex = queue.pop(0)\n neighbours = self._adj_list.get(vertex)\n if neighbours is not None:\n for neighbour in neighbours:\n if not visited[neighbour]:\n visited[neighbour] = True\n prev[neighbour] = vertex\n if neighbour != end_vertex:\n queue.append(neighbour)\n else:\n found = True\n break\n if found: \n shortest_path = []\n at = end_vertex\n while at is not None:\n shortest_path.append(at)\n at = prev[at]\n shortest_path.reverse()\n return shortest_path\n return []\n\nclass Tests(unittest.TestCase):\n def setUp(self):\n adj_list = dict()# adjacency list\n adj_list[1] = {2, 3}\n adj_list[2] = {4}\n adj_list[3] = {4}\n adj_list[4] = {5}\n adj_list[5] = {6, 7, 8}\n self.graph = Graph(9, adj_list)\n\n def test_dfs(self): \n path = self.graph.dfs(1)\n self.assertEqual([1, 2, 4, 5, 8, 6, 7, 3], path)\n\n def test_bfs(self): \n path = self.graph.bfs(1)\n self.assertEqual([1, 2, 3, 4, 5, 8, 6, 7], path)\n\n def test_connected_components(self): \n connected_components = self.graph.get_connected_components()\n self.assertEqual([[1, 2, 4, 5, 8, 6, 7, 3], [9]], connected_components)\n\n def test_shortest_path(self):\n adj_list = dict()# adjacency list\n adj_list[1] = {2, 3}\n adj_list[2] = {1, 6}\n adj_list[3] = {4, 5}\n adj_list[4] = {3, 9}\n adj_list[5] = {3, 7}\n adj_list[6] = {2, 7}\n adj_list[7] = {5, 6, 8}\n adj_list[8] = {7, 9}\n adj_list[9] = {4, 8}\n graph = Graph(9, adj_list)\n shortest_path = graph.shortest_path(1, 7)\n self.assertEqual([1, 2, 6, 7], shortest_path)\n\nif __name__ == \"__main__\":\n unittest.main(verbosity=2)","sub_path":"Graphs/graph_traversals.py","file_name":"graph_traversals.py","file_ext":"py","file_size_in_byte":4223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"85968937","text":"from curses.ascii import HT\nimport re\nfrom django.contrib import messages\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.decorators import login_required\n\nfrom django import forms\n\nfrom django.urls import reverse\n\nfrom django.http import JsonResponse, HttpResponse, Http404\n\nfrom django.shortcuts import render, redirect\n\nfrom django.template.loader import render_to_string\n\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.views.generic.edit import DeleteView, UpdateView, CreateView\n\nfrom django.utils.decorators import method_decorator\n\nfrom rest_framework.authtoken.models import Token\nfrom rest_framework.decorators import api_view\n\nfrom website.views import homepage_view\n\nfrom .forms import ProfileForm, RecordingForm, SettingsForm, KeyEventForm, MouseEventForm, WaitEventForm, UserForm\nfrom .models import Profile, KeyEvent, MouseEvent, Recording, Settings, WaitEvent\nfrom .serializers import KeyEventSerializer, MouseEventSerializer, WaitEventSerializer\nfrom .utils import (\n get_settings,\n get_current_profile,\n toggle_play_mode,\n start_recording,\n stop_recording,\n serialize_events,\n sync,\n get_profiles,\n set_current_profile,\n get_favorite_profiles,\n 
get_settings_from_token,\n Http404,\n convert_from_url_safe_key_code,\n toggle_record_waits,\n)\n\nimport json\nimport html\n\n\ndef toggle_favorite_profile(request, pk):\n p = Profile.objects.get(pk=pk)\n p.is_favorite = not p.is_favorite\n p.save()\n return redirect(\"website:homepage\")\n\n\n@method_decorator(login_required, name=\"dispatch\")\nclass UpdateUserView(UpdateView):\n model = User\n form_class = UserForm\n template_name_suffix = \"_update_form\"\n template_name = \"macros/user_update_form.html\"\n success_url = \"/\"\n\n def get_object(self, *args, **kwargs):\n obj = super(UpdateUserView, self).get_object(*args, **kwargs)\n if not obj == self.request.user:\n raise Http404\n return obj\n\n def form_valid(self, form):\n messages.success(self.request, \"Profile was updated successfully!\")\n return super().form_valid(form)\n \n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"settings\"] = get_settings(self.request.user)\n context[\"tips\"] = [\n {\n \"pk\": 1,\n \"title\": \"Organization is Outstanding\",\n \"description\": \"The whole point of macros is to save you time. What’s the point if you’re spending the same amount of time looking for your macros in janky profiles?\",\n },\n {\n \"pk\": 2,\n \"title\": \"Generality is Great\",\n \"description\": \"Optimizing one task is good, but optimizing more than one with a single macros is great. Think of tasks that can benefit from the same action, create a macro that is one-size-fits-all.\",\n },\n {\n \"pk\": 3,\n \"title\": \"Punctuality is Pertinent\",\n \"description\": \"Nicknames, abbreviations, and shorting words in general is great. BUT, try not to get to crazy with it. Because when you have to come back to it in a bit and forget what it means, uh no, no time saved.\",\n },\n ]\n return context\n\n\n@method_decorator(login_required, name=\"dispatch\")\nclass UpdateSettingsView(UpdateView):\n model = Settings\n form_class = SettingsForm\n template_name_suffix = \"_update_form\"\n success_url = \"/\"\n\n def get_object(self, *args, **kwargs):\n obj = super(UpdateSettingsView, self).get_object(*args, **kwargs)\n if not obj.user == self.request.user:\n raise Http404\n return obj\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"settings\"] = get_settings(self.request.user)\n context[\"token\"] = Token.objects.get_or_create(user=self.request.user)[0]\n context[\"tips\"] = [\n {\n \"pk\": 1,\n \"title\": \"Organization is Outstanding\",\n \"description\": \"The whole point of macros is to save you time. What’s the point if you’re spending the same amount of time looking for your macros in janky profiles?\",\n },\n {\n \"pk\": 2,\n \"title\": \"Generality is Great\",\n \"description\": \"Optimizing one task is good, but optimizing more than one with a single macros is great. Think of tasks that can benefit from the same action, create a macro that is one-size-fits-all.\",\n },\n {\n \"pk\": 3,\n \"title\": \"Punctuality is Pertinent\",\n \"description\": \"Nicknames, abbreviations, and shorting words in general is great. BUT, try not to get to crazy with it. 
Because when you have to come back to it in a bit and forget what it means, uh no, no time saved.\",\n },\n ]\n return context\n\n def form_valid(self, form):\n messages.success(self.request, \"Settings were updated successfully!\")\n return super().form_valid(form)\n\n\n@login_required\ndef stop_showing_sharing(request):\n settings = get_settings(request.user)\n settings.show_social_sharing = False\n settings.save()\n return redirect(\"website:homepage\")\n\n\n@login_required\ndef get_recording_details(request, pk):\n return JsonResponse({\"test\": \"yolo\"}, status=200)\n\n\n@login_required\ndef quit_tutorial(request):\n try:\n settings = get_settings(request.user)\n settings.offer_tutorial = False\n settings.show_social_sharing = True\n settings.save()\n except Exception as e:\n print(str(e))\n return redirect(\"website:homepage\")\n\n\ndef toggle_play_mode_view(request, toggle, token):\n toggle_play_mode(token, toggle)\n return JsonResponse({}, status=200)\n\ndef toggle_record_waits_view(request):\n toggle_record_waits(request.user)\n return redirect('website:homepage')\n\n\ndef sync_view(request, token):\n settings = sync(token)\n return JsonResponse(\n {\n \"recording_key\": settings.recording_key,\n \"play_mode_key\": settings.play_mode_key,\n \"quick_play_key\": settings.quick_play_key,\n \"record_waits\": settings.record_waits,\n }, \n status=200\n )\n\n\n# ------------------------------------------------------------------------------\n# PROFILE RELATED VIEWS\n# ------------------------------------------------------------------------------\n\n\n@method_decorator(login_required, name=\"dispatch\")\nclass CreateProfileView(CreateView):\n model = Profile\n form_class = ProfileForm\n success_url = \"/\"\n\n def form_valid(self, form):\n form.instance.user = self.request.user\n messages.success(\n self.request,\n 'Profile \"{0}\" has been created successfully!'.format(\n form.cleaned_data[\"name\"]\n ),\n )\n return super(CreateProfileView, self).form_valid(form)\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"profiles\"] = get_profiles(self.request.user)\n context[\"settings\"] = get_settings(self.request.user)\n context[\"current_profile\"] = get_current_profile(self.request.user)\n context[\"favorite_profiles\"] = get_favorite_profiles(self.request.user)\n context[\"tips\"] = [\n {\n \"pk\": 1,\n \"title\": \"Organization is Outstanding\",\n \"description\": \"The whole point of macros is to save you time. What’s the point if you’re spending the same amount of time looking for your macros in janky profiles?\",\n },\n {\n \"pk\": 2,\n \"title\": \"Generality is Great\",\n \"description\": \"Optimizing one task is good, but optimizing more than one with a single macros is great. Think of tasks that can benefit from the same action, create a macro that is one-size-fits-all.\",\n },\n {\n \"pk\": 3,\n \"title\": \"Punctuality is Pertinent\",\n \"description\": \"Nicknames, abbreviations, and shorting words in general is great. BUT, try not to get to crazy with it. 
Because when you have to come back to it in a bit and forget what it means, uh no, no time saved.\",\n },\n ]\n return context\n\n\n@method_decorator(login_required, name=\"dispatch\")\nclass DeleteProfileView(DeleteView):\n model = Profile\n success_url = \"/\"\n\n def get_object(self, *args, **kwargs):\n obj = super(DeleteProfileView, self).get_object(*args, **kwargs)\n if not obj.user == self.request.user:\n raise Http404\n return obj\n\n def form_valid(self, form):\n object = self.get_object()\n # Change user's current profile, only if it is the one being deleted\n try:\n settings = get_settings(self.request.user)\n if settings.current_profile.pk == object.pk:\n profile = get_profiles(settings.user).exclude(pk=object.pk).first()\n print(str(profile))\n settings.current_profile = profile\n settings.save()\n except Exception as outer_error:\n print(outer_error)\n settings.current_profile = None\n settings.save()\n messages.error(\n self.request, \"You have no profiles to set as your current profile.\"\n )\n\n messages.success(self.request, \"Profile Deleted!\")\n return super().form_valid(form)\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"profiles\"] = get_profiles(self.request.user)\n context[\"settings\"] = get_settings(self.request.user)\n return context\n\n\n@method_decorator(login_required, name=\"dispatch\")\nclass UpdateProfileView(UpdateView):\n model = Profile\n form_class = ProfileForm\n template_name_suffix = \"_update_form\"\n success_url = \"/\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"settings\"] = get_settings(self.request.user)\n return context\n\n def get_object(self, *args, **kwargs):\n obj = super(UpdateProfileView, self).get_object(*args, **kwargs)\n if not obj.user == self.request.user:\n raise Http404\n return obj\n\n def form_valid(self, form):\n messages.success(\n self.request,\n 'The profile \"{0}\" was updated successfully!'.format(\n self.get_object().name\n ),\n )\n return super().form_valid(form)\n\n\n@login_required\ndef set_current_profile_view(request, pk):\n set_current_profile(request.user, pk)\n messages.success(\n request,\n 'Current profile set to \"{0}\"!'.format(get_current_profile(request.user)),\n )\n return redirect(\"website:homepage\")\n\n\n# ------------------------------------------------------------------------------\n# RECORDING RELATED VIEWS\n# ------------------------------------------------------------------------------\n\n\ndef update_recording_events_order_view(request):\n data = json.load(request)\n events = data.get(\"events\")\n for i, event in enumerate(events):\n # Get the pk of the event\n event_pk = int(event.split(\"-\")[1])\n # Check if it's a keyboard or mouse event\n if \"mouse\" in event:\n event_obj = MouseEvent.objects.get(pk=event_pk)\n elif \"key\" in event:\n event_obj = KeyEvent.objects.get(pk=event_pk)\n else:\n event_obj = WaitEvent.objects.get(pk=event_pk)\n event_obj.order_in_recording = i + 1\n event_obj.save()\n messages.success(\n request,\n 'The recording \"{0}\" was updated successfully!'.format(\n event_obj.recording.name\n ),\n )\n return JsonResponse({\"status\": 200})\n\n\ndef save_recording(request):\n\n # Make sure user has a temporary recording\n form = RecordingForm()\n r = Recording.objects.filter(profile__user=request.user, is_temp=True)\n if not r.exists():\n\n messages.error(\n request,\n \"You have no temporary recording. 
Please record one first!\",\n )\n\n return redirect(\"website:homepage\")\n\n # Check if user is trying to save a temporary recording\n if request.method == \"POST\":\n form = RecordingForm(request.POST)\n form.user = request.user\n if form.is_valid():\n r = Recording.objects.get(profile__user=request.user, is_temp=True)\n r.name = form.cleaned_data[\"name\"]\n r.key_code = form.cleaned_data[\"key_code\"]\n r.description = form.cleaned_data[\"description\"]\n r.profile = form.cleaned_data[\"profile\"]\n r.is_temp = False\n r.save()\n messages.success(request, \"Recording saved!\")\n return redirect(\"website:homepage\")\n else:\n print(form.errors)\n\n # Display form to allow saving\n r = Recording.objects.get(profile__user=request.user, is_temp=True)\n\n context = {\n \"settings\": get_settings(request.user),\n \"recording\": r,\n \"form\": form,\n }\n context[\"tips\"] = [\n {\n \"pk\": 1,\n \"title\": \"Organization is Outstanding\",\n \"description\": \"The whole point of macros is to save you time. What’s the point if you’re spending the same amount of time looking for your macros in janky profiles?\",\n },\n {\n \"pk\": 2,\n \"title\": \"Generality is Great\",\n \"description\": \"Optimizing one task is good, but optimizing more than one with a single macros is great. Think of tasks that can benefit from the same action, create a macro that is one-size-fits-all.\",\n },\n {\n \"pk\": 3,\n \"title\": \"Punctuality is Pertinent\",\n \"description\": \"Nicknames, abbreviations, and shorting words in general is great. BUT, try not to get to crazy with it. Because when you have to come back to it in a bit and forget what it means, uh no, no time saved.\",\n },\n ]\n actions = []\n for event in KeyEvent.objects.filter(recording=r.pk):\n actions.append({\"type\": \"key\", \"event\": event})\n for event in MouseEvent.objects.filter(recording=r.pk):\n actions.append({\"type\": \"mouse\", \"event\": event})\n for event in WaitEvent.objects.filter(recording=r.pk):\n actions.append({\"type\": \"wait\", \"event\": event})\n actions.sort(key=lambda x: x[\"event\"].order_in_recording)\n context[\"actions\"] = actions\n return render(request, \"macros/save_recording.html\", context)\n\n\nclass DeleteRecordingView(DeleteView):\n model = Recording\n success_url = \"/\"\n\n def get_object(self, *args, **kwargs):\n obj = super(DeleteRecordingView, self).get_object(*args, **kwargs)\n if not obj.profile.user == self.request.user:\n raise Http404\n return obj\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"profiles\"] = get_profiles(self.request.user)\n context[\"settings\"] = get_settings(self.request.user)\n return context\n\n\ndef download_recording(request, token, key_char):\n try:\n user = Token.objects.get(key=token).user\n print(str(\"KEYCODE: \" + convert_from_url_safe_key_code(key_char)))\n recording = Recording.objects.get(\n key_code=convert_from_url_safe_key_code(key_char),\n profile=get_settings(user).current_profile,\n )\n events = recording.get_serializable_events()\n\n key_event_serializer = KeyEventSerializer(events[\"key_events\"], many=True)\n mouse_event_serializer = MouseEventSerializer(events[\"mouse_events\"], many=True)\n wait_event_serializer = WaitEventSerializer(events[\"wait_events\"], many=True)\n return JsonResponse(\n {\"events\": key_event_serializer.data + mouse_event_serializer.data + wait_event_serializer.data}, status=200\n )\n\n except Recording.DoesNotExist as e:\n print(e)\n return HttpResponse(status=404)\n\n\ndef 
start_recording_view(request, token):\n try:\n if get_settings_from_token(token).play_mode:\n start_recording(token)\n else:\n return JsonResponse({\"errors\": \"Play mode is not active\"}, status=404)\n except Exception as e:\n return JsonResponse({\"errors\": \"Invalid token\"}, status=404)\n return JsonResponse({}, status=200)\n\n\n@csrf_exempt\n@api_view([\"GET\", \"POST\", \"PUT\", \"DELETE\"])\ndef stop_recording_view(request, token):\n settings = get_settings_from_token(token)\n\n # Delete temp recordings\n Recording.objects.filter(profile__user=settings.user, is_temp=True).delete()\n\n # Create new temp recording to save actions to.\n new_recording = Recording.objects.create(\n profile=settings.current_profile,\n name=\"_temp\",\n is_temp=True,\n key_code=settings.quick_play_key,\n )\n\n # Serialize incoming events\n serialization_result = serialize_events(request.data, new_recording)\n if serialization_result[\"errors\"] == None:\n return JsonResponse({}, status=200)\n else:\n return JsonResponse({\"errors\": serialization_result[\"errors\"]}, status=400)\n\n\n@method_decorator(login_required, name=\"dispatch\")\nclass UpdateRecordingView(UpdateView):\n model = Recording\n form_class = RecordingForm\n template_name_suffix = \"_update_form\"\n success_url = \"/\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"settings\"] = get_settings(self.request.user)\n return context\n\n\n@login_required\ndef delete_mouse_event_view(request, pk):\n try:\n # Get the event to delete\n event = MouseEvent.objects.get(pk=pk)\n\n # Make sure user owns event\n if event.recording.profile.user != request.user:\n raise Http404\n\n # Delete the event\n event.delete()\n\n except Exception as e:\n print(str(e))\n\n return HttpResponse(status=200)\n\n\n@login_required\ndef delete_key_event_view(request, pk):\n try:\n # Get the event to delete\n event = KeyEvent.objects.get(pk=pk)\n\n # Make sure user owns event\n if event.recording.profile.user != request.user:\n raise Http404\n\n # Delete the event\n event.delete()\n\n except Exception as e:\n print(str(e))\n\n return HttpResponse(status=200)\n\n\n@login_required\ndef delete_wait_event_view(request, pk):\n try:\n # Get the event to delete\n event = WaitEvent.objects.get(pk=pk)\n\n # Make sure user owns event\n if event.recording.profile.user != request.user:\n raise Http404\n\n # Delete the event\n event.delete()\n\n except Exception as e:\n print(str(e))\n\n return HttpResponse(status=200)\n\n\n# ------------------------------------------------------------------------------\n# EVENT RELATED VIEWS\n# ------------------------------------------------------------------------------\n\n@login_required\ndef add_key_event_view(request, recording_pk):\n if request.method == \"POST\":\n # Save new event\n form = KeyEventForm(request.POST)\n if form.is_valid():\n event = form.save(commit=False)\n event.recording = Recording.objects.get(pk=recording_pk)\n event.order_in_recording = event.recording.get_events()[-1].order_in_recording + 1\n event.save()\n return redirect('macros:update_recording', pk=recording_pk)\n # Return new event form\n return JsonResponse(status=200, data={\"html\": render_to_string('macros/event_form.html', {\"form\": KeyEventForm(), \"type\": \"Key\"})})\n\n\ndef add_mouse_event_view(request, recording_pk):\n if request.method == \"POST\":\n # Save new event\n form = MouseEventForm(request.POST)\n if form.is_valid():\n event = form.save(commit=False)\n event.recording = 
Recording.objects.get(pk=recording_pk)\n event.order_in_recording = event.recording.get_events()[-1].order_in_recording + 1\n event.save()\n else:\n print(form.errors)\n return redirect('macros:update_recording', pk=recording_pk)\n # Return new event form\n return JsonResponse(status=200, data={\"html\": render_to_string('macros/event_form.html', {\"form\": MouseEventForm(), \"type\": \"Mouse\"})})\n \n\ndef add_wait_event_view(request, recording_pk):\n if request.method == \"POST\":\n # Save new event\n form = WaitEventForm(request.POST)\n if form.is_valid():\n event = form.save(commit=False)\n event.recording = Recording.objects.get(pk=recording_pk)\n event.order_in_recording = event.recording.get_events()[-1].order_in_recording + 1\n event.save()\n else:\n msg = form.errors['wait_time'][0].replace(\"\\n\", \"\")\n messages.error(request, msg)\n return redirect('macros:update_recording', pk=recording_pk)\n # Return new event form\n return JsonResponse(status=200, data={\"html\": render_to_string('macros/event_form.html', {\"form\": WaitEventForm(), \"type\": \"Wait\"})})\n","sub_path":"macros/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":21245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"633736837","text":"#encoding:UTF-8\nimport bayes\nfrom numpy import *\nimport re\n\ndef main():\n mySent = 'this book is the best book on python or M.L. I have ever laid eyes upon.'\n regEx = re.compile('\\\\W*')\n listOfTokens = regEx.split(mySent)\n listOfTokens = [tok.lower() for tok in listOfTokens if len(tok)>0]\n print(listOfTokens)\n \n \nif __name__ == '__main__': main()","sub_path":"03bayes/main2.py","file_name":"main2.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"87843498","text":"import aiohttp\nimport asyncio\nimport uvicorn\nfrom fastai import *\nfrom fastai.vision import *\nfrom io import BytesIO\nfrom starlette.applications import Starlette\nfrom starlette.middleware.cors import CORSMiddleware\nfrom starlette.responses import HTMLResponse, JSONResponse, Response\nfrom starlette.staticfiles import StaticFiles\nimport base64\nfrom PIL import Image\nimport matplotlib.pyplot as plt\n\ncodes = ['Animal', 'Archway', 'Bicyclist', 'Bridge', 'Building', 'Car', 'CartLuggagePram', 'Child', 'Column_Pole',\n 'Fence', 'LaneMkgsDriv', 'LaneMkgsNonDriv', 'Misc_Text', 'MotorcycleScooter', 'OtherMoving', 'ParkingBlock',\n 'Pedestrian', 'Road', 'RoadShoulder', 'Sidewalk', 'SignSymbol', 'Sky', 'SUVPickupTruck', 'TrafficCone',\n 'TrafficLight', 'Train', 'Tree', 'Truck_Bus', 'Tunnel', 'VegetationMisc', 'Void', 'Wall']\n\ndef acc_camvid(input, target):\n target = target.squeeze(1)\n mask = target != void_code\n return (input.argmax(dim=1)[mask]==target[mask]).float().mean()\n\nexport_file_url = 'https://www.dropbox.com/s/dth7kg6dak6x3sd/export.pkl?dl=1'\nexport_file_name = 'export.pkl'\n\nclasses = ['black', 'grizzly', 'teddys']\npath = Path(__file__).parent\n\napp = Starlette()\napp.add_middleware(CORSMiddleware, allow_origins=['*'], allow_headers=['X-Requested-With', 'Content-Type'])\napp.mount('/static', StaticFiles(directory='app/static'))\n\n\nasync def download_file(url, dest):\n if dest.exists(): return\n async with aiohttp.ClientSession() as session:\n async with session.get(url) as response:\n data = await response.read()\n with open(dest, 'wb') as f:\n f.write(data)\n\n\nasync def setup_learner():\n await 
download_file(export_file_url, path / export_file_name)\n try:\n learn = load_learner(path, export_file_name)\n return learn\n except RuntimeError as e:\n if len(e.args) > 0 and 'CPU-only machine' in e.args[0]:\n print(e)\n message = \"\\n\\nThis model was trained with an old version of fastai and will not work in a CPU environment.\\n\\nPlease update the fastai library in your training environment and export your model again.\\n\\nSee instructions for 'Returning to work' at https://course.fast.ai.\"\n raise RuntimeError(message)\n else:\n raise\n\n\nloop = asyncio.get_event_loop()\ntasks = [asyncio.ensure_future(setup_learner())]\nlearn = loop.run_until_complete(asyncio.gather(*tasks))[0]\nloop.close()\n\n\n@app.route('/')\nasync def homepage(request):\n html_file = path / 'view' / 'index.html'\n return HTMLResponse(html_file.open().read())\n\n\n@app.route('/analyze', methods=['POST'])\nasync def analyze(request):\n img_data = await request.form()\n img_bytes = await (img_data['file'].read())\n img = open_image(BytesIO(img_bytes))\n img = img.resize(416)\n outputs = learn.predict(img)\n cm_hot = plt.get_cmap('tab10')\n masked = outputs[0].data\n im = np.array(masked)\n im2 = np.squeeze(im)\n im_color = cm_hot(im2)\n resp_bytes = BytesIO()\n PIL.Image.fromarray((im_color*255).astype('uint8')).save(resp_bytes, format='png')\n background=Image.open(BytesIO(img_bytes))\n background=background.resize((416,416))\n overlay=Image.open(resp_bytes)\n background = background.convert(\"RGBA\")\n overlay = overlay.convert(\"RGBA\")\n new_img = Image.blend(background, overlay, 0.5)\n buffered = BytesIO()\n new_img.save(buffered, format=\"png\")\n img_str = base64.b64encode(buffered.getvalue()).decode()\n img_str = \"data:image/png;base64,\" + img_str\n return JSONResponse({'result': str(img_str)})\n\n\n\nif __name__ == '__main__':\n if 'serve' in sys.argv:\n uvicorn.run(app=app, host='0.0.0.0', port=5000, log_level=\"info\")\n\n","sub_path":"app/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":3721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"547754713","text":"from collections import Counter\nfrom typing import List, Tuple, TypeVar, Set\n\nfrom Graph import Graph\nfrom Kdmer import Kdmer\nfrom Read import Read\nfrom ReadPair import ReadPair\nfrom ToDeBruijnGraph import to_debruijn_graph, to_graphviz\nfrom WalkRandomEulerianCycle import walk_eulerian_cycle\n\nT = TypeVar('T')\n\n\n# MARKDOWN\ndef find_unbalanced_nodes(graph: Graph[T]) -> List[Tuple[T, int, int]]:\n unbalanced_nodes = []\n for node in graph.get_nodes():\n in_degree = graph.get_in_degree(node)\n out_degree = graph.get_out_degree(node)\n if in_degree != out_degree:\n unbalanced_nodes.append((node, in_degree, out_degree))\n return unbalanced_nodes\n\n\n# creates a balanced graph from a nearly balanced graph -- nearly balanced means the graph has an equal number of\n# missing outputs and missing inputs.\ndef balance_graph(graph: Graph[T]) -> Tuple[Graph[T], Set[T], Set[T]]:\n unbalanced_nodes = find_unbalanced_nodes(graph)\n nodes_with_missing_ins = filter(lambda x: x[1] < x[2], unbalanced_nodes)\n nodes_with_missing_outs = filter(lambda x: x[1] > x[2], unbalanced_nodes)\n\n graph = graph.copy()\n\n # create 1 copy per missing input / per missing output\n n_per_need_in = [_n for n, in_degree, out_degree in nodes_with_missing_ins for _n in [n] * (out_degree - in_degree)]\n n_per_need_out = [_n for n, in_degree, out_degree in nodes_with_missing_outs for _n in 
[n] * (in_degree - out_degree)]\n assert len(n_per_need_in) == len(n_per_need_out) # need an equal count of missing ins and missing outs to balance\n\n # balance\n for n_need_in, n_need_out in zip(n_per_need_in, n_per_need_out):\n graph.insert_edge(n_need_out, n_need_in)\n\n return graph, set(n_per_need_in), set(n_per_need_out) # return graph with cycle, orig root nodes, orig tail nodes\n# MARKDOWN\n\n\ndef main():\n print(\"
\", end=\"\\n\\n\")\n print(\"`{bm-disable-all}`\", end=\"\\n\\n\")\n try:\n lines = []\n while True:\n try:\n line = input().strip()\n if len(line) > 0:\n lines.append(line)\n except EOFError:\n break\n\n command = lines[0]\n lines = lines[1:]\n counter = Counter(lines)\n if command == 'reads':\n frags = [Read(r, i) for r, c in counter.items() for i in range(c)]\n elif command == 'read-pairs':\n frags = [ReadPair(Kdmer(r.split('|')[0], r.split('|')[2], int(r.split('|')[1])), i) for r, c in counter.items() for i in range(c)]\n else:\n raise\n graph = to_debruijn_graph(frags)\n graph, head_nodes, tail_nodes = balance_graph(graph)\n print(f'Given the fragments {lines}, the artificially balanced de Bruijn graph is...', end=\"\\n\\n\")\n print(f'```{{dot}}\\n{to_graphviz(graph)}\\n```\\n\\n')\n print(f'... with original head nodes at {head_nodes} and tail nodes at {tail_nodes}.')\n finally:\n print(\"
\", end=\"\\n\\n\")\n print(\"`{bm-enable-all}`\", end=\"\\n\\n\")\n\n\nif __name__ == '__main__':\n main()\n\n# if __name__ == '__main__':\n# g = Graph()\n# g.insert_edge('0', '2')\n# g.insert_edge('1', '3')\n# g.insert_edge('2', '1')\n# g.insert_edge('3', '0')\n# g.insert_edge('3', '4')\n# g.insert_edge('6', '3')\n# g.insert_edge('6', '7')\n# g.insert_edge('7', '8')\n# g.insert_edge('8', '9')\n# g.insert_edge('9', '6')\n#\n# g, _, _ = balance_graph(g)\n# path = walk_eularian_cycle(g, '0')\n# print(f'{\"->\".join(path)}')\n","sub_path":"docs/data/learn/Bioinformatics/output/ch3_code/src/BalanceNearlyBalancedGraph.py","file_name":"BalanceNearlyBalancedGraph.py","file_ext":"py","file_size_in_byte":3546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"163783468","text":"# coding: utf-8\n\nfrom __future__ import absolute_import\nfrom datetime import date, datetime # noqa: F401\n\nfrom typing import List, Dict # noqa: F401\n\nfrom swagger_server.models.base_model_ import Model\nfrom swagger_server.models.object import Object # noqa: F401,E501\nfrom swagger_server import util\n\n\nclass ComposedVirtualCardInfo(Model):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n \"\"\"\n def __init__(self, virtual: AllOfComposedVirtualCardInfoVirtual=None, constraints: AllOfComposedVirtualCardInfoConstraints=None, info: AllOfComposedVirtualCardInfoInfo=None): # noqa: E501\n \"\"\"ComposedVirtualCardInfo - a model defined in Swagger\n\n :param virtual: The virtual of this ComposedVirtualCardInfo. # noqa: E501\n :type virtual: AllOfComposedVirtualCardInfoVirtual\n :param constraints: The constraints of this ComposedVirtualCardInfo. # noqa: E501\n :type constraints: AllOfComposedVirtualCardInfoConstraints\n :param info: The info of this ComposedVirtualCardInfo. # noqa: E501\n :type info: AllOfComposedVirtualCardInfoInfo\n \"\"\"\n self.swagger_types = {\n 'virtual': AllOfComposedVirtualCardInfoVirtual,\n 'constraints': AllOfComposedVirtualCardInfoConstraints,\n 'info': AllOfComposedVirtualCardInfoInfo\n }\n\n self.attribute_map = {\n 'virtual': 'virtual',\n 'constraints': 'constraints',\n 'info': 'info'\n }\n self._virtual = virtual\n self._constraints = constraints\n self._info = info\n\n @classmethod\n def from_dict(cls, dikt) -> 'ComposedVirtualCardInfo':\n \"\"\"Returns the dict as a model\n\n :param dikt: A dict.\n :type: dict\n :return: The ComposedVirtualCardInfo of this ComposedVirtualCardInfo. 
# noqa: E501\n :rtype: ComposedVirtualCardInfo\n \"\"\"\n return util.deserialize_model(dikt, cls)\n\n @property\n def virtual(self) -> AllOfComposedVirtualCardInfoVirtual:\n \"\"\"Gets the virtual of this ComposedVirtualCardInfo.\n\n\n :return: The virtual of this ComposedVirtualCardInfo.\n :rtype: AllOfComposedVirtualCardInfoVirtual\n \"\"\"\n return self._virtual\n\n @virtual.setter\n def virtual(self, virtual: AllOfComposedVirtualCardInfoVirtual):\n \"\"\"Sets the virtual of this ComposedVirtualCardInfo.\n\n\n :param virtual: The virtual of this ComposedVirtualCardInfo.\n :type virtual: AllOfComposedVirtualCardInfoVirtual\n \"\"\"\n\n self._virtual = virtual\n\n @property\n def constraints(self) -> AllOfComposedVirtualCardInfoConstraints:\n \"\"\"Gets the constraints of this ComposedVirtualCardInfo.\n\n\n :return: The constraints of this ComposedVirtualCardInfo.\n :rtype: AllOfComposedVirtualCardInfoConstraints\n \"\"\"\n return self._constraints\n\n @constraints.setter\n def constraints(self, constraints: AllOfComposedVirtualCardInfoConstraints):\n \"\"\"Sets the constraints of this ComposedVirtualCardInfo.\n\n\n :param constraints: The constraints of this ComposedVirtualCardInfo.\n :type constraints: AllOfComposedVirtualCardInfoConstraints\n \"\"\"\n\n self._constraints = constraints\n\n @property\n def info(self) -> AllOfComposedVirtualCardInfoInfo:\n \"\"\"Gets the info of this ComposedVirtualCardInfo.\n\n\n :return: The info of this ComposedVirtualCardInfo.\n :rtype: AllOfComposedVirtualCardInfoInfo\n \"\"\"\n return self._info\n\n @info.setter\n def info(self, info: AllOfComposedVirtualCardInfoInfo):\n \"\"\"Sets the info of this ComposedVirtualCardInfo.\n\n\n :param info: The info of this ComposedVirtualCardInfo.\n :type info: AllOfComposedVirtualCardInfoInfo\n \"\"\"\n\n self._info = info\n","sub_path":"swagger_server/models/composed_virtual_card_info.py","file_name":"composed_virtual_card_info.py","file_ext":"py","file_size_in_byte":3873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"353576928","text":"# Copyright (C) 2016-2016, A10 Networks Inc. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n# Copyright (C) 2014-2016, A10 Networks Inc. 
All rights reserved.\n\nimport logging\n\nfrom django.core.urlresolvers import reverse_lazy\nfrom django.core.validators import MaxValueValidator, MinValueValidator\nfrom django.shortcuts import redirect\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom horizon import exceptions\nfrom horizon import forms\nfrom horizon import tables\nfrom horizon import workflows\n\nfrom neutron_lbaas_dashboard.api import lbaasv2\n# Yeah, instances in networking - easy way to get subnet data.\nfrom openstack_dashboard.api import neutron as neutron_api\nfrom openstack_dashboard.dashboards.project.instances import utils as instance_utils\n\n\nLOG = logging.getLogger(__name__)\n\n\"\"\"\nNotes:\n\nv2 defines a VIP as a combination of a listener/loadbalancer.\n\n\"\"\"\n\n\nclass CreateLbAction(workflows.Action):\n lb_name = forms.CharField(label=_(\"Name\"), min_length=1, max_length=255,\n required=True)\n lb_description = forms.CharField(label=_(\"Description\"), min_length=1,\n max_length=255, required=False)\n vip_subnet = forms.ChoiceField(label=_(\"VIP Subnet\"), required=True)\n\n def populate_vip_subnet_choices(self, request, context):\n transform_func = lambda x: (x.get(\"id\"), \"{0} - {1}\".format(x.get(\"name\"), x.get(\"cidr\")))\n return sorted([transform_func(x) for x in neutron_api.subnet_list(request)],\n key=lambda x: x[0])\n\n def populate_protocol_choices(self, request, context):\n # TODO(mdurrant) - Return these from a service\n return [(\"\",\"Select a protocol\"),\n (\"TCP\", \"TCP\"),\n (\"HTTP\", \"HTTP\"),\n (\"HTTPS\", \"HTTPS\"),\n (\"TERMINATED_HTTPS\", \"Terminated HTTPS\")\n ]\n\n\n class Meta(object):\n name = _(\"LB Name and Subnet\")\n # TODO(mdurrant) - Add a10-specific permissions\n permissions = (\"openstack.services.network\", )\n help_text = _(\"Specify the details for the VIP below\")\n\n\nclass CreateVipAction(workflows.Action):\n protocol = forms.ChoiceField(label=_(\"Protocol\"), required=True)\n protocol_port = forms.IntegerField(label=_(\"Protocol Port\"), min_value=1, max_value=65535, required=True)\n\n def populate_vip_subnet_choices(self, request, context):\n return instance_utils.subnet_field_data(request, True)\n\n def populate_protocol_choices(self, request, context):\n # TODO(mdurrant) - Return these from a service\n return [(\"\",\"Select a protocol\"),\n (\"TCP\", \"TCP\"),\n (\"HTTP\", \"HTTP\"),\n (\"HTTPS\", \"HTTPS\"),\n (\"TERMINATED_HTTPS\", \"Terminated HTTPS\")\n ]\n\n\n class Meta(object):\n name = _(\"Protocol Data\")\n # TODO(mdurrant) - Add a10-specific permissions\n permissions = (\"openstack.services.network\", )\n help_text = _(\"Specify the details for the name and subnet of the VIP below\")\n\n\nclass CreateLbStep(workflows.Step):\n action_class = CreateLbAction\n contributes = (\"lb_name\", \"lb_description\", \"vip_subnet\")\n\n\nclass CreateVipStep(workflows.Step):\n action_class = CreateVipAction\n contributes = (\"protocol\", \"protocol_port\")\n\n\nclass CreateVipWorkflow(workflows.Workflow):\n slug = \"addvip\"\n name = _(\"Create VIP\")\n default_steps = (CreateLbStep, CreateVipStep, )\n success_url = \"horizon:project:a10vips:index\"\n finalize_button_name = \"Create VIP\"\n\n def handle(self, request, context):\n # First, try to create the LB. 
Make sure we get an IP back because we need it for the listener.\n # Then, try to create the listener.\n # If we fail, delete the LB.\n success = False\n lb = None\n\n try:\n lb_body = self._get_lb_body_from_context(context)\n lb = lbaasv2.create_loadbalancer(request, lb_body)\n lb_id = lb.get(\"id\")\n\n listener_body = self._get_listener_body_from_context(context, lb_id)\n listener = lbaasv2.create_listener(request, listener_body)\n success = True\n except Exception as ex:\n # If we bomb here, delete the LB that was created.\n LOG.exception(ex)\n exceptions.handle(request, _(\"Could not create listener\"))\n\n if not success and lb:\n lbaasv2.delete_loadbalancer(lb)\n\n return success\n\n def _get_lb_body_from_context(self, context):\n return { \"loadbalancer\": {\n \"name\": context.get(\"lb_name\"),\n \"description\": context.get(\"lb_description\"),\n \"vip_subnet_id\": context.get(\"vip_subnet\")\n }}\n\n def _get_listener_body_from_context(self, context, lb_id):\n return {\"listener\": {\n \"name\": str(\"{0}_{1}\".format(context.get(\"protocol\"),\n context.get(\"protocol_port\"))),\n \"description\": str(context.get(\"listener_desc\")),\n \"loadbalancer_id\": str(lb_id),\n \"protocol\": str(context.get(\"protocol\")),\n \"protocol_port\": context.get(\"protocol_port\")\n }}\n","sub_path":"a10_horizon/dashboard/project/a10networks/vips/workflows.py","file_name":"workflows.py","file_ext":"py","file_size_in_byte":5742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"48063530","text":"# Definition for singly-linked list.\nclass ListNode(object):\n def __init__(self, x):\n self.val = x\n self.next = None\n\nclass Solution(object):\n def getIntersectionNode(self, headA, headB):\n if headA.next == None or headB.next == None:\n return None\n l1 = ListNode(None)\n l2 = ListNode(None)\n while l1 != None and l2 != None:\n if l1 == l2:\n return l1\n l1 = l1.next\n l2 = l2.next\n if l1 == None:\n l1 = headA\n if l2 == None:\n l2 = headB\n return None\n \"\"\"\n :type head1, head1: ListNode\n :rtype: ListNode\n \"\"\"\n","sub_path":"Intersection of Two Linked Lists.py","file_name":"Intersection of Two Linked Lists.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"586384027","text":"#\n# @lc app=leetcode id=82 lang=python3\n#\n# [82] Remove Duplicates from Sorted List II\n#\n# https://leetcode.com/problems/remove-duplicates-from-sorted-list-ii/description/\n#\n# algorithms\n# Medium (35.77%)\n# Total Accepted: 233.6K\n# Total Submissions: 653.1K\n# Testcase Example: '[1,2,3,3,4,4,5]'\n#\n# Given a sorted linked list, delete all nodes that have duplicate numbers,\n# leaving only distinct numbers from the original list.\n# \n# Return the linked list sorted as well.\n# \n# Example 1:\n# \n# \n# Input: 1->2->3->3->4->4->5\n# Output: 1->2->5\n# \n# \n# Example 2:\n# \n# \n# Input: 1->1->1->2->3\n# Output: 2->3\n# \n# \n#\n# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\n\nclass Solution:\n def deleteDuplicates(self, head: ListNode) -> ListNode:\n \"\"\"\n 快慢指针: 快指针负责跳过重复元素,慢指针负责把链表接起来\n \"\"\"\n if not head or not head.next:\n return head\n dummy = ListNode(None)\n dummy.next = head\n slow = dummy\n fast = dummy.next\n while fast:\n if fast.next and fast.next.val == fast.val:\n tmp = fast.val\n while fast and tmp == fast.val:\n fast = fast.next\n else:\n slow.next = fast # 结点插入链表\n 
slow = fast\n fast = fast.next\n slow.next = fast\n return dummy.next\n\n\nif __name__ == '__main__':\n head = ListNode(None) # head pointer refers to the list head\n pos = head\n for i in [1, 1, 1, 2, 3]:\n node = ListNode(i)\n pos.next = node # append the node to the list\n pos = pos.next # advance the pos pointer\n\n s = Solution()\n result = s.deleteDuplicates(head.next)\n while result:\n print(result.val)\n result = result.next\n","sub_path":"linked_list/two_pointer/82.remove-duplicates-from-sorted-list-ii.py","file_name":"82.remove-duplicates-from-sorted-list-ii.py","file_ext":"py","file_size_in_byte":1870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"43363418","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # 1. Loading the Data from Google Drive\n# ---\n# ### 1.1\n# \n# Mount the drive and extract the dataset from images separated into corresponding folders\n# \n# The mounting of the drive is only necessary if the notebook is run on Google Colab instead of locally\n# \n# The folder is divided into:\n# 1. Test Folder\n# * With Mask Folder\n# * Without Mask Folder\n# 2. Train Folder\n# * With Mask Folder\n# * Without Mask Folder\n# 3. Validation Folder\n# * With Mask Folder\n# * Without Mask Folder\n# \n# ---\n# ### 1.2\n# \n# All required modules are also imported afterwards\n# \n# These modules are:\n# * Numpy\n# * Matplotlib.pyplot\n# * Seaborn\n# * Tensorflow\n# * Keras \n# * Scikit-Learn \n\n\n# 1.1 \n# Mounting to drive\n# This step is only necessary if the notebook is run on Google Colab instead of locally\n\nfrom google.colab import drive \n\ndrive.mount(\"/content/gdrive\") \n\n\n# 1.2\n# Import needed modules\n\n# Basic packages needed for data analysis, visualization and manipulation\nimport numpy as np\nimport pandas as pd \nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n# Mainly Tensorflow packages for data preprocessing\nfrom PIL import Image \nimport tensorflow as tf\nfrom tensorflow.keras.preprocessing.image import img_to_array, load_img, ImageDataGenerator\nfrom tensorflow.keras.utils import to_categorical \n\n# Mainly Tensorflow.keras layers and pre-trained Convolutional Neural Network (CNN) models needed to do Transfer Learning\nfrom tensorflow.keras.applications import VGG19\nfrom tensorflow.keras.layers import Dense, Conv2D, MaxPool2D, Dropout, Flatten, BatchNormalization, Input, GlobalAveragePooling2D\nfrom tensorflow.keras import Sequential, regularizers, Model\n\n# Mainly functions to load from saved checkpoints\nfrom tensorflow.keras.models import load_model\n\n# Mainly Tensorflow modules that help to optimize and fine-tune the CNN models better\nfrom tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau\nfrom tensorflow.keras.optimizers import Adam \nfrom sklearn.model_selection import KFold, train_test_split \nfrom math import ceil \n\n# Mainly metrics to assess the CNN's performance\nfrom sklearn.metrics import accuracy_score, confusion_matrix, recall_score, precision_score, f1_score, classification_report\n\n\n# # 2. 
Preprocessing the data and preparing the VGG19 model\n# \n# ___\n# ### 2.1 Prepare ImageDataGenerator\n# Create an ImageDataGenerator instance for data augmentation\n# \n# Considering the rotation, width and height shift, brightness, shear, zoom and horizontal flipping to mimic the possible real-world data the model would face\n# \n# Diversifies the dataset to let the model be trained on a larger and more diverse set of images\n# \n# ___\n# \n# ### 2.2 Construct CNN Model Using the Pre-trained VGG19\n# The model's architecture would be built upon the VGG19 model trained on the ImageNet classification task \n# \n# The activation function for the hidden layers would be ReLU and Sigmoid, instead of VGG19's usual SoftMax, would be applied to the output layer\n# \n# The dense layers would have dropout regularization and L2 weight regularization would be applied \n# \n# The optimizer used is Adam, which gives an adaptive learning rate, the loss measured is \"binary cross-entropy\", early stopping would be implemented (loss is monitored for early stopping) and reducing LR on plateau is used too. \n# \n# The model is as follows: (_Refer to model summary for more info_)\n# 1. Input layer (following our image size)\n# 2. VGG19 (consists of 5 convolutional blocks but only the bottom 4 are used)\n# 3. Flatten for dense network\n# 4. First Dense Block\n# * Dense \n# * Dropout\n# 5. Second Dense Block\n# * Dense\n# * Dropout\n# 6. Output layer\n# \n# ___ \n# \n# ### 2.3 Setting Trainable Layers\n# Set the bottom layers to be untrainable for fine-tuning the pre-trained model\n# \n# ___\n# \n# ### 2.4 Compile Model & Prepare Callbacks\n# Compile the model with the Adam optimizer, Binary Cross-Entropy loss and Accuracy as the measure of success\n# \n# The callback objects for early stopping, saving model checkpoints and reducing LR on plateau are prepared\n# \n# ___\n# \n# ### 2.5 Loading Previous Models \n# Instead of fitting a model to the dataset from scratch, we can also load a model that was saved from a previous training and resume training from there\n# \n# This can be done either by loading from checkpoints (CKPT format) or a saved model (HDF5 format)\n# \n# If the whole model is loaded, there is no need to construct the whole model from scratch, but the trainable layers have to be set correctly after loading the entire model\n\n\n# 2.1 \n# ImageDataGenerator is an iterator for data augmentation\n\n# As the images of the dataset are quite closely zoomed onto faces already, \n# the shift range is limited to only 0.1 to prevent the faces from going out of frame\n\n# The brightness is not altered too drastically as the model should ultimately be used in a well lit setting\n\n# Vertical flipping is not applied as we do not expect to see upside down faces in our use cases\n\n# Rescaling pixels to a value between 0.0 and 1.0 as a form of normalization \n# to increase training speeds, stability and comprehensiveness\n\ntarget_img_size = (224, 224)\n\ndatagen_train = ImageDataGenerator(\n rotation_range=40, # Randomly rotates the image by up to 40 degrees\n width_shift_range=0.1, # Displaces the image horizontally by up to 10% of the original image size\n height_shift_range=0.1, # Displaces the image vertically by up to 10% of the original image size\n brightness_range=[0.8, 1.2], # Alters brightness by a positive 20% or negative 20% \n shear_range=0.3, # Shears by up to 30% in the counter-clockwise direction\n zoom_range=0.2, # Randomly zooms in and out by up to 20%\n horizontal_flip=True, # Randomly 
flips the image horizontally\n rescale=1./255 # Rescales pixels to a float between 0 and 1\n ) \n\ndatagen_val = ImageDataGenerator(\n rotation_range=40, # Same requirements repeated for validation dataset\n width_shift_range=0.1, \n height_shift_range=0.1, \n brightness_range=[0.8, 1.2], \n shear_range=0.3, \n zoom_range=0.2, \n horizontal_flip=True, \n rescale=1./255\n ) \n\ndatagen_test = ImageDataGenerator(\n rescale=1./255 # No augmentation involved for testing dataset, just rescaling as the CNN is trained on normalized pixels\n)\n\n\n# 2.2 \n# TL Model with 2 dense layers added to the top\n\n# Load the pretrained VGG19 model for transfer learning, specifying the input image size\ntemp_model = VGG19(weights='imagenet', include_top=False, input_shape=(target_img_size[0], target_img_size[1], 3)) \n\n# Checking the model \ntemp_model.summary() \n\n\n# Obtaining the wanted layers\n\n# Only layers below block4_pool would be used\n\n# The pre-trained model's bottom layers are mainly involved in extracting more general data from images \n# such as edges, texture, shape, etc., the top layers are more fine-tuned towards the true classfication task\n\n# As such, we should preserve the bottom layers and \n# allow for changes and fine-tuning in the top layers as well as the newly added dense layers\n\n# Use 4 convol blocks instead of 5 from the VGG19 model\noutput_layer = 'block4_pool' \nint_model = Model(inputs=temp_model.input, outputs=temp_model.get_layer(output_layer).output)\n# Use Keras's Functional API to form a Model object with a top layer of 'block4_pool'\n\n# Checking the model\nint_model.summary() \n\n\n# Adding desired dense layers \n\n# Use Keras's Functional API to add the dense layers\n# Each layer is chained to the existing Model model in sequence\ntemp_out = int_model.output\ntemp_out = Flatten()(temp_out)\ntemp_out = Dense(1000, kernel_regularizer=regularizers.L2(0.0001), activation='relu')(temp_out)\ntemp_out = Dropout(0.2)(temp_out)\ntemp_out = Dense(500, kernel_regularizer=regularizers.L2(0.0001), activation='relu')(temp_out)\ntemp_out = Dropout(0.2)(temp_out)\npredictions = Dense(1, activation='sigmoid')(temp_out)\n\n# Forming final model\n\n# Use Keras's Functional API to form a Model object with the top layers we defined ourselves\nmodel = Model(inputs=int_model.input, outputs=predictions)\n\n# Checking the FINAL model\nmodel.summary()\n\n\n# 2.3\n# Set the top 5 CONVOL layers to be trainable and the other bottom layers to be untrainable\n# so that whatever the pre-trained model has learnt is still preserved but\n# the model is fine-tuned to our classification task\n\n# It is a given that the dense layers are set to be trainable\n\nTRAINABLE_LAYERS = 5 # Number of trainable CONVOL layers\nidx = -(TRAINABLE_LAYERS) # Multiply the number by -1 to form the negative index\n\n# Set all layers to trainable first\nfor layer in model.layers[:]:\n layer.trainable = True\n\n# Set bottom layers to be untrainable\nfor layer in int_model.layers[:idx]: # specifies which (CONVOL) layers are trainable\n layer.trainable = False\n\n# Double-checking the model\nmodel.summary()\n\n\n# 2.4\n\n# Compiling the model \nopt = Adam(learning_rate=1e-4) # Using the adam optimizer which allows for adapting LRs, initialized as 0.0001\n\nmodel.compile(loss='binary_crossentropy', optimizer=opt, metrics=['accuracy']) \n# Binary cross-entropy used as there are 2 categories predicted by one output neuron\n# Accuracy used as measure of success\n\n# Prepare for early stopping\nearly_stopping = 
EarlyStopping(monitor='val_loss', patience=1)\n\n# Prepare for model checkpoint saving\ncheckpoint_path = \"/content/gdrive//.ckpt\"\n\nmodel_checkpoint = ModelCheckpoint(filepath=checkpoint_path, monitor='val_loss', verbose=1, save_best_only=True, save_weights_only=True)\n\n# Prepare for reduced LR when approaching a plateau \nreduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=1, min_lr=0.00001)\n\n\n# 2.5\n# Load from a previously saved state of a trained model\n\n# Load previously saved weights from CKPT file\nmodel.load_weights(\"/content/gdrive//.ckpt\")\n\n# Load previously saved model from HDF5 file\n# model = load_model('/content/gdrive//.h5')\n# If the whole model is loaded, there is no need to construct the whole model from scratch\n\n#####\n# IF the model is loaded directly from the HDF5 file, \n# there may be a need to set up the trainable layers after loading the model\n\n# Set all layers to trainable first\n# for layer in model.layers[:]:\n # layer.trainable = True\n\n# Set bottom layers to be untrainable\n# for layer in int_model.layers[:-5]: # specifies which layers are trainable\n # layer.trainable = False\n\n# Model summary\n# model.summary()\n#####\n\n\n# # 3. Fine-tuning the Model\n# \n# ___\n# \n# ### 3.1 Prepare Dataset\n# \n# Prepare the training, validation and testing dataset using the flow_from_directory() method of the ImageDataGenerator object\n# \n# The method reads the images and yields them in batches from the corresponding folders using the filepath, batch size and target image size provided\n# \n# Shuffle is meant to introduce more randomness and the seed was set to 1 as a group decision in order to standardize within the group\n# \n# The images yielded may be altered according to the earlier set parameters at random\n# \n# Initially, the dataset was loaded using load_img() and img_to_array() and the augmentor iterates from memory using the flow() method. \n# \n# In order to standardize the dataset used however, flow_from_directory() method is now used instead to iterate from storage instead. 
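# Editor's sketch (not part of the original notebook): a minimal, self-contained
# comparison of the two loading styles discussed above. The array shapes, the
# sketch_* names and the 'data/train' path are illustrative assumptions, not
# values taken from this project.

import numpy as np
from tensorflow.keras.preprocessing.image import ImageDataGenerator

sketch_datagen = ImageDataGenerator(rescale=1./255)

# flow(): iterates over arrays already held in memory, so the whole dataset
# must fit in RAM before augmentation starts
sketch_x = np.random.rand(8, 224, 224, 3).astype('float32')  # 8 fake RGB images
sketch_y = np.random.randint(0, 2, size=(8,))                # fake binary labels
mem_gen = sketch_datagen.flow(sketch_x, sketch_y, batch_size=4)
batch_x, batch_y = next(mem_gen)                             # one augmented batch of 4

# flow_from_directory(): reads images lazily from disk one batch at a time and
# infers the class labels from the sub-folder names (the call below is left
# commented out because it requires the assumed folder to exist):
# disk_gen = sketch_datagen.flow_from_directory('data/train',
#                                               target_size=(224, 224),
#                                               batch_size=4,
#                                               class_mode='binary')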
\n# \n# ___\n# \n# ### 3.2 Fine-tuning the CNN Model\n# \n# The model is fitted/fine-tuned to the dataset, using the training and validation ImageDataGenerator and the 3 callback objects prepared\n# \n# The results are appended to a list called model_metadata\n# \n# ___\n# \n# ### 3.3 Load Previous Model\n# \n# Instead of fitting a new model to the dataset, we can also load a model that was saved from a previous training\n# \n# This can be done either by loading from checkpoints (CKPT format) or a saved model (HDF5 format)\n# \n# If the whole model is loaded, there is no need to construct the whole model from scratch\n# \n# ___\n# \n# ### 3.4 Evaluation\n# \n# The model's performance after fitting is then evaluated using the testing ImageDataGenerator\n# \n# A confusion matrix and classification report is also plotted to observe the model's other metrics of performance\n# \n# ___\n# \n# ### 3.5 Viewing the Predictions\n# \n# Using matplotlib.pyplot, we can see the image of predicted images alongside the prediction and true label\n# \n# ___\n# \n# ### 3.6 Saving Model\n# \n# The model can be saved in its entirety into an HDF5 file\n\n\n# 3.1\n\n# Standard batch size of 32 for time trade-off\nBATCH_SIZE = 32\n\n# Paths to corresponding dataset folders\ntrain_path = \"/content/gdrive//data/train\" # Training dataset\nval_path = \"/content/gdrive//data/val\" # Validation dataset\ntest_path = \"/content/gdrive//data/test\" # Testing dataset\n\n# ImageDataGenerator can directly read the dataset from disk and create batches of images that are altered at random\n\n# Target image size is (224, 224), batch size is 32, class mode refers to the 2 classes predicted by the CNN model\n# Shuffle set to True indicates that the generator would select images to alter and yield randomly from a dataset that is random shuffled\n# Testing ImageDataGenerator has shuffle set to False so that the class labels used for the confusion matrix would be in the correct order\n\ntrain_gen = datagen_train.flow_from_directory(train_path, target_size=target_img_size, batch_size=BATCH_SIZE, class_mode=\"binary\", shuffle=True, seed=1)\n\nval_gen = datagen_val.flow_from_directory(val_path, target_size=target_img_size, batch_size=BATCH_SIZE, class_mode=\"binary\", shuffle=True, seed=1)\n\ntest_gen = datagen_test.flow_from_directory(test_path, target_size=target_img_size, batch_size=BATCH_SIZE, class_mode=\"binary\", shuffle=False)\n\n\n# 3.2\n# Preparing the list to store details of the model's training performance\nmodel_metadata = []\n\n# Fitting the model\nmodel_metadata.append(\n model.fit_generator(train_gen, \n steps_per_epoch=ceil(3561/BATCH_SIZE),\n epochs=15, \n callbacks=[early_stopping, reduce_lr, model_checkpoint],\n validation_data=val_gen,\n validation_steps=ceil(1017/BATCH_SIZE),\n verbose=1))\n\n\n# 3.3\n# Load from a previously saved state of a trained model\n\n# Load previously saved weights from CKPT file\n# model.load_weights(\"/content/gdrive//.ckpt\")\n\n# Load previously saved model from HDF5 file\nmodel = load_model('/content/gdrive//.h5')\n# If the whole model is loaded, there is no need to construct the whole model from scratch\n\n\n# 3.4\n# Observing the results\n\n# Use evaluate() method to evaluate performance metrics of accuracy and loss\nmodel_metadata.append(\n model.evaluate(test_gen, verbose=1, return_dict=True)\n) \n\n\n# Observe the returned results of the evaluate() method\nmodel_metadata[-1]\n\n\n# Plotting the confusion matrix \n\n# Getting the predictions\ny_pred = model.predict(test_gen, 
verbose=1)\ny_pred = (y_pred > 0.5).astype('int32')\n\n# Getting the class labels\ny_classes = list(test_gen.class_indices)\n\n# Plotting the confusion matrix\ncm = confusion_matrix(test_gen.classes, y_pred)\nsns.heatmap(cm, annot=True, xticklabels=y_classes, yticklabels=y_classes)\n\n\n# Scores according to different metrics\n\n# Accuracy \nprint(\"Accuracy: \", end=\"\")\nprint(accuracy_score(test_gen.classes, y_pred))\n\n# Recall\nprint(\"Recall: \", end=\"\")\nprint(recall_score(test_gen.classes, y_pred))\n\n# Precision \nprint(\"Precision: \", end=\"\")\nprint(precision_score(test_gen.classes, y_pred))\n\n# F1\nprint(\"F1 score: \", end=\"\")\nprint(f1_score(test_gen.classes, y_pred))\n\n\n# Creating the classification report\n\nprint(classification_report(test_gen.classes, y_pred, target_names=y_classes))\n\n\n# 3.5 \n# Viewing the image and corresponding class and prediction by the model\n\n# Getting a batch of images \nbatch = test_gen.__next__()\n\n# Getting predictions on that batch\nresults = model.predict(batch, verbose=1)\nresults = (results > 0.5).astype('int32') \n\n\n# Picture to be reviewed\nimage = 1 # Image at index 1\n\n# Get the true class\nprint(f\"Correct Class: {batch[1][image]}\")\n\n# Get the predicted class\nprint(f\"Predicted Class: {results[image]}\")\n\n# Get the image \nplt.imshow(batch[0][image]) \n\n\n# 3.6\n# Saving the model in HDF5 format\n\nmodel.save('/content/gdrive//.h5') \n\n","sub_path":"Development/Transfer_Learning.py","file_name":"Transfer_Learning.py","file_ext":"py","file_size_in_byte":16474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"491530707","text":"'''\nCreated on 28 Aug 2013\n\n@author: will\n'''\n\nimport xmpp\nimport time\nimport logging\nfrom datetime import timedelta, datetime\nfrom Queue import Queue\nfrom threading import Thread\nfrom xmpp.protocol import Message\n\nfrom .config import settings\n\nclass Controller(object):\n \"\"\"\n The notification controller is an approaching object listener, and on being \n notified of an alert, is responsible for coordinating the actions of other\n components such as the notification gateway. \n \n The controller will throttle alerts it receives to ensure that it doesn't \n send out superfluous notifications. See proximity_settings.py for more\n information on configuration settings.\n \n \"\"\"\n \n def __init__(self):\n logging.info('Initialising Jabber notification gateway...')\n self._gateway = JabberGateway()\n logging.info('Jabber notification gateway initialised')\n self._last_notification_time = None\n self._interval = timedelta(seconds=\n settings.MIN_NOTIFICATION_INTERVAL_SECS)\n \n def notify(self, alert_id):\n now = datetime.now()\n if (self._last_notification_time is None or \n (self._last_notification_time + self._interval) < now):\n \n self._gateway.send(settings.NOTIFICATION_TEXT_TEMPLATE)\n self._last_notification_time = now\n else:\n logging.debug('Still inside min notification interval - ignoring '\n 'notification request')\n \n def __call__(self, alert_id):\n \"\"\"\n Invoked by the ApproachingObjectAlerter to notify when an object\n approaches.\n \n Args: \n alert_id: the unique identifier representing the alert.\n \n \"\"\"\n self.notify(id)\n \n \nclass JabberGateway(object):\n \"\"\"\n XMPP implementation of a notification gateway that uses xmpppy to send a\n notification message to a given recipient. 
Implementation specific \n configuration - such as the Jabber server, username, password and recipient\n are specified in the proximity_settings.\n \n Message sending is asynchronous. Calling send() will queue the request and\n the send() method will then immediately return. The queued send request is\n picked up by a worker thread and processed. \n \n Note that the queue has a max size of 1. This means that while a send\n request exists in the queue, calling send() again will block until the \n queued send request has been dequeued processed by the worker.\n \n Using a queue ensures serialised access to the XMPP client (as both send\n and keep alive requests are being pushed from different threads) - and so\n hopes to avoid any threadsafety issues arising from using a single client.\n \n Having a max size of 1 prevents multiple requests from building up on the\n queue which may all suddenly be dequeued and processed rapidly - which\n may then cause problems server side.\n \n \"\"\"\n \n def __init__(self):\n \"\"\"\n Creates a new JabberGateway instance establishing an initial\n connection to the Jabber server and performing necessary authorisation.\n \n \"\"\"\n self._client = self._initialise_client()\n self._send_queue = Queue(maxsize=1)\n self._initialise_threads()\n \n def _initialise_client(self):\n jabber_server = settings.JABBER_SERVER # @UndefinedVariable\n client = xmpp.Client(jabber_server)\n \n logging.debug(\"Connecting to Jabber server '{0}'...\"\n .format(jabber_server))\n if not client.connect():\n raise IOError(\"Unable to connect to Jabber server '{0}' \"\n \"- check network connection\".format(jabber_server))\n\n jabber_username = settings.JABBER_USERNAME # @UndefinedVariable\n logging.debug(\"Authenticating with Jabber server as '{0}'...\"\n .format(jabber_username))\n if not client.auth(jabber_username, \n settings.JABBER_PASSWORD): # @UndefinedVariable\n raise IOError(\"Unable to authorise - \"\n \"check Jabber username/password\") \n \n return client\n \n def _initialise_threads(self):\n \"\"\"Initialises the worker and keep alive threads\"\"\"\n \n logging.info('Starting Jabber queue processor thread')\n processor_thread = Thread(target=self._queue_processor)\n processor_thread.daemon = True\n processor_thread.start() \n \n logging.info('Starting Jabber keep-alive thread')\n keep_alive_thread = Thread(target=self._keep_alive)\n keep_alive_thread.daemon = True\n keep_alive_thread.start() \n \n def send(self, text, img=None):\n \"\"\"\n Sends the specified text as an XMPP message. The send request will\n actually be queued for sending and this method will return immediately\n (assuming there is no current send request being processed which would\n otherwise cause this method to block until that message has been sent).\n \n The recipient of the message is obtained from the proximity_settings.\n \n Args:\n text: The text to send in the message.\n img: An optional bytes object containing a jpeg image.\n Ignored in this implementation.\n \n \"\"\"\n def _send_internal():\n logging.info('Sending message')\n self._send(text, settings.JABBER_TO) # @UndefinedVariable\n self._send_queue.put(_send_internal)\n \n def _send(self, text, to=None):\n \"\"\"\n Sends the specified text as an XMPP message. 
\n \n Args: \n text: The text to send in the message.\n to: The recipient address.\n Raises:\n IOError: If there was a problem sending the message.\n \n \"\"\" \n try:\n self._client.send(Message(to, text))\n except IOError:\n logging.warn(\"Failed to send message \"\n \"- reconnecting and retrying...\")\n self._client.reconnectAndReauth()\n self._client.send(Message(to, text))\n \n def _queue_processor(self):\n \"\"\"\n Invoked by the worker thread used to process \n the queue. The queue is expected to contain callables.\n \n \"\"\"\n while True:\n c = self._send_queue.get()\n try:\n c()\n except Exception:\n logging.exception(\"Error processing queued item\")\n finally:\n self._send_queue.task_done()\n \n def _keep_alive(self):\n \"\"\"\n Invoked from the keep alive thread used to send keep-alive\n requests.\n \n \"\"\"\n def _keep_alive_internal():\n logging.debug(\"Sending keepalive\")\n self._send(' ')\n while True:\n self._send_queue.put(_keep_alive_internal)\n time.sleep(120)\n ","sub_path":"whos_there/notification.py","file_name":"notification.py","file_ext":"py","file_size_in_byte":7133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"264755806","text":"import random\nimport math\n\n# simple neuron with two inputs\n\n\ndef sigmoid(value):\n return 1 / (1 + math.e**-value)\n\n\ndef sigmoid_derivative(value):\n return sigmoid(value) * (1 - sigmoid(value))\n\n\ntrainingInputs = ((100, 30), (400, 300), (600, 10), (55, 500))\ntrainingOutputs = (0, 1, 0, 1)\nlearningRate = 1;\nweight1 = random.randint(0, 100) / 100\nweight2 = random.randint(0, 100) / 100\n\nprint(weight1)\nprint(weight2)\n\nfor i in range(0,1000):\n for dataSet in range(0, len(trainingOutputs)):\n input1 = trainingInputs[dataSet][0]\n input2 = trainingInputs[dataSet][1]\n desiredOutput = trainingOutputs[dataSet]\n summedInput = input1 * weight1 + input2 * weight2\n print(\"summedInput: \" + str(summedInput))\n #sigmoid function\n output = sigmoid(summedInput)\n print(\"output: \" + str(output))\n error = 0.5 * (desiredOutput - output)**2\n print(\"error: \" + str(error))\n weight1Change = -learningRate * input1 * sigmoid_derivative(summedInput) * (output - desiredOutput)\n weight2Change = -learningRate * input2 * sigmoid_derivative(summedInput) * (output - desiredOutput)\n\n print(\"weight1Change: \" + str(weight1Change))\n print(\"weight2Change: \" + str(weight2Change))\n\n weight1 += weight1Change\n weight2 += weight2Change\n\n print(\"weight1: \" + str(weight1))","sub_path":"NeuralNet/NeuralNet.py","file_name":"NeuralNet.py","file_ext":"py","file_size_in_byte":1369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"96061477","text":"\"\"\" Functions for db in flask via sqlalchemy \"\"\"\nfrom flask import request, session\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom staff import Staff\nfrom material import Material\n\nengine = create_engine('sqlite:///db/blackops.sqlite')\nSession = sessionmaker(bind=engine)\nsession = Session()\n\n\ndef fixStaffTable():\n \"\"\" Function returning html-table with all humans in db \"\"\"\n staffTable = ''\n all_staffs = session.query(Staff).all()\n for staff in all_staffs:\n staffTable += \"{id}{fname}\\n{lname}\\n{occupation}{age}\\n\" \\\n \"{rating}\\nTa bort\\nUpdate\\n \" \\\n \"\".format(id=staff.id, fname=staff.fname, lname=staff.lname, occupation=staff.occupation,\n age=staff.age, rating=staff.rating)\n\n return 
staffTable\n\n\ndef fixMaterialTable():\n \"\"\" Function returning html-table with all animals in db \"\"\"\n materialTable = ''\n all_maters = session.query(Material).all()\n for mat in all_maters:\n materialTable += \"{id}{name}\\n{typ}{qty}\\n \" \\\n \"Ta bort\\nUpdate\\n \" \\\n \"\\n\".format(id=mat.id, name=mat.name, typ=mat.typ, qty=mat.qty)\n\n return materialTable\n\ndef get_staff(id):\n staff = session.query(Staff).filter(Staff.id == id).one()\n return staff\n\ndef update_rating(staff, rating):\n staff.rating = rating\n\ndef get_material(id):\n staff = session.query(Material).filter(Staff.id == id).one()\n return staff\n\n\ndef removeItem(itemId, whichTable):\n \"\"\" Function for removing item from db \"\"\"\n if whichTable == 'st':\n session.query(Staff).filter(Staff.id == itemId).delete()\n elif whichTable == 'mt':\n session.query(Material).filter(Material.id == itemId).delete()\n session.commit()\n\n\ndef addItem():\n \"\"\" Function for adding item to db \"\"\"\n if request.form['table'] == 'staff':\n newStaff = Staff(fname=request.form['fname'], lname=request.form['lname'],\n occupation=request.form['occupation'], age=request.form['age'], rating=request.form['rating'])\n session.add(newStaff)\n elif request.form['table'] == 'material':\n newMat = Material(name=request.form['name'], typ=request.form['typ'], qty=request.form['qty'])\n session.add(newMat)\n session.commit()\n","sub_path":"kmom10/blackops/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":2634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"619439949","text":"#!/usr/bin/python -O\r\n# -*- coding: iso-8859-15 -*-\r\n# -O Optimize e non scrive il __debug__\r\n#\r\n# Version 0.01 08/04/2010: Starting\r\n# ####################################################################################################################\r\nimport sys\r\n\r\nfrom ..LnCommon.LnLogger import SetLogger\r\nfrom ..LnCommon.LnColor import LnColor\r\nfrom ..LnCommon.Exit import Exit\r\n\r\n# ###########################################################################\r\n# * Gestione input da Keyboard.\r\n# * 29-08-2010 - Rimosso LnSys dalla chiamata alla LnSys.exit()\r\n# * 12-02-2012 - Cambiato keys in keyLIST\r\n# * 12-03-2013 - Cambiato keyLIST in validKeys\r\n# * 01-01-2014 - modificato il validKeysLIST.\r\n# ###########################################################################\r\ndef getKeyboardInput(msg, validKeys='ENTER', exitKey='X', deepLevel=1, keySep=\"|\", fDEBUG=False):\r\n logger = SetLogger(package=__name__)\r\n C = LnColor()\r\n exitKeyUPP = exitKey.upper()\r\n\r\n if keySep in validKeys:\r\n validKeyLIST = validKeys.split(keySep)\r\n else:\r\n validKeyLIST = validKeys\r\n\r\n if keySep in exitKeyUPP:\r\n exitKeyLIST = exitKeyUPP.split(keySep)\r\n else:\r\n exitKeyLIST = [exitKeyUPP]\r\n\r\n print()\r\n if \" uscita temporanea\" in msg.lower():\r\n if not 'ENTER' in exitKeyLIST: exitKeyLIST.append('ENTER')\r\n fDEBUG = True\r\n\r\n if fDEBUG:\r\n funcName = __name__.split('.')[-1]\r\n C.printCyan(\" {0} - exitKeyLIST....: {1}\".format(funcName, exitKeyLIST), tab=4)\r\n C.printCyan(\" {0} - validKeyLIST...: {1}\".format(funcName, validKeyLIST), tab=4)\r\n print()\r\n caller = calledBy(deepLevel)\r\n msg = \"<{CALLER}> - [{MSG} - ({VALKEY})] ({EXITKEY} to exit) ==> \".format(CALLER=caller, MSG=msg, VALKEY=validKeys, EXITKEY=exitKey)\r\n else:\r\n msg = \"{0} [{1}] - ({2} to exit) ==> \".format(msg, validKeys, exitKey)\r\n\r\n try:\r\n 
while True:\r\n choice = input(msg).strip() # non mi accetta il colore\r\n choiceUPP = choice.upper()\r\n if fDEBUG: C.printCyan(\"choice: [{0}]\".format(choice))\r\n\r\n if choice == '': # diamo priorità alla exit\r\n if \"ENTER\" in exitKeyLIST:\r\n sys.exit()\r\n elif \"ENTER\" in validKeys:\r\n return ''\r\n else:\r\n C.printCyan('\\n... please enter something\\n')\r\n\r\n elif choiceUPP in exitKeyLIST:\r\n Exit(9998, \"Exiting on user request new.\", printStack=True)\r\n\r\n elif choice in validKeyLIST:\r\n break\r\n\r\n else:\r\n C.printCyan('\\n... try again\\n')\r\n\r\n except Exception as why:\r\n Exit(8, \"Error running program [{ME}]\\n\\n ....{WHY}\\n\".format(ME=sys.argv[0], WHY=why) )\r\n\r\n return choice\r\n\r\n###############################################\r\n#\r\n###############################################\r\ndef _calledBy(deepLevel=0):\r\n\r\n try:\r\n caller = inspect.stack()[deepLevel + 1]\r\n\r\n except Exception as why:\r\n return '{0}'.format(why)\r\n return 'Unknown - {0}'.format(why)\r\n\r\n programFile = caller[1]\r\n lineNumber = caller[2]\r\n funcName = caller[3]\r\n lineCode = caller[4]\r\n\r\n fname = os.path.basename(programFile).split('.')[0]\r\n str = \"[{0}-{1}:{2}]\".format(fname, caller[3], int (caller[2]) )\r\n if funcName == '':\r\n str = \"[{0}:{1}]\".format(fname, lineNumber)\r\n else:\r\n str = \"[{0}.{1}:{2}]\".format(fname, funcName, lineNumber)\r\n return str","sub_path":"Source/LnLib/System/GetKeyboardInput.py","file_name":"GetKeyboardInput.py","file_ext":"py","file_size_in_byte":3589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"193411809","text":"#!/bin/env run-with-python3\n\n# This demonstrates regular expression matching. The output of this script should be:\n#\n# 'maxitems = 99' => Match!\n# ' maxitems = 99' => No match!\n# 'maxitems == 99' => No match!\n# 'maxitems =99' => No match!\n# 'maxitems =' => No match!\n\nimport re\n\nif __name__ == '__main__':\n\tstrings = [\"maxitems = 99\", \" maxitems = 99\", \"maxitems == 99\", \"maxitems =99\", \"maxitems =\" ]\n\n\tregex = r\"^[a-z]+\\s+=\\s+.+$\"\n\n\tfor string in strings:\n\t\tif re.match(regex, string, re.IGNORECASE):\n\t\t\tprint(\"'{0}' => Match!\".format(string))\n\t\telse:\n\t\t\tprint(\"'{0}' => No match!\".format(string))\n\n\t# Accessing groups \n\tmatch = re.match(r\"^[a-z]+([0-9]+)[a-z]+([0-9]+)$\", \"hello88world99\", re.IGNORECASE)\n\n\tif match:\n\t\tprint(\"full match = '{0}', group 1 = '{1}, group 2 = '{2}'\".format(match.group(0),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t match.group(1),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t match.group(2)))\n","sub_path":"python-examples/regular-expressions.py","file_name":"regular-expressions.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"196255040","text":"# -*- coding: utf-8 -*-\n\"\"\"\nTencentBlueKing is pleased to support the open source community by making 蓝鲸智云-权限中心(BlueKing-IAM) available.\nCopyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.\nLicensed under the MIT License (the \"License\"); you may not use this file except in compliance with the License.\nYou may obtain a copy of the License at http://opensource.org/licenses/MIT\nUnless required by applicable law or agreed to in writing, software distributed under the License is distributed on\nan \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the\nspecific language governing permissions and limitations under the License.\n\"\"\"\n\"\"\"\nblueapps.conf\n=============\n\"\"\"\n\n\nclass BlueSettings(object):\n def __init__(self):\n from django.conf import settings as django_settings\n from blueapps.conf import default_settings\n\n self._django_settings = django_settings\n self._default_settings = default_settings\n\n def __getattr__(self, key):\n if key == key.upper():\n if hasattr(self._django_settings, key):\n return getattr(self._django_settings, key)\n elif hasattr(self._default_settings, key):\n return getattr(self._default_settings, key)\n\n raise AttributeError(\"%r object has no attribute %r\" %\n (self.__class__.__name__, key))\n\n\nsettings = BlueSettings()\n","sub_path":"saas/blueapps/conf/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"349926317","text":"\n\"\"\"\nThe marketing team is spending way too much time typing in hashtags.\nLet's help them with our own Hashtag Generator!\n\nHere's the deal:\n\nIt must start with a hashtag (#).\nAll words must have their first letter capitalized.\nIf the final result is longer than 140 chars it must return false.\nIf the input or the result is an empty string it must return false.\n\"\"\"\n\n\ndef generate_hashtag(texto):\n if len(texto)>140 or len(texto)==0: #making sure that garbage dont get in the function\n return False\n else:\n abc=texto.split() #getting a list without spaces\n mayus=[i.capitalize() for i in abc] \n mayus.insert(0,'#') \n mayus2=''.join(mayus) #getting all elements together in a string\n return mayus2\n\n\n\n\n","sub_path":"hashtag_generator.py","file_name":"hashtag_generator.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"319673874","text":"import os\nimport re\n\nimport setuptools\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nPKG = \"tock\"\nVERSIONFILE = os.path.join(PKG, \"__version__.py\")\n\ntry:\n verstrline = open(VERSIONFILE, \"rt\").read()\nexcept EnvironmentError:\n pass # Okay, there is no version file.\nelse:\n VSRE = r\"^verstr = ['\\\"]([^'\\\"]*)['\\\"]\"\n mo = re.search(VSRE, verstrline, re.M)\n if mo:\n version = mo.group(1)\n else:\n print(\"unable to find version in %s\" % (VERSIONFILE,))\n raise RuntimeError(\"if %s.py exists, it is required to be well-formed\" % (VERSIONFILE,))\n\nsetuptools.setup(\n name=\"tock-py\",\n version=version,\n author=\"Erwan LE BESCOND\",\n author_email=\"elebescond@gmail.com\",\n description=\"Build chatbots using Tock and Python\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/theopenconversationkit/tock-py/\",\n packages=setuptools.find_packages(),\n install_requires=[\n 'aiohttp==3.7.4',\n 'asyncio==3.4.3',\n 'isodate==0.6.0',\n 'marshmallow==3.9.1',\n 'marshmallow_enum==1.5.1',\n 'marshmallow-oneofschema==2.1.0',\n 'testfixtures==6.15.0',\n ],\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n python_requires='>=3.6',\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"25061522","text":"import sys\nimport 
collections\n\n\ndef solve(input):\n lower_input = input.lower()\n input_counter = collections.Counter(lower_input)\n most_2 = input_counter.most_common(2)\n\n if most_2[0][1] == most_2[1][1]:\n return '?'\n\n return most_2[0][0].upper()\n\n\nif __name__ == \"__main__\":\n rl = lambda : sys.stdin.readline().strip()\n print(solve(rl()))\n","sub_path":"string/1157.py","file_name":"1157.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"409088162","text":"import numpy as np\nimport cv2\n\nyellowMinThresh = (19, 9, 0)\nyellowMaxThresh = (110, 187, 255)\n\ndef callback(x): pass\n\ndef CreateTrackbarWindow():\n\tcv2.namedWindow(\"Trackbars\", 0) \n\tminh, mins, minv = yellowMinThresh\n\tmaxh, maxs, maxv = yellowMaxThresh\n\tcv2.createTrackbar(\"Hue Min\", \"Trackbars\", minh, 255, callback)\n\tcv2.createTrackbar(\"Hue Max\", \"Trackbars\", maxh, 255, callback)\n\tcv2.createTrackbar(\"Satuation Min\", \"Trackbars\", mins, 255, callback)\n\tcv2.createTrackbar(\"Satuation Max\", \"Trackbars\", maxs, 255, callback)\n\tcv2.createTrackbar(\"Value Min\", \"Trackbars\", minv, 255, callback)\n\tcv2.createTrackbar(\"Value Max\", \"Trackbars\", maxv, 255, callback)\n\ndef GetMinThreshhold():\n\th = cv2.getTrackbarPos(\"Hue Min\", \"Trackbars\")\n\ts = cv2.getTrackbarPos(\"Satuation Min\", \"Trackbars\")\n\tv = cv2.getTrackbarPos(\"Value Min\", \"Trackbars\")\n\treturn (h, s, v)\n\ndef GetMaxThreshhold():\n\th = cv2.getTrackbarPos(\"Hue Max\", \"Trackbars\")\n\ts = cv2.getTrackbarPos(\"Satuation Max\", \"Trackbars\")\n\tv = cv2.getTrackbarPos(\"Value Max\", \"Trackbars\")\n\treturn (h, s, v)\n\nCreateTrackbarWindow()\n\ncap = cv2.VideoCapture(0)\n\nwhile(True):\n\t# Capture frame-by-frame\n\tret, frame = cap.read()\n\n\thsvImg = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n\n\tthreshImg = cv2.inRange(hsvImg, GetMinThreshhold(), GetMaxThreshhold())\n\t\n\tmodImg = cv2.erode(threshImg, None, iterations=2)\n\tmodImg = cv2.dilate(modImg, None, iterations=4)\n\t_, contours, _ = cv2.findContours(modImg, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n\n\tif(len(contours) > 0):\n\t\tmaxAreaContour = max(contours, key=cv2.contourArea)\n\t\t(x, y), radius = cv2.minEnclosingCircle(maxAreaContour)\n\t\tcv2.circle(frame, (int(x), int(y)), int(radius),(0, 255, 255), 2)\n\t\t#cv2.drawContours(frame, maxAreaContour, -1, (0, 255, 0), 3)\n\n\t# Display the resulting frame\n\tcv2.imshow('frame',frame)\n\tif cv2.waitKey(1) & 0xFF == ord('q'):\n\t\tbreak\n\nprint(\"Min threshold\", GetMinThreshhold())\nprint(\"Max threshold\", GetMaxThreshhold())\n\n# When everything done, release the capture\ncap.release()\ncv2.destroyAllWindows()","sub_path":"demofiles/trackcolor.py","file_name":"trackcolor.py","file_ext":"py","file_size_in_byte":2020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"105400353","text":"import os\n#declarar variables\nseccion=\"\"\n#pedir la variable via argumento\nseccion=os.sys.argv[1]\nseccion_invalida=True\n\n#si la seccion es invalida volver a pedir la seccion\nwhile(seccion_invalida):\n seccion=input(\"Ingrese la seccion (A/B/C/D/E):\")\n seccion_invalida=(seccion != \"A\" and seccion != \"B\" and\n seccion !=\"C\" and seccion !=\"D\" and seccion !=\"E\")\n#fin while\nprint(\"fin del bucle\")\nprint(\"la seccion 
es:\",seccion)","sub_path":"galan/ejercicio5.py","file_name":"ejercicio5.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"473074358","text":"from flask_wtf import FlaskForm\nfrom wtforms import StringField, SubmitField, SelectField\nfrom wtforms.validators import DataRequired\n\nclass WordForm(FlaskForm):\n word = StringField('Write word', validators=[DataRequired()])\n submit = SubmitField('Next')\n\nclass VerbiForm(FlaskForm):\n number = SelectField('Number', [DataRequired()],\n choices=[('ma', 'MA infinitive'), ('da', 'DA infinitive'), ('des', 'DES form'), ('n', '1. person sg active'),('b', '3. person sg active'),\n ('o', '2. person sg imperative'), ('ge', '2. person pl imperative'), ('neg o', 'Negative 2. person sg imperative'),\n ('neg ge', 'Negative 2. person pl imperative') , ('ks', 'Conditional'), ('neg nuks', 'Conditional negative'), ('neg', 'Negative'),\n ('nud', 'NUD-participe'), ('tud', 'TUD-participe'), ('v', 'Active participe'), ('tav', 'Active impersonal participe'), ('sin', '1. person sg past simple'),\n ('sid', '2. person sg or 3. person pl past simple'), ('s', '3. person sg past simple'), ('takse', 'Impersonal'),\n ('ta', 'Impersonal negative'), ('ti', 'Impersonal past simple')])\n\n submit = SubmitField('Next')\n\nclass SubstantiiviForm(FlaskForm):\n number = SelectField('Number', [DataRequired()],\n choices=[('sg', 'Singular'),\n ('pl', 'Plural')])\n\n case = SelectField('Case', [DataRequired()],\n choices=[( 'n', 'Nominative'), ('g', 'Genitive'), ('p', 'Partitive'), ('ill', 'Illative'), ('adt', 'Illative shortform'),\n ('in', 'Inessive'), ('el', 'Elative'), ('all', 'Allative'), ('ad', 'Adessive'), ('abl', 'Ablative'), ('ab', 'Abessive'),\n ('kom', 'Komitative'), ('es', 'Essive'), ('ter', 'Terminative'), ('tr', 'Translative')])\n\n submit = SubmitField('Next')\n","sub_path":"app/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"623897677","text":"import tensorflow as tf\nfrom tensorflow_core.examples.tutorials.mnist import input_data\n\nmnist = input_data.read_data_sets(\"MNIST_data\", one_hot=True)\nbatch_size = 200\nn_batch = mnist.train.num_examples // batch_size\n\nx = tf.placeholder(tf.float32, [None, 784])\ny = tf.placeholder(tf.float32, [None, 10])\nkeep_prob = tf.placeholder(tf.float32)\nlr = tf.Variable(0.001, dtype=tf.float32)\n\n\"\"\"\nW = tf.Variable(tf.zeros([784, 10]))\nb = tf.Variable(tf.zeros([10]))\nprediction = tf.nn.softmax(tf.matmul(x, W)+b)\n\"\"\"\n\nW1 = tf.Variable(tf.truncated_normal([784, 500], stddev=0.1))\nb1 = tf.Variable(tf.zeros([500])+0.1)\nprediction1 = tf.nn.tanh(tf.matmul(x, W1)+b1)\nprediction1_drop = tf.nn.dropout(prediction1, keep_prob)\n\nW2 = tf.Variable(tf.truncated_normal([500, 250], stddev=0.1))\nb2 = tf.Variable(tf.zeros([250])+0.1)\nprediction2 = tf.nn.tanh(tf.matmul(prediction1_drop, W2)+b2)\nprediction2_drop = tf.nn.dropout(prediction2, keep_prob)\n\nW3 = tf.Variable(tf.truncated_normal([250, 10], stddev=0.1))\nb3 = tf.Variable(tf.zeros([10])+0.1)\nprediction = tf.nn.softmax(tf.matmul(prediction2_drop, W3)+b3)\n\n# loss = tf.reduce_mean(tf.square(y-prediction))\nloss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=prediction))\ntrain_step = tf.train.AdamOptimizer(lr).minimize(loss)\n\ncorrect_prediction = tf.equal(tf.argmax(y, 1), tf.arg_max(prediction, 1))\naccuracy = 
tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\nwith tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n for epoch in range(201):\n sess.run(tf.assign(lr, 0.001*(0.95**epoch)))\n for batch in range(n_batch):\n batch_x, batch_y = mnist.train.next_batch(batch_size)\n sess.run(train_step, feed_dict={x: batch_x, y: batch_y, keep_prob: 1.0})\n\n train_acc = sess.run(accuracy, feed_dict={x: mnist.train.images, y: mnist.train.labels, keep_prob: 1.0})\n test_acc = sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels, keep_prob: 1.0})\n learn_rate = sess.run(lr)\n print(\"Iter \" + str(epoch) + \",Testing Accuracy \" + str(test_acc) + \",Training Accuracy \" + str(train_acc) + \",Learning Rate \" + str(learn_rate))\n","sub_path":"boy.py","file_name":"boy.py","file_ext":"py","file_size_in_byte":2192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"327729674","text":"class FileFormatException(IOError):\n pass\n\n\ndef read_products(path: str):\n with open(path) as file:\n for line in file:\n splitted = line.split(\":\")\n if len(splitted) != 2:\n raise FileFormatException(\"Line cannot be split by ':'\")\n productid, accessories = splitted\n yield int(productid), len(accessories.split(\",\"))\n\n\ndef get_max_accessories(path: str):\n \"\"\"\n Function that finds the product ID with the highest number of accessories.\n For example, if accessories.txt contains:\n 1: 8,17\n 12: 2,3,4,5,6,7\n 13: \n 15: 1,11\n Returns (12, 6) \n \"\"\"\n try:\n product_id_max = None\n accessories_max = 0\n for product_id, accessories_count in read_products(path):\n if accessories_count >= accessories_max:\n accessories_max = accessories_count\n product_id_max = product_id\n return product_id_max, accessories_max\n except FileFormatException:\n print(\"Bad input file. 
Please check format of your file\")\n raise\n return None\n\n\nif __name__ == \"__main__\":\n print(get_max_accessories(\"accessories.txt\"))\n","sub_path":"accessories.py","file_name":"accessories.py","file_ext":"py","file_size_in_byte":1174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"440095985","text":"import tornado.web\nimport os\nfrom pycket.session import SessionMixin\n\nfrom utils import photo\nfrom utils.account import add_post_for,get_post_for\n\n\n\nclass AuthBaseHandler(tornado.web.RequestHandler,SessionMixin):\n\n def get_current_user(self):\n return self.session.get('tudo_user_info')\n\nclass IndexHandler(AuthBaseHandler):\n ''' 网页家目录 index.html'''\n @tornado.web.authenticated\n def get(self,*args,**kwargs):\n #images_path = os.path.join(self.settings.get('static_path'),'upload')\n #images = photo.get_images(images_path)\n image_urls = photo.get_images('./static/upload/images')\n self.render('index.html',images = image_urls)\n\n\nclass ExploreHandler(AuthBaseHandler):\n ''' explore.html'''\n\n @tornado.web.authenticated\n def get(self,*args,**kwargs):\n\n\n thumb_images = photo.get_images('./static/upload/images/thumb_images/')\n self.render('explore.html',images=thumb_images)\n\nclass PostHandler(AuthBaseHandler):\n ''' post 照片详情页'''\n #def get(self,*args,**kwargs):\n #self.render('post.html',post_id=kwargs['post_id'])\n #self.render('post.html',post_id=post_id)\n def get(self,post_id):\n self.render('post.html',post_id=post_id)\n\n\nclass UploadHandler(AuthBaseHandler):\n\n @tornado.web.authenticated\n def get(self, *args, **kwargs):\n next = self.get_argument('next', '')\n self.render('upload.html', next=next)\n\n def post(self, *args, **kwargs):\n img_files = self.request.files.get('newing', None)\n for img in img_files:\n saver = photo.ImageSave(self.settings['static_path'], img['filename'])\n saver.save_upload(img['body'])\n saver.make_thumb()\n\n add_post_for(self.current_user, saver.upload_url, saver.thumb_url)\n print('save to {}'.format(saver.upload_path))\n self.redirect('/')\n\n self.render('upload.html')\n\n # self.write({'msg':'got file :{}'.format(img_files[0]['filename'])})\n self.write('恭喜您,完成提交!')\n self.redirect('explore')\n\n\n\n","sub_path":"handlers/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"449294801","text":"from __future__ import print_function\nimport sys\nfrom optparse import OptionParser\nimport mdtraj\nimport pdb\nimport bilayer_analysis_functions \nfrom collections import OrderedDict\nimport numpy as np\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nfrom matplotlib.backends.backend_pdf import PdfPages\n\nparser = OptionParser()\nparser.add_option('-f', action=\"store\", type=\"string\", default = 'nopbc.xtc', dest = 'trajfile')\nparser.add_option('-c', action=\"store\", type=\"string\", default = 'Stage5_ZCon0.gro', dest = 'grofile')\nparser.add_option('-p', action=\"store\", type=\"string\", default = 'Stage5_ZCon0.gro', dest = 'pdbfile')\nparser.add_option('-o', action='store', type='string', default = 'BilayerAnalysis', dest = 'outfilename')\nparser.add_option('-b', action='store_true', default = False, dest = 'blocked')\n\n(options, args) = parser.parse_args()\ntrajfile = options.trajfile\ngrofile = options.grofile\npdbfile = options.pdbfile\noutfilename = options.outfilename\n\nprint('Loading trajectory 
<{}>...'.format(trajfile))\nprint('Loading topology <{}>...'.format(grofile))\ntraj = mdtraj.load(trajfile, top=grofile)\nprint('Loading topology <{}>...'.format(pdbfile))\ntraj_pdb = mdtraj.load(trajfile, top=pdbfile)\ntopol = traj.topology\n\n# Compute system information\nprint('Gathering system information <{}>...'.format(grofile))\nlipid_dict, headgroup_dict = bilayer_analysis_functions.get_lipids(topol)\nlipid_tails,lipid_heads = bilayer_analysis_functions.get_lipid_tails(topol, lipid_dict)\n\nn_lipid = len(lipid_dict.keys())\nn_lipid_tails = len(lipid_tails.keys())\nn_tails_per_lipid = n_lipid_tails/n_lipid\n\n\n\n# Vectorized Calculations start here\nprint('Calculating area per lipid...')\napl_avg, apl_std, apl_list = bilayer_analysis_functions.calc_APL(traj,n_lipid, blocked=options.blocked)\nprint('Calculating tilt angles...')\nangle_avg, angle_std, angle_list = bilayer_analysis_functions.calc_tilt_angle(traj, topol, lipid_tails, blocked=options.blocked)\nprint('Calculating area per tail...')\napt_avg, apt_std, apt_list = bilayer_analysis_functions.calc_APT(traj, apl_list, angle_list, n_tails_per_lipid, \n blocked=options.blocked)\nprint('Calculating nematic order...')\ns2_ave, s2_std, s2_list = bilayer_analysis_functions.calc_nematic_order(traj, lipid_dict, blocked=options.blocked)\nprint('Calculating headgroup distances...')\nheadgroup_distance_dict = bilayer_analysis_functions.compute_headgroup_distances(traj, topol, headgroup_dict, blocked=options.blocked)\nprint('Calculating bilayer height...')\nHpp_ave, Hpp_std, Hpp_list = bilayer_analysis_functions.calc_bilayer_height(traj, headgroup_distance_dict, blocked=options.blocked)\nprint('Calculating component offsets...')\noffset_dict = bilayer_analysis_functions.calc_offsets(traj, headgroup_distance_dict, blocked=options.blocked)\nprint('Calculating density profile...')\ndensity_profile, density_profile_avg, density_profile_top, density_profile_bot, bins = \\\n bilayer_analysis_functions.calc_density_profile(traj, topol, lipid_dict)\nprint('Calculating interdigitation...')\ninterdig_avg, interdig_std, interdig_list = bilayer_analysis_functions.calc_interdigitation(traj, density_profile_top, density_profile_bot, bins, blocked=options.blocked)\nprint('Calculating hydrogen bonds...')\nhbond_matrix_avg, hbond_matrix_std, hbond_matrix_list, labelmap = bilayer_analysis_functions.calc_hbonds(traj, traj_pdb, topol, lipid_dict, headgroup_dict)\n\n# Printing properties\nprint('Outputting to <{}>...'.format(outfilename))\noutfile = open((outfilename + '.txt'),'w')\noutpdf = PdfPages((outfilename+'.pdf'))\noutfile.write('{:<20s}: {}\\n'.format('Trajectory',trajfile))\noutfile.write('{:<20s}: {}\\n'.format('Structure',grofile))\noutfile.write('{:<20s}: {}\\n'.format('# Frames',traj.n_frames))\noutfile.write('{:<20s}: {}\\n'.format('Lipids',n_lipid))\noutfile.write('{:<20s}: {}\\n'.format('Tails',n_lipid_tails))\noutfile.write('{:<20s}: {} ({})\\n'.format('APL (A^2)',apl_avg, apl_std))\noutfile.write('{:<20s}: {} ({})\\n'.format('APT (A^2)',apt_avg, apt_std))\noutfile.write('{:<20s}: {} ({})\\n'.format('Bilayer Height (A)',Hpp_ave, Hpp_std))\noutfile.write('{:<20s}: {} ({})\\n'.format('Tilt Angle', angle_avg, angle_std))\noutfile.write('{:<20s}: {} ({})\\n'.format('S2', s2_ave, s2_std))\noutfile.write('{:<20s}: {} ({})\\n'.format('Interdigitation (A)', interdig_avg, interdig_std))\nfor key in offset_dict.keys():\n outfile.write('{:<20s}: {} ({})\\n'.format\n ((key + ' offset (A)'), offset_dict[key][0], 
offset_dict[key][1]))\noutfile.write('{:<20s}: {} ({})\\n'.format(\n 'Leaflet 1 Tilt Angle', np.mean(angle_list[:, 0 :int(np.floor(n_lipid_tails/2))]),\n np.std(angle_list[:, 0 :int(np.floor(n_lipid_tails/2))])))\noutfile.write('{:<20s}: {} ({})\\n'.format(\n 'Leaflet 2 Tilt Angle', np.mean(angle_list[:, int(np.floor(n_lipid_tails/2)):len(angle_list[0])]), \n np.std(angle_list[:, int(np.floor(n_lipid_tails/2)):len(angle_list[0])])))\noutfile.write('{:<20s}:\\n'.format(\"Hbonding (D-A)\"))\nfor row_label in labelmap.keys():\n for col_label in labelmap.keys():\n row_index = labelmap[row_label]\n col_index = labelmap[col_label]\n hbond_avg = hbond_matrix_avg[row_index, col_index]\n hbond_std = hbond_matrix_std[row_index, col_index]\n outfile.write('{:<20s}: {} ({})\\n'.format(str(row_label+\"-\"+ col_label), hbond_avg, hbond_std))\n\n\n# Plotting\n\nfig1 = plt.figure(1)\nplt.subplot(3,2,1)\nplt.plot(apl_list)\nplt.title('APL')\n\nplt.subplot(3,2,2)\nplt.plot(np.mean(angle_list, axis=1))\nplt.title('Tilt Angle ($^o$)')\n\nplt.subplot(3,2,3)\nplt.plot(np.mean(apt_list,axis=1))\nplt.title('APT')\n\nplt.subplot(3,2,4)\nplt.plot(Hpp_list)\nplt.title('H$_{PP}$')\n\nplt.subplot(3,2,5)\nplt.plot(s2_list)\nplt.title('S2')\n\nplt.subplot(3,2,6)\nplt.plot(interdig_list)\nplt.title('Interdigitation (A)')\n\nplt.tight_layout()\noutpdf.savefig(fig1)\nplt.close()\n\ndensity_profile_top_avg = np.mean(density_profile_top, axis = 0)\ndensity_profile_bot_avg = np.mean(density_profile_bot, axis = 0)\n\n\nfig2 = plt.figure(2)\nplt.subplot(2,1,1)\nplt.plot(bins,density_profile_avg)\nplt.xlabel('Depth (nm)')\nplt.title('Density Profile (kg m$^{-3}$)')\n\n\nplt.subplot(2,1,2)\n\n#plt.plot(bins,density_profile_bot_avg)\n#plt.plot(bins,density_profile_top_avg)\n\nplt.hist(np.mean(angle_list[:, 0 : int(np.floor(n_lipid_tails/2))], axis = 0), bins = 50, \n alpha = 0.5, facecolor = 'blue', normed = True)\nplt.hist(np.mean(angle_list[:, int(np.floor(n_lipid_tails/2)) : len(angle_list[0])], axis = 0), bins = 50, \n alpha = 0.5, facecolor = 'red', normed = True)\nplt.title('Angle Distribution by Leaflet')\nplt.xlabel('Angle ($^o$)')\n\nplt.tight_layout()\noutpdf.savefig(fig2)\nplt.close()\noutpdf.close()\n\nprint('**********')\nprint('{:^10s}'.format('Done'))\nprint('**********')\n\n\n\n\n","sub_path":"Bilayers/AnalyzeBilayer.py","file_name":"AnalyzeBilayer.py","file_ext":"py","file_size_in_byte":6751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"583596559","text":"from PIL import Image\r\nfrom huffman import count_symbols,build_tree,trim_tree,assign_codes_impl,assign_codes,to_binary_list,from_binary_list,pad_bits\r\nfrom inputBitStream import InputBitStream\r\n\r\n\r\n\r\ndef decode_header(bitstream):\r\n height = from_binary_list(bitstream.read_bits(16))\r\n width = from_binary_list(bitstream.read_bits(16))\r\n return (height, width)\r\n\r\ndef decode_tree(bitstream):\r\n flag = bitstream.read_bits(1)[0]\r\n if flag == 1: # Leaf, read and return symbol\r\n return from_binary_list(bitstream.read_bits(8))\r\n left = decode_tree(bitstream)\r\n right = decode_tree(bitstream)\r\n return (left, right)\r\n\r\ndef decode_value(tree, bitstream):\r\n bit = bitstream.read_bits(1)[0]\r\n node = tree[bit]\r\n if type(node) == tuple:\r\n return decode_value(node, bitstream)\r\n return node\r\n\r\ndef decode_pixels(height, width, tree, bitstream):\r\n pixels = bytearray()\r\n for i in range(height * width * 3):\r\n pixels.append(decode_value(tree, bitstream))\r\n return 
Image.frombytes('RGB', (width, height), bytes(pixels))\r\n\r\ndef decompress_image(in_file_name, out_file_name):\r\n print('Decompressing \"%s\" -> \"%s\"' % (in_file_name, out_file_name))\r\n\r\n print('Reading...')\r\n stream = InputBitStream(in_file_name)\r\n print('* Header offset: %d' % stream.bytes_read)\r\n height, width = decode_header(stream)\r\n stream.flush() # Ensure next chunk is byte-aligned\r\n print('* Tree offset: %d' % stream.bytes_read) \r\n trimmed_tree = decode_tree(stream)\r\n stream.flush() # Ensure next chunk is byte-aligned\r\n print('* Pixel offset: %d' % stream.bytes_read)\r\n image = decode_pixels(height, width, trimmed_tree, stream)\r\n stream.close()\r\n print('Read %d bytes.' % stream.bytes_read)\r\n\r\n print('Image size: (height=%d, width=%d)' % (height, width))\r\n print('Trimmed tree: %s' % str(trimmed_tree))\r\n image.save(out_file_name)\r\n","sub_path":"Sem 6/Image Processing/Practical 7 Huffman/MyHuffman/decompression.py","file_name":"decompression.py","file_ext":"py","file_size_in_byte":1912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"565870375","text":"# coding=utf8\nimport requests\n\nfrom toolcommon.logger import g_logger\n\nCHECK_URL = 'http://{0}/version/'\n\n\ndef check(host):\n if not host:\n g_logger.error('Host is empty')\n return\n\n url = CHECK_URL.format(host)\n g_logger.info(url)\n try:\n response = requests.get(url, timeout=3)\n g_logger.info(response.content)\n return response.ok\n except:\n g_logger.exception(url)\n return False\n\n\ndef get_admin_url(host):\n return None\n\n","sub_path":"blitz/exp_env.py","file_name":"exp_env.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"255112894","text":"from room import Room\nfrom player import Player\nfrom world import World\n\nimport random\nfrom random import choice\nfrom ast import literal_eval\n# from graph import Graph\nfrom util import Queue, Stack, Graph\n# Load world\nworld = World()\n\n\n# You may uncomment the smaller graphs for development and testing purposes.\n# map_file = \"maps/test_line.txt\" # this one passes currently\n# map_file = \"maps/test_cross.txt\"\n# map_file = \"maps/test_loop.txt\"\n# map_file = \"maps/test_loop_fork.txt\"\nmap_file = \"maps/main_maze.txt\"\n#\n# Loads the map into a dictionary\nroom_graph = literal_eval(open(map_file, \"r\").read())\nworld.load_graph(room_graph)\n\n# Print an ASCII map\nworld.print_rooms()\n\nplayer = Player(world.starting_room)\n\n# Fill this out with directions to walk\n# traversal_path = ['n', 'n']\ntraversal_path = []\n\n\n# THIS IS MY CODE BELOW.... WATCH IT DO WHAT IT DO BABY!\n# start by adding the vertexes from the list of rooms in world.rooms.value\n# that way all ther vertices are already existing and we don't have to worry about making that as we go\n# make dft that creates the edges as it goes.. 
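# [Editorial sketch - not part of the original dataset records]
# decode_tree() in decompression.py above reads a pre-order tree encoding:
# a 1 bit marks a leaf followed by an 8-bit symbol, a 0 bit marks an internal
# node followed by its left and right subtrees. A sketch of the matching
# writer; `bitstream.write_bits(list_of_bits)` is an assumed mirror of
# InputBitStream.read_bits, while to_binary_list/pad_bits are the helpers
# already imported from the huffman module above (signatures assumed).
def encode_tree(tree, bitstream):
    if type(tree) == tuple:           # internal node: 0, then both children
        bitstream.write_bits([0])
        encode_tree(tree[0], bitstream)
        encode_tree(tree[1], bitstream)
    else:                             # leaf: 1, then the 8-bit symbol
        bitstream.write_bits([1])
        bitstream.write_bits(pad_bits(to_binary_list(tree), 8))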
dont use a visited in the main dft....\n# when looking for what room to do next at a junction point look for what has a value == '?'\n# start out with grabing the starting location\ng = Graph()\ncurrent_R = player.current_room\nprint(f\"starting_room: {current_R.id}\")\nstarting_room = current_R.id\n# this creates the graph with neighbors (key being direction) with the neighboring rooms (as the value )\nfor each in world.rooms:\n g.add_vertex(each)\n for exit in world.rooms[each].get_exits():\n g.vertices[each][exit] = '?'\n# print(g.vertices)\n# this looks at entire graph and looks for values with \"?\"\n\n\ndef get_opposite(dir):\n direction = {'n': 's', 's': 'n', 'w': 'e', 'e': 'w'}\n return direction[dir]\n\n\ndef pick_room(list):\n if len(list) >= 1:\n dir = random.choice(list)\n return dir\n else:\n return None\n\n\ndef find_all_Qs():\n qs = {}\n for each in g.vertices:\n # print(f\"room: \", each)\n if g.get_room_Q(each) != None:\n qs[each] = g.get_room_Q(each)\n if len(qs) == 0:\n return None\n return qs\n\n\nthe_q_list = find_all_Qs()\n# print(f\"the_q_list: {the_q_list}, len(the_q_list): {len(the_q_list)}\")\n# g.get_room_Q(g.vertices[player.current_room.id])\n# will get you either None if empty or a list of\n# the directions for that room with value == '?'\n\n\ndef bfs_to_another_hallway(visited, currentV, Qpath, old_path, plan_to_visit, pathDirs):\n q = Queue()\n q.enqueue([currentV])\n # visited is being used from\n # adding to path needs to be the 'next_room'\n # adding current on the way back adds\n # the ending of the hallway 2x's\n q_list = find_all_Qs()\n\n together_now = ''\n while q.size() > 0 and len(q_list) > 0:\n q_list = find_all_Qs()\n # print(f\"list of the dirs with '?' {q_list}\")\n current_path = q.dequeue()\n current_room = current_path[-1]\n any = g.get_room_Q(current_room)\n # print(\n # f\"any from bfs at current_room: {current_room}, exits: {any}\")\n if any == None:\n # print('means no room is unused at this loc')\n # add directions and save a path then return it\n visited.add(current_room)\n next_rooms = g.get_neighbors(current_room)\n for dir in next_rooms:\n # print(f\"direction avail: {dir}\")\n # print(\n # f\"next room would be: {g.vertices[current_room][dir]}\")\n if g.vertices[current_room][dir] != '?' and g.vertices[current_room][dir] not in visited:\n Qpath.append(g.vertices[current_room][dir])\n new_path = Qpath.copy()\n pathDirs.append(dir)\n # hopefully this puts the dir in the right spot... 
we'll see\n traversal_path.append(dir)\n q.enqueue(new_path)\n player.travel(dir)\n else:\n # print(f\"found a room {current_room} with exits{any}\")\n # print(f\"current_path outside bfs: {old_path}\")\n # print(f\"path: {Qpath}\")\n # print(f\"player current location: {player.current_room.id}\")\n # now need to push current room to stack to continue the\n together_now = old_path + Qpath\n # print(\n # f\"together_now-putting traversal path together: {together_now}\")\n return together_now\n\n\ndef get_to_all_room():\n starting_room = current_R.id\n plan_to_visit = Stack()\n plan_to_visit.push([starting_room])\n # mainly for the breadth first back to find next crosspoint with unused exits\n Qpath = []\n # try to use this to collect the dirs along the way of traversal, at the point of traversal\n pathDirs = []\n visited = set()\n # now start the loop for the dft whileloop\n\n been_to = False\n # maybe need to add condition to look for all room to have no '?'\n while plan_to_visit.size() > 0 and been_to == False:\n current_path = plan_to_visit.pop()\n current = current_path[-1]\n # print(\n # f\"at top of while loop, current room: {current}, current path: {current_path}\")\n # current_dir_list = g.get_room_Q(current)\n current_dir_list = g.get_neighbors(current)\n if current_dir_list == None:\n # this means that the current room has no other directions to choose from not already seen\n # print(\n # f\"current room has no unused directions\\nAlso may be the end of a hallway\\n current room: {current}\")\n # print(f\"{current_path}\")\n\n path = bfs_to_another_hallway(\n visited, current, Qpath, current_path, plan_to_visit, pathDirs)\n plan_to_visit.push(path)\n # at end of hallway this is where we bfs back to\n\n # head back to find room with unexplored exits\n # current_N = g.get_neighbors(current)\n # print(f\"current_dir_list: {current_dir_list}\\ncurrent_N: {current_N}\")\n next_dir = pick_room(current_dir_list)\n if next_dir == None:\n return f\"its at the end of this line {current_path}, current loc: {current}\"\n # print(f\"next_dir: \", next_dir)\n next_room = player.current_room.get_room_in_direction(next_dir)\n next_room = next_room.id\n # print(f\"next_room: {next_room}\")\n g.add_edge(current, next_dir, next_room)\n g.add_edge(next_room, get_opposite(next_dir), current)\n # print(g.vertices)\n # now we travel down the hallway\n current_path.append(next_room)\n copy_path = current_path.copy()\n pathDirs.append(next_dir)\n # adding to the traversal_path\n traversal_path.append(next_dir)\n # print(f\"copy_path: {copy_path}, pathDirs: {pathDirs}\")\n the_q_list = find_all_Qs()\n if the_q_list == None:\n been_to = True\n # print(f\"the_q_list: {the_q_list}\")\n player.travel(next_dir)\n plan_to_visit.push(copy_path)\n\n\nget_to_all_room()\n####\n# NOW BACK TO PREVIOUSLY WRITTEN CODE (NOT MINE BELOW)... 
ITS THE TEST TRAVERSAL\n# TRAVERSAL TEST - DO NOT MODIFY\nvisited_rooms = set()\nplayer.current_room = world.starting_room\nvisited_rooms.add(player.current_room)\n\nfor move in traversal_path:\n player.travel(move)\n visited_rooms.add(player.current_room)\n\nif len(visited_rooms) == len(room_graph):\n print(\n f\"TESTS PASSED: {len(traversal_path)} moves, {len(visited_rooms)} rooms visited\")\nelse:\n print(\"TESTS FAILED: INCOMPLETE TRAVERSAL\")\n print(f\"{len(room_graph) - len(visited_rooms)} unvisited rooms\")\n\n\n#######\n# UNCOMMENT TO WALK AROUND\n#######\n# player.current_room.print_room_description(player)\n\n# while True:\n# cmds = input(\"-> \").lower().split(\" \")\n# if cmds[0] in [\"n\", \"s\", \"e\", \"w\"]:\n# player.travel(cmds[0], True)\n# elif cmds[0] == \"q\":\n# break\n# else:\n# print(\"I did not understand that command.\")\n","sub_path":"adv.py","file_name":"adv.py","file_ext":"py","file_size_in_byte":8012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"522320394","text":"from configparser import ConfigParser, MissingSectionHeaderError\nfrom json import dump, loads, load\nfrom os import chdir, path, remove\nfrom platform import system\nfrom shutil import copyfile, move\nfrom subprocess import PIPE, STDOUT, Popen\nfrom sys import exit\nfrom tkinter import Button, Tk, Entry, messagebox, TOP, YES, BOTH, W\nfrom urllib.request import urlopen, URLError\n\n\nclass button:\n def __init__(self, cust, master):\n self.cust = cust\n self.startButton = Button(master, text=self.cust, command=lambda: startCust(cust), padx=8, width=15, bg=\"#FFF\",\n activeforeground=\"#006935\", anchor=W, background=\"#ededed\",\n activebackground=\"#c4c4c4\")\n self.visible = True\n self.startButton.pack(side=TOP, expand=YES, fill=BOTH)\n\n def hide(self):\n self.visible = False\n self.startButton.pack_forget()\n\n def show(self):\n self.visible = True\n self.startButton.pack(side=TOP, expand=YES, fill=BOTH)\n\n def select(self):\n self.startButton.config(state=\"active\")\n\n def deselect(self):\n self.startButton.config(state=\"normal\")\n\n def getState(self):\n return self.startButton.config()[\"state\"][4]\n\n\ndef sortCustList(_list):\n for sortedCust in sorted(_list):\n _list[sortedCust].hide()\n _list[sortedCust].show()\n\n\ndef getVisibleCustList():\n visibleCustList = {}\n for cust in custList:\n if custList[cust].visible:\n visibleCustList[cust] = custList[cust]\n return visibleCustList\n\n\ndef writeTempConfig(cust):\n def writeTemplate(_dict, cust, text):\n if type(_dict) is dict and type(_dict.get(cust, None)) is dict:\n with open(tempConfig, 'a') as file:\n file.write('\\n;' + text + cust + '\\n')\n for param, value in _dict[cust].items():\n with open(tempConfig, 'a') as file:\n file.write(param + \"=\" + value + \"\\n\")\n\n # Create a empty temp file\n open(tempConfig, 'w').close()\n # Local default config prio 3\n writeTemplate(custDict, \"default\", \"LOCAL \")\n # Remote customer config prio 2\n writeTemplate(remoteCustDict, cust, \"REMOTE \")\n # Local customer config prio 1\n writeTemplate(custDict, cust, \"LOCAL \")\n\n\ndef coachAutoUpdate(cust):\n if path.isfile(\"ClearCoach.jar_dl\"):\n remove(\"ClearCoach.jar_dl\")\n if path.isfile(\"ClearCoach.jar_new\"):\n src = \"ClearCoach\" + cust.title() + \".jar\"\n dst = \"ClearCoach\" + cust.title() + \".jar_bak\"\n move(src, dst)\n src = \"ClearCoach\" + cust.title() + \".jar_new\"\n dst = \"ClearCoach\" + cust.title() + \".jar\"\n move(src, dst)\n\n\ndef 
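# [Editorial sketch - not part of the original dataset records] adv.py above
# interleaves a depth-first walk with a breadth-first search back to the
# nearest room that still has unexplored ('?') exits. A compact sketch of
# that standard pattern, assuming a plain connected dict graph
# {room: {direction: neighbour}} instead of the Player/Graph classes used
# above; it returns the list of moves only and mutates nothing.
from collections import deque

def walk(graph, start):
    path, seen, room = [], {start}, start
    while len(seen) < len(graph):
        new_dirs = [d for d, nxt in graph[room].items() if nxt not in seen]
        if new_dirs:                          # DFS step into an unseen room
            d = new_dirs[0]
            room = graph[room][d]
            seen.add(room)
            path.append(d)
        else:                                 # BFS back to nearest frontier
            q, visited, found = deque([[room]]), {room}, None
            while q:
                trail = q.popleft()
                here = trail[-1]
                if any(n not in seen for n in graph[here].values()):
                    found = trail
                    break
                for n in graph[here].values():
                    if n not in visited:
                        visited.add(n)
                        q.append(trail + [n])
            # replay the directions along the shortest trail found
            for a, b in zip(found, found[1:]):
                path.append(next(d for d, n in graph[a].items() if n == b))
            room = found[-1]
    return path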
initJavaParameters(jarFile):\n    if clientType == \"agent\":\n        return [javaBin, '-client', '-jar', jarFile, '-ini_file_name', tempConfig]\n    elif clientType == \"coach\":\n        return [javaBin, '-client', '-jar', jarFile, tempConfig, 'log4j.xml']\n\n\ndef initJarFile(cust):\n    jarFile = \"Clear\" + clientType.title() + cust.title() + \".jar\"\n    if not path.isfile(jarFile):\n        try:\n            copyfile(defaultJar, jarFile)\n            messagebox.showinfo(\"Notice\", \"Couldn't find \" + jarFile + \", copying \" + defaultJar + \"...\")\n        except FileNotFoundError:\n            messagebox.showinfo(\"Notice\", \"Could not find defaultJar specified in config, \"\n                                          \"copying Clear\" + clientType.title() + \".jar...\")\n            try:\n                if clientType == \"agent\":\n                    copyfile(\"ClearAgent.jar\", jarFile)\n                elif clientType == \"coach\":\n                    copyfile(\"ClearCoach.jar\", jarFile)\n            except FileNotFoundError:\n                messagebox.showerror(\"Error\", \"Can't find Clear\" + clientType.title() + \".jar, check your installation\")\n                exit()\n    return jarFile\n\n\ndef startCust(cust):\n    chdir(clearDir)\n    coachAutoUpdate(cust)\n    writeTempConfig(cust)\n    jarFile = initJarFile(cust)\n    Popen(initJavaParameters(jarFile), stdout=PIPE, stderr=STDOUT, stdin=PIPE)\n    exit()\n\n\ndef filterKey(event):\n    sortCustList(custList)\n    for cust in custList:\n        if cust.lower().startswith(filterBox.get().lower()) or len(filterBox.get()) == 0:\n            custList[cust].show()\n        else:\n            custList[cust].hide()\n    if getVisibleCustList() and event.keysym not in [\"Up\", \"Down\"]:\n        for cust in custList:\n            custList[cust].deselect()\n        custList[sorted(getVisibleCustList())[0]].select()\n\n\ndef enterKey(event):\n    for cust in getVisibleCustList():\n        if custList[cust].getState() == \"active\":\n            startCust(cust)\n\n\ndef upKey(event):\n    sortedVis = sorted(getVisibleCustList(), reverse=True)\n    keyStep(sortedVis)\n\n\ndef downKey(event):\n    sortedVis = sorted(getVisibleCustList())\n    keyStep(sortedVis)\n\n\ndef escapeKey(event):\n    exit()\n\n\ndef keyStep(sortedVis):\n    anyKeySelected = False\n    for cust in sortedVis:\n        if custList[cust].getState() == \"active\":\n            anyKeySelected = True\n            custList[cust].deselect()\n            if cust == sortedVis[-1]:\n                custList[sortedVis[0]].select()\n            else:\n                custList[sortedVis[sortedVis.index(cust) + 1]].select()\n            break\n    if not anyKeySelected:\n        custList[sortedVis[0]].select()\n\n\ndef initJson(config):\n    try:\n        with open(config) as file:\n            return load(file)\n    except FileNotFoundError:\n        if type(remoteCustDict) is dict:\n            with open(config, 'w') as file:\n                dump(remoteCustDict, file, indent=4, sort_keys=True)\n            return remoteCustDict\n        else:\n            messagebox.showerror(\"Error\", \"No customers in local list and remote inaccessible\\n\" + remoteCustServer)\n            exit()\n    except ValueError:\n        messagebox.showerror(\"ValueError\", ValueError)\n\n\ndef initRemoteCustDict():\n    url = remoteCustServer + \"?action=get&type=\" + clientType\n    try:\n        httpResponse = urlopen(url, timeout=0.1)\n    except URLError:\n        return \"URLError\"\n    jsonObject = httpResponse.read().decode('utf8')\n    response = loads(jsonObject)\n    if response[\"status\"] == \"success\":\n        return response[\"data\"]\n\n\ndef initRootWin():\n    rootWin.bind(\"<Escape>\", escapeKey)\n    rootWin.resizable(0, 0)\n    # TODO does this work on other platforms than windows?\n    # rootWin.attributes(\"-toolwindow\", 1)\n    # TODO This was neat, is this something we should use? 
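# [Editorial sketch - not part of the original dataset records]
# writeTempConfig() in clearStart.py above concatenates three ini fragments
# into one temp file: the local "default" section (prio 3), then the remote
# customer entry (prio 2), then the local customer entry (prio 1). Assuming
# the launched client keeps the last occurrence of a duplicated key, plain
# dict.update reproduces the same precedence:
def effective_config(local_dict, remote_dict, cust):
    merged = {}
    if isinstance(local_dict.get("default"), dict):
        merged.update(local_dict["default"])          # prio 3: local default
    if isinstance(remote_dict, dict) and isinstance(remote_dict.get(cust), dict):
        merged.update(remote_dict[cust])              # prio 2: remote customer
    if isinstance(local_dict.get(cust), dict):
        merged.update(local_dict[cust])               # prio 1: local customer
    return merged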
Does it work on os x/linux?\n    rootWin.overrideredirect(1)\n    rootWin.call('wm', 'attributes', '.', '-topmost', True)\n    # rootWin.after_idle(rootWin.call, 'wm', 'attributes', '.', '-topmost', False)\n\n    if clientType == \"agent\":\n        rootWin.iconbitmap(clearDir + '\\ClearAgent32.ico')\n        rootWin.wm_title('StartClearAgent')\n    elif clientType == \"coach\":\n        rootWin.iconbitmap(clearDir + '\\clearcoach.ico')\n        rootWin.wm_title('StartClearCoach')\n    rootWin.lift()\n    return rootWin\n\n\ndef initFilterBox():\n    filterBox = Entry(rootWin)\n    filterBox.pack(side=TOP, fill=BOTH)\n    filterBox.bind(\"<Return>\", enterKey)\n    filterBox.bind(\"<Up>\", upKey)\n    filterBox.bind(\"<Down>\", downKey)\n    filterBox.bind(\"<KeyRelease>\", filterKey)\n    filterBox.focus_force()\n    return filterBox\n\n\ndef initCustList():\n    custList = {}\n    for cust in custDict:\n        if cust != 'default':\n            custList[cust] = button(cust, rootWin)\n    sortCustList(custList)\n    return custList\n\n\ndef initJavaBin():\n    if system() == \"Windows\":\n        return \"jre\\\\bin\\\\javaw.exe\"\n    else:\n        return \"java\"\n\n\ndef initConfig(param, defaultValue=\"\"):\n    config = ConfigParser()\n    config.optionxform = str\n    try:\n        config.read(clearStartConfig)\n        if not path.isfile(clearStartConfig):\n            open(clearStartConfig, 'w')\n        if not config.sections():\n            raise MissingSectionHeaderError(\"\", 0, 0)\n        if config['CONFIG'][param] == \"\":\n            raise KeyError\n        else:\n            if param == \"clientType\" and config['CONFIG'][param] not in [\"agent\", \"coach\"]:\n                messagebox.showerror(\"Error\", \"Invalid clientType in config: \" + clientType)\n                exit()\n            return config['CONFIG'][param]\n    except MissingSectionHeaderError:\n        with open(clearStartConfig, 'r+') as file:\n            content = file.read()\n            file.seek(0, 0)\n            file.write(\"[CONFIG]\".strip('\\r\\n') + '\\n' + content)\n        return initConfig(param)\n    except KeyError:\n        config['CONFIG'][param] = defaultValue\n        with open(clearStartConfig, 'w') as file:\n            config.write(file)\n        if defaultValue == \"\":\n            messagebox.showerror(\"Error\", \"Missing parameter in config file: \" + param)\n            exit()\n        else:\n            messagebox.showerror(\"Error\", \"Missing parameter in config file: \" +\n                                 param + \", setting default value...\")\n        return initConfig(param)\n\n\nif __name__ == '__main__':\n    # Instancing rootWin\n    rootWin = Tk()\n\n    # Hard coded config\n    tempConfig = \"clearStart.temp\"\n    clearStartConfig = \"clearStartConfig.ini\"\n    customerConfig = \"clearStartCustomers.json\"\n\n    # Config from configuration file\n    clearDir = initConfig(\"clearDir\")\n    remoteCustServer = initConfig(\"remoteCustServer\", \"http://192.168.20.4:5001\")\n    clientType = initConfig(\"clientType\")\n    defaultJar = initConfig(\"defaultJar\", \"Clear\" + clientType.title() + \".jar\")\n\n    # Initializing data\n    remoteCustDict = initRemoteCustDict()\n    custDict = initJson(customerConfig)\n    javaBin = initJavaBin()\n\n    # Elements\n    rootWin = initRootWin()\n    filterBox = initFilterBox()\n    custList = initCustList()\n    rootWin.mainloop()\n","sub_path":"Config/clearStart.py","file_name":"clearStart.py","file_ext":"py","file_size_in_byte":9806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"619114499","text":"# Couldn't solve this one... found the solution below in the Discuss section\nclass Solution(object):\n\tdef islandPerimeter(self, grid):\n\t\t\"\"\"\n\t\t:type grid: List[List[int]]\n\t\t:rtype: int\n\t\t\"\"\"\n\t\tm, n = len(grid), len(grid[0])\n\t\tdef peri(i, j):\n\t\t\tp = 0\n\t\t\tif i > 0:\n\t\t\t\tp += grid[i][j] != grid[i-1][j]\n\t\t\tif j > 0:\n\t\t\t\tp += grid[i][j] != grid[i][j-1]\n\t\t\tif 
grid[i][j] == 1:\n\t\t\t\tp += (i==0) + (i==m-1) + (j==0) + (j==n-1)\n\t\t\treturn p\n\t\treturn sum(peri(i, j) for i in xrange(m) for j in xrange(n))\n\n# Still need to work harder\n","sub_path":"463.IslandPerimeter.py","file_name":"463.IslandPerimeter.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"312415913","text":"from modules.ftools import *\n\nmodule = {\n    'name': 'Formatting Table',\n    'version': '0.1',\n    'author': 'Filip9696',\n    'description': 'Shows all formatting on load',\n    'functions': {}\n}\n\ndef print_format_table():\n    \"\"\"\n    prints table of formatted text format options\n    \"\"\"\n    for style in range(8):\n        for fg in range(30,38):\n            s1 = ''\n            for bg in range(40,48):\n                format = ';'.join([str(style), str(fg), str(bg)])\n                s1 += '\\x1b[%sm %s \\x1b[0m' % (format, format)\n            print(s1)\n        print('\\n')\n\n\ndef onLoad():\n    print_format_table()","sub_path":"modules/formattable.py","file_name":"formattable.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"380683105","text":"#\nimport json\nimport logging\nfrom urlparse import urljoin\nfrom Products.CMFCore.utils import getToolByName\nfrom Products.Five.browser import BrowserView\nfrom plone import api\nfrom plone.registry.interfaces import IRegistry\nfrom zope.component import getUtility\nfrom zope.interface import implementer\nfrom zope.publisher.interfaces import NotFound, BadRequest, Unauthorized\nfrom zope.publisher.interfaces import IPublishTraverse\nfrom zope.security import checkPermission\nfrom .interfaces import IOAuth1Settings, IOAuth2Settings\n\n\nLOG = logging.getLogger(__name__)\n\ndef check_authenticated(func):\n    def check(*args, **kw):\n        if api.user.is_anonymous():\n            raise Unauthorized()\n        return func(*args, **kw)\n    return check\n\nclass OAuthBaseView(BrowserView):\n\n    _skey = \"{0}_oauth_token\"\n    _session = None\n    _property = \"{0}_oauth_token\"\n    config = None\n\n    def __init__(self, context, request, config):\n        super(OAuthBaseView, self).__init__(context, request)\n        self._property = self._property.format(config.id)\n        self._skey = self._skey.format(config.id)\n        self.config = config\n\n    @property\n    def session(self):\n        # get current session\n        if not self._session:\n            sdm = getToolByName(self.context, 'session_data_manager')\n            self._session = sdm.getSessionData(create=True)\n        return self._session\n\n    def getToken(self):\n        # get token for current user\n        token = member.getProperty(self._property, \"\")\n        #LOG.info('Found stored token: %s', token)\n        if token:\n            token = json.loads(token)\n        return token\n\n    def setToken(self, token):\n        # permanently store token for user.\n        # creates new memberdata property if necessary\n        member = api.user.get_current()\n        # CMF WAY? ... prepare property sheet... 
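# [Editorial sketch - not part of the original dataset records] The LeetCode
# Solution above counts, per cell, the edges shared with a different
# neighbour plus the grid border. An equivalent closed form: every land cell
# contributes 4 edges and every adjacent land pair hides 2 of them, so
# perimeter = 4*land - 2*pairs. Worked on [[1,1],[1,0]]: land=3, pairs=2,
# giving 4*3 - 2*2 = 8.
def island_perimeter(grid):
    land = pairs = 0
    for i, row in enumerate(grid):
        for j, cell in enumerate(row):
            if cell:
                land += 1
                if i and grid[i - 1][j]:   # land neighbour above
                    pairs += 1
                if j and grid[i][j - 1]:   # land neighbour to the left
                    pairs += 1
    return 4 * land - 2 * pairs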
need to do this only once?\n pmd = getToolByName(self.context, 'portal_memberdata')\n if not pmd.hasProperty(self._property):\n LOG.info('added new token property to member data tool')\n pmd.manage_addProperty(id=self._property, value=\"\", type=\"string\")\n\n # Would there be a PAS way as well?\n # acl_users = getToolByName(self.context, 'acl_users')\n # property_plugins = acl_users.plugins.listPlugins(IPropertiesPlugin)\n member.setProperties({self._property: json.dumps(token)})\n\n def hasToken(self):\n try:\n return bool(self.getToken())\n except Exception:\n return False\n\n def cleartoken(self):\n # get token for current user\n member = api.user.get_current()\n member.setProperties({self._property: \"\"})\n return_url = self.request.get('HTTP_REFERER')\n self.request.response.redirect(return_url)\n\n\nclass OAuth2View(OAuthBaseView):\n\n def oauth_session(self, token=None, state=None):\n from requests_oauthlib import OAuth2Session\n if not token:\n token = {}\n\n #scope = [\"profile\", \"email\"]\n scope = ['https://www.googleapis.com/auth/userinfo.email',\n 'https://www.googleapis.com/auth/userinfo.profile']\n\n redirect_url = self.config.redirect_url\n if not redirect_url:\n redirect_url = urljoin(self.request.getURL(), 'callback')\n\n oauth = OAuth2Session(self.config.client_id, state=state,\n redirect_uri=redirect_url, token=token,\n auto_refresh_kwargs={'client_id': self.config.client_id,\n 'client_secret': self.config.client_secret},\n auto_refresh_url=self.config.refresh_url,\n token_updater=self.setToken,\n scope=scope)\n return oauth\n\n @check_authenticated\n def authorize(self, access_type='offline', approval_prompt='force'):\n # redirect to external service authorisation page\n oauth = self.oauth_session()\n authorization_url, state = oauth.authorization_url(\n self.config.authorization_url,\n # access_type and approval_prompt are Google specific extra\n # parameters.\n access_type=access_type, approval_prompt=approval_prompt)\n # state ... roundtripped by oauth, can be used to verify response\n return_url = self.request.get('HTTP_REFERER')\n self.session[self._skey] = (state, return_url)\n # redirect to auth url?\n self.request.response.redirect(authorization_url)\n # TODO: what about failures here? return success/failure\n\n def is_callback(self):\n # check if request is a authorize \"callback\"\n return (self.config.authorization_url in self.request.get('HTTP_REFERER')\n and 'code' in self.request.form\n and 'state' in self.request.form)\n\n @check_authenticated\n def callback(self, state=None, return_url=None):\n if not self.is_callback():\n # TODO: maybe rais some other error here?\n raise NotFound(self.context, 'callback', self.request)\n # get current state to verify callback\n state, return_url = self.session.get(self._skey)\n # verify oauth callback\n oauth = self.oauth_session(state=state)\n # TODO: there should be a better way to get the full request url\n authorization_response = self.request.getURL() + '?' 
+ self.request['QUERY_STRING']\n        # the request must have some auth_response somewhere?\n        # NOTE: since oauthlib 0.7.2 which correctly compares scope\n        # we need export OAUTHLIB_RELAX_TOKEN_SCOPE=1 or catch the Warning\n        # otherwise google login won't work\n        # We no longer need 'state' after we have parsed the response url\n        token = oauth.fetch_token(\n            self.config.token_url,\n            authorization_response=authorization_response,\n            # Google specific extra parameter used for client\n            # authentication\n            client_secret=self.config.client_secret)\n        # store token and clean up session\n        if self.session.has_key(self._skey):\n            del self.session[self._skey]\n        self.setToken(token)\n        # Do another redirect to clean up the url\n        self.request.response.redirect(return_url or self.request.getURL())\n\n    @check_authenticated\n    def accesstoken(self):\n        # FIXME: this is a quick workaround, user parameter should not be here\n        if checkPermission('cmf.ManagePortal', self.context):\n            # we are admin ... check if user is set\n            username = self.request.form.get('user')\n            member = api.user.get(username=username)\n            access_token = member.getProperty(self._property, \"\")\n            if access_token:\n                access_token = json.loads(access_token)\n        else:\n            access_token = self.getToken()\n        # return full access token for current user\n        self.request.response['CONTENT-TYPE'] = 'application/json'\n        return json.dumps(access_token)\n\n    @check_authenticated\n    def clienttoken(self):\n        # only admin can fetch client token\n        if not checkPermission('cmf.ManagePortal', self.context):\n            raise Unauthorized()\n        self.request.response['CONTENT-TYPE'] = 'application/json'\n        return json.dumps({\n            'client_id': self.config.client_id,\n            'redirect_uri': self.config.redirect_url,\n            'auto_refresh_kwargs': {\n                'client_id': self.config.client_id,\n                'client_secret': self.config.client_secret,\n            },\n            'auto_refresh_url': self.config.refresh_url\n        })\n\n\n    def validate(self):\n        \"\"\"Validate a token with the OAuth provider Google.\n        \"\"\"\n        # TODO: OAuth2Session has attribute .authorized ... it only checks for presence of various tokens, but should be a good indicator of successful authorisation\n        token = self.getToken()\n        try:\n            # Defined at https://developers.google.com/accounts/docs/OAuth2LoginV1#validatingtoken\n            validate_url = ('https://www.googleapis.com/oauth2/v1/tokeninfo?'\n                            'access_token=%s' % token['access_token'])\n            # No OAuth2Session is needed, just a plain GET request\n            import requests\n            result = requests.get(validate_url)\n            # TODO: return something more useful\n            return True\n        except Exception as e:\n            LOG.info('OAuth validate failed: %s', e)\n            return False\n\n    def refresh(self):\n        \"\"\"Refreshing an OAuth 2 token using a refresh token.\n        \"\"\"\n        token = self.getToken()\n        extra = {\n            'client_id': self.config.client_id,\n            'client_secret': self.config.client_secret,\n        }\n\n        oauth = self.oauth_session(token)\n        new_token = oauth.refresh_token(self.config.refresh_url, **extra)\n        return new_token\n\n    # GOOGLE specific methods\n    def userinfo(self):\n        # fetch some info about our oauth connection and render them in template\n        token = self.getToken()\n        google = self.oauth_session(token=token)\n        userinfo_url = 'https://www.googleapis.com/oauth2/v2/userinfo'\n        # TODO: may throw requests ConnectionError in: requests.adapters:415\n        # TODO: this returns the requests response object.. 
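# [Editorial sketch - not part of the original dataset records] The
# oauth_session()/refresh() pair above relies on requests_oauthlib's
# auto-refresh hooks. A minimal usage sketch of the same wiring outside the
# Plone view; CLIENT_ID, CLIENT_SECRET, REFRESH_URL and the stored token are
# placeholders, and save_token stands in for OAuth2View.setToken.
from requests_oauthlib import OAuth2Session

def save_token(token):
    print('token refreshed, expires_at:', token.get('expires_at'))

session = OAuth2Session(
    'CLIENT_ID',
    token={'access_token': '...', 'refresh_token': '...',
           'token_type': 'Bearer', 'expires_in': -30},   # already expired
    auto_refresh_url='REFRESH_URL',
    auto_refresh_kwargs={'client_id': 'CLIENT_ID',
                         'client_secret': 'CLIENT_SECRET'},
    token_updater=save_token)
# With an expired token, the next request refreshes first and then retries:
# resp = session.get('https://www.googleapis.com/oauth2/v2/userinfo')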
shall we retrun somethin else?\n result = google.get(userinfo_url)\n return result.text\n\n\nclass OAuth1View(OAuthBaseView):\n\n def oauth_session(self, token=None, state=None):\n from requests_oauthlib import OAuth1Session\n if not token:\n # token should contain access token if available\n token = {}\n\n redirect_url = self.config.redirect_url\n if not redirect_url:\n redirect_url = urljoin(self.request.getURL(), 'callback')\n\n # TODO: for ourselves we need to put static token key into resource_owner_xx\n oauth = OAuth1Session(client_key=self.config.client_key,\n client_secret=self.config.client_secret,\n resource_owner_key=token.get('oauth_token'),\n resource_owner_secret=token.get('oauth_token_secret'),\n verifier=token.get('oauth_verifier'),\n callback_uri=redirect_url,\n signature_type='auth_header')\n return oauth\n\n @check_authenticated\n def authorize(self):\n # redirect to external service authorisation page\n oauth = self.oauth_session()\n # get a request token for ourselves\n request_token = oauth.fetch_request_token(self.config.request_url)\n # get the authorization url and redirect user to it\n authorization_url = oauth.authorization_url(self.config.authorization_url)\n # state ... roundtripped by oauth, can be used to verify response\n return_url = self.request.get(\"HTTP_REFERER\")\n self.session[self._skey] = (request_token, return_url)\n # redirect to auth url?\n self.request.response.redirect(authorization_url)\n # TODO: return something about success / failure?\n\n def is_callback(self):\n return ('oauth_verifier' in self.request.form\n and 'oauth_token' in self.request.form\n and self.config.oauth_url in self.request.environ['HTTP_REFERER'])\n\n @check_authenticated\n def callback(self):\n if not self.is_callback():\n raise NotFound(self.context, 'callback', self.request)\n # get info from session and request\n token, return_url = self.session.get(self._skey)\n # get auth_token to fetch access_token\n # token should be the request token used to initiate the authorization\n # start an oauth session with all our old tokens from authorize\n oauth = self.oauth_session(token=token) # should return token\n # now we can update our session with the authorize response\n # TODO: there should be a better way to get the full request url\n authorization_response = self.request.getURL() + '?' + self.request['QUERY_STRING']\n # Parsing the url, updates the state of oauth session as well\n request_token = oauth.parse_authorization_response(authorization_response)\n # TODO: verify request_token somehow?\n # We have got a request token with verifier. (already set in oauth session)\n # Fetch the final access token\n access_token = oauth.fetch_access_token(self.config.access_url)\n # clean up session and store token for user\n if self.session.has_key(self._skey):\n del self.session[self._skey]\n self.setToken(access_token)\n # redirect to last known address?\n self.request.response.redirect(return_url or self.request.getURL())\n\n @check_authenticated\n def accesstoken(self):\n # FIXME: this is a quick workaround, user parameter should not be here\n # allow for user parameter in case we are admin\n if checkPermission('cmf.ManagePortal', self.context):\n # we are admin ... 
check if user is set\n username = self.request.form.get('user')\n member = api.user.get(username=username)\n access_token = member.getProperty(self._property, \"\")\n if access_token:\n access_token = json.loads(access_token)\n else:\n access_token = self.getToken()\n # return full access token for current user\n self.request.response['CONTENT-TYPE'] = 'application/json'\n return json.dumps({\n 'oauth_token': access_token['oauth_token'],\n 'oauth_token_secret': access_token['oauth_token_secret']\n })\n\n @check_authenticated\n def clienttoken(self):\n # only admin can fetch client token\n if not checkPermission('cmf.ManagePortal', self.context):\n raise Unauthorized()\n self.request.response['CONTENT-TYPE'] = 'application/json'\n return json.dumps({\n 'client_key': self.config.client_key,\n 'client_secret': self.config.client_secret,\n })\n\n # Figshare API\n def validate(self):\n # TODO: OAuth2Session has attribute .authorized ... it only checks for presence of various tokens, but should be a good indicator of successfull authorisation\n token = self.getToken()\n try:\n oauth = self.oauth_session(token=token)\n\n # params = {\n # 'page': 0,\n # 'status': 'drafts', # private, public\n # }\n params = None\n\n response = oauth.get('http://api.figshare.com/v1/my_data/articles', params=params)\n #data=json.dumps(body), headers=headers)\n #/articles\n return response.status_code == 200\n except Exception as e:\n LOG.info('OAuth validate failed: %s', e)\n return False\n\n\n# TODO: always the sam e.... IPublishTraverse or ITraverse?\n@implementer(IPublishTraverse)\nclass OAuthTraverser(BrowserView):\n # parse urls like oauth//\n\n _serviceid = None\n _view = None\n\n def publishTraverse(self, context, name):\n # no serviceid yet ? .... name should be it\n if not self._serviceid:\n registry = getUtility(IRegistry)\n coll = registry.collectionOfInterface(IOAuth1Settings)\n for cid, config in coll.items():\n if cid == name:\n self._serviceid = name\n self._view = OAuth1View(self.context, self.request, config)\n return self\n coll = registry.collectionOfInterface(IOAuth2Settings)\n for cid, config in coll.items():\n if cid == name:\n self._serviceid = name\n self._view = OAuth2View(self.context, self.request, config)\n return self\n # raise NotFound\n raise NotFound(self, name, self.request)\n else:\n # we have a serviceid ... 
name should now be a command\n if name in ('authorize', 'callback', 'accesstoken', 'clienttoken', 'cleartoken'):\n return getattr(self._view, name)\n raise NotFound(self, name, self.request)\n\n def __call__(self):\n raise BadRequest('Missing parameter')\n","sub_path":"src/org/bccvl/site/oauth/oauth.py","file_name":"oauth.py","file_ext":"py","file_size_in_byte":16440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"249538637","text":"\"\"\"\nPartially reconstructed charm from B,\nwith muon and slow pion(s) tags.\nIncludes\n- D0 -> K3pi with muon and D* tag\n- Lambda_c+ -> pKpi with muon and Lambda_c* tags\n\"\"\"\n__author__ = ['Mika Vesterinen']\n__date__ = '10/03/2013'\n__version__ = '$Revision: 0.0 $'\n\nfrom Gaudi.Configuration import *\n\nfrom Configurables import FilterDesktop, CombineParticles, OfflineVertexFitter\nfrom PhysSelPython.Wrappers import Selection\nfrom StrippingConf.StrippingLine import StrippingLine\nfrom StrippingUtils.Utils import LineBuilder\nfrom StandardParticles import StdAllLoosePions, StdLoosePions, StdLooseMuons, StdLooseKaons, StdLooseProtons\nfrom Configurables import TisTosParticleTagger\n\n__all__ = ('CharmFromBSemiForHadronAsyAllLinesConf',\n 'TOSFilter',\n 'confdict')\n\nconfdict = {\n \"prescale_LbRS\" : 1.0\n ,\"prescale_LbWS\" : 0.2 \n ,\"prescale_D0to3piRS\" : 1.0\n ,\"prescale_D0to3piWS\" : 0.2\n ,\"prescale_D0toK2piRS\" : 1.0\n ,\"prescale_D0toK2piWS\" : 0.2 \n #### common to all modes\n ,\"GEC_nLongTrk\" : 250. # adimensional\n ,\"GHOSTPROB_MAX\" : 0.35 #adimensional\n ,\"Mu_PT\" : 800. # MeV\n ,\"H_PT\" : 250. # MeV\n ,\"Pi_PIDKMax\" : 6. # adimensional\n ,\"K_PIDKMin\" : 6. # adimensional\n ,\"Slowpi_PIDKMax\" : 10. # adimensional\n ,\"Slowpi_PIDeMax\" : 99. # adimensional\n ,\"Slowpi_PTMin\" : 200. # MeV\n ##### specific to D0 modes\n ,\"MuPi_SUMPT_MIN\" : 1300. # MeV\n ,\"MuPi_DOCACHI2_MAX\" : 8.0 # adimensional\n ,\"MuPi_CHI2NDOF_MAX\" : 3.0 # adimensional\n ,\"MuPi_DIRA_MIN\" : -99. # adimensional\n ,\"MuPi_FDCHI2_MIN\" : 20. # adimensional\n ,\"D0to3H_REQUIRE_TOS\" : True # bool\n ,\"D0to3H_DOCACHI2_MAX\" : 10. # adimensional\n ,\"D0to3H_VCHI2NDF_MAX\" : 3.0 # adimensional\n ,\"D0to3H_SUMPT_MIN\" : 1800. # MeV\n ,\"D0to3H_DZ\" : 2.0 # mm\n ,\"D0to3H_3pi_DeltaMass_MAX\" : 350. # MeV\n ,\"D0to3H_K2pi_DeltaMass_MAX\" : 250. # MeV\n ,\"D0to3H_3pi_MASS_MIN\" : 900. # MeV\n ,\"D0to3H_3pi_MASS_MAX\" : 1400. # MeV\n ,\"D0to3H_K2pi_MASS_MIN\" : 1300. # MeV\n ,\"D0to3H_K2pi_MASS_MAX\" : 1800. # MeV\n ,\"D0to3H_B_MASS_MIN\" : 1800. # MeV\n ,\"D0to3H_B_MASS_MAX\" : 4900. # MeV\n ,\"D0to3H_B_DIRA_MIN\" : 0.99 # adimensional\n ,\"D0to3H_B_VCHI2NDF_MAX\" : 15. # adimensional\n ,\"D0to3H_B_DOCACHI2_MAX\" : 50. # adimensional\n #### specific to Lambda_c+ modes\n ,\"PiPi_MASS_MAX\" : 500. # MeV\n ,\"PiPi_DOCACHI2_MAX\" : 15. # adimensional\n ,\"PiPi_CHI2NDF\" : 3. # adimensional\n ,\"PiPi_SUMPT_MIN\" : 600. # MeV\n ,\"MuPiPi_DOCACHI2_MAX\" : 15. \n ,\"MuPiPi_CHI2NDF\": 3.\n ,\"MuPiPi_FDCHI2_MIN\" : 20. # adimensional\n ,\"Lc2Kpi_REQUIRE_TOS\" : True # bool\n ,\"Lc2Kpi_DOCACHI2_MAX\" : 10. # adimensional\n ,\"Lc2Kpi_VCHI2NDF_MAX\" : 3.0 # adimensional\n ,\"Lc2Kpi_SUMPT_MIN\" : 1500. # MeV\n ,\"Lc2Kpi_FDCHI2_MIN\" : 20. # adimensional\n ,\"Lc2Kpi_MASS_MIN\" : 800. # MeV\n ,\"Lc2Kpi_MASS_MAX\" : 1350. # MeV\n ,\"Lc2Kpi_DeltaMass_MAX\" : 700. # MeV\n ,\"Lc2Kpi_DZ\" : 1.0 # mm\n ,\"Lc2Kpi_B_MASS_MIN\" : 2200. # MeV\n ,\"Lc2Kpi_B_MASS_MAX\" : 4300. # MeV\n ,\"Lc2Kpi_B_FDCHI2_MIN\" : 20. 
# adimensional\n ,\"Lc2Kpi_B_DIRA_MIN\" : 0.99 # adimensional\n ,\"Lc2Kpi_B_DOCACHI2_MAX\" : 50. # adimensional\n ,\"Lc2Kpi_B_VCHI2NDF_MAX\" : 15. # adimensional\n } \n\nclass CharmFromBSemiForHadronAsyAllLinesConf(LineBuilder) :\n \"\"\"\n \"\"\"\n \n __configuration_keys__ = (\n \"prescale_LbRS\"\n ,\"prescale_LbWS\"\n ,\"prescale_D0to3piRS\" \n ,\"prescale_D0to3piWS\" \n ,\"prescale_D0toK2piRS\"\n ,\"prescale_D0toK2piWS\"\n #### common to all modes\n ,\"GEC_nLongTrk\" \n ,\"GHOSTPROB_MAX\"\n ,\"Mu_PT\"\n ,\"H_PT\" \n ,\"Pi_PIDKMax\"\n ,\"K_PIDKMin\"\n ,\"Slowpi_PIDKMax\"\n ,\"Slowpi_PIDeMax\"\n ,\"Slowpi_PTMin\" \n ##### specific to D0 modes\n ,\"MuPi_SUMPT_MIN\"\n ,\"MuPi_DOCACHI2_MAX\"\n ,\"MuPi_CHI2NDOF_MAX\"\n ,\"MuPi_DIRA_MIN\" \n ,\"MuPi_FDCHI2_MIN\"\n ,\"D0to3H_REQUIRE_TOS\"\n ,\"D0to3H_DOCACHI2_MAX\" \n ,\"D0to3H_VCHI2NDF_MAX\"\n ,\"D0to3H_SUMPT_MIN\"\n ,\"D0to3H_DZ\" \n ,\"D0to3H_3pi_DeltaMass_MAX\" \n ,\"D0to3H_K2pi_DeltaMass_MAX\" \n ,\"D0to3H_3pi_MASS_MIN\"\n ,\"D0to3H_3pi_MASS_MAX\" \n ,\"D0to3H_K2pi_MASS_MIN\"\n ,\"D0to3H_K2pi_MASS_MAX\"\n ,\"D0to3H_B_MASS_MIN\" \n ,\"D0to3H_B_MASS_MAX\" \n ,\"D0to3H_B_DIRA_MIN\" \n ,\"D0to3H_B_VCHI2NDF_MAX\" \n ,\"D0to3H_B_DOCACHI2_MAX\" \n #### specific to Lambda_c+ modes\n ,\"PiPi_MASS_MAX\" \n ,\"PiPi_DOCACHI2_MAX\" \n ,\"PiPi_CHI2NDF\" \n ,\"PiPi_SUMPT_MIN\" \n ,\"MuPiPi_DOCACHI2_MAX\"\n ,\"MuPiPi_CHI2NDF\"\n ,\"MuPiPi_FDCHI2_MIN\"\n ,\"Lc2Kpi_REQUIRE_TOS\"\n ,\"Lc2Kpi_DOCACHI2_MAX\" \n ,\"Lc2Kpi_VCHI2NDF_MAX\" \n ,\"Lc2Kpi_SUMPT_MIN\" \n ,\"Lc2Kpi_FDCHI2_MIN\"\n ,\"Lc2Kpi_MASS_MIN\" \n ,\"Lc2Kpi_MASS_MAX\"\n ,\"Lc2Kpi_DeltaMass_MAX\" \n ,\"Lc2Kpi_DZ\" \n ,\"Lc2Kpi_B_MASS_MIN\" \n ,\"Lc2Kpi_B_MASS_MAX\" \n ,\"Lc2Kpi_B_FDCHI2_MIN\" \n ,\"Lc2Kpi_B_DIRA_MIN\" \n ,\"Lc2Kpi_B_DOCACHI2_MAX\" \n ,\"Lc2Kpi_B_VCHI2NDF_MAX\" \n )\n __confdict__={}\n \n def __init__(self, _name, config) :\n\n LineBuilder.__init__(self, _name, config)\n self.__confdict__=config\n \n ### define the global event cuts\n ### max number of long tracks\n GECs = { \"Code\":\"( recSummaryTrack(LHCb.RecSummary.nLongTracks, TrLONG) < %(GEC_nLongTrk)s )\" %config ,\n \"Preambulo\": [\"from LoKiTracks.decorators import *\"]}\n \n ### define the muon cuts\n self.MuonCuts = \"(PT > %(Mu_PT)s *MeV) & (TRGHOSTPROB < %(GHOSTPROB_MAX)s) & (PIDmu > 0)\" %config\n self.PionCuts = \"(PT > %(H_PT)s *MeV) & (TRGHOSTPROB < %(GHOSTPROB_MAX)s) & (PIDK < %(Pi_PIDKMax)s)\" %config \n self.KaonCuts = \"(PT > %(H_PT)s *MeV) & (TRGHOSTPROB < %(GHOSTPROB_MAX)s) & (PIDK > %(K_PIDKMin)s)\" %config \n self.SlowpionCuts = \"(PT > %(Slowpi_PTMin)s*MeV) & (TRGHOSTPROB < %(GHOSTPROB_MAX)s) & (PIDe < %(Slowpi_PIDeMax)s)\"\\\n \"& (PIDK < %(Slowpi_PIDKMax)s)\" %config\n \n self.Muons = Selection( \"Mufor\" + _name,\n Algorithm = FilterDesktop(name = \"MuFilterFor\"+_name, Code = self.MuonCuts ),\n RequiredSelections = [StdLooseMuons])\n \n self.Pions = Selection( \"Pifor\" + _name,\n Algorithm = FilterDesktop(name = \"PiFilterFor\"+_name, Code = self.PionCuts),\n RequiredSelections = [StdLoosePions])\n\n self.Kaons = Selection( \"Kfor\" + _name,\n Algorithm = FilterDesktop(name = \"KFilterFor\"+_name, Code = self.KaonCuts),\n RequiredSelections = [StdLooseKaons])\n\n self.Slowpions = Selection( \"Slowpifor\" + _name,\n Algorithm = FilterDesktop(name = \"SlowpiFilterFor\"+_name, Code = self.SlowpionCuts),\n RequiredSelections = [StdLoosePions])\n #### D0 -> K3pi lines\n \n self.D0to3pi_RS = D0To3HMaker(\"D0To3piRSFor\"+_name\n ,config[\"D0to3H_3pi_MASS_MIN\"],config[\"D0to3H_3pi_MASS_MAX\"]\n ,config\n 
,[self.Pions]\n ,['[D0 -> pi+ pi- pi+]cc'])\n\n self.D0to3pi_WS = D0To3HMaker(\"D0To3piWSFor\"+_name\n ,config[\"D0to3H_3pi_MASS_MIN\"],config[\"D0to3H_3pi_MASS_MAX\"]\n ,config\n ,[self.Pions]\n ,['[D0 -> pi+ pi- pi+]cc','[D0 -> pi+ pi+ pi+]cc'])\n \n self.D0toK2pi_RS = D0To3HMaker(\"D0ToK2piRSFor\"+_name\n ,config[\"D0to3H_K2pi_MASS_MIN\"],config[\"D0to3H_K2pi_MASS_MAX\"]\n ,config,[self.Kaons,self.Pions]\n ,['[D0 -> K- pi+ pi-]cc','[D0 -> K- pi+ pi+]cc'])\n\n self.D0toK2pi_WS = D0To3HMaker(\"D0ToK2piWSFor\"+_name\n ,config[\"D0to3H_K2pi_MASS_MIN\"],config[\"D0to3H_K2pi_MASS_MAX\"]\n ,config,[self.Kaons,self.Pions]\n ,['[D0 -> K- pi+ pi-]cc',\n '[D0 -> K- pi+ pi+]cc',\n '[D0 -> K- pi- pi-]cc',\n '[D0 -> K+ pi+ pi-]cc',\n '[D0 -> K+ pi+ pi+]cc',\n '[D0 -> K+ pi- pi-]cc'])\n\n self.MuPi_RS = MuPiMaker(\"MuPiRSFor\"+_name\n ,config\n ,[self.Muons,self.Slowpions]\n ,['[K*(892)+ -> mu+ pi-]cc'])\n\n self.MuPi_WS = MuPiMaker(\"MuPiWSFor\"+_name\n ,config\n ,[self.Muons,self.Slowpions]\n ,['[K*(892)+ -> mu+ pi-]cc',\n '[K*(892)+ -> mu+ pi+]cc',\n '[K*(892)+ -> mu- pi-]cc'])\n\n self.BtoDstarMuD0to3pi_RS = BtoDstarMuMaker(\"BtoDstarMuD0to3pi_RSFor\"+_name\n ,config[\"D0to3H_3pi_DeltaMass_MAX\"]\n ,config\n ,[self.MuPi_RS,self.D0to3pi_RS]\n ,['[B0 -> D~0 K*(892)+]cc'])\n\n self.BtoDstarMuD0to3pi_WS = BtoDstarMuMaker(\"BtoDstarMuD0to3pi_WSFor\"+_name\n ,config[\"D0to3H_3pi_DeltaMass_MAX\"]\n ,config\n ,[self.MuPi_WS,self.D0to3pi_WS]\n ,['[B0 -> D~0 K*(892)+]cc'])\n \n self.BtoDstarMuD0toK2pi_RS = BtoDstarMuMaker(\"BtoDstarMuD0toK2pi_RSFor\"+_name\n ,config[\"D0to3H_K2pi_DeltaMass_MAX\"]\n ,config\n ,[self.MuPi_RS,self.D0toK2pi_RS]\n ,['[B0 -> D~0 K*(892)+]cc'])\n\n self.BtoDstarMuD0toK2pi_WS = BtoDstarMuMaker(\"BtoDstarMuD0toK2pi_WSFor\"+_name\n ,config[\"D0to3H_K2pi_DeltaMass_MAX\"]\n ,config\n ,[self.MuPi_WS,self.D0toK2pi_WS]\n ,['[B0 -> D~0 K*(892)+]cc'])\n \n self.B2DstarMuD0to3piRSLine = StrippingLine(_name+'_B2DstarMuD0to3piRS',\n prescale = config[\"prescale_D0to3piRS\"],\n FILTER=GECs,\n selection = self.BtoDstarMuD0to3pi_RS)\n\n self.B2DstarMuD0to3piWSLine = StrippingLine(_name+'_B2DstarMuD0to3piWS',\n prescale = config[\"prescale_D0to3piWS\"],\n FILTER=GECs,\n selection = self.BtoDstarMuD0to3pi_WS)\n \n self.B2DstarMuD0toK2piRSLine = StrippingLine(_name+'_B2DstarMuD0toK2piRS',\n prescale = config[\"prescale_D0toK2piRS\"],\n FILTER=GECs,\n selection = self.BtoDstarMuD0toK2pi_RS)\n\n self.B2DstarMuD0toK2piWSLine = StrippingLine(_name+'_B2DstarMuD0toK2piWS',\n prescale = config[\"prescale_D0toK2piWS\"],\n FILTER=GECs,\n selection = self.BtoDstarMuD0toK2pi_WS)\n \n \n ###################### Lambda_c lines ##########################\n \n self.LcToKpi_RS = LcToKpiMaker(\"LcToKpiRSFor\"+_name,config,[self.Kaons,self.Pions],['[Lambda_c+ -> K- pi+]cc'])\n self.LcToKpi_WS = LcToKpiMaker(\"LcToKpiWSFor\"+_name,config,[self.Kaons,self.Pions],\n ['[Lambda_c+ -> K- pi+]cc','[Lambda_c+ -> K+ pi-]cc','[Lambda_c+ -> K+ pi+]cc','[Lambda_c+ -> K- pi-]cc'])\n \n self.PiPi_RS = PiPiMaker(\"PiPiRSFor\"+_name,config,[self.Slowpions],['rho(770)0 -> pi+ pi-'])\n self.PiPi_WS = PiPiMaker(\"PiPiWSFor\"+_name,config,[self.Slowpions],['rho(770)0 -> pi+ pi-','rho(770)0 -> pi+ pi+','rho(770)0 -> pi- pi-'])\n\n self.MuPiPi_RS = MuPiPiMaker(\"MuPiPiRSFor\"+_name,config,[self.Muons,self.PiPi_RS],['K*(892)+ -> mu+ rho(770)0'])\n self.MuPiPi_WS = MuPiPiMaker(\"MuPiPiWSFor\"+_name,config,[self.Muons,self.PiPi_WS],['K*(892)+ -> mu+ rho(770)0',\n 'K*(892)+ -> mu- rho(770)0'])\n\n self.Lb_RS = 
LbMaker(\"LbRSFor\"+_name,config,[self.MuPiPi_RS,self.LcToKpi_RS],['[Lambda_b0 -> Lambda_c+ K*(892)-]cc'])\n self.Lb_WS = LbMaker(\"LbWSFor\"+_name,config,[self.MuPiPi_WS,self.LcToKpi_WS],['[Lambda_b0 -> Lambda_c+ K*(892)-]cc'])\n\n self.LbRSLine = StrippingLine(_name+'_LbToLcStarMuLcToKpiRS',\n prescale = config[\"prescale_LbRS\"],\n FILTER=GECs,\n selection = self.Lb_RS)\n \n self.LbWSLine = StrippingLine(_name+'_LbToLcStarMuLcToKpiWS',\n prescale = config[\"prescale_LbWS\"],\n FILTER=GECs,\n selection = self.Lb_WS)\n\n ######## register lines \n \n self.registerLine(self.B2DstarMuD0to3piRSLine) \n self.registerLine(self.B2DstarMuD0to3piWSLine) \n self.registerLine(self.B2DstarMuD0toK2piRSLine) \n self.registerLine(self.B2DstarMuD0toK2piWSLine) \n self.registerLine(self.LbRSLine) \n self.registerLine(self.LbWSLine) \n \ndef MuPiMaker(_combName,config,_RequiredSelections,_decayDescriptors):\n _CombinationCut = \"(ACHILD(PT,1)+ACHILD(PT,2) > %(MuPi_SUMPT_MIN)s *MeV)\"\\\n \"& (ADOCACHI2CUT(%(MuPi_DOCACHI2_MAX)s, ''))\" %config\n _MotherCut = \"(VFASPF(VCHI2/VDOF) < %(MuPi_CHI2NDOF_MAX)s)\"\\\n \"& (BPVDIRA> %(MuPi_DIRA_MIN)s)\"\\\n \"& (BPVVDCHI2 > %(MuPi_FDCHI2_MIN)s)\" %config\n _comb = CombineParticles( name = \"Comb\"+_combName,\n DecayDescriptors = _decayDescriptors,\n CombinationCut = _CombinationCut,\n MotherCut = _MotherCut)\n sel = Selection(\"Sel\"+_combName,\n Algorithm = _comb,\n RequiredSelections = _RequiredSelections)\n return sel\n\ndef BtoDstarMuMaker(_combName,DeltaMass_MAX,config,_RequiredSelections,_decayDescriptors):\n preambulo = ['PXD0 = ACHILD(PX,1)',\n 'PYD0 = ACHILD(PY,1)',\n 'PZD0 = ACHILD(PZ,1)',\n 'ED0 = ACHILD(E,1)',\n 'PXPI = ACHILD(CHILD(PX,2),2)',\n 'PYPI = ACHILD(CHILD(PY,2),2)',\n 'PZPI = ACHILD(CHILD(PZ,2),2)',\n 'EPI = ACHILD(CHILD(E,2),2)',\n 'MD0PI = sqrt((EPI+ED0)**2 - (PXPI+PXD0)**2 - (PYPI+PYD0)**2 - (PZPI+PZD0)**2)',\n 'DELTA_MASS = MD0PI - ACHILD(M,1)']\n _CombinationCut = \"(DELTA_MASS < %s *MeV)\" %DeltaMass_MAX\n _CombinationCut += \"& (ADOCACHI2CUT(%(D0to3H_B_DOCACHI2_MAX)s, ''))\" %config \n _MotherCut = \"(M > %(D0to3H_B_MASS_MIN)s *MeV) & (M < %(D0to3H_B_MASS_MAX)s *MeV)\"\\\n \"& (MINTREE((ABSID=='D0'),VFASPF(VZ)) - MINTREE((ABSID=='K*(892)+'),VFASPF(VZ)) > %(D0to3H_DZ)s *mm )\"\\\n \"& (VFASPF(VCHI2/VDOF) < %(D0to3H_B_VCHI2NDF_MAX)s)\"\\\n \"& (BPVDIRA> %(D0to3H_B_DIRA_MIN)s)\" %config\n _comb = CombineParticles( name = \"Comb\"+_combName,\n Preambulo = preambulo,\n DecayDescriptors = _decayDescriptors,\n CombinationCut = _CombinationCut,\n MotherCut = _MotherCut)\n sel = Selection(\"Sel\"+_combName,\n Algorithm = _comb,\n RequiredSelections = _RequiredSelections)\n if config[\"D0to3H_REQUIRE_TOS\"] == True:\n sel_Hlt1TOS = TOSFilter( \"Sel\"+_combName+\"_Hlt1TOS\"\n ,sel\n ,\"Hlt1.*Decision%TOS\")\n sel_Hlt2TOS = TOSFilter( \"Sel\"+_combName+\"_Hlt2TOS\"\n ,sel_Hlt1TOS\n ,\"Hlt2.*Decision%TOS\")\n return sel_Hlt2TOS\n else:\n return sel\n\ndef D0To3HMaker(_combName,MASS_MIN,MASS_MAX,config,_RequiredSelections,_decayDescriptors):\n _CombinationCut = \"(AM+10 > %s *MeV) & (AM-10 < %s *MeV)\" %(MASS_MIN,MASS_MAX)\n _CombinationCut = _CombinationCut + \"& (ADOCACHI2CUT(%(D0to3H_DOCACHI2_MAX)s, ''))\"\\\n \"& (ACHILD(PT,1) + ACHILD(PT,2) + ACHILD(PT,3) > %(D0to3H_SUMPT_MIN)s)\" %config\n ### different mass cut depending on whether we miss a pion or a kaon\n _MotherCut = \"(M > %s *MeV) & (M < %s *MeV)\" %(MASS_MIN,MASS_MAX)\n _MotherCut = _MotherCut + \"& (VFASPF(VCHI2/VDOF)< %(D0to3H_VCHI2NDF_MAX)s)\" %config \n _comb = CombineParticles( 
name = \"Comb\"+_combName,\n DecayDescriptors = _decayDescriptors,\n CombinationCut = _CombinationCut,\n MotherCut = _MotherCut)\n sel = Selection(\"Sel\"+_combName,\n Algorithm = _comb,\n RequiredSelections = _RequiredSelections)\n return sel\n\n\n\ndef PiPiMaker(_combName,config,_RequiredSelections,_decayDescriptors):\n comb = CombineParticles(name=\"Comb\"+_combName,\n DecayDescriptors = _decayDescriptors, \n CombinationCut = \"(AM < %(PiPi_MASS_MAX)s*MeV)\"\\\n \"& (ACUTDOCACHI2(%(PiPi_DOCACHI2_MAX)s,''))\"\\\n \"& (ACHILD(PT,1) + ACHILD(PT,2) > %(PiPi_SUMPT_MIN)s *MeV)\" % config, \n MotherCut = \"(VFASPF(VCHI2/VDOF)< %(PiPi_CHI2NDF)s)\" % config) \n sel = Selection(\"Sel\"+_combName,\n Algorithm = comb,\n RequiredSelections = _RequiredSelections)\n return sel\n\ndef MuPiPiMaker(_combName,config,_RequiredSelections,_decayDescriptors):\n comb = CombineParticles(name=\"Comb\"+_combName,\n DecayDescriptors = _decayDescriptors, \n CombinationCut = \"(ACUTDOCACHI2(%(MuPiPi_DOCACHI2_MAX)s,''))\" % config,\n MotherCut = \"(VFASPF(VCHI2/VDOF)< %(MuPiPi_CHI2NDF)s)\"\\\n \"& (BPVVDCHI2 > %(MuPiPi_FDCHI2_MIN)s)\" %config)\n sel = Selection(\"Sel\"+_combName,\n Algorithm = comb,\n RequiredSelections = _RequiredSelections)\n return sel\n\ndef LcToKpiMaker(_combName,config,_RequiredSelections,_decayDescriptors):\n ### Cuts on the Kpi combination\n ### sumpt, mass and docachi2 cuts\n _CombinationCut = \"(ACHILD(PT,1)+ACHILD(PT,2) > %(Lc2Kpi_SUMPT_MIN)s *MeV)\"\\\n \"& (AM+10 > %(Lc2Kpi_MASS_MIN)s *MeV)\"\\\n \"& (AM-10 < %(Lc2Kpi_MASS_MAX)s *MeV)\"\\\n \"& (ADOCACHI2CUT(%(Lc2Kpi_DOCACHI2_MAX)s, ''))\" % config\n ### mass, vertex quality and FD cuts\n _MotherCut = \"(M > %(Lc2Kpi_MASS_MIN)s *MeV)\"\\\n \"& (M < %(Lc2Kpi_MASS_MAX)s *MeV)\"\\\n \"& (VFASPF(VCHI2/VDOF)< %(Lc2Kpi_VCHI2NDF_MAX)s)\"\\\n \"& (BPVVDCHI2 > %(Lc2Kpi_FDCHI2_MIN)s)\" % config\n _comb = CombineParticles( name = \"Comb\"+_combName,\n DecayDescriptors = _decayDescriptors,\n CombinationCut = _CombinationCut,\n MotherCut = _MotherCut)\n sel = Selection(\"Sel\"+_combName,\n Algorithm = _comb,\n RequiredSelections = _RequiredSelections)\n return sel\n\ndef LbMaker(_combName,config,_RequiredSelections,_decayDescriptors):\n preambulo = ['PXLC = ACHILD(PX,1)',\n 'PYLC = ACHILD(PY,1)',\n 'PZLC = ACHILD(PZ,1)',\n 'ELC = ACHILD(E,1)',\n 'PXPIPI = ACHILD(CHILD(PX,2),2)',\n 'PYPIPI = ACHILD(CHILD(PY,2),2)',\n 'PZPIPI = ACHILD(CHILD(PZ,2),2)',\n 'EPIPI = ACHILD(CHILD(E,2),2)',\n 'MLCPIPI = sqrt((EPIPI+ELC)**2 - (PXPIPI+PXLC)**2 - (PYPIPI+PYLC)**2 - (PZPIPI+PZLC)**2)',\n 'DELTA_MASS_LC = MLCPIPI - ACHILD(M,1)']\n comb = CombineParticles(name=\"Comb\"+_combName,\n DecayDescriptors = _decayDescriptors,\n Preambulo = preambulo,\n CombinationCut = \"(DELTA_MASS_LC < %(Lc2Kpi_DeltaMass_MAX)s *MeV)\"\\\n \"& (AM > %(Lc2Kpi_B_MASS_MIN)s *MeV) & (AM < %(Lc2Kpi_B_MASS_MAX)s *MeV)\"\\\n \"& (ACUTDOCACHI2(%(Lc2Kpi_B_DOCACHI2_MAX)s,''))\" % config,\n MotherCut = \"(M > %(Lc2Kpi_B_MASS_MIN)s *MeV) & (M < %(Lc2Kpi_B_MASS_MAX)s *MeV)\"\\\n \"& (MINTREE((ABSID=='Lambda_c+'),VFASPF(VZ)) - MINTREE((ABSID=='K*(892)-'),VFASPF(VZ)) > %(Lc2Kpi_DZ)s *mm )\"\\\n \"& (VFASPF(VCHI2/VDOF) < %(Lc2Kpi_B_VCHI2NDF_MAX)s)\"\n \"& (BPVDIRA> %(Lc2Kpi_B_DIRA_MIN)s)\"\\\n \"& (BPVVDCHI2 > %(Lc2Kpi_B_FDCHI2_MIN)s)\" %config)\n sel = Selection(\"Sel\"+_combName,\n Algorithm = comb,\n RequiredSelections = _RequiredSelections)\n if config[\"Lc2Kpi_REQUIRE_TOS\"] == True:\n sel_Hlt1TOS = TOSFilter( \"Sel\"+_combName+\"_Hlt1TOS\"\n ,sel\n ,\"Hlt1.*Decision%TOS\")\n sel_Hlt2TOS = 
TOSFilter( \"Sel\"+_combName+\"_Hlt2TOS\"\n ,sel_Hlt1TOS\n ,\"Hlt2.*Decision%TOS\")\n return sel_Hlt2TOS\n else:\n return sel\n \ndef TOSFilter( name, _input, _trigger ) :\n from Configurables import TisTosParticleTagger\n _tisTosFilter = TisTosParticleTagger( name + \"Tagger\" )\n _tisTosFilter.TisTosSpecs = { _trigger : 0 }\n return Selection( name\n , Algorithm = _tisTosFilter\n , RequiredSelections = [ _input ]\n )\n\n","sub_path":"DaVinciDev_v38r1p1/Phys/StrippingArchive/python/StrippingArchive/Stripping20r0p1/StrippingCharmFromBSemiForHadronAsy.py","file_name":"StrippingCharmFromBSemiForHadronAsy.py","file_ext":"py","file_size_in_byte":23088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"297016654","text":"# coding: utf-8\n\nimport pygame\nimport const\nimport settings\n\n\nclass Game:\n def __init__(self, display=None, caption=None, start_position=None, step=None):\n pygame.init()\n self.screen = pygame.display\n self.screen.set_mode(display)\n self.screen.set_caption(caption)\n self.x_pos, self.y_pos = start_position\n self.step = step\n\n self.run()\n\n def run(self):\n while True:\n self.track_input(pygame.event.get())\n self.reload_gameplay()\n\n def reload_gameplay(self):\n pygame.display.get_surface().fill(pygame.Color(1, 35, 53, 0))\n block = pygame.Surface((const.BLOCK_SIZE, const.BLOCK_SIZE))\n block.fill(pygame.Color(234, 234, 0, 0))\n pygame.display.get_surface().blit(block, (self.x_pos, self.y_pos))\n pygame.display.flip()\n\n def track_input(self, events):\n for event in events:\n\n if event.type == pygame.QUIT:\n exit(0)\n\n if event.type == pygame.KEYDOWN:\n self.identification_keyword(event.key)\n\n def identification_keyword(self, keyword):\n if keyword in settings.keymap.keys():\n self.move(settings.keymap[keyword])\n\n def move(self, direction):\n movement = {\n settings.KEYWORDS['up']: [\n self.x_pos,\n self.y_pos - self.step if self.y_pos >= const.BLOCK_SIZE else self.y_pos\n ],\n settings.KEYWORDS['right']: [\n self.x_pos + self.step if self.x_pos < const.DISPLAY_SIZE[0] - const.BLOCK_SIZE else self.x_pos,\n self.y_pos\n ],\n settings.KEYWORDS['down']: [\n self.x_pos,\n self.y_pos + self.step if self.y_pos < const.DISPLAY_SIZE[1] - const.BLOCK_SIZE else self.y_pos\n ],\n settings.KEYWORDS['left']: [\n self.x_pos - self.step if self.x_pos >= const.BLOCK_SIZE else self.x_pos,\n self.y_pos\n ]\n }\n self.x_pos, self.y_pos = movement[direction]\n\n\ndef main():\n Game(\n caption=const.CAPTION,\n display=const.DISPLAY_SIZE,\n start_position=const.START_POSITION,\n step=const.STEP\n )\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":2287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"307364267","text":"def hd(a,b):\n ans = \"\"\n if a == \"H\":\n ans = \"H\" if b == \"H\" else \"D\"\n else:\n ans = \"D\" if b == \"H\" else \"H\"\n\n return ans\n\ndef main():\n a , b = map(str, input().split())\n print(hd(a , b))\n\nif __name__ == '__main__':\n main()\n","sub_path":"abc/AtCoderBeginnerContest056/A.py","file_name":"A.py","file_ext":"py","file_size_in_byte":258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"433758849","text":"\nimport re\nimport requests\nfrom requests.exceptions import ProxyError\nfrom urllib3.exceptions import MaxRetryError\nimport json\nimport time\nfrom random import choice\nimport pymongo\n\nproxyHost = 
\"http-dyn.abuyun.com\"\nproxyPort = \"9020\"\nproxyUser = \"HP7Y01400L962MUD\"\nproxyPass = \"ED8926D78920DD44\"\n\nMONGO_URL='192.168.1.161'\nMONGO_DB='test'\nMONGO_PORT=27017\n\npattern1=re.compile('(我刚刚关注了)*.*?\\$.*?\\$.*?当前价.*',re.S)\npattern2=re.compile('(在¥.*?)*关注((股票)|(基金)|( 5:\n self.redirect(\"/member/answeroption\")\n \n result.answer1 = result.answer2 = result.answer3 = result.answer4 = result.answer5 = \"\"\n \n if answernum >= 1:\n result.answer1 = answer1\n if answernum >= 2:\n result.answer2 = answer2\n if answernum >= 3:\n result.answer3 = answer3\n if answernum >= 4:\n result.answer4 = answer4\n if answernum >= 5:\n result.answer5 = answer5\n\n result.answernum = answernum\n\n session.commit()\n \n self.redirect(\"/member/answeroption\" )\n session.close()\n\n","sub_path":"src/handlers/member/answeroptionedit.py","file_name":"answeroptionedit.py","file_ext":"py","file_size_in_byte":1642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"128197355","text":"from bs4 import BeautifulSoup\nimport requests\nimport os\nimport boto3\nimport json\n\ns3 = boto3.client(\n 's3',\n aws_access_key_id=os.environ['MORPH_AWS_ACCESS_KEY'],\n aws_secret_access_key=os.environ['MORPH_AWS_SECRET_KEY'],\n region_name='eu-west-1'\n)\n\nres = requests.get(\"https://www.standardmedia.co.ke/business/category/19/business-news\")\n\nraw = res.content\n\nhtml = BeautifulSoup(raw, 'html.parser')\n\ndata = []\n\nbase_url = \"https://www.standardmedia.co.ke\"\n\nul = html.find(\"ul\", class_=\"business-lhs\")\n\nitems = ul.find_all(\"div\", class_=\"col-xs-6\")\n\nfor item in items:\n img_src = item.find(\"img\").get(\"src\")\n img_url = base_url+img_src\n text = item.find(\"h4\").text\n link = item.find(\"h4\").find(\"a\").get(\"href\")\n data.append({\n 'title':text,\n 'link':link,\n 'img':img_url\n })\n\ns3.put_object(\n Bucket='taxclock.codeforkenya.org',\n ACL='public-read',\n Key='data/standard-news.json',\n Body=json.dumps(data)\n)\n\nprint(\"ok\")\n","sub_path":"scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"507392947","text":"\"\"\"StudyManager URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.0/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path, include\nfrom django.conf.urls.static import static\nfrom django.conf import settings\nfrom django.views.generic.base import TemplateView\nfrom documents import views\nfrom registration import views as registration_views\n\n\nurlpatterns = [\n path('', TemplateView.as_view(template_name='home.html'), name='home'),\n path('admin/', admin.site.urls),\n path('upload/', views.upload),\n\n # paths to the document views/pages\n path('documents/', views.doclist),\n path('documents/information_systems_development/', views.ISDdocs),\n path('documents/enterprise_architecture_management/', views.EAMdocs),\n path('documents/management_information_systems', views.MISdocs),\n path('documents/business_process_management/', views.BPMdocs),\n path('documents/data_and_application_security/', views.DASdocs),\n path('documents/data_management/', views.DMdocs),\n path('documents/business_statistics/', views.BSdocs),\n path('documents/innovation_lab/', views.ILdocs),\n path('documents/information_systems_modelling/', views.ISMdocs),\n path('documents/digital_innovation/', views.DIdocs),\n path('documents/data_science/', views.DSdocs),\n path('documents/digital_business/', views.DBdocs),\n path('documents/emerging_it_topics/', views.EITdocs),\n path('documents/human_centered_design/', views.HCDdocs),\n path('documents/research_methods/', views.RMdocs),\n path('documents/research_seminar/', views.RSdocs),\n path('documents/project_seminar/', views.PSdocs),\n path('documents/master_thesis/', views.MTdocs),\n\n # path to like\n path('like/', views.like_document, name='like-document'),\n\n # This path is needed, to delete the file (compares primary key of file)\n path('documents/', views.deletedoc, name='deletedoc'),\n\n # include the auth app at registration/ - standard provided by django\n path('registration/', registration_views.signup, name='signup'),\n path('registration/', include('django.contrib.auth.urls')),\n\n # change password, register\n path('password_change/', registration_views.password_change, name='password_change'),\n path('registration/done/', TemplateView.as_view(template_name='registration/signup_done.html'), name='signup_done'),\n\n# add the static path for the media root to the urlpatterns, to make the documents accessable/downloadable\n] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n","sub_path":"StudyManager/StudyManager/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":3096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"273291482","text":"import argparse\n\nimport numpy\nimport theano\nimport cPickle as pkl\n\nfrom doc_nmt import (load_params, init_params, init_tparams,\n build_model, pred_probs, prepare_data)\n\nfrom data_iterator import TextIterator\n\nprofile = False\n\ndef main(model, dictionary, dictionary_target, source, target, context, outfile, wordbyword):\n\n # load model model_options\n with open('%s.pkl' % model, 'rb') as f:\n options = pkl.load(f)\n\n valid_noshuf = TextIterator(source, target, context,\n dictionary, dictionary_target,\n n_words_source=options['n_words_src'], n_words_target=options['n_words'],\n batch_size=options['valid_batch_size'], maxlen=2000, shuffle=False,\n tc=options['kwargs'].get('tc', False))\n\n # allocate model parameters\n params = init_params(options)\n\n # load model parameters and set theano shared variables\n params = 
load_params(model, params)\n tparams = init_tparams(params)\n\n trng, use_noise, \\\n x, x_mask, y, y_mask, xc, xc_mask, \\\n opt_ret, \\\n cost, cost_, xc_mask_2, xc_mask_3 = \\\n build_model(tparams, options)\n inps = [x, x_mask, y, y_mask, xc, xc_mask, xc_mask_2, xc_mask_3]\n\n f_log_probs = theano.function(inps, cost, profile=profile)\n\n valid_errs = pred_probs(f_log_probs, prepare_data, options, valid_noshuf, verbose=True)\n numpy.save(outfile, valid_errs)\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('model', type=str)\n parser.add_argument('dictionary', type=str)\n parser.add_argument('dictionary_target', type=str)\n parser.add_argument('source', type=str)\n parser.add_argument('target', type=str)\n parser.add_argument('context', type=str)\n parser.add_argument('outfile', type=str)\n\n args = parser.parse_args()\n\n main(args.model, args.dictionary, args.dictionary_target,\n args.source, args.target, args.context, args.outfile)\n\n","sub_path":"doc_logprobs.py","file_name":"doc_logprobs.py","file_ext":"py","file_size_in_byte":1986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"70038490","text":"import sys\r\nimport torch\r\nfrom subprocess import check_output\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\n# Check if it GPU with memory > 4GB is available\r\ndef on_gpu():\r\n gpu = False\r\n if torch.cuda.is_available():\r\n try:\r\n gpu_index, gpu_name, memory_total, memory_free = check_output(\r\n [\"C:/Program Files/NVIDIA Corporation/NVSMI/nvidia-smi.exe\", \"--query-gpu=index,name,memory.total,memory.free\",\r\n \"--format=csv,noheader,nounits\"]).decode(sys.stdout.encoding).strip().split(\",\")\r\n memory_free = int(memory_free)\r\n if memory_free >= 4000:\r\n gpu = True\r\n except:\r\n gpu = True\r\n return gpu\r\n\r\n\r\n# Plot loss for each epoch\r\ndef plotting(epochs, loss_array, path):\r\n fig, ax = plt.subplots()\r\n ax.plot(range(1, epochs+1), loss_array)\r\n # ax.set_xticks(range(0, epochs+1, 50))\r\n ax.set(xlabel='epochs', ylabel='loss',\r\n title=('Loss epochs ='+ str(epochs)))\r\n ax.grid()\r\n fig.savefig(path+'Loss_epochs_'+str(epochs)+'.png')\r\n plt.close(fig)\r\n\r\n\r\n# Print stats about current calculations to console and to file\r\ndef print_stats(stats_file, text, print_to_console=True):\r\n with open(stats_file, 'a') as f:\r\n if isinstance(text, list):\r\n for t in text:\r\n f.write(t + \"\\n\")\r\n if print_to_console:\r\n print(t)\r\n else:\r\n f.write(text + \"\\n\")\r\n if print_to_console:\r\n print(text)\r\n f.close()\r\n","sub_path":"codes/stats_scripts.py","file_name":"stats_scripts.py","file_ext":"py","file_size_in_byte":1540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"581806198","text":"import logging\n\ndef get_logger(name=__name__):\n '''\n get a custom logger\n :param name: name of the file\n :return: logger instance\n '''\n # Create log handler\n logHandler = logging.StreamHandler()\n\n # Set handler format\n logFormat = logging.Formatter(\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\", datefmt=\"%d-%b-%y\")\n logHandler.setFormatter(logFormat)\n # Create logger\n logger = logging.getLogger(name)\n # Add handler to logger\n logger.addHandler(logHandler)\n logger.setLevel(logging.DEBUG)\n\n return 
logger\n","sub_path":"custom_logger.py","file_name":"custom_logger.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"548499101","text":"from django.conf.urls import include, url\n\nfrom django.contrib import admin\n\nfrom planner.views import department_load_summary\n\nadmin.autodiscover()\n\nurlpatterns = [#'',# is '' OK?\n\n url(r'^$', department_load_summary, name='department_load_summary'),\n url(r'^planner/', include('planner.urls')),\n\n url(r'^accounts/', include('django.contrib.auth.urls')),\n #url(r'^accounts/login/', include('django.contrib.auth.urls')),\n\n url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n url(r'^admin/', admin.site.urls),\n]\n","sub_path":"four_year_plan/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"274960105","text":"# Copyright (c) 2018-2020 Simons Observatory.\n# Full license can be found in the top level \"LICENSE\" file.\n\"\"\"Hardware configuration utilities.\n\"\"\"\n\nimport os\nimport re\nimport copy\n\nfrom collections import OrderedDict\n\nimport gzip\n\nimport numpy as np\n\nimport toml\n\n\nclass Hardware(object):\n \"\"\"Class representing a specific hardware configuration.\n\n The data is stored in a dictionary, and can be loaded / dumped to disk\n as well as trimmed to include only a subset of detectors.\n\n Args:\n path (str, optional): If specified, configuration is loaded from this\n file during construction.\n\n \"\"\"\n def __init__(self, path=None):\n self.data = OrderedDict()\n if path is not None:\n self.load(path)\n\n def dump(self, path, overwrite=False, compress=False):\n \"\"\"Write hardware config to a TOML file.\n\n Dump data to a TOML format file, optionally compressing the contents\n with gzip and optionally overwriting the file.\n\n Args:\n path (str): The file to write.\n overwrite (bool): If True, overwrite the file if it exists.\n If False, then existing files will cause an exception.\n compress (bool): If True, compress the data with gzip on write.\n\n Returns:\n None\n\n \"\"\"\n if os.path.exists(path):\n if overwrite:\n os.remove(path)\n else:\n raise RuntimeError(\"Dump path {} already exists. Use \"\n \"overwrite option\".format(path))\n if compress:\n with gzip.open(path, \"wb\") as f:\n dstr = toml.dumps(self.data)\n f.write(dstr.encode())\n else:\n with open(path, \"w\") as f:\n dstr = toml.dumps(self.data)\n f.write(dstr)\n return\n\n def load(self, path):\n \"\"\"Read data from a TOML file.\n\n The file can either be regular text or a gzipped version of a TOML\n file.\n\n Args:\n path (str): The file to read.\n\n Returns:\n None\n\n \"\"\"\n dstr = None\n try:\n with gzip.open(path, \"rb\") as f:\n dstr = f.read()\n self.data = toml.loads(dstr.decode())\n except OSError:\n with open(path, \"r\") as f:\n dstr = f.read()\n self.data = toml.loads(dstr)\n return\n\n def wafer_map(self):\n \"\"\"Construct wafer mapping to other auxilliary data.\n\n Given the current data state, build dictionaries to go from wafers\n to all other non-detector info: telescopes, tubes, cards, crates,\n and bands. 
This is a convenient mapping when pruning the hardware\n information or doing other kinds of lookups.\n\n Returns:\n (dict): Nested dictionaries from wafers to other properties.\n\n \"\"\"\n result = OrderedDict()\n\n tube_to_tele = dict()\n for tele, props in self.data[\"telescopes\"].items():\n for tb in props[\"tubes\"]:\n tube_to_tele[tb] = tele\n\n wafer_to_tube = dict()\n for tb, props in self.data[\"tubes\"].items():\n for wf in props[\"wafers\"]:\n wafer_to_tube[wf] = tb\n\n crate_to_card = dict()\n for crate, props in self.data[\"crates\"].items():\n for card in props[\"cards\"]:\n crate_to_card[card] = crate\n\n result[\"cards\"] = {x: y[\"card\"]\n for x, y in self.data[\"wafers\"].items()}\n result[\"crates\"] = {x: crate_to_card[y[\"card\"]]\n for x, y in self.data[\"wafers\"].items()}\n result[\"bands\"] = {x: y[\"bands\"]\n for x, y in self.data[\"wafers\"].items()}\n result[\"tubes\"] = wafer_to_tube\n result[\"telescopes\"] = {x: tube_to_tele[wafer_to_tube[x]] for x in\n list(self.data[\"wafers\"].keys())}\n return result\n\n def select(self, telescopes=None, tubes=None, match=dict()):\n \"\"\"Select a subset of detectors.\n\n Select detectors whose properties match some criteria. A new Hardware\n object is created and returned. If a matching expression is not\n specified for a given property name, then this is equivalent to\n selecting all values of that property.\n\n Before selecting on detector properties, any telescope / tube filtering\n criteria are first applied.\n\n Each key of the \"match\" dictionary should be the name of a detector\n property to be considered for selection (e.g. band, wafer, pol, pixel).\n The value is a matching expression which can be:\n\n - A list of explicit values to match.\n - A string containing a regex expression to apply.\n\n Example:\n Imagine you wanted to select all 90GHz detectors on wafers 25 and\n 26 which have \"A\" polarization and are located in pixels 20-29\n (recall the \".\" matches a single character)::\n\n new = hw.select(match={\"wafer\": [\"25\", \"26\"],\n \"band\": \"MF.1\",\n \"pol\": \"A\",\n \"pixel\": \"02.\"})\n\n Args:\n telescopes (str): A regex string to apply to telescope names or a\n list of explicit names.\n tubes (str): A regex string to apply to tube names or a list of\n explicit names.\n match (dict): The dictionary of property names and their matching\n expressions.\n\n Returns:\n (Hardware): A new Hardware instance with the selected detectors.\n\n \"\"\"\n # First parse any telescope and tube options into a list of wafers\n wselect = None\n tbselect = None\n if telescopes is not None:\n tbselect = list()\n for tele in telescopes:\n tbselect.extend(self.data[\"telescopes\"][tele][\"tubes\"])\n if tubes is not None:\n if tbselect is None:\n tbselect = list()\n tbselect.extend(tubes)\n if tbselect is not None:\n wselect = list()\n for tb in tbselect:\n wselect.extend(self.data[\"tubes\"][tb][\"wafers\"])\n\n dets = self.data[\"detectors\"]\n\n # Build regex matches for each property\n reg = dict()\n if \"wafer\" in match:\n # Handle wafer case separately, since we need to merge any\n # match with our telescope / tube selection of wafers above.\n k = \"wafer\"\n v = match[k]\n if wselect is None:\n # Just the regular behavior\n if isinstance(v, list):\n reg[k] = re.compile(r\"(\"+\"|\".join(v)+r\")\")\n else:\n reg[k] = re.compile(v)\n else:\n # Merge our selection\n wall = list(wselect)\n if isinstance(v, list):\n wall.extend(v)\n else:\n wall.append(v)\n reg[k] = 
re.compile(r\"(\"+\"|\".join(wall)+r\")\")\n elif wselect is not None:\n # No pattern in the match dictionary, just our list from the\n # telescope / tube selection.\n reg[\"wafer\"] = re.compile(r\"(\"+\"|\".join(wselect)+r\")\")\n\n for k, v in match.items():\n if (k == \"wafer\"):\n # Already handled above\n continue\n else:\n if isinstance(v, list):\n reg[k] = re.compile(r\"(\"+\"|\".join(v)+r\")\")\n else:\n reg[k] = re.compile(v)\n\n # Go through all detectors selecting things that match all fields\n newwafers = set()\n newdets = OrderedDict()\n for d, props in dets.items():\n keep = True\n for k, v in reg.items():\n if k in props:\n test = v.match(props[k])\n if test is None:\n keep = False\n break\n if keep:\n newwafers.add(props[\"wafer\"])\n newdets[d] = copy.deepcopy(props)\n\n # Now compute the reduced set of auxilliary data needed for these\n # detectors.\n wafermap = self.wafer_map()\n\n # Copy this data\n hw = Hardware()\n hw.data = OrderedDict()\n for k, v in wafermap.items():\n hw.data[k] = OrderedDict()\n tocopy = set()\n for wf in newwafers:\n if isinstance(v[wf], list):\n for iv in v[wf]:\n tocopy.add(iv)\n else:\n tocopy.add(v[wf])\n for elem in tocopy:\n hw.data[k][elem] = copy.deepcopy(self.data[k][elem])\n\n # Copy over the wafer data\n hw.data[\"wafers\"] = OrderedDict()\n for wf in newwafers:\n hw.data[\"wafers\"][wf] = copy.deepcopy(self.data[\"wafers\"][wf])\n\n # And the detectors...\n hw.data[\"detectors\"] = newdets\n\n return hw\n","sub_path":"sotodlib/core/hardware.py","file_name":"hardware.py","file_ext":"py","file_size_in_byte":9120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"433366990","text":"import random\nimport RandomPartition\nimport myTesting\n\ndef getIthNum(myList,i,begin = None,end = None,function = lambda x:x):\n try:\n if begin == None and end == None:\n begin = 0\n end = len(myList)\n if i >= end or i < begin:\n raise IndexError\n else:\n initpos = random.randint(begin,end-1)\n par = RandomPartition.partition(myList,initpos,begin,end,function)\n if par > initpos:\n if par == i:\n return myList[initpos]\n else:\n RandomPartition.swap(myList,initpos,par)\n if i > par:\n return getIthNum(myList,i,begin = par+1,end = end,function = function)\n else: return getIthNum(myList,i,begin = begin,end = par,function = function) \n else:\n if par + 1 == i:\n return myList[initpos]\n else:\n RandomPartition.swap(myList,initpos,par+1)\n if i > par + 1:\n return getIthNum(myList,i,begin = par+2,end = end,function = function)\n else: return getIthNum(myList,i,begin = begin,end = par+1,function = function)\n except IndexError:\n print('This list does not contain ith largest number.')\n\ndef getIthItemLinear(myList,i,begin = None,end = None,function = lambda x:x):\n if begin == None and end == None:\n begin = 0\n end = len(myList)\n listOfMedium = []\n tempList = []\n count = 0\n for c in range(begin,end):\n if count == 5:\n listOfMedium.append(findMedium(tempList))\n tempList = []\n count = 0\n else:\n tempList.append(myList[c])\n count += 1\n\n\ndef findMedium(myList):\n myAnswer = myTesting.mergeSort(myList)\n return myAnswer[(len(myAnswer)-1)//2]\n","sub_path":"code/Medium.py","file_name":"Medium.py","file_ext":"py","file_size_in_byte":1900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"570169400","text":"'''\nCreated on 2013-4-8\n@author: Bobi Pu, bobi.pu@usc.edu\n'''\nfrom DBController import DBController\nfrom bs4 import BeautifulSoup, 
element\nimport re, cookielib\nfrom FSLog import logException\nfrom mechanize import Browser, _http\nfrom dateutil import parser\nfrom datetime import datetime\n\n\nclass DataExtractor(object):\n def __init__(self):\n self.db = DBController()\n self.br = self.login()\n \n def login(self):\n br = Browser()\n cj = cookielib.LWPCookieJar()\n br.set_cookiejar(cj)\n \n br.set_handle_equiv(True)\n br.set_handle_redirect(True)\n br.set_handle_referer(True)\n br.set_handle_robots(False)\n br.set_handle_refresh(_http.HTTPRefreshProcessor(), max_time=2)\n \n br.open('http://www.fatsecret.com/Auth.aspx?pa=s')\n br.select_form(nr=0)\n #name attr of login tr\n #PLEASE input your username and password here!!!!\n br['_ctl0:_ctl7:Logincontrol1:Name'] = 'username'\n br['_ctl0:_ctl7:Logincontrol1:Password'] = 'password'\n br.submit()\n return br\n \n #========================================================================================\n # URLType: 0 memberURL, 1 weightHistory, 2 dietHistory, 3 groups, 4 challenges, 5 buddies\n #========================================================================================\n def getURL(self, user, URLType):\n if URLType == 0:\n return 'http://fatsecret.com/member/' + '+'.join(user['name'].encode('utf-8', 'ignore').split())\n if user['serverId'] is None:\n return None\n elif URLType == 1:\n return 'http://www.fatsecret.com/Default.aspx?pa=memh&id=' + user['serverId']\n elif URLType == 2:\n return 'http://www.fatsecret.com/Diary.aspx?pa=mdcs&id=' + user['serverId']\n elif URLType == 3:\n return 'http://www.fatsecret.com/Default.aspx?pa=memgrps&id=' + user['serverId']\n elif URLType == 4:\n return 'http://www.fatsecret.com/Default.aspx?pa=memchals&id=' + user['serverId']\n elif URLType == 5:\n return 'http://www.fatsecret.com/Default.aspx?pa=memb&id=' + user['serverId']\n else:\n raise Exception('invalid URL type')\n \n def convertUserIdToUserList(self, userId):\n if userId is None or userId == []:\n return self.db.getAllUserList()\n elif isinstance(userId, list) and userId != []:\n userList = []\n for v in userId:\n user = self.db.getUserById(v)\n if user is not None:\n userList.append(user)\n return userList\n elif isinstance(userId, int):\n user = self.db.getUserById(userId)\n return [user] if user is not None else []\n else:\n raise Exception('invalid input userId')\n \n def getServerId(self, userId=None):\n users = self.convertUserIdToUserList(userId)\n for user in users:\n if 'serverId' in user and user['serverId'] is not None:\n continue\n serverId = None\n try:\n memberURL = self.getURL(user, 0)\n page = self.br.open(memberURL)\n soup = BeautifulSoup(page.read())\n result = soup.find('div', attrs={'align' : 'right', 'class' : 'smallText', 'style' : 'padding-top:5px'})\n if result is not None:\n for tag in result.contents:\n if isinstance(tag, element.Tag) and 'href' in tag.attrs and tag.attrs['href'].find('id') != -1:\n serverId = tag.attrs['href'].split('id=')[1]\n break \n except Exception as e:\n logException(user['id'], self.getServerId.__name__, e)\n finally: \n self.db.updateServerId(user['id'], serverId)\n \n def getWeightHistory(self, userId=None):\n users = self.convertUserIdToUserList(userId)\n for user in users:\n diet, startWeight, goalWeight, weightHistory = None, None, None, None\n try:\n if user['serverId'] is not None:\n weightHistoryURL = self.getURL(user, 1)\n page = self.br.open(weightHistoryURL)\n soup = BeautifulSoup(page.read())\n tag = soup.find('b')\n diet = tag.contents[1].text\n tag = soup.find(attrs={'style' : 'padding:0px 
10px'})\n startWeight = float(tag.contents[1].split(': ')[1].split()[0])\n goalWeight = float(tag.contents[0].text.split(': ')[1].split()[0])\n weightList, dateList = [], []\n for tag in soup.findAll(attrs={'class' : 'borderBottom date'}):\n dateList.append(parser.parse(tag.text))\n for tag in soup.findAll(attrs={'class' : 'borderBottom weight'}):\n weightList.append(float(tag.text.split()[0]))\n weightHistory = zip(dateList, weightList)\n weightHistory = sorted(weightHistory, key= lambda record : record[0])\n except Exception as e:\n logException(user['id'], self.getWeightHistory.__name__, e)\n finally:\n self.db.updateWeightHistory(user['id'], diet, startWeight, goalWeight, weightHistory)\n \n def getDietHistory(self, userId=None):\n users = self.convertUserIdToUserList(userId)\n for user in users:\n dietHistory = None\n try:\n if user['serverId'] is not None:\n dietHistoryURL = self.getURL(user, 2)\n page = self.br.open(dietHistoryURL)\n soup = BeautifulSoup(page.read())\n months = soup.findAll('td', attrs={'colspan' : '6', 'class' : 'borderBottom'})\n monthList = []\n if months == []:\n raise Exception('no diet history records')\n for month in months:\n monthList.append(datetime.strptime(month.text, '%B %Y'))\n rows = soup.findAll('tr', attrs={'valign' : 'middle'})\n prevDay = 32\n monthIndex = 0\n dietHistory = []\n for row in rows:\n try:\n if len(row.contents) != 13:\n continue\n day = int(re.sub('[^0-9]', '', row.contents[1].text))\n if day >= prevDay:\n monthIndex += 1 \n prevDay = day\n date = datetime(monthList[monthIndex].year, monthList[monthIndex].month, day)\n food = self.getIntFromRawString(row.contents[3].text)\n RDI = self.getDecimalFromPercentageString(row.contents[5].text)\n fat, protein, carbs = self.getDataFromNutrionalSummary(row.contents[7].text)\n exercise = self.getIntFromRawString(row.contents[9].text)\n net = self.getIntFromRawString(row.contents[11].text)\n dietHistory.append((date, food, RDI, fat, protein, carbs, exercise, net))\n except Exception as e:\n logException(user['id'], self.getDietHistory.__name__, e, 'scrape row error')\n if 'dietHistory' in user and user['dietHistory'] is not None:\n dietHistory = self.mergeDietTrack(user['dietHistory'], dietHistory)\n else:\n dietHistory.sort(key=lambda item : item[0])\n except Exception as e:\n logException(user['id'], self.getDietHistory.__name__, e)\n finally:\n self.db.updateDietHistory(user['id'], dietHistory)\n \n def getGroup(self, userId=None):\n users = self.convertUserIdToUserList(userId)\n for user in users:\n groupIdList = []\n try:\n if user['serverId'] is not None:\n groupURL = self.getURL(user, 3)\n page = self.br.open(groupURL)\n soup = BeautifulSoup(page.read())\n results = soup.findAll('td', attrs={'width' : '50', 'align' : 'center'})\n for tag in results:\n groupName = tag.contents[1].attrs['title']\n group = self.db.addNewGroup(groupName)\n self.db.addUserInGroup(user['id'], group['id'])\n groupIdList.append(group['id'])\n except Exception as e:\n logException(user['id'],self.getGroup. 
__name__, e)\n finally:\n self.db.addGroupInUser(user['id'], groupIdList)\n \n def getChallenge(self, userId=None):\n users = self.convertUserIdToUserList(userId)\n for user in users:\n challengeIdList = []\n try:\n if user['serverId'] is not None:\n challengeURL = self.getURL(user, 4)\n page = self.br.open(challengeURL)\n soup = BeautifulSoup(page.read())\n results = soup.findAll('td', attrs={'width' : '50', 'align' : 'center'})\n for tag in results:\n challengeName = tag.contents[1].attrs['title']\n challenge = self.db.addNewChallenge(challengeName)\n self.db.addUserInChallenge(user['id'], challenge['id'])\n challengeIdList.append(challenge['id'])\n except Exception as e:\n logException(user['id'], self.getChallenge.__name__, e)\n finally:\n self.db.addChallengeInUser(user['id'], challengeIdList)\n \n def getBuddy(self, userId=None):\n users = self.convertUserIdToUserList(userId)\n for user in users:\n buddyIdList = []\n try:\n if user['serverId'] is not None:\n buddyURL = self.getURL(user, 5)\n while True:\n page = self.br.open(buddyURL)\n soup = BeautifulSoup(page.read())\n results = soup.findAll('a', attrs={'class' : 'member', 'onmouseout' : 'hideTip()'})\n for tag in results:\n if tag.text != '':\n buddyName = tag.text.strip()\n buddy = self.db.addNewUser(buddyName)\n buddyIdList.append(buddy['id'])\n if 'serverId' not in buddy:\n self.getServerId(buddy['id'])\n result = soup.find('span', attrs={'class' : 'next'})\n if result is None:\n break\n else:\n buddyURL = 'http://fatsecret.com/' + result.contents[0].attrs['href']\n except Exception as e:\n logException(user['id'], self.getBuddy.__name__, e)\n finally:\n self.db.addBuddyInUser(user['id'], buddyIdList)\n \n def mergeDietTrack(self, oldTrack, newTrack):\n oldTrack, newTrack = sorted(oldTrack, key= lambda item : item[0]), sorted(newTrack, key= lambda item: item[0])\n i = 0\n for item in oldTrack:\n if item[0] >= newTrack[0][0]:\n break\n i += 1\n return oldTrack[0 : i] + newTrack\n \n def cleanNonNumercial(self, dataString):\n return re.sub('[^0-9.]', '', dataString.strip())\n \n def getIntFromRawString(self, dataString):\n dataString = self.cleanNonNumercial(dataString)\n return int(dataString) if dataString != '' else None\n \n def getDataFromNutrionalSummary(self, dataString):\n if dataString.strip() == '':\n return None, None, None\n fat = float(dataString.split('fat: ')[1].split('g')[0])\n protein = float(dataString.split('protein: ')[1].split('g')[0])\n carbs = float(dataString.split('carbs: ')[1].split('g')[0])\n return fat, protein, carbs\n \n def getDecimalFromPercentageString(self, dataString):\n dataString = self.cleanNonNumercial(dataString)\n return float(self.cleanNonNumercial(dataString)) / 100 if dataString != '' else None\n \n \n \n ","sub_path":"DataExtractor.py","file_name":"DataExtractor.py","file_ext":"py","file_size_in_byte":12625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"187344614","text":"from PyQt4.QtCore import *\nfrom PyQt4.QtGui import *\n__author__ = 'dustin'\n\nclass FPUtilityObjects(object):\n @staticmethod\n def hrule(parent):\n hrule = QFrame(parent=None)\n hrule.setFrameShape(QFrame.HLine)\n hrule.setFrameShadow(QFrame.Sunken)\n return hrule\n\n @staticmethod\n def vrule(parent=None):\n vrule = QFrame(parent)\n vrule.setFrameShape(QFrame.VLine)\n vrule.setFrameShadow(QFrame.Sunken)\n return vrule\n\nclass FPGroupBox(QGroupBox):\n def __init__(self, parent=None, title=\"\", layout=QVBoxLayout):\n super(FPGroupBox, 
self).__init__(parent)\n self.setTitle(title)\n self._layout_generator = layout\n\n scroll_view = QScrollArea(self)\n scroll_view.setWindowFlags(scroll_view.windowFlags() | Qt.FramelessWindowHint)\n scroll_view.setFrameShape(QFrame.NoFrame)\n scroll_view.setWidgetResizable(True)\n scroll_view.setStyleSheet(\"QScrollBar{width: 8px}\")\n scroll_view.setVerticalScrollBarPolicy(Qt.ScrollBarAsNeeded)\n\n self._layout = QVBoxLayout(self)\n self._layout.addWidget(scroll_view)\n\n self._content = QWidget(parent)\n self._content_layout = layout(self._content)\n\n scroll_view.setWidget(self._content)\n\n def addWidget(self, widget):\n self._content_layout.addWidget(widget)\n\n def clearWidgets(self):\n item = self._content_layout.takeAt(0)\n while item:\n del item\n item = self._content_layout.takeAt(0)\n\nclass FPShortProfileView(QWidget):\n clicked = pyqtSignal()\n def __init__(self, parent=None, name=\"\", title=\"\"):\n super(FPShortProfileView, self).__init__(parent)\n layout = QVBoxLayout(self)\n layout.addWidget(QLabel(name))\n layout.addWidget(QLabel(title))\n\n def mousePressEvent(self, QMouseEvent):\n self.clicked.emit()\n","sub_path":"gui/fpmiscobjs.py","file_name":"fpmiscobjs.py","file_ext":"py","file_size_in_byte":1880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"282620664","text":"'''\r\nCreated on 12. 11. 2014\r\n\r\n@author: Tomas\r\n'''\r\nimport pygame\r\nfrom pygame import gfxdraw\r\nfrom mouse import MouseListener\r\nimport settings\r\n\r\nclass Component(MouseListener):\r\n def __init__(self, x, y, w, h):\r\n MouseListener.__init__(self)\r\n self._x = x\r\n self._y = y\r\n self.w = w\r\n self.h = h\r\n self.parent = None\r\n \r\n @property\r\n def x(self):\r\n if self.parent is None:\r\n return self._x\r\n else:\r\n return self._x + self.parent.x\r\n @x.setter\r\n def x(self, value):\r\n self._x = value\r\n @property\r\n def y(self):\r\n if self.parent is None:\r\n return self._y\r\n else:\r\n return self._y + self.parent.y\r\n @y.setter\r\n def y(self, value):\r\n self._y = value\r\n def mouse_in(self, mx, my, app):\r\n return (mx >= self.x and my >= self.y and \r\n mx < self.x + self.w and my < self.y + self.h)\r\n def collide(self, comp):\r\n return (comp.x + comp.w >= self.x and\r\n comp.y + comp.y >= self.y and \r\n self.x + self.w > comp.x and\r\n self.y + self.h > comp.y)\r\n\r\nclass ExButton(Component):\r\n outline_size = 5\r\n def __init__(self, x, y, image, exercise,\r\n unfocused_color = settings.white,\r\n focused_color = settings.yellow):\r\n self.image = image\r\n self.r = self.image.get_rect().size[0]//2\r\n #self.r = 25\r\n Component.__init__(self, x, y, 2*self.r, 2*self.r)\r\n self.exercise = exercise\r\n \r\n self.unfocused_color = unfocused_color\r\n self.focused_color = focused_color\r\n self.clicked = False\r\n self.rplus = 0\r\n \r\n def render(self, surface, app):\r\n color = self.unfocused_color\r\n if self.focused:\r\n color = self.focused_color\r\n \r\n if self.focused:\r\n self.rplus += 3\r\n gfxdraw.filled_circle(surface, self.x + self.r, self.y + self.r, self.r + self.rplus, settings.outline)\r\n gfxdraw.aacircle( surface, self.x + self.r, self.y + self.r, self.r + self.rplus, settings.outline)\r\n gfxdraw.filled_circle(surface, self.x + self.r, self.y + self.r, self.r - 3 + self.rplus, color)\r\n gfxdraw.aacircle( surface, self.x + self.r, self.y + self.r, self.r - 3 + self.rplus, color)\r\n surface.blit(self.image, (self.x, self.y))\r\n if self.focused:\r\n self.rplus -= 3\r\n \r\n def 
mouse_pressed(self, button, app):\r\n if button == 1 and self.focused:\r\n self.clicked = True\r\n def mouse_released(self, button, app):\r\n if button == 1 and self.focused and self.clicked:\r\n app.change_exercise(self.exercise.index)\r\n \r\n \r\n \r\nclass CheckButton(Component):\r\n def __init__(self, app, exercise_index):\r\n Component.__init__(self, app.w - 75, app.h - 75, 50, 50)\r\n self.exercise_index = exercise_index\r\n self.check = pygame.image.load('res/check.png')\r\n self.next = pygame.image.load('res/next.png' )\r\n self.r = 25\r\n self.unfocused_color = settings.white\r\n self.focused_color = settings.yellow\r\n self.clicked = False\r\n def render(self, surface, app):\r\n color = self.unfocused_color\r\n if self.focused:\r\n color = self.focused_color\r\n gfxdraw.filled_circle(surface, self.x + self.r, self.y + self.r, self.r, settings.outline)\r\n gfxdraw.aacircle( surface, self.x + self.r, self.y + self.r, self.r, settings.outline)\r\n gfxdraw.filled_circle(surface, self.x + self.r, self.y + self.r, self.r - 3, color)\r\n gfxdraw.aacircle( surface, self.x + self.r, self.y + self.r, self.r - 3, color)\r\n if self.focused:\r\n surface.blit(self.next, (self.x, self.y))\r\n else:\r\n surface.blit(self.check, (self.x, self.y))\r\n def mouse_pressed(self, button, app):\r\n if button == 1 and self.focused:\r\n self.clicked = True\r\n def mouse_released(self, button, app):\r\n if button == 1 and self.focused and self.clicked:\r\n app.change_exercise(self.exercise_index)\r\n\r\nclass EscapeButton(Component):\r\n def __init__(self, app):\r\n Component.__init__(self, app.w - 75, 25, 50, 50)\r\n self.escape = pygame.image.load('res/escape.png')\r\n self.r = 25\r\n self.unfocused_color = settings.white\r\n self.focused_color = settings.yellow\r\n self.clicked = False\r\n def render(self, surface, app):\r\n color = self.unfocused_color\r\n if self.focused:\r\n color = self.focused_color\r\n gfxdraw.filled_circle(surface, self.x + self.r, self.y + self.r, self.r, settings.outline)\r\n gfxdraw.aacircle( surface, self.x + self.r, self.y + self.r, self.r, settings.outline)\r\n gfxdraw.filled_circle(surface, self.x + self.r, self.y + self.r, self.r - 3, color)\r\n gfxdraw.aacircle( surface, self.x + self.r, self.y + self.r, self.r - 3, color)\r\n surface.blit(self.escape, (self.x, self.y))\r\n def mouse_pressed(self, button, app):\r\n if button == 1 and self.focused:\r\n self.clicked = True\r\n def mouse_released(self, button, app):\r\n if button == 1 and self.focused and self.clicked:\r\n app.menu_mode()\r\n\r\nclass Scroll(Component):\r\n def __init__(self, x, y, w, h, max_y):\r\n Component.__init__(self, x, y, w, h)\r\n self.min_y = y\r\n self.max_y = max_y\r\n self.range = self.max_y - self.min_y\r\n self.dragged = False\r\n self.mouse = None\r\n def level(self):\r\n if self.parent:\r\n return (self.y - self.parent.y - self.min_y) / self.max_y\r\n else:\r\n return (self.y - self.min_y) / self.max_y\r\n def render(self, surface, app):\r\n if self.focused:\r\n pygame.draw.rect(surface, (50, 50, 50), (self.x, self.y, self.w, self.h))\r\n else:\r\n pygame.draw.rect(surface, (75, 75, 75), (self.x, self.y, self.w, self.h))\r\n def mouse_pressed(self, button, app):\r\n if self.focused and button == 1:\r\n self.dragged = True\r\n self.mouse = app.mouse\r\n self._y -= app.mouse.y\r\n elif button == 4:\r\n self._y -= 40\r\n if self._y < self.min_y:\r\n self._y = self.min_y\r\n elif button == 5:\r\n self._y += 40\r\n if self._y > self.max_y:\r\n self._y = self.max_y\r\n def mouse_released(self, 
button, app):\r\n if button == 1 and self.dragged:\r\n self.dragged = False\r\n self._y = self.y\r\n if self.parent:\r\n self._y -= self.parent.y\r\n self.mouse = None\r\n @property\r\n def y(self):\r\n if not self.parent:\r\n if not self.mouse: \r\n return self._y\r\n else:\r\n y = self._y + self.mouse.y\r\n if y > self.max_y:\r\n return self.max_y\r\n elif y < self.min_y:\r\n return self.min_y\r\n else:\r\n return y\r\n else:\r\n if not self.mouse:\r\n return self._y + self.parent.y\r\n else:\r\n y = self._y + self.mouse.y\r\n if y > self.max_y:\r\n return self.max_y + self.parent.y\r\n elif y < self.min_y:\r\n return self.min_y + self.parent.y\r\n else:\r\n return y + self.parent.y\r\n \r\nclass ButtonTextPanel(Component):\r\n def __init__(self, text):\r\n Component.__init__(self, 0, 0, 740, 50)\r\n self.text = text\r\n \r\n self.f_image = pygame.Surface((self.w, self.h), pygame.SRCALPHA)\r\n self.f_image.fill(settings.alpha)\r\n text_image = settings.font1.render(text, True, settings.text)\r\n self.f_image.blit(text_image, ((self.w - text_image.get_rect().size[0]) / 2, (self.h - text_image.get_rect().size[1])/2))\r\n \r\n self.u_image = pygame.Surface((self.w, self.h), pygame.SRCALPHA)\r\n self.u_image.fill(settings.alpha)\r\n text_image = settings.font2.render(text, True, settings.text)\r\n self.u_image.blit(text_image, ((self.w - text_image.get_rect().size[0]) / 2, (self.h - text_image.get_rect().size[1])/2))\r\n self.clicked = False\r\n \r\n def render(self, surface, app, x = 0, y = 0):\r\n if self.focused:\r\n surface.blit(self.u_image, (x, y))\r\n else:\r\n surface.blit(self.f_image, (x, y))\r\n def mouse_pressed(self, button, app):\r\n if self.focused:\r\n self.clicked = True\r\n def mouse_released(self, button, app):\r\n if self.focused and self.clicked:\r\n app.exercise_mode(self.text + \".txt\")\r\n self.clicked = False\r\nclass Menu(Component):\r\n def __init__(self, x, y, w, h, tp, app):\r\n Component.__init__(self, x, y, w, h)\r\n self.tp = tp\r\n self.total_h = 0\r\n for p in tp:\r\n self.total_h += p.h\r\n self.surf = pygame.Surface((self.w, self.h), pygame.SRCALPHA)\r\n self.scroll_on = self.total_h > self.h\r\n if self.scroll_on: \r\n self.scroll = Scroll(self.w - 10, 0, 10, 40, self.h - 40)\r\n self.scroll.parent = self\r\n app.mouselis.append(self.scroll)\r\n def render(self, surface, app):\r\n self.surf.fill(settings.alpha)\r\n shift = 0\r\n if self.scroll_on:\r\n shift = int((self.total_h - self.h)*self.scroll.level())\r\n self.scroll.render(surface, None)\r\n y = -shift\r\n for t in self.tp:\r\n t.render(self.surf, app, 0, y)\r\n y += t.h\r\n surface.blit(self.surf, (self.x, self.y))\r\n def mouse_in(self, mx, my, app):\r\n if Component.mouse_in(self, mx, my, app):\r\n my -= self.y\r\n if self.scroll_on:\r\n my += int((self.total_h - self.h)*self.scroll.level())\r\n for btp in self.tp:\r\n btp.focused = False\r\n total_y = 0\r\n for btp in self.tp:\r\n total_y += btp.h\r\n if my < total_y:\r\n btp.focused = True\r\n break\r\n return False\r\n def mouse_pressed(self, button, app):\r\n for btp in self.tp:\r\n btp.mouse_pressed(button, app)\r\n def mouse_released(self, button, app):\r\n for btp in self.tp:\r\n btp.mouse_released(button, app)\r\n \r\n \r\n \r\n \r\n \r\n \r\n\r\n ","sub_path":"others/exercises/component.py","file_name":"component.py","file_ext":"py","file_size_in_byte":10630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"586532315","text":"# coding: utf-8\n\nimport time\nimport pickle\nimport 
socket\nimport logging\nimport queue\n# import argparse\nimport threading\nfrom Node import Node\n\nlogging.basicConfig(level=logging.DEBUG,\n format='%(asctime)s %(name)-15s %(levelname)-8s %(message)s',\n datefmt='%m-%d %H:%M:%S')\n\nclass Entity(Node):\n def __init__(self, own_id, address, root_id, root_address, name='Entity', timeout=3):\n Node.__init__(self, own_id, address, root_id, root_address, name, timeout)\n\n def run(self):\n self.socket.bind(self.address)\n\n self.neighbor_advertise()\n\n done = False\n while not done:\n p, addr = self.recv()\n if p is not None:\n o = pickle.loads(p)\n #self.logger.info('O: %s', o)\n if o['method'] == 'NODE_JOIN':\n self.neighbor_ack(o['args'])\n elif o['method'] == 'PRINT_RING':\n self.print_ring()\n elif o['method'] == 'PRINT_TABLE':\n self.print_table()\n elif o['method'] == 'NODE_DISCOVERY':\n self.propagate_table(o['args']) \n\n elif o['method'] == 'ORDER': # need to wrap in TOKEN\n msg = { 'method' : 'TOKEN' , \n 'args' : { 'method' : o['method'], \n 'args' : { 'client_addr' : addr, \n 'order' : o['args'] }, \n 'dest_id' : self.node_table['Receptionist'] }}\n self.send(self.successor_address, msg)\n\n elif o['method'] == 'PICKUP': # need to wrap in TOKEN\n msg = { 'method' : 'TOKEN' , \n 'args' : { 'method' : o['method'], \n 'args' : { 'client_addr' : addr, \n 'order' : o['args'] }, \n 'dest_id' : self.node_table['Employee'] }}\n self.send(self.successor_address, msg)\n\n elif o['method'] == 'TOKEN': # send to worker\n if o['args']=='EMPTY':\n if not self.queueOut.empty():\n nextMessage = self.queueOut.get()\n if nextMessage != None:\n # wrap in TOKEN\n if nextMessage['method'] == 'ORDER_RECVD':\n self.send(nextMessage['args']['client_addr'], nextMessage)\n\n elif nextMessage['method']=='DELIVER': #enviar pra cliente caso method seja deliver\n msg = { 'method':'DELIVER' ,\n 'args': { 'ticket': nextMessage['args']['orderTicket']\n }}\n\n clientAddress = nextMessage['args']['client_addr']\n self.logger.debug('Sending client %s food', nextMessage['args']['orderTicket'])\n self.send(clientAddress, msg)\n\n else:\n msg = { 'method' : 'TOKEN', 'args' : nextMessage }\n msg['args']['dest_id'] = self.node_table[nextMessage['args']['dest']]\n \n #permite mais que um cozinheiro, pois enviamos na mensagem que cozinheiro e que pediu e podemos-lhe responder\n if nextMessage['method'] == 'EQPT_REQ':\n msg['args']['args']['cookReq'] = self.node_table[nextMessage['args']['cook']]\n self.send(self.successor_address, msg)\n self.logger.debug('Sending Token: %s', nextMessage['method'])\n else:\n self.send(self.successor_address, o)\n elif o['args']['dest_id']==self.own_id:\n self.logger.debug('Sending object to Worker Thread')\n self.queueIn.put(o['args'])\n msg = { 'method' : 'TOKEN', 'args' : 'EMPTY' }\n self.send(self.successor_address, msg) #ja o recebeu e agora vai enviar um token vazio para o proximo\n else: \n self.send(self.successor_address, o)\n\n\n","sub_path":"Entity.py","file_name":"Entity.py","file_ext":"py","file_size_in_byte":4525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"284180305","text":"from __future__ import print_function\nfrom csv import reader\nfrom pyspark import SparkContext\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql import SQLContext\nfrom pyspark.sql.functions import *\n\nimport sys\n\nMUNI = 10\nTYPE = 2\nORIGIN = 5\nDEST = 6\nIDENT = 1 #ident\nCITY_INDEX = 0\n\nsc = SparkContext()\nspark = SparkSession.builder.getOrCreate()\nsqlContext = 
SQLContext(spark)\n\ndef processString(input_string):\n\tif input_string == None:\n\t\treturn input_string\n\treturn input_string.lstrip().rstrip().upper()\n\ndef getSqlList(lst):\n\treturn \"(\\\"\"+'\\\",\\\"'.join(lst)+\"\\\")\"\n\ndef getAirportTypeListFor(temp_list):\n\treturn [processString(x) for x in temp_list]\n\ndef getCityMapper(filename):\n\tcity_map = {}\n\tmapper_rdd = sc.textFile(filename)\n\tmapper_list = mapper_rdd \\\n\t\t\t\t\t.map(lambda line: (processString(line.split(',')[0]),processString(line.split(',')[1]))) \\\n\t\t\t\t\t.collect()\n\tfor key, val in mapper_list:\n\t\tcity_map[key] = val\n\treturn city_map\n\ndef findMapping(city_string):\n\tcity_string = processString(city_string)\n\tif city_string in city_mapper:\n\t\treturn city_mapper[city_string]\n\treturn city_string\n\ndef getCityList(filename):\n\tcity_rdd = sc.textFile(filename)\n\tcity_list = city_rdd\\\n\t\t.map(lambda line: (findMapping(processString(line.split(',')[0])), 1)) \\\n\t\t.reduceByKey(lambda x,y: x + y) \\\n\t\t.map(lambda x: x[0]) \\\n\t\t.collect()\n\treturn city_list\n\ndef getAirportDataDFrame(filename):\n\tairport = sc.textFile(filename)\n\tschema = ['ident','type','municipality']\n\tvalid_data = airport \\\n\t\t.mapPartitions(lambda line: reader(line)) \\\n\t\t.map(lambda arr: [processString(x) if i != MUNI else findMapping(processString(x)) for i,x in enumerate(arr)]) \\\n\t\t.filter(lambda arr: arr[TYPE] in airport_type_list and arr[IDENT] != '' and arr[MUNI] != '' ) \\\n\t\t.map(lambda arr: [arr[IDENT], arr[TYPE], arr[MUNI]]) \\\n\t\t.collect()\n\treturn spark.createDataFrame(valid_data, schema)\n\ndef getAllAirportCodes():\n\tairport_dataframe.registerTempTable('airport_df')\n\tdf1 = sqlContext.sql ( \n\t\t\t\"\"\"\n\t\t\t\tSELECT DISTINCT ident\n\t\t\t\tFROM airport_df\n\t\t\t\"\"\"\n\t)\n\tres = df1.collect()\n\treturn [x['ident'] for x in res]\n\ndef getOnlyCityListAirportCodes():\n\tairport_dataframe.registerTempTable('airport_df')\n\tdf1 = sqlContext.sql (\"\"\"\n\t\tSELECT DISTINCT ident \n\t\tFROM airport_df \n\t\tWHERE municipality in \"\"\" + getSqlList(city_list))\n\tres = df1.collect()\n\treturn [x['ident'] for x in res]\n\ndef getCovidDataFrame(filename):\n\tcovid = sc.textFile(filename)\n\tvalid_data = covid \\\n\t\t.mapPartitions(lambda line: reader(line)) \\\n\t\t.map(lambda arr: [processString(x) if i != CITY_INDEX else findMapping(processString(x)) for i,x in enumerate(arr)]) \\\n\t\t.filter(lambda arr: arr[CITY_INDEX] in city_list) \\\n\t\t.collect()\n\tschema = ['city', 'date', 'cases', 'deaths']\n\treturn spark.createDataFrame(valid_data, schema)\n\ndef getFlightDataFrame(filename):\n\tdf1 = spark.read.csv(filename, header='true')\n\tdf1.registerTempTable('df1')\n\tdf1 = sqlContext.sql(\n\t\t\t\"\"\"\n\t\t\tSELECT * \n\t\t\tFROM\n\t\t\t(\n\t\t\t\tSELECT \n\t\t\t\tUPPER(TRIM(origin)) as origin,\n\t\t\t\tUPPER(TRIM(destination)) as destination,\n\t\t\t\tUPPER(TRIM(SUBSTRING(day,1,10))) as day\n\t\t\t\tFROM df1\n\t\t\t) as res\n\t\t\tWHERE \n\t\t\t(\n\t\t\t\tres.origin != '' and \n\t\t\t\tres.destination != '' and\n\t\t\t\tres.origin != res.destination and\n\t\t\t\t(res.origin in {only_selectected_city_air_codes:} \n\t\t\t\t\tor res.destination in {only_selectected_city_air_codes:}) and\n\t\t\t\tres.origin in {all_city_air_codes:} and\n\t\t\t\tres.destination in {all_city_air_codes:}\n\t\t\t)\n\t\t\t\"\"\".format(\n\t\t\t\tonly_selectected_city_air_codes = getSqlList(only_city_list_airport_codes),\n\t\t\t\tall_city_air_codes = 
getSqlList(all_airport_codes)\n\t\t\t\t)\n\t\t)\n\treturn df1\n\ndef getInterCityFlightDataFrame():\n\tflight_dataframe.registerTempTable('flight_df')\n\tairport_dataframe.registerTempTable('airport_df')\n\treturn sqlContext.sql(\n\t\"\"\"\n\t\tSELECT a1.municipality as from_city,\n\t\td.from_airport as from_airport,\n\t\td.to_city as to_city,\n\t\td.to_airport as to_airport,\n\t\td.day as day\n\t\tFROM airport_df as a1\n\t\tINNER JOIN (\n\t\t\tSELECT \n\t\t\t\tf.origin as from_airport,\n\t\t\t\ta2.municipality as to_city,\n\t\t\t\tf.destination as to_airport,\n\t\t\t\tf.day as day\n\t\t\tFROM airport_df as a2\n\t\t\tINNER JOIN flight_df as f\n\t\t\tON a2.ident = f.destination\n\t\t) as d\n\t\tON a1.ident = d.from_airport\n\t\"\"\"\n\t)\n\ndef getCityFromAndToFlightCountsDataFrame():\n\tinter_city_flight_dataframe.registerTempTable('inter_city_flight_df')\n\tdf1 = sqlContext.sql(\n\t\t\t\"\"\"\n\t\t\tSELECT \n\t\t\t\tfrom_city as city,\n\t\t\t\tday as day,\n\t\t\t\tCOUNT(*) as outgoing_flight_count\n\t\t\tFROM inter_city_flight_df\n\t\t\tGROUP BY from_city, day\n\t\t\t\"\"\"\n\t\t)\n\tdf2 = sqlContext.sql(\n\t\t\t\"\"\"\n\t\t\tSELECT \n\t\t\t\tto_city as city,\n\t\t\t\tday as day,\n\t\t\t\tCOUNT(*) as incoming_flight_count\n\t\t\tFROM inter_city_flight_df\n\t\t\tGROUP BY to_city, day\n\t\t\t\"\"\"\n\t\t)\n\tdf1.registerTempTable('df1')\n\tdf2.registerTempTable('df2')\n\tdf3 = sqlContext.sql(\n\t\t\"\"\"\n\t\tSELECT \n\t\t\tdf1.city as city,\n\t\t\tdf1.day as day,\n\t\t\tdf2.incoming_flight_count as incoming_flight_count,\n\t\t\tdf1.outgoing_flight_count as outgoing_flight_count\n\t\tFROM df1\n\t\tINNER JOIN df2\n\t\tON df1.city = df2.city and df1.day = df2.day\n\t\tWHERE df1.city IN \"\"\" +getSqlList(city_list) + \"\"\" ORDER BY df1.day\"\"\"\n\t)\n\treturn df3\n\ndef getCovidFlightCountDataFrame():\n\tcovid_dataframe.registerTempTable('covid_count_df')\n\tcity_from_and_to_flight_counts_dataframe.registerTempTable('flight_count_df')\n\tdf1 = sqlContext.sql(\n\t\t\"\"\"\n\t\tSELECT \n\t\t\tcc.city as city,\n\t\t\tcc.date as day,\n\t\t\tcc.cases as cases,\n\t\t\tcc.deaths as deaths,\n\t\t\tfc.incoming_flight_count as incoming_flight_count,\n\t\t\tfc.outgoing_flight_count as outgoing_flight_count\n\t\tFROM covid_count_df as cc\n\t\tINNER JOIN flight_count_df as fc\n\t\tON cc.city = fc.city and cc.date = fc.day\n\t\tORDER BY cc.city, cc.date\n\t\t\"\"\"\n\t\t)\n\treturn df1\n\ndef saveData(df, filename):\n\tdf.write.option(\"header\",\"true\").csv(filename)\n\n\nif __name__ == \"__main__\":\n\n\tif len(sys.argv) != 6:\n\t\tprint(\"Wrong usage, all arguments not provided.\")\n\t\texit(-1)\n\n\tmap_list_path = sys.argv[1]\n\tcity_list_path = sys.argv[2]\n\tairport_dataset_path = sys.argv[3]\n\tmerged_flight_dataset_path = sys.argv[4]\n\tcovid_disease_dataset_path = sys.argv[5]\n\n\tprint('map_list_path: ' + map_list_path)\n\tprint('city_list_path: ' + city_list_path)\n\tprint('airport_dataset_path: ' + airport_dataset_path)\n\tprint('merged_flight_dataset_path: ' + merged_flight_dataset_path)\n\tprint('covid_disease_dataset_path: ' + covid_disease_dataset_path)\n\n\tprint('Processing has started. 
It will take 5 to 10 minutes from the start time ......')\n\tcity_mapper = getCityMapper(map_list_path)\n\tcity_list = getCityList(city_list_path)\n\tairport_type_list = getAirportTypeListFor(['medium_airport','large_airport'])\n\tairport_dataframe = getAirportDataDFrame(airport_dataset_path)\n\tall_airport_codes = getAllAirportCodes() ##Uses airport_dataframe\n\tonly_city_list_airport_codes = getOnlyCityListAirportCodes() ##Uses airport_dataframe\n\tflight_dataframe = getFlightDataFrame(merged_flight_dataset_path)\n\tcovid_dataframe = getCovidDataFrame(covid_disease_dataset_path)\n\tinter_city_flight_dataframe = getInterCityFlightDataFrame() ##Uses flight_dataframe and airport_dataframe\n\tcity_from_and_to_flight_counts_dataframe = getCityFromAndToFlightCountsDataFrame() ##Uses inter_city_flight_dataframe\n\tcovid_flight_count_dataframe = getCovidFlightCountDataFrame() ##Uses inter_city_flight_dataframe and covid_dataframe\n\n\tsaveData(inter_city_flight_dataframe, 'inter_city_flight_data.out')\n\tsaveData(covid_flight_count_dataframe, 'covid_flight_count_data.out')\n\t\n","sub_path":"Code/driver.py","file_name":"driver.py","file_ext":"py","file_size_in_byte":7400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"249077773","text":"#encoding=utf-8\nimport re\nfrom django import forms\n\n\nclass CarInfoForm(forms.Form):\n\n err_msg = {\n 'wrong_license_plate_num': u'请输入正确的车牌号码',\n 'wrong_engine_no': u'请输入正确的发动机号后6位',\n }\n\n province = forms.CharField(max_length=10, error_messages={'required': '请选择省份'})\n cityname = forms.CharField(max_length=10, error_messages={'required': '请选择城市'})\n car_province = forms.CharField(max_length=1, error_messages={'required': '请输入车牌号码'})\n license_plate_num = forms.CharField(max_length=6, error_messages={'required': '请输入车牌号码'})\n engine_no = forms.CharField(max_length=6, error_messages={'required': '请输入发动机号后6位'})\n\n def clean_license_plate_num(self):\n license_plate_num = self.cleaned_data['license_plate_num']\n if not re.search(r'^([0-9A-Z]{6})$', license_plate_num):\n raise forms.ValidationError(self.err_msg['wrong_license_plate_num'])\n return license_plate_num\n\n def clean_engine_no(self):\n engine_no = self.cleaned_data['engine_no']\n if len(engine_no) != 6:\n raise forms.ValidationError(self.err_msg['wrong_engine_no'])\n return engine_no\n","sub_path":"vehicle_violation_query/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"359509958","text":"# import re\n\n\ndef word_count(s):\n # Your code here\n counts = {}\n\n words = s.lower().split()\n\n ignore = [\n \":\",\n \";\",\n \",\",\n \".\",\n \"-\",\n \"+\",\n \"=\",\n \"/\",\n \"\\\\\",\n \"|\",\n \"[\",\n \"]\",\n \"{\",\n \"}\",\n \"(\",\n \")\",\n \"*\",\n \"^\" \"&\",\n '\"',\n ]\n\n # replacer = re.sub(ignore, \"\", words)\n # ignored = s.replace('\"', \" \")\n for w in words:\n for special in ignore:\n w = w.replace(special, \" \")\n if w != \"\":\n if w in counts:\n counts[w] = counts[w] + 1\n else:\n counts[w] = 1\n return counts\n\n\nif __name__ == \"__main__\":\n # print(word_count(\"\"))\n # print(word_count(\"Hello\"))\n print(word_count('Hello, my cat. And my cat doesn\\'t say \"hello\" back.'))\n print(\n word_count(\n \"This is a test of the emergency broadcast network. 
This is only a test.\"\n )\n )\n\n","sub_path":"applications/word_count/word_count.py","file_name":"word_count.py","file_ext":"py","file_size_in_byte":1004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"167477676","text":"#!/usr/bin/python\n\n# V1.2\nimport numpy as np\n\n\nclass DiffDriveController():\n \"\"\"\n Class used for controlling the robot linear and angular velocity\n \"\"\"\n def __init__(self, max_speed, max_omega):\n self.kp = .5 # Must be positive\n self.ka = 1 # Must be positive with ka - kp > 0\n self.kb = -.5 # Must be negative\n self.error_tol = 0.05\n self.MAX_SPEED = max_speed\n self.MAX_OMEGA = max_omega\n\n # Characteristic Polynomial: \n # (lambda + kp)(lambda^2 + (ka - kp)*lambda - kp*kb) = 0\n\n\n \n def compute_vel(self, state, goal):\n \"\"\"\n Function that computes the desired outputs given the state and goal\n Inputs:\n state - a numpy vector of size 3 by 1 with components (x,y,theta)\n goal - a numpy vector of size 2 by 1 specifying the location of the goal\n Outputs: a tuple with 3 elements\n v - a number specifying the forward speed (in m/s) of the robot (should \n be no more than max_speed)\n omega - a number specifying the angular velocity (in rad/s) of the robot\n (should be no more than max_omega)\n done - a boolean value specifying if the robot has reached its goal (or\n is close enough\n \"\"\"\n\n KP = self.kp\n KA = self.ka\n KB = self.kb\n delx = goal[0] - state[0]\n dely = goal[1] - state[1]\n theta = state[2]\n tworoots = np.roots([1, KA - KP, -KP * KB])\n w = [-KP, tworoots[0], tworoots[1]]\n # if w[0]<0 and w[1]<0 and w[2]<0:\n p = np.sqrt((delx ** 2) + (dely ** 2))\n alpha = -theta + np.arctan2(dely, delx)\n beta = -theta - alpha\n\n v = p * KP\n if np.all(v > self.MAX_SPEED):\n v = self.MAX_SPEED\n\n omega = (alpha * KA) + (beta * KB)\n if np.all(omega > self.MAX_OMEGA):\n omega = self.MAX_OMEGA\n\n if np.all(p < 0.15):\n done = True\n else:\n done = False\n\n vw = (v, omega, done)\n return vw\n\n\nif __name__ == '__main__':\n\n diff_drive_controller = DiffDriveController(0.5, 1.6)\n\n # Tests:\n print(\"Running test::Straight Forward: (5, 0, 0)\")\n test_start = np.array([0, 0, 0])\n test_goal = np.array([5, 1, 0]) # 5m, 5m, 45 degrees\n diff_drive_controller.compute_vel(test_start, test_goal)\n\n print(\"\\nRunning test::Left: (5m, 5m, 0)\")\n test_start = np.array([0, 0, 0])\n test_goal = np.array([5, 5, 0])\n print(diff_drive_controller.compute_vel(test_start, test_goal))\n\n print(\"Running test::Turning: (5m, 5m, 45 Degrees)\")\n test_start = np.array([0,0,0])\n test_goal = np.array([5,5, np.pi/4]) # 5m, 5m, 45 degrees\n diff_drive_controller.compute_vel(test_start, test_goal)\n\n # return [dx,dy,self._angle,self._marker_num]\n # ('Measurements: ', [0.25480768607482129, -0.022549956977450526, 0.016922701984059048, 0])\n # ('Computed command vel: ', (0.25580355244306036, 1.6, False))\n # state = np.array([meas[0], meas[1], meas[2]])\n # vw = self.diff_drive_controller.compute_vel(state, goal)\n\n print(\"Running Test::Tag: (0.25480768607482129, -0.022549956977450526, 0.016922701984059048)\")\n test_start = np.array([0, 0, 0])\n test_goal = np.array([0.25480768607482129, -0.022549956977450526, 0.016922701984059048]) # 5m, 5m, 45 degrees\n print(diff_drive_controller.compute_vel(test_start, test_goal))\n\n print(\"Running Test::Tag: (0.2329683769187918, 0.011103904204575101, -0.084052242784899228, 0)\")\n test_start = np.array([0, 0, 0])\n test_goal = np.array([0.2329683769187918, 
0.011103904204575101, -0.084052242784899228, 0]) # 5m, 5m, 45 degrees\n print(diff_drive_controller.compute_vel(test_start, test_goal))\n","sub_path":"Capstone/Practical/diff_drive_controller/DiffDriveController.py","file_name":"DiffDriveController.py","file_ext":"py","file_size_in_byte":3737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"228241562","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom unittest import main, TestCase\nfrom growlnotify import growlnotify\n\n\nclass Test(TestCase):\n def test(self):\n growlnotify(\"заголовок\",\"сообщение\")\n growlnotify(u\"заголовок\",u\"сообщение\")\n growlnotify(\"title only\")\n growlnotify(\"title\",\"long description\")\n growlnotify(\"title\",\"\"\"multi\nline\ndescription\"\"\")\n growlnotify(\"iCal\",image=\"Chrome.png\")\n\n def test_app(self):\n growlnotify(\"iCal\",app=\"iCal\")\n growlnotify(\"System preferences\",app=\"System preferences\")\n\nif __name__ == \"__main__\":\n main()","sub_path":"tests/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"152318494","text":"from django.db import models\n\nfrom wagtail.core.models import Page, Orderable\nfrom wagtail.admin.edit_handlers import (\n FieldPanel,\n StreamFieldPanel,\n MultiFieldPanel,\n InlinePanel,\n)\nfrom wagtail.api import APIField\nfrom wagtail.images.edit_handlers import ImageChooserPanel\nfrom wagtail.core.fields import StreamField\nfrom modelcluster.fields import ParentalKey\n#from django.core import serializer\nfrom django.conf import settings\nfrom django.contrib.auth.models import User\n\nfrom streams import blocks\nfrom streams import constants\nfrom streams.models import LocationTag, TripType, FareTable\n\n\n# Create your models here.\n\nclass OutstationRoutePage(Page):\n template=\"outstation/outstation_route_page.html\"\n\n banner_title=models.CharField(max_length=100, null=False)\n banner_image=models.ForeignKey(\n \"wagtailimages.Image\",\n null=False,\n on_delete=models.CASCADE,\n related_name=\"+\")\n\n start_location = models.CharField(max_length=100, null=False)\n start_location_details = models.TextField(null=False, help_text=\"Add start location details\")\n start_location_map_icon = models.ForeignKey(\n \"wagtailimages.Image\",\n null=True, blank=True,\n on_delete=models.SET_NULL,\n related_name=\"+\")\n destination = models.CharField(max_length=100, null=False)\n destination_details = models.TextField(null=False, help_text=\"Add destination details\")\n destination_map_icon = models.ForeignKey(\n \"wagtailimages.Image\",\n null=True, blank=True,\n on_delete=models.SET_NULL,\n related_name=\"+\")\n\n tourist_places_on_route = StreamField(\n [\n (\"tourist_location\", blocks.PlaceDetailBlock()),\n ],\n null=True,\n blank=True\n )\n\n tourist_places_in_destination = StreamField(\n [\n (\"tourist_location\", blocks.PlaceDetailBlock()),\n ],\n null=True,\n blank=True\n )\n\n road_condition_rating = models.PositiveSmallIntegerField()\n road_map = models.TextField(null=False, help_text=\"Add road map details\")\n best_time_to_visit = models.TextField(null=False, help_text=\"Add road map details\")\n alternate_routes = models.TextField(null=False, help_text=\"Add alternate route details\")\n road_condition = models.TextField(null=False, help_text=\"Add road condition details\")\n\n total_distance = models.PositiveSmallIntegerField()\n likes = 
models.ManyToManyField(User, related_name='likes', blank=True)\n\n api_fields = [\n APIField(\"banner_title\"),\n APIField(\"banner_image\"),\n APIField(\"start_location\"),\n APIField(\"start_location_details\"),\n APIField(\"start_location_images\"),\n APIField(\"start_location_map_icon\"),\n APIField(\"destination\"),\n APIField(\"destination_details\"),\n APIField(\"destination_images\"),\n APIField(\"destination_map_icon\"),\n APIField(\"tourist_places_on_route\"),\n APIField(\"tourist_places_in_destination\"),\n APIField(\"road_condition_rating\"),\n APIField(\"road_map\"),\n APIField(\"best_time_to_visit\"),\n APIField(\"alternate_routes\"),\n APIField(\"road_condition\"),\n APIField(\"total_distance\"),\n ]\n\n content_panels = Page.content_panels+ [\n FieldPanel(\"banner_title\"),\n ImageChooserPanel(\"banner_image\"),\n FieldPanel(\"start_location\"),\n FieldPanel(\"start_location_details\"),\n MultiFieldPanel([\n InlinePanel(\"start_location_images\"),\n ], heading=\"Start Location Images\" ),\n ImageChooserPanel(\"start_location_map_icon\"),\n FieldPanel(\"destination\"),\n FieldPanel(\"destination_details\"),\n MultiFieldPanel([\n InlinePanel(\"destination_images\"),\n ], heading=\"Destination Images\" ),\n ImageChooserPanel(\"destination_map_icon\"),\n StreamFieldPanel(\"tourist_places_on_route\"),\n StreamFieldPanel(\"tourist_places_in_destination\"),\n FieldPanel(\"road_condition_rating\"),\n FieldPanel(\"road_map\"),\n FieldPanel(\"best_time_to_visit\"),\n FieldPanel(\"alternate_routes\"),\n FieldPanel(\"road_condition\"),\n FieldPanel(\"total_distance\"),\n ]\n\n def get_context(self, request, *args, **kwargs):\n context = super().get_context(request, *args, **kwargs)\n\n location_tags = LocationTag.objects.all()\n context[\"location_tags\"] = location_tags\n\n trip_types = TripType.objects.all()\n context[\"trip_types\"] = trip_types\n\n fare_table = FareTable.objects.all()\n context[\"fare_table\"] = fare_table\n\n context[\"total_likes\"] = self.total_likes()\n\n context[\"total_reviews\"] = self.total_reviews()\n\n context[\"AMENITIES\"] = constants.AMENITIES\n\n context[\"data_api\"] = settings.REST_API_ENDPOINT\n\n return context\n\n def total_likes(self):\n return self.likes.count()\n\n def total_reviews(self):\n return self.page_review.count()\n\nclass StartLocationImages(Orderable):\n page = ParentalKey(\"outstation.OutstationRoutePage\", related_name=\"start_location_images\", null=False, blank=False)\n start_location_image = models.ForeignKey(\n \"wagtailimages.Image\",\n null=True, blank=False,\n on_delete=models.SET_NULL,\n related_name=\"+\"\n )\n panels = [\n ImageChooserPanel(\"start_location_image\"),\n ]\n\nclass DestinationImages(Orderable):\n page = ParentalKey(\"outstation.OutstationRoutePage\", related_name=\"destination_images\", null=False, blank=False)\n destination_image = models.ForeignKey(\n \"wagtailimages.Image\",\n null=True, blank=False,\n on_delete=models.SET_NULL,\n related_name=\"+\"\n )\n panels = [\n ImageChooserPanel(\"destination_image\"),\n ]\n","sub_path":"outstation/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":5780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"597825644","text":"from multiprocessing import Process\r\nfrom multiprocessing import Queue\r\nfrom lib.common.Util import *\r\nimport threading\r\n\r\nimport time\r\nimport os\r\n\r\n\r\nclass DataSampleClass:\r\n def __init__(self):\r\n self.data_a = \"\"\r\n self.data_b = 
\"\"\r\n\r\n\r\ndef test_worker_process(queue_data):\r\n process_id = os.getpid()\r\n data_obj = DataSampleClass()\r\n print(\"Start process {0}...\".format(process_id))\r\n for index in range(10, 0, -1):\r\n time.sleep(1)\r\n data_str = \"process{1}-count[{0}]th ...\".format(index, process_id)\r\n data_obj.data_a = data_str\r\n data_obj.data_b = index\r\n queue_data.put(data_obj)\r\n #print(data_str)\r\n print(\"End process {0}\".format(process_id))\r\n\r\n\r\ndef test_worker_thread(queue_data):\r\n print(\"Reader thread started\")\r\n while True:\r\n record = queue_data.get()\r\n if record is None: # We send this as a sentinel to tell the listener to quit.\r\n print(\"Reader thread exits\")\r\n break\r\n else:\r\n debug_print(record.data_a)\r\n debug_print(record.data_b)\r\n\r\n\r\ndef test_processes():\r\n q_data = Queue()\r\n reader = threading.Thread(target=test_worker_thread, args=(q_data,))\r\n reader.start()\r\n process_list = []\r\n worker1 = Process(target=test_worker_process, args=(q_data,))\r\n worker2 = Process(target=test_worker_process, args=(q_data,))\r\n worker3 = Process(target=test_worker_process, args=(q_data,))\r\n process_list.append(worker1)\r\n process_list.append(worker2)\r\n process_list.append(worker3)\r\n\r\n for each_worker in process_list:\r\n each_worker.start()\r\n\r\n for each_worker in process_list:\r\n each_worker.join()\r\n\r\n q_data.put_nowait(None)\r\n reader.join()\r\n\r\n print(\"End all processes \")\r\n\r\n\r\nif __name__ == '__main__':\r\n test_processes()\r\n","sub_path":"test/TestMultiProcessing.py","file_name":"TestMultiProcessing.py","file_ext":"py","file_size_in_byte":1844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"48349567","text":"\"\"\"\nAddition of binary numbers\n\"\"\"\n\ndef main():\n with open('input.txt') as file:\n a, b = file.read().strip().split('\\n')\n s3 = 0\n answer = ''\n for i in range(max(len(a), len(b)) + 1):\n # breakpoint()\n try:\n s1 = int(a[-i-1])\n except IndexError:\n s1 =0\n try:\n s2 = int(b[-i-1])\n except IndexError:\n s2 = 0\n if s1 + s2 + s3 < 2:\n answer += str(s1+s2 + s3)\n s3 = 0\n else:\n answer += str((s1 + s2 + s3) % 2)\n s3 = 1\n\n if answer[-1] == '0':\n answer = answer[:-1]\n print(answer[::-1])\n\nif __name__ == '__main__':\n main()\n","sub_path":"theory/12th_sprint_algorithms/H.py","file_name":"H.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"163385154","text":"import os.path\nfrom data_loader.dataLoader import *\nfrom data_loader.trainDataProcess import TrainDataProcess\nfrom PIL import Image\n\nclass ImageSegmentValDataLoader(DataLoader):\n def __init__(self, path, batch_size=1, img_size=(768, 320)):\n super().__init__(path)\n # self.img_files = sorted(glob.glob('%s/*.*' % path))\n self.trainDataProcess = TrainDataProcess()\n with open(path, 'r') as file:\n self.img_files = file.readlines()\n\n self.img_files = [path.replace('\\n', '') for path in self.img_files]\n\n self.seg_files = [path.replace('JPEGImages', 'SegmentLabel').replace('.png', '.png').replace('.jpg', '.png') for path in\n self.img_files]\n\n self.nF = len(self.img_files) # number of image files\n self.nB = math.ceil(self.nF / batch_size) # number of batches\n self.batch_size = batch_size\n self.imageSize = img_size\n self.color = (127.5, 127.5, 127.5)\n # self.volid_label_seg = [0, 1, 4, 5, 8, 9, 10, 11, 12, 14, 16, 18, 22, 25, 26, 28, 31, 32, 33, 34, 35, 36,\n # 37, 
38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 51, 53, 58, 59, 60, 62, 63, 64, 65]\n # self.valid_label_seg = [[54, 55, 56, 61], [19, 20, 21], [52, 57], [7, 13], [2, 3, 15, 29], [6, 17], [30],\n # [27], [23, 24], [48], [49, 50]]\n self.volid_label_seg = []\n self.valid_label_seg = [[0], [1]]\n\n assert self.nB > 0, 'No images found in path %s' % path\n\n # RGB normalization values\n # self.rgb_mean = np.array([60.134, 49.697, 40.746], dtype=np.float32).reshape((1, 3, 1, 1))\n # self.rgb_std = np.array([29.99, 24.498, 22.046], dtype=np.float32).reshape((1, 3, 1, 1))\n\n def __iter__(self):\n self.count = -1\n self.shuffled_vector = np.arange(self.nF)\n return self\n\n def __next__(self):\n self.count += 1\n if self.count == self.nB:\n raise StopIteration\n\n ia = self.count * self.batch_size\n ib = min((self.count + 1) * self.batch_size, self.nF)\n\n width = self.imageSize[0]\n height = self.imageSize[1]\n\n img_all = []\n seg_all = []\n for index, files_index in enumerate(range(ia, ib)):\n img_path = self.img_files[self.shuffled_vector[files_index]]\n seg_path = self.seg_files[self.shuffled_vector[files_index]]\n\n # judge the path\n if not os.path.exists(img_path):\n print (\"{} not exists.\".format(img_path))\n\n if not os.path.exists(seg_path):\n print (\"{} not exists.\".format(seg_path))\n\n img = cv2.imread(img_path) # BGR\n seg = Image.open(seg_path)\n\n if img is None:\n continue\n\n h, w, _ = img.shape\n img, ratio, padw, padh = self.trainDataProcess.resize_square(img, (width, height), self.color)\n #----------seg------------------------------------------------------------------\n ratio = min(float(width) / w, float(height) / h) # ratio = old / new\n new_shape = [round(h * ratio), round(w * ratio)]\n seg = self.trainDataProcess.encode_segmap(np.array(seg, dtype=np.uint8), self.volid_label_seg, self.valid_label_seg)\n #seg = np.array(seg, dtype=np.uint8)\n #print(set(list(seg.flatten())))\n seg = cv2.resize(seg, (new_shape[1], new_shape[0]))\n seg = np.pad(seg, ((padh // 2, padh - (padh // 2)), (padw // 2, padw - (padw // 2))), 'constant', constant_values=250)\n ################################################################################\n\n # seg\n valid_masks = np.zeros(seg.shape)\n for l in range(0, len(self.valid_label_seg)):\n valid_mask = seg == l\n valid_masks += valid_mask\n valid_masks[valid_masks==0]=-1\n seg = np.float32(seg) * valid_masks\n seg[seg < 0] = 250\n seg = np.uint8(seg)\n\n img_all.append(img)\n seg_all.append(seg)\n\n numpyImages = np.stack(img_all)[:, :, :, ::-1]\n torchImages = self.convertTorchTensor(numpyImages)\n\n #-------------seg\n seg_all = torch.from_numpy(np.array(seg_all)).long()\n\n return torchImages, seg_all\n\n def __len__(self):\n return self.nB # number of batches","sub_path":"data_loader/imageSegmentValDataLoader.py","file_name":"imageSegmentValDataLoader.py","file_ext":"py","file_size_in_byte":4413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"118164793","text":"import setuptools\nimport popen2\nimport os\nimport tempfile\nimport shlex\nimport numpy\nimport glob\nimport sys\nimport os\n\n\nNAME = \"shtools\"\nEXTENSION_NAME = \"pyshtools\"\nDESCRIPTION = \"Python wrapper to the SHTOOLS algorithm\"\nLONG_DESCRIPTION =\"\"\"Python wrapper to the SHTOOLS algorithm\"\"\"\nMAINTAINER = \"Amit Aides\"\nMAINTAINER_EMAIL = \"amitibo@tx.technion.ac.il\"\nURL = \"https://bitbucket.org/amitibo/pyshdom\"\nLICENSE = \"MIT\"\nVERSION = \"0.0.1\"\n\nclassifiers = ['Development Status :: 3 - 
Alpha',\n 'Programming Language :: Python',\n 'License :: OSI Approved :: MIT License',\n 'Intended Audience :: Science/Research',\n 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: Mathematics',\n 'Operating System :: OS Independent']\n\n#\n# Set this to True for compiling the parallel\n# version of the SHDOM algorithm.\n#\nPARALLEL_SUPPORT = False\n\n#\n# f2py stuff\n#\nF2PY_CMD = 'f2py'\nF2PY_MODULE_NAME = '_SHTOOLS'\nF2PY_SRC_PATH = 'src'\nF2PY_SIGN_FILE = '{path}/pyshtools.pyf'.format(path=F2PY_SRC_PATH)\nF2PY_SHTOOLS_FILES = glob.glob('{path}/*.f95'.format(path=F2PY_SRC_PATH))\nSRCSLAPACK2 = ['EigValSym2.f95', 'EigValVecSym2.f95', 'EigValVecSymTri2.f95', 'SHExpandLSQ2.f95', 'SHMTDebias2.f95', 'SHMTVarOpt2.f95']\nSRCSFFTW2 = ['MakeGridDH2.f95', 'MakeGridDHC2.f95', 'MakeGridGLQ2.f95', 'MakeGridGLQC2.f95',\n 'SHExpandDH2.f95', 'SHExpandDHC2.f95', 'SHExpandGLQ2.f95', 'SHExpandGLQC2.f95',\n 'MakeGravGradGridDH2.f95', 'MakeGravGridDH2.f95', 'MakeMagGridDH2.f95']\n\nF2PY_SHTOOLS_FILES = filter(lambda x: not x.split('\\\\')[-1] in SRCSLAPACK2, F2PY_SHTOOLS_FILES)\nF2PY_SHTOOLS_FILES = filter(lambda x: not x.split('\\\\')[-1] in SRCSFFTW2, F2PY_SHTOOLS_FILES)\n\ndef configuration(parent_package='',top_path=None):\n from numpy.distutils.misc_util import Configuration\n \n config = Configuration(\n NAME,\n parent_package,\n top_path,\n version = VERSION,\n maintainer = MAINTAINER,\n maintainer_email = MAINTAINER_EMAIL,\n description = DESCRIPTION,\n license = LICENSE,\n url = URL,\n long_description = LONG_DESCRIPTION\n )\n\n\n config.add_extension(\n name=F2PY_MODULE_NAME,\n sources=[F2PY_SIGN_FILE] + F2PY_SHTOOLS_FILES,\n libraries=['libopenblas', 'libfftw3-3'],\n include_dirs=['fftw', 'OpenBLAS/include'],\n library_dirs=['fftw', 'OpenBLAS/lib'],\n extra_compile_args=['-static', '-static-libgcc', '-static-libstdc++'],\n f2py_options=[]#'--debug-capi']\n )\n\n config.add_extension(\n name='_constant',\n sources=['src/PlanetsConstants.f95'],\n f2py_options=['--quiet']#'--debug-capi']\n )\n\n \n return config\n\n\nif __name__ == \"__main__\":\n\n from numpy.distutils.core import setup\n \n setup(\n configuration=configuration,\n packages = setuptools.find_packages(),\n include_package_data = True,\n platforms = [\"any\"],\n requires=[\"numpy\"],\n tests_require = ['nose',],\n test_suite='nose.collector',\n zip_safe = True,\n classifiers =classifiers\n )\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":3147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"317427481","text":"from glob import glob\nimport os\nfrom config.mario_config import config\nimport tensorflow as tf\n\nblock_size = config[\"block_size\"]\nscreen_H = config[\"screen_H\"]\nscreen_W = config[\"screen_W\"]\nseq_len = config[\"seq_len\"]\ndown_sample = config[\"down_sample\"]\n\ndef mario_dataset(data_dir='{}/Datasets/MarioDemo'.format(os.getenv('DATASET_ROOT'))):\n filenames = glob(os.path.join(data_dir, '*.tfrecord'))\n print('='*80)\n print(filenames)\n\n feature_description = {\n 'blocks': tf.io.FixedLenFeature([seq_len, 8, block_size, block_size], tf.float32),\n 'action': tf.io.FixedLenFeature([seq_len], tf.int64),\n 'mask': tf.io.FixedLenFeature([seq_len], tf.float32),\n 'total_reward': tf.io.FixedLenFeature([], tf.float32)\n }\n\n def _parse_function(example_proto):\n # Parse the input tf.Example proto using the dictionary above.\n record = tf.io.parse_single_example(example_proto, feature_description)\n return 
record['blocks'], record['action'], record['mask'], record['total_reward']\n\n\n dataset = tf.data.TFRecordDataset(filenames)\n dataset = dataset.map(_parse_function, num_parallel_calls=128)\n dataset = dataset.repeat()\n dataset = dataset.shuffle(buffer_size=20)\n dataset = dataset.batch(256)\n dataset = dataset.prefetch(1)\n dataset = dataset.make_initializable_iterator()\n\n return dataset\n\n","sub_path":"prior_learning/mario/dataloader.py","file_name":"dataloader.py","file_ext":"py","file_size_in_byte":1369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"548233047","text":"from yahoo_fin import stock_info as si\n\n\ntry:\n si.get_live_price(\"aapl\")\n print(si)\n fprice1 = int(input(\"\\nPlease enter the beginning period stock price: \"))\n fprice2 = int(input(\"\\nPlease enter the ending period stock price: \"))\n fmoney = int(input(\"\\nPlease enter the amount invested: \"))\n discount = .85\n if fprice1 > fprice2:\n fcheap = fprice2 * discount\n fmax = fprice1\n else:\n fcheap = fprice1 * discount\n fmax = fprice2\n\n fshares = int (fmoney / fcheap) \n int(round(fshares))\n\n print(\"\\n\" + str(fcheap) + \" is the price of the stock after the discount. \\n\" )\n print(str(fshares) + \" is the number of shares purchased. \\n\" )\n\n fprofit = int((fmax * fshares) - (fcheap * fshares))\n print(str(fprofit) + \" $ is the profit you made over the last 6 months. \\n\" )\nexcept:\n print(\"\\n Unknown error, please abort program and try again.\\n\")\n","sub_path":"Stocks.gyp","file_name":"Stocks.gyp","file_ext":"gyp","file_size_in_byte":913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"187732034","text":"#Importing random library\r\nimport random \r\n\r\n#Creating a class called Agent\r\n#Generating agents and environment inside the class \r\nclass Agent():\r\n def __init__(self, environment, agents):\r\n self.y = random.randint(0,99)\r\n self.x = random.randint(0,99)\r\n self.environment = environment\r\n self.store = 0\r\n self.agents = agents\r\n self.store = 0\r\n#Generating movement for agents\r\n def move(self):\r\n if random.random() < 0.5:\r\n self.y = (self.y + 1) % 100\r\n else:\r\n self.y = (self.y - 1) % 100\r\n\r\n if random.random() < 0.5:\r\n self.x = (self.x + 1) % 100\r\n else:\r\n self.x = (self.x - 1) % 100\r\n \r\n#Making agents eat units of the environment\r\n def eat(self):\r\n if self.environment[self.y][self.x] > 10:\r\n self.environment[self.y][self.x] -= 10\r\n self.store += 10\r\n#Moving the distance between agents calculation into the class\r\n def distance_between(self, agents_row_b):\r\n return (((self.x - agents_row_b.x)**2) +\r\n ((self.y - agents_row_b.y)**2))**0.5\r\n#Creating a function sharing with neighbours inside the class,\r\n#in order to make the agents communicate with each other\r\n def share_with_neighbours(self, neighbourhood):\r\n for i in range(len(self.agents)):\r\n distance = self.distance_between(self.agents[i])\r\n if distance <= neighbourhood:\r\n total = self.store + self.agents[i].store \r\n average = total/2\r\n self.store = average\r\n self.agents[i].store = average\r\n print(average) #printing the average to see if function works","sub_path":"Practical 7/agentframework.py","file_name":"agentframework.py","file_ext":"py","file_size_in_byte":1709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"90313017","text":"# (C) Copyright 2014 Voyager Search\n#\n# Licensed 
under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom __future__ import division\nimport os\nimport sys\nimport logging\nimport multiprocessing\nimport arcpy\nfrom utils import status\n\n\ndef global_job(*args):\n \"\"\"Create a global job object.\"\"\"\n global job\n job = args[0]\n\n\ndef update_row(fields, rows, row):\n \"\"\"Updates the coded values in a row with the coded value descriptions.\"\"\"\n field_domains = {f.name: f.domain for f in fields if f.domain}\n fields_values = zip(rows.fields, row)\n for j, x in enumerate(fields_values):\n if x[0] in field_domains:\n domain_name = field_domains[x[0]]\n row[j] = job.domains[domain_name][x[1]]\n return row\n\n\ndef worker(data_path):\n \"\"\"The worker function to index feature data and tabular data.\"\"\"\n if data_path:\n job.connect_to_zmq()\n geo = {}\n entry = {}\n dsc = arcpy.Describe(data_path)\n\n if dsc.dataType == 'Table':\n field_types = job.search_fields(data_path)\n fields = field_types.keys()\n query = job.get_table_query(dsc.name)\n constraint = job.get_table_constraint(dsc.name)\n if query and constraint:\n expression = \"\"\"{0} AND {1}\"\"\".format(query, constraint)\n else:\n if query:\n expression = query\n else:\n expression = constraint\n mapped_fields = job.map_fields(dsc.name, fields, field_types)\n with arcpy.da.SearchCursor(data_path, fields, expression) as rows:\n for i, row in enumerate(rows, 1):\n if job.domains:\n row = update_row(dsc.fields, rows, list(row))\n #mapped_fields = job.map_fields(dsc.name, fields, field_types)\n mapped_fields = dict(zip(mapped_fields, row))\n mapped_fields['_discoveryID'] = job.discovery_id\n mapped_fields['title'] = dsc.name\n oid_field = filter(lambda x: x in ('FID', 'OID', 'OBJECTID'), rows.fields)\n if oid_field:\n fld_index = rows.fields.index(oid_field[0])\n else:\n fld_index = i\n entry['id'] = '{0}_{1}_{2}'.format(job.location_id, os.path.basename(data_path), row[fld_index])\n entry['location'] = job.location_id\n entry['action'] = job.action_type\n entry['entry'] = {'fields': mapped_fields}\n job.send_entry(entry)\n else:\n sr = arcpy.SpatialReference(4326)\n geo['spatialReference'] = dsc.spatialReference.name\n geo['code'] = dsc.spatialReference.factoryCode\n field_types = job.search_fields(dsc.catalogPath)\n fields = field_types.keys()\n query = job.get_table_query(dsc.name)\n constraint = job.get_table_constraint(dsc.name)\n if query and constraint:\n expression = \"\"\"{0} AND {1}\"\"\".format(query, constraint)\n else:\n if query:\n expression = query\n else:\n expression = constraint\n if dsc.shapeFieldName in fields:\n fields.remove(dsc.shapeFieldName)\n field_types.pop(dsc.shapeFieldName)\n if dsc.shapeType == 'Point':\n with arcpy.da.SearchCursor(dsc.catalogPath, ['SHAPE@'] + fields, expression, sr) as rows:\n mapped_fields = job.map_fields(dsc.name, list(rows.fields[1:]), field_types)\n for i, row in enumerate(rows):\n if job.domains:\n row = update_row(dsc.fields, rows, list(row))\n geo['lon'] = row[0].firstPoint.X #row[0][0]\n geo['lat'] = row[0].firstPoint.Y 
#row[0][1]\n                    if job.include_wkt:\n                        geo['wkt'] = row[0].WKT\n                    #mapped_fields = job.map_fields(dsc.name, list(rows.fields[1:]), field_types)\n                    mapped_fields = dict(zip(mapped_fields, row[1:]))\n                    mapped_fields['_discoveryID'] = job.discovery_id\n                    mapped_fields['title'] = dsc.name\n                    mapped_fields['geometry_type'] = dsc.shapeType\n                    entry['id'] = '{0}_{1}_{2}'.format(job.location_id, os.path.basename(data_path), i)\n                    entry['location'] = job.location_id\n                    entry['action'] = job.action_type\n                    entry['entry'] = {'geo': geo, 'fields': mapped_fields}\n                    job.send_entry(entry)\n        else:\n            with arcpy.da.SearchCursor(dsc.catalogPath, ['SHAPE@'] + fields, expression, sr) as rows:\n                mapped_fields = job.map_fields(dsc.name, list(rows.fields[1:]), field_types)\n                for i, row in enumerate(rows):\n                    if job.domains:\n                        row = update_row(dsc.fields, rows, list(row))\n                    geo['xmin'] = row[0].extent.XMin\n                    geo['xmax'] = row[0].extent.XMax\n                    geo['ymin'] = row[0].extent.YMin\n                    geo['ymax'] = row[0].extent.YMax\n                    if job.include_wkt:\n                        geo['wkt'] = row[0].WKT\n                    #mapped_fields = job.map_fields(dsc.name, list(rows.fields[1:]), field_types)\n                    mapped_fields = dict(zip(mapped_fields, row[1:]))\n                    mapped_fields['_discoveryID'] = job.discovery_id\n                    mapped_fields['title'] = dsc.name\n                    mapped_fields['geometry_type'] = dsc.shapeType\n                    entry['id'] = '{0}_{1}_{2}'.format(job.location_id, os.path.basename(data_path), i)\n                    entry['location'] = job.location_id\n                    entry['action'] = job.action_type\n                    entry['entry'] = {'geo': geo, 'fields': mapped_fields}\n                    job.send_entry(entry)\n\n\ndef run_job(esri_job):\n    \"\"\"Determines the data type and each dataset is sent to the worker to be processed.\"\"\"\n    status_writer = status.Writer()\n    status_writer.send_percent(0.0, "Initializing... 0.0%", 'esri_worker')\n    job = esri_job\n    dsc = arcpy.Describe(job.path)\n\n    # A single feature class or table.\n    if dsc.dataType in ('DbaseTable', 'FeatureClass', 'Shapefile', 'Table'):\n        global_job(job, int(arcpy.GetCount_management(job.path).getOutput(0)))\n        worker(job.path)\n        return\n\n    # A geodatabase (.mdb, .gdb, or .sde).\n    elif dsc.dataType == 'Workspace':\n        arcpy.env.workspace = job.path\n        feature_datasets = arcpy.ListDatasets('*', 'Feature')\n        tables = []\n        tables_to_keep = job.tables_to_keep()\n        tables_to_skip = job.tables_to_skip()\n        if job.tables_to_keep:\n            for t in tables_to_keep:\n                [tables.append(os.path.join(job.path, tbl)) for tbl in arcpy.ListTables(t)]\n                [tables.append(os.path.join(job.path, fc)) for fc in arcpy.ListFeatureClasses(t)]\n                for fds in feature_datasets:\n                    [tables.append(os.path.join(job.path, fds, fc)) for fc in arcpy.ListFeatureClasses(wild_card=t, feature_dataset=fds)]\n        else:\n            [tables.append(os.path.join(job.path, tbl)) for tbl in arcpy.ListTables()]\n            [tables.append(os.path.join(job.path, fc)) for fc in arcpy.ListFeatureClasses()]\n            for fds in feature_datasets:\n                [tables.append(os.path.join(job.path, fds, fc)) for fc in arcpy.ListFeatureClasses(feature_dataset=fds)]\n\n        if tables_to_skip:\n            for t in tables_to_skip:\n                [tables.remove(os.path.join(job.path, tbl)) for tbl in arcpy.ListTables(t)]\n                [tables.remove(os.path.join(job.path, fc)) for fc in arcpy.ListFeatureClasses(t)]\n                for fds in feature_datasets:\n                    [tables.remove(os.path.join(job.path, fds, fc)) for fc in arcpy.ListFeatureClasses(wild_card=t, feature_dataset=fds)]\n\n    # A geodatabase feature dataset, SDC data, or CAD dataset.\n    elif dsc.dataType == 'FeatureDataset' or dsc.dataType == 'CadDrawingDataset':\n        tables_to_keep = job.tables_to_keep()\n        tables_to_skip =
job.tables_to_skip()\n arcpy.env.workspace = job.path\n if tables_to_keep:\n tables = []\n for tbl in tables_to_keep:\n [tables.append(os.path.join(job.path, fc)) for fc in arcpy.ListFeatureClasses(tbl)]\n tables = list(set(tables))\n else:\n tables = [os.path.join(job.path, fc) for fc in arcpy.ListFeatureClasses()]\n if tables_to_skip:\n for tbl in tables_to_skip:\n [tables.remove(os.path.join(job.path, fc)) for fc in arcpy.ListFeatureClasses(tbl) if fc in tables]\n\n # Not a recognized data type.\n else:\n sys.exit(1)\n\n if job.multiprocess:\n # Multiprocess larger databases and feature datasets.\n multiprocessing.log_to_stderr()\n logger = multiprocessing.get_logger()\n logger.setLevel(logging.INFO)\n pool = multiprocessing.Pool(initializer=global_job, initargs=(job,))\n for i, _ in enumerate(pool.imap_unordered(worker, tables), 1):\n status_writer.send_percent(i / len(tables), \"{0:%}\".format(i / len(tables)), 'esri_worker')\n # Synchronize the main process with the job processes to ensure proper cleanup.\n pool.close()\n pool.join()\n else:\n for i, tbl in enumerate(tables, 1):\n global_job(job)\n worker(tbl)\n status_writer.send_percent(i / len(tables), \"{0} {1:%}\".format(tbl, i / len(tables)), 'esri_worker')\n return\n","sub_path":"locations/workers/esri_worker.py","file_name":"esri_worker.py","file_ext":"py","file_size_in_byte":10448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"107292586","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport numpy.linalg as npl\nimport matplotlib.pyplot as mp\nfrom householder_transforms import *\n\ndef bidiagonal(A):\n \"\"\" Réduction de matrices sous forme bidiagonale.\n Entrée:\n A matrice quelconque.\n Sortie:\n Tuple (Ql, BD, Qr) :\n - Ql matrice de changement de base à gauche,\n - BD matrice bidiagonale,\n - Qr matrice de changement de base à droite.\n \"\"\"\n A = np.matrix(A, dtype='f')\n n, m = A.shape\n Qleft = np.eye(n, dtype='f')\n Qright = np.eye(m, dtype='f')\n BD = np.matrix(A)\n for i in range(n):\n x = BD[i:, i]\n y = np.zeros([n-i, 1])\n if x[0, 0] > 0:\n y[0, 0] = npl.norm(x)\n else:\n y[0, 0] = -npl.norm(x)\n Qleft[i:, i:] = householder_product_right(x, y, Qleft[i:, i:])\n BD[i:, i:] = householder_product_left(x, y, BD[i:, i:])\n if i < n-1:\n x = BD[i, i+1:].T\n y = np.zeros([m-(i+1), 1])\n if x[0, 0] > 0:\n y[0, 0] = npl.norm(x)\n else:\n y[0, 0] = -npl.norm(x)\n Qright[i+1:, i:] = householder_product_left(x, y, Qright[i+1:, i:])\n BD[i:, i+1:] = householder_product_right(x, y, BD[i:, i+1:])\n return Qleft, BD, Qright","sub_path":"3-image-compression-svd/bidiagonal_matrix.py","file_name":"bidiagonal_matrix.py","file_ext":"py","file_size_in_byte":1361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"58288399","text":"# -*- coding: utf-8 -*-\n# __author__ = 'XingHuan'\n# 6/24/2018\n\n# Copyright 2018 XingHuan\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nfrom 
sins.db.models import Person, File, PermissionGroup, Project, ProjectPersonConnection, current_user, database, JOIN\n\n\nclass PermissionGroupError(Exception): pass\n\n\ndef get_current_user_instance():\n query = (Person\n .select(Person, File, PermissionGroup)\n .join_from(Person, File, join_type=JOIN.LEFT_OUTER)\n .join_from(Person, PermissionGroup, join_type=JOIN.LEFT_OUTER)\n .where(Person.user_login == current_user)\n )\n return query[0]\n\n\ndef get_current_permissions(user=None):\n if user is None:\n user = get_current_user_instance()\n global_permission_group = user.permission_group\n if global_permission_group is None:\n raise PermissionGroupError('current user doesn\\'t has permission')\n\n project_permissions = {}\n query = (ProjectPersonConnection\n .select(ProjectPersonConnection, Person, Project, PermissionGroup)\n .join_from(ProjectPersonConnection, Person)\n .join_from(ProjectPersonConnection, Project)\n .join_from(ProjectPersonConnection, PermissionGroup, join_type=JOIN.LEFT_OUTER)\n .where(Person.id == user.id)\n )\n for q in query:\n project_permissions.update({q.project.id: q.permission_group})\n\n return global_permission_group, project_permissions\n\n\nif database.table_exists(Person.db_table_name()):\n current_user_object = get_current_user_instance()\n global_permission, project_permissions = get_current_permissions(current_user_object)\n\n","sub_path":"sins/db/current.py","file_name":"current.py","file_ext":"py","file_size_in_byte":2172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"50398667","text":"#coding:utf-8\n\n\n#Definition of ListNode\nclass ListNode(object):\n def __init__(self, val, next=None):\n self.val = val\n self.next = next\n\n\"\"\"\n@param head: the head of linked list.\n@return: a middle node of the linked list\n\"\"\"\nclass Solution:\n \n\tdef middleNode(self, head):\n\t\t# write your code here\n\t\tslow = fast = head\n\t\tif head:\n\t\t\twhile fast.next!=None and fast.next.next!=None:\n\t\t\t\tslow = slow.next\n\t\t\t\tfast = fast.next.next\n\t\treturn slow\nif __name__==\"__main__\":\n\ts = Solution()\n\tlist1 = ListNode(1,ListNode(2,ListNode(3)))\n\tlist2 = ListNode(1,ListNode(2))\n\tprint(s.middleNode(list1).val)\n\tprint(s.middleNode(list2).val)","sub_path":"lintcode刷题/228.py","file_name":"228.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"244156089","text":"import copy\nimport warnings\n\nfrom .area import Area\nfrom .areas import Areas\nfrom .repeating import Repeating\nfrom ..fields import Field\n\n\nclass RepeatingArea(Repeating):\n \"\"\"\n Represents multiple identical Areas on a page.\n\n A root argument is required, which is expected to be a non-unique Field\n on the page.\n\n A collection of Areas are built from every instance of the root that is\n found. Every other Field provided in the arguments is populated inside\n each Area.\n\n In the following example, there's a table with 15 rows. Each row has\n two cells. 
The sixth row in the table should have an item with the\n name \"Banana\" and a price of \"$7.00\"\n\n >>> from stere.areas import RepeatingArea\n >>> from stere.fields import Root, Link, Text\n >>>\n >>> class Inventory(Page):\n >>> def __init__(self):\n >>> self.inventory_items = RepeatingArea(\n >>> root=Root('xpath', '//table/tr'),\n >>> name=Link('xpath', './td[1]'),\n >>> price=Text('xpath', './td[2]'),\n >>> )\n\n >>> inventory = Inventory()\n >>> assert 15 == len(inventory.areas)\n >>> assert \"Banana\" == inventory.areas[5].name\n >>> assert \"$7.00\" == inventory.areas[5].price\n \"\"\"\n\n def __init__(self, **kwargs):\n if kwargs.get('root') is None:\n raise ValueError('RepeatingArea requires a Root Field.')\n\n self.root = kwargs['root']\n\n if kwargs.get('items') is not None:\n raise ValueError('\"items\" is a reserved parameter.')\n\n self.items = {}\n for k, v in kwargs.items():\n if not isinstance(v, Field):\n raise ValueError(\n 'RepeatingArea arguments can only be Field objects.',\n )\n if k != 'root':\n self.items[k] = v\n # Field (in plural) can be accessed directly.\n setattr(self, f'{k}s', v)\n\n self.repeater = Area\n self.repeater_name = self.repeater.__name__\n\n def new_container(self):\n \"\"\"Get a new instance of the container this class uses.\n\n Returns:\n Areas\n\n \"\"\"\n return Areas()\n\n @property\n def areas(self):\n \"\"\"Find all instances of the root,\n then return a list of Areas: one for each root.\n\n Returns:\n Areas: list-like collection of every Area that was found.\n\n Example:\n\n >>> def test_stuff():\n >>> listings = MyPage().my_repeating_area.areas\n >>> listings[0].my_input.fill('Hello world')\n\n \"\"\"\n return self.children()\n\n def children(self):\n \"\"\"Find all instances of the root,\n then return a list of Areas: one for each root.\n\n Returns:\n Areas: list-like collection of every Area that was found.\n\n Example:\n\n >>> def test_stuff():\n >>> listings = MyPage().my_repeating_area.areas\n >>> listings[0].my_input.fill('Hello world')\n\n \"\"\"\n all_roots = self._all_roots()\n container = self.new_container()\n\n for item in all_roots:\n copy_items = copy.deepcopy(self.items)\n for field_name in copy_items.keys():\n copy_items[field_name]._element.parent_locator = item\n\n new_area = self.repeater(**copy_items)\n container.append(new_area)\n return container\n\n def area_with(self, field_name, field_value):\n \"\"\"Find an Area where the Field's value matches an expected value.\n\n Arguments:\n field_name (str): The name of the Field object.\n field_value (str): The value of the Field object.\n\n Returns:\n Area: The Area object that matches the search.\n\n Example:\n\n >>> class Inventory(Page):\n >>> def __init__(self):\n >>> self.items = RepeatingArea(\n >>> root=Root('xpath', '//my_xpath_string'),\n >>> description=Text('xpath', '//my_xpath_string')\n >>> )\n >>>\n >>> def test_stuff():\n >>> inventory = Inventory()\n >>> found_area = inventory.items.area_with(\n >>> \"description\", \"Bananas\")\n\n \"\"\"\n warnings.warn(\n 'RepeatingArea.areas_with() is deprecated.'\n ' Use RepeatingArea.areas.containing() instead.',\n FutureWarning,\n )\n for area in self.areas:\n field = getattr(area, field_name)\n\n if field.value == field_value:\n return area\n\n raise ValueError(f'Could not find {field_value} in any 
{field_name}.')\n","sub_path":"stere/areas/repeating_area.py","file_name":"repeating_area.py","file_ext":"py","file_size_in_byte":4725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"239278600","text":"import numpy as np\nimport pandas as pd\nimport re\nfrom pprint import pprint\nfrom selenium import webdriver\nfrom selenium.common.exceptions import NoSuchElementException\nimport lxml.html\nfrom lxml.html.clean import Cleaner\nfrom bs4 import BeautifulSoup\nfrom time import sleep\nfrom scrape_538 import clean_team_names\nimport credentials # <- Not for github!\nfrom scrape_unib import wait_for_page_ready\n\n\ndef show_more(driver):\n # Find and click the Show more button until all is shown\n while True:\n try:\n btn = driver.find_element_by_css_selector(\n \"button.KambiBC-my-bets-summary__show-more-button\"\n )\n btn.click()\n wait_for_page_ready(driver)\n except NoSuchElementException:\n # Done\n break\n\n\ndef login(driver):\n # Log in\n driver.find_element_by_css_selector(\"input[name=username]\").send_keys(\n credentials.usr\n )\n driver.find_element_by_css_selector(\"input[name=password]\").send_keys(\n credentials.pswd\n )\n driver.find_element_by_css_selector(\"button[data-test-name=btn-login]\").click()\n\n \ndef download_my_bets():\n # Open a browser\n driver = webdriver.Firefox()\n driver.implicitly_wait(10)\n driver.get(\"https://www.unibet.eu/betting/sports/bethistory/\")\n\n # Accept cookies if not already done\n if False: #not driver.get_cookie(\"CookieConsent\"):\n # Cookie bar animation is slow\n sleep(10)\n driver.find_element_by_css_selector(\"#CybotCookiebotDialogBodyButtonAccept\").click()\n wait_for_page_ready(driver)\n\n # Do all browser handling in a try-finally block\n try:\n wait_for_page_ready(driver)\n login(driver)\n wait_for_page_ready(driver)\n show_more(driver)\n html = driver.page_source\n finally:\n driver.quit()\n \n # Clean\n html = lxml.html.clean.clean_html(html)\n\n return html\n\n\ndef scrape_info_from_html(html):\n # Parse the html\n doc = lxml.html.document_fromstring(html)\n coupons = doc.cssselect(\"li.KambiBC-my-bets-summary__item\")\n\n # Extract info from each coupon\n # fmt: off\n l = []\n for coupon in coupons:\n d = {}\n d[\"coupon-date\"] = coupon.cssselect(\"span.KambiBC-my-bets-summary__coupon-date\")[0].text\n d[\"status\"] = coupon.cssselect(\"span.KambiBC-my-bets-summary__coupon-status\")[0].text\n \n # Various fields with the same class\n fields = coupon.cssselect(\"\"\"\n div.KambiBC-my-bets-summary__field > \n span.KambiBC-my-bets-summary__value\n \"\"\")\n d[\"bet-on\"] = fields[0].text\n d[\"stake\"] = fields[1].text\n d[\"odds\"] = fields[2].text\n d[\"coupon-id\"] = fields[3].text\n\n d[\"event-list-name\"] = coupon.cssselect(\".KambiBC-my-bets-summary-coupon__event-list-name\")[0].text\n\n # Info alleen relevant voor open bets\n if d['status'] == \"Open\":\n d[\"potential-payout\"] = fields[4].text\n d[\"cash-out\"] = coupon.cssselect(\"span.KambiBC-react-cash-out-button__value > span\")[1].text\n\n # Info alleen relevant voor gesloten bets\n if d['status'] in (\"Won\", \"Void\", \"Cash Out confirmed\"):\n d['payout'] = coupon.cssselect(\"span.KambiBC-my-bets-summary-payout__value\")[0].text\n \n l.append(d)\n\n # Make a dataframe\n return pd.DataFrame(l)\n\n\ndef data_prep(df):\n for c in (\"stake\", \"odds\", \"cash-out\", \"potential-payout\", \"payout\"):\n df[c] = df[c].apply(get_float_from_string)\n \n teamnames = df[\"event-list-name\"].str.split(\" - \", 
expand=True)\n df[\"home-team\"] = teamnames[0]\n df[\"away-team\"] = teamnames[1]\n return df\n\n\ndef get_float_from_string(s):\n # €3.45 to 3.45\n # (2.34) to 2.34\n m = re.search(r\"\\d+\\.\\d+\", str(s))\n if m:\n return float(m.group(0))\n else:\n return None\n\n\nif __name__ == \"__main__\":\n # Open a browser and download the website with my bets\n html = download_my_bets()\n\n # Scrape useful info\n df = scrape_info_from_html(html)\n\n # Data preparation\n df = data_prep(df)\n\n # Done & save\n print(df.shape)\n print(df.head())\n df.to_csv(\"./data/my_bets.csv\")\n","sub_path":"my_bets.py","file_name":"my_bets.py","file_ext":"py","file_size_in_byte":4184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"482413399","text":"import sublime\nimport sublime_plugin\nimport json\nimport os\nimport golangconfig\n\nfrom .gotools_util import Buffers\nfrom .gotools_util import GoBuffers\nfrom .gotools_util import Logger\nfrom .gotools_util import ToolRunner\n\nclass GotoolsSuggestions(sublime_plugin.EventListener):\n CLASS_SYMBOLS = {\n \"func\": \"ƒ\",\n \"var\": \"ν\",\n \"type\": \"ʈ\",\n \"package\": \"ρ\"\n }\n\n def on_query_completions(self, view, prefix, locations):\n if not GoBuffers.is_go_source(view): return\n if not golangconfig.setting_value(\"autocomplete\")[0]: return\n\n gocodeFlag = [\"-f=json\", \"-sock=none\"] if golangconfig.setting_value(\"gocode_client_mode\")[0] else [\"-f=json\"]\n suggestionsJsonStr, stderr, rc = ToolRunner.run(view, \"gocode\", gocodeFlag + [\"autocomplete\", view.file_name(), str(locations[0])], stdin=Buffers.buffer_text(view))\n\n suggestionsJson = json.loads(suggestionsJsonStr)\n\n Logger.log(\"DEBUG: gocode output: \" + suggestionsJsonStr)\n\n if rc != 0:\n Logger.status(\"no completions found: \" + str(e))\n return []\n\n if len(suggestionsJson) > 0:\n return ([GotoolsSuggestions.build_suggestion(j) for j in suggestionsJson[1]], sublime.INHIBIT_WORD_COMPLETIONS)\n else:\n return []\n\n @staticmethod\n def build_suggestion(json):\n label = '{0: <30.30} {1: <40.40} {2}'.format(\n json[\"name\"],\n json[\"type\"],\n GotoolsSuggestions.CLASS_SYMBOLS.get(json[\"class\"], \"?\"))\n return (label, json[\"name\"])\n","sub_path":"gotools_suggestions.py","file_name":"gotools_suggestions.py","file_ext":"py","file_size_in_byte":1449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"487998076","text":"from django import forms\nfrom django.contrib.admin.widgets import RelatedFieldWidgetWrapper\nfrom django.utils.translation import gettext_lazy as _\nfrom mptt.forms import TreeNodeChoiceField\n\nfrom .models import Category\n\n\nclass CategoryAdminForm(forms.ModelForm):\n \"\"\"\n Form for Category's Admin.\n \"\"\"\n parent = TreeNodeChoiceField(\n label=_('Parent category'),\n empty_label=_('No parent category'),\n level_indicator='|--', required=False,\n queryset=Category.objects.all())\n\n def __init__(self, *args, **kwargs):\n super(CategoryAdminForm, self).__init__(*args, **kwargs)\n self.fields['parent'].widget = RelatedFieldWidgetWrapper(\n self.fields['parent'].widget,\n Category.parent.field.remote_field,\n self.admin_site)\n\n def clean_parent(self):\n \"\"\"\n Check if category parent is not selfish.\n \"\"\"\n data = self.cleaned_data['parent']\n if data == self.instance:\n raise forms.ValidationError(\n _('A category cannot be parent of itself.'),\n code='self_parenting')\n return data\n\n class Meta:\n \"\"\"\n CategoryAdminForm's 
Meta.\n \"\"\"\n model = Category\n fields = forms.ALL_FIELDS\n","sub_path":"categories/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"601670480","text":"#Accepted on leetcode\r\n#Time complexity - O(n) as we traverse entire string\r\n#Space omplexity - O(n) since we are using physical stack\r\n\r\nclass Solution(object):\r\n def calculate(self, s):\r\n \"\"\"\r\n :type s: str\r\n :rtype: int\r\n \"\"\"\r\n curr = 0\r\n sign = '+'\r\n i = 0\r\n stack = []\r\n while i < len(s):\r\n c = s[i]\r\n #Case1: check if it is a digit\r\n if c.isdigit():\r\n curr = curr * 10 + int(c)\r\n if not c.isdigit() and c!= ' ' or i == len(s)-1:\r\n if sign == '+':\r\n stack.append(+curr)\r\n elif sign == '-':\r\n stack.append(-curr)\r\n elif sign == '*':\r\n stack.append(+(stack.pop() * curr))\r\n elif sign == '/':\r\n stack.append(+(stack.pop() / curr))\r\n curr = 0\r\n sign = c\r\n i += 1\r\n curr = 0\r\n while len(stack) > 0:\r\n curr += stack.pop()\r\n return curr","sub_path":"basicCalculator2.py","file_name":"basicCalculator2.py","file_ext":"py","file_size_in_byte":1068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"469662889","text":"from random import randint\nclass Player():\n \n defense = [\"A\", \"B\", \"C\", \"D\"]\n ring = []\n \n def __init__(self, name):\n self.name = name\n self.block = \"B\"\n self.health = 6\n\n def show_block(self):\n return self.block\n\n def show_health(self):\n return self.health\n \n def change_block(self, new_block):\n if new_block in Player.defense:\n self.block = new_block\n print(\"OK\")\n else:\n pass\n\n def super_hit(self, target):\n if self.health > 1:\n self.health -= 1\n risk = randint(1, 6)\n test = randint(1, 6)\n if risk == test:\n x = randint(2, 4)\n target.health -= x\n print(\"Вы успешно применили особый приём!\")\n if target.health <= 0:\n print(target.name + \" нокаутирован! Бой завершен.\")\n else:\n target.change_block()\n else:\n target.change_block()\n print(\"Особый приём не удался.\")\n else:\n pass\n \n\n def hit(self, target, aim):\n\n if 6 > int(aim) > 0:\n\n if aim == 1 and target.block == \"A\":\n print(\"Ваш удар блокирован.\")\n target.change_block()\n elif aim == 1 and target.block != \"A\":\n target.health -= 1\n print(\"Вы нанесли удар!\")\n if target.health <= 0:\n print(target.name + \" нокаутирован! Бой завершен.\")\n else:\n target.change_block()\n elif aim == 2 and (target.block == \"A\" or target.block == \"B\"):\n print(\"Ваш удар блокирован.\")\n target.change_block()\n elif aim == 2 and target.block != \"A\" and target.block != \"B\":\n target.health -= 1\n print(\"Вы нанесли удар!\")\n if target.health <= 0:\n print(target.name + \" нокаутирован! Бой завершен.\")\n else:\n target.change_block()\n elif aim == 3 and (target.block == \"B\" or target.block == \"C\"):\n print(\"Ваш удар блокирован.\")\n target.change_block()\n elif aim == 3 and target.block != \"B\" and target.block != \"C\":\n target.health -= 1\n print(\"Вы нанесли удар!\")\n if target.health <= 0:\n print(target.name + \" нокаутирован! 
Бой завершен.\")\n else:\n target.change_block()\n elif aim == 4 and (target.block == \"C\" or target.block == \"D\"):\n print(\"Ваш удар блокирован.\")\n target.change_block()\n elif aim == 4 and target.block != \"C\" and target.block != \"D\":\n target.health -= 1\n print(\"Вы нанесли удар!\")\n if target.health <= 0:\n print(target.name + \" нокаутирован! Бой завершен.\")\n else:\n target.change_block()\n elif aim == 5 and target.block == \"D\":\n print(\"Ваш удар блокирован.\")\n target.change_block()\n elif aim == 5 and target.block != \"D\":\n target.health -= 1\n print(\"Вы нанесли удар!\")\n if target.health <= 0:\n print(target.name + \" нокаутирован! Бой завершен.\")\n else:\n target.change_block()\n else:\n pass\n\nclass Enemy(Player):\n\n def __init__(self, name):\n self.name = name\n self.block = Player.defense[randint(0, 3)]\n self.health = 6\n\n def change_block(self):\n self.block = Player.defense[randint(0, 3)]\n while True:\n n = randint(1, 10)\n if n > 2:\n Player.ring[1].hit()\n break\n if n <= 2 and self.health > 1:\n Player.ring[1].super_hit()\n break\n\n def hit(self, target = \"\", aim = \"\"):\n target = Player.ring[0]\n aim = randint(1, 5)\n if aim == 1 and target.block == \"A\":\n print(\"Вы блокировали удар.\")\n elif aim == 1 and target.block != \"A\":\n target.health -= 1\n print(\"Вам нанесён удар!\")\n if target.health <= 0:\n print(target.name + \" нокаутирован! Бой завершен.\")\n else:\n pass\n elif aim == 2 and (target.block == \"A\" or target.block == \"B\"):\n print(\"Вы блокировали удар.\")\n elif aim == 2 and target.block != \"A\" and target.block != \"B\":\n target.health -= 1\n print(\"Вам нанесён удар!\")\n if target.health <= 0:\n print(target.name + \" нокаутирован! Бой завершен.\")\n else:\n pass\n elif aim == 3 and (target.block == \"B\" or target.block == \"C\"):\n print(\"Вы блокировали удар.\")\n elif aim == 3 and target.block != \"B\" and target.block != \"C\":\n target.health -= 1\n print(\"Вам нанесён удар!\")\n if target.health <= 0:\n print(target.name + \" нокаутирован! Бой завершен.\")\n else:\n pass\n elif aim == 4 and (target.block == \"C\" or target.block == \"D\"):\n print(\"Вы блокировали удар.\")\n elif aim == 4 and target.block != \"C\" and target.block != \"D\":\n target.health -= 1\n print(\"Вам нанесён удар!\")\n if target.health <= 0:\n print(target.name + \" нокаутирован! Бой завершен.\")\n else:\n pass\n elif aim == 5 and target.block == \"D\":\n print(\"Вы блокировали удар.\")\n elif aim == 5 and target.block != \"D\":\n target.health -= 1\n print(\"Вам нанесён удар!\")\n if target.health <= 0:\n print(target.name + \" нокаутирован! Бой завершен.\")\n else:\n pass\n\n def super_hit(self, target = \"\"):\n target = Player.ring[0]\n self.health -= 1\n risk = randint(1, 6)\n test = randint(1, 6)\n if risk == test:\n x = randint(2, 4)\n target.health -= x\n print(\"Против вас использован особый приём!\")\n if target.health <= 0:\n print(target.name + \" нокаутирован! Бой завершен.\")\n else:\n print(\"Вам удалось отразить особую атаку.\")\n#\ndef battle(name, ename):\n global me\n global you\n if name == \"\":\n name = \"Tyler\"\n if ename == \"\":\n ename = \"Cornelius\"\n me = Player(name)\n Player.ring.append(me)\n you = Enemy(ename)\n Player.ring.append(you)\n#\nprint(\"Чтобы выбрать зону атаки, введите число от 1 до 5. \")\nprint(\"Чтобы изменить зону защиты, введите 'A', 'B', 'C' или 'D'. \")\nprint(\"Чтобы использовать особый приём, введите 'X'. 
\")\nwhile True:\n qu = input(\"Чтобы покинуть программу, введите 'Q': \")\n if qu == \"Q\":\n break\n name = input(\"Введите ваше имя: \")\n ename = input(\"Введите имя противника: \")\n Player.ring.clear()\n battle(name, ename)\n k = 1\n while True:\n p = me.show_health()\n q = you.show_health()\n if p <= 0:\n print(\"Вы проиграли.\")\n break\n if q <= 0:\n print(\"Вы победили!\")\n break\n \n print(\"Раунд \" + str(k))\n x = input(\"GO! \" )\n if not x.isdigit():\n x = x.upper()\n if x == \"A\":\n me.change_block(\"A\")\n elif x == \"B\":\n me.change_block(\"B\")\n elif x == \"C\":\n me.change_block(\"C\")\n elif x == \"D\":\n me.change_block(\"D\")\n elif x == \"X\":\n me.super_hit(you)\n k+=1\n if x.isdigit():\n x = int(x)\n if x == 1:\n me.hit(you, 1)\n k+=1\n elif x == 2:\n me.hit(you, 2)\n k+=1\n elif x == 3:\n me.hit(you, 3)\n k+=1\n elif x == 4:\n me.hit(you, 4)\n k+=1\n elif x == 5:\n me.hit(you, 5)\n k+=1\n \n \n \n\n","sub_path":"python/4thWeek/bk.py","file_name":"bk.py","file_ext":"py","file_size_in_byte":9191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"311714836","text":"from sklearn.linear_model import LinearRegression\n\nimport pandas\n\nimport numpy\n\n# reading the data from the file \ndataset = pandas.read_csv(\"data.csv\") # you can replace the data from the big data \nHrs = int(input((\"Enter no. of hours you study : \")))\nprint(\"data we have :\\n\",dataset)\n\ny = dataset[\"marks\"]\nx = dataset[\"hrs\"]\n\nx = x.values.reshape(-1,1)\nmodel = LinearRegression()\nmodel.fit(x,y)\n\nprint(\"Model predcted marks : \",model.predict([[Hrs]])[0]) # no of hours will give us the marks \n\n\"\"\"\nOUTPUT :\n\nEnter no. of hours you study : 4\ndata we have :\n hrs marks\n0 2 20\n1 3 30\n2 7 80\n3 9 90\nModel predcted marks : 41.83206106870229\n\n\"\"\"\n","sub_path":"Data_science/Simple linear Regression.py","file_name":"Simple linear Regression.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"217246258","text":"class Solution:\n def canJump(self,A):\n '''\n 1,definition\n -dp[i]: if we can jump to index i\n 2,initialization\n -dp[0]=True\n 3,equation\n -dp[i]=true if we can find dp[j]==true and j+A[j]>=i\n 4,answer\n -dp[len(A)-1]\n '''\n if not A:\n return False\n n = len(A)\n dp = [False for _ in range(n)]\n # print(dp)\n dp[0] = True\n for i in range(n):\n for j in range(i):\n if dp[j] and j + A[j] >= i:\n dp[i] = True\n break\n return dp[n-1]\n\na = Solution()\nA = [2,3,1,1,4]\nprint(a.canJump(A))","sub_path":"lintcode/第九层/116_跳跃游戏.py","file_name":"116_跳跃游戏.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"63802269","text":"import csv\nimport numpy as np\nimport sys\nimport analysis\n\n\nclass Data:\n def __init__(self, filename=None, headers=None, lists=None):\n self.filename = filename\n self.headers = headers\n self.types = []\n self.data = []\n self.header2col = {}\n self.file = None\n #if self.kmeans is true, that means that this was created using kmeans\n self.kmeans = False\n if self.filename is not None:\n self.read(self.filename)\n\n def write(self, filename, headers=None):\n if headers == None:\n tmp = self.headers\n else:\n tmp = self.limit_columns(headers)\n\n file = open(filename, 'w') \n \n for i in range(len(tmp)):\n if tmp[i] == \"None\":\n del tmp[i]\n\n for i in range(len(tmp)):\n 
file.write(tmp[i])\n            if i != len(tmp) - 1:\n                file.write(', ')\n\n        file.write(\"\\n\")\n\n        for i in range(len(tmp)):\n            file.write('numeric')\n            if i != len(tmp) - 1:\n                file.write(', ')\n        \n        file.write(\"\\n\")\n\n        for i in range(len(self.data)):\n            for j in range(len(tmp)):\n                file.write(str(self.data[i,j]))\n                if j != len(tmp) - 1:\n                    file.write(', ')\n            file.write(\"\\n\") \n        print(\"file written to \" + filename)\n\n    def read(self, filename):\n        # get our file\n        file = open(filename, 'r')\n        # read it with the csv module\n        csv_reader = csv.reader(file)\n        # non_numeric will hold the indices of the non-numeric columns\n        non_numeric = []\n        x = 0\n        # loop over the lines\n        for line in csv_reader:\n            if x == 0:  # the first line will be headers\n                self.headers = line\n                for i in range(len(self.headers)):\n                    self.headers[i] = self.headers[i].strip()\n            elif x == 1:  # the second line will be types\n                self.types = line\n                for i in range(len(self.types)):\n                    self.types[i] = self.types[i].strip()\n                    # add i to the list of non-numeric indices so that\n                    # we can remove all non-numeric headers, types, and data\n                    if self.types[i] != 'numeric':\n                        non_numeric.append(i)\n            else:  # the rest will be data\n                # strip whitespace from the raw fields of this line\n                line = [item.strip() for item in line]\n\n                # temp_line allows self.data to be a 2d matrix\n                temp_line = []\n                for i in range(len(line)):\n                    if self.types[i] == \"numeric\":\n                        temp_line.append(line[i])\n                self.data.append(temp_line)\n            x += 1\n\n        # This loop converts data to floats to do math on the data\n        for i in range(len(self.data)):\n            for j in range(len(self.data[i])):\n                self.data[i][j] = float(self.data[i][j])\n\n        # reverse the array so that we don't get out of bounds errors\n        non_numeric.reverse()\n        for nn in non_numeric:\n            del self.headers[nn]\n            del self.types[nn]\n        # i will be the index for header2col\n        i = 0\n        for header in self.headers:\n            self.header2col[header] = i\n            i += 1\n\n        # make it a matrix\n        self.data = np.matrix(self.data)\n        return\n\n    \n    # limit the columns to whatever the user enters or to the first 2\n    def limit_columns(self, headers=None):\n        # initialize the list for the indices of columns we care about\n        relevant_headers = []\n\n        # check if the user entered an argument\n        if headers is None:\n            # if not, just take the first 2 columns\n            if len(self.headers) > 2:\n                relevant_headers = [i for i in range(2)]\n            else:\n                relevant_headers = [i for i in range(len(self.headers))]\n        else:\n            # if they did, get those indices\n            for header in headers:\n                if isinstance(header, int):\n                    relevant_headers.append(header)\n                else:\n                    relevant_headers.append(self.header2col[header])\n        return self.data[:, relevant_headers]\n\n    # limit the rows to either the row indices the user enters or 0-9\n    def limit_rows(self, indices=[i for i in range(10)]):\n        return self.data[indices, :]\n\n    # add a column using hstack\n    def add_column(self, column):\n        if column.shape[0] != self.data.shape[0] or column.shape[1] != 1:\n            print(\"Something went wrong. 
You need a\", self.data.shape[0], \"by 1 numpy matrix.\")\n else:\n #use hstack to put it on the right\n self.data = np.hstack((self.data, column))\n\n # add a row of data using vstack\n def add_point(self, values):\n # check that the length works\n if len(values) != len(self.headers):\n print(\"You need\", len(self.headers), \"values, but you gave\", len(values))\n else:\n # use vstack to put it on the bottom\n self.data = np.vstack((self.data, values))\n\n # accessors\n def get_filename(self):\n return self.filename\n\n def get_headers(self):\n return self.headers\n\n def get_types(self):\n return self.types\n\n def get_data(self):\n return self.data\n\n def set_data(self, d):\n self.data = d\n\n def getHeader2col(self):\n return self.header2col\n\n def get_num_dimensions(self):\n return self.data[0].size\n\n def get_num_points(self):\n return len(self.data)\n\n def get_row(self, row_index):\n return self.data[row_index]\n\n def get_value(self, header, row_index):\n return self.data[row_index, self.header2col[header]]\n\n def set_kmeans(self, val=True):\n self.kmeans = val\n\n def get_kmeans(self):\n return self.kmeans\n\n\n\ndef main(argv):\n # test command line arguments\n if len(argv) < 2:\n print('Usage: python %s ' % (argv[0]))\n exit(0)\n\n # create a data object, which reads in the data\n dobj = Data(argv[1])\n headers = dobj.get_headers()\n # test the five analysis functions\n print([headers[0], headers[2]])\n print(\"Data range by column:\", analysis.data_range([headers[0], headers[2]], dobj))\n print(\"Mean:\", analysis.mean([headers[0], headers[2]], dobj))\n print(\"Standard deviation:\", analysis.stdev([headers[0], headers[2]], dobj))\n print(\"Normalize columns separately:\", analysis.normalize_columns_separately([headers[0], headers[2]], dobj))\n print(\"Normalize columns together:\", analysis.normalize_columns_together([headers[0], headers[2]], dobj))\n print(\"Median:\", analysis.median([headers[0], headers[2]], dobj))\n print(\"Median Separately:\", analysis.median_separately([headers[0], headers[2]], dobj))\n print(\"just few rows:\", dobj.limit_rows())\n print(\"just a few columns. I changed the limit to 2 for demonstration purposes:\", dobj.limit_columns())\n print(\"Data range overall:\", analysis.data_range([headers[0], headers[2]], dobj, True))\n print(\"The next two print statements get the last row of data. 
I add a row of data in between,\"\n          \"so they are different.\")\n    print(dobj.get_row(-1))\n    dobj.add_point([1, 2, 3])\n    print(dobj.get_row(-1))\n\nclass PCAData(Data):\n    def __init__(self, projected_data, eigenvectors=np.matrix([]), eigenvalues=np.matrix([]),\n                 original_data_means=np.matrix([]), original_data_headers=[]):\n        \n        Data.__init__(self, headers=original_data_headers)\n        self.eigenvalues = eigenvalues\n        self.eigenvectors = eigenvectors\n        self.mean_data_values = original_data_means\n        self.original_data_headers = original_data_headers\n        self.data = projected_data\n        self.types = ['numeric' for _ in range(self.data[0].size)]\n        self.headers = ['PCA' + str(i) for i in range(self.data[0].size)]\n        self.header2col = {}\n        for i in range(self.data[0].size):\n            self.header2col[self.headers[i]] = i\n        print(\"self.header2col\",self.header2col)\n\n    def get_eigenvalues(self):\n        return self.eigenvalues\n\n    def get_eigenvectors(self):\n        return self.eigenvectors\n\n    def get_original_means(self):\n        return self.mean_data_values\n\n    def get_original_headers(self):\n        ret = []\n        for header in self.original_data_headers:\n            ret.append(header)\n        return ret\n\n\nif __name__ == \"__main__\":\n    main(sys.argv)","sub_path":"Project7/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":8667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"323171743","text":"import syslog\nimport threading\nimport mysql.connector\nimport pika\nimport json\n\n\ndef log(log_type, log_msg):\n    syslog.syslog(log_type, '{0}:{1}'.format(threading.currentThread().getName(), log_msg))\n\n\ndef log_long(log_type, log_msg):\n    msg_max = 400\n    msg_len = len(log_msg)\n    chunks = int(msg_len / msg_max)\n    for i in range(chunks):\n        startindex = i * msg_max\n        remaining_len = msg_len - startindex\n        endindex = (i + 1) * msg_max if remaining_len > 0 else msg_len\n        syslog.syslog(log_type, '- {0}:{1}'.format(threading.currentThread().getName(), log_msg[startindex : endindex]))\n\ndef GetRawProxies():\n    log(syslog.LOG_INFO, 'Getting raw proxy table settings')\n    cnx = mysql.connector.connect(host=db_host, user=db_user, password=db_pwd, database=db_name)\n    cursor = cnx.cursor()\n    cursor.execute(\"select id, type, ip, port, source from proxy_raw where valid=1\")\n    results = cursor.fetchall()\n\n    proxies = None\n    maxid = None\n    for row in results:\n        _id = row[0]\n        _protocol = row[1]\n        _ip = row[2]\n        _port = row[3]\n        _source = row[4]\n\n        if proxies is None:\n            proxies = {}\n        if _source not in proxies.keys():\n            proxies[_source] = {}\n        if _protocol not in proxies[_source].keys():\n            proxies[_source][_protocol] = []\n\n        proxies[_source][_protocol].append({'ip' : _ip, 'port' : _port})\n        if maxid is None:\n            maxid = _id\n        if _id > maxid:\n            maxid = _id\n    cnx.close()\n    return (proxies, maxid)\n\ndef InsertProxiesToDb(proxies, source):\n    log(syslog.LOG_INFO, 'Upserting new proxies')\n\n    cnx = mysql.connector.connect(host=db_host, user=db_user, password=db_pwd, database=db_name)\n    cursor = cnx.cursor()\n\n    NewProxies = None\n    for proxy_type in proxies.keys():\n        for proxy in proxies[proxy_type]:\n            ip = proxy['ip']\n            port = proxy['port']\n            args = (source, proxy_type, ip, port)\n            cursor.callproc('create_proxy', args)\n            proxy_id = None\n            for result_cursor in cursor.stored_results():\n                for row in result_cursor:\n                    proxy_id = row[0]\n                    break\n                break\n            if proxy_id is not None:\n                log(syslog.LOG_INFO, 'Added new proxy {0}://{1}:{2}'.format(proxy_type, ip, port))\n                if NewProxies is None:\n                    NewProxies = {}\n                if proxy_type 
not in NewProxies.keys():\n NewProxies[proxy_type] = []\n NewProxies[proxy_type].append({'id': proxy_id, 'ip' : ip, 'port' : port})\n cnx.close()\n return NewProxies\n\ndef ClearRawProxiesUpToId(maxid):\n log(syslog.LOG_INFO, 'Upserting new proxies')\n cnx = mysql.connector.connect(host=db_host, user=db_user, password=db_pwd, database=db_name)\n cursor = cnx.cursor()\n args = (maxid,)\n cursor.callproc('clear_raw_proxy', args)\n cnx.close()\n\ndef AddNewProxiesToRabbit(proxies):\n log(syslog.LOG_INFO, 'Adding new proxies to rabbit')\n log(syslog.LOG_INFO, 'Connecting to rabbit')\n rabbit_credentials = pika.PlainCredentials(rabbit_user, rabbit_pwd)\n rabbit_parameters = pika.ConnectionParameters(rabbit_host, rabbit_port, '/', rabbit_credentials)\n rabbit_connection = pika.BlockingConnection(rabbit_parameters)\n for proxy_type in proxies:\n queue = rabbit_q_http_proxy\n if proxy_type == 'https':\n queue = rabbit_q_https_proxy\n channel = rabbit_connection.channel()\n channel.queue_declare(queue=queue)\n for proxy in proxies[proxy_type]:\n log(syslog.LOG_INFO, 'Publishing new proxy to rabbit')\n msg = json.dumps(proxy)\n channel.basic_publish(exchange='', routing_key=queue, body=msg)\n channel.close()\n\nsyslog.openlog('sslproxy.org_loader', syslog.LOG_PID, syslog.LOG_USER)\n\nrabbit_user = 'guest'\nrabbit_pwd = 'guest'\nrabbit_host = '107.170.154.102'\nrabbit_port = 5672\nrabbit_url = 'amqp://{0}:{1}@{2}:{3}/'.format(rabbit_user, rabbit_pwd, rabbit_host, rabbit_port)\nrabbit_q_extractor = 'extractor'\nrabbit_q_dl_transcoder = 'downloadtranscoder'\nrabbit_q_http_proxy = 'http_proxy'\nrabbit_q_https_proxy = 'https_proxy'\n\n\n# setup mysql connection\ndb_host = '167.88.34.62'\ndb_user = 'Brun0'\ndb_pwd = '65UB3b3$'\ndb_name = 'vidblit'\n\n\nproxies, maxid = GetRawProxies()\nif proxies is not None:\n for source in proxies.keys():\n new_proxies = InsertProxiesToDb(proxies[source], source)\n if new_proxies is not None:\n AddNewProxiesToRabbit(new_proxies)\nif maxid is not None:\n ClearRawProxiesUpToId(maxid)","sub_path":"var/vidblit/scripts/python/proxy_db_loader.py","file_name":"proxy_db_loader.py","file_ext":"py","file_size_in_byte":4660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"271115887","text":"from time import sleep\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\n\n# init driver\ndriver = webdriver.Chrome(executable_path='/Users/andrei/Automation/python-selenium-automation/chromedriver')\ndriver.maximize_window()\n\n# open the page\ndriver.get('https://www.amazon.com/gp/help/customer/display.html')\ndriver.implicitly_wait(4)\n\ninput_field = driver.find_element(By.ID, 'helpsearch')\ninput_field.clear()\ninput_field.send_keys('Cancel order')\n\n\nsearch_icon = driver.find_element(By.ID, 'helpSearchSubmit')\nsearch_icon.click()\n\ntext = driver.find_element(By.XPATH, \"//p[@class='a-color-secondary']/b\").text\n\nassert text == 'Cancel order', f'Incorrect text {text}.'\n\ndriver.quit()\n","sub_path":"amazon_canceling_order.py","file_name":"amazon_canceling_order.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"617057861","text":"from setuptools import setup, find_packages\nfrom ansibleplaybookgrapher import __version__, __prog__\n\ntry:\n import pypandoc\n\n long_description = pypandoc.convert_file('Readme.md', 'rst')\nexcept:\n long_description = None\n\nsetup(name=__prog__,\n version=__version__,\n 
description=\"A command line tool to create a graph representing your Ansible playbook tasks and roles\",\n long_description=long_description,\n url=\"https://github.com/haidaraM/ansible-playbook-grapher\",\n author=\"HAIDARA Mohamed El Mouctar\",\n author_email=\"elmhaidara@gmail.com\",\n license=\"MIT\",\n install_requires=['graphviz==0.8.1', 'colour==0.1.5', 'lxml==4.1.1', 'ansible>=2.4.0'],\n tests_requires=['pytest==3.2.3', 'pytest-cov==2.5.1'],\n packages=find_packages(exclude=['tests']),\n package_data={\"ansible-playbook-grapher\": ['data/*']},\n include_package_data=True,\n download_url=\"https://github.com/haidaraM/ansible-playbook-grapher/archive/v\" + __version__ + \".tar.gz\",\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Environment :: Console',\n 'Topic :: Utilities',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 2.7',\n ],\n entry_points={\n 'console_scripts': [\n '%s = ansibleplaybookgrapher.cli:main' % __prog__\n ]\n })\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"618006432","text":"import typing\nfrom enum import Enum\nfrom urllib.parse import unquote_plus\n\nfrom starlette.datastructures import FormData, Headers, UploadFile\n\ntry:\n from multipart.multipart import parse_options_header\n import multipart\nexcept ImportError: # pragma: nocover\n parse_options_header = None # type: ignore\n multipart = None # type: ignore\n\n\nclass FormMessage(Enum):\n FIELD_START = 1\n FIELD_NAME = 2\n FIELD_DATA = 3\n FIELD_END = 4\n END = 5\n\n\nclass MultiPartMessage(Enum):\n PART_BEGIN = 1\n PART_DATA = 2\n PART_END = 3\n HEADER_FIELD = 4\n HEADER_VALUE = 5\n HEADER_END = 6\n HEADERS_FINISHED = 7\n END = 8\n\n\nclass FormParser:\n def __init__(\n self, headers: Headers, stream: typing.AsyncGenerator[bytes, None]\n ) -> None:\n assert (\n multipart is not None\n ), \"The `python-multipart` library must be installed to use form parsing.\"\n self.headers = headers\n self.stream = stream\n self.messages = [] # type: typing.List[typing.Tuple[FormMessage, bytes]]\n\n def on_field_start(self) -> None:\n message = (FormMessage.FIELD_START, b\"\")\n self.messages.append(message)\n\n def on_field_name(self, data: bytes, start: int, end: int) -> None:\n message = (FormMessage.FIELD_NAME, data[start:end])\n self.messages.append(message)\n\n def on_field_data(self, data: bytes, start: int, end: int) -> None:\n message = (FormMessage.FIELD_DATA, data[start:end])\n self.messages.append(message)\n\n def on_field_end(self) -> None:\n message = (FormMessage.FIELD_END, b\"\")\n self.messages.append(message)\n\n def on_end(self) -> None:\n message = (FormMessage.END, b\"\")\n self.messages.append(message)\n\n async def parse(self) -> FormData:\n # Callbacks dictionary.\n callbacks = {\n \"on_field_start\": self.on_field_start,\n \"on_field_name\": self.on_field_name,\n \"on_field_data\": self.on_field_data,\n \"on_field_end\": self.on_field_end,\n \"on_end\": self.on_end,\n }\n\n # Create the parser.\n parser = multipart.QuerystringParser(callbacks)\n field_name = b\"\"\n field_value = b\"\"\n\n items = (\n []\n ) # type: typing.List[typing.Tuple[str, typing.Union[str, UploadFile]]]\n\n # Feed the parser with data from the request.\n async for chunk in self.stream:\n if chunk:\n parser.write(chunk)\n else:\n parser.finalize()\n 
messages = list(self.messages)\n self.messages.clear()\n for message_type, message_bytes in messages:\n if message_type == FormMessage.FIELD_START:\n field_name = b\"\"\n field_value = b\"\"\n elif message_type == FormMessage.FIELD_NAME:\n field_name += message_bytes\n elif message_type == FormMessage.FIELD_DATA:\n field_value += message_bytes\n elif message_type == FormMessage.FIELD_END:\n name = unquote_plus(field_name.decode(\"latin-1\"))\n value = unquote_plus(field_value.decode(\"latin-1\"))\n items.append((name, value))\n elif message_type == FormMessage.END:\n pass\n\n return FormData(items)\n\n\nclass MultiPartParser:\n def __init__(\n self, headers: Headers, stream: typing.AsyncGenerator[bytes, None]\n ) -> None:\n assert (\n multipart is not None\n ), \"The `python-multipart` library must be installed to use form parsing.\"\n self.headers = headers\n self.stream = stream\n self.messages = [] # type: typing.List[typing.Tuple[MultiPartMessage, bytes]]\n\n def on_part_begin(self) -> None:\n message = (MultiPartMessage.PART_BEGIN, b\"\")\n self.messages.append(message)\n\n def on_part_data(self, data: bytes, start: int, end: int) -> None:\n message = (MultiPartMessage.PART_DATA, data[start:end])\n self.messages.append(message)\n\n def on_part_end(self) -> None:\n message = (MultiPartMessage.PART_END, b\"\")\n self.messages.append(message)\n\n def on_header_field(self, data: bytes, start: int, end: int) -> None:\n message = (MultiPartMessage.HEADER_FIELD, data[start:end])\n self.messages.append(message)\n\n def on_header_value(self, data: bytes, start: int, end: int) -> None:\n message = (MultiPartMessage.HEADER_VALUE, data[start:end])\n self.messages.append(message)\n\n def on_header_end(self) -> None:\n message = (MultiPartMessage.HEADER_END, b\"\")\n self.messages.append(message)\n\n def on_headers_finished(self) -> None:\n message = (MultiPartMessage.HEADERS_FINISHED, b\"\")\n self.messages.append(message)\n\n def on_end(self) -> None:\n message = (MultiPartMessage.END, b\"\")\n self.messages.append(message)\n\n async def parse(self) -> FormData:\n # Parse the Content-Type header to get the multipart boundary.\n content_type, params = parse_options_header(self.headers[\"Content-Type\"])\n boundary = params.get(b\"boundary\")\n\n # Callbacks dictionary.\n callbacks = {\n \"on_part_begin\": self.on_part_begin,\n \"on_part_data\": self.on_part_data,\n \"on_part_end\": self.on_part_end,\n \"on_header_field\": self.on_header_field,\n \"on_header_value\": self.on_header_value,\n \"on_header_end\": self.on_header_end,\n \"on_headers_finished\": self.on_headers_finished,\n \"on_end\": self.on_end,\n }\n\n # Create the parser.\n parser = multipart.MultipartParser(boundary, callbacks)\n header_field = b\"\"\n header_value = b\"\"\n raw_headers = [] # type: typing.List[typing.Tuple[bytes, bytes]]\n field_name = \"\"\n data = b\"\"\n file = None # type: typing.Optional[UploadFile]\n\n items = (\n []\n ) # type: typing.List[typing.Tuple[str, typing.Union[str, UploadFile]]]\n\n # Feed the parser with data from the request.\n async for chunk in self.stream:\n parser.write(chunk)\n messages = list(self.messages)\n self.messages.clear()\n for message_type, message_bytes in messages:\n if message_type == MultiPartMessage.PART_BEGIN:\n raw_headers = []\n data = b\"\"\n elif message_type == MultiPartMessage.HEADER_FIELD:\n header_field += message_bytes\n elif message_type == MultiPartMessage.HEADER_VALUE:\n header_value += message_bytes\n elif message_type == MultiPartMessage.HEADER_END:\n 
raw_headers.append((header_field.lower(), header_value))\n header_field = b\"\"\n header_value = b\"\"\n elif message_type == MultiPartMessage.HEADERS_FINISHED:\n headers = Headers(raw=raw_headers)\n content_disposition = headers.get(\"Content-Disposition\")\n content_type = headers.get(\"Content-Type\", \"\")\n disposition, options = parse_options_header(content_disposition)\n field_name = options[b\"name\"].decode(\"latin-1\")\n if b\"filename\" in options:\n filename = options[b\"filename\"].decode(\"latin-1\")\n file = UploadFile(filename=filename, content_type=content_type)\n else:\n file = None\n elif message_type == MultiPartMessage.PART_DATA:\n if file is None:\n data += message_bytes\n else:\n await file.write(message_bytes)\n elif message_type == MultiPartMessage.PART_END:\n if file is None:\n items.append((field_name, data.decode(\"latin-1\")))\n else:\n await file.seek(0)\n items.append((field_name, file))\n elif message_type == MultiPartMessage.END:\n pass\n\n parser.finalize()\n return FormData(items)\n","sub_path":"starlette/formparsers.py","file_name":"formparsers.py","file_ext":"py","file_size_in_byte":8330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"171634890","text":"# import sys\n\n# MAX = sys.maxsize\n\ndef coin_change_n_ways(Coins, m, total):\n # Instatiate memo table for tablization\n\n memo = [0] * (total + 1)\n\n # There is 1 way to get zero, no coins\n memo[0] = 1\n\n for coin in Coins:\n for j in range(coin, total + 1):\n memo[j] += memo[j - coin]\n\n # print(f'{memo}')\n return memo[total]\n\ndef coin_change_min_ways(Coins, m, amount):\n MAX = float('inf') # a number bigger than all others\n dp = [0] + [MAX] * amount\n\n for i in range(1, amount + 1):\n dp[i] = min([dp[i - c] if i - c >= 0 else MAX for c in Coins]) + 1\n\n return [dp[amount], -1][dp[amount] == MAX]\n\n\nif __name__ == '__main__':\n # Python3 f strings\n print(f'{coin_change_n_ways([1, 2, 5, 10], 4, 5)} ways to make 5')\n print(f'{coin_change_n_ways([1, 2, 5, 10], 4, 99)} ways to make 99')\n\n print(\n f'{coin_change_min_ways([1, 2, 4, 10], 4, 5)} min # of ways to make 5')\n print(\n f'{coin_change_min_ways([1, 2, 5, 10], 4, 99)} min # of ways to make 99')\n","sub_path":"algorithms/python/dp/coin_change.py","file_name":"coin_change.py","file_ext":"py","file_size_in_byte":1033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"540805607","text":"#!/usr/bin/python3\nfrom googlejam.tools import *\n\n# how long will it take to get to the goal with given income, and wealth?\ndef simulate(income, goal, time=0, wealth=0):\n return time + ((goal - wealth) / income)\n\n# we can make a generator that just assumes we only buy farms\ndef simulation(farmcost,farmincome, farms = 0, time = 0, max_time=None):\n BASE_INCOME = 2.0\n while max_time == None or max_time > time:\n income = BASE_INCOME + farmincome * farms\n tick = simulate(income, farmcost)\n yield farms, income, time\n time += tick\n farms += 1\n \n\nclass Jam:\n split = \" \"\n cases = line_cases((\"C\",\"F\",\"X\"))\n def solve(F,X,C):\n farmincome,wincost,farmcost = map(lambda x:float(x[0]),[F,X,C])\n best_time = float(\"+inf\")\n for farms,income,time in simulation(farmcost,farmincome):\n time_to_win = simulate(income, wincost, time)\n time_to_next_farm = simulate(income, wincost, time)\n time_to_recover_cost = simulate(farmincome, farmcost, time)\n best_time = min(time_to_win,best_time)\n if time_to_win < time_to_recover_cost or 
time_to_win < time_to_next_farm:\n return \"{:.7f}\".format(best_time)\nimport googlejam.main\n","sub_path":"solutions_5709773144064000_0/Python/00500005/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"573978884","text":"import json\nfrom v20.base_entity import BaseEntity\nfrom v20.base_entity import Property\nfrom v20.base_entity import EntityDict\nfrom v20.request import Request\n\n\n\nclass Price(BaseEntity):\n _summary_format = \"\"\n _name_format = \"\"\n\n _properties = [\n Property(\n \"type\",\n \"Type\",\n \"The string \\\"PRICE\\\". Used to identify the a Price object when found in a stream.\",\n \"primitive\",\n \"string\",\n False,\n \"PRICE\"\n ),\n Property(\n \"instrument\",\n \"instrument\",\n \"The Price's Instrument.\",\n \"primitive\",\n \"primitives.InstrumentName\",\n False,\n None\n ),\n Property(\n \"time\",\n \"time\",\n \"The date/time when the Price was created\",\n \"primitive\",\n \"primitives.DateTime\",\n False,\n None\n ),\n Property(\n \"status\",\n \"status\",\n \"The status of the Price.\",\n \"primitive\",\n \"pricing.PriceStatus\",\n False,\n None\n ),\n Property(\n \"bids\",\n \"bids\",\n \"The list of prices and liquidity available on the Instrument's bid side. It is possible for this list to be empty if there is no bid liquidity currently available for the Instrument in the Account.\",\n \"array_object\",\n \"PriceBucket\",\n False,\n None\n ),\n Property(\n \"asks\",\n \"asks\",\n \"The list of prices and liquidity available on the Instrument's ask side. It is possible for this list to be empty if there is no ask liquidity currently available for the Instrument in the Account.\",\n \"array_object\",\n \"PriceBucket\",\n False,\n None\n ),\n Property(\n \"closeoutBid\",\n \"closeoutBid\",\n \"The closeout bid Price. This Price is used when a bid is required to closeout a Position (margin closeout or manual) yet there is no bid liquidity. The closeout bid is never used to open a new position.\",\n \"primitive\",\n \"pricing.PriceValue\",\n False,\n None\n ),\n Property(\n \"closeoutAsk\",\n \"closeoutAsk\",\n \"The closeout ask Price. This Price is used when a ask is required to closeout a Position (margin closeout or manual) yet there is no ask liquidity. 
The closeout ask is never used to open a new position.\",\n \"primitive\",\n \"pricing.PriceValue\",\n False,\n None\n ),\n Property(\n \"quoteHomeConversionFactors\",\n \"quoteHomeConversionFactors\",\n \"The factors used to convert quantities of this price's Instrument's quote currency into a quantity of the Account's home currency.\",\n \"object\",\n \"pricing.QuoteHomeConversionFactors\",\n False,\n None\n ),\n Property(\n \"unitsAvailable\",\n \"unitsAvailable\",\n \"Representation of many units of an Instrument are available to be traded for both long and short Orders.\",\n \"object\",\n \"pricing.UnitsAvailable\",\n False,\n None\n ),\n ]\n\n def __init__(self, **kwargs):\n super(Price, self).__init__()\n for prop in self._properties:\n setattr(self, prop.name, kwargs.get(prop.name, prop.default))\n\n @staticmethod\n def from_dict(data):\n\n body = {}\n if data.get('type') is not None:\n body['type'] = \\\n data.get('type')\n\n if data.get('instrument') is not None:\n body['instrument'] = \\\n data.get('instrument')\n\n if data.get('time') is not None:\n body['time'] = \\\n data.get('time')\n\n if data.get('status') is not None:\n body['status'] = \\\n data.get('status')\n\n if data.get('bids') is not None:\n body['bids'] = [\n PriceBucket.from_dict(d)\n for d in data.get('bids')\n ]\n\n if data.get('asks') is not None:\n body['asks'] = [\n PriceBucket.from_dict(d)\n for d in data.get('asks')\n ]\n\n if data.get('closeoutBid') is not None:\n body['closeoutBid'] = \\\n data.get('closeoutBid')\n\n if data.get('closeoutAsk') is not None:\n body['closeoutAsk'] = \\\n data.get('closeoutAsk')\n\n if data.get('quoteHomeConversionFactors') is not None:\n body['quoteHomeConversionFactors'] = \\\n QuoteHomeConversionFactors.from_dict(\n data['quoteHomeConversionFactors']\n )\n\n if data.get('unitsAvailable') is not None:\n body['unitsAvailable'] = \\\n UnitsAvailable.from_dict(\n data['unitsAvailable']\n )\n\n self = Price(**body)\n\n return self\n\n\nclass PriceBucket(BaseEntity):\n _summary_format = \"\"\n _name_format = \"\"\n\n _properties = [\n Property(\n \"price\",\n \"price\",\n \"The Price offered by the PriceBucket\",\n \"primitive\",\n \"pricing.PriceValue\",\n False,\n None\n ),\n Property(\n \"liquidity\",\n \"liquidity\",\n \"The amount of liquidity offered by the PriceBucket\",\n \"primitive\",\n \"integer\",\n False,\n None\n ),\n ]\n\n def __init__(self, **kwargs):\n super(PriceBucket, self).__init__()\n for prop in self._properties:\n setattr(self, prop.name, kwargs.get(prop.name, prop.default))\n\n @staticmethod\n def from_dict(data):\n\n body = {}\n if data.get('price') is not None:\n body['price'] = \\\n data.get('price')\n\n if data.get('liquidity') is not None:\n body['liquidity'] = \\\n data.get('liquidity')\n\n self = PriceBucket(**body)\n\n return self\n\n\nclass UnitsAvailable(BaseEntity):\n _summary_format = \"\"\n _name_format = \"\"\n\n _properties = [\n Property(\n \"long\",\n \"long\",\n \"The units available breakdown for long Orders.\",\n \"object\",\n \"pricing.UnitsAvailableDetails\",\n False,\n None\n ),\n Property(\n \"short\",\n \"short\",\n \"The units available breakdown for short Orders.\",\n \"object\",\n \"pricing.UnitsAvailableDetails\",\n False,\n None\n ),\n ]\n\n def __init__(self, **kwargs):\n super(UnitsAvailable, self).__init__()\n for prop in self._properties:\n setattr(self, prop.name, kwargs.get(prop.name, prop.default))\n\n @staticmethod\n def from_dict(data):\n\n body = {}\n if data.get('long') is not None:\n body['long'] = \\\n 
UnitsAvailableDetails.from_dict(\n data['long']\n )\n\n if data.get('short') is not None:\n body['short'] = \\\n UnitsAvailableDetails.from_dict(\n data['short']\n )\n\n self = UnitsAvailable(**body)\n\n return self\n\n\nclass UnitsAvailableDetails(BaseEntity):\n _summary_format = \"\"\n _name_format = \"\"\n\n _properties = [\n Property(\n \"default\",\n \"default\",\n \"The number of units that are available to be traded using an Order with a positionFill option of \\\"DEFAULT\\\". For an Account with hedging enabled, this value will be the same as the \\\"OPEN_ONLY\\\" value. For an Account without hedging enabled, this value will be the same as the \\\"REDUCE_FIRST\\\" value.\",\n \"primitive\",\n \"primitives.DecimalNumber\",\n False,\n None\n ),\n Property(\n \"reduceFirst\",\n \"reduceFirst\",\n \"The number of units that may are available to be traded with an Order with a positionFill option of \\\"REDUCE_FIRST\\\".\",\n \"primitive\",\n \"primitives.DecimalNumber\",\n False,\n None\n ),\n Property(\n \"reduceOnly\",\n \"reduceOnly\",\n \"The number of units that may are available to be traded with an Order with a positionFill option of \\\"REDUCE_ONLY\\\".\",\n \"primitive\",\n \"primitives.DecimalNumber\",\n False,\n None\n ),\n Property(\n \"openOnly\",\n \"openOnly\",\n \"The number of units that may are available to be traded with an Order with a positionFill option of \\\"OPEN_ONLY\\\".\",\n \"primitive\",\n \"primitives.DecimalNumber\",\n False,\n None\n ),\n ]\n\n def __init__(self, **kwargs):\n super(UnitsAvailableDetails, self).__init__()\n for prop in self._properties:\n setattr(self, prop.name, kwargs.get(prop.name, prop.default))\n\n @staticmethod\n def from_dict(data):\n\n body = {}\n if data.get('default') is not None:\n body['default'] = \\\n data.get('default')\n\n if data.get('reduceFirst') is not None:\n body['reduceFirst'] = \\\n data.get('reduceFirst')\n\n if data.get('reduceOnly') is not None:\n body['reduceOnly'] = \\\n data.get('reduceOnly')\n\n if data.get('openOnly') is not None:\n body['openOnly'] = \\\n data.get('openOnly')\n\n self = UnitsAvailableDetails(**body)\n\n return self\n\n\nclass QuoteHomeConversionFactors(BaseEntity):\n _summary_format = \"\"\n _name_format = \"\"\n\n _properties = [\n Property(\n \"positiveUnits\",\n \"positiveUnits\",\n \"The factor used to convert a positive amount of the Price's Instrument's quote currency into a positive amount of the Account's home currency. Conversion is performed by multiplying the quote units by the conversion factor.\",\n \"primitive\",\n \"primitives.DecimalNumber\",\n False,\n None\n ),\n Property(\n \"negativeUnits\",\n \"negativeUnits\",\n \"The factor used to convert a negative amount of the Price's Instrument's quote currency into a negative amount of the Account's home currency. 
Conversion is performed by multiplying the quote units by the conversion factor.\",\n \"primitive\",\n \"primitives.DecimalNumber\",\n False,\n None\n ),\n ]\n\n def __init__(self, **kwargs):\n super(QuoteHomeConversionFactors, self).__init__()\n for prop in self._properties:\n setattr(self, prop.name, kwargs.get(prop.name, prop.default))\n\n @staticmethod\n def from_dict(data):\n\n body = {}\n if data.get('positiveUnits') is not None:\n body['positiveUnits'] = \\\n data.get('positiveUnits')\n\n if data.get('negativeUnits') is not None:\n body['negativeUnits'] = \\\n data.get('negativeUnits')\n\n self = QuoteHomeConversionFactors(**body)\n\n return self\n\n\nclass Heartbeat(BaseEntity):\n _summary_format = \"Pricing Heartbeat {time}\"\n _name_format = \"\"\n\n _properties = [\n Property(\n \"type\",\n \"type\",\n \"The string \\\"HEARTBEAT\\\"\",\n \"primitive\",\n \"string\",\n False,\n \"HEARTBEAT\"\n ),\n Property(\n \"time\",\n \"time\",\n \"The date/time when the Heartbeat was created.\",\n \"primitive\",\n \"primitives.DateTime\",\n False,\n None\n ),\n ]\n\n def __init__(self, **kwargs):\n super(Heartbeat, self).__init__()\n for prop in self._properties:\n setattr(self, prop.name, kwargs.get(prop.name, prop.default))\n\n @staticmethod\n def from_dict(data):\n\n body = {}\n if data.get('type') is not None:\n body['type'] = \\\n data.get('type')\n\n if data.get('time') is not None:\n body['time'] = \\\n data.get('time')\n\n self = Heartbeat(**body)\n\n return self\n\nclass EntitySpec(object):\n Price = Price\n PriceBucket = PriceBucket\n UnitsAvailable = UnitsAvailable\n UnitsAvailableDetails = UnitsAvailableDetails\n QuoteHomeConversionFactors = QuoteHomeConversionFactors\n Heartbeat = Heartbeat\n\n def __init__(self, ctx):\n self.ctx = ctx\n\n\n def get(\n self,\n accountID,\n **kwargs\n ):\n \"\"\"Current Prices\n\n Get pricing information for a specified list of Instruments within an\n Account.\n\n Parameters\n ----------\n accountID : \n ID of the Account to fetch current Prices for.\n instruments : array, optional\n List of Instruments to get pricing for.\n since : , optional\n Date/Time filter to apply to the returned prices. 
Only prices with\n a time later than this filter will be provided.\n includeUnitsAvailable : , optional\n Flag that enables the inclusion of the unitsAvailable field in the\n returned Price objects.\n \"\"\"\n\n\n request = Request(\n 'GET',\n '/v3/accounts/{accountID}/pricing'\n )\n\n request.set_path_param(\n 'accountID',\n accountID\n )\n\n request.set_param(\n 'instruments',\n kwargs.get('instruments')\n )\n\n request.set_param(\n 'since',\n kwargs.get('since')\n )\n\n request.set_param(\n 'includeUnitsAvailable',\n kwargs.get('includeUnitsAvailable')\n )\n\n response = self.ctx.request(request)\n\n\n if response.content_type is None:\n return response\n\n if not response.content_type.startswith(\"application/json\"):\n return response\n\n jbody = json.loads(response.raw_body)\n\n parsed_body = {}\n\n if str(response.status) == \"200\":\n if jbody.get('prices') is not None:\n parsed_body['prices'] = [\n Price.from_dict(d)\n for d in jbody.get('prices')\n ]\n\n\n if str(response.status) == \"400\":\n if jbody.get('errorCode') is not None:\n parsed_body['errorCode'] = \\\n jbody.get('errorCode')\n\n if jbody.get('errorMessage') is not None:\n parsed_body['errorMessage'] = \\\n jbody.get('errorMessage')\n\n\n if str(response.status) == \"401\":\n if jbody.get('errorCode') is not None:\n parsed_body['errorCode'] = \\\n jbody.get('errorCode')\n\n if jbody.get('errorMessage') is not None:\n parsed_body['errorMessage'] = \\\n jbody.get('errorMessage')\n\n\n if str(response.status) == \"404\":\n if jbody.get('errorCode') is not None:\n parsed_body['errorCode'] = \\\n jbody.get('errorCode')\n\n if jbody.get('errorMessage') is not None:\n parsed_body['errorMessage'] = \\\n jbody.get('errorMessage')\n\n\n if str(response.status) == \"405\":\n if jbody.get('errorCode') is not None:\n parsed_body['errorCode'] = \\\n jbody.get('errorCode')\n\n if jbody.get('errorMessage') is not None:\n parsed_body['errorMessage'] = \\\n jbody.get('errorMessage')\n\n\n response.body = parsed_body\n\n return response\n\n\n def stream(\n self,\n accountID,\n **kwargs\n ):\n \"\"\"Price Stream\n\n Get a stream of Prices for an Account starting from when the request is\n made.\n\n Parameters\n ----------\n accountID : \n ID of the Account to stream Prices for.\n instruments : array, optional\n List of Instruments to stream Prices for.\n snapshot : , optional\n Flag that enables/disables the sending of a pricing snapshot when\n initially connecting to the stream.\n \"\"\"\n\n\n request = Request(\n 'GET',\n '/v3/accounts/{accountID}/pricing/stream'\n )\n\n request.set_path_param(\n 'accountID',\n accountID\n )\n\n request.set_param(\n 'instruments',\n kwargs.get('instruments')\n )\n\n request.set_param(\n 'snapshot',\n kwargs.get('snapshot')\n )\n\n request.set_stream(True)\n\n class Parser():\n def __init__(self, ctx):\n self.ctx = ctx\n\n def __call__(self, line):\n j = json.loads(line)\n\n type = j.get(\"type\")\n\n if type is None:\n return (\n \"pricing.Price\",\n self.ctx.pricing.Price.from_dict(j)\n )\n elif type == \"HEARTBEAT\":\n return (\n \"pricing.Heartbeat\",\n self.ctx.pricing.Heartbeat.from_dict(j)\n )\n\n return (\n \"pricing.Price\",\n self.ctx.pricing.Price.from_dict(j)\n )\n\n \n request.set_line_parser(\n Parser(self.ctx)\n )\n\n response = self.ctx.request(request)\n\n\n return response\n\n","sub_path":"src/v20/pricing.py","file_name":"pricing.py","file_ext":"py","file_size_in_byte":17837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} 
+{"seq_id":"449277497","text":"import getopt\nimport os\nimport sys\n\nconfigMap = {}\n\ndef replace(line):\n\tnewLine = line.strip()\n\tif not newLine.startswith('#') and not len(newLine) == 0:\n\t\tfor key in configMap:\n\t\t\tnewLine = newLine.replace(key, configMap[key])\n\treturn newLine + os.linesep\n\ndef main():\n\n\ttry:\n\t\t### Open the config file\n\t\tconfigFile = open('config.txt', 'r')\n\n\t\t### Read each line\n\t\tfor line in configFile:\n\t\t\t### Split each line into key/value pairs\n\t\t\tline = line.strip()\n\t\t\tif not line.startswith('#') and not len(line) == 0:\n\t\t\t\tkeyValue = line.split('=')\n\t\t\t\tkey = keyValue[0].strip()\n\t\t\t\tkey = key.join(['${', '}'])\n\t\t\t\tvalue = keyValue[1].strip()\n\t\t\t\t### Add to the config map\n\t\t\t\tconfigMap[key] = value\n\tfinally:\n\t\tconfigFile.close()\n\n\ttry:\n\t\t### Open the template file\n\t\ttemplateFile = open('template.txt', 'r')\n\n\t\t### Open the target file\n\t\ttargetFile = open('target.txt', 'w', 1)\n\n\t\tfor line in templateFile:\n\t\t\t### Search each line for property tokens\n\t\t\tline = replace(line)\n\t\t\ttargetFile.write(line)\n\tfinally:\n\t\ttemplateFile.close()\n\t\ttargetFile.close()\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"test/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"307602212","text":"from machine import Pin,PWM\nfrom simp_py import mon\nfrom array import array\nimport time\n# note\nC=2441\nD=2741\nE=3048\nF=3225\nG=3654\nA=4058\nB=4562\nC2=4882\nDUTY_ON=512\nDUTY_OFF=0\n\njingle_bells_s=array('H', [E,E,E,E,E,E,E,G,C,D,E,F,F,F,F,F,E,E,E,E,D,D,E,D,G])\njingle_bells_l=bytearray([1,1,2,1,1,2,1,1,1,1,4,1,1,1,1,1,1,1,1,1,1,1,1,2,2])\nlittle_lamb_s =array('H', [B,A,G,A,B,B,B,A,A,A,B,B,B,B,A,G,A,B,B,B,A,A,B,A,G,G])\nlittle_lamb_l= bytearray([1,1,1,1,1,1,2,1,1,2,1,1,2,1,1,1,1,1,1,2,1,1,1,1,2,2])\nsongs_info={\n b'jingle bells': [jingle_bells_s, jingle_bells_l],\n b'little lamb': [little_lamb_s, little_lamb_l],\n }\n \nclass SONGS:\n def __init__(self,pinx):\n global Pin, PWM\n self.speaker=PWM(Pin(pinx, Pin.OUT))\n self.speaker.duty(0)\n\n def run(self):\n global time, mon, songs_info\n while 1:\n if mon.chk_ureq():\n ureq = mon.get_ureq()\n song_info = songs_info.get(ureq,[None,None])\n if song_info[0]:\n uresp= b'%s played' % ureq\n mon.put_uresp(uresp) \n self.play(song_info)\n else:\n uresp= b'No music played'\n mon.put_uresp(uresp)\n time.sleep(0.02)\n \n def play(self,song_info):\n global DUTY_ON, DUTY_OFF\n song = song_info[0]\n note_len=song_info[1]\n for i in range(len(song)):\n self.speaker.freq(song[i])\n self.speaker.duty(DUTY_ON)\n time.sleep(0.4 * note_len[i])\n self.speaker.duty(DUTY_OFF)\n time.sleep(0.05)\n \n\nsongs = SONGS(21)\nsongs.run()\n","sub_path":"simp_py_examples/wifikit32/ex003_remote_songs.py","file_name":"ex003_remote_songs.py","file_ext":"py","file_size_in_byte":1696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"14827552","text":"import re\nimport csv\nimport json\nfrom typing import Tuple, List\n\n\ndef rephrase_question(x: str) -> str:\n if len(x) < 2:\n return x\n if x[-1] == '?':\n x = x[:-1]\n for opener in ['how do i ', 'how do you ', 'how can i ', 'how to ', 'best way to ', 'can i ',\n 'is there a way to ', 'easiest way to ', 'best implementation for ',\n 'best implementation of ', 'what is the best way to ', 'what is the proper way to ',\n 'is it 
possible to ', 'would it be possible to '\n 'how ', 'c# how to ', 'c# how ', 'c# - ', 'c# ']:\n if x.lower().startswith(opener):\n x = x[len(opener):]\n for closer in [' in c#', ' with c#', ' using c#', ' c#']:\n if x.lower().endswith(closer):\n x = x[:-len(closer)]\n return x\n\n\ndef preprocess(x: str, remove_stars=False, remove_java_doc_vars=False, remove_html_tags=False, remove_comments=False,\n remove_start_and_end_quotes=False, rephrase=False, lower=False, to_edinburgh_format=False) -> str:\n if to_edinburgh_format:\n if x.endswith('\\n'):\n x = x[:-len('\\n')]\n x = x.replace('\\n', ' DCNL ')\n x = x.replace(' ', ' DCSP ')\n x = x.replace('\\t', ' DCSP ')\n if remove_java_doc_vars:\n x = re.sub(r'(?', ' ', x)\n x = x.replace('\\\\n', ' ').replace('\\n', ' ')\n x = x.replace('\\\\t', ' ').replace('\\t', ' ')\n if remove_stars:\n x = x.replace('/*', ' ').replace('*/', ' ').replace('*', ' ')\n if remove_start_and_end_quotes:\n x = x.strip()\n if x.startswith(\"'\"):\n x = x[len(\"'\"):]\n if x.endswith(\"'\"):\n x = x[:-len(\"'\")]\n if x.startswith('\"'):\n x = x[len('\"'):]\n if x.endswith('\"'):\n x = x[:-len('\"')]\n x = x.strip()\n x = re.sub(r'(\\s\\s+)', ' ', x)\n if rephrase:\n x = rephrase_question(x)\n if lower:\n x = x.lower()\n return x\n\n\ndef preprocess_csharp_or_java(x: str) -> str:\n return preprocess(x, remove_comments=True, remove_start_and_end_quotes=True)\n\n\ndef preprocess_javadoc(x: str) -> str:\n return preprocess(x, remove_stars=True, remove_java_doc_vars=True, remove_html_tags=True)\n\n\ndef preprocess_stackoverflow_summary(x: str) -> str:\n return preprocess(x, rephrase=True, remove_html_tags=True, lower=True, remove_start_and_end_quotes=True)\n\n\ndef preprocess_edinburgh_python_or_summary(x: str) -> str:\n return preprocess(x, remove_start_and_end_quotes=True)\n\n\ndef preprocess_user_generated_python(x: str) -> str:\n return preprocess(x, to_edinburgh_format=True)\n\n\ndef postprocess_edinburgh_format(x: str) -> str:\n x = x.replace(' DCNL ', '\\n').replace(' DCNL', '\\n').replace('DCML ', '\\n')\n x = x.replace(' DCSP ', ' ').replace(' DCSP', ' ').replace('DCSP ', ' ')\n return x\n\n\ndef tokenize_text(text: str) -> List[str]:\n \"\"\"\n Splits a text into tokens using a simple regex, and adds start and end tokens.\n \"\"\"\n words_re = re.compile(r'(\\w+|[^\\w\\s])')\n return [''] + words_re.findall(text) + ['']\n\n\ndef tokenize_texts(texts: List[str]) -> List[List[str]]:\n return [tokenize_text(text) for text in texts]\n\n\ndef edinburgh_dataset_as_generator(summaries_path: str, codes_path: str):\n summaries_file = open(summaries_path, encoding='utf-8', errors='ignore')\n codes_file = open(codes_path, encoding='utf-8', errors='ignore')\n\n def generator():\n while True:\n summary = summaries_file.readline()\n code = codes_file.readline()\n if len(summary) == 0:\n assert len(code) == 0\n summaries_file.seek(0)\n codes_file.seek(0)\n break\n assert len(code) > 0\n summary_prepped = preprocess_edinburgh_python_or_summary(summary)\n code_prepped = preprocess_edinburgh_python_or_summary(code)\n yield summary_prepped, code_prepped\n\n return generator\n\n\ndef load_edinburgh_dataset(path: str):\n train = list(edinburgh_dataset_as_generator(path + \"/data_ps.descriptions.train.txt\",\n path + \"/data_ps.declbodies.train.txt\")())\n val = list(edinburgh_dataset_as_generator(path + \"/data_ps.descriptions.valid.txt\",\n path + \"/data_ps.declbodies.valid.txt\")())\n test = list(edinburgh_dataset_as_generator(path + \"/data_ps.descriptions.test.txt\",\n path + 
\"/data_ps.declbodies.test.txt\")())\n    return train, val, test\n\n\ndef load_iyer_file(filename: str) -> Tuple[List[str], List[str]]:\n    dataset = load_iyer_dataset(filename)\n    summaries = [example[0] for example in dataset]\n    codes = [example[1] for example in dataset]\n    return summaries, codes\n\n\ndef load_iyer_dataset(filename: str, alternate_summaries_filename: str = None) -> List[Tuple[str, str]]:\n    file_contents = open(filename).readlines()\n    if alternate_summaries_filename:\n        alternate_file_contents = open(alternate_summaries_filename).readlines()\n    dataset = []\n    for line_num in range(len(file_contents)):\n        line = file_contents[line_num]\n        split_line = line.split('\\t')\n        if len(split_line) == 5:\n            summary = preprocess_stackoverflow_summary(split_line[2])\n            code = preprocess_csharp_or_java(split_line[3])\n            if alternate_summaries_filename is None:\n                dataset.append((summary, code))\n            else:\n                alternate_summaries = []\n                for alternate_idx in range(line_num + len(file_contents), len(alternate_file_contents),\n                                           len(file_contents)):\n                    split_alt_line = alternate_file_contents[alternate_idx].split('\\t')\n                    if len(split_alt_line) == 2:\n                        alternate_summary = preprocess_stackoverflow_summary(split_alt_line[1])\n                        alternate_summaries.append(alternate_summary)\n                dataset.append((summary, code, alternate_summaries))\n    return dataset\n\n\ndef load_csv_dataset(filename: str) -> List[Tuple[str, str]]:\n    file = open(filename, encoding='UTF8')\n    reader = csv.reader(file)\n    dataset = []\n    for row in reader:\n        summary = preprocess_stackoverflow_summary(row[0])\n        code = preprocess_csharp_or_java(row[1])\n        dataset.append((summary, code))\n    return dataset\n\n\ndef json_java_dataset_as_generator(filename):\n    file = open(filename, mode='r', encoding='utf-8')\n\n    def generator():\n        while True:\n            row = file.readline()\n            if len(row) == 0:\n                file.seek(0)\n                break\n            json_row = json.loads(row)\n            summary = preprocess_javadoc(json_row[\"nl\"])\n            code = preprocess_csharp_or_java(json_row[\"code\"])\n            yield summary, code\n\n    return generator\n\n\ndef load_json_dataset(filename):\n    generator = json_java_dataset_as_generator(filename)\n    return list(generator())\n\n\ndef generator_from_list(rows):\n    def generator():\n        for row in rows:\n            yield row\n    return generator\n\n\ndef eof_text(text: str) -> str:\n    text = \"\" + text + \"\"\n    return text\n\n\ndef de_eof_text(text: str) -> str:\n    if text.startswith(\"\"):\n        text = text[len(\"\"):]\n    if text.endswith(\"\"):\n        text = text[:-len(\"\")]\n    return text\n\n\ndef main():\n    ex_dataset_file = open(\"../data/iyer_csharp/train.txt\").readlines()\n    ex_dataset = []\n    for line in ex_dataset_file:\n        items = line.split('\\t')\n        if len(items) == 5:\n            ex_dataset.append(line.split('\\t')[2])\n\n    print(ex_dataset[0])\n\n    tokenized = tokenize_texts(ex_dataset)\n    print(tokenized[0])\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"src/text_data_utils.py","file_name":"text_data_utils.py","file_ext":"py","file_size_in_byte":7979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"295116294","text":"'''\nimport copy\n\ndef find_same_number(some_list):\n    temp_list = []\n    for i in range(len(some_list)):\n        temp_list = copy.copy(some_list)\n        del temp_list[i]\n        if some_list[i] in temp_list:\n            return some_list[i]\n'''\ndef find_same_number(some_list):\n    # a dictionary for storing the elements we have already seen\n    elements_seen_so_far = {}\n\n    for element in some_list:\n        # check whether the element has already appeared; if so, return it\n        if element in elements_seen_so_far:\n            return element\n\n        # store the element in the dictionary\n        
elements_seen_so_far[element] = True\n\n\n# returns only 'one' of the duplicated numbers.\nprint(find_same_number([1, 4, 3, 5, 3, 2]))\nprint(find_same_number([4, 1, 5, 2, 3, 5]))\nprint(find_same_number([5, 2, 3, 4, 1, 6, 7, 8, 9, 3]))\n","sub_path":"codeit_algorithm/coal30.py","file_name":"coal30.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"58912890","text":"def dfs(graph, start):\n    \"\"\"\n    Returns all nodes of the graph visited using DFS.\n    Iterative Approach.\n    \"\"\"\n    # keeps track of nodes to be visited\n    stack = []\n    # keeps track of nodes already visited\n    res = []\n    stack.append(start)\n    while stack:\n        # remove last node from stack\n        curr_node = stack.pop()\n        # check if node is visited\n        if curr_node not in res:\n            res.append(curr_node)\n            adj_nodes = graph[curr_node]\n            # add adjacent nodes to stack\n            for i in adj_nodes:\n                stack.append(i)\n    return res\n\ngraph = {'A': ['B', 'C', 'E'],\n         'B': ['A','D', 'E'],\n         'C': ['A', 'F', 'G'],\n         'D': ['B', 'F'],\n         'E': ['A', 'B'],\n         'F': ['C', 'D'],\n         'G': ['C']}\nprint(dfs(graph, 'A'))\n","sub_path":"depth-first-search.py","file_name":"depth-first-search.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"370976663","text":"# %%\n# Adapted from:\n# https://github.com/pytorch/examples/blob/master/mnist/main.py\n\nfrom __future__ import print_function\nimport argparse\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torchvision import datasets, transforms\nfrom torch.optim.lr_scheduler import StepLR\n\nimport os\n\n# Clone ExpressTrain\nos.system(\"git clone https://github.com/asatriano/expresstrain/\")\n# Import ExpressTrain :)\nimport expresstrain as et\n\n# %%\n# Define your model:\nclass Net(nn.Module):\n    def __init__(self):\n        super(Net, self).__init__()\n        self.conv1 = nn.Conv2d(1, 32, 3, 1)\n        self.conv2 = nn.Conv2d(32, 64, 3, 1)\n        self.dropout1 = nn.Dropout(0.25)\n        self.dropout2 = nn.Dropout(0.5)\n        self.fc1 = nn.Linear(9216, 128)\n        self.fc2 = nn.Linear(128, 10)\n\n    def forward(self, x):\n        x = self.conv1(x)\n        x = F.relu(x)\n        x = self.conv2(x)\n        x = F.relu(x)\n        x = F.max_pool2d(x, 2)\n        x = self.dropout1(x)\n        x = torch.flatten(x, 1)\n        x = self.fc1(x)\n        x = F.relu(x)\n        x = self.dropout2(x)\n        x = self.fc2(x)\n        return x \n    # logits (or compute logsoftmax, and use an NLLLoss as a \n    # custom loss function in ExpressTrainer)\n\n# %%\ndef main():\n    # Training hyperparameters\n    parser=argparse.ArgumentParser(description='PyTorch FashionMNIST Example')\n    parser.add_argument('--random-seed', type=int, default=42, metavar='RS',\n                        help='input random seed integer (default: 42)')\n    parser.add_argument('--batch-size', type=int, default=32, metavar='BS',\n                        help='input batch size to use at training (default: 32)')\n    parser.add_argument('--batch-size-multiplier', type=int, default=2, metavar='BSM',\n                        help='input batch size multiplier for validation (default: 2)')\n    parser.add_argument('--num-workers-dataloader', type=int, default=0, metavar='NM',\n                        help='input number of workers for dataloaders (default: 0)')\n    parser.add_argument('--learning-rate', type=float, default=1e-2, metavar='LR',\n                        help='input training learning rate (default: 1e-2)')\n    parser.add_argument('--epochs', type=int, default=30, metavar='E',\n                        help='input training epochs (default: 30)')\n    parser.add_argument('--path-performance', type=str, default=None, metavar='PP',\n                        
help='input saving path for loss and metrics (default: None)')\n parser.add_argument('--path-perf-model', type=str, default=None, metavar='PPM',\n help='input saving path for loss, metric, and model params')\n parser.add_argument('--use-fp16', action='store_true', default=False,\n help='input whether to use Automatic Mixed Precision (default: True)')\n\n args=parser.parse_args()\n\n# %%\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n print(f\"Device used: {device}\")\n\n torch.manual_seed(args.random_seed)\n \n # Define your transforms:\n transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])\n\n # Import your datasets\n dataset1 = datasets.FashionMNIST('./data', train=True, download=True,\n transform=transform)\n dataset2 = datasets.FashionMNIST('./data', train=False,\n transform=transform)\n\n # Define Dataloaders:\n train_kwargs = {'batch_size': args.batch_size,\n 'shuffle': True}\n valid_kwargs = {'batch_size': args.batch_size*args.batch_size_multiplier,\n 'shuffle': False}\n workers_kwargs = {'num_workers': args.num_workers_dataloader}\n\n train_kwargs.update(workers_kwargs)\n valid_kwargs.update(workers_kwargs)\n train_loader = torch.utils.data.DataLoader(dataset1, **train_kwargs)\n valid_loader = torch.utils.data.DataLoader(dataset2, **valid_kwargs)\n\n # Instance your favourite model and optimizer\n model = Net().to(device)\n optimizer = optim.AdamW(model.parameters(), lr=args.learning_rate)\n\n # Define your favourite metric\n def accuracy(preds, targets):\n assert(len(preds)==len(targets))\n correct=torch.sum(preds == targets)\n return correct/len(targets)\n \n metric_used=accuracy\n\n # Subclass Express Train:\n class CustomExpressTrain(et.ExpressTrain):\n def __init__(self, **kwargs):\n super(CustomExpressTrain, self).__init__()\n self.initialize_all(kwargs)\n\n def on_train_epoch_begin(self):\n print(f\"\\nMessage before epoch {self.epoch+1} - Today is a great day :)\")\n \n # Instance your Custom Express Train trainer\n trainer_kwargs={'train_loader': train_loader,\n 'valid_loader': valid_loader,\n 'model': model,\n 'num_classes': 10,\n 'device': device,\n 'learning_rate': args.learning_rate,\n 'optimizer': optimizer,\n 'metric_used': metric_used,\n 'path_performance': args.path_performance,\n 'path_performance_and_model': args.path_perf_model}\n if args.use_fp16==True:\n print(\"Using Automatic Mixed Precision\")\n trainer_kwargs.update({'fp16': args.use_fp16})\n \n trainer=CustomExpressTrain(**trainer_kwargs)\n\n trainer.fit(args.epochs)\n \n\nif __name__ == '__main__':\n main()\n","sub_path":"examples/fashion_mnist_example_on_train_epoch_begin.py","file_name":"fashion_mnist_example_on_train_epoch_begin.py","file_ext":"py","file_size_in_byte":5524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"247795894","text":"from contextlib import contextmanager\nimport datetime\nimport os\nimport shlex\nimport subprocess\nimport sys\n\nfrom cookiecutter.utils import rmtree\nimport jinja2\n\n\n@contextmanager\ndef inside_dir(dirpath):\n \"\"\"\n Execute code from inside the given directory\n :param dirpath: String, path of the directory the command is being run.\n \"\"\"\n old_path = os.getcwd()\n try:\n os.chdir(dirpath)\n yield\n finally:\n os.chdir(old_path)\n\n\n@contextmanager\ndef suppressed_hook_items(skip_github_and_circleci_creation=True,\n skip_fix_script=False):\n \"\"\"A context manager which sets an env variable to suppress different\n 
hook items\n\n \"\"\"\n os.environ['SKIP_GITHUB_AND_CIRCLECI_CREATION'] =\\\n '1' if skip_github_and_circleci_creation else '0'\n os.environ['SKIP_FIX_SCRIPT'] = '1' if skip_fix_script else '0'\n try:\n yield\n finally:\n del os.environ['SKIP_GITHUB_AND_CIRCLECI_CREATION']\n del os.environ['SKIP_FIX_SCRIPT']\n\n\ndef errmsg(exception):\n if isinstance(exception, jinja2.exceptions.TemplateSyntaxError):\n return f\"Found error at {exception.filename}:{exception.lineno}\"\n else:\n return str(exception)\n\n\n@contextmanager\ndef bake_in_temp_dir(cookies, skip_fix_script=False, *args, **kwargs):\n \"\"\"\n Delete the temporal directory that is created when executing the tests\n :param cookies: pytest_cookies.Cookies,\n cookie to be baked and its temporal files will be removed\n \"\"\"\n with suppressed_hook_items(skip_github_and_circleci_creation=True,\n skip_fix_script=skip_fix_script):\n result = cookies.bake(*args, **kwargs)\n assert result is not None, result\n assert result.exception is None, errmsg(result.exception)\n assert result.exit_code == 0\n assert hasattr(result, 'project_path'), result\n try:\n yield result\n finally:\n rmtree(str(result.project_path))\n\n\ndef run_inside_dir(command, dirpath):\n \"\"\"\n Run a command from inside a given directory, returning the exit status\n :param command: Command that will be executed\n :param dirpath: String, path of the directory the command is being run.\n \"\"\"\n with inside_dir(dirpath):\n return subprocess.check_call(shlex.split(command))\n\n\ndef check_output_inside_dir(command, dirpath):\n \"Run a command from inside a given directory, returning the command output\"\n with inside_dir(dirpath):\n return subprocess.check_output(shlex.split(command))\n\n\ndef project_info(result):\n \"\"\"Get toplevel dir, project_slug, and project dir from baked cookies\"\"\"\n project_path = str(result.project_path)\n project_slug = os.path.split(project_path)[-1]\n project_dir = os.path.join(project_path, result.context['package_name'])\n return project_path, project_slug, project_dir\n\n\ndef test_bake_and_run_build(cookies):\n with bake_in_temp_dir(cookies) as result:\n assert result.project_path.is_dir()\n assert result.exit_code == 0\n assert result.exception is None\n\n found_toplevel_files = [f.name for f in result.project_path.iterdir()]\n assert 'setup.py' in found_toplevel_files\n assert 'python_boilerplate' in found_toplevel_files\n assert 'tox.ini' in found_toplevel_files\n assert 'tests' in found_toplevel_files\n assert 'README.rst' in found_toplevel_files\n assert 'LICENSE' in found_toplevel_files\n assert 'fix.sh' in found_toplevel_files\n\n assert run_inside_dir('make ratchet-typecoverage', str(result.project_path)) == 0\n assert run_inside_dir('make coverage', str(result.project_path)) == 0\n assert run_inside_dir('make quality', str(result.project_path)) == 0\n # The supplied Makefile does not support win32\n if sys.platform != \"win32\":\n output = check_output_inside_dir(\n 'make help',\n str(result.project_path)\n )\n assert b\"run precommit quality checks\" in \\\n output\n license_file_path = result.project_path / 'LICENSE'\n now = datetime.datetime.now()\n assert str(now.year) in license_file_path.open().read()\n print(\"test_bake_and_run_build path\", str(result.project_path))\n\n\nTRICKY_QUOTE_CHARACTERS_CONTEXT = {\n 'full_name': 'name \"quote\" O\\'connor',\n 'project_short_description':\n 'The greatest project ever created by name \"quote\" O\\'connor.',\n}\n\n\ndef test_bake_without_author_file(cookies):\n with 
bake_in_temp_dir(\n cookies,\n extra_context={'create_author_file': 'n'},\n skip_fix_script=True,\n ) as result:\n found_toplevel_files = [f.name for f in result.project_path.iterdir()]\n assert 'AUTHORS.rst' not in found_toplevel_files\n doc_files = [f.name for f in (result.project_path / 'docs').iterdir()]\n assert 'authors.rst' not in doc_files\n\n # Assert there are no spaces in the toc tree\n docs_index_path = result.project_path / 'docs/index.rst'\n with open(str(docs_index_path)) as index_file:\n assert 'contributing\\n history' in index_file.read()\n\n # Check that AUTHORS.rst is not referenced in the manifest\n manifest_path = result.project_path / 'MANIFEST.in'\n with open(str(manifest_path)) as manifest_file:\n assert 'AUTHORS.rst' not in manifest_file.read()\n\n\ndef test_bake_selecting_license(cookies):\n license_strings = {\n 'MIT license': 'MIT ',\n 'BSD license': 'Redistributions of source code must retain the ' +\n 'above copyright notice, this',\n 'ISC license': 'ISC License',\n 'Apache Software License 2.0':\n 'Licensed under the Apache License, Version 2.0',\n 'GNU General Public License v3': 'GNU GENERAL PUBLIC LICENSE',\n }\n for license, target_string in license_strings.items():\n with bake_in_temp_dir(\n cookies,\n extra_context={'open_source_license': license},\n skip_fix_script=True,\n ) as result:\n assert target_string in (result.project_path / 'LICENSE').open().read()\n assert license in (result.project_path / 'setup.py').open().read()\n license_file_path = result.project_path / 'LICENSE'\n now = datetime.datetime.now()\n assert str(now.year) in license_file_path.open().read()\n\n\ndef test_bake_not_open_source(cookies):\n with bake_in_temp_dir(\n cookies,\n extra_context={'open_source_license': 'Not open source'},\n skip_fix_script=True,\n ) as result:\n found_toplevel_files = [f.name for f in result.project_path.iterdir()]\n assert 'setup.py' in found_toplevel_files\n assert 'LICENSE' not in found_toplevel_files\n assert 'License' not in (result.project_path / 'README.rst').open().read()\n\n\ndef test_bake_with_no_console_script(cookies):\n context = {'command_line_interface': \"No command-line interface\"}\n context.update(TRICKY_QUOTE_CHARACTERS_CONTEXT)\n with suppressed_hook_items():\n result = cookies.bake(extra_context=context)\n project_path, project_slug, project_dir = project_info(result)\n found_project_files = os.listdir(project_dir)\n assert \"cli.py\" not in found_project_files\n\n setup_path = os.path.join(project_path, 'setup.py')\n with open(setup_path, 'r') as setup_file:\n assert 'entry_points' not in setup_file.read()\n assert run_inside_dir('make ratchet-typecoverage', str(result.project_path)) == 0\n assert run_inside_dir('make coverage', str(result.project_path)) == 0\n assert run_inside_dir('make quality', str(result.project_path)) == 0\n assert run_inside_dir('make docs BROWSER=echo', str(result.project_path)) == 0\n\n\ndef test_bake_with_argparse_console_script_files(cookies):\n context = {'command_line_interface': 'Argparse'}\n context.update(TRICKY_QUOTE_CHARACTERS_CONTEXT)\n with suppressed_hook_items():\n result = cookies.bake(extra_context=context)\n assert result is not None\n assert result.project_path is not None\n\n project_path, project_slug, project_dir = project_info(result)\n found_project_files = os.listdir(project_dir)\n assert \"cli.py\" in found_project_files\n\n setup_path = os.path.join(project_path, 'setup.py')\n with open(setup_path, 'r') as setup_file:\n assert 'entry_points' in setup_file.read()\n assert run_inside_dir('make ratchet-typecoverage', 
str(result.project_path)) == 0\n assert run_inside_dir('make coverage', str(result.project_path)) == 0\n assert run_inside_dir('make quality', str(result.project_path)) == 0\n","sub_path":"tests/test_bake_project.py","file_name":"test_bake_project.py","file_ext":"py","file_size_in_byte":8613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"111837592","text":"import matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\nfonts = {\"font.size\": 14}\r\nplt.rcParams.update(fonts)\r\n\r\nprint(\"Choose which Task to run by typing one of the letters (C, D, E, F):\")\r\nprint(\"Task C - Equilibrium plots\")\r\nprint(\"Task D - Probability histogram\")\r\nprint(\"Task E - Phase transition plots\")\r\nprint(\"Task F - Critical temperature\")\r\n\r\nTask = input(\"Write here (C, D, E, F): \")\r\n\r\nif Task == \"C\":\r\n MCcycles = []\r\n # Ordered, T=1.0\r\n E_mean_O1 = []; M_mean_O1 = []; Nconfigs_O1 = []\r\n # Unordered, T=1.0\r\n E_mean_U1 = []; M_mean_U1 = []; Nconfigs_U1 = []\r\n # Ordered, T=2.4\r\n E_mean_O2 = []; M_mean_O2 = []; Nconfigs_O2 = []\r\n # Unordered, T=2.4\r\n E_mean_U2 = []; M_mean_U2 = []; Nconfigs_U2 = []\r\n\r\n with open(\"Ordered_1\") as file:\r\n lines = file.readlines()\r\n #Skip the first two lines\r\n for j in range(2,len(lines)):\r\n line = lines[j]\r\n vals = line.split()\r\n MCcycles.append(float(vals[0]))\r\n E_mean_O1.append(float(vals[1]))\r\n M_mean_O1.append(float(vals[2]))\r\n Nconfigs_O1.append(float(vals[3]))\r\n\r\n with open(\"Unordered_1\") as file:\r\n lines = file.readlines()\r\n #Skip the first two lines\r\n for j in range(2,len(lines)):\r\n line = lines[j]\r\n vals = line.split()\r\n E_mean_U1.append(float(vals[1]))\r\n M_mean_U1.append(float(vals[2]))\r\n Nconfigs_U1.append(float(vals[3]))\r\n\r\n with open(\"Ordered_2_4\") as file:\r\n lines = file.readlines()\r\n #Skip the first two lines\r\n for j in range(2,len(lines)):\r\n line = lines[j]\r\n vals = line.split()\r\n E_mean_O2.append(float(vals[1]))\r\n M_mean_O2.append(float(vals[2]))\r\n Nconfigs_O2.append(float(vals[3]))\r\n\r\n with open(\"Unordered_2_4\") as file:\r\n lines = file.readlines()\r\n #Skip the first two lines\r\n for j in range(2,len(lines)):\r\n line = lines[j]\r\n vals = line.split()\r\n E_mean_U2.append(float(vals[1]))\r\n M_mean_U2.append(float(vals[2]))\r\n Nconfigs_U2.append(float(vals[3]))\r\n\r\n plt.figure()\r\n plt.title(\"Mean energy, $\\\\langle E \\\\rangle$,\\n with ordered spin\")\r\n plt.plot(MCcycles, E_mean_O1)\r\n plt.xlabel(\"# of Monte Carlo cycles\")\r\n plt.ylabel(\"$\\\\langle$ E $\\\\rangle$\")\r\n plt.legend([\"T=1.0\"])\r\n plt.tight_layout()\r\n plt.savefig(\"Figures/E_Ordered_1.png\")\r\n\r\n plt.figure()\r\n plt.title(\"Mean energy, $\\\\langle E \\\\rangle$,\\n with ordered spin\")\r\n plt.plot(MCcycles, E_mean_O2, \"orange\")\r\n plt.xlabel(\"# of Monte Carlo cycles\")\r\n plt.ylabel(\"$\\\\langle$ E $\\\\rangle$\")\r\n plt.legend([\"T=2.4\"])\r\n plt.tight_layout()\r\n plt.savefig(\"Figures/E_Ordered_2_4.png\")\r\n\r\n plt.figure()\r\n plt.title(\"Mean energy, $\\\\langle E \\\\rangle$,\\n with random spin\")\r\n plt.plot(MCcycles, E_mean_U1)\r\n plt.plot(MCcycles, E_mean_U2)\r\n plt.xlabel(\"# of Monte Carlo cycles\")\r\n plt.ylabel(\"$\\\\langle$ E $\\\\rangle$\")\r\n plt.legend([\"T=1.0\", \"T=2.4\"])\r\n plt.tight_layout()\r\n plt.savefig(\"Figures/E_Random.png\")\r\n\r\n plt.figure()\r\n plt.title(\"Mean absolute Magnetization, $\\\\langle |M| \\\\rangle$,\\n\"\r\n + \" with ordered 
spin\")\r\n plt.plot(MCcycles, M_mean_O1)\r\n plt.xlabel(\"# of Monte Carlo cycles\")\r\n plt.ylabel(\"$\\\\langle |M| \\\\rangle$\")\r\n plt.legend([\"T=1.0\"])\r\n plt.tight_layout()\r\n plt.savefig(\"Figures/M_Ordered_1.png\")\r\n\r\n plt.figure()\r\n plt.title(\"Mean absolute Magnetization, $\\\\langle |M| \\\\rangle$,\\n\"\r\n + \" with ordered spin\")\r\n plt.plot(MCcycles, M_mean_O2, \"orange\")\r\n plt.xlabel(\"# of Monte Carlo cycles\")\r\n plt.ylabel(\"$\\\\langle |M| \\\\rangle$\")\r\n plt.legend([\"T=2.4\"])\r\n plt.tight_layout()\r\n plt.savefig(\"Figures/M_Ordered_2_4.png\")\r\n\r\n plt.figure()\r\n plt.title(\"Mean absolute Magnetization, $\\\\langle |M| \\\\rangle$,\\n\"\r\n + \" with random spin\")\r\n plt.plot(MCcycles, M_mean_U1)\r\n plt.plot(MCcycles, M_mean_U2)\r\n plt.xlabel(\"# of Monte Carlo cycles\")\r\n plt.ylabel(\"$\\\\langle |M| \\\\rangle$\")\r\n plt.legend([\"T=1.0\", \"T=2.4\"])\r\n plt.tight_layout()\r\n plt.savefig(\"Figures/M_Random.png\")\r\n\r\n plt.figure()\r\n plt.title(\"# of accepted configurations (normalized)\\n w/ ordered spin\"\r\n + \" as function of MC cycles\")\r\n plt.plot(MCcycles, Nconfigs_O1)\r\n plt.xlabel(\"# of Monte Carlo cycles\")\r\n plt.ylabel(\"Accepted configurations (normalized)\")\r\n plt.legend([\"T=1.0\"])\r\n plt.tight_layout()\r\n plt.savefig(\"Figures/Accpt_Ordered_1.png\")\r\n\r\n plt.figure()\r\n plt.title(\"# of accepted configurations (normalized)\\n w/ ordered spin\"\r\n + \" as function of MC cycles\")\r\n plt.plot(MCcycles, Nconfigs_O2, \"orange\")\r\n plt.xlabel(\"# of Monte Carlo cycles\")\r\n plt.ylabel(\"Accepted configurations (normalized)\")\r\n plt.legend([\"T=2.4\"])\r\n plt.tight_layout()\r\n plt.savefig(\"Figures/Accpt_Ordered_2_4.png\")\r\n\r\n plt.figure()\r\n plt.title(\"# of accepted configurations (normalized)\\n w/ random spin\"\r\n + \" as function of MC cycles\")\r\n plt.plot(MCcycles, Nconfigs_U1)\r\n plt.plot(MCcycles, Nconfigs_U2)\r\n plt.xlabel(\"# of Monte Carlo cycles\")\r\n plt.ylabel(\"Accepted configurations (normalized)\")\r\n plt.legend([\"T=1.0\", \"T=2.4\"])\r\n plt.tight_layout()\r\n plt.savefig(\"Figures/Accpt_Random.png\")\r\n\r\n with open(\"Nconfig_vs_Temp\") as file:\r\n lines = file.readlines()\r\n T = []\r\n Nconfigs = []\r\n for i in range(2, len(lines)):\r\n line = lines[i]\r\n vals = line.split()\r\n T.append(float(vals[0]))\r\n Nconfigs.append(float(vals[1]))\r\n\r\n plt.figure()\r\n plt.title(\"# of accepted configurations (normalized)\\n as function of T\")\r\n plt.plot(T, Nconfigs)\r\n plt.xlabel(\"Temperatures [kT/J]\")\r\n plt.ylabel(\"Accepted configurations (normalized)\")\r\n plt.tight_layout()\r\n plt.savefig(\"Figures/AccptVsT.png\")\r\n plt.show()\r\n\r\n\r\nif Task == \"D\":\r\n filenames = [\"Probability_1\",\"Probability_24\"]\r\n for i in filenames:\r\n with open(i) as file:\r\n lines = file.readlines()\r\n E = []\r\n counts = []\r\n max_count = 0\r\n most_probable_energy = 0\r\n for j in range(1,len(lines)):\r\n line = lines[j]\r\n vals = line.split()\r\n energy = float(vals[0])\r\n count = float(vals[1])\r\n E.append((energy))\r\n counts.append((count))\r\n if count > max_count:\r\n max_count = count\r\n most_prob_energy = energy\r\n plt.figure()\r\n props = dict(boxstyle=\"square\", facecolor=\"wheat\", alpha=1)\r\n if i == \"Probability_1\":\r\n plt.title(\"Probability distribution, P(E),\\n with L=20 and T=1.0\")\r\n plt.bar(E, counts, width = 4)\r\n plt.xlim(-805,-770)\r\n plt.text(0.3*(plt.xlim()[1]-plt.xlim()[0])+plt.xlim()[0] ,plt.ylim()[1]*0.85,\r\n 
\"Most probable energy:\\n\" + str(most_prob_energy), bbox = props)\r\n plt.xlabel(\"Energy\")\r\n plt.ylabel(\"Energy counts, P(E)\")\r\n plt.tight_layout()\r\n plt.savefig(\"Figures/Prob_1.png\")\r\n else:\r\n plt.title(\"Probability distribution, P(E),\\n with L=20 and T=2.4\")\r\n plt.bar(E, counts, width = 3)\r\n plt.xlim(-705,-305)\r\n plt.text(0.02*(plt.xlim()[1]-plt.xlim()[0])+plt.xlim()[0] ,plt.ylim()[1]*0.85,\r\n \"Most probable energy:\\n\" + str(most_prob_energy), bbox = props)\r\n plt.xlabel(\"Energy\")\r\n plt.ylabel(\"Energy counts, P(E)\")\r\n plt.tight_layout()\r\n plt.savefig(\"Figures/Prob_2_4.png\")\r\n plt.show()\r\n\r\nif Task == \"E\":\r\n with open(\"Phase_transitions\") as file:\r\n lines = file.readlines()\r\n T = []\r\n E = []\r\n M = []\r\n Cv = []\r\n Xi = []\r\n split = []\r\n for j in range(2, len(lines)):\r\n line = lines[j]\r\n vals = line.split()\r\n split.append(len(line))\r\n T.append(float(vals[0]))\r\n E.append(float(vals[1]))\r\n M.append(float(vals[2]))\r\n Cv.append(float(vals[3]))\r\n Xi.append(float(vals[4]))\r\n\r\n t_split = len(split)/4.\r\n t1 = int(t_split)\r\n t2 = int(2*t_split)\r\n t3 = int(3*t_split)\r\n t4 = int(4*t_split)\r\n temps = T[:t1]\r\n\r\n plt.figure()\r\n plt.title(\"Phase transition plot for $\\\\langle E(T) \\\\rangle$:\\n\"\r\n + \"L=[40,60,80,100], $\\\\Delta$t=0.002, MC=5e5\")\r\n plt.plot(temps, E[:t1], label=\"40x40\")\r\n plt.plot(temps, E[t1:t2], label=\"60x60\")\r\n plt.plot(temps, E[t2:t3], label=\"80x80\")\r\n plt.plot(temps, E[t3:t4], label=\"100x100\")\r\n plt.xlabel(\"Temperatures [kT/J]\")\r\n plt.ylabel(\"$\\\\langle$ E(T) $\\\\rangle$\")\r\n plt.tight_layout()\r\n plt.legend()\r\n plt.savefig(\"Figures/Phase_E(T).png\")\r\n\r\n plt.figure()\r\n plt.title(\"Phase transition plot for $\\\\langle |M(T)| \\\\rangle$:\\n\"\r\n + \"L=[40,60,80,100], $\\\\Delta$t=0.002, MC=5e5\")\r\n plt.plot(temps, M[:t1], label=\"40x40\")\r\n plt.plot(temps, M[t1:t2], label=\"60x60\")\r\n plt.plot(temps, M[t2:t3], label=\"80x80\")\r\n plt.plot(temps, M[t3:t4], label=\"100x100\")\r\n plt.xlabel(\"Temperatures [kT/J]\")\r\n plt.ylabel(\"$\\\\langle |M(T)| \\\\rangle$\")\r\n plt.tight_layout()\r\n plt.legend()\r\n plt.savefig(\"Figures/Phase_M(T).png\")\r\n\r\n plt.figure()\r\n plt.title(\"Phase transition plot for $C_v$:\\n\"\r\n + \"L=[40,60,80,100], $\\\\Delta$t=0.002, MC=5e5\")\r\n plt.plot(temps, Cv[:t1], label=\"40x40\")\r\n plt.plot(temps, Cv[t1:t2], label=\"60x60\")\r\n plt.plot(temps, Cv[t2:t3], label=\"80x80\")\r\n plt.plot(temps, Cv[t3:t4], label=\"100x100\")\r\n plt.xlabel(\"Temperatures [kT/J]\")\r\n plt.ylabel(\"Specific heat $C_v$\")\r\n plt.tight_layout()\r\n plt.legend()\r\n plt.savefig(\"Figures/Phase_Cv(T).png\")\r\n\r\n plt.figure()\r\n plt.title(\"Phase transition plot for $\\\\chi$:\\n\"\r\n + \"L=[40,60,80,100], $\\\\Delta$t=0.002, MC=5e5\")\r\n plt.plot(temps, Xi[:t1], label=\"40x40\")\r\n plt.plot(temps, Xi[t1:t2], label=\"60x60\")\r\n plt.plot(temps, Xi[t2:t3], label=\"80x80\")\r\n plt.plot(temps, Xi[t3:t4], label=\"100x100\")\r\n plt.xlabel(\"Temperatures [kT/J]\")\r\n plt.ylabel(\"Susceptibility $\\\\chi$\")\r\n plt.tight_layout()\r\n plt.legend()\r\n plt.savefig(\"Figures/Phase_Xi(T).png\")\r\n plt.show()\r\n\r\nif Task == \"F\":\r\n with open(\"Phase_transitions\") as file:\r\n lines = file.readlines()\r\n T = []\r\n E = []\r\n M = []\r\n Cv = []\r\n Xi = []\r\n split = []\r\n for j in range(2, len(lines)):\r\n line = lines[j]\r\n pieces = line.split()\r\n split.append(len(line))\r\n 
T.append(float(pieces[0]))\r\n E.append(float(pieces[1]))\r\n M.append(float(pieces[2]))\r\n Cv.append(float(pieces[3]))\r\n Xi.append(float(pieces[4]))\r\n\r\n t_split = len(split)/4.\r\n t1 = int(t_split)\r\n t2 = int(2*t_split)\r\n t3 = int(3*t_split)\r\n t4 = int(4*t_split)\r\n temps = T[:t1]\r\n TCCv = []\r\n TCX = []\r\n\r\n for i in range(int(len(E)/len(temps))):\r\n listCv = Cv[i*len(temps):len(temps)*(i+1)]\r\n listXi = Xi[i*len(temps):len(temps)*(i+1)]\r\n maxCv = max(listCv)\r\n maxXi = max(listXi)\r\n TCCv.append(temps[listCv.index(maxCv)])\r\n TCX.append(temps[listXi.index(maxXi)])\r\n print(\"Tc for Cv =\",temps[listCv.index(maxCv)])\r\n print(\"Tc for Xi =\",temps[listXi.index(maxXi)])\r\n\r\n #Performing a linear regression to find critical temp in thermodyn. limit\r\n TCCv = np.array(TCCv)\r\n TCX = np.array(TCX)\r\n Llist = 1.0/np.array([40,60,80,100])\r\n TC = 2./(np.log(1+np.sqrt(2)))\r\n\r\n linreg1 = np.polyfit(Llist,TCCv,1)\r\n linreg2 = np.polyfit(Llist,TCX,1)\r\n\r\n plt.figure()\r\n plt.title(\"Phase transitions for specific heat with\\n $T_C$ for\"\r\n + \" analytic value and L=[40,60,80,100]\")\r\n plt.xlabel(\"T [kT/J]\")\r\n plt.ylabel(\"Specific heat $\\langle$$C_v$$\\\\rangle$ [$J^2/kT^2$]\")\r\n plt.plot(temps[:t1], Cv[:t1], label=\"_nolegend_\")\r\n plt.plot(temps[:t1], Cv[t1:t2], label=\"_nolegend_\")\r\n plt.plot(temps[:t1], Cv[t2:t3], label=\"_nolegend_\")\r\n plt.plot(temps[:t1], Cv[t3:t4], label=\"_nolegend_\")\r\n for i in range(int(len(E)/len(temps))):\r\n plt.plot([TCCv[i], TCCv[i]], [min(Cv[i*len(temps):len(temps)*(i+1)]),\r\n max(Cv[i*len(temps):len(temps)*(i+1)])], \"--\")\r\n plt.plot([TC, TC], [min(Cv), max(Cv)], \"black\")\r\n plt.legend([\"TC: L = 40\",\"TC: L = 60\",\"TC: L = 80\",\"TC: L = 100\", \"Analytic TC\"])\r\n plt.tight_layout()\r\n plt.savefig(\"Figures/CV_TC.png\")\r\n\r\n print(\"The estimated Critical Temperature from our simulations is Tc = %g \"\r\n %(0.5*(linreg1[1]+linreg2[1])))\r\n print(\"Exact critical temperature is around %.4f\" %TC)\r\n plt.show()\r\n","sub_path":"Project4/Codes/plots.py","file_name":"plots.py","file_ext":"py","file_size_in_byte":13059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"417511529","text":"import requests\nwebsite = input('Please paste in line up webpage: ')\nresult = requests.get(website , timeout = 10)\n#r = requests.get('http://github.com', allow_redirects=False)\n\nc = result.content\nfrom bs4 import BeautifulSoup\nimport pandas as pd\n#think it could be working now :)\nsoup = BeautifulSoup(c,\"html.parser\")\n\n#print (soup.prettify())\n#kinda working\n\nsTable = soup.find_all('div', class_=\"list-inouts\")\n#print(sTable)#print(soup.find_all('table', class_='ladder zebra player-ratings'))\nA=[]\nB=[]\n\n\n\nfor row in sTable:\n cells = row.findAll('ul')\n# print (cells[0].get_text())\n# print(\"Hello\")\n# print(cells[0])\n players1 = cells[0].find_all('li')\n #print(cells[2])\n## print (\"hello\")\n# print(players1)\n# print(len(players1))\n i = 0\n while i < len(players1):\n if i > 0:\n players1[i] = players1[i].get_text().split(' ')\n del players1[i][0:4]\n players1[i] = ' '.join(players1[i])\n# print(players1[i])\n else:\n players1[i] = players1[i].get_text()\n# print(players1[i])\n i += 1\n players2 = cells[1].find_all('li')\n i = 0\n while i < len(players2):\n if i > 0:\n players2[i] = players2[i].get_text().split(' ')\n del players2[i][0:4]\n players2[i] = ' '.join(players2[i])\n# print(players2[i])\n else:\n players2[i] 
= players2[i].get_text()\n# print(players2[i])\n i += 1\n A.append(players1)\n B.append(players2)\nH1=A[0]\n\nA1=B[0]\n\nH2=A[1]\n\nA2=B[1]\n\nH3=A[2]\n\nA3=B[2]\n\nH4=A[3]\n\nA4=B[3]\n\nH5=A[4]\n\nA5=B[4]\n\nH6=A[5]\n\nA6=B[5]\n\nH7=A[6]\n\nA7=B[6]\n\nH8=A[7]\n\nA8=B[7]\n\nH9=A[8]\n\nA9=B[8] \n#print(A)\n\n'''\n for i in players1[2]:\n \n del i[0:4]\n print(i)\n'''\n \n\n\n#import pandas to convert list to data frame\ndf = pd.DataFrame(H1,columns=['G1 Home Team'])\ndf['G1 Away Team']=A1\ndf['G2 Home Team']=H2\ndf['G2 Away Team']=A2\ndf['G3 Home Team']=H3\ndf['G3 Away Team']=A3\ndf['G4 Home Team']=H4\ndf['G4 Away Team']=A4\ndf['G5 Home Team']=H5\ndf['G5 Away Team']=A5\ndf['G6 Home Team']=H6\ndf['G6 Away Team']=A6\ndf['G7 Home Team']=H7\ndf['G7 Away Team']=A7\ndf['G8 Home Team']=H8\ndf['G8 Away Team']=A8\ndf['G9 Home Team']=H9\ndf['G9 Away Team']=A9\n##df['Club']=C\n#df['Position']=D\n#df['Trend']=E\n#df['Points']=F\nprint(df)\ndf.to_csv(\"TeamLists.csv\")\n","sub_path":"AFL/TeamListScrape.py","file_name":"TeamListScrape.py","file_ext":"py","file_size_in_byte":2354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"605611998","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport sys\nimport yaml\nfrom os import path as osp\nfrom jinja2 import Template\n\n\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\nwork_dir = osp.dirname(osp.dirname(osp.realpath(__file__)))\nif work_dir not in sys.path:\n sys.path.insert(0, work_dir)\n\nfrom utils.rrd_helper import get_rrds_by_kind\n\nytemplate = Template(open(\"/opt/ops/hosts.yaml\", \"r\").read())\nyhosts = yaml.load(ytemplate.render())\n\nlocations = yhosts.get('locations', [])\nbusinesses = yhosts.get('businesses', [])\nhosts = yhosts.get('hosts', [])\nhosts_tpl = yhosts.get('hosts_tpl', {})\n\ndef merge_dict(dst, tpl):\n for k in tpl.keys():\n if not dst.has_key(k):\n dst[k] = tpl[k]\n continue\n vd = dst[k]\n vt = tpl.get(k)\n if not isinstance(vd, type(vt)):\n # warn\n continue\n if isinstance(vd, dict):\n merge_dict(dst[k], vt)\n\ndef update_host(host):\n tpl = hosts_tpl.get(host.get('inherit'))\n if tpl:\n merge_dict(host, tpl)\n host.pop('inherit')\n return host\n\ndef expand_hosts_tpl():\n for k,v in hosts_tpl.items():\n tpl = hosts_tpl.get(v.get('inherit'))\n if tpl:\n if tpl.has_key('inherit'):\n raise Exception(\"mulltiple inherit found\")\n else:\n merge_dict(v, tpl)\n v.pop('inherit')\n\ndef get_hosts():\n expand_hosts_tpl()\n\n for host in hosts:\n tpl = hosts_tpl.get(host.get('inherit'))\n if tpl:\n merge_dict(host, tpl)\n host.pop('inherit')\n host['rrds'] = get_rrds_by_kind(host.get('kind', None))\n\n return hosts\n\ndef get_locations(hosts):\n locs = []\n for l in locations:\n hs = []\n for h in filter(lambda v:v.get('location', None) == l['id'], hosts):\n s = h.get('walks')\n if not s: continue\n n = s.get('net')\n if not n: continue\n ns = filter(lambda v:v['name'] == 'wan', n)\n if len(ns) < 1: continue\n hs.append({\n 'name' : h['name'],\n 'wan' : osp.join('rrd', h['id'], '-'.join(['net', 'wan.rrd']))\n })\n hs.sort()\n locs.append({'id' : l['id'], 'name' : l['name'], 'hosts' : hs})\n return locs\n\ndef get_businesses(hosts):\n buss = []\n for l in businesses:\n hs = []\n for h in filter(lambda v:l['id'] in v.get('business', []), hosts):\n e = { 'name' : h['name'] }\n\n n = h.get('walks', {}).get('net')\n if n:\n ns = filter(lambda v:v['name'] == 'wan', n)\n if len(ns) == 1:\n e['wan'] = osp.join('rrd',\n h['id'], '-'.join(['net', 'wan.rrd']))\n\n n = 
h.get('rrds', {}).get('get')\n if n:\n if 'udp' in n:\n e['udp'] = osp.join('rrd', h['id'], 'udp.rrd')\n if 'cpu' in n:\n e['cpu'] = osp.join('rrd', h['id'], 'cpu.rrd')\n if 'mem' in n:\n e['mem'] = osp.join('rrd', h['id'], 'mem.rrd')\n hs.append(e)\n hs.sort()\n buss.append({'id' : l['id'], 'name' : l['name'], 'hosts' : hs})\n return buss\n\nif __name__ == '__main__':\n pass\n","sub_path":"scripts/yastat/hosts.py","file_name":"hosts.py","file_ext":"py","file_size_in_byte":3295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"194147735","text":"'''\nProblem: Staying Put\n\nYou start at index 0 in an array with length 'h'.\nAt each step, you can move to the left,\nmove to the right, or stay in the same place\n(Note: Stay in the same place also takes one step).\nHow many possible ways are you still at index 0 after you have walked 'n' steps?\n\nExample: n = 3\n1. right->left->stay\n2. right->stay->left\n3. stay->right->left\n4. stay->stay->stay\n5. left->right->stay\n6. left->stay->right\n7. stay->left->right\n(walks may step left of index 0; all seven are counted by the tests below)\n'''\n\nclass Solution:\n paths = None\n \n def __init__(self):\n self.paths = {}\n self.paths[0] = []\n self.paths[1] = ['s']\n self.paths[2] = ['lr','rl','ss']\n\n def num_ways(self, n):\n # Completed from the recurrence notes at the bottom of this file: count\n # length-n walks over steps {left, right, stay} that end back at index 0.\n # Walks may pass left of index 0, matching the expected count of 7 for n=3.\n if n == 0:\n return 0 # convention used by the tests below\n ways = {0: 1}\n for _ in range(n):\n nxt = {}\n for pos, cnt in ways.items():\n for step in (-1, 0, 1):\n nxt[pos + step] = nxt.get(pos + step, 0) + cnt\n ways = nxt\n return ways.get(0, 0)\n\ns = Solution()\n\nprint(s.num_ways(0)) # 0\nprint(s.num_ways(1)) # 1 - s\nprint(s.num_ways(2)) # 3 - lr, rl, ss\nprint(s.num_ways(3)) # 7 - lrs, rls, rsl, lsr, srl, slr, sss\n# 4 - lrlr lrrl llrr | rllr rlrl rrll (6) + lrss + lsrs + lssr+ rlss, rsls + rssl + sslr + ssrl + ssss (9) + slrs + slsr + srls + srsl (4) = 19\n# 5 - lrslr, lrlsr, llrsr, lrsrl, lrrsl\n\n\n# . . .\n# for num_ways(k):\n# either: \n# num_ways(k-1) and STAY\n# --> num_ways(k-1) + 1 WAYS\n# OR\n# num_ways(k-2) and go right and left\n#\n#\n","sub_path":"leetcode/discussion/TODO_staying_put.py","file_name":"TODO_staying_put.py","file_ext":"py","file_size_in_byte":1064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"321737363","text":"# ----------------------------------------------------------------------\n\nimport os\nimport copy\nimport itertools\nimport numpy as np\n\n# ----------------------------------------------------------------------\n\nfrom triqs.lattice.tight_binding import TBLattice, energies_on_bz_path\nfrom triqs.lattice.utils import parse_hopping_from_wannier90_hr_dat\nfrom triqs.lattice.utils import parse_lattice_vectors_from_wannier90_wout\n\n# ----------------------------------------------------------------------\ndef extend_wannier90_to_spin(hoppings, num_wann):\n\n hoppings_spin = {}\n for key, value in list(hoppings.items()):\n hoppings_spin[key] = np.kron(np.eye(2), value) # orbital fastest idx\n\n return hoppings_spin, 2 * num_wann\n\n# ----------------------------------------------------------------------\ndef tight_binding_model(crystal_field=0., lambda_soc=0.): \n\n paths = [os.getcwd(), os.path.dirname(__file__)]\n for p in paths:\n if os.path.isfile(p + '/w2w_hr.dat'):\n path = p\n break\n\n # -- Read Wannier90 results\n\n hoppings, num_wann = parse_hopping_from_wannier90_hr_dat(path + '/w2w_hr.dat')\n orbital_names = [str(i) for i in range(num_wann)]\n units = parse_lattice_vectors_from_wannier90_wout(path + '/w2w.wout')\n\n # -- Extend to spinful model from non-spin polarized Wannier90 result\n hoppings_spin, num_wann_spin = extend_wannier90_to_spin(hoppings, num_wann)\n orbital_names_spin = [\"\".join(tup) for tup in itertools.product(['up_', 'do_'], orbital_names)]\n\n # ------------------------------------------------------------------\n # order is xy_up, xz_up, yz_up, xy_dn, xz_dn, 
yz_dn \n\n def lambda_matrix_pavarini(lam_xy, lam_z): # according to https://arxiv.org/pdf/1612.03060.pdf\n lam_loc = np.zeros((6,6),dtype=complex)\n lam_loc[0,5] = lam_xy/2.0\n lam_loc[0,4] = 1j*lam_xy/2.0\n lam_loc[1,2] = -1j*lam_z/2.0\n lam_loc[2,3] = -lam_xy/2.0\n lam_loc[1,3] = -1j*lam_xy/2.0\n lam_loc[4,5] = 1j*lam_z/2.0\n lam_loc = lam_loc + np.transpose(np.conjugate(lam_loc))\n return lam_loc\n\n def lambda_matrix(lam_xy, lam_z):\n lam_loc = np.zeros((6,6),dtype=complex)\n lam_loc[0,4] = 1j*lam_xy/2.0\n lam_loc[0,5] = lam_xy/2.0\n lam_loc[1,2] = 1j*lam_z/2.0\n lam_loc[1,3] = -1j*lam_xy/2.0\n lam_loc[2,3] = -lam_xy/2.0\n lam_loc[4,5] = -1j*lam_z/2.0\n lam_loc = lam_loc + np.transpose(np.conjugate(lam_loc))\n return lam_loc\n\n def cf_matrix(cf_xy, cf_z):\n cf_loc = np.zeros((6,6),dtype=complex)\n cf_loc[0,0] = cf_xy\n cf_loc[1,1] = cf_z\n cf_loc[2,2] = cf_z\n cf_loc[3,3] = cf_xy\n cf_loc[4,4] = cf_z\n cf_loc[5,5] = cf_z\n return cf_loc\n\n cf_loc = cf_matrix(-crystal_field, +crystal_field)\n d_lam_loc = lambda_matrix(lambda_soc, lambda_soc) \n\n # add soc and cf terms\n hoppings = copy.deepcopy(hoppings_spin)\n hoppings[(0,0,0)] += d_lam_loc+ cf_loc\n\n # ------------------------------------------------------------------\n\n tb_lattice = TBLattice(\n units = units,\n hoppings = hoppings,\n orbital_positions = [(0,0,0)]*num_wann_spin,\n orbital_names = orbital_names_spin,\n )\n\n tb_lattice.lambda_soc = lambda_soc\n tb_lattice.crystal_field = crystal_field\n \n return tb_lattice\n","sub_path":"Sr2RuO4/tight_binding_model.py","file_name":"tight_binding_model.py","file_ext":"py","file_size_in_byte":3402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"496503271","text":"#!/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import print_function\nimport os\nimport tornado.ioloop\nimport tornado.web\nimport tornado.websocket\nimport json\nimport re\nfrom db_access import DB_Access\nimport base64\nimport sys\n\ndb = DB_Access()\n\nglobal_text = \"\"\nserver = None\n\n\nclass MainHandler(tornado.web.RequestHandler):\n\n def get(self):\n self.render(\"index.html\")\n\nclass NewContentHandler(tornado.web.RequestHandler):\n\n def get(self):\n self.render(\"new_content.html\")\n\ndef args_to_dict(args):\n answer = {'id':'','tag':[]}\n work = re.split(',',args)\n i=0\n# print(base64.decodestring(work[i+1].encode(\"ascii\")).decode(\"utf8\"))\n while(i.+)\", APIHandler),\n (r\"/api\", APIHandler),\n (r\"/new_content\", NewContentHandler),\n ],\n template_path=os.path.join(os.getcwd(), \"templates\"),\n static_path=os.path.join(os.getcwd(), \"static\"),\n \n )\n application.listen(8889)\n print('Server started')\n server = tornado.ioloop.IOLoop.instance()\n server.start()\n\n \nif __name__ == \"__main__\":\n serve_forever()\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"190358174","text":"from gridWorld import GridWorld\nfrom qAgent import QAgent\n\n\ndef main() :\n env = GridWorld()\n agent = QAgent()\n\n for episode in range (1000):\n done = False\n state = env.reset()\n while not done:\n action = agent.select_action(state)\n state_prime , reward, done = env.step(action)\n agent.update_table_sarsa((state, action, reward, state_prime))\n state = state_prime\n agent.anneal_eps()\n agent.show_table()\n\n\nif __name__ == \"__main__\" :\n 
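# Editor's note: update_table_sarsa receives (s, a, r, s') with no next action\n # a'; whether this is true SARSA or a Q-learning-style update depends on\n # QAgent's implementation, which is not shown in this file.\n 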
main()\n","sub_path":"chapter-6/sarsa.py","file_name":"sarsa.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"1496659","text":"# **************************************************\n# * Printing out the first n square numbers *\n# * These arguments are passed from Command Line *\n# * into the program *\n# **************************************************\n\nimport sys # COMMAND LINE ARGUMENTS:\n # In the command line, we can start a program \n # with additional arguments.\n # 'sys' module: an additional module of the Python,\n # that manages all Arguments in command line. \n\nMax = int(sys.argv[1]) # sys.argv[1] is the second Argument in command line.\n # set how many items the list have (List Length).\n\nmyList = [] # set an empty List: 'myList'\n\n\nfor x in range(1, Max+1):\n n = x**2\n myList.append(str(n)) # Add an item to the end of the list; \n # (item previously converted from integer to string)\n\nmyString = \", \".join(myList) # concatenation of the strings in the List: 'myList'\nprint(myString)\n\n\n","sub_path":"square_numbers2.py","file_name":"square_numbers2.py","file_ext":"py","file_size_in_byte":1100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"231932564","text":"import ctypes\ndef powers_of_two(num):\n for i in range(1,num+1):\n yield 2**i\n# for curr_value in powers_of_two(6):\n# print(curr_value)\n# print()\n\n\ndef sum_lst(lst,low):\n if low == len(lst):\n return 0\n else:\n if isinstance(lst[low],list):\n return sum_lst(lst,low+1) + sum_lst(lst[low],0)\n else:\n return lst[low]+ sum_lst(lst,low+1)\n\n\ndef sum_nested_lst(lst):\n sum = 0\n for elem in lst:\n if isinstance(elem,list):\n sum += sum_nested_lst(elem)\n else:\n sum += elem\n return sum\n\n\nlst = [[1, 2], [3, [[4], 5]], 6]\nprint(sum_lst(lst,0))\nprint(sum_nested_lst(lst))\nprint()\n\n\ndef sort_first(lst,count,high,ind):\n if count==len(lst):\n return\n else:\n if lst[high]pivot:\n small -= 1\n if lst[big] > pivot and lst[small] < pivot:\n lst[big],lst[small]=lst[small],lst[big]\n lst[small-1],lst[0]=lst[0],lst[small-1]\nlst = [54,26,93,17,77,56,44,55,20]\nsort( lst)\nprint(lst)\nprint()\n\n\n# def string(n):\n# return (n * ctypes.py_object)()\n# class MyString:\n# def __init__(self,s=string(0)):\n# self.string=s\n#\n# def __len__(self):\n# return len(self.string)\n#\n# def __iter__(self):\n# for i in range(len(self.string)):\n# yield self.string[i]\n#\n# def __str__(self):\n# return str(self.string)\n#\n# def __repr__(self):\n# return str(self)\n#\n# def __getitem__(self, item):\n# return self.string[item]\n#\n# def __add__(self,other):\n# result = MyString()\n# result.string = self.string + other\n# return result\n#\n# def __radd__(self, other):\n# result = MyString()\n# result.string = other + self.string\n# return result\n#\n# def upper(self):\n# return self.string.upper()\n#\n# def __iadd__(self, other):\n# self.string = self.string + other.string\n# return self\n# st1 = MyString ()\n# print(st1)\n# st1 = st1 + \"hi\"\n# print(st1)\n# st2 = \"hello\"\n# print(st2 + st1)\n# print(st1 . 
upper ())\n# print()\n\n\ndef decimal_to_binary(n):\n if n == 1:\n return \"1\"\n else:\n bit = 0\n if n%2==1:\n bit = 1\n return decimal_to_binary(n//2) + str(bit)\n# print(decimal_to_binary(30))\n# print()\n\n\ndef solve_hanoi(n,start,dest,spare):\n if n == 1:\n print(\"move disk from %s to %s\" % (start, dest))\n # move the biggest to dest\n else:\n solve_hanoi(n-1,start,spare,dest)\n # move all from start to spare except for the biggest one\n print(\"move disk from %s to %s\"%(start,dest))\n solve_hanoi(n-1, spare, dest,start)\n # move the rest from spare to dest\n# solve_hanoi(3,\"a\",\"c\",\"b\")\n\n\n# def board_game(lst):\n#\n\n\n\n","sub_path":"Lab/lab6.py","file_name":"lab6.py","file_ext":"py","file_size_in_byte":3280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"472250371","text":"from gevent import pywsgi\r\nfrom geventwebsocket.handler import WebSocketHandler\r\n\r\n\r\n \r\ndef nick(ws,*argv):\r\n ws.name = argv[0]\r\n\r\ndef join(ws,*argv):\r\n ws.room = argv[0]\r\n if not rooms.has_key(ws.room):\r\n rooms[ws.room] = []\r\n rooms[ws.room].append(ws)\r\n _toChannel(ws,(ws.name+\" join \"+ws.room))\r\n\r\ndef who(ws,*argv):\r\n _toChannel(ws,(ws.name+\" say \"+argv[0])) \r\n\r\n\r\ndef quit(ws,*argv):\r\n rooms[ws.room].remove(ws)\r\n _toChannel(ws,(ws.name+\" quit! \"))\r\n\r\ndef _toChannel(ws,*argv):\r\n for people in rooms[ws.room]:\r\n people.send(argv[0])\r\n\r\ndispatcher = {u\"/nick\":nick,u\"/join\":join,u\"/who\":who,u\"/quit\":quit}\r\nrooms = {}\r\n\r\ndef websocket_app(environ, start_response):\r\n ws = environ[\"wsgi.websocket\"]\r\n while 1:\r\n msg = ws.receive()\r\n if msg is None:\r\n break\r\n arr = msg.split(\" \") \r\n dispatcher[arr[0]](ws,arr[1])\r\n\r\n\r\nserver = pywsgi.WSGIServer((\"\", 9000), websocket_app,\r\n handler_class=WebSocketHandler)\r\nserver.serve_forever()","sub_path":"chat_gevent+websocket+irc/Server/wsocketserver.py","file_name":"wsocketserver.py","file_ext":"py","file_size_in_byte":1030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"446849431","text":"#!/usr/bin/python\n# n people stand in a circle, numbered in order.\n# Starting from the first person, count off 1 to 3; whoever calls 3 leaves the circle. Which original number does the last person left hold?\n# Modeled as a queue-through-a-door problem\n\nn=5 # originally initialized as n=34\narr = list(range(1,n+1)) # everyone lines up outside as queue a; those who enter join queue b\ncount,a,b=0,arr,[] # counter, queue a of people not yet through the door, queue b of those already through\n\nwhile len(a+b)>1: # loop until only one person remains\n num=a.pop(0) # one person enters at a time via a.pop(0)\n count+=1 # count each person who enters\n if count%3!=0:\n b.append(num)\n if a==[]:\n a,b=b,[]\nprint(a[0])\n","sub_path":"tuiquan.py","file_name":"tuiquan.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"372080261","text":"\n\nfrom xai.brain.wordbase.nouns._parricide import _PARRICIDE\n\n#class header\nclass _PARRICIDES(_PARRICIDE, ):\n\tdef __init__(self,): \n\t\t_PARRICIDE.__init__(self)\n\t\tself.name = \"PARRICIDES\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"parricide\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_parricides.py","file_name":"_parricides.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"188497515","text":"# coding=utf-8\n\"\"\"\n Vanishing point computation using HoughLines\n\"\"\"\nimport cv2\nimport os\nfrom sys import argv\nimport numpy as np\n\nHOUGH_THRESHOLD = 80\nVERT_MULTIPLIER = float(argv[1])\nCAM = False\n\nimage_index = 0\nimages = 
os.listdir(\"./vanishing_point\")\n\ncap = cv2.VideoCapture(0)\n\nwhile True:\n if CAM:\n _, img = cap.read()\n else:\n img = cv2.imread(os.path.join(\"./vanishing_point\",images[image_index]))\n\n img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n img_gray = cv2.GaussianBlur(img_gray, (3, 3), 0)\n\n rows, cols = img_gray.shape\n\n canny = cv2.Canny(img, 150, 120, apertureSize=3)\n\n cv2.imshow(\"Canny\", canny)\n\n lines = cv2.HoughLines(canny, 1, np.pi / 180, HOUGH_THRESHOLD)\n\n dummy = np.zeros((rows, cols)).astype(np.uint8)\n\n e = 0.1\n\n if not lines is None:\n for rho, theta in lines[0]:\n a = np.cos(theta)\n b = np.sin(theta)\n x = rho * a\n y = rho * b\n x1 = int(x + 1000 * (-b))\n y1 = int(y + 1000 * (a))\n x2 = int(x - 1000 * (-b))\n y2 = int(y - 1000 * (a))\n m = np.abs(np.arctan2(y2 - y1, x2 - x1))\n if (m > e and m < np.pi / 2 - e*VERT_MULTIPLIER) or (m > np.pi / 2 + e*VERT_MULTIPLIER and m < np.pi - e):\n ALPHA = m\n temp = np.zeros((rows, cols))\n cv2.line(temp, (x1, y1), (x2, y2), (1), 10)\n cv2.line(img, (x1, y1), (x2, y2), (255, 0, 0))\n dummy = dummy + temp\n\n max_voted = np.max(dummy)\n if max_voted > 30:\n HOUGH_THRESHOLD += 5\n elif max_voted < 10:\n HOUGH_THRESHOLD -= 5\n\n a_max = np.argmax(dummy)\n i = a_max % cols\n j = a_max / cols\n cross_size = int(rows * 0.1)\n offset = 5\n cv2.line(img, (i, j + offset), (i, j + offset + cross_size), (0, 0, 255),3)\n cv2.line(img, (i + offset, j), (i + offset + cross_size, j), (0, 0, 255),3)\n cv2.line(img, (i, j - offset), (i, j - offset - cross_size), (0, 0, 255),3)\n cv2.line(img, (i - offset, j), (i - offset - cross_size, j), (0, 0, 255),3)\n\n cv2.imshow(str(images[image_index]), img)\n key = cv2.waitKey(1)\n if key & 0xFF == ord('q'):\n break\n elif key & 0xFF == ord('c'):\n CAM = not CAM\n HOUGH_THRESHOLD = 80\n elif key & 0xFF == ord('i'):\n HOUGH_THRESHOLD = 80\n cv2.destroyWindow(images[image_index])\n image_index += 1\n image_index = image_index % len(images)\n","sub_path":"t3/houghlines_opencv.py","file_name":"houghlines_opencv.py","file_ext":"py","file_size_in_byte":2529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"238019187","text":"# -*- coding: utf-8 -*-\n# @Author : caijizhi\n# @FileName: web_auto01.py\n# @Software: PyCharm\n#京东查询\nfrom selenium import webdriver\nimport time\n\ndriver = webdriver.Chrome()\ndriver.maximize_window()\nurl = r'https://www.jd.com/'\ndriver.get(url)\ndriver.find_element_by_xpath(\"//*[@clstag='h|keycount|head|search_c']\").send_keys('电脑')\ndriver.find_element_by_xpath(\"//*[@clstag='h|keycount|head|search_a']\").click()\ndriver.implicitly_wait(3)\ndriver.find_element_by_xpath('''//*[@id=\"J_goodsList\"]/ul/li[7]/div/div[3]/a''').click()\n\ndriver.implicitly_wait(5)\ndsa = driver.window_handles\ndriver.switch_to.window(dsa[1])\n\ndriver.implicitly_wait(30)\ndriver.quit()\n","sub_path":"webday01/web_auto01.py","file_name":"web_auto01.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"120621754","text":"#!/usr/bin/python\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.decomposition import PCA\nimport math\nimport numpy as np\nimport math\nimport sys\n\n#train=[[1,2,2,1],[2,1,1,1],[1,1,1,1],[1,1,2,1],[1,1,1,2]]\n#label=[1,1,2,1,2]\n#test=[[1,1,2,1],[1,1,1,1]]\n\ntrain=[]\nlabel=[]\ntest=[]\nactualLabel=[]\npredicted={}\n#Local machine \n#trainFile='features.csv'\n#testFile='test.csv'\n#Big red 
II\n#trainFile='/N/dc2/scratch/anikgaik/dm/features.csv'\n#testFile='/N/dc2/scratch/anikgaik/dm/test.csv'\ntrainFile='/N/dc2/scratch/hydargah/dm/featureFiles/2gramTrain.features'\ntestFile='/N/dc2/scratch/hydargah/dm/featureFiles/2gramTest.features'\ntestFileList=[]\nl=0\nnewTrain=[]\nnewTest=[]\nexplained_train_var_ratio=[]\nexplained_test_var_ratio=[]\n\ndef CALL_PCA():\n\tglobal l,newTrain,newTest,explained_train_var_ratio,explained_test_var_ratio,label,testFileList\n\tf=open(trainFile)\n\tf.readline()\n\tfor line in iter(f):\n\t\ttoken=line.split(',')\n\t\tl=len(token)\n\t\ttrain.append(token[1:len(token)-1])\n\t\tlabel.append(token[-1].replace('\\n',''))\n\tf.close()\n\n\tf=open(testFile)\n\tf.readline()\n\tfor line in iter(f):\n\t\ttoken=line.split(',')\n\t\ttestFileList.append(token[0])\t\n\t\ttest.append(token[1:])\n\t\t#actualLabel.append(token[-1].replace('\\n',''))\n\tf.close()\n\t#print(\"Train : \")\n\t#print(train)\n\t#print(\"Test : \")\n\t#print(test)\n\tprint(\"******* PCA on train data ************\")\n\tX=np.array(train)\n\tpca=PCA(n_components=30)\n\tnewTrain=pca.fit_transform(X)\n\tprint(\"***** Transformed train data *****\")\n\tprint(newTrain)\n\tprint(\"***** Explained Variance by data ******\")\n\texplained_train_var_ratio=pca.explained_variance_ratio_\n\tprint(explained_train_var_ratio)\n\n\tprint(\"******* PCA on test data ************\")\n\tX=np.array(test)\n\tpca=PCA(n_components=30)\n\tnewTest=pca.fit_transform(X)\n\tprint(\"***** Transformed test data *****\")\n\tprint(newTest)\n\tprint(\"***** Explained Variance by data ******\")\n\texplained_test_var_ratio=pca.explained_variance_ratio_\n\tprint(explained_test_var_ratio)\n\t#explained_variance=[]\n\t#explained_variance.extend(pca.explained_variance_ratio_)\n\t#for var in pca.explained_variance_ratio_:\n\t\t#if var > 0.8:\n\t\t\t#print(x_new[explained_variance.index(var)])\n\t#print(x_new)\n\n\n\ndef main():\n\t#with open(trainFile) as f:\n\t#\tfirstLine=f.readline()\n\t#\tfor line in f:\n\t#\t\ttoken=line.split(',')\n\t#\t\ttrain.append(token[1:len(token)-1])\n\t#\t\tlabel.append(token[-1].replace('\\n',''))\n\n\t#with open(testFile) as f:\n\t#\tfirstLine=f.readline()\n\t#\tfor line in f:\n\t#\t\ttoken=line.split(',')\n\t#\t\ttestFileList.append(token[0])\t\n\t#\t\ttest.append(token[1:len(token)-1])\n\t#\t\tactualLabel.append(token[-1].replace('\\n',''))\n\n\t#for l in train:\n\t\t#print(l)\n\n\t#print(\"Label : \")\n\t#print(label)\t\t\n\n\tCALL_PCA()\n\n\th=RandomForestClassifier(n_estimators=100)\n\th.fit(newTrain,label)\n\t#predicted_probs = [[testFileList[index],index + 1, x[0],x[1],x[2],x[3],x[4],x[5],x[6],x[7],x[8],max(x[0],x[1],x[2],x[3],x[4],x[5],x[6],x[7],x[8])] for index, x in enumerate(h.predict_proba(test))]\n\tpredicted_probs = [[testFileList[index],x[0],x[1],x[2],x[3],x[4],x[5],x[6],x[7],x[8],max(x[0],x[1],x[2],x[3],x[4],x[5],x[6],x[7],x[8])] for index, x in enumerate(h.predict_proba(newTest))]\n\n\tprint(\"predicted_probs :\")\n\tprint(predicted_probs)\n\n\t#for probList in predicted_probs:\n\t#\tif probList[1] in predicted:\n\t#\t\tprint(\"ERROR\")\n\t#\telse:\n\t#\t\tpredicted[probList[1]]=getPredictionCLass(probList)#probList.index(probList[-1])\n\n\t \n\twith open('Tprediction.csv', 'w') as the_file:\n\t\tfor prediction in predicted_probs:\t\t\n\t\t\tthe_file.write(str(prediction[:len(prediction)-1]))\n\t\t\tthe_file.write('\\n')\n\tdata=\"\"\t\t\n
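\t# Editor's aside (hedged, not in the original): CALL_PCA fits PCA on train\n\t# and test independently, so the two 30-D projections live in different\n\t# bases; fitting on train only and calling pca.transform() on test would be\n\t# the conventional choice. Likewise, the write-then-rewrite below strips\n\t# Python list syntax by string replacement; csv.writer could emit clean rows\n\t# directly (apart from the '.asm' trimming), e.g.\n\t#   csv.writer(fh).writerows(r[:-1] for r in predicted_probs)\n\twith open('Tprediction.csv','r') as the_file:\n\t\tfor line in 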
the_file:\n\t\t\tdata+=line.replace('[','').replace(']','').replace(\"'\",'').replace('.asm','')\n\n\twith open('prediction.csv', 'w') as the_file:\n\t\tthe_file.write(data)\n\n\t#print(\"actualLabel : \")\n\t#print(actualLabel)\n\t#print(\"predicted : \")\n\t#print(predicted)\n\n\t#wt=calculateWeight(actualLabel,predicted)\n\t#print(\"Weight : \"+str(wt))\n\ndef calculateWeight(actualLabel,predicted):\n\tcount=0\n\tsuperCount=0\n\tfor label in predicted:\n\t\tsuperCount+=1\n\t\t#print(str(predicted[label])+\",\"+actualLabel[label-1])\n\t\tif str(predicted[label])==actualLabel[label-1] :\n\t\t\tcount+=1\n\t#print(count)\n\treturn(float(count)/superCount)\n\ndef getPredictionCLass(probList):\n\tfor entry in probList[1:]:\n\t\tif(entry>=0.7):\n\t\t\t#print(\"Hit\")\n\t\t\treturn probList.index(entry)\n\treturn probList.index(probList[-1])\n\nif __name__==\"__main__\":\n\torig_stdout=sys.stdout\n\tf=open('out1.txt','w')\n\tsys.stdout=f\n\tmain()\n","sub_path":"RandomForest.py","file_name":"RandomForest.py","file_ext":"py","file_size_in_byte":4468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"234348873","text":"# -*- coding: utf-8 -*-\n# Based on the code at https://www.gis-py.com/entry/py-latlon2mesh, with the 4th-order mesh part added\n#\n# 2021.10.7 Y.Kayama\n#\n\ndef getmeshID(lat, lon):\n # 1st-order mesh\n quotient_lat, remainder_lat = divmod(lat * 60, 40)\n first2digits = str(quotient_lat)[0:2]\n\n # last 2 digits of the 1st-order mesh\n last2digits = str(lon - 100)[0:2]\n remainder_lon = lon - int(last2digits) - 100\n\n # 1st-order mesh code\n first_mesh = first2digits + last2digits\n \n #if level == 1:\n # return meshid_1\n\n # first digit of the 2nd-order mesh\n first1digits, remainder_lat = divmod(remainder_lat, 5)\n\n # last digit of the 2nd-order mesh\n last1digits, remainder_lon = divmod(remainder_lon * 60, 7.5)\n\n # 2nd-order mesh code\n second_mesh = first_mesh + str(first1digits)[0:1] + str(last1digits)[0:1]\n\n # first digit of the 3rd-order mesh\n first1digits, remainder_lat = divmod(remainder_lat * 60, 30)\n\n # last digit of the 3rd-order mesh\n last1digits, remainder_lon = divmod(remainder_lon * 60, 45)\n\n # 3rd-order mesh code\n third_mesh = second_mesh + str(first1digits)[0:1] + str(last1digits)[0:1]\n \n \n \n first25dg, rem_lat = divmod(remainder_lat , 0.75)\n last25dg, rem_lon = divmod( remainder_lon , 1.125 )\n\n\n\n first5dg, rem_lat = divmod(remainder_lat , 0.15)\n last5dg, rem_lon = divmod( remainder_lon , 0.225 )\n \n\n \n # 4th-order mesh, y digit\n first1digit, remainder_lat = divmod(remainder_lat , 15)\n\n\n print(\"first1 \" + str(first1digit) + \" remainder_lat \" + str(remainder_lat) )\n \n # 4th-order mesh, x digit\n last1digit, remainder_lon = divmod(remainder_lon , 22.5)\n \n print(\"last1 \" + str(last1digit) + \" remainder_lon \" + str(remainder_lon) )\n \n if first1digit > 0:\n if last1digit > 0:\n digit = \"4\"\n else:\n digit = \"3\"\n else:\n if last1digit > 0:\n digit = \"2\"\n else:\n digit = \"1\"\n \n \n \n forth_mesh = third_mesh + digit\n \n m25_mesh = third_mesh + \"3\" + format(int( first25dg),'02')+ format(int(last25dg),'02')\n \n\n m5_mesh = third_mesh + \"2\" + format(int( first5dg),'03')+ format(int(last5dg),'03')\n print(\"1st-order mesh: \" + first_mesh)\n print(\"2nd-order mesh: \" + second_mesh)\n print(\"3rd-order mesh: \" + third_mesh)\n \n print(\"4th-order mesh: \" + forth_mesh)\n print(\"25m mesh: \" + m25_mesh)\n\n print(\"5m mesh: \" + m5_mesh)\n\n\n return forth_mesh\n
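\n# Worked example (editor's, hedged): for (32.343503, 129.987654),\n# divmod(32.343503*60, 40) = (48.0, 20.61...) gives first2digits \"48\", and\n# str(129.987654-100)[0:2] gives \"29\", so the 1st-order mesh code is \"4829\".\nif __name__ == '__main__':\n#129.988013,32.342565\n getmeshID(32.343503, 129.987654)\n # getmeshID(35.7007777, 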
139.71475)","sub_path":"latlonmesh.py","file_name":"latlonmesh.py","file_ext":"py","file_size_in_byte":2678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"260994759","text":"from random import randint\n\n# Run the game loop\ndef game_start(): \n\n com = com_choice()\n\n while True:\n user = user_choice()\n result = check(user, com)\n\n strike = result_strike(result)\n ball = result_ball(result, strike)\n result_total(strike, ball)\n\n\n# Draw the computer's three digits\ndef com_choice():\n\n number = []\n i = 0\n\n while i < 3:\n random_number = randint(1, 9)\n if random_number in number:\n continue\n else:\n number.append(random_number)\n i += 1\n\n return number\n\n\n# Read the user's three digits\ndef user_choice():\n\n user = 0\n\n while True:\n user = input('Enter a number, e.g. 123: ')\n if user.isdigit() == False:\n print(\"Please enter digits only, not letters or symbols\")\n continue\n elif len(user) != 3:\n print(\"Please enter a three-digit number\")\n continue\n elif user[0] == user[1] or user[0] == user[2] or user[1] == user[2]:\n print(\"Please enter three different digits\")\n continue\n else:\n break\n \n user = [int(i) for i in user] \n return user\n\n\n# Compare the user's digits with the computer's\ndef check(user, com):\n\n strike = 0\n ball = 0\n i = 0\n\n while i < 3:\n if user[i] == com[i]:\n strike += 1\n elif user[i] in com:\n ball += 1\n i += 1\n \n return strike, ball\n\n\n# Handle strike-only results\ndef result_strike(result):\n\n strike = result[0]\n ball = result[1]\n\n if strike == 3:\n print(\"3 strikes! \")\n print(\"You guessed all 3 digits! Congratulations.\")\n game_start()\n elif strike > 0 and ball == 0:\n print(\"%d strike(s)\" % strike)\n \n return strike\n\n\n# Handle ball-only results\ndef result_ball(result, strike):\n\n ball = result[1]\n\n if ball > 0 and strike == 0:\n print(\"%d ball(s)\" % ball)\n\n return ball\n\n\n# Handle mixed strike/ball results\ndef result_total(strike, ball):\n\n if strike == 0 and ball == 0:\n print(\"Four ball\")\n elif strike > 0 and ball > 0:\n print(\"%d strike(s) %d ball(s)\" % (strike, ball))\n\n\ngame_start()","sub_path":"level-test.py","file_name":"level-test.py","file_ext":"py","file_size_in_byte":2270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"477229956","text":"import requests\nfrom conftest import print_timing\nfrom fixtures import session\nfrom fixtures import base_url\nimport os\nfrom maxfreq import max_freq\nfrom conftest import print_in_shell\nfrom conftest import getRandomFilter\n\nclass TestDelete:\n @max_freq(50/3600)\n# @print_timing\n def test_delete_diagram(self, base_url, session):\n # Prepare\n # request list of diagrams using the session id\n HOSTNAME = os.environ.get('application_hostname')\n diagrams_response = session.get('/rest/dependency-map/1.0/diagram?searchTerm=&startAt=0&maxResults=50')\n assert diagrams_response.status_code == 200\n\n # To make it thread safe we need to create the diagram before removing it\n\n # Get user\n diagrams_response = session.get('/rest/dependency-map/1.0/user')\n assert diagrams_response.status_code == 200\n userKey = diagrams_response.json()[\"key\"]\n\n # Get filter key\n diagrams_response = session.get('/rest/dependency-map/1.0/filter?searchTerm=&page=0&resultsPerPage=25')\n assert diagrams_response.status_code == 200\n\n #Get filterKey randomly among the project in the project file\n filterKey= getRandomFilter(session)\n\n # Create diagram\n payload ={\n 'name':\"F100\",\n 'author':userKey,\n 'layoutId':2,\n 'filterKey': filterKey,\n 'boxColorFieldKey': 'status',\n 'groupedLayoutFieldKey': 'status',\n 'matrixLayoutHorizontalFieldKey': 'status',\n 'matrixLayoutVerticalFieldKey': 
'priority',\n 'showTypeIcons': True,\n 'parallelIssueBatches': 4,\n 'issuesPerRow': 5,\n 'secondaryIssues': 1,\n 'boxType': 0\n }\n\n diagrams_response = session.post('/rest/dependency-map/1.0/diagram',\n json=payload)\n assert diagrams_response.status_code == 200\n diagramId = diagrams_response.json()['id']\n\n #remove\n diagrams_response2 = session.delete('/rest/dependency-map/1.0/diagram/' + str(diagramId))\n assert diagrams_response2.status_code == 200\n print_in_shell(\"Diagram removed\" + str(diagramId))\n #print_in_shell( diagrams_response.json() );\n\n #get all diagrams after delete\n diagrams_response = session.get('/rest/dependency-map/1.0/diagram?searchTerm=&startAt=0&maxResults=50')\n assert diagrams_response.status_code == 200\n","sub_path":"app/pytests/test_delete_diagram.py","file_name":"test_delete_diagram.py","file_ext":"py","file_size_in_byte":2443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"189811720","text":"from django.contrib import admin\nfrom training.models import * \nfrom django.utils.translation import ugettext_lazy as _\n\ndef submissions(object):\n return object.submission_set.all().count()\nsubmissions.short_description = _(u'Submissions')\n\nclass SessionAdmin(admin.ModelAdmin):\n list_display = ('name', 'created', 'price', submissions)\n list_filter = ('name',)\n\nadmin.site.register(Session, SessionAdmin)\n\nclass DayAdmin(admin.ModelAdmin):\n list_display = ('date', 'session', 'food', 'drinks')\n list_filter = ('date',)\n\nadmin.site.register(Day, DayAdmin)\n\ndef session(object):\n return object.session.name\nsession.short_description = _(u'Session')\n\nclass SubmissionAdmin(admin.ModelAdmin):\n list_display = ('date', 'first_name', 'last_name', 'mobile', 'email', session, 'confirmed') \n list_filter = ('date', 'confirmed') \n model = Submission\n\nadmin.site.register(Submission, SubmissionAdmin)\n","sub_path":"project/training/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"123447481","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nfrom ..address import Provider as AddressProvider\n\n\nclass Provider(AddressProvider):\n cities = (\n 'Москва', 'Санкт-Петербург', 'Красногорск', 'Пенза', 'Владивосток',\n 'Уфа', 'Новороссийск', 'Мурманск', 'Брянск', 'Пермь'\n )\n # City name can be either existing or fake (50/50%).\n city_formats = cities + (\n '{{last_name_m}}ск',\n '{{last_name_f}}горск',\n ) * int(len(cities) / 2)\n","sub_path":"faker/providers/ru_RU/address.py","file_name":"address.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"456057071","text":"from django.test import TestCase\nfrom django.core.exceptions import ValidationError\nfrom lists.models import Item, List\n\nclass ItemAndListModelsTest(TestCase):\n\n def test_saving_and_retrieving_items_in_list(self):\n new_list= List()\n new_list.save()\n\n first_item= Item()\n first_item.text= 'The first (ever) List item'\n first_item.list = new_list\n first_item.save()\n\n second_item= Item()\n second_item.text= 'Item the second'\n second_item.list = new_list\n second_item.save()\n\n saved_list = List.objects.first()\n self.assertEqual(saved_list, new_list)\n\n saved_items= Item.objects.all()\n self.assertEqual(saved_items.count(),2)\n\n first_saved_item= saved_items[0]\n second_saved_item= 
saved_items[1]\n self.assertEqual(first_saved_item.text,'The first (ever) List item')\n self.assertEqual(first_saved_item.list, new_list)\n self.assertEqual(second_saved_item.text,'Item the second')\n self.assertEqual(second_saved_item.list, new_list)\n\n def test_cannot_save_empty_list_items(self):\n new_list= List.objects.create()\n item = Item(list= new_list, text='')\n with self.assertRaises(ValidationError):\n item.save()\n item.full_clean()\n","sub_path":"superlists/lists/tests/test_model.py","file_name":"test_model.py","file_ext":"py","file_size_in_byte":1306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"229094642","text":"#! /usr/bin/env python\n\n# PDF TEXT SEARCH v1.0\n# by Ben Byram-Wigfield\n\n# Minimal function for searching text in a PDF for a string.\n# Useful safety tip: PDFKit's page index starts at zero\n\nimport sys\nfrom Quartz import PDFDocument\nfrom Foundation import NSURL\n\t\t\ndef pdfSearch(filepath, searchString):\n\tpdfURL = NSURL.fileURLWithPath_(filepath)\n\tpdfDoc = PDFDocument.alloc().initWithURL_(pdfURL)\n\tif pdfDoc:\n\t\tsearchResults = (pdfDoc.findString_withOptions_(searchString, 0))\n\t\tif searchResults:\n\t\t\tfor result in searchResults:\n\t\t\t\teachPage = result.pages()\n\t\t\t\tprint (\"\\'\"+ searchString+\"\\' was found on page: \"+str(pdfDoc.indexForPage_(eachPage[0])+1)) \n\t\telse:\n\t\t\tprint(\"Nothing found.\")\n\telse:\n\t\tprint(\"Not a valid PDF.\")\n\treturn\n\nif __name__ == \"__main__\":\n\t# Set the filepath and searchString to your desired values\n\tfilepath = '/Users/ben/Desktop/Untitled.pdf'\n\tsearchString = 'office'\n\tpdfSearch(filepath, searchString)","sub_path":"legacy (python 2)/Shell_Scripts/pdfsearch.py","file_name":"pdfsearch.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"249779878","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('api_aestudiar', '0004_auto_20141031_2110'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='escuela',\n name='email',\n field=models.CharField(max_length=500, null=True),\n preserve_default=True,\n ),\n migrations.AlterField(\n model_name='escuela',\n name='jurisdiccion',\n field=models.CharField(max_length=500, null=True),\n preserve_default=True,\n ),\n ]\n","sub_path":"aestudiar/api_aestudiar/migrations/0005_auto_20141101_0350.py","file_name":"0005_auto_20141101_0350.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"427037010","text":"# ----------------------------------------------------------------------\n#\n# Brad T. Aagaard, U.S. Geological Survey\n# Charles A. Williams, GNS Science\n# Matthew G. 
Knepley, University at Buffalo\n#\n# This code was developed as part of the Computational Infrastructure\n# for Geodynamics (http://geodynamics.org).\n#\n# Copyright (c) 2010-2022 University of California, Davis\n#\n# See LICENSE.md for license information.\n#\n# ----------------------------------------------------------------------\n#\n# @file tests/fullscale/viscoelasticity/nofaults-3d/axialstrain_genmaxwell_soln.py\n#\n# @brief Analytical solution to axial strain relaxation problem for a generalized Maxwell viscoelastic material.\n#\n# 3-D axial strain solution for linear generalized Maxwell viscoelastic material.\n#\n# Uz=U0\n# ----------\n# | |\n# Ux=0 | | Ux=0\n# | |\n# | |\n# ----------\n# Uz=0\n#\n# Dirichlet boundary conditions\n# Ux(-4000,y,z) = 0\n# Ux(+4000,y,z) = 0\n# Uy(x,-4000,z) = 0\n# Uy(x,+4000,z) = 0\n# Uz(x,y,-8000) = 0\n# Uz(x,y,+0) = U0\n#\n\nimport numpy\n\n# Physical properties.\np_density = 2500.0\np_vs = 3464.1016\np_vp = 6000.0\np_viscosity_1 = 9.46728e17\np_viscosity_2 = 4.73364e17\np_viscosity_3 = 1.893456e18\np_shear_ratio_1 = 0.25\np_shear_ratio_2 = 0.25\np_shear_ratio_3 = 0.25\n\n# Applied displacement.\nU0 = 1.0\n\n# Derived properties.\np_mu = p_density*p_vs*p_vs\np_lambda = p_density*p_vp*p_vp - 2.0*p_mu\np_youngs = p_mu*(3.0*p_lambda + 2.0*p_mu)/(p_lambda + p_mu)\np_poissons = 0.5*p_lambda/(p_lambda + p_mu)\np_shear_ratio_0 = 1.0 - p_shear_ratio_1 - p_shear_ratio_2 - p_shear_ratio_3\np_tau_1 = p_viscosity_1/(p_mu*p_shear_ratio_1)\np_tau_2 = p_viscosity_2/(p_mu*p_shear_ratio_2)\np_tau_3 = p_viscosity_3/(p_mu*p_shear_ratio_3)\n\n# Time information.\nyear = 60.0*60.0*24.0*365.25\ndt = 0.025*year\nstartTime = dt\nendTime = 0.5*year\nnumSteps = 20\ntimeArray = numpy.linspace(startTime, endTime, num=numSteps, dtype=numpy.float64)\n\n# Uniform strain field (3D).\ne0 = U0/8000.0\nexx = numpy.zeros(numSteps, dtype=numpy.float64)\neyy = numpy.zeros(numSteps, dtype=numpy.float64)\nezz = e0*numpy.ones(numSteps, dtype=numpy.float64)\nexy = numpy.zeros(numSteps, dtype=numpy.float64)\neyz = numpy.zeros(numSteps, dtype=numpy.float64)\nexz = numpy.zeros(numSteps, dtype=numpy.float64)\n\n# Deviatoric strains.\neMean = (exx + eyy + ezz)/3.0\neDevxx = exx - eMean\neDevyy = eyy - eMean\neDevzz = ezz - eMean\neDevxy = exy\neDevyz = eyz\neDevxz = exz\n\n# Deviatoric stresses.\ntimeFac1 = numpy.exp(-timeArray/p_tau_1)\ntimeFac2 = numpy.exp(-timeArray/p_tau_2)\ntimeFac3 = numpy.exp(-timeArray/p_tau_3)\nsDevxx = 2.0*p_mu*eDevxx*(p_shear_ratio_0 + p_shear_ratio_1*timeFac1 + p_shear_ratio_2*timeFac2 + p_shear_ratio_3*timeFac3)\nsDevyy = 2.0*p_mu*eDevyy*(p_shear_ratio_0 + p_shear_ratio_1*timeFac1 + p_shear_ratio_2*timeFac2 + p_shear_ratio_3*timeFac3)\nsDevzz = 2.0*p_mu*eDevzz*(p_shear_ratio_0 + p_shear_ratio_1*timeFac1 + p_shear_ratio_2*timeFac2 + p_shear_ratio_3*timeFac3)\nsDevxy = numpy.zeros_like(sDevxx)\nsDevyz = numpy.zeros_like(sDevxx)\nsDevxz = numpy.zeros_like(sDevxx)\n\n# Total stresses.\nsMean = e0*(3.0*p_lambda + 2.0*p_mu)/3.0\nsxx = sDevxx + sMean\nsyy = sDevyy + sMean\nszz = sDevzz + sMean\nsxy = sDevxy\nsyz = sDevyz\nsxz = sDevxz\n\n# Get viscous strains from initial deviatoric strains (strain rate = 0).\neVisxx_1 = eDevxx*timeFac1\neVisyy_1 = eDevyy*timeFac1\neViszz_1 = eDevzz*timeFac1\neVisxy_1 = eDevxy\neVisyz_1 = eDevyz\neVisxz_1 = eDevxz\neVisxx_2 = eDevxx*timeFac2\neVisyy_2 = eDevyy*timeFac2\neViszz_2 = eDevzz*timeFac2\neVisxy_2 = eDevxy\neVisyz_2 = eDevyz\neVisxz_2 = eDevxz\neVisxx_3 = eDevxx*timeFac3\neVisyy_3 = eDevyy*timeFac3\neViszz_3 = 
eDevzz*timeFac3\neVisxy_3 = eDevxy\neVisyz_3 = eDevyz\neVisxz_3 = eDevxz\n\n# ----------------------------------------------------------------------\nclass AnalyticalSoln(object):\n \"\"\"Analytical solution to axial extension problem.\n \"\"\"\n SPACE_DIM = 3\n TENSOR_SIZE = 6\n\n def __init__(self):\n self.fields = {\n \"displacement\": self.displacement,\n \"density\": self.density,\n \"shear_modulus\": self.shear_modulus,\n \"bulk_modulus\": self.bulk_modulus,\n \"shear_modulus_ratio\": self.shear_modulus_ratio,\n \"maxwell_time\": self.maxwell_time,\n \"cauchy_strain\": self.strain,\n \"cauchy_stress\": self.stress,\n \"viscous_strain\": self.viscous_strain,\n \"initial_amplitude\": self.initial_displacement,\n }\n return\n\n def getField(self, name, mesh_entity, pts):\n field = self.fields[name](pts)\n return field\n\n def displacement(self, locs):\n \"\"\"Compute displacement field at locations.\n \"\"\"\n (npts, dim) = locs.shape\n disp = numpy.zeros((numSteps, npts, self.SPACE_DIM), dtype=numpy.float64)\n disp[:,:, 2] = numpy.dot(ezz.reshape(numSteps, 1), (locs[:, 2] + 8000.0).reshape(1, npts))\n above = numpy.where(locs[:, 2] > -0.1)\n below = numpy.where(locs[:, 2] < -7999.9)\n disp[:, above, 2] = U0\n disp[:, below, 2] = 0.0\n return disp\n\n def initial_displacement(self, locs):\n \"\"\"Compute initial displacement field at locations.\n \"\"\"\n (npts, dim) = locs.shape\n disp = numpy.zeros((1, npts, self.SPACE_DIM), dtype=numpy.float64)\n disp[0,:, 2] = e0*(locs[:, 2] + 8000.0).reshape(1, npts)\n above = numpy.where(locs[:, 2] > -0.1)\n below = numpy.where(locs[:, 2] < -7999.9)\n disp[0, above, 2] = U0\n disp[0, below, 2] = 0.0\n return disp\n\n def density(self, locs):\n \"\"\"Compute density field at locations.\n \"\"\"\n (npts, dim) = locs.shape\n density = p_density * numpy.ones((1, npts, 1), dtype=numpy.float64)\n return density\n\n def shear_modulus(self, locs):\n \"\"\"Compute shear modulus field at locations.\n \"\"\"\n (npts, dim) = locs.shape\n shear_modulus = p_mu * numpy.ones((1, npts, 1), dtype=numpy.float64)\n return shear_modulus\n\n def bulk_modulus(self, locs):\n \"\"\"Compute bulk modulus field at locations.\n \"\"\"\n (npts, dim) = locs.shape\n bulk_modulus = (p_lambda + 2.0 / 3.0 * p_mu) * numpy.ones((1, npts, 1), dtype=numpy.float64)\n return bulk_modulus\n\n def maxwell_time(self, locs):\n \"\"\"Compute Maxwell time field at locations.\n \"\"\"\n (npts, dim) = locs.shape\n maxwell_time = numpy.zeros((1, npts, 3), dtype=numpy.float64)\n maxwell_time[0,:, 0] = p_tau_1\n maxwell_time[0,:, 1] = p_tau_2\n maxwell_time[0,:, 2] = p_tau_3\n return maxwell_time\n\n def shear_modulus_ratio(self, locs):\n \"\"\"Compute shear modulus ratio field at locations.\n \"\"\"\n (npts, dim) = locs.shape\n shear_modulus_ratio = numpy.zeros((1, npts, 3), dtype=numpy.float64)\n shear_modulus_ratio[0,:, 0] = p_shear_ratio_1\n shear_modulus_ratio[0,:, 1] = p_shear_ratio_2\n shear_modulus_ratio[0,:, 2] = p_shear_ratio_3\n return shear_modulus_ratio\n\n def strain(self, locs):\n \"\"\"Compute strain field at locations.\n \"\"\"\n (npts, dim) = locs.shape\n strain = numpy.zeros((numSteps, npts, self.TENSOR_SIZE), dtype=numpy.float64)\n strain[:,:, 0] = exx.reshape(numSteps, 1)\n strain[:,:, 1] = eyy.reshape(numSteps, 1)\n strain[:,:, 2] = ezz.reshape(numSteps, 1)\n strain[:,:, 3] = exy.reshape(numSteps, 1)\n strain[:,:, 4] = eyz.reshape(numSteps, 1)\n strain[:,:, 5] = exz.reshape(numSteps, 1)\n return strain\n\n def stress(self, locs):\n \"\"\"Compute stress field at locations.\n 
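        Editor's note (illustrative addition, not from the original file):
        the arrays returned here realize sigma_ij = sDev_ij + sMean*delta_ij,
        i.e. the relaxing deviatoric stresses computed at module level plus
        the constant volumetric part sMean = e0*(3*lambda + 2*mu)/3.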
\"\"\"\n (npts, dim) = locs.shape\n stress = numpy.zeros((numSteps, npts, self.TENSOR_SIZE), dtype=numpy.float64)\n stress[:,:, 0] = sxx.reshape(numSteps, 1)\n stress[:,:, 1] = syy.reshape(numSteps, 1)\n stress[:,:, 2] = szz.reshape(numSteps, 1)\n stress[:,:, 3] = sxy.reshape(numSteps, 1)\n stress[:,:, 4] = syz.reshape(numSteps, 1)\n stress[:,:, 5] = sxz.reshape(numSteps, 1)\n return stress\n\n def viscous_strain(self, locs):\n \"\"\"Compute viscous strain field at locations.\n \"\"\"\n (npts, dim) = locs.shape\n viscous_strain = numpy.zeros((numSteps, npts, 3*self.TENSOR_SIZE), dtype=numpy.float64)\n viscous_strain[:,:, 0] = eVisxx_1.reshape(numSteps, 1)\n viscous_strain[:,:, 1] = eVisyy_1.reshape(numSteps, 1)\n viscous_strain[:,:, 2] = eViszz_1.reshape(numSteps, 1)\n viscous_strain[:,:, 3] = eVisxy_1.reshape(numSteps, 1)\n viscous_strain[:,:, 4] = eVisyz_1.reshape(numSteps, 1)\n viscous_strain[:,:, 5] = eVisxz_1.reshape(numSteps, 1)\n viscous_strain[:,:, 6] = eVisxx_2.reshape(numSteps, 1)\n viscous_strain[:,:, 7] = eVisyy_2.reshape(numSteps, 1)\n viscous_strain[:,:, 8] = eViszz_2.reshape(numSteps, 1)\n viscous_strain[:,:, 9] = eVisxy_2.reshape(numSteps, 1)\n viscous_strain[:,:, 10] = eVisyz_2.reshape(numSteps, 1)\n viscous_strain[:,:, 11] = eVisxz_2.reshape(numSteps, 1)\n viscous_strain[:,:, 12] = eVisxx_3.reshape(numSteps, 1)\n viscous_strain[:,:, 13] = eVisyy_3.reshape(numSteps, 1)\n viscous_strain[:,:, 14] = eViszz_3.reshape(numSteps, 1)\n viscous_strain[:,:, 15] = eVisxy_3.reshape(numSteps, 1)\n viscous_strain[:,:, 16] = eVisyz_3.reshape(numSteps, 1)\n viscous_strain[:,:, 17] = eVisxz_3.reshape(numSteps, 1)\n return viscous_strain\n\n\n# End of file\n","sub_path":"tests/fullscale/viscoelasticity/nofaults-3d/axialstrain_genmaxwell_soln.py","file_name":"axialstrain_genmaxwell_soln.py","file_ext":"py","file_size_in_byte":9544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"300296778","text":"# %%\nfrom datetime import datetime\n\nimport cv2\nimport h5py\nimport numpy as np\nfrom keras import Sequential\nfrom keras.layers import MaxPooling2D, TimeDistributed, Conv2D, Flatten, Dense\nfrom keras.losses import Huber\nfrom keras.optimizers import Adam\nfrom tqdm import tqdm\n\n# %%\nsample_per_second = 2\n# Frame per second\nfps = 20\n# Blood volume signal reading per second\nrds = 256\n# Path to pickled data file\npath_to_hdf5 = \"data/data_in/data.hdf5\"\npath_to_video = \"data/data_in/data.avi\"\n\nsignal_sampling_rate = rds // sample_per_second\nframe_sampling_rate = fps // sample_per_second\n\nvideo_capture = cv2.VideoCapture(path_to_video)\nn_frames = int(video_capture.get(cv2.CAP_PROP_FRAME_COUNT))\n\nresolution = (int(video_capture.get(3)),\n int(video_capture.get(4)))\n\nhdf5_file = h5py.File(path_to_hdf5, 'r')\nbvp_raw = np.array(hdf5_file['pulse'])\n\nsampled_frames_buffer = []\n\nfor frame_number in tqdm(range(n_frames)):\n ret, frame = video_capture.read()\n if frame_number % frame_sampling_rate == 0:\n sampled_frames_buffer.append(frame)\n\nvideo_capture.release()\ncv2.destroyAllWindows()\n\nprint(f\"Sampled {len(sampled_frames_buffer)} frames out of {n_frames}.\")\n\nsampled_frames_buffer = np.array(sampled_frames_buffer, dtype=np.float32)\nbvp_raw_sampled = bvp_raw[::signal_sampling_rate]\n\nx_train = sampled_frames_buffer\ny_train = bvp_raw_sampled\n\nassert x_train.shape[0] == y_train.shape[0], f\"Shapes not matching!\"\n\n# %%\n\nmodel = Sequential()\n\nmodel.add(TimeDistributed(Conv2D(64, (3, 3), padding='same', 
strides=(2, 2), activation='relu'),\n input_shape=(32, 121, 480, 640, 3)))\n\nmodel.add(\n TimeDistributed(\n Conv2D(64, (3, 3),\n padding='same', strides=(2, 2), activation='relu')\n )\n)\nmodel.add(\n TimeDistributed(\n MaxPooling2D((2, 2), strides=(2, 2))\n )\n)\n# Second conv, 128\nmodel.add(\n TimeDistributed(\n Conv2D(128, (3, 3),\n padding='same', strides=(2, 2), activation='relu')\n )\n)\nmodel.add(\n TimeDistributed(\n Conv2D(128, (3, 3),\n padding='same', strides=(2, 2), activation='relu')\n )\n)\nmodel.add(\n TimeDistributed(\n MaxPooling2D((2, 2), strides=(2, 2))\n )\n)\n\nmodel.add(Flatten())\nmodel.add(Dense(1))\n\nadam = Adam(lr=0.0001)\nmodel.compile(optimizer=adam, loss=Huber(), metrics=['mae'])\nhistory = model.fit(x_train, y_train, epochs=100)\nmodel.save(f\"model_{datetime.now().strftime('%Y%m%d-%H%M%S')}\")\nhf = h5py.File('history_data.h5', 'w')\nhf.create_dataset('history', data=history)\nhf.close()\n","sub_path":"2d_convolution_time_distributed.py","file_name":"2d_convolution_time_distributed.py","file_ext":"py","file_size_in_byte":2550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}\n{"seq_id":"262798133","text":"# -*- coding: utf-8 -*-\nimport numpy as np\nimport scipy, scipy.spatial\n\nimport matplotlib.pyplot as plt\n\n#\tReference made to http://stanford.edu/~cpiech/cs221/handouts/kmeans.html\n\n\ndef stoppingCondition(centroids, oldCentroids, iterations):\n\tif (iterations == 10000):\n\t\treturn True\n\treturn np.allclose(centroids, oldCentroids,1.0)\n\ndef getRandomCenters(data,k):\n\titern = 0\n\tcenters = []\n\twhile(itern < k):\n\t\tidx = np.random.randint(0,data.shape[0])\n\t\tcenters.append(data[idx])\n\t\titern += 1\n\n\treturn np.array(centers)\n\ndef getLabels(data, centroids):\n\tdata_array = np.array(data)\n\tcentroid_array = np.array(centroids)\n\ttree = scipy.spatial.cKDTree(centroid_array)\n\t_, indices = tree.query(data_array)\n\treturn indices\n\n\ndef getCentroids(data,labels,k):\n\ttheoretical_centers = []\n\tcluster_sum_x = {}\n\tcluster_sum_y = {}\n\tcluster_sum_z = {}\n\tfor i in range(0,k):\n\t\tcluster_sum_x[str(i)] = []\n\t\tcluster_sum_y[str(i)] = []\n\t\tcluster_sum_z[str(i)] = []\n\tdata_idx = 0\n\tfor idx in labels:\n\t\tcluster_sum_x[str(idx)].append(data[data_idx][0])\n\t\tcluster_sum_y[str(idx)].append(data[data_idx][1])\n\t\tcluster_sum_z[str(idx)].append(data[data_idx][2])\n\t\tdata_idx += 1\n\n\n\tfor j in range(0,k):\n\t\ttheoretical_centers.append(\n\t\t\t[sum(cluster_sum_x[str(j)])/len(cluster_sum_x[str(j)]),\n\t\t\tsum(cluster_sum_y[str(j)])/len(cluster_sum_y[str(j)]),\n\t\t\tsum(cluster_sum_z[str(j)])/len(cluster_sum_z[str(j)]),\n\t\t\t])\n\n\ttree = scipy.spatial.cKDTree(np.array(data))\n\t_,indices = tree.query(np.array(theoretical_centers))\n\treturn np.array(data)[indices]\n\ndef my_kmeans(data, k):\n # \n # The variable 'data' contains data points in its rows.\n # Initialize 'k' centers randomly. Afterwards apply Lloyd's algorithm\n # until convergence to get a solution of the k-means problem.
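    # --- Editor's illustrative sketch (not part of the original file):
    # one Lloyd iteration, assuming 'data' is an (n, d) array and
    # 'centers' currently holds k center rows. Note that the code above
    # additionally snaps each mean back to its nearest data point with a
    # second cKDTree query.
    #
    #     tree = scipy.spatial.cKDTree(centers)
    #     _, labels = tree.query(data)                # assignment step
    #     centers = np.array([data[labels == j].mean(axis=0)
    #                         for j in range(k)])     # update (mean) step
    #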
Utilize\n # 'scipy.spatial.cKDTree' for nearest neighbor computations.\n #\n # \n\n #Randomly initialize cluster centroids\n\n centers = getRandomCenters(data,k)\n iterations = 0\n oldCenters = np.zeros(centers.shape)\n while not stoppingCondition(centers,oldCenters,iterations):\n iterations += 1\t\t#iterate\n oldCenters = centers \t#set oldcentroids to current set of centroids\n index = getLabels(data, centers) #gets the indices pointing to the closest centroids\n centers = getCentroids(data, index,k)\n \n return index,centers\n\n\n","sub_path":"Sheet6/my_kmeans.py","file_name":"my_kmeans.py","file_ext":"py","file_size_in_byte":2353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"202341018","text":"from excelerator import exceptions\nfrom excelerator.main.tableparser import TableParser\nimport pytest\n\n# --- errors during instantiation ---------------------------------------------\n\n\n@pytest.mark.parametrize('io', [\n (None, None),\n (-1, ValueError),\n (0, ValueError),\n (1,None),\n (5,None),\n ('a', TypeError),\n (4.0, TypeError),\n])\ndef test_bad_row_num(io):\n header_row_num, expected_error = io\n try:\n TableParser(\n header_row_num=header_row_num,\n worksheet=\"does not matter\"\n )\n actual_error = None\n except ValueError:\n actual_error = ValueError\n except TypeError:\n actual_error = TypeError\n assert actual_error == expected_error\n\n\n# --- path and sheetname errors -----------------------------------------------\n\ndef test_table_reader_invalid_extension(fixture_path):\n\n # GIVEN a file path with an invalid (i.e. not excel) extension...\n path_with_incorrect_extension = fixture_path.parent / 'not_excel.docx'\n\n # WHEN user try to read from that file path...\n tr = TableParser(\n path=path_with_incorrect_extension,\n worksheet='does not matter'\n )\n try:\n tr.get_fields()\n\n # THEN raise an error\n except exceptions.ExceleratorError:\n return # i.e. PASS the test\n assert False\n\n\ndef test_table_reader_missing_worksheet(fixture_path):\n\n # GIVEN an Excel worksheet that doesn't contain the desired worksheet name...\n missing_ws_name = 'missing_ws'\n\n # WHEN user tries to read from this non-existent worksheet...\n tr = TableParser(\n path=fixture_path,\n worksheet=missing_ws_name,\n )\n try:\n tr.get_fields()\n\n # THEN raise an error\n except exceptions.ExceleratorError:\n return # i.e. 
PASS the test\n assert False\n","sub_path":"tests/test_exceptions.py","file_name":"test_exceptions.py","file_ext":"py","file_size_in_byte":1809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}\n{"seq_id":"547304478","text":"import numpy as np\nfrom .augmentor import DataAugment\nfrom scipy import ndimage\n\nclass Blur(DataAugment):\n \"\"\"\n Apply blur transformation, to augment for defocused images.\n \"\"\"\n\n def __init__(self, min_sigma=2, max_sigma=8, min_slices=1, max_slices=4, p=0.5):\n \"\"\"Initialize parameters.\n Args:\n sigma is the std of the Gaussian Kernel\n \"\"\"\n super(Blur, self).__init__(p=p)\n self.min_sigma = min_sigma\n self.max_sigma = max_sigma\n self.min_slices = min_slices\n self.max_slices = max_slices\n\n def set_params(self):\n # No change in sample size\n pass\n\n def blur(self, img, slices, sigma):\n \"\"\"\n blur image\n \"\"\"\n for z in slices:\n img[z] = ndimage.filters.gaussian_filter(img[z], sigma=sigma)\n\n return img\n\n def __call__(self, data, random_state):\n if random_state is None:\n random_state = np.random.RandomState()\n img = data['image']\n sigma = random_state.randint(low=self.min_sigma, high=self.max_sigma + 1, dtype=np.uint8)\n n_slices = random_state.randint(low=self.min_slices, high=self.max_slices + 1, dtype=np.uint8)\n slices = random_state.randint(low=0, high=img.shape[0], size=n_slices, dtype=np.uint8)\n uniq_slices = np.unique(slices)\n # print('Slices: ', slices)\n # print('Sigma: ', sigma)\n data['image'] = self.blur(img, uniq_slices, sigma)\n return data\n\n","sub_path":"torch_connectomics/data/augmentation/blur.py","file_name":"blur.py","file_ext":"py","file_size_in_byte":1490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}\n{"seq_id":"220981529","text":"import tensorflow as tf\n#import matplotlib.pyplot as plt\n\nx_data = [1, 2, 3]\ny_data = [1, 2, 3]\n\nW = tf.Variable(tf.random_normal([1]), name='weight')\nX = tf.placeholder(tf.float32)\nY = tf.placeholder(tf.float32)\n\n#hypothesis\nhypothesis = X * W\n\n#cost/loss func.\ncost = tf.reduce_mean(tf.square(hypothesis - Y))\n\n#minimize1: update formula when applying the derivative worked out earlier by hand\nlearning_rate = 0.1\ngradient = tf.reduce_mean((W * X - Y)*X)\ndescent = W - learning_rate * gradient\nupdate = W.assign(descent)\n\n#session\nsess = tf.Session()\n\nsess.run(tf.global_variables_initializer())\n\nW_val = []\ncost_val = []\n\n\nfor step in range (21):\n sess.run(update, feed_dict={X: x_data, Y: y_data})\n print(step, sess.run(cost, feed_dict={X: x_data, Y: y_data}), sess.run(W))\n\n#show graph to me!\n#plt.plot(W_val, cost_val)\n#plt.show()\n","sub_path":"lec03/CostMinimize_update.py","file_name":"CostMinimize_update.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}\n{"seq_id":"330884596","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 27 23:12:04 2019\n\n@author: yoelr\n\"\"\"\nimport biosteam as bst\nimport biosteam.compounds as cp\nfrom biorefineries.lipidcane import species as lcspecies\nimport pandas as pd\nfrom copy import copy as _copy\n\n__all__ = ('species', 'get_grouped_species', 'species_groups')\n\n# %% Constants\n\n# Common structural carbohydrate properties\n\n# Assume heat capacity of lignin, cellulose, and hemicellulose\n# and all components at 350 K are about the same [2,2].\nCp_cellulosic = 1.364\n\n# Heats of combustion of lignocellulosic material are\n# based on wood as an approximation
[4]. Also, assume\n# structural carbohydrates have the same heat of combustion\n# as cellulose.\n# These properties match NREL's\nHc_lignin = 21e3 # (J/g)\nHc_cellulosic = 17000 # (J/g)\ncal2joule = 4.184\n\n# %% Initialize species object and define functions\n\nlcspecies = bst.Species.tospecies([*lcspecies.biodiesel_species,\n *lcspecies.ethanol_species,\n *lcspecies.pretreatment_species])\n\nsp = bst.Species()\n\ndef remove(ID):\n try: missing.remove(ID)\n except ValueError: raise ValueError(f\"'{ID}' not a required species\")\n\ndef addspecies(*IDs, cls=None, **kwargs):\n if cls is None: cls = bst.Chemical\n sp.extend([cls(i, **kwargs) for i in IDs])\n for ID in IDs: remove(ID)\n\ndef addchemical(ID, ChemID=None, phase=None, **properties): \n setattr(sp, ID, cp.StaticChemical(ChemID or ID, phase=phase, **properties))\n remove(ID)\n \ndef addsubstance(ID, *args, **kwargs):\n setattr(sp, ID, cp.Substance(ID, *args, **kwargs))\n remove(ID)\n\ndef addfrom(species, ID, species_ID=None):\n if not species_ID: species_ID = ID\n cmp = _copy(getattr(species, species_ID))\n setattr(sp, ID, cmp)\n cmp.ID = species_ID\n remove(ID)\n\ndef copy(ID, ChemID):\n cmp = _copy(getattr(sp, ChemID))\n setattr(sp, ID, cmp)\n cmp.ID = ChemID\n remove(ID)\n\n# Species to define\nspecies_IDs = [\n 'Water', 'Ethanol', 'Glucose', 'Galactose',\n 'Mannose', 'Xylose', 'Arabinose', 'Cellobiose',\n 'Sucrose', 'GlucoseOligomer', 'GalactoseOligomer',\n 'MannoseOligomer', 'XyloseOligomer', 'ArabinoseOligomer',\n 'Extract','SolubleLignin','HMF', 'Furfural', 'AceticAcid',\n 'LacticAcid', 'Xylitol', 'Glycerol', 'SuccinicAcid',\n 'NH3', 'H2SO4', 'NH4SO4', 'AmmoniumAcetate', 'DAP',\n 'HNO3', 'NaNO3', 'NaOH', 'CellulaseNutrients',\n 'Denaturant', 'Oil', 'Cellulose', 'Galactan', 'Mannan', 'Glucan',\n 'Xylan', 'Arabinan', 'Lignin', 'Acetate', 'Protein',\n 'Ash', 'Enzyme', 'DenaturedEnzyme', 'Z_mobilis', 'T_reesei',\n 'Biomass', 'Tar', 'CaO', 'CaSO4', 'Graphite', 'N2', 'O2', 'CO2',\n 'CH4', 'H2S', 'SO2', 'NO', 'CO', 'AmmoniumSulfate', 'NO2', 'CSL',\n 'WWTsludge', 'Cellulase'\n]\nmissing = species_IDs.copy()\n\n# %% Define species\n\n# TODO: Add heats of combustion\n\n# As is in data bank\naddspecies('Water', 'Ethanol', 'AceticAcid', 'Furfural', 'Glycerol',\n 'H2SO4', 'LacticAcid', 'SuccinicAcid') \nsp.SuccinicAcid.Hf = -940.26e3 # kJ/mol\nsp.LacticAcid.T = sp.LacticAcid.Tb = 122+273.15\nsp.LacticAcid.H_int_l_T_ref_l_to_Tb = sp.LacticAcid.H\nsp.LacticAcid.Hvap_Tbm = sp.LacticAcid.Hvap\nsp.LacticAcid.Hf = -163122*cal2joule\naddchemical('HNO3', 'NitricAcid')\naddchemical('Denaturant', 'Octane')\naddchemical('DAP', 'Diammonium Phosphate')\naddchemical('AmmoniumAcetate')\naddchemical('NH4SO4', 'AmmoniumSulfate')\naddchemical('NaNO3', 'SodiumNitrate')\naddchemical('Oil', 'Oleic Acid', phase='l')\naddchemical('HMF')\n\n# Will remain in the vapor phase\naddspecies('N2', 'NH3', 'O2', 'CH4', 'H2S', 'SO2', cls=cp.StaticChemical)\naddchemical('CO2', phase='g')\n\n# Analagous vapors\naddsubstance('NO2', MW=46.01, obj=sp.N2, Hf=7925*cal2joule)\naddsubstance('NO', MW=30.01, obj=sp.N2, Hf=82.05)\naddsubstance('CO', MW=28.01, obj=sp.N2, Hf=-110.522)\n\n# Will remain as solid\naddspecies('Glucose', 'Xylose', 'Sucrose',\n cls=cp.StaticChemical, Cp=Cp_cellulosic)\nsp.Glucose.Hf = -300428*cal2joule\nsp.Xylose.Hf = -249440*cal2joule\nsp.Sucrose.Hf = -480900*cal2joule\naddspecies('CaSO4', 'Graphite', 'AmmoniumSulfate',\n cls=cp.StaticChemical, Cp=Cp_cellulosic)\n\n# Analagous sugars\ncopy('Mannose', 'Glucose')\ncopy('Galactose', 
'Glucose')\ncopy('Arabinose', 'Xylose')\n\n# Other analogues\ncopy('CellulaseNutrients', 'Glucose')\ncopy('Extract', 'Glucose')\ncopy('Acetate', 'AceticAcid')\nsp.Acetate.Hf = -103373\ncopy('Tar', 'Xylose')\n\n# Species taken from previous study\naddfrom(lcspecies, 'CaO')\naddfrom(lcspecies, 'Ash')\naddfrom(lcspecies, 'NaOH')\naddsubstance('Lignin', Cp=Cp_cellulosic, Hf=-108248*cal2joule, MW=152.15)\ncopy('SolubleLignin', 'Lignin')\n\n# Create structural carbohydrates\naddsubstance('GlucoseOligomer', obj=sp.Glucose, Cp=Cp_cellulosic,\n MW=162.1424, Hf=-233200*cal2joule)\ncopy('GalactoseOligomer', 'GlucoseOligomer')\ncopy('MannoseOligomer', 'GlucoseOligomer')\naddsubstance('XyloseOligomer', obj=sp.Xylose, Cp=Cp_cellulosic,\n MW=132.11612, Hf=-182100*cal2joule)\ncopy('ArabinoseOligomer', 'XyloseOligomer')\n\n# Other\naddsubstance('Z_mobilis', MW=24.6265, Hf=-31169.39*cal2joule)\naddsubstance('T_reesei', MW=23.8204, Hf=-23200.01*cal2joule)\naddsubstance('Biomass', MW=23.238, Hf=-23200.01*cal2joule)\naddsubstance('Cellulose', MW=162.1406, Hf=-233200.06*cal2joule, Hc=Hc_cellulosic)\naddsubstance('Protein', MW=22.8396, Hf=-17618*cal2joule)\naddsubstance('Enzyme', MW=24.0156, Hf=-17618*cal2joule)\naddsubstance('Glucan', MW=162.14, Hf=-233200*cal2joule)\naddsubstance('Xylan', MW=132.12, Hf=-182100*cal2joule)\naddsubstance('Xylitol', MW=152.15, Hf=-243145*cal2joule)\naddsubstance('Cellobiose', MW=342.30, Hf=-480900*cal2joule)\naddsubstance('CSL', MW=1, Hf=sp.Protein.Hf/4+sp.Water.Hf/2+sp.LacticAcid.Hf/4)\ncopy('DenaturedEnzyme', 'Enzyme')\ncopy('Arabinan', 'Xylan')\ncopy('Mannan', 'Glucan')\ncopy('Galactan', 'Glucan')\n\n# %% TODO: Maybe remove this\n\n# missing.extend([\n# 'OtherSugars', 'SugarOligomers', \n# 'InorganicSolubleSolids',\n# 'Furfurals', 'OtherOrganics',\n# 'OtherOrganics', 'OtherOrganics',\n# 'COxSOxNOxH2S', 'OtherStructuralCarbs',\n# 'CellMass',\n# 'OtherInsolubleSolids',\n# 'OrganicSolubleSolids',\n# ])\n# copy('OtherSugars', 'Arabinose')\n# copy('SugarOligomers', 'GlucoseOligomer')\n# copy('OrganicSolubleSolids', 'SolubleLignin')\n# copy('InorganicSolubleSolids', 'DAP')\n# copy('Furfurals', 'HMF')\n# copy('OtherOrganics', 'Denaturant')\n# copy('COxSOxNOxH2S', 'NO')\n# copy('OtherStructuralCarbs', 'Arabinan')\n\n# For waste water\ncopy('WWTsludge', 'Z_mobilis')\ncopy('Cellulase', 'Enzyme')\n# copy('OtherInsolubleSolids', 'Tar')\nbst.Stream.species = species = sp\n\n# %% Grouped species\n\nspecies_groups = dict(\n OtherSugars = ['Arabinose',\n 'Mannose',\n 'Galactose',\n 'Cellobiose',\n 'Sucrose'],\n SugarOligomers = ['GlucoseOligomer',\n 'XyloseOligomer',\n 'GalactoseOligomer',\n 'ArabinoseOligomer',\n 'MannoseOligomer'],\n OrganicSolubleSolids = ['AmmoniumAcetate',\n 'SolubleLignin',\n 'Extract', \n 'LacticAcid', \n 'Cellulase'],\n InorganicSolubleSolids = ['AmmoniumSulfate',\n 'DAP',\n 'NaOH',\n 'HNO3',\n 'NaNO3'],\n Furfurals = ['Furfural',\n 'HMF'],\n OtherOrganics = ['Glycerol',\n 'Denaturant',\n 'Oil',\n 'SuccinicAcid',\n 'Xylitol'],\n COxSOxNOxH2S = ['NO',\n 'NO2',\n 'SO2',\n 'CO',\n 'H2S'],\n Protein = ['Protein',\n 'Enzyme',\n 'DenaturedEnzyme'],\n CellMass = ['WWTsludge',\n 'Z_mobilis',\n 'T_reesei'],\n OtherInsolubleSolids = ['Tar',\n 'Ash',\n 'Graphite',\n 'Lime'],\n OtherStructuralCarbohydrates = ['Arabinan', \n 'Mannan', \n 'Galactan']\n)\n\ndef get_grouped_species(stream, units='kmol/hr'):\n s = bst.Stream(species=species)\n s.setflow(flow=stream.mol, species=stream.species.IDs)\n return pd.Series({i:s.getflow(*j, units=units).sum() for i, j in 
species_groups.items()})\n\nassert not missing, str(missing)\n\n# Fix sugar properties\n\n\n# cellulosic = ('Glucan', 'Xylan', 'GlucoseOligomer','XyloseOligomer',\n# 'Acetate', 'SolubleLignin')\n\n","sub_path":"BioSTEAM 1.x.x/build/lib/biorefineries/cornstover/species.py","file_name":"species.py","file_ext":"py","file_size_in_byte":8746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"210573975","text":"import random\nfrom functools import wraps\n\nfrom app import app\nfrom app.config import CACHE_GLOBAL_PREFIX\nfrom app.utils.redis_helper import redis_manage\n\n\nclass CachePrefix:\n API_ADDRESS_BASE_LOC = 'address/baseLoc/'\n API_VER_CODE = 'verCode/'\n API_SALT = 'salt/'\n API_SID = 'sid/'\n API_TASK_DEVICE = 'taskDevice/'\n API_FREE_REGISTER = 'free/'\n API_DEVICE_UPGRADE = 'dUpgrade/'\n\n DB_VERSION_COUNT = 'versionCount/'\n DB_CDC_COUNT = 'cdcCount/'\n DB_VERSION_LIST = 'dvList/'\n DB_VERSION_ITEM = 'dvItem/'\n DB_DEVICE_CMD = 'deviceCmd/'\n DB_QR_GEN = 'qrGen/'\n DB_DEVICE = 'device/'\n DB_DEVICE_COUNT = 'deviceCount/'\n DB_DEVICE_CITY_LIST = 'deviceCityList/'\n DB_DEVICE_TYPE_MAP = 'deviceTypeMap/'\n DB_DEVICE_CHAN_CITY_LIST = 'deviceChanCityList/'\n DB_DEVICE_TYPE = 'deviceType/'\n DB_DEVICE_TYPE_AUTH = 'deviceTypeAuth/'\n DB_DEVICE_DEFINE = 'deviceDefine/'\n DB_DEVICE_CHANNEL = 'deviceChannel/'\n DB_DEVICE_CHANNEL_LIST = 'dcList/'\n DB_DEVICE_CHANNEL_MAP = 'dcMap/'\n DB_DEVICE_TYPE_DICT = 'deviceTypeDict/'\n DB_DEVICE_ALIAS = 'deviceAlias/'\n DB_CHAN_DEVICE_TYPE = 'chanDeviceType/'\n DB_USER_CMD = 'udCmd/'\n DB_USER_CMD_LIST = 'udCmdList/'\n DB_DEVICE_CHAN_BIND = 'deviceChanBind/'\n DB_MONITOR_LIST = 'monitorList/'\n DB_MONITOR_NEW_LIST = 'monitorNewList/'\n DB_DEVICE_DISCOUNT = 'deviceDiscount/'\n DB_DEVICE_ACTION = 'deviceAction/'\n DB_ORDER = 'order/'\n DB_ORDER_COUNT = 'orderCount/'\n DB_USER_LOGIN = 'userLogin/'\n DB_DEVICE_AGENT = 'deviceAgent/'\n DB_PERMISSION = 'permission/'\n DB_PERMISSION_LIST = 'permissionList/'\n DB_PERMISSION_BIND = 'permissionBind/'\n DB_ALARM_TODAY = 'alarmToday/'\n DB_CHAN_AGENT = 'deviceChanAgent/'\n DB_LOC_PROVINCE = 'locProvince/'\n DB_LOC_CITY = 'locCity/'\n DB_TAG = 'tag/'\n DB_DEVICE_TASK = 'task/'\n DB_DEVICE_BUFF = 'buff/'\n DB_GAME_ORDER = 'orderGame/'\n DB_WX_CONFIG = 'wxCfg/'\n DB_HX_PAY_ORDER = 'hxOrderId/'\n DB_WX_CODE = 'wxCode/'\n DB_CONFIG_LOG = 'configLog/'\n DB_TOTAL_INCOME = 'totalIncome/'\n DB_TODAY_INCOME = 'todayIncome/'\n DB_ALL_INCOME = 'allIncome/'\n DB_MONTH_INCOME = 'monthIncome/'\n DB_ALL_FANS = 'allFans/'\n DB_TODAY_FANS = 'todayFans/'\n DB_GRID_INFO = 'gridInfo/'\n DB_PLAYABLE = 'playable/'\n DB_PCD = 'pcd/'\n DB_DEVICE_START = 'deviceStart/'\n DB_GRID_ID_START = 'gridIdStart/'\n DB_CHAN_MONITOR_LIST = 'chanMonitorList/'\n\n\ndef api_cache(prefix='tm/', ignore_first=True, timeout=60, name='', noneable=False, random_timeout=None):\n def decorator(func):\n @wraps(func)\n def wrapper_fun(*args, **kwargs):\n\n none_flag = '!#$ None $#!'\n key_time = random_timeout[0] + random.randint(0, random_timeout[1]) if random_timeout else timeout\n pos_key = '/'.join([str(arg) for arg in args[1 if ignore_first else 0:]])\n kwargs_key = '/'.join([str(kwargs[key]) for key in kwargs])\n cache_key = CACHE_GLOBAL_PREFIX + prefix + pos_key + ('/' + kwargs_key if pos_key else kwargs_key)\n\n if name:\n cache_key += name\n\n cache = app.flask_cache.get(cache_key)\n if noneable and type(cache) is str and cache == none_flag:\n return\n if cache:\n return cache\n\n exe_res = func(*args, 
**kwargs)\n\n if exe_res is not None:\n app.flask_cache.set(cache_key, exe_res, timeout=key_time)\n elif noneable:\n app.flask_cache.set(cache_key, none_flag, timeout=key_time)\n return exe_res\n return wrapper_fun\n return decorator\n\n\ndef clear_api_cache(prefix='tm/', *args, **kwargs):\n\n pos_key = '/'.join([str(arg) for arg in args])\n kwargs_key = '/'.join([str(kwargs[key]) for key in kwargs])\n cache_key = CACHE_GLOBAL_PREFIX + prefix + pos_key + ('/' + kwargs_key if pos_key else kwargs_key)\n\n app.flask_cache.delete(cache_key)\n\n\ndef clear_cache_fuzzy(*fuzzy_keys):\n\n if not fuzzy_keys:\n return\n\n pool = redis_manage.get_redis_pool()\n pipe = pool.pipeline()\n for fk in fuzzy_keys:\n pipe.keys('flask_cache_{}{}*'.format(CACHE_GLOBAL_PREFIX, fk))\n\n find = pipe.execute()\n for line in find:\n for k in line:\n pipe.delete(k.decode())\n pipe.execute()\n\n\ndef get(cache_key):\n return app.flask_cache.get(CACHE_GLOBAL_PREFIX + cache_key)\n\n\ndef save(cache_key, data, timeout=50):\n return app.flask_cache.set(CACHE_GLOBAL_PREFIX + cache_key, data, timeout=timeout)\n\n\ndef remove(cache_key):\n return app.flask_cache.delete(CACHE_GLOBAL_PREFIX + cache_key)\n\n\nif __name__ == '__main__':\n clear_cache_fuzzy(CachePrefix.DB_DEVICE_CHANNEL_LIST, CachePrefix.DB_DEVICE_CHANNEL_MAP)\n","sub_path":"app/utils/cache_utils.py","file_name":"cache_utils.py","file_ext":"py","file_size_in_byte":4769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"255924234","text":"\"\"\"\nCreated On: 20th Sept 2015\n@author: Amitayush Thakur,Jaiwant Rawat,Ashish Tilokani\n\"\"\"\n\nimport subprocess\nimport os\nimport platform\n\nPDF_TO_TEXT = '../xpdfbin-win-3.04/bin64/pdftotext.exe'\nPDF_TO_TEXT_ubuntu = 'pdftotext'\nPATH_TO_DATASET = 'TestData/'\nPDF_TO_TEXT_win = '..\\\\xpdfbin-win-3.04\\\\bin64\\\\pdftotext.exe'\nPATH_TO_DATASET_win = '..\\\\TestData\\\\'\n\ndef pdfToText(path):\n if platform.system()=='Linux':\n subprocess.call([PDF_TO_TEXT_ubuntu,path])\n else:\n subprocess.call([PDF_TO_TEXT,path])\n\n\ndef convertTextToPDF():\n print('Conversion starting ....')\n dirList = [x for x in os.listdir(PATH_TO_DATASET)]\n files = []\n for dirName in dirList:\n print('Changing Directory to '+dirName+' ...... 
\\n\\n\\n')\n for x in os.listdir(PATH_TO_DATASET+dirName):\n if str(x)[-3:] == 'pdf':\n pathName = PATH_TO_DATASET+dirName+'/'+str(x)\n files.append(pathName)\n pdfToText(pathName)\n print('File '+pathName+' has been converted')\n context = {\n 'dirList':str(dirList),\n 'fileList':str(files)\n }\n return context\n\ndef __main__():\n #pdfToText(PATH_TO_DATASET+'PhysicsMathematics\\\\'+'PhysRevLett.105.136805.pdf')\n print(str(convertTextToPDF()))\n\n__main__()\n","sub_path":"pdfToText.py","file_name":"pdfToText.py","file_ext":"py","file_size_in_byte":1314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}\n{"seq_id":"217016514","text":"\"\"\"\n--------------------------------------------------------------------------------\n# Method\n- Serial in single CPU\n - CPU->IO->CPU->IO\n- Parallel CPU & IO\n - CPU->CPU\n - IO->IO\n - ->CPU\n - threading\n - asyncio\n- Parallel between CPUs\n - CPU1->IO1\n - CPU2->IO\n - multiprocessing\n- Parallel between PCs\n - PC1\n - PC2\n# Import\n- threading\n- multiprocessing\n- asyncio\n - parallel function in single thread\n- lock\n - file\n- queue\n - consumer-producer\n- Pool\n- subprocess\n\n--------------------------------------------------------------------------------\n- Intensive\n - computation\n - Compress\n - Encrypt\n - Regular expression search\n - IO\n - Need huge data in external device\n - File\n - Worm\n - Database\n- Compare\n - Process\n - Private memory\n - Multi-CPU\n - Thread\n - One CPU\n - root cause: global interpreter lock (GIL)\n - IO-bound\n- Coroutine\n - asyncio sharing same data in one thread \n - thread counts can reach tens of thousands\n\n--------------------------------------------------------------------------------\n# global interpreter lock (GIL)\n- Python is 100~200 times slower than C++\n- root causes\n - dynamic language: computes while interpreting\n - data type check\n - GIL\n- http://www.dabeaz.com/python/UnderstandingGIL.pdf\n - when a thread is running, it holds the GIL\n - GIL released on I/O(read, write, send, recv, etc.)\n - simplifies the management of shared resources\n\n\"\"\"\nimport threading\nimport requests\nimport time\nimport random\nimport queue\nfrom bs4 import BeautifulSoup\n\ndef run_time(func):\n def wrapper(*args, **kw):\n start = time.time()\n res = func(*args, **kw)\n end = time.time()\n print('{0} run time is {1} seconds'.format(func.__name__, (end - start)))\n return res\n return wrapper\n\n# ----------------------- test_1 ------------------- #\ndef test_func(a, b):\n print(\"test_func\", a, b)\n return\n\ndef test_1():\n t = threading.Thread(target = test_func, args=(100,200))\n t.start()\n t.join()\n return\n\n# ----------------------- test_2 ------------------- #\n# urls = [\n# \"https://www.cnblogs.com/#p3\",\n# \"https://www.cnblogs.com/#p4\"\n# ]\n\nurls = [\n f\"https://www.cnblogs.com/#p{page}\"\n for page in range(1, 50+1)\n]\n\ndef craw(url):\n r = requests.get(url)\n print(url, len(r.text))\n return r.text\n\ndef parser(html):\n # class=\"post-item-title\"\n soup = BeautifulSoup(html, \"html.parser\")\n links = soup.find_all(\"a\", class_=\"post-item-title\")\n return [(link[\"href\"], link.get_text()) for link in links]\n\ndef do_craw(url_queue:queue.Queue, html_queue:queue.Queue):\n # producer\n while True:\n url = url_queue.get()\n html = craw(url)\n html_queue.put(html)\n print(threading.current_thread().name, f\"craw {url}\", \"url_queue.size=\", url_queue.qsize())\n time.sleep(random.randint(1,2))\n return\n\ndef do_parser(html_queue:queue.Queue, 
fout):\n # consumer\n while True:\n html = html_queue.get()\n results = parser(html)\n for res in results:\n fout.write(str(res) + \"\\n\")\n print(threading.current_thread().name, f\"results.size\", len(results), \"html_queue.size=\", html_queue.qsize())\n time.sleep(random.randint(1,2))\n return\n\n@run_time\ndef single_thread():\n for url in urls:\n craw(url)\n return\n\n@run_time\ndef multi_thread():\n threads = []\n for url in urls:\n threads.append(threading.Thread(target=craw, args=(url,)))\n for thread in threads:\n thread.start()\n for thread in threads:\n thread.join() \n return\n\ndef test_2():\n # craw(urls[0])\n # single_thread() # 4.76s\n multi_thread() # 1.22s\n return\n\ndef test_3():\n for result in parser(craw(urls[2])):\n print(result)\n return\n\ndef test_4():\n \"\"\"\n pipeline: \n input -> processor_1 -> output_1 -> ... -> processor_X -> output_X -> processor_N -> output\n input: urls\n producer: craw: download urls and get html\n consumer: parser: parse html and save the result\n output: SQL\n communication between processor:\n q = queue.Queue()\n q.put(item)\n item = q.get()\n q.qsize()\n q.empty()\n q.full()\n \"\"\"\n url_queue = queue.Queue()\n html_queue = queue.Queue()\n for url in urls:\n url_queue.put(url)\n \n for i in range(3):\n t = threading.Thread(target=do_craw, args=(url_queue, html_queue), name=f\"craw{i}\")\n t.start()\n \n fout = open(\"results.txt\", \"w\")\n for i in range(2):\n t = threading.Thread(target=do_parser, args=(html_queue, fout), name=f\"parser{i}\")\n t.start()\n return\n\nclass Account:\n def __init__(self, balance):\n self.balance = balance\n return\n\nlock = threading.Lock()\n\ndef draw(account, amount):\n with lock:\n if account.balance >= amount:\n time.sleep(0.1)\n account.balance -= amount\n print(threading.current_thread().name, \"success, now=\", account.balance)\n else:\n print(threading.current_thread().name, \"failed, now=\", account.balance)\n return\n\ndef test_5():\n account = Account(1000)\n ta = threading.Thread(target=draw, args=(account, 800), name=\"ta\")\n tb = threading.Thread(target=draw, args=(account, 800), name=\"tb\")\n ta.start()\n tb.start()\n return\n\ndef test_6():\n \"\"\"\n thread pool life cycle\n CREATE(alloc)\n start |\n READY <----------------------\n | |\n obtain cpu resource |\n | |\n RUNNING ---sleep/op ---> HANGUP\n |\n END(free)\n task_list\n thread_pool\n reuse thread\n not include CREATE and END\n \"\"\"\n from concurrent.futures import ThreadPoolExecutor, as_completed\n # with ThreadPoolExecutor() as pool:\n # results = pool.map(craw, urls)\n # for result in results:\n # print(result)\n\n # with ThreadPoolExecutor() as pool:\n # futures = [pool.submit(craw, url) for url in urls]\n # # order first\n # for future in futures:\n # print(future.result())\n # # complete first\n # for future in as_completed(futures):\n # print(future.result())\n \n with ThreadPoolExecutor() as pool:\n htmls = pool.map(craw, urls)\n htmls = list(zip(urls, htmls))\n for url, html in htmls:\n print(url, html)\n \n with ThreadPoolExecutor() as pool:\n futures = {}\n for url, html in htmls:\n future = pool.submit(parser, html)\n futures[future] = url\n # for future, url in futures.items():\n # print(url, future.result())\n\n for future in as_completed(futures):\n url = futures[future]\n print(url, future.result())\n return\n\nif __name__ == \"__main__\":\n # test_1()\n # test_2()\n # test_3()\n # test_4()\n # test_5()\n 
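    # ------------------------------------------------------------------
    # Editor's sketch (not part of the original file): the module
    # docstring mentions asyncio coroutines, but none of the tests
    # demonstrate them. A minimal coroutine version of craw(), running
    # many downloads in a single thread, might look like the block
    # below. It assumes the third-party aiohttp package; async_craw and
    # async_main are hypothetical names.
    import asyncio
    import aiohttp

    async def async_craw(session, url):
        # One GET per coroutine; awaiting the I/O yields the event loop.
        async with session.get(url) as resp:
            return len(await resp.text())

    async def async_main():
        async with aiohttp.ClientSession() as session:
            return await asyncio.gather(*(async_craw(session, u) for u in urls))

    # Uncomment to run the coroutine demo instead of test_6():
    # print(asyncio.run(async_main()))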
test_6()\n\n","sub_path":"python/others/multi_process.py","file_name":"multi_process.py","file_ext":"py","file_size_in_byte":6502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}\n{"seq_id":"48962219","text":"from django.conf.urls import url\nfrom . import views\n\nurlpatterns = [\n url(r'^$', views.dashboard, name='dashboard'),\n url(r'^create$', views.create, name='create'),\n url(r'^add$', views.add, name='add'),\n url(r'^delete/(?P<id>\\d+)$', views.delete, name='delete'),\n url(r'^addproduct/(?P<id>\\d+)$', views.addproduct, name='addproduct'),\n url(r'^removeproduct/(?P<id>\\d+)$', views.removeproduct, name='removeproduct'),\n url(r'^product/(?P<id>\\d+)$', views.product, name='product'),\n]\n","sub_path":"apps/wishlist/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}\n{"seq_id":"398495600","text":"'''\nSolution\nKnapsack-style dynamic programming with a 2-D table.\ndp[i][j] = number of ways to write i as an ordered sum of 1/2/3 whose last\nsummand is j; the same summand may not appear twice in a row, hence e.g.\ndp[i][1] = dp[i-1][2] + dp[i-1][3].\n(A worked check for n = 4 follows this record.)\n'''\nimport sys\ninput = sys.stdin.readline\n\nT = int(input())\ndp = [[0 for i in range(4)] for j in range(100001)]\nmod = 1000000009\n\ndp[1]=[0,1,0,0]\ndp[2]=[0,0,1,0]\ndp[3]=[0,1,1,1]\n\nfor i in range(4,100001):\n dp[i][1] = (dp[i-1][2] + dp[i-1][3]) % mod\n dp[i][2] = (dp[i-2][1] + dp[i-2][3]) % mod\n dp[i][3] = (dp[i-3][1] + dp[i-3][2]) % mod\n\n\nfor _ in range(T):\n print(sum(dp[int(input())]) % mod)\n\n\n# Alternative solution\n'''\nend1[i] = number of ways to write i with the last summand being 1\nend2[i] = number of ways to write i with the last summand being 2\n'''\n\nimport sys\n \ninput = sys.stdin.readline\nflush = sys.stdout.flush\n\nend1 = [None, 1, 0, 1]\nend2 = [None, 0, 1, 1]\nend3 = [None, 0, 0, 1]\nfor _ in range(10**5):\n tmp1 = end2[-1] + end3[-1]\n tmp2 = end1[-2] + end3[-2]\n tmp3 = end1[-3] + end2[-3]\n end1.append(tmp1 % (10**9 + 9))\n end2.append(tmp2 % (10**9 + 9))\n end3.append(tmp3 % (10**9 + 9))\n\nfor _ in range(int(input())):\n n = int(input())\n print((end1[n] + end2[n] + end3[n]) % (10**9 + 9))\n","sub_path":"백준/Python/알고파/냅색DP/15990(1, 2, 3 더하기 5).py","file_name":"15990(1, 2, 3 더하기 5).py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
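# --- Editor's note (not part of the adjacent records): a worked check of
# the dp recurrence in the preceding solution, for n = 4. The valid
# orderings of 1/2/3 summing to 4 with no summand repeated twice in a row
# are 1+3, 3+1 and 1+2+1, i.e. 3 in total, and indeed:
#
#     dp[4][1] = dp[3][2] + dp[3][3] = 1 + 1 = 2   # 3+1, 1+2+1
#     dp[4][2] = dp[2][1] + dp[2][3] = 0 + 0 = 0
#     dp[4][3] = dp[1][1] + dp[1][2] = 1 + 0 = 1   # 1+3
#     sum(dp[4]) = 3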
{"seq_id":"478268885","text":"n = int(input())\n\np = [0, 0,] + list(map(int, input().split()))\n\ns = [0,] + list(map(int, input().split()))\n\nd = {}\n\nfor i in range(n):\n\td[p[i]] = min(d.get(p[i], 2e9), s[i])\n\nanswer, ok = 0, True\n\nfor i in range(1, n + 1):\n\t# print(f's[{i}] = {s[i]}')\n\t\n\tif s[i] == -1:\n\t\t# print(f's[p[{i}]] = s[{p[i]}] = {s[p[i]]}')\n\t\ts[i] = d.get(i, s[p[i]])\n\t\n\tif s[i] < s[p[i]]:\n\t\tok = False\n\t\tbreak\n\n\tanswer += s[i] - s[p[i]]\n\nprint(answer if ok else -1)\n","sub_path":"codeforces/1099/D.py","file_name":"D.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}\n{"seq_id":"301252204","text":"\n# coding: utf-8\n\n# In[1]:\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport mpl_finance as mplf\nimport matplotlib.dates as mdates\nfrom matplotlib.lines import Line2D\nimport Risk_Ratio_Functions\n\n\n# In[2]:\n\ndef Candle_plot(Stock_data, stadate, enddate, trade_dates, trade_prices, buy_dates, buy_prices, sell_dates, sell_prices):\n \n # Take Stock_data as input; set the index, parse the dates, sort by the date index\n Stock_data = Stock_data.set_index('date')\n Stock_data.index = pd.to_datetime(Stock_data.index)\n Stock_data = Stock_data.sort_index(axis=0, ascending=True)\n \n # Risk-decision indicators\n Risk_Ratio = Risk_Ratio_Functions.Risk_Ratio(Stock_data)\n Risk_Ratio = Risk_Ratio[['Decision','MAJCQX']]\n \n # YCWW ('strategic planning') indicator: construction\n DA = 6\n ZFXF_YCWW = pd.DataFrame()\n ZFXF_YCWW['LLV_min'] = Stock_data['close'].rolling(window=DA).min()\n ZFXF_YCWW['HHV_max'] = Stock_data['close'].rolling(window=DA).max()\n ZFXF_YCWW['ABS'] = np.abs(ZFXF_YCWW['HHV_max']/ZFXF_YCWW['LLV_min']-1)\n ZFXF_YCWW['MAX'] = ZFXF_YCWW['ABS'].rolling(window=DA).max()\n ZFXF_YCWW['DA'] = DA\n ZFXF_YCWW.loc[ZFXF_YCWW['MAX']>0.1,'DA'] = 3\n ZFXF_YCWW['Tomorrow'] = Stock_data['close'].rolling(window=DA).mean()\n ZFXF_YCWW.loc[ZFXF_YCWW['DA'] == 3,'Tomorrow'] = Stock_data['close'].rolling(window=3).mean()\n ZFXF_YCWW['YCWW'] = ZFXF_YCWW['Tomorrow'].shift(1)\n \n # YCWW indicator: construct the buy/sell signal points\n ZFXF_YCWW['Buy_point'] = 0\n ZFXF_YCWW.loc[Stock_data['close'] > ZFXF_YCWW['YCWW'],'Buy_point'] = 1\n ZFXF_YCWW['Sell_point'] = 0\n ZFXF_YCWW.loc[Stock_data['close'] < ZFXF_YCWW['YCWW'],'Sell_point'] = -1\n ZFXF_YCWW['Points'] = ZFXF_YCWW['Buy_point']+ZFXF_YCWW['Sell_point']\n \n # YCWW indicator: form the 'formation/abiding/decay/emptiness' (2,1,-2,-1) signal points\n # (BS_point = (Points - previous Points)/2 + Points: an upward crossing yields 2 = buy,\n # a downward crossing yields -2 = sell, unchanged states stay at 1/-1)\n g = len(ZFXF_YCWW)\n ZFXF_YCWW['order'] = np.arange(0,g,1)\n ZFXF_YCWW.loc[g-1:g,'Points'] = -1\n ZFXF_YCWW['BS_point'] = ZFXF_YCWW['Points'].shift(1)\n ZFXF_YCWW['BS_point'] = ZFXF_YCWW['BS_point'].fillna(-1)\n ZFXF_YCWW['BS_point'] = (ZFXF_YCWW['Points'] - ZFXF_YCWW['BS_point'])/2+ZFXF_YCWW['Points']\n \n # Merge the two indicator sets\n Stock_Risk_Ratio_ZFXF_YCWW = pd.concat([Stock_data,Risk_Ratio,ZFXF_YCWW[['YCWW','Tomorrow','BS_point']]], axis=1)\n \n # Restrict to the target date range\n Stock_Risk_Ratio_ZFXF_YCWW = Stock_Risk_Ratio_ZFXF_YCWW[Stock_Risk_Ratio_ZFXF_YCWW.index >= pd.to_datetime(stadate)]\n Stock_Risk_Ratio_ZFXF_YCWW = Stock_Risk_Ratio_ZFXF_YCWW[Stock_Risk_Ratio_ZFXF_YCWW.index <= pd.to_datetime(enddate)]\n k = len(Stock_Risk_Ratio_ZFXF_YCWW)\n \n # Create an id column as x-axis data; dates are used for the final axis labels\n Stock_Risk_Ratio_ZFXF_YCWW['date'] = Stock_Risk_Ratio_ZFXF_YCWW.index\n Stock_Risk_Ratio_ZFXF_YCWW = Stock_Risk_Ratio_ZFXF_YCWW.reset_index(drop=True)\n Stock_Risk_Ratio_ZFXF_YCWW['id'] = Stock_Risk_Ratio_ZFXF_YCWW.index+1\n \n # Prepare the ochl data and the date-indexed data\n Stock_Risk_Ratio_ZFXF_YCWW_ochl = Stock_Risk_Ratio_ZFXF_YCWW[['id','open','close','high','low']]\n Stock_Risk_Ratio_ZFXF_YCWW_date = Stock_Risk_Ratio_ZFXF_YCWW.set_index('date')\n \n # Set the times and prices of the trade points\n Trades = pd.DataFrame()\n Trades['price'] = trade_prices\n Trades['date'] = 0\n Trades['id'] = 0\n \n for t in range(0,len(trade_dates)):\n T_date = Stock_Risk_Ratio_ZFXF_YCWW.loc[Stock_Risk_Ratio_ZFXF_YCWW['date'] == pd.to_datetime(trade_dates[t]),'date'].values\n T_id = Stock_Risk_Ratio_ZFXF_YCWW.loc[Stock_Risk_Ratio_ZFXF_YCWW['date'] == pd.to_datetime(trade_dates[t]),'id'].values\n Trades.loc[t:t+1,'date'] = T_date\n Trades.loc[t:t+1,'id'] = T_id\n \n Trades = Trades.set_index('id')\n tp = trade_prices\n idt = Trades.index.values\n \n # Set the times and prices for the buy/sell connecting lines\n Buys = pd.DataFrame()\n Buys['price'] = buy_prices\n Buys['date'] = 0\n Buys['id'] = 0\n \n Sells = pd.DataFrame()\n Sells['price'] = sell_prices\n Sells['date'] = 0\n Sells['id'] = 0\n \n for p in range(0,len(buy_dates)):\n Buys_date = Stock_Risk_Ratio_ZFXF_YCWW.loc[Stock_Risk_Ratio_ZFXF_YCWW['date'] == pd.to_datetime(buy_dates[p]),'date'].values\n Buys_id = Stock_Risk_Ratio_ZFXF_YCWW.loc[Stock_Risk_Ratio_ZFXF_YCWW['date'] == pd.to_datetime(buy_dates[p]),'id'].values\n Buys.loc[p:p+1,'date'] = Buys_date\n Buys.loc[p:p+1,'id'] = Buys_id\n \n Buys = Buys.set_index('id')\n by = buy_prices\n idby = Buys.index.values\n \n for q in range(0,len(sell_dates)):\n
Sells_date = Stock_Risk_Ratio_ZFXF_YCWW.loc[Stock_Risk_Ratio_ZFXF_YCWW['date'] == pd.to_datetime(sell_dates[q]),'date'].values\n Sells_id = Stock_Risk_Ratio_ZFXF_YCWW.loc[Stock_Risk_Ratio_ZFXF_YCWW['date'] == pd.to_datetime(sell_dates[q]),'id'].values\n Sells.loc[q:q+1,'date'] = Sells_date\n Sells.loc[q:q+1,'id'] = Sells_id\n \n Sells = Sells.set_index('id')\n sl = sell_prices\n idsl = Sells.index.values\n \n # Trade returns\n prf = pd.DataFrame()\n prf['sl'] = sell_prices\n prf['by'] = buy_prices\n # Return of each trade\n prf['prf'] = (prf['sl']-prf['by'])/prf['by']\n # Convert sell dates to computable date2num values\n prf['sd'] = sell_dates\n prf['sd'] = pd.to_datetime(prf['sd']).astype(np.object)\n prf['sd'] = mdates.date2num(prf['sd'])\n # Convert buy dates to computable date2num values\n prf['bd'] = buy_dates\n prf['bd'] = pd.to_datetime(prf['bd']).astype(np.object)\n prf['bd'] = mdates.date2num(prf['bd'])\n # Holding period (days)\n prf['ri'] = prf['sd'] - prf['bd']\n # Annualized return of each trade\n prf['Yprf'] = prf['prf']/prf['ri']*365\n # Format as percentages\n prf['prf'] = prf['prf'].apply(lambda x: format(x, '.2%'))\n prf['Yprf'] = prf['Yprf'].apply(lambda x: format(x, '.2%'))\n prfs = prf['prf'].values\n Yprfs = prf['Yprf'].values\n \n
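    # --- Editor's note (illustrative, not from the original file): the
    # annualization above is Yprf = prf / ri * 365, with ri measured in
    # days. For example, a 5% gain held for 30 days annualizes to
    # 0.05 / 30 * 365 ~= 60.83%.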
# Data array for drawing the candlestick chart\n # Data for the risk-decision curves: (Id, De), (Id, JCQX)\n # Date data (idx, date) substituting for the x-axis labels\n datarray = Stock_Risk_Ratio_ZFXF_YCWW_ochl.values\n De = Stock_Risk_Ratio_ZFXF_YCWW['Decision'].values\n JCQX = Stock_Risk_Ratio_ZFXF_YCWW['MAJCQX'].values\n Id = Stock_Risk_Ratio_ZFXF_YCWW['id'].values\n idx = np.arange(0, k, 2)\n date = Stock_Risk_Ratio_ZFXF_YCWW_date.index[idx].date\n \n # Data for the YCWW buy/sell signal points: (idb, buy), (ids, sell)\n buy_point = Stock_Risk_Ratio_ZFXF_YCWW.loc[Stock_Risk_Ratio_ZFXF_YCWW['BS_point']==2.0,['YCWW','BS_point']]\n sell_point = Stock_Risk_Ratio_ZFXF_YCWW.loc[Stock_Risk_Ratio_ZFXF_YCWW['BS_point']==-2.0,['YCWW','BS_point']]\n buy = buy_point['YCWW'].values.round(3)\n idb = buy_point.index+1\n sell = sell_point['YCWW'].values.round(3)\n ids = sell_point.index+1\n \n # The first point of a connecting line must be a buy point; if it is a sell point,\n # drop the first sell_point so the first buy_point starts the line\n if buy_point.index[0] > sell_point.index[0]:\n sell_point = sell_point.iloc[1:,:]\n \n sell2 = sell_point['YCWW'].values.round(3)\n ids2 = sell_point.index+1\n \n # YCWW indicator predictions\n idp = Stock_Risk_Ratio_ZFXF_YCWW.index[-1]\n # Today's YCWW\n ptd = Stock_Risk_Ratio_ZFXF_YCWW['YCWW'].tail(1).values.round(4)\n ptd = float(ptd[0])\n # Tomorrow's YCWW\n ptmr = Stock_Risk_Ratio_ZFXF_YCWW['Tomorrow'].tail(1).values.round(4)\n ptmr = float(ptmr[0])\n \n # Plot\n plt.figure(2,figsize=(14,6), dpi=300)\n ax1 = plt.subplot(111)\n \n # Draw the risk-decision curves first, on the bottom layer, plus the grid\n plt.plot(Id, De, color ='Orange', linewidth=2.0, alpha=0.8)\n plt.plot(Id, JCQX, color ='Brown', linewidth=2.0, alpha=0.8)\n plt.axhline(y=20, color='cyan', linewidth=1.0, alpha=1.0)\n plt.axhline(y=60, color='yellow', linewidth=1.0, alpha=1.0)\n plt.axhline(y=70, color='gold', linewidth=1.0, alpha=1.0)\n plt.axvline(x=Id[0], color='LightPink', linestyle='--', linewidth=1.0, alpha=1.0)\n plt.axvline(x=Id[-1], color='MediumOrchid', linestyle='--', linewidth=1.0, alpha=1.0)\n plt.axvline(x=Id[-1]+1, color='RosyBrown', linestyle='--', linewidth=1.0, alpha=1.0)\n plt.grid(linestyle=':', alpha=0.5)\n \n # Configure the x-axis and the y-axis of the risk-decision curves\n plt.xticks(idx, date)\n plt.xticks(rotation=45)\n plt.ylim(10,80,10)\n \n # Draw candlestick_ochl\n ax2 = ax1.twinx()\n mplf.candlestick_ochl(ax2, datarray, width=0.8, colorup='red', colordown='green', alpha=0.6)\n \n # Plot the buy/sell signal points\n plt.scatter(idb, buy, color ='blue', s=50, alpha=1.0, zorder=2)\n plt.scatter(ids, sell, color ='brown', s=50, alpha=1.0, zorder=3)\n \n # Plot the trade price points\n plt.scatter(idt, tp, color ='black', s=80, alpha=1.0, zorder=4)\n plt.scatter(idt, tp, color ='yellow', s=30, alpha=1.0, zorder=5)\n \n # Plot the prediction points\n plt.scatter(idp+1, ptd, color ='black', s=80, alpha=1.0, zorder=6)\n plt.scatter(idp+1, ptd, color ='cyan', s=30, alpha=1.0, zorder=7)\n plt.scatter(idp+2, ptmr, color ='black', s=80, alpha=1.0, zorder=6)\n plt.scatter(idp+2, ptmr, color ='Lime', s=30, alpha=1.0, zorder=7)\n \n # Annotate sell-point prices and label style\n for a,b in zip(ids,sell):\n ax2.text(a, b*1.01, b, ha='center', va= 'center', bbox = dict(facecolor = \"blue\", alpha = 0.2))\n \n # Annotate buy-point prices and label style\n for c,d in zip(idb,buy):\n ax2.text(c, d*0.995, d, ha='center', va= 'center', bbox = dict(facecolor = \"magenta\", alpha = 0.2))\n \n # Annotate trade-point prices and label style\n for e,f in zip(idt,tp):\n ax2.text(e, f*1.01, f, ha='center', va= 'center', bbox = dict(facecolor = \"yellow\", alpha = 0.5))\n \n # Annotate predicted prices and label style\n ax2.text(idp+1, ptd*1.004, ptd, ha='center', va= 'center', bbox = dict(facecolor = \"cyan\", alpha = 0.2))\n ax2.text(idp+2, ptmr*1.004, ptmr, ha='center', va= 'center', bbox = dict(facecolor = \"Lime\", alpha = 0.2))\n \n # Lines connecting predicted buy/sell points\n for m,n,p,q in zip(idb,ids2,buy,sell2):\n ax2.add_line(Line2D((m,n), (p,q), linewidth=3, color='magenta', zorder=1))\n \n # Lines connecting executed buy/sell points\n for u,v,h,g in zip(idby,idsl,by,sl):\n ax2.add_line(Line2D((u,v), (h,g), linewidth=3, color='blue', zorder=1))\n \n # Trade returns and annualized returns\n for r,s,t in zip(idsl,sl,prfs):\n ax2.text(r, s*1.024, t, ha='center', va= 'center', bbox = dict(facecolor = \"magenta\", alpha = 0.5))\n for x,y,z in zip(idsl,sl,Yprfs):\n ax2.text(x, y*1.037, z, ha='center', va= 'center', bbox = dict(facecolor = \"cyan\", alpha = 0.5))\n \n return plt.show()\n\n","sub_path":"Candle/Candle_PlusX_SEx.py","file_name":"Candle_PlusX_SEx.py","file_ext":"py","file_size_in_byte":10720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}\n{"seq_id":"52704948","text":"# ref: https://adventofcode.com/2019/day/23\nfrom typing import Callable, Generator, List, Optional\n\n\ndef main(inp):\n output_buffers: List[List[int]] = [[] for _ in range(50)]\n input_buffers: List[List[int]] = [[nid] for nid in range(50)]\n result: Optional[int] = None\n def handle_input(nid: int) -> Callable[[], int]:\n def handle_input_inner():\n if len(input_buffers[nid]) == 0:\n return -1\n return input_buffers[nid].pop(0)\n return handle_input_inner\n def handle_output(nid: int) -> Callable[[int], None]:\n def handle_output_inner(value: int):\n nonlocal result\n output_buffers[nid].append(value)\n if len(output_buffers[nid]) == 3:\n dest_nid, x, y = output_buffers[nid]\n if dest_nid == 255:\n result = y\n else:\n output_buffers[nid] = []\n input_buffers[dest_nid].extend((x, y))\n return handle_output_inner\n computers = [run_controlled_execution_intcode_computer(inp, handle_input(nid), handle_output(nid)) for nid in range(50)]\n while True:\n for c in computers:\n next(c)\n if result is not None:\n return result\n\n\ndef run_controlled_execution_intcode_computer(program: str, handle_input: Callable[[], int], handle_output: Callable[[int], None]) -> Generator[None, None, None]:\n data = [int(x) for x in program.split(',')]\n index = 0\n offset = 0\n memory = {}\n rules = [\n {'opcode': 99, 'params': 0, 'fn': lambda args: args['halt']()},\n {'opcode': 1, 'params': 3, 'fn': lambda args: args['write'](3, args['read'](1) + args['read'](2))},\n {'opcode': 2, 'params': 3, 'fn': lambda args: args['write'](3, args['read'](1) * args['read'](2))},\n {'opcode': 3, 'params': 1, 'fn': lambda args: args['write'](1, args['input']())},\n 
{'opcode': 4, 'params': 1, 'fn': lambda args: args['output'](1)},\n {'opcode': 5, 'params': 2, 'fn': lambda args: args['jump'](args['read'](2)) if args['read'](1) != 0 else None},\n {'opcode': 6, 'params': 2, 'fn': lambda args: args['jump'](args['read'](2)) if args['read'](1) == 0 else None},\n {'opcode': 7, 'params': 3, 'fn': lambda args: args['write'](3, 1) if args['read'](1) < args['read'](2) else args['write'](3, 0)},\n {'opcode': 8, 'params': 3, 'fn': lambda args: args['write'](3, 1) if args['read'](1) == args['read'](2) else args['write'](3, 0)},\n {'opcode': 9, 'params': 1, 'fn': lambda args: args['offset'](args['read'](1))}\n ]\n is_halt = False\n while not is_halt:\n opcode = data[index] % 100\n modes = list(int(x) for x in reversed(f'{data[index]:06}'))[2:]\n try:\n rule = [x for x in rules if x['opcode'] == opcode][0]\n except IndexError:\n raise ValueError(f'Illegal opcode {opcode} at index {index}')\n is_jump = False\n def halt():\n nonlocal is_halt\n is_halt = True\n def read(pos):\n mode = modes[pos-1]\n i = data[index+pos] if mode == 0 else index+pos if mode == 1 else data[index+pos]+offset\n if i >= len(data):\n if i not in memory:\n return 0\n return memory[i]\n return data[i]\n def write(pos, value):\n mode = modes[pos-1]\n i = data[index+pos] if mode == 0 else index+pos if mode == 1 else data[index+pos]+offset\n if i >= len(data):\n memory[i] = value\n else:\n data[i] = value\n def output(pos):\n mode = modes[pos-1]\n i = data[index+pos] if mode == 0 else index+pos if mode == 1 else data[index+pos]+offset\n if i > len(data):\n this_output = memory[i]\n else:\n this_output = data[i]\n handle_output(this_output)\n def jump(pos):\n nonlocal is_jump, index\n is_jump = True\n index = pos\n def set_offset(val):\n nonlocal offset\n offset += val\n args1 = {\n 'halt': halt,\n 'read': read,\n 'write': write,\n 'input': handle_input,\n 'output': output,\n 'jump': jump,\n 'offset': set_offset,\n }\n rule['fn'](args1)\n if not is_jump:\n index += rule['params']+1\n if is_halt:\n return\n yield\n\n\nif __name__ == '__main__':\n with open('23.txt', 'r') as f:\n contents = f.read()\n print(main(contents.strip()))\n","sub_path":"23a.py","file_name":"23a.py","file_ext":"py","file_size_in_byte":4121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"180942856","text":"\"\"\"\nEmit a value only after a given idle time (emits meanwhile are skipped).\nDebounce can also be used for a timeout functionality.\n\nUsage:\n\n>>> import asyncio\n>>> from broqer import Subject, op\n>>> s = Subject()\n>>> _d = s | op.Debounce(0.1) | op.Sink(print)\n>>> s.emit(1)\n>>> s.emit(2)\n>>> asyncio.get_event_loop().run_until_complete(asyncio.sleep(0.05))\n>>> s.emit(3)\n>>> asyncio.get_event_loop().run_until_complete(asyncio.sleep(0.15))\n3\n>>> _d.dispose()\n\nWhen debounce is retriggered you can specify a value to emit:\n\n>>> debounce_publisher = s | op.Debounce(0.1, False)\n>>> _d = debounce_publisher | op.Sink(print)\n>>> s.emit(False)\nFalse\n>>> asyncio.get_event_loop().run_until_complete(asyncio.sleep(0.15))\n>>> s.emit(True)\n>>> asyncio.get_event_loop().run_until_complete(asyncio.sleep(0.05))\n>>> s.emit(False)\n>>> asyncio.get_event_loop().run_until_complete(asyncio.sleep(0.05))\n>>> s.emit(True)\n>>> asyncio.get_event_loop().run_until_complete(asyncio.sleep(0.15))\nTrue\n\nReseting is also possible:\n\n>>> s.emit(False)\nFalse\n>>> s.emit(True)\n>>> asyncio.get_event_loop().run_until_complete(asyncio.sleep(0.15))\nTrue\n>>> 
debounce_publisher.reset()\nFalse\n>>> asyncio.get_event_loop().run_until_complete(asyncio.sleep(0.15))\n\n>>> _d.dispose()\n\"\"\"\nimport asyncio\nimport sys\nfrom typing import Any # noqa\n\nfrom broqer import Publisher, Subscriber, default_error_handler, NONE\n\nfrom .operator import Operator\n\n\nclass Debounce(Operator):\n \"\"\" Emit a value only after a given idle time (emits meanwhile are\n skipped). Debounce can also be used for a timeout functionality.\n\n :param duetime: time in seconds to be waited for debounce\n :param retrigger_value: value used to emit when value has changed\n :param error_callback: error callback to be registered\n :param loop: asyncio loop to be used\n \"\"\"\n def __init__(self, duetime: float,\n retrigger_value: Any = NONE,\n error_callback=default_error_handler, *, loop=None) -> None:\n\n if duetime < 0:\n raise ValueError('duetime has to be positive')\n\n Operator.__init__(self)\n\n self.duetime = duetime\n self._retrigger_value = retrigger_value\n self._call_later_handler = None # type: asyncio.Handle\n self._error_callback = error_callback\n self._state = NONE # type: Any\n self._next_state = NONE # type: Any\n self._loop = loop or asyncio.get_event_loop()\n\n def unsubscribe(self, subscriber: Subscriber) -> None:\n Operator.unsubscribe(self, subscriber)\n if not self._subscriptions:\n self._state = NONE\n self._next_state = NONE\n if self._call_later_handler:\n self._call_later_handler.cancel()\n self._call_later_handler = None\n\n def get(self):\n if self._retrigger_value is not NONE and (\n not self._subscriptions or self._state is NONE):\n return self._retrigger_value\n return self._state\n\n def emit_op(self, value: Any, who: Publisher) -> None:\n if who is not self._publisher:\n raise ValueError('Emit from non assigned publisher')\n\n if value == self._next_state:\n # skip if emit will result in the same value as the scheduled one\n return\n\n if self._call_later_handler:\n self._call_later_handler.cancel()\n self._call_later_handler = None\n\n if self._retrigger_value is not NONE and \\\n self._state != self._retrigger_value:\n # when retrigger_value is defined and current state is different\n self.notify(self._retrigger_value)\n self._state = self._retrigger_value\n self._next_state = self._retrigger_value\n if value == self._retrigger_value:\n # skip if emit will result in the same value as the current one\n return\n\n if value == self._state:\n self._next_state = self._state\n return\n\n self._next_state = value\n\n self._call_later_handler = \\\n self._loop.call_later(self.duetime, self._debounced)\n\n def _debounced(self):\n self._call_later_handler = None\n try:\n self.notify(self._next_state)\n self._state = self._next_state\n except Exception: # pylint: disable=broad-except\n self._error_callback(*sys.exc_info())\n\n def reset(self):\n \"\"\" Reset the debounce time \"\"\"\n if self._retrigger_value is not NONE:\n self.notify(self._retrigger_value)\n self._state = self._retrigger_value\n self._next_state = self._retrigger_value\n if self._call_later_handler:\n self._call_later_handler.cancel()\n self._call_later_handler = None\n","sub_path":"broqer/op/debounce.py","file_name":"debounce.py","file_ext":"py","file_size_in_byte":4774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"181310843","text":"from __future__ import annotations\n\nfrom pathlib import Path\n\n\ndef get_scannable_file_paths(root_folder: str | Path | None = None, files: list[str] | None = None) -> set[Path]:\n 
\"\"\"Finds Bicep files\"\"\"\n\n file_paths: set[Path] = set()\n\n if root_folder:\n root_path = Path(root_folder)\n file_paths = {file_path for file_path in root_path.rglob(\"*.bicep\")}\n if files:\n for file in files:\n if file.endswith(\".bicep\"):\n file_paths.add(Path(file))\n\n return file_paths\n\n\ndef clean_file_path(file_path: Path) -> Path:\n path_parts = [part for part in file_path.parts if part not in (\".\", \"..\")]\n\n return Path(*path_parts)\n","sub_path":"checkov/bicep/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"507819287","text":"import json\nimport sys\n\nfrom flask import (\n render_template, request, redirect, session, url_for,\n jsonify, flash)\n\nfrom requirements import app\nfrom requirements.github import github\nfrom requirements.models import db, User, Repo\n\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n\n@app.route('/testdata')\ndef testdata():\n db.drop_all()\n db.create_all()\n me = User('blah1')\n me.username = 'Mellen'\n me.is_member = True\n \n notme = User('blah3')\n notme.username = 'Keith'\n notme.is_member = True\n\n ro = User('blah2')\n ro.username = 'Refresh Oxford'\n ro.is_member = False\n\n repo1 = Repo()\n repo1.repo_name = 'Mellen\\'s first repo'\n\n db.session.add(me)\n db.session.add(notme)\n db.session.add(ro)\n db.session.add(repo1)\n db.session.commit()\n\n me.repos.append(repo1)\n\n ro.members.append(me)\n ro.members.append(notme)\n\n db.session.commit()\n \n result = '{0}, {1}, {2}'.format(str(me), str(ro), str(repo1)).replace('<', '<')\n result += '
'\n result += \"Mellen's repo is called '{0}'\".format(str(me.repos[0])).replace('<', '&lt;')\n result += '<br/>'\n result += \"{0} is member of {1}\".format(str(me), str(me.orgs[0])).replace('<', '&lt;')\n result += '<br/>'\n for member in ro.members:\n result += \"{0} has member {1}<br/>
\".format(str(ro), str(member)).replace('<', '<')\n return result\n\n@app.route('/db_reset')\ndef db_reset():\n db.drop_all()\n db.create_all()\n\n return 'ok'\n\n\n@app.route('/create_user/')\ndef create_user(token):\n user = User(token)\n db.session.add(user)\n db.session.commit()\n\n return 'ok'\n\n\ndef _get_repos(data):\n _repos = {}\n data = json.loads(data.raw_data)\n for x in data:\n if x['language'] and x['language'].lower() == 'python':\n key = x['owner']['login']\n l = [{\n 'name': x['name'],\n 'url': x['html_url'],\n }]\n if key in _repos:\n _repos[key].append(l[0])\n else:\n _repos[key] = l\n return _repos\n\n\n@app.route('/sync')\ndef sync():\n if 'github_token' in session:\n user_repos = github.get('user/repos')\n user_repos = _get_repos(user_repos)\n\n orgs = github.get('user/orgs')\n for x in json.loads(orgs.raw_data):\n org_repos = github.get('orgs/{0}/repos'.format(x['login']))\n org_repos = _get_repos(org_repos)\n user_repos = dict(user_repos, **org_repos)\n\n return jsonify(user_repos)\n return redirect(url_for('login'))\n\n\n@app.route('/login')\ndef login():\n if session.get('github_token', None) is None:\n return github.authorize(callback=url_for('authorized', _external=True))\n else:\n flash(\"You're already logged in.\", 'info')\n return redirect(url_for('sync'))\n\n\n@app.route('/logout')\ndef logout():\n session.pop('github_token', None)\n flash(\"You've successfully logged out.\", 'info')\n return redirect(url_for('index'))\n\n\n@app.route('/login/authorized')\n@github.authorized_handler\ndef authorized(resp):\n if resp is None:\n flash('Access denied: reason=%s error=%s' % (\n request.args['error_reason'],\n request.args['error_description']), 'error')\n\n if 'access_token' in resp:\n token = (resp['access_token'], '')\n\n user = User.query.filter_by(access_token=token[0]).first()\n if user is None:\n user = User(token[0])\n db.session.add(user)\n user.access_token = token[0]\n db.session.commit()\n\n session['user_id'] = user.id\n session['github_token'] = token\n return redirect(url_for('sync'))\n return flash(str(resp), 'error')\n\n\n@github.tokengetter\ndef get_github_oauth_token():\n return session.get('github_token')\n","sub_path":"requirements/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"531984141","text":"# -*- coding: utf-8 -*- #\n# Copyright 2019 Google LLC. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Declarative Hooks for Cloud SCC surface arguments.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport re\n\nfrom apitools.base.py import encoding\n\nfrom googlecloudsdk.api_lib.scc import securitycenter_client as sc_client\nfrom googlecloudsdk.command_lib.util.apis import yaml_data\nfrom googlecloudsdk.command_lib.util.args import resource_args\nfrom googlecloudsdk.command_lib.util.concepts import concept_parsers\nfrom googlecloudsdk.core import properties\n\n\ndef AppendOrgArg():\n \"\"\"Add Organization as positional resource.\"\"\"\n org_spec_data = yaml_data.ResourceYAMLData.FromPath(\"scc.organization\")\n arg_specs = [\n resource_args.GetResourcePresentationSpec(\n verb=\"to be used for the SCC (Security Command Center) command\",\n name=\"organization\",\n required=True,\n prefixes=False,\n positional=True,\n resource_data=org_spec_data.GetData()),\n ]\n return [concept_parsers.ConceptParser(arg_specs, [])]\n\n\ndef SourcePropertiesHook(source_properties_dict):\n \"\"\"Hook to capture \"key1=val1,key2=val2\" as SourceProperties object.\"\"\"\n messages = sc_client.GetMessages()\n return encoding.DictToMessage(source_properties_dict,\n messages.Finding.SourcePropertiesValue)\n\n\ndef SecurityMarksHook(parsed_dict):\n \"\"\"Hook to capture \"key1=val1,key2=val2\" as SecurityMarks object.\"\"\"\n messages = sc_client.GetMessages()\n security_marks = messages.SecurityMarks()\n security_marks.marks = encoding.DictToMessage(\n parsed_dict, messages.SecurityMarks.MarksValue)\n return security_marks\n\n\ndef GetOrganization(args):\n \"\"\"Prepend organizations/ to org if necessary.\"\"\"\n resource_pattern = re.compile(\"organizations/[0-9]+\")\n id_pattern = re.compile(\"[0-9]+\")\n if not args.organization:\n organization = properties.VALUES.scc.organization.Get()\n else:\n organization = args.organization\n assert resource_pattern.match(organization) or id_pattern.match(\n organization), (\n \"Organization must match either organizations/[0-9]+ or [0-9]+.\")\n if resource_pattern.match(organization):\n return organization\n return \"organizations/\" + organization\n\n\ndef GetDefaultOrganization():\n \"\"\"Prepend organizations/ to org if necessary.\"\"\"\n resource_pattern = re.compile(\"organizations/[0-9]+\")\n id_pattern = re.compile(\"[0-9]+\")\n organization = properties.VALUES.scc.organization.Get()\n assert resource_pattern.match(organization) or id_pattern.match(\n organization), (\n \"Organization must match either organizations/[0-9]+ or [0-9]+.\")\n if resource_pattern.match(organization):\n return organization\n return \"organizations/\" + organization\n\n\ndef CleanUpUserInput(mask):\n \"\"\"Removes spaces from a field mask provided by user.\"\"\"\n return mask.replace(\" \", \"\")\n\n\ndef GetOrganizationFromResourceName(resource_name):\n resource_pattern = 
re.compile(\"organizations/[0-9]+\")\n assert resource_pattern.match(resource_name), (\n \"When providing a full resource path, it must also include the pattern \"\n \"organizations/[0-9]+.\")\n list_organization_components = resource_name.split(\"/\")\n return list_organization_components[0] + \"/\" + list_organization_components[1]\n\n\ndef GetSourceFromResourceName(resource_name):\n # TODO(b/129564913) Cleanup regex's into single variable.\n resource_pattern = re.compile(\"organizations/[0-9]+/sources/[0-9]+\")\n assert resource_pattern.match(resource_name), (\n \"When providing a full resource path, it must also include the pattern \"\n \"organizations/[0-9]+/sources/[0-9]+.\")\n list_source_components = resource_name.split(\"/\")\n return (GetOrganizationFromResourceName(resource_name) + \"/\" +\n list_source_components[2] + \"/\" + list_source_components[3])\n","sub_path":"google-cloud-sdk/lib/googlecloudsdk/command_lib/scc/hooks.py","file_name":"hooks.py","file_ext":"py","file_size_in_byte":4458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"388225024","text":"import base64\nimport json\nimport os\n\nimport requests\nimport urllib3\n\nfrom beans.people import People\nfrom beans.topic import Topic\nfrom oauth import *\nfrom urls import *\nfrom utils import *\n\n__all__ = [\"ZhihuClient\"]\n\nlogger = get_common_logger(__file__, \"zhihu.log\")\n\n\nclass BaseClient:\n CAPTCHA_FILE = \"captcha.gif\"\n\n urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n\n def __init__(self):\n self._session = requests.session()\n self._login_token = None\n self._session.auth = ZhihuOAuth()\n self._session.verify = False\n\n def kill_captcha(self):\n if self._show_captcha():\n killed = False\n while not killed:\n self._get_captcha()\n captcha = input(\"Please input captcha:\")\n killed, reason = self._verify_captcha(captcha)\n if killed:\n logger.info(\"Verify captcha success,ready to sign in.\")\n else:\n logger.warning(\"Verify captcha failed:{0}\".format(reason))\n\n def _show_captcha(self) -> bool:\n try:\n logger.info(\"Checking if need captcha...\")\n resp = self._session.get(URL_CAPTCHA)\n show_captcha = resp.json()[\"show_captcha\"]\n logger.info(\"Need captcha:%s\", show_captcha)\n return show_captcha\n except (requests.RequestException, KeyError):\n logger.warning(\"Show captcha exception.\", exc_info=True)\n return True\n\n def _get_captcha(self):\n try:\n logger.info(\"Getting captcha...\")\n resp = self._session.put(URL_CAPTCHA)\n image = base64.decodebytes(resp.json()[\"img_base64\"].encode(\"utf8\"))\n with open(ZhihuClient.CAPTCHA_FILE, \"wb\") as f:\n f.write(image)\n path = os.path.join(os.getcwd(), ZhihuClient.CAPTCHA_FILE)\n logger.info(\"Captcha file:\" + path)\n except (requests.RequestException, KeyError, UnicodeDecodeError, UnicodeEncodeError):\n logger.warning(\"Get captcha exception\", exc_info=True)\n\n def _verify_captcha(self, captcha) -> (bool, str):\n try:\n logger.info(\"Verifying captcha...\")\n data = {\"input_text\": captcha}\n resp = self._session.post(URL_CAPTCHA, data=data)\n json_dict = resp.json()\n if \"error\" in json_dict:\n return False, str(json_dict[\"error\"][\"message\"])\n return True, \"\"\n except (requests.RequestException, KeyError, ValueError):\n logger.warning(\"Verify captcha exception.\", exc_info=True)\n\n def get_web_captcha(self):\n try:\n logger.info(\"Getting web captcha...\")\n # 先访问验证码页面,获取cookie\n self._session.get(WEB_URL_CAPTCHA_HOME, timeout=10)\n # 
带着cookie获取验证码\n json_resp = self._session.get(WEB_URL_CAPTCHA, timeout=10).json()\n if \"img_base64\" in json_resp:\n return Result(success=True, data=json_resp)\n return Result(success=False, error=json_resp)\n except(requests.RequestException, ValueError) as e:\n logger.warning(\"Get web captcha exception.\", exc_info=True)\n return Result(False, error={\"error\": {\"code\": 500, \"name\": str(e)}})\n\n def verify_web_captcha(self, captcha):\n try:\n logger.info(\"Verifying web captcha...\")\n url = WEB_URL_CAPTCHA\n data = json.dumps({\"captcha\": captcha})\n resp = self._session.post(url, data=data, timeout=10).json()\n logger.info(\"Verify web captcha done. {}\".format(resp))\n return \"error\" not in resp\n except(requests.RequestException, ValueError):\n logger.warning(\"Verify web captcha exception.\", exc_info=True)\n return False\n\n def login(self, username=None, password=None):\n try:\n self.kill_captcha()\n\n data = dict(LOGIN_DATA)\n data[\"username\"] = username or input(\"Please input your username:\")\n data[\"password\"] = password or input(\"Please input your password:\")\n ZhihuOAuth.login_signature(data)\n\n logger.info(\"Signing in...\")\n logger.info(\"Post data:{0}\".format(data))\n json_resp = self._session.post(URL_LOGIN, data=data).json()\n if \"error\" in json_resp:\n return Result(success=False, error=json_resp)\n self._login_token = ZhihuToken.from_dict(json_resp)\n self._session.auth = ZhihuOAuth(self._login_token)\n return Result(success=True, data=json_resp)\n except (requests.RequestException, ValueError) as e:\n logger.warning(\"Login exception.\", exc_info=True)\n return Result(False, error={\"error\": {\"code\": 500, \"name\": str(e)}})\n\n\nclass ZhihuClient(BaseClient):\n\n @staticmethod\n def topic_detail(tid=ID_TOPIC_ROOT) -> (bool, dict):\n \"\"\"\n 话题详情\n \"\"\"\n try:\n logger.info(\"Getting topic detail:{0}\".format(tid))\n url = URL_TOPIC.format(tid)\n resp = requests.get(url, auth=ZhihuOAuth(), verify=False)\n resp_json = resp.json()\n if \"error\" in resp_json:\n logger.warning(\"Get topic detail error:{}\".format(resp_json))\n return Result(success=False, error=resp_json)\n return Result(success=True, data=resp_json)\n except (requests.RequestException, ValueError) as e:\n logger.warning(\"Get topic detail failed.\", exc_info=True)\n return Result(False, error={\"error\": {\"code\": 500, \"name\": str(e)}})\n\n @staticmethod\n def topic_children(tid=ID_TOPIC_ROOT, offset=0, limit=10000 * 10) -> (bool, list):\n \"\"\"\n 话题的子话题\n \"\"\"\n try:\n logger.info(\"Getting topic children... id:{} offset:{} limit:{}\".format(tid, offset, limit))\n url = URL_TOPIC_CHILDREN.format(tid)\n include = \"data[*].\" + \",\".join(Topic.INCLUDE_DATA)\n data = {\"offset\": offset, \"limit\": limit, \"include\": include}\n resp = requests.get(url, auth=ZhihuOAuth(), data=data, verify=False)\n json_resp = resp.json()\n if \"data\" in json_resp:\n return Result(success=True, data=json_resp[\"data\"])\n logger.warning(\"Get topic children error:{}\".format(json_resp))\n return Result(False, error=json_resp)\n except (requests.RequestException, ValueError) as e:\n logger.warning(\"Get topic children exception.\", exc_info=True)\n return Result(False, error={\"error\": {\"code\": 500, \"name\": str(e)}})\n\n @staticmethod\n def topic_parent(tid=ID_TOPIC_ROOT) -> (bool, list):\n \"\"\"\n 话题的父话题\n \"\"\"\n try:\n logger.info(\"Getting topic parent... 
id:{}\".format(tid))\n url = URL_TOPIC_PARENT.format(tid)\n include = \"data[*].\" + \",\".join(Topic.INCLUDE_DATA)\n data = {\"limit\": 10000, \"include\": include}\n resp = requests.get(url, auth=ZhihuOAuth(), data=data, verify=False)\n json_resp = resp.json()\n if \"data\" in json_resp:\n return Result(success=True, data=json_resp[\"data\"])\n logger.warning(\"Get topic parents error:{}\".format(json_resp))\n return Result(False, error=json_resp)\n except (requests.RequestException, ValueError) as e:\n logger.warning(\"Get topic parent exception:{0}\", exc_info=True)\n return Result(False, error={\"error\": {\"code\": 500, \"name\": str(e)}})\n\n @staticmethod\n def topic_best_answerer(tid=ID_TOPIC_ROOT, offset=0, limit=20) -> (bool, list):\n \"\"\"\n 话题最佳回答者\n \"\"\"\n try:\n logger.info(\"Getting topic best answerer... id:{} offset:{}\".format(tid, offset))\n url = URL_TOPIC_BEST_ANSWERERS.format(tid)\n data = {\"offset\": offset, \"limit\": limit}\n json_resp = requests.get(url, auth=ZhihuOAuth(), data=data, verify=False).json()\n if \"data\" in json_resp:\n return Result(success=True, data=json_resp[\"data\"])\n logger.warning(\n \"Get topic best answerer error.id:{} offset:{} error:{}\".format(tid, offset, json_resp))\n return Result(False, error=json_resp)\n except(requests.RequestException, ValueError) as e:\n logger.warning(\"Get topic best answerer exception.\", exc_info=True)\n return Result(False, error={\"error\": {\"code\": 500, \"name\": str(e)}})\n\n @staticmethod\n def topic_follower(tid=ID_TOPIC_ROOT, offset=0, limit=20):\n \"\"\"\n 话题关注者\n \"\"\"\n try:\n logger.info(\"Getting topic follower... id:{} offset:{}\".format(tid, offset))\n url = URL_TOPIC_FOLLOWERS.format(tid)\n include = \"data[*].\" + \",\".join(People.INCLUDE_DATA)\n data = {\"offset\": offset, \"limit\": limit}\n json_resp = requests.get(url, data, auth=ZhihuOAuth(), verify=False, timeout=10).json()\n if \"data\" in json_resp:\n return Result(success=True, data=json_resp[\"data\"])\n logger.warning(\"Get topic follower error. id:{} offset:{} error:{}\".format(tid, offset, json_resp))\n return Result(False, error=json_resp)\n except(requests.RequestException, ValueError) as e:\n logger.warning(\"Get topic follower error.\", exc_info=True)\n return Result(False, error={\"error\": {\"code\": 500, \"name\": str(e)}})\n\n @staticmethod\n def people_detail(pid):\n \"\"\"\n 用户详情\n \"\"\"\n try:\n logger.info(\"Getting people detail...\".format(pid))\n url = URL_PEOPLE_DETAIL.format(pid)\n json_resp = requests.get(url, auth=ZhihuOAuth(), verify=False, timeout=10).json()\n if \"error\" not in json_resp:\n return Result(success=True, data=json_resp)\n logger.warning(\"Get people detail error. id:{}. error:{}\".format(pid, json_resp))\n return Result(False, error=json_resp)\n except(requests.RequestException, ValueError) as e:\n logger.warning(\"Get topic follower error.\", exc_info=True)\n return Result(False, error={\"error\": {\"code\": 500, \"name\": str(e)}})\n\n @staticmethod\n def people_follower_web(url_token, offset=0, limit=20):\n \"\"\"\n 用户关注者---网页端\n \"\"\"\n try:\n url = WEB_URL_PEOPLE_FOLLOWERS.format(url_token)\n logger.info(\"Getting people:%s follower... 
offset:%s limit:%s\", url_token, str(offset), str(limit))\n include = \"data[*].\" + \",\".join(People.INCLUDE_DATA)\n data = {\"offset\": offset, \"limit\": limit}\n json_resp = requests.get(url, data=data, auth=ZhihuOAuth(), verify=False, timeout=10).json()\n if \"data\" in json_resp:\n return Result(success=True, data=json_resp[\"data\"])\n logger.warning(\"Get people:%s follower failed. reason:%s\", url_token, json_resp)\n return Result(False, error=json_resp)\n except requests.RequestException as e:\n logger.warning(\"Get people follower error.\", exc_info=True)\n return Result(False, error={\"error\": {\"code\": 500, \"name\": str(e)}})\n except ValueError as e:\n logger.warning(\"Get people follower error.\", exc_info=True)\n return Result(False, error={\"error\": {\"code\": 404, \"name\": str(e)}})\n\n @staticmethod\n def topic_essence_feeds(tid, offset=0, limit=100):\n \"\"\"\n 话题精华回答\n \"\"\"\n try:\n url = URL_TOPIC_ESSENCE_FEEDS.format(tid)\n logger.info(\"Getting topic:%s essence feeds... offset:%s limit:%s\", tid, str(offset), str(limit))\n data = {\"offset\": offset, \"limit\": limit}\n json_resp = requests.get(url, data=data, auth=ZhihuOAuth(), verify=False, timeout=10).json()\n if \"data\" in json_resp:\n return Result(success=True, data=json_resp[\"data\"])\n logger.warning(\"Get topic:%s essence feeds failed. reason:%s\", tid, json_resp)\n return Result(False, error=json_resp)\n except (requests.RequestException, ValueError) as e:\n logger.warning(\"Get topic essence feeds failed.\", exc_info=True)\n return Result(False, error={\"error\": {\"code\": 500, \"name\": str(e)}})\n\n @staticmethod\n def topic_unanswered_questions(tid, offset=0, limit=100):\n \"\"\"\n 话题下未回答的问题\n \"\"\"\n try:\n url = URL_TOPIC_UNANSWERED_QUESTIONS.format(tid)\n logger.info(\"Getting topic:%s unanswered questions... offset:%s limit:%s\", tid, str(offset), str(limit))\n data = {\"offset\": offset, \"limit\": limit}\n json_resp = requests.get(url, data=data, auth=ZhihuOAuth(), verify=False, timeout=10).json()\n if \"data\" in json_resp:\n return Result(success=True, data=json_resp[\"data\"])\n logger.warning(\"Get topic:%s unanswered questions failed. reason:%s\", tid, json_resp)\n return Result(False, error=json_resp)\n except (requests.RequestException, ValueError) as e:\n return Result(False, error={\"error\": {\"code\": 500, \"name\": str(e)}})\n\n @staticmethod\n def question(qid):\n \"\"\"\n 问题详情\n \"\"\"\n try:\n url = URL_QUESTION.format(qid)\n logger.info(\"Getting question:%s...\", qid)\n json_resp = requests.get(url, auth=ZhihuOAuth(), verify=False, timeout=10).json()\n if \"error\" not in json_resp:\n return Result(success=True, data=json_resp)\n logger.warning(\"Get question:%s failed. reason:%s\", qid, json_resp)\n return Result(False, error=json_resp)\n except (requests.RequestException, ValueError) as e:\n return Result(False, error={\"error\": {\"code\": 500, \"name\": str(e)}})\n\n @staticmethod\n def question_answers(qid, offset=0, limit=100):\n \"\"\"\n 问题回答\n \"\"\"\n try:\n url = URL_QUESTION_ANSWERS.format(qid)\n logger.info(\"Getting question:%s answers... offset:%s limit:%s\", qid, str(offset), str(limit))\n data = {\"offset\": offset, \"limit\": limit}\n json_resp = requests.get(url, data=data, auth=ZhihuOAuth(), verify=False, timeout=10).json()\n if \"data\" in json_resp:\n return Result(success=True, data=json_resp[\"data\"])\n logger.warning(\"Get question:%s answers failed. 
reason:%s\", qid, json_resp)\n return Result(False, error=json_resp)\n except (requests.RequestException, ValueError) as e:\n return Result(False, error={\"error\": {\"code\": 500, \"name\": str(e)}})\n\n @staticmethod\n def question_followers(qid, offset=0, limit=100):\n \"\"\"\n 问题关注者\n \"\"\"\n try:\n url = URL_QUESTIONS_FOLLOWERS.format(qid)\n logger.info(\"Getting question:%s followers... offset:%s limit:%s\", qid, str(offset), str(limit))\n data = {\"offset\": offset, \"limit\": limit}\n json_resp = requests.get(url, data=data, auth=ZhihuOAuth(), verify=False, timeout=10).json()\n if \"data\" in json_resp:\n return Result(success=True, data=json_resp[\"data\"])\n logger.warning(\"Get question:%s followers failed. reason:%s\", qid, json_resp)\n return Result(False, error=json_resp)\n except (requests.RequestException, ValueError) as e:\n return Result(False, error={\"error\": {\"code\": 500, \"name\": str(e)}})\n\n @staticmethod\n def answer_collections_web(aid, offset=0, limit=20):\n \"\"\"\n 问题收藏夹\n \"\"\"\n try:\n url = WEB_URL_ANSWER_COLLECTIONS.format(aid)\n logger.info(\"Getting answer:%s collections... offset:%s limit:%s\", aid, str(offset), str(limit))\n data = {\"offset\": offset, \"limit\": limit, \"include\": \"data[*].follower_count,answer_count,creator\"}\n json_resp = requests.get(url, data=data, auth=ZhihuOAuth(), verify=False, timeout=10).json()\n if \"data\" in json_resp:\n return Result(success=True, data=json_resp[\"data\"])\n logger.warning(\"Get answer:%s collections failed. reason:%s\", aid, json_resp)\n return Result(False, error=json_resp)\n except (requests.RequestException, ValueError) as e:\n return Result(False, error={\"error\": {\"code\": 500, \"name\": str(e)}})\n\n @staticmethod\n def recommend_column(offset=0, limit=20):\n \"\"\"\n 推荐专栏\n \"\"\"\n try:\n url = URL_RECOMMEND_COLUMNS\n logger.info(\"Getting recommend columns... offset:%s limit:%s\", str(offset), str(limit))\n data = {\"offset\": offset, \"limit\": limit, \"seed\": \"7\", \"include\": \"data[*].topics\"}\n json_resp = requests.get(url, data=data, auth=ZhihuOAuth(), verify=False, timeout=10).json()\n if isinstance(json_resp, list):\n return Result(True, data=json_resp)\n\n logger.warning(\"Get recommend columns failed. reason:%s\", json_resp)\n return Result(success=False, error=json_resp)\n except (requests.RequestException, ValueError) as e:\n return Result(False, error={\"error\": {\"code\": 500, \"name\": str(e)}})\n\n @staticmethod\n def guest_token():\n try:\n url = URL_GUEST_TOKEN\n logger.info(\"Getting guest token...\")\n data = {\"source\": \"com.zhihu.android\"}\n json_resp = requests.post(url, data=data, auth=ZhihuOAuth(), verify=False, timeout=10).json()\n if \"error\" in json_resp:\n return Result(False, error=json_resp)\n return Result(success=True, data=json_resp)\n except (requests.RequestException, ValueError) as e:\n return Result(False, error={\"error\": {\"code\": 500, \"name\": str(e)}})\n\n\nif __name__ == \"__main__\":\n pass\n","sub_path":"zhihu.py","file_name":"zhihu.py","file_ext":"py","file_size_in_byte":18052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"103767572","text":"#!/usr/bin/env python3\n#\n# Copyright 2020 Red Hat\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\nimport argparse\nimport os\nfrom pathlib import Path\nimport shutil\nimport sys\nimport logging\nfrom datetime import datetime, timedelta\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--dry-run', action='store_true')\nparser.add_argument('--retention-days', type=int, default=31)\nparser.add_argument('--log-path-dir', default='/var/www/logs')\nparser.add_argument('--debug', action='store_true')\nargs = parser.parse_args()\nlogging.basicConfig(\n format='%(asctime)s %(levelname)-5.5s %(message)s',\n level=logging.DEBUG if args.debug else logging.INFO)\nlog = logging.getLogger()\n\n\ndef check_dir_path(log_path):\n p = Path(log_path)\n if not p.exists():\n print(\"Can not find provided dir path %s\" % log_path)\n sys.exit(1)\n return p.resolve()\n\n\ndef delete_dir(dir_path):\n shutil.rmtree(dir_path)\n\n\ndef get_jobdir(dirs, files):\n dirs_name = set(map(lambda s: s.name, dirs))\n\n def is_zuul():\n return 'zuul-info' in dirs_name\n\n def is_jenkins():\n return 'ara-database' in dirs_name\n\n def is_jenkins_console():\n return 'consoleText.txt' in files\n\n def is_empty_dir():\n return not files and not dirs\n\n return is_zuul() or is_jenkins() or is_jenkins_console() or is_empty_dir()\n\n\n# (dirs, files)\n# DirContent = typing.Tuple[typing.Set[Path], typing.Set[str]]\n\n\ndef ls(dir_path):\n dirs = set()\n files = set()\n for entry in os.listdir(dir_path):\n entry_path = dir_path / entry\n if entry_path.is_dir():\n dirs.add(entry_path)\n elif entry_path.exists():\n files.add(entry)\n return (dirs, files)\n\n\ndef find_old_files(calculated_time, log_path):\n queue = set((log_path, ))\n while queue:\n root = queue.pop()\n current_dirs, current_files = ls(root)\n if get_jobdir(current_dirs, current_files):\n log.debug(\"%s : is a job dir\", root)\n dir_date = datetime.fromtimestamp(os.path.getctime(root))\n if dir_date < calculated_time:\n yield root\n else:\n log.debug(\"%s : walking\", root)\n queue = queue.union(current_dirs)\n\n\ndef search_and_destroy(calculated_time, dry_run, log_path):\n for job_dir in find_old_files(calculated_time, log_path):\n log.debug(\"%s : removing old logs\", job_dir)\n if not dry_run and log_path != job_dir:\n delete_dir(job_dir)\n\n\nif __name__ == \"__main__\":\n root = check_dir_path(args.log_path_dir)\n calculated_time = datetime.now() - timedelta(days=args.retention_days)\n search_and_destroy(calculated_time, args.dry_run, root)\n","sub_path":"ansible/roles/sf-logserver/files/purge-logs.py","file_name":"purge-logs.py","file_ext":"py","file_size_in_byte":3194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"335371815","text":"#!/usr/bin/env python3.5\nfrom flask import Flask, render_template\nfrom execute import DEFAULT, hdmi\n\napp = Flask(__name__)\n#app.run(host='0.0.0.0', port='5002')\n#wsgi_app = app.wsgi_app\n\n@app.route('/')\ndef hello_world():\n col = DEFAULT[1]\n default = DEFAULT[0]\n return render_template('index.html', state=default, color=col)\n hdmi(DEFAULT)\n\n\n\n\nif __name__ == \"__main__\":\n 
hello_world()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"218298787","text":"#!/usr/bin/python3.8.5\nfrom opty import algy, funky\nimport numpy as np\nimport sys\nfrom configparser import ConfigParser\nimport random\n\nconf = ConfigParser()\nconf.read(sys.argv[1])\n\nh = conf['GENERAL'].getfloat('h')\ne = conf['GENERAL'].getfloat('e')\nverbose = conf['GENERAL'].getboolean('verbose')\nstep = conf['simplex'].getfloat('step')\nalpha = conf['simplex'].getfloat('alpha')\nbeta = conf['simplex'].getfloat('beta')\ngamma = conf['simplex'].getfloat('gamma')\nsigma = conf['simplex'].getfloat('sigma')\ndx = np.fromstring(conf['hooke_jeeves'].get('dx'), sep=' ')\ne_hj = np.fromstring(conf['hooke_jeeves'].get('e'), sep=' ')\n\nif len(dx) == 1:\n dx = dx[0]\n\nif len(e_hj) == 1:\n e_hj = e_hj[0]\n\n\ndef __format(x, f):\n if x is None:\n return 'No minimum found'\n\n return \"x = {:4.3f} {:4.3f} f(x) = {:4.3f} cc = {:6d} gc = {:<4d} hc = {:<4d}\".format(\n x[0], x[1], f(x), f.call_count, f.gradient_call_count, f.hessian_call_count)\n\n\n# ZAD 1 ======================================================================\nprint('\\n========== Stohastic gradient descent on f3 (zad 1) =============\\n')\n\nf3 = funky.F3()\nx = algy.stohastic_gradient_descent(f3, np.array([0, 0]), line_search=False)\nprint(__format(x, f3))\n\nf3 = funky.F3()\nx = algy.stohastic_gradient_descent(f3, np.array([0.0, 0.0]), line_search=True)\nprint(__format(x, f3))\n# ZAD 2 ======================================================================\nprint('\\n=========== SGD and Newton-Raphson on f1 and f2(zad 2) ==========\\n')\n\nprint(\"Rosenbrock's banana\")\nf1 = funky.RosenbrocksBanana()\nprint('SGD')\nx = algy.stohastic_gradient_descent(f1, np.array([-1.9, 2.0]), line_search=True)\nprint(__format(x, f1))\nf1 = funky.RosenbrocksBanana()\nprint('Newton-Raphson')\nx = algy.newton_raphson(f1, np.array([-1.9, 2.0]), line_search=True)\nprint(__format(x, f1))\n\nprint('F2')\nf2 = funky.F2()\nprint('SGD')\nx = algy.stohastic_gradient_descent(f2, np.array([0.1, 0.3]), line_search=True)\nprint(__format(x, f2))\nf2 = funky.F2()\nprint('Newton-Raphson')\nx = algy.newton_raphson(f2, np.array([0.1, 0.3]), line_search=True)\nprint(__format(x, f2))\n# ZAD 3 ======================================================================\nprint('\\n========== Roddy Rich - The Box on f1 and f2 (zad 3) ============\\n')\n\nexplicit_constraint = funky.ExplicitContraint(-100.0, 100.0)\ninequality_constraints = [funky.g1, funky.g2]\n\nprint(\"Rosenbrock's banana\")\nf1 = funky.RosenbrocksBanana()\nx = algy.box_algorithm(f1, np.array([-1.9, 2]), explicit_constraint, inequality_constraints)\nprint(__format(x, f1))\n\nprint('F2')\nf2 = funky.F2()\nx = algy.box_algorithm(f2, np.array([0.1, 0.3]), explicit_constraint, inequality_constraints)\nprint(__format(x, f2))\n\n# ZAD 4 ======================================================================\nprint('\\n=== Transformed simplex by Nelder & Mead on f1 and f2 (zad 4) ===\\n')\n\nprint(\"Rosenbrock's banana\")\nf1 = funky.RosenbrocksBanana()\nx = algy.contsrained_simplex_nelder_mead(f1, np.array([3.0, 7.0]), [funky.g1, funky.g2])\nprint(__format(x, f1))\n\nprint(\"F2\")\nf2 = funky.F2()\nx = algy.contsrained_simplex_nelder_mead(f2, np.array([0.1, 0.3]), [funky.g1, funky.g2])\nprint(__format(x, f2))\n\n# ZAD 5 
======================================================================\nprint('\\n====== Transformed simplex by Nelder & Mead on f4 (zad 5) =======\\n')\nf4 = funky.F4()\nx0 = np.array([5.0, 5.0])\ninequality_constraints = [funky.g3, funky.g4]\nequality_constraints = [funky.h1]\nx = algy.contsrained_simplex_nelder_mead(f4, x0, inequality_constraints, equality_constraints)\nprint(__format(x, f4))\n","sub_path":"dz3/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"405573249","text":"from flask import Flask, request, render_template\r\n\r\nimport os\r\nimport torch\r\nfrom torch.utils import data \r\n\r\nimport albumentations\r\nimport pretrainedmodels\r\n\r\nimport numpy as np\r\nimport torch.nn as nn\r\n\r\nfrom torch.nn import functional as F\r\n\r\nfrom wtfml.data_loaders.image.classification import ClassificationDataset\r\nfrom wtfml.engine.engine import Engine\r\n\r\napp = Flask(__name__)\r\nUPLOAD_FOLDER = \"D:/pythonProject/melanoma/static\" \r\nDEVICE = \"cpu\"\r\nMODEL = None\r\n\r\n\r\nclass SEResNext50_32x4d(nn.Module):\r\n def __init__(self, pretrained=\"imagenet\"):\r\n super(SEResNext50_32x4d, self).__init__()\r\n self.base_model = pretrainedmodels.__dict__[\r\n \"se_resnext50_32x4d\"\r\n ](pretrained=pretrained)\r\n self.l0 = nn.Linear(2048, 1)\r\n\r\n def forward(self, image, targets):\r\n bs, _, _, _ = image.shape\r\n x = self.base_model.features(image)\r\n x = F.adaptive_avg_pool2d(x, 1)\r\n x = x.reshape(bs, -1)\r\n out = torch.sigmoid(self.l0(x))\r\n loss = 0\r\n return out, loss\r\n\r\n\r\ndef predict(image_path, model):\r\n mean = (0.485, 0.456, 0.406)\r\n std = (0.229, 0.224, 0.225)\r\n\r\n test_aug = albumentations.Compose(\r\n [\r\n albumentations.Normalize(mean, std, max_pixel_value=255.0, always_apply=True),\r\n ]\r\n )\r\n\r\n test_images = [image_path]\r\n test_targets = [0]\r\n\r\n test_dataset = ClassificationDataset(\r\n image_paths=test_images,\r\n targets=test_targets,\r\n resize=None,\r\n augmentations=test_aug\r\n )\r\n\r\n test_loader = data.DataLoader(\r\n test_dataset,\r\n batch_size=1,\r\n shuffle=False,\r\n num_workers=0\r\n )\r\n eng = Engine(model, device=DEVICE, optimizer=None)\r\n\r\n predictions = eng.predict(test_loader)\r\n\r\n return np.vstack((predictions)).ravel()\r\n\r\n\r\n@app.route(\"/\", methods=[\"GET\", \"POST\"])\r\ndef upload_predict():\r\n if request.method == \"POST\":\r\n image_file = request.files[\"image\"]\r\n if image_file:\r\n image_location = os.path.join(\r\n UPLOAD_FOLDER,\r\n image_file.filename\r\n )\r\n image_file.save(image_location)\r\n pred = predict(image_location, MODEL)[0]\r\n return render_template(\"index.html\", prediction=pred, image_loc=image_file.filename)\r\n return render_template(\"index.html\", prediction=0, image_loc=None)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n MODEL = SEResNext50_32x4d(pretrained=None)\r\n MODEL.load_state_dict(torch.load(\"model.bin\", map_location=torch.device(DEVICE)))\r\n MODEL.to(DEVICE)\r\n # app.run(host=\"0.0.0.0\", port=12000, debug=True)\r\n app.run(debug=True) \r\n ","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"94177488","text":"import csv \r\nimport plotly as py\r\nimport plotly.graph_objs as go\r\n#from datetime import datetime \r\n\r\nopen_file = 
open(\"MODIS_C6_Australia_NewZealand_MCD14DL_NRT_2019331.txt\", \"r\")\r\n\r\ncsv_file = csv.reader(open_file, delimiter=\",\")\r\n\r\nheader_row = next(csv_file)\r\n\r\nfor index,column_header in enumerate(header_row):\r\n print(index, column_header)\r\n\r\nbrights,lons,lats,hover_texts = [],[],[],[]\r\n\r\n\"\"\" for row in csv_file:\r\n try:\r\n bright = int(row[header_row.index(\"brightness\")]) \r\n lon = int(row[header_row.index(\"longitude\")])\r\n lat = int(row[header_row.index(\"latitude\")])\r\n except ValueError:\r\n print(\"Missing data\")\r\n\r\n else:\r\n brights.append(bright)\r\n lons.append(lon)\r\n lats.append(lat)\r\n hover_texts.append(hover_texts) \"\"\"\r\n\r\n\r\nfor row in csv_file:\r\n bright = float(row[header_row.index(\"brightness\")]) \r\n lon = float(row[header_row.index(\"longitude\")])\r\n lat = float(row[header_row.index(\"latitude\")])\r\n brights.append(bright)\r\n lons.append(lon)\r\n lats.append(lat)\r\n hover_texts.append(hover_texts)\r\n\r\nprint(brights[:10])\r\n\r\n\r\n\r\n\r\n\r\nfrom plotly.graph_objs import Scattergeo,Layout\r\nfrom plotly import offline as go\r\n\r\ndata = [{\r\n 'type': 'scattergeo',\r\n 'lon': lons,\r\n 'lat': lats,\r\n 'marker':{\r\n 'size':[.04*bright for bright in brights],\r\n 'color': brights,\r\n 'colorscale': 'Viridis',\r\n 'reversescale':True,\r\n 'colorbar':{'title': 'Brightness'}\r\n \r\n },\r\n}]\r\n\r\n\r\nmy_layout = Layout(title=\"Australian Fires- November 2019\")\r\n\r\nfig = {'data': data, 'layout':my_layout}\r\n\r\ngo.plot(fig,filename='aussie_firesNOV.html')\r\n","sub_path":"aussie_firesNOV.py","file_name":"aussie_firesNOV.py","file_ext":"py","file_size_in_byte":1679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"459017844","text":"\"\"\"init\n\nRevision ID: 31ef45f148a4\nRevises: None\nCreate Date: 2016-09-01 11:51:28.268997\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '31ef45f148a4'\ndown_revision = None\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.create_table('love',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('user_id', sa.Integer(), nullable=True),\n sa.Column('daily_id', sa.Integer(), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('users',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('email', sa.String(length=64), nullable=True),\n sa.Column('username', sa.String(length=64), nullable=True),\n sa.Column('password_hash', sa.String(length=64), nullable=True),\n sa.Column('create_time', sa.String(length=64), nullable=True),\n sa.Column('love_counter', sa.Integer(), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_index('ix_users_email', 'users', ['email'], unique=True)\n op.create_index('ix_users_username', 'users', ['username'], unique=True)\n op.create_table('daily',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('title', sa.String(length=64), nullable=True),\n sa.Column('url', sa.String(length=128), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.drop_table('daily')\n op.drop_index('ix_users_username', 'users')\n op.drop_index('ix_users_email', 'users')\n op.drop_table('users')\n op.drop_table('love')\n ### end Alembic commands ###\n","sub_path":"service/migrations/versions/31ef45f148a4_init.py","file_name":"31ef45f148a4_init.py","file_ext":"py","file_size_in_byte":1694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"444019266","text":"\"\"\"\nConvenient utilities, include training, evaluation and prediction.\n\"\"\"\nimport os\nimport logging\n\nimport numpy as np\n\nfrom ticktock import tick\n\n\n@tick\ndef train(sess, env, X_data, y_data, X_valid=None, y_valid=None, epochs=1,\n load=False, shuffle=True, batch_size=128, name='model'):\n \"\"\"\n Train a TF model by running env.train_op.\n \"\"\"\n if load:\n print('Loading saved model')\n return env.saver.restore(sess, 'model/{}'.format(name))\n\n print('Train model')\n n_sample = X_data.shape[0]\n n_batch = int((n_sample + batch_size - 1) / batch_size)\n for epoch in tqdm(range(epochs)):\n if shuffle:\n print('\\nShuffling data')\n ind = np.arange(n_sample)\n np.random.shuffle(ind)\n X_data = X_data[ind]\n y_data = y_data[ind]\n\n for batch in range(n_batch):\n print(' batch {0}/{1}'.format(batch + 1, n_batch), end='\\r')\n start = batch * batch_size\n end = min(n_sample, start + batch_size)\n sess.run(env.train_op, feed_dict={env.x: X_data[start:end],\n env.y: y_data[start:end],\n env.training: True})\n if X_valid is not None:\n evaluate(sess, env, X_valid, y_valid)\n\n if hasattr(env, 'saver'):\n print('\\n Saving model')\n os.makedirs('model', exist_ok=True)\n env.saver.save(sess, 'model/{}'.format(name))\n\n\n@tick\ndef evaluate(sess, env, X_data, y_data, batch_size=128):\n \"\"\"\n Evaluate TF model by running env.loss and env.acc.\n \"\"\"\n print('\\nEvaluating')\n\n n_sample = X_data.shape[0]\n n_batch = int((n_sample + batch_size - 1) / batch_size)\n loss, acc = 0, 0\n\n for batch in range(n_batch):\n print(' batch {0}/{1}'.format(batch + 1, n_batch), end='\\r')\n start = batch * batch_size\n end = min(n_sample, start + batch_size)\n cnt = end - start\n batch_loss, batch_acc = sess.run(\n [env.loss, env.acc],\n feed_dict={env.x: X_data[start:end],\n env.y: y_data[start:end]})\n loss += batch_loss * cnt\n acc += batch_acc * cnt\n loss /= n_sample\n acc /= n_sample\n\n print(' loss: {0:.4f} acc: {1:.4f}'.format(loss, acc))\n return loss, acc\n\n\n@tick\ndef predict(sess, env, X_data, batch_size=128):\n \"\"\"\n Do inference by running env.ybar.\n \"\"\"\n print('\\nPredicting')\n n_classes = env.ybar.get_shape().as_list()[1]\n\n n_sample = X_data.shape[0]\n n_batch = int((n_sample + batch_size - 1) / batch_size)\n yval = np.empty((n_sample, n_classes))\n\n for batch in range(n_batch):\n print(' batch {0}/{1}'.format(batch + 1, n_batch), end='\\r')\n start = batch * batch_size\n end = min(n_sample, start + batch_size)\n batch_y = sess.run(env.ybar, feed_dict={env.x: X_data[start:end]})\n yval[start:end] = batch_y\n print()\n return yval\n","sub_path":"src/utils/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":2973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"25709710","text":"from flask import Flask\nfrom flask import render_template\n\nmyapp = Flask(__name__)\n\n\n@myapp.route(\"/hello/\")\ndef hellofoo():\n return 'hello world'\n\n@myapp.route(\"/\")\ndef homepage():\n rawhtml = render_template('homepage.html')\n return 
rawhtml\n\nif __name__ == '__main__':\n myapp.run(debug=True, use_reloader=True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"188813388","text":"import pytest\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom webdriver_manager.firefox import GeckoDriverManager\nfrom webdriver_manager.microsoft import IEDriverManager\nfrom webdriver_manager.opera import OperaDriverManager\nfrom selenium import webdriver\n\ndef pytest_addoption(parser):\n parser.addoption(\n \"--browser_name\", default='chrome', action=\"store\"\n )\n\n@pytest.fixture(scope=\"class\")\ndef setup(request):\n browsername=request.config.getoption(\"--browser_name\")\n if browsername=='chrome':\n options=webdriver.ChromeOptions()\n options.add_argument('--start_maximized')\n\n driver=webdriver.Chrome(ChromeDriverManager().install())\n driver.maximize_window()\n request.cls.driver=driver\n yield\n driver.close()\n\n\n elif browsername==\"firefox\":\n print('firefox')\n options=webdriver.FirefoxOptions()\n options.add_argument('--start_maximized')\n\n driver = webdriver.Firefox(executable_path=GeckoDriverManager().install())\n driver.maximize_window()\n\n request.cls.driver = driver\n yield\n driver.close()\n elif browsername==\"IE\":\n print('Hello')\n options=webdriver.IeOptions()\n options.add_argument('--start_maximized')\n driver=webdriver.Ie(executable_path=IEDriverManager().install(),options=options)\n request.cls.driver=driver\n yield\n driver.close()\n\n else:\n print('Error')\n","sub_path":"framework/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":1470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"576675894","text":"from __future__ import division\nimport sys\nimport os\nimport wave\nimport importlib\nimport re\n\n\nclass Engine(object):\n\t\"\"\"\n\tMorse engine that will encode/decode a text message to morse.\n\t\"\"\"\n\t# The morse code.\n\tlocale = str\n\t# The morse loaded library.\n\tmorse_lib = None\n\t# The stored input as text or signal.\n\tinput = None\n\t# The stored output as text or signal.\n\toutput = None\n\t# Words per minute.\n\t# Standard is 20 words per minute. Default word is a five character word.\n\twpm = 20\n\t# Characters per minute. The speed at which the signal is played.\n\t# Standard is a 100 characters per minute, without the space. 
(Add 20)\n\treal_cpm = 120\n\t# Speed index.\n\t# The speed index computed from the real_cpm.\n\tspeed_index = 0.5\n\t# Signal sound frequency\n\tfreq = 800\n\n\tdef __init__(self, locale = 'ITU'):\n\t\t\"\"\"\n\t\tConstructor.\n\t\t:param locale: The morse code locale (international, us, etc...)\n\t\t\"\"\"\n\t\tself.locale = locale\n\n\tdef load_morse_lib(self):\n\t\t\"\"\"\n\t\tLoads the morse code for the locale that was set.\n\t\t\"\"\"\n\t\tself.morse_lib = getattr(importlib.import_module(\"morse\"), self.locale.upper())\n\n\tdef flush_input(self):\n\t\t\"\"\"\n\t\tFlushes the stored input.\n\t\t\"\"\"\n\t\tself.input = None\n\n\tdef flush_output(self):\n\t\t\"\"\"\n\t\tFlushes the output.\n\t\t\"\"\"\n\t\tself.output = None\n\n\tdef store_text_input(self, text):\n\t\t\"\"\"\n\t\tStores a text to be encoded.\n\t\t:param text: The text to be encoded.\n\t\t\"\"\"\n\t\tif type(self.input) is 'str':\n\t\t\tself.flush_input()\n\t\t\tself.input += text\n\n\tdef encode(self, text, locale = None):\n\t\t\"\"\"\n\t\tEncodes a text to a morse signal.\n\t\t:param text: The text to encode.\n\t\t:param locale: The locale to encode.\n\t\t:return:\n\t\t\"\"\"\n\t\t# Buffer to store the encoded text.\n\t\tbuffer = []\n\t\tfor char in text:\n\t\t\t# If the character is alphanumeric\n\t\t\tif re.match('[a-zA-Z0-9]', char):\n\t\t\t\tuchar = char.upper()\n\t\t\t\tif uchar in self.morse_lib:\n\t\t\t\t\tbuffer.append(self.morse_lib[uchar])\n\t\t\t\t\tbuffer.append(self.morse_lib['LSPACE'])\n\t\t\t# If the character is a space\n\t\t\telif re.match('\\s', char):\n\t\t\t\tbuffer.append(self.morse_lib['WSPACE'])\n\t\t\t# If the character is a non word character\n\t\t\telif re.match('\\W', char):\n\t\t\t\tif char in self.morse_lib:\n\t\t\t\t\tbuffer.append(self.morse_lib[char])\n\t\t\t\t\tbuffer.append(self.morse_lib['LSPACE'])\n\t\treturn buffer\n\n\tdef decode(self, signal, locale = None):\n\t\t\"\"\"\n\t\tDecodes a morse message to a text message.\n\t\t:param message: The message to decode, an array of.\n\t\t:param locale: The locale to use for decoding.\n\t\t:return:\n\t\t\"\"\"\n\t\t# @todo : add method to get a signal from the input.\n\t\t# Buffer to store the decoded message.\n\t\tbuffer = []\n\t\tfor s in signal:\n\t\t\tpass\n\n\tdef compute_speed_index(self, wpm):\n\t\t\"\"\"\n\t\tComputes the index for a given cpm number.\n\t\t:param wpm: The words per minute.\n\t\t:return: float\n\t\t\"\"\"\n\t\tcpm = wpm * 5\n\t\treal_cpm = cpm + wpm\n\n\t\treturn (60 / real_cpm) * 0.1\n\n\tdef play(self, encoded, wpm = None, freq = None):\n\t\t\"\"\"\n\t\tPlays an encoded morse message.\n\t\t:param encoded: The encoded message.\n\t\t:param wpm: The wpm.\n\t\t:param freq: The sound signal frequency.\n\t\t\"\"\"\n\t\tif wpm is not None:\n\t\t\tspeed_index = self.compute_speed_index(wpm)\n\t\telse:\n\t\t\tspeed_index = self.compute_speed_index(self.wpm)\n\t\tif freq is None:\n\t\t\tfreq = self.freq\n\t\tfor signal in encoded:\n\t\t\tfor subsignal in signal:\n\t\t\t\tif subsignal is not 0:\n\t\t\t\t\tdur = subsignal * speed_index\n\t\t\t\t\tos.system('play --no-show-progress --null --channels 1 synth %s sine %f' % (dur, freq))\n\t\t\t\telif subsignal is 0:\n\t\t\t\t\tdur = 1 * speed_index\n\t\t\t\t\tos.system('play --no-show-progress --null --channels 1 synth %s sine %f' % (dur, 0))\n","sub_path":"engine.py","file_name":"engine.py","file_ext":"py","file_size_in_byte":3519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"427588986","text":"import firedrake as 
fe\nimport sapphire.simulations.navier_stokes_boussinesq\nimport typing\n\n\nclass Simulation(sapphire.simulations.navier_stokes_boussinesq.Simulation):\n \n def __init__(self, *args,\n mesh_dimensions = (20, 20),\n hotwall_temperature = 0.5,\n coldwall_temperature = -0.5,\n reynolds_number = 1.,\n rayleigh_number = 1.e6,\n prandtl_number = 0.71,\n **kwargs):\n \n if \"solution\" not in kwargs:\n \n kwargs[\"mesh\"] = fe.UnitSquareMesh(*mesh_dimensions)\n \n self.hotwall_id = 1\n \n self.coldwall_id = 2\n \n self.hotwall_temperature = fe.Constant(hotwall_temperature)\n \n self.coldwall_temperature = fe.Constant(coldwall_temperature)\n \n super().__init__(\n *args,\n reynolds_number = reynolds_number,\n rayleigh_number = rayleigh_number,\n prandtl_number = prandtl_number,\n **kwargs)\n \n def dirichlet_boundary_conditions(self):\n \n W = self.solution.function_space()\n \n d = self.solution.function_space().mesh().geometric_dimension()\n \n return [\n fe.DirichletBC(\n self.solution_subspaces[\"u\"],\n (0,)*d,\n \"on_boundary\"),\n fe.DirichletBC(\n self.solution_subspaces[\"T\"],\n self.hotwall_temperature,\n self.hotwall_id),\n fe.DirichletBC(\n self.solution_subspaces[\"T\"],\n self.coldwall_temperature,\n self.coldwall_id)]\n ","sub_path":"sapphire/simulations/examples/heat_driven_cavity.py","file_name":"heat_driven_cavity.py","file_ext":"py","file_size_in_byte":1674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"645297538","text":"#! /usr/bin/env python3\n############################################################\n# Copyright(c) 2017, Sara Mirzaee #\n############################################################\nimport os\nimport sys\nimport logging\nimport warnings\n\n\nwarnings.filterwarnings(\"ignore\")\n\nmpl_logger = logging.getLogger('matplotlib')\nmpl_logger.setLevel(logging.WARNING)\n\nfiona_logger = logging.getLogger('fiona')\nfiona_logger.propagate = False\n\nimport time\nimport numpy as np\nimport h5py\n\nimport minopy.minopy_utilities as mut\nfrom minopy.objects.arg_parser import MinoPyParser\nfrom mintpy.utils import ptime\nfrom minopy.objects.slcStack import slcStack\nimport minopy.objects.inversion_utils as iut\nfrom skimage.measure import label\nfrom isceobj.Util.ImageUtil import ImageLib as IML\nfrom mintpy.objects import cluster\nfrom mpi4py import MPI\nfrom math import ceil\n#from minopy.lib import utils as iut\n#from minopy.lib import invert as iv\n#from minopy.lib.inversion import PPhaseLink\n#################################\n\n\ndef main(iargs=None):\n '''\n Phase linking process.\n '''\n\n\n Parser = MinoPyParser(iargs, script='phase_inversion')\n inps = Parser.parse()\n\n inversionObj = PhaseLink(inps)\n\n if inps.unpatch_flag:\n inversionObj.unpatch()\n inversionObj.close()\n\n else:\n\n box_list = []\n for box in inversionObj.box_list:\n index = inversionObj.box_list.index(box)\n out_folder = inversionObj.out_dir + '/PATCHES/PATCH_{}'.format(index)\n if not os.path.exists(out_folder + '/quality.npy'):\n box_list.append(box)\n\n if inps.mpi_flag:\n comm = MPI.COMM_WORLD\n rank = comm.Get_rank()\n size = comm.Get_size()\n np.random.seed(seed=rank)\n\n if size > len(box_list):\n num = 1\n else:\n num = ceil(len(box_list) // size)\n print(len(box_list), num)\n index = np.arange(0, len(box_list), num)\n index[-1] = len(box_list)\n\n if rank < len(index):\n time_passed = inversionObj.loop_patches(box_list[index[rank]:index[rank+1]])\n comm.gather(time_passed, root=0)\n else:\n 
inversionObj.loop_patches(inversionObj.box_list)\n\n MPI.Finalize()\n\n return None\n\n\ndef write_hdf5_block(fhandle, data, datasetName, block=None):\n \"\"\"Write data to existing HDF5 dataset in disk block by block.\n Parameters: data - np.ndarray 1/2/3D matrix\n datasetName - str, dataset name\n block - list of 2/4/6 int, for\n [zStart, zEnd,\n yStart, yEnd,\n xStart, xEnd]\n mode - str, open mode\n Returns: fname\n \"\"\"\n\n # default block value\n if block is None:\n\n # data shape\n if isinstance(data, list):\n shape=(len(data),)\n else:\n shape = data.shape\n\n # set default block as the entire data\n if len(shape) ==1:\n block = [0, shape[0]]\n elif len(shape) == 2:\n block = [0, shape[0],\n 0, shape[1]]\n elif len(shape) == 3:\n block = [0, shape[0],\n 0, shape[1],\n 0, shape[2]]\n\n if len(block) == 6:\n fhandle[datasetName][block[0]:block[1],\n block[2]:block[3],\n block[4]:block[5]] = data\n\n elif len(block) == 4:\n fhandle[datasetName][block[0]:block[1],\n block[2]:block[3]] = data\n\n elif len(block) == 2:\n fhandle[datasetName][block[0]:block[1]] = data\n\n return\n\n\ndef get_shp_row_col_f(data, input_slc, def_sample_rows, def_sample_cols, azimuth_window,\n range_window, reference_row, reference_col, distance_threshold):\n rslc = None\n testvec = None\n S1 = None\n data1 = None\n data_all = None\n res = None\n ks_label = None\n ksres = None\n\n row_0 = data[0]\n col_0 = data[1]\n n_image = input_slc.shape[0]\n length = input_slc.shape[1]\n width = input_slc.shape[2]\n\n sample_rows = row_0 + def_sample_rows\n sample_rows[sample_rows < 0] = -1\n sample_rows[sample_rows >= length] = -1\n\n sample_cols = col_0 + def_sample_cols\n sample_cols[sample_cols < 0] = -1\n sample_cols[sample_cols >= width] = -1\n\n sample_cols = sample_cols[np.flatnonzero(sample_cols >= 0)]\n sample_rows = sample_rows[np.flatnonzero(sample_rows >= 0)]\n\n ref_row = reference_row - azimuth_window + len(sample_rows)\n ref_col = reference_col - range_window + len(sample_cols)\n\n x, y = np.meshgrid(sample_cols.astype(int), sample_rows.astype(int), sparse=False)\n\n rslc = input_slc[:, y, x].reshape(n_image, -1)\n testvec = np.sort(np.abs(rslc), axis=0)\n S1 = np.sort(np.abs(input_slc[:, row_0, col_0])).reshape(n_image, 1)\n\n data1 = np.repeat(S1, testvec.shape[1], axis=1)\n data_all = np.concatenate((data1, testvec), axis=0)\n\n res = 1 * (np.apply_along_axis(mut.ecdf_distance, 0, data_all) <= distance_threshold)\n res = res.reshape(len(sample_rows), len(sample_cols))\n ks_label = label(res, background=0, connectivity=2)\n ksres = 1 * (ks_label == ks_label[ref_row, ref_col])\n\n return ksres, sample_rows[0], sample_cols[0]\n\n\ndef process_patch_f(box=None, range_window=None, azimuth_window=None, width=None, length=None,\n n_image=None, slcStackObj=None, distance_threshold=None, def_sample_rows=None,\n def_sample_cols=None, reference_row=None, reference_col=None, phase_linking_method=None,\n total_num_mini_stacks=None, default_mini_stack_size=None):\n big_box = None\n row1 = None\n row2 = None\n col1 = None\n col2 = None\n lin = None\n sam = None\n coords = None\n overlap_length = None\n overlap_width = None\n box_width = None\n box_length = None\n quality = None\n rslc_ref = None\n patch_slc_images = None\n\n box_width = box[2] - box[0]\n box_length = box[3] - box[1]\n\n rslc_ref = np.empty([n_image, box_length, box_width], dtype='complex')\n quality = np.empty([box_length, box_width], dtype='float')\n\n big_box = iut.get_big_box(box, range_window, azimuth_window, width, length)\n\n # In box 
coordinate\n row1 = box[1] - big_box[1]\n row2 = box[3] - big_box[1]\n col1 = box[0] - big_box[0]\n col2 = box[2] - big_box[0]\n\n lin = np.arange(row1, row2)\n overlap_length = len(lin)\n sam = np.arange(col1, col2)\n overlap_width = len(sam)\n lin, sam = np.meshgrid(lin, sam)\n coords = set(map(lambda y, x: (int(y), int(x)),\n lin.T.reshape(overlap_length * overlap_width, 1),\n sam.T.reshape(overlap_length * overlap_width, 1)))\n\n patch_slc_images = slcStackObj.read(datasetName='slc', box=big_box)\n\n def invert_coord_f(data):\n CCG = None\n result = {}\n shp = None\n row0 = None\n col0 = None\n coh_mat = None\n num_shp = None\n shp_rows = None\n shp_cols = None\n squeezed_images = None\n vec_refined = None\n amp_refined = None\n\n\n # big box coordinate:\n shp, row0, col0 = get_shp_row_col_f(data, patch_slc_images, def_sample_rows, def_sample_cols, azimuth_window,\n range_window, reference_row, reference_col, distance_threshold)\n\n num_shp = len(shp[shp > 0])\n shp_rows, shp_cols = np.nonzero(shp)\n shp_rows = np.array(shp_rows + row0).astype(int)\n shp_cols = np.array(shp_cols + col0).astype(int)\n\n CCG = np.array(patch_slc_images[:, shp_rows, shp_cols])\n coh_mat = mut.est_corr(CCG)\n\n if num_shp > 20:\n\n if 'sequential' in phase_linking_method:\n vec_refined, squeezed_images = iut.sequential_phase_linking(CCG, phase_linking_method, 10,\n total_num_mini_stacks)\n\n else:\n vec_refined = mut.phase_linking_process(CCG, 0, phase_linking_method, squeez=False)\n\n else:\n vec_refined = mut.test_PS(coh_mat)\n\n amp_refined = np.array(np.mean(np.abs(CCG), axis=1)).reshape(-1, 1)\n vec_refined /= np.abs(vec_refined)\n vec_refined *= amp_refined\n\n if 'sequential' in phase_linking_method and num_shp > 20:\n vec_refined = iut.datum_connect(squeezed_images, vec_refined, default_mini_stack_size)\n\n result['x'] = data[1]\n result['y'] = data[0]\n result['rvector'] = vec_refined\n result['quality'] = mut.gam_pta(np.angle(coh_mat), vec_refined)\n\n return result\n\n results = map(invert_coord_f, coords)\n num_points = len(coords)\n\n prog_bar = ptime.progressBar(maxValue=num_points)\n t = 0\n time0 = time.time()\n for result in results:\n rf = result['y'] - row1\n cf = result['x'] - col1\n rslc_ref[:, rf:rf+1, cf:cf+1] = result['rvector'].reshape(-1, 1, 1)\n quality[rf:rf+1, cf:cf+1] = result['quality']\n prog_bar.update(t + 1, every=20, suffix='{}/{} pixels, box: {}'.format(t + 1, num_points, box))\n t += 1\n print('Total time: {} s'.format(time.time() - time0))\n\n patch_slc_images = None\n\n return rslc_ref, quality, box\n\n\nclass PhaseLink:\n def __init__(self, inps):\n\n self.inps = inps\n self.work_dir = inps.work_dir\n self.phase_linking_method = inps.inversion_method\n self.range_window = int(inps.range_window)\n self.azimuth_window = int(inps.azimuth_window)\n self.patch_size = int(inps.patch_size)\n if inps.mpi_flag:\n self.mpi_flag = True\n else:\n self.mpi_flag = False\n #self.numWorker = int(inps.numWorker)\n #self.config = inps.config\n #self.cluster = inps.cluster\n self.out_dir = self.work_dir + '/inverted'\n os.makedirs(self.out_dir, exist_ok='True')\n\n self.shp_test = inps.shp_test\n self.shp_function = self.get_shp_function()\n\n # read input slcStack.h5\n self.slc_stack = inps.slc_stack # slcStack.h5 file\n self.slcStackObj = slcStack(self.slc_stack)\n self.metadata = self.slcStackObj.get_metadata()\n self.all_date_list = self.slcStackObj.get_date_list()\n self.n_image, self.length, self.width = self.slcStackObj.get_size()\n\n # total number of neighbouring pixels\n 
self.shp_size = self.range_window * self.azimuth_window\n\n # threshold for shp test based on number of images to test\n self.distance_thresh = mut.ks_lut(self.n_image, self.n_image, alpha=0.01)\n\n # split the area in to patches of size 'self.patch_size'\n self.box_list, self.num_box = self.patch_slice(inps)\n\n # default number of images in each ministack\n self.mini_stack_default_size = 10\n if 'sequential' in self.phase_linking_method:\n self.total_num_mini_stacks = self.n_image // self.mini_stack_default_size\n else:\n self.total_num_mini_stacks = 1\n\n self.sample_rows, self.sample_cols, self.reference_row, self.reference_col = self.window_for_shp()\n\n self.RSLCfile = os.path.join(self.out_dir, 'rslc_ref.h5')\n self.patch_slc_images = None\n\n if 'sequential' in self.phase_linking_method:\n self.sequential = True\n else:\n self.sequential = False\n\n return\n\n def get_shp_function(self):\n \"\"\"\n Reads the shp testing function based on template file\n Returns: shp_function\n -------\n\n \"\"\"\n if self.shp_test == 'ks':\n shp_function = mut.ks2smapletest\n elif self.shp_test == 'ad':\n shp_function = mut.ADtest\n elif self.shp_test == 'ttest':\n shp_function = mut.ttest_indtest\n else: # default is KS 2 sample test\n shp_function = mut.ks2smapletest\n return shp_function\n\n def window_for_shp(self):\n \"\"\"\n Shp window to be placed on each pixel\n Returns rows, cols, reference pixel row index, reference pixel col index\n -------\n\n \"\"\"\n sample_rows = np.arange(-((self.azimuth_window - 1) / 2), ((self.azimuth_window - 1) / 2) + 1, dtype=int)\n reference_row = np.array([(self.azimuth_window - 1) / 2], dtype=int)\n\n sample_cols = np.arange(-((self.range_window - 1) / 2), ((self.range_window - 1) / 2) + 1, dtype=int)\n reference_col = np.array([(self.range_window - 1) / 2], dtype=int)\n\n return sample_rows, sample_cols, reference_row, reference_col\n\n def patch_slice(self, inps):\n \"\"\"\n Slice the image into patches of size patch_size\n box = (x0 y0 x1 y1) = (col0, row0, col1, row1) for each patch with respect to the whole image\n Returns box list, number of boxes\n -------\n\n \"\"\"\n\n patch_row_1 = np.arange(0, self.length - self.azimuth_window, self.patch_size, dtype=int)\n patch_row_2 = patch_row_1 + self.patch_size\n patch_row_2[-1] = self.length\n\n patch_col_1 = np.arange(0, self.width - self.range_window, self.patch_size, dtype=int)\n patch_col_2 = patch_col_1 + self.patch_size\n patch_col_2[-1] = self.width\n num_box = len(patch_col_1) * len(patch_row_1)\n\n box_list = []\n for i in range(len(patch_row_1)):\n for j in range(len(patch_col_1)):\n box = (patch_col_1[j], patch_row_1[i], patch_col_2[j], patch_row_2[i])\n box_list.append(box)\n\n return box_list, num_box\n\n def initiate_output(self):\n\n RSLC = h5py.File(self.RSLCfile, 'a')\n\n if 'slc' in RSLC.keys():\n RSLC['slc'].resize(self.n_image, 0)\n else:\n self.metadata['FILE_TYPE'] = 'slc'\n for key, value in self.metadata.items():\n RSLC.attrs[key] = value\n\n RSLC.create_dataset('slc',\n shape=(self.n_image, self.length, self.width),\n maxshape=(None, self.length, self.width),\n chunks=True,\n dtype='complex64')\n\n RSLC.create_dataset('quality',\n shape=(self.length, self.width),\n maxshape=(self.length, self.width),\n chunks=True,\n dtype='float')\n\n RSLC['quality'][:, :] = -1\n\n # 1D dataset containing dates of all images\n dsName = 'dates'\n dsDataType = np.string_\n data = np.array(self.all_date_list, dtype=dsDataType)\n RSLC.create_dataset(dsName, data=data)\n\n RSLC.close()\n\n return\n\n 
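# --- Added check (not part of the original record) ----------------------------
# A quick numeric illustration of the patch_slice() tiling above: for a
# 100-pixel axis with patch_size=40 and a 5-pixel search window, the patches
# cover the axis completely and the last patch is stretched to the image edge.
# The names length/patch_size/window are local stand-ins for the attributes.
import numpy as np

length, patch_size, window = 100, 40, 5
start = np.arange(0, length - window, patch_size, dtype=int)
stop = start + patch_size
stop[-1] = length
print(list(zip(start, stop)))   # [(0, 40), (40, 80), (80, 100)]
# ------------------------------------------------------------------------------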
def loop_patches(self, box_list):\n\n\n start_time = time.time()\n\n data_kwargs = {\n \"range_window\" : self.range_window,\n \"azimuth_window\" : self.azimuth_window,\n \"width\" : self.width,\n \"length\" : self.length,\n \"n_image\" : self.n_image,\n \"slcStackObj\" : self.slcStackObj,\n \"distance_threshold\" : self.distance_thresh,\n \"def_sample_rows\" : self.sample_rows,\n \"def_sample_cols\" : self.sample_cols,\n \"reference_row\" : self.reference_row,\n \"reference_col\" : self.reference_col,\n \"phase_linking_method\" : self.phase_linking_method,\n \"total_num_mini_stacks\" : self.total_num_mini_stacks,\n \"default_mini_stack_size\" : self.mini_stack_default_size\n }\n\n self.mpi_flag = True\n for box in box_list:\n data_kwargs['box'] = box\n index = self.box_list.index(box)\n\n out_folder = self.out_dir + '/PATCHES/PATCH_{}'.format(index)\n os.makedirs(self.out_dir + '/PATCHES', exist_ok=True)\n os.makedirs(out_folder, exist_ok=True)\n if os.path.exists(out_folder + '/quality.npy'):\n break\n #continue\n\n if self.mpi_flag:\n rslc_ref, quality = process_patch_f(**data_kwargs)[:-1]\n else:\n # rslc_ref, quality = process_patch_f(**data_kwargs)[:-1]\n box_width = box[2] - box[0]\n box_length = box[3] - box[1]\n rslc_ref = np.empty([self.n_image, box_length, box_width], dtype='complex')\n quality = np.empty([box_length, box_width], dtype='float')\n\n cluster_obj = cluster.DaskCluster('local', 4)\n cluster_obj.open()\n\n # run dask\n rslc_ref, quality = cluster_obj.run(func=process_patch_f,\n func_data=data_kwargs,\n results=[rslc_ref, quality])\n\n # close dask cluster and client\n cluster_obj.close()\n\n np.save(out_folder + '/rslc_ref', rslc_ref)\n np.save(out_folder + '/quality', quality)\n break\n m, s = divmod(time.time() - start_time, 60)\n print('time used: {:02.0f} mins {:02.1f} secs.\\n'.format(m, s))\n return # m, s\n\n def unpatch(self):\n if os.path.exists(self.RSLCfile):\n print('rslc_ref.h5 exists, skip unpatching ...')\n\n else:\n self.initiate_output()\n print('open HDF5 file rslc_ref.h5 in a mode')\n with h5py.File(self.RSLCfile, 'a') as fhandle:\n for index, box in enumerate(self.box_list):\n patch_dir = self.out_dir + '/PATCHES/PATCH_{}'.format(index)\n rslc_ref = np.load(patch_dir + '/rslc_ref.npy')\n quality = np.load(patch_dir + '/quality.npy')\n\n print('-' * 50)\n print(\"unpatch block {}/{} : {}\".format(index, self.num_box, box))\n\n # wrapped interferograms 3D\n block = [0, self.n_image, box[1], box[3], box[0], box[2]]\n write_hdf5_block(fhandle=fhandle,\n data=rslc_ref,\n datasetName='slc',\n block=block)\n\n # temporal coherence - 2D\n block = [box[1], box[3], box[0], box[2]]\n write_hdf5_block(fhandle=fhandle,\n data=quality,\n datasetName='quality',\n block=block)\n\n print('close HDF5 file rslc_ref.h5.')\n\n return\n\n def close(self):\n if os.path.exists(self.RSLCfile):\n import multiprocessing as mp\n from functools import partial\n\n print('open HDF5 file rslc_ref.h5 in r mode')\n\n num_cores = mp.cpu_count()\n pool = mp.Pool(processes=num_cores)\n date_list = self.all_date_list\n out_dir = self.out_dir\n width = self.width\n length = self.length\n RSLCfile = self.RSLCfile\n func = partial(write_wrapped, date_list, out_dir, width, length, RSLCfile)\n pool.map(func, self.all_date_list)\n pool.close()\n pool.join()\n\n print('open HDF5 file rslc_ref.h5 in r mode')\n fhandle = h5py.File(self.RSLCfile, 'r')\n print('write quality file')\n quality_file = self.out_dir + '/quality'\n if not os.path.exists(quality_file):\n quality_memmap = 
np.memmap(quality_file, mode='write', dtype='float32', shape=(self.length, self.width))\n IML.renderISCEXML(quality_file, bands=1, nyy=self.length, nxx=self.width, datatype='float32',\n scheme='BIL')\n else:\n quality_memmap = np.memmap(quality_file, mode='r+', dtype='float32', shape=(self.length, self.width))\n\n quality_memmap[:, :] = fhandle['quality']\n quality_memmap = None\n print('close HDF5 file rslc_ref.h5.')\n\n fhandle.close()\n\n else:\n print('rslc_ref.h5 does not exist!')\n\n return\n\n\ndef write_wrapped(date_list, out_dir, width, length, RSLCfile, date):\n\n d = date_list.index(date)\n print('write wrapped_phase {}'.format(date))\n wrap_date = os.path.join(out_dir, 'wrapped_phase', date)\n os.makedirs(wrap_date, exist_ok=True)\n out_name = os.path.join(wrap_date, date + '.slc')\n if not os.path.exists(out_name):\n fhandle = h5py.File(RSLCfile, 'r')\n out_rslc = np.memmap(out_name, dtype='complex64', mode='w+', shape=(length, width))\n out_rslc[:, :] = fhandle['slc'][d, :, :]\n fhandle.close()\n IML.renderISCEXML(out_name, bands=1, nyy=length, nxx=width, datatype='complex64',\n scheme='BSQ')\n else:\n IML.renderISCEXML(out_name, bands=1, nyy=length, nxx=width, datatype='complex64',\n scheme='BSQ')\n return\n\n#################################################\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"dev/phase_inversion.py","file_name":"phase_inversion.py","file_ext":"py","file_size_in_byte":21197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"491632052","text":"# from batch_test.nmc_vf_base.tests_function import *\n# from batch_test.nmc_vf_method import *\n\nimport subprocess\nimport sys\nimport nmc_verification.nmc_vf_base.tool.path_tools as path_tools\nimport os\n\n\ndef get_tests_path(path=__file__):\n path = os.path.dirname(path)\n\n if path[-5:] == 'tests':\n return path\n elif len(path) == 3:\n return\n else:\n path = get_tests_path(path)\n return path\n\n\npath = get_tests_path()\n\npath_list = path_tools.get_path_list_in_dir(path)\n\n# 挑选出 .py文件路径\npy_list = []\nfor path in path_list:\n if path[-3:] == '.py' and path[-8:] != 'tests.py':\n py_list.append(path)\n\n print(path + '运行中')\n cmd = ['python', path, 'b', 'c']\n subprocess.call(cmd, 0, None, None, None, None)\n","sub_path":"tests/batch_test/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"58413950","text":"import time\nimport RPi.GPIO as GPIO\nimport pygame, sys, os\nimport iniPi\nimport sqlPi\nimport ipPi\nimport timePi\nimport subprocess\nimport git\nimport datetime\nimport subprocess\n\nfrom pygame.locals import *\nfrom iniPi import * \nfrom git import Repo\n\nrepo = Repo(\"/home/pi/alarmPi\")\nassert not repo.bare\n\nos.putenv('SDL_FBDEV', '/dev/fb1')\n\n#init menu\ntimerMenu=30\nposMenu =50\nspaceMenu = 30\n#Dev Test Stage Prod GitPull Exit\ninfoTxt = [\"Prod\", \"Stage\",\"Test\", \"Dev\", \"Git pull\", \"Exit\"]\nposCur = 20\n\npygame.init()\n# 2 put in iniPi\nicO=pygame.image.load(ic16PathS+ \"power-standby\" +ic16PathE)\nicX=pygame.image.load(ic16PathS+ \"check\" +ic16PathE)\nicUp=pygame.image.load(ic16PathS+ \"caret-top\" +ic16PathE)\nicDown=pygame.image.load(ic16PathS+ \"caret-bottom\" +ic16PathE)\n\nDISPLAYSURF = pygame.display.set_mode((scrWidth, scrHeigth))\n\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(4, GPIO.IN, pull_up_down=GPIO.PUD_UP)\nGPIO.setup(5, GPIO.IN, pull_up_down=GPIO.PUD_UP)\nGPIO.setup(17, 
GPIO.IN, pull_up_down=GPIO.PUD_UP)\nGPIO.setup(23, GPIO.IN, pull_up_down=GPIO.PUD_UP)\n\n\nfontSelL=pygame.font.SysFont(iniPi.font, iniPi.font_sizeXXl)\n\n#GPIO.output(27,GPIO.HIGH)\npygame.mouse.set_visible(False)\n\n#pygame.display.update()\n#time.sleep(3)\nclock = pygame.time.Clock()\n\nwhile True:\n    os.system('clear')\n    DISPLAYSURF.fill(iniPi.WHITE)\n    time2Display = datetime.datetime.now().strftime(\"%H:%M\")\n    hour2Display = int(datetime.datetime.now().strftime(\"%H\"))    \n    date2Display = datetime.datetime.now().strftime(\"%d\")   \n    \n    #timerMenuShow = timerMenu    \n    timer2Display = fontSelL.render(\"Timer : %2d\"% (timerMenu) , True, iniPi.BLACK)   \n    for nbrMenu2Dis in range (0, 6):\n        disInfoTxt = fontSelL.render(infoTxt[nbrMenu2Dis], True, iniPi.BLACK)\n        nb2dis= posMenu +spaceMenu * nbrMenu2Dis    \n        DISPLAYSURF.blit(disInfoTxt, (64, nb2dis))   \n    \n    #screen    \n    pygame.draw.rect(DISPLAYSURF, iniPi.RED, (32,posCur,256,30), 3)\n    DISPLAYSURF.blit(icO, (icOPosX, icOPosY))\n    DISPLAYSURF.blit(icX, (icXPosX, icXPosY))\n    DISPLAYSURF.blit(icDown, (icDownPosX, icDownPosY))\n    DISPLAYSURF.blit(icUp, (icUpPosX, icUpPosY))\n    \n    pygame.display.update()\n    clock.tick(60) # Limit the frame rate to 60 FPS.\n\n    if (not GPIO.input(5)):\n        # X (select) button: launch the highlighted menu entry\n        if posCur == 20:\n            os.execl('/home/pi/alarmPi/runProd.sh', '')\n        if posCur == 50:\n            os.execl('/home/pi/alarmPi/runStage.sh', '')\n        if posCur == 80:\n            os.execl('/home/pi/alarmPi/runTest.sh', '')\n        if posCur == 110:\n            os.execl('/home/pi/alarmPi/runDev.sh', '')\n        if posCur == 140:\n            g = git.Git('/home/pi/alarmPi')\n            g.pull('origin','master')\n            \n            # restart python soft to update change\n            os.execl('/home/pi/alarmPi/runme.sh', '')\n        if posCur == 170:\n            pygame.quit()\n            exit()    \n    if (not GPIO.input(23)):\n        # O (power) button: quit pygame and exit\n        pygame.quit()\n        sys.exit()\n    \n    if (not GPIO.input(4)):\n        # VOL LOW: move the cursor down one menu entry\n        if posCur < 170: #(50 + 30*(len(infoTxt))):\n            posCur+=30\n            \n    if (not GPIO.input(17)):\n        # VOL HIGH: move the cursor up one menu entry\n        if posCur > 20:\n            posCur-=30    \n   \n    for event in pygame.event.get():\n        if event.type == QUIT:\n            pygame.quit()\n            sys.exit()    \n    print(posCur)\n    #timerMenu = timerMenu - 0.1\n    #if (timerMenu < 0):\n    #    pygame.quit()\n    time.sleep(0.1)","sub_path":"devMenu01.py","file_name":"devMenu01.py","file_ext":"py","file_size_in_byte":4179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"50672490","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import mean_squared_error, r2_score\nfrom sklearn import model_selection\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.ensemble import RandomForestRegressor\nimport openpyxl\n\n\n# In[ ]:\n\n\nfrom warnings import filterwarnings\nfilterwarnings(\"ignore\")\n\n\n# In[ ]:\n\n\ndf = pd.read_excel('//MPSTLPSRV2//TalepTahminPaylasim2//model//verison.xlsx')\ndf1 = pd.read_excel('//MPSTLPSRV2//TalepTahminPaylasim2/model//üretim.xlsx')\n\n\n# In[ ]:\n\n\ndf.info()\ndf1.info()\n\n\n# In[ ]:\n\n\ndf = df.dropna()\n\n\n# In[ ]:\n\n\nsehir=df[\"Şehir\"]\ny_üret=df[\"Radiation\"]\n\n\n# In[ 
]:\n\n\ntoplam=0\ntoplaagg=0\naksaray_agırlık=0.1\nkonya_agırlık=0.5\nnigde_agırlık=0.1\nkaraman_agırlık=0.1\nnevsehir_agırlık=0.1\nkırsehir_agırlık=0.1\ntoplaagırlık=aksaray_agırlık+nigde_agırlık+konya_agırlık+karaman_agırlık+nevsehir_agırlık+kırsehir_agırlık\nprint(aksaray_agırlık)\nprint(nigde_agırlık)\nprint(konya_agırlık)\nprint(karaman_agırlık)\nprint(nevsehir_agırlık)\nprint(kırsehir_agırlık)\n\n\n# In[ ]:\n\n\nprint(toplaagırlık)\n\n\n# In[ ]:\n\n\nk=df[[\"Radiation\",\"Solar Power\",\"Temperature\",\"Precipitation\",\"Wind Speed\",\"Low Cloud\", \"Medium Cloud\",\"High Cloud\",\"Effective Cloud\",\"Clear Radiation\"]]\n\n\n# In[ ]:\n\n\nfor v in range(len(y_üret.index)): \n if (sehir[v] == 'AKSARAY') :\n k[v:v+1]=k[v:v+1]*aksaray_agırlık \n elif(sehir[v] == 'NİĞDE') :\n k[v:v+1]=k[v:v+1]*nigde_agırlık\n elif(sehir[v] == 'KONYA') :\n k[v:v+1]=k[v:v+1]*konya_agırlık\n elif(sehir[v] == 'KARAMAN') :\n k[v:v+1]=k[v:v+1]*karaman_agırlık\n elif(sehir[v] == 'KIRŞEHİR') :\n k[v:v+1]=k[v:v+1]*kırsehir_agırlık\n elif(sehir[v] == 'NEVŞEHİR') :\n k[v:v+1]=k[v:v+1]*nevsehir_agırlık\n\n\n# In[ ]:\n\n\ndf[[\"Radiation\",\"Solar Power\",\"Temperature\",\"Precipitation\",\"Wind Speed\",\"Low Cloud\", \"Medium Cloud\",\"High Cloud\",\"Effective Cloud\",\"Clear Radiation\"]]=k[[\"Radiation\",\"Solar Power\",\"Temperature\",\"Precipitation\",\"Wind Speed\",\"Low Cloud\", \"Medium Cloud\",\"High Cloud\",\"Effective Cloud\",\"Clear Radiation\"]]\ntable = pd.pivot_table(df,index=['Tarih'],aggfunc=np.sum)\n\n\n# In[ ]:\n\n\ndf1.index=table.index\n\n\n# In[ ]:\n\n\ntable=table.join(df1)\n\n\n# In[ ]:\n\n\ny = table.loc[:,[\"GES NET\"]]\nc=table.loc[:,[\"Radiation\",\"Effective Cloud\",\"Medium Cloud\",\"Solar Power\",\"Precipitation\",\"Low Cloud\",\"Clear Radiation\",\"Temperature\"]]\nprint('X Shape: ', c.shape)\nprint('Y Shape: ', y.shape)\n\n\n# In[ ]:\n\n\nc_train, c_test, y_train, y_test = train_test_split(c,\n y,\n test_size = 0.00015,\n random_state= 42)\n\n\n# In[ ]:\n\n\nrf_model= RandomForestRegressor(n_estimators = 10, random_state = 42)\nrf_tuned = rf_model.fit(c,y.values.ravel())\n\n\n# In[ ]:\n\n\nrf_model = RandomForestRegressor(random_state= 42,\n max_depth=8, \n max_features=7,\n min_samples_split= 10,\n n_estimators= 20000)\nrf_tuned = rf_model.fit(c_train,y_train)\n\n\n# In[ ]:\n\n\nimport pypyodbc\ndb = pypyodbc.connect(\n 'Driver={SQL Server};'\n 'Server=10.242.1.135;'\n 'Database=Mepas;'\n 'UID=TalepTahminGoruntuleme;'\n 'PWD=Mepas@951;')\nimlec = db.cursor()\n\n\n# In[ ]:\n\n\ndf = pd.read_sql_query(\"SELECT Sehir,Tarih,tip, Sirket,Temperature,Precipitation,WindSpeed,LowCloud,MediumCloud,HighCloud,EffectiveCloud,Radiation,ClearRadiation,SolarPower From K3HavaTahmini\",db)\n\n\n# In[ ]:\n\n\nhava=df[df['tip'] == 'Tahmin']\n\n\n# In[ ]:\n\n\nhava_tahmin=df[df['tip'] == 'Tahmin']\n\n\n# In[ ]:\n\n\nhava_tahmin.rename(columns ={'sehir':'Şehir','tarih':'Tarih','radiation':'Radiation','temperature':'Temperature','precipitation':'Precipitation','windspeed':'Wind Speed','lowcloud':'Low Cloud','mediumcloud':'Medium Cloud','highcloud':'High Cloud','effectivecloud':'Effective Cloud','clearradiation':'Clear Radiation','solarpower':'Solar Power'},inplace=True)\n\n\n# In[ ]:\n\n\nhava_tahmin.reset_index(drop=True,inplace=True)\n\n\n# In[ ]:\n\n\nsehir_tah=hava_tahmin[\"Şehir\"]\nke=hava_tahmin[[\"Radiation\",\"Solar Power\",\"Temperature\",\"Precipitation\",\"Wind Speed\",\"Low Cloud\", \"Medium Cloud\",\"High Cloud\",\"Effective Cloud\",\"Clear Radiation\"]]\nfor v in 
range(len(hava_tahmin.index)): \n if (sehir_tah[v] == 'AKSARAY') :\n ke[v:v+1]=ke[v:v+1]*aksaray_agırlık\n elif(sehir_tah[v] == 'NİĞDE') :\n ke[v:v+1]=ke[v:v+1]*nigde_agırlık\n elif(sehir_tah[v] == 'KONYA') :\n ke[v:v+1]=ke[v:v+1]*konya_agırlık\n elif(sehir_tah[v] == 'KARAMAN') :\n ke[v:v+1]=ke[v:v+1]*karaman_agırlık\n elif(sehir_tah[v] == 'KIRŞEHİR') :\n ke[v:v+1]=ke[v:v+1]*kırsehir_agırlık\n elif(sehir_tah[v] == 'NEVŞEHİR') :\n ke[v:v+1]=ke[v:v+1]*nevsehir_agırlık\n\n\n# In[ ]:\n\n\nhava_tahmin[[\"Radiation\",\"Solar Power\",\"Temperature\",\"Precipitation\",\"Wind Speed\",\"Low Cloud\", \"Medium Cloud\",\"High Cloud\",\"Effective Cloud\",\"Clear Radiation\"]]=ke[[\"Radiation\",\"Solar Power\",\"Temperature\",\"Precipitation\",\"Wind Speed\",\"Low Cloud\", \"Medium Cloud\",\"High Cloud\",\"Effective Cloud\",\"Clear Radiation\"]]\n\n\n# In[ ]:\n\n\ntable_tah = pd.pivot_table(hava_tahmin,index=['Tarih'],aggfunc=np.sum)\n\n\n# In[ ]:\n\n\nc_tahmin=table_tah.loc[:,[\"Radiation\",\"Effective Cloud\",\"Medium Cloud\",\"Solar Power\",\"Precipitation\",\"Low Cloud\",\"Clear Radiation\",\"Temperature\"]]\nc_tahmin=c_tahmin.reset_index(drop=True)\n\n\n# In[ ]:\n\n\nimport datetime\ntarih =datetime.datetime.now()\nzaman_damgası = datetime.datetime.timestamp(tarih)\nzaman_damgası\nz=str(zaman_damgası) + \".xlsx\"\n\n\n# In[ ]:\n\n\nwb = openpyxl.Workbook()\nsayfa = wb.active\nfor v in range(len(c_tahmin[\"Radiation\"].index)): \n y_pred =rf_tuned.predict(c_tahmin.loc[[v],[\"Radiation\",\"Effective Cloud\",\"Medium Cloud\",\"Solar Power\",\"Precipitation\",\"Low Cloud\",\"Clear Radiation\",\"Temperature\"]]) \n r = v + 1\n sayfa.cell(row = r, column = 2).value = float(y_pred)\n sayfa.cell(row = r, column = 1).value = table_tah.index[v]\nwb.save(z)\nwb.close()\n\n\n# In[ ]:\n\n\nimport smtplib\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nfrom email.mime.base import MIMEBase\nfrom email import encoders\nsender = 'info@mepasenerji.com'\nreceivers = ['enerjipiyasasi@mepasenerji.com']\nport = 25\nmsg=MIMEMultipart()\nmsg['Subject'] = 'Random Forest K3 Tahmini'\nmsg['From'] = 'info@mepasenerji.com'\nmsg['To'] = 'enerjipiyasasi@mepasenerji.com'\neklenti_dosya_ismi=z\ndsgFilename='tahmin.xlsx'\nmsg.attach(MIMEText(\"merhaba K3 Random Forest tahmini ektedir.Saygılarımla\"))\nwith(open(eklenti_dosya_ismi,'rb')) as eklenti_dosyasi:\n payload=MIMEBase('application', 'vnd.openxmlformats-officedocument.spreadsheetml.sheet',name=dsgFilename)\n payload.set_payload(eklenti_dosyasi.read())\n encoders.encode_base64(payload)\n payload.add_header(\"Content-Decomposition\",\"attachment\",filename=eklenti_dosya_ismi)\n msg.attach(payload)\n msg_str=msg.as_string()\nwith smtplib.SMTP('mail.mepasenerji.com', 25) as server:\n server.sendmail(sender, receivers, msg_str)\n print(\"Successfully sent email\")\n\n\n# In[ ]:\n\n\nimport os\nos.remove(z, dir_fd=None)\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"seker.py","file_name":"seker.py","file_ext":"py","file_size_in_byte":7305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"516847779","text":"import math\n\nclass particle:\n m = 1.\n x = 0.\n y = 0.\n z = 0.\n vx = 0.\n vy = 0.\n vz = 0.\n fx = 0.\n fy = 0.\n fz = 0.\n def __init__(self,mm,px,py,pz):\n self.m = mm\n self.x = px\n self.y = py\n self.z = pz\n\n def dump(self):\n print(\"p: \", self.x,self.y,self.z)\n print(\"v: \", self.vx,self.vy,self.vz)\n print(\"f: \", self.fx,self.fy,self.fz)\n\nclass simulation:\n def 
__init__(self):\n self.particles = []\n self.epsilon = 1.\n self.sigma = 1.\n self.alpha = 0.0167\n self.cut = 1.3\n self.ts = 0.000005\n self.current = 0\n self.total = 1000000\n self.pe = 0.\n\n def force(self,a, b):\n rm = math.sqrt( ((a.x + b.x )/2)**2 + ((a.y + b.y )/2)**2 + ((a.z + b.z )/2)**2 )\n delx = a.x - b.x\n dely = a.y - b.y\n delz = a.z - b.z\n rsq = (delx)**2 +(dely)**2 +(delz)**2 \n r2inv = 1./rsq\n r6inv = r2inv*r2inv*r2inv\n shrink = (self.sigma * (1-rm* self.alpha *self.current/self.total)) ** 6\n forza = 24. * self.epsilon * shrink * r6inv*( 2 * shrink * r6inv -1) *r2inv\n pe = 2* self.epsilon *shrink * r6inv *( shrink*r6inv-1)\n return [forza*delx, forza*dely, forza*delz,pe]\n \n \n\n def compute(self,d):\n self.pe = 0.\n for i,p in enumerate(self.particles):\n lista = self.particles[:]\n lista.pop(i)\n p.fx = 0.\n p.fy = 0.\n p.fz = 0.\n for j,pp in enumerate(lista):\n f = self.force(p,pp)\n p.fx += f[0]\n p.fy += f[1]\n p.fz += f[2]\n self.pe += f[3]\n\n if d == 1:\n print(\"particella \",i, \"step \",self.current)\n p.dump()\n self.pe /= len(self.particles)\n \n\n def update(self):\n for i,p in enumerate(self.particles):\n p.x += p.vx * self.ts\n p.y += p.vy * self.ts\n p.z += p.vz * self.ts\n\n p.vx += p.fx / p.m * self.ts\n p.vy += p.fy / p.m * self.ts\n p.vz += p.fz / p.m * self.ts\n \n self.current +=1\n \n \n def energydump(self):\n #print(\"energie step\", self.current)\n print('pe =',self.pe) \n ke = 0.\n for i in self.particles:\n ke += 1/2*i.m*(i.vx*i.vx+i.vy*i.vy+i.vz*i.vz)\n #print(\"ke =\",ke)\n #print(\"etot =\", self.pe+ke)\n \n \n def run(self,dumps):\n while self.current < self.total+1:\n if self.current % dumps == 0:\n self.compute(0)\n self.energydump()\n self.update()\n else:\n self.compute(0)\n self.update()\n \n \n\nsim = simulation()\n\na = particle(1,30,0,0)\nb = particle(1,30,1.3,0)\nc = particle(1,30,0,1.3)\nsim.particles.append(a)\nsim.particles.append(b)\nsim.particles.append(c)\n\nsim.run(50000)\n","sub_path":"sim.py","file_name":"sim.py","file_ext":"py","file_size_in_byte":3019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"50526016","text":"from base_de_dados2 import avaliacoes\nfrom math import sqrt\n\ndef euclidiana (usuario1, usuario2):\n i = False\n for item in avaliacoes[usuario1]:\n if item in avaliacoes[usuario2]:\n i = True\n break\n if not i:\n return 0\n r = sqrt(sum([pow(avaliacoes[usuario1][item] - avaliacoes[usuario2][item], 2) for item in avaliacoes[usuario1] if item in avaliacoes[usuario2]]))\n return (1/(1+r))\n\ndef getSimilares(usuario):\n similaridade = [(euclidiana(usuario, outro), outro) for outro in avaliacoes if outro != usuario]\n similaridade.sort()\n similaridade.reverse()\n return similaridade\n\ndef getRecomendacoes(usuario):\n totais = {}\n somaSimilaridade = {}\n for outro in avaliacoes:\n if outro == usuario: continue\n similaridade = euclidiana(usuario, outro)\n if similaridade <= 0: continue\n for item in avaliacoes[outro]:\n #### somente o que o usuário alvo ainda não avaliou\n if item not in avaliacoes[usuario]:\n totais.setdefault(item, 0)\n totais[item] += avaliacoes[outro][item] * similaridade\n somaSimilaridade.setdefault(item, 0)\n somaSimilaridade[item] += similaridade\n rankings = [(total/somaSimilaridade[item], item) for item, total in totais.items()]\n rankings.sort()\n rankings.reverse()\n return 
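# --- Added worked example (not part of the original record) -------------------
# Inlining the inverse-euclidean similarity used by the recommender above on a
# tiny hand-made ratings dict (`avaliacoes` is Portuguese for "ratings"; the
# names here are illustrative only):
from math import sqrt

ratings = {
    'ana': {'filmeA': 5.0, 'filmeB': 3.0},
    'bia': {'filmeA': 4.0, 'filmeB': 3.5},
}
shared = [i for i in ratings['ana'] if i in ratings['bia']]
dist = sqrt(sum((ratings['ana'][i] - ratings['bia'][i]) ** 2 for i in shared))
print(round(1 / (1 + dist), 3))   # 0.472 -> fairly similar raters
# ------------------------------------------------------------------------------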
rankings\n\nprint(getRecomendacoes('11979149'))","sub_path":"recomendacao.py","file_name":"recomendacao.py","file_ext":"py","file_size_in_byte":1428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"434934168","text":"from . import app\nfrom .factory import AppFactory\nfrom .error_handler import EcommerceException\nfrom flask import jsonify, request\nimport re\nimport os\n\n_database = AppFactory.db\n_collection = _database.collection('products')\n\n@app.route('/api/products')\ndef serveProducts():\n print(\"Fetching Products\")\n query = _collection.stream()\n print(\"Products fetched\")\n products = []\n for q in query:\n products.append({'id':q.id,'data':q.to_dict()})\n return jsonify({'status_code':200,'products':products})\n\n@app.route('/api/products', methods=['POST'])\ndef addProduct():\n _fields = AppFactory.constants['base_fields']\n _field_keys = list(_fields.keys()) \n _form_keys = list(request.args)\n missingFields = set(_field_keys) - set(_form_keys)\n missingFields = list(missingFields)\n # Lets just trust the client for now\n if len(missingFields):\n raise EcommerceException(name = \"Missing Fields\", description = {'description':'Supply all fields','missing_field':missingFields,'supplied_fields':_form_keys})\n # Will not reach here on err\n name = request.args['product_name']\n name_slug = re.sub('[^a-zA-Z0-9]','-',name)\n data = dict(request.args)\n data['slug'] = name_slug\n _collection.document().set(data)\n return jsonify({'status_code':200,'status-info':'Success','product-slug':name_slug})\n\n\n# @app.route('/categories')\n# def listCategories():\n# print(\"Fetching Products\")\n# query = _collection.stream()\n# print(\"Products fetched\")\n# categories = []\n# for q in query:\n# products.append(q.to_dict()['category'])\n# return jsonify({'status_code':200,'categories':categories})\n\n# @app.route('/categories/')\n# def productsInCategory():\n# print(\"Fetching Products\")\n# query = _collection.stream()\n# print(\"Products fetched\")\n# categories = []\n# for q in query:\n# products.append(q.to_dict()['category'])\n# return jsonify({'status_code':200,'categories':categories})","sub_path":"application/core/listing.py","file_name":"listing.py","file_ext":"py","file_size_in_byte":2000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"258310966","text":"import gc\nfrom sys import getrefcount\nimport resource\nfrom functools import partial\n\nimport multiprocessing as mp\nfrom flask import Flask, render_template, request, json, Response, jsonify\nfrom bert_sample import SentencePrediction,MaskedLM\nfrom flask_cors import CORS,cross_origin\n# import numba\n# from numba import jit, njit, prange, config, generated_jit\n\n\napp = Flask(__name__)\n\n\n\n\ndef mem():\n print('Memory usage : % 2.2f MB' % round(\n resource.getrusage(resource.RUSAGE_SELF).ru_maxrss/1024.0/1024.0,1)\n )\n\n\ndef get_post_data():\n \"\"\"\n 从请求中获取参数\n :return:\n \"\"\"\n data = {}\n if request.content_type.startswith('application/json'):\n data = request.get_data()\n data = json.loads(data)\n else:\n for key, value in request.form.items():\n if key.endswith('[]'):\n data[key[:-2]] = request.form.getlist(key)\n else:\n data[key] = value\n return data\n\n@app.route(\"/\")\ndef hello():\n return \"Bert 中文测试!\"\n\n@app.route(\"/tools/sentence/prediction\")\n\ndef tools_sentence_prediction():\n return 
render_template(\"tools_sentence_prediction.html\",**locals())\n\n@app.route(\"/json/sentence/prediction\" ,methods=['GET', 'POST'])\n@cross_origin()\ndef json_sentence_prediction():\n data= get_post_data()\n print('data',data)\n # paragraph = request.args.get('text')\n # previous_line=request.args.get('sentence')\n paragraph = data['text']\n previous_line = data['sentence']\n print('paragraph',paragraph)\n print('previous_line',previous_line)\n\n if paragraph and previous_line:\n nextS=SentencePrediction()\n nextS.model_init(model='/home/terry/pan/github/bert/test/last/')\n # nextS.model_init()\n next_line=nextS.sentence(paragraph,previous_line)\n\n #释放内存\n nextS.free_ram()\n del nextS\n gc.collect()\n # print(len(next_line))\n # print('next_line',next_line[:10])\n # data=next_line[:10].tolist()\n data={'data':next_line}\n\n data['msg']='返回预测结果'\n else:\n data={'msg':'数据不完整'}\n\n\n return jsonify(data)\n # return \"Hello World!\"\n\n\n@app.route(\"/tools/sentence/gaicuo\")\ndef tools_sentence_gaicuo():\n return render_template(\"tools_sentence_gaicuo.html\",**locals())\n\n#改错\n@app.route(\"/json/sentence/gaicuo\" ,methods=['GET', 'POST'])\n@cross_origin()\ndef json_sentence_gaicuo():\n data= get_post_data()\n print('data',data)\n # paragraph = request.args.get('text')\n # previous_line=request.args.get('sentence')\n text1 = data['text1']\n text2 = data['text2']\n # print('paragraph',paragraph)\n # print('previous_line',previous_line)\n\n if text1 and text2:\n\n text_new,text_new1=mlm(text1,text2)\n # p = mp.Pool(1)\n # prod_x=partial(mlm, text2=text2) # prod_x has only one argument x (y is fixed to 10)\n # text_new,text_new1 = p.map(prod_x,text1)\n # print rslt\n# print(getrefcount(mlm))\n # print(len(next_line))\n # print('next_line',next_line[:10])\n # data=next_line[:10].tolist()\n\n data={'data':{\n 'text_new':text_new,\n 'text_new1':text_new1\n\n }\n\n }\n\n data['msg']='返回预测结果'\n else:\n data={'msg':'数据不完整'}\n\n gc.collect()\n return jsonify(data)\n # return \"Hello World!\"\n\ndef mlm(text1,text2):\n print(text1,text2)\n mem()\n # nextS=SentencePrediction()\n # next_line=nextS.sentence(paragraph,previous_line)\n mlm=MaskedLM()\n #初始化模型\n mlm.model_init(model='/home/terry/pan/github/bert/test/last/')\n # text1=\"今天天气好吗 \"\n # text2=\"估计n牛错。\"\n indexed_tokens,segments_ids= mlm.sentence_pre(text1,text2)\n\n # print(t)\n text_new,text_new1=mlm.prediction(indexed_tokens,segments_ids)\n #释放内存\n# mlm.free_ram()\n print('getrefcount(mlm)',getrefcount(mlm))\n #释放显存\n # mlm.clear()\n # # mlm.free_ram()\n mem()\n del mlm\n gc.collect()\n mem()\n\n return text_new,text_new1\n\nif __name__ == \"__main__\":\n #app.run()\n app.run(\n host='0.0.0.0',\n threaded=True,\n # debug=True,\n port=8110,\n # debug=True\n )\n CORS(app)\n","sub_path":"bert_server/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"331920026","text":"import random\nimport markovify\nfrom discord import Member, Embed\nfrom discord.ext import commands\nfrom core import checks\nfrom core.models import PermissionLevel\n\n\nclass Mimic(commands.Cog, name=\"Mimic\"):\n def __init__(self, bot):\n self.bot = bot\n\n @checks.has_permissions(PermissionLevel.ADMINISTRATOR)\n @commands.command(\n name=\"mimic\",\n aliases=[\"mock\", \"parrot\"]\n )\n async def mimic(self, ctx, member: Member):\n \"\"\"\n Make webhooks to mimic a member's writing style.\n \"\"\"\n\n messages = []\n start_filter = [\">\", \"!\", 
\"$\", \"?\"]\n word_filter = [\">mimic\"]\n count = 0\n\n await ctx.message.delete()\n\n async with ctx.channel.typing():\n async for message in ctx.channel.history(limit=None):\n count += 1\n if count > 10000:\n break\n if len(messages) == 100:\n break\n if message.author == member:\n for char in start_filter:\n if message.clean_content.startswith(char):\n continue\n\n for word in word_filter:\n if word in message.clean_content:\n continue\n messages.append(message)\n\n clean_strings = list(map(lambda m: m.clean_content, messages))\n if len(clean_strings) < 10:\n embed = Embed(title=\"❌ Not enough messages sent by member recently. ❌\")\n await ctx.send(embed=embed)\n return\n\n models = []\n for string in clean_strings:\n try:\n models.append(markovify.Text(input_text=string, well_formed=True))\n except KeyError:\n pass\n final_model = markovify.combine(models)\n text = final_model.make_short_sentence(300)\n\n if text:\n webhook = await ctx.channel.create_webhook(name=\"mimic\")\n await webhook.send(content=text, username=member.name, avatar_url=member.avatar_url)\n await webhook.delete()\n else:\n embed = Embed(title=\"❌ Could not generate coherent sentence from corpus. ❌\")\n await ctx.send(embed=embed)\n\n\ndef setup(bot):\n bot.add_cog(Mimic(bot))\n","sub_path":"mimic/mimic.py","file_name":"mimic.py","file_ext":"py","file_size_in_byte":2384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"241112647","text":"#!/usr/bin/python3\n# coding=utf-8\nimport os\nfrom multiprocessing import Process\nimport time\n\nclass MyProcess(Process):\n def __init__(self):\n super().__init__()\n\n def run(self):\n for i in range(3):\n print('进程pid = %s'%os.getpid())\n time.sleep(1)\n\ndef main():\n p = MyProcess()\n p.start()\n\nif __name__ == '__main__':\n main()\n","sub_path":"j05day-多进程.py/9继承process.py","file_name":"9继承process.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"95375795","text":"import logging\nimport time\nfrom math import cos\n\nfrom .message_define import MyMessage\nfrom .utils import transform_list_to_tensor\nfrom ....core.distributed.communication.message import Message\nfrom ....core.distributed.fedml_comm_manager import FedMLCommManager\n\n\nclass FedAVGClientManager(FedMLCommManager):\n def __init__(\n self, args, trainer, comm=None, rank=0, size=0, backend=\"MPI\",\n ):\n super().__init__(args, comm, rank, size, backend)\n self.trainer = trainer\n self.num_rounds = args.comm_round\n self.round_idx = 0\n self.worker_id = self.rank - 1\n\n def run(self):\n super().run()\n\n def register_message_receive_handlers(self):\n self.register_message_receive_handler(MyMessage.MSG_TYPE_S2C_INIT_CONFIG, self.handle_message_init)\n self.register_message_receive_handler(\n MyMessage.MSG_TYPE_S2C_SYNC_MODEL_TO_CLIENT, self.handle_message_receive_model_from_server,\n )\n\n def handle_message_init(self, msg_params):\n global_model_params = msg_params.get(MyMessage.MSG_ARG_KEY_MODEL_PARAMS)\n # client_index = msg_params.get(MyMessage.MSG_ARG_KEY_CLIENT_INDEX)\n\n average_weight_dict = msg_params.get(MyMessage.MSG_ARG_KEY_AVG_WEIGHTS)\n client_schedule = msg_params.get(MyMessage.MSG_ARG_KEY_CLIENT_SCHEDULE)\n client_indexes = client_schedule[self.worker_id]\n\n self.round_idx = 0\n self.__train(global_model_params, client_indexes, average_weight_dict)\n\n def start_training(self):\n self.round_idx = 0\n # self.__train()\n\n def 
handle_message_receive_model_from_server(self, msg_params):\n logging.info(\"handle_message_receive_model_from_server.\")\n global_model_params = msg_params.get(MyMessage.MSG_ARG_KEY_MODEL_PARAMS)\n # client_index = msg_params.get(MyMessage.MSG_ARG_KEY_CLIENT_INDEX)\n\n average_weight_dict = msg_params.get(MyMessage.MSG_ARG_KEY_AVG_WEIGHTS)\n client_schedule = msg_params.get(MyMessage.MSG_ARG_KEY_CLIENT_SCHEDULE)\n client_indexes = client_schedule[self.worker_id]\n\n self.round_idx += 1\n self.__train(global_model_params, client_indexes, average_weight_dict)\n if self.round_idx == self.num_rounds - 1:\n # post_complete_message_to_sweep_process(self.args)\n self.finish()\n\n def send_result_to_server(self, receive_id, weights, client_runtime_info):\n message = Message(MyMessage.MSG_TYPE_C2S_SEND_MODEL_TO_SERVER, self.get_sender_id(), receive_id,)\n message.add_params(MyMessage.MSG_ARG_KEY_MODEL_PARAMS, weights)\n # message.add_params(MyMessage.MSG_ARG_KEY_NUM_SAMPLES, local_sample_num)\n message.add_params(MyMessage.MSG_ARG_KEY_CLIENT_RUNTIME_INFO, client_runtime_info)\n self.send_message(message)\n\n def add_client_model(self, local_agg_model_params, model_params, weight=1.0):\n # Add params that needed to be reduces from clients\n for name, param in model_params.items():\n if name not in local_agg_model_params:\n local_agg_model_params[name] = param * weight\n else:\n local_agg_model_params[name] += param * weight\n\n def __train(self, global_model_params, client_indexes, average_weight_dict):\n logging.info(\"#######training########### round_id = %d\" % self.round_idx)\n\n if hasattr(self.args, \"simulation_gpu_hetero\"):\n # runtime_speed_ratio\n # runtime_speed_ratio * t_train - t_train\n # time.sleep(runtime_speed_ratio * t_train - t_train)\n simulation_gpu_hetero = self.args.simulation_gpu_hetero\n runtime_speed_ratio = self.args.gpu_hetero_ratio * self.worker_id / self.args.worker_num\n\n if hasattr(self.args, \"simulation_environment_hetero\"):\n # runtime_speed_ratio\n # runtime_speed_ratio * t_train - t_train\n # time.sleep(runtime_speed_ratio * t_train - t_train)\n if self.args.simulation_environment_hetero == \"cos\":\n runtime_speed_ratio = self.args.environment_hetero_ratio * \\\n (1 + cos(self.round_idx / self.num_rounds*3.1415926 + self.worker_id))\n else:\n raise NotImplementedError\n\n\n local_agg_model_params = {}\n client_runtime_info = {}\n for client_index in client_indexes:\n logging.info(\n \"#######training########### Simulating client_index = %d, average weight: %f \"\n % (client_index, average_weight_dict[client_index])\n )\n start_time = time.time()\n self.trainer.update_model(global_model_params)\n self.trainer.update_dataset(int(client_index))\n weights, local_sample_num = self.trainer.train(self.round_idx)\n self.add_client_model(local_agg_model_params, weights, weight=average_weight_dict[client_index])\n if hasattr(self.args, \"simulation_gpu_hetero\"):\n t_train = time.time() - start_time\n time.sleep(runtime_speed_ratio * t_train)\n end_time = time.time()\n client_runtime = end_time - start_time\n client_runtime_info[client_index] = client_runtime\n logging.info(\n \"#######training########### End Simulating client_index = %d, consuming time: %f\"\n % (client_index, client_runtime)\n )\n self.send_result_to_server(0, local_agg_model_params, 
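# --- Added numeric check (not part of the original record) --------------------
# The "cos" environment-heterogeneity branch above scales simulated sleep time
# by ratio * (1 + cos(round/num_rounds * pi + worker_id)); the same formula
# with math.pi shows the per-round, per-worker slowdown it produces:
from math import cos, pi

def speed_ratio(hetero_ratio, round_idx, num_rounds, worker_id):
    return hetero_ratio * (1 + cos(round_idx / num_rounds * pi + worker_id))

for r in (0, 50, 99):
    print(r, round(speed_ratio(0.5, r, 100, worker_id=1), 3))
# r=0 -> 0.770, r=50 -> 0.079, r=99 -> 0.217 (minimum near r~68 for worker 1)
# ------------------------------------------------------------------------------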
client_runtime_info)\n","sub_path":"python/fedml/simulation/mpi/fedavg_seq/FedAvgClientManager.py","file_name":"FedAvgClientManager.py","file_ext":"py","file_size_in_byte":5488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"227677665","text":"from .ptest import PerformanceTest\nimport time\nimport os\nimport logging\nimport sys\n__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))\nclass DecentralizedPerformanceTest(PerformanceTest):\n\n def __init__(self, num_events=50, event_interval=0.3, wait_factor=10):\n super().__init__(num_events=num_events,\n event_interval=event_interval,wait_factor=wait_factor)\n self.set_logger()\n\n def set_logger(self):\n self.logger = logging.getLogger(__name__)\n handler = logging.StreamHandler()\n handler.setLevel(logging.DEBUG)\n formatter = logging.Formatter('%(prefix)s - %(message)s')\n handler.setFormatter(formatter)\n self.logger.addHandler(handler)\n self.logger.setLevel(logging.INFO)\n self.logger = logging.LoggerAdapter(self.logger, self.prefix)\n\n def test_network(self, network=None, num_hosts=None, network_name=\"\"):\n \"\"\" Interface method from PerformanceTest superclass; implementation for\n decentralized performance testing of pub/sub system in provided network\n args:\n network (Mininet object) - network to start and create hosts on\n num_hosts (int) - number of hosts in network\n network_name - alias of network, used to create folders for data/logs\n \"\"\"\n self.prefix['prefix'] = f'DECENTRAL-NET-{network_name}-TEST - '\n self.successes = 0\n self.failures = 0\n self.comments = []\n if network and num_hosts:\n # For subs, write data files into a folder within this folder\n # named after the network\n data_folder = os.path.join(__location__, f\"data/decentralized/{network_name}\")\n log_folder = os.path.join(__location__, f\"logs/decentralized/{network_name}\")\n # File for pass/fail check results\n test_results_file = os.path.join(__location__, f\"test_results/decentralized/{network_name}.csv\")\n\n # Make folders dynamically since the names are variable\n try:\n os.mkdir(data_folder)\n except FileExistsError:\n pass\n try:\n os.mkdir(log_folder)\n except FileExistsError:\n pass\n\n # Start the network\n network.start()\n # self.debug(\"Starting a pingAll test...\")\n # network.pingAll()\n self.debug(\"Starting network...\")\n num_subscribers = (num_hosts - 1) // 2\n num_publishers = num_hosts - 1 - num_subscribers\n\n self.debug(\n f'With {num_hosts} hosts, there will be 1 Broker, '\n f'{num_publishers} Publishers, and {num_subscribers} Subscribers...'\n )\n\n\n # Set up broker on first host (h1)\n broker_command = (\n f'python3 driver.py '\n '--broker 1 --verbose '\n f'--indefinite ' # max event count only matters for subscribers who write files at end.\n f'&> {log_folder}/broker.log &'\n )\n broker_host = network.hosts[0]\n broker_host.cmd(broker_command)\n broker_ip = broker_host.IP()\n self.debug(f'Broker set up! 
(IP: {broker_ip})')\n\n subscribers = [\n network.hosts[i] for i in range(1, num_subscribers + 1)\n ]\n publishers = [\n network.hosts[i] for i in range(num_subscribers + 1, num_hosts)\n ]\n self.debug(f\"Starting {num_subscribers} subscribers...\")\n\n for index,host in enumerate(subscribers):\n host.cmd(\n 'python3 driver.py '\n '--subscriber 1 '\n '--topics A --topics B --topics C '\n f'--max_event_count {self.num_events} '\n f'--broker_address {broker_ip} '\n f'--filename {data_folder}/subscriber-{index}.csv '\n f'--verbose &> {log_folder}/sub-{index}.log &'\n )\n self.debug(\"Subscribers created!\")\n\n self.debug(f\"Creating {num_publishers} publishers...\")\n for index,host in enumerate(publishers):\n host.cmd(\n f'python3 driver.py '\n '--publisher 1 '\n f'--sleep {self.event_interval} '\n f'--indefinite ' # max event count only matters for subscribers who write files at end.\n '--topics A --topics B --topics C '\n f'--broker_address {broker_ip} '\n f'--max_event_count {self.num_events} '\n f'--verbose &> {log_folder}/pub-{index}.log &'\n )\n self.debug(\"Publishers created!\")\n\n # Scale the wait time by a constant factor and with number of hosts\n wait_time = self.wait_factor * (self.num_events * self.event_interval)\n self.debug(f\"Waiting for {wait_time} seconds for data to generate...\")\n time.sleep(wait_time)\n self.debug(f\"Finished waiting!\")\n\n self.debug(\"Running unittest assertions to verify data was written...\")\n\n for index,host in enumerate(subscribers):\n ## Run assertions here. Data files should have been produced with num_events + 1\n # lines each. If this is is true, then the Pub/Sub system worked. That's the only\n # way the subscriber would be able to write the expected number of results.\n check_result = self.data_file_written_successfully(\n filename=f'{data_folder}/subscriber-{index}.csv'\n )\n self.comments.append(check_result[1])\n if not check_result[0]:\n self.failures += 1\n else:\n self.successes += 1\n\n self.debug(f\"Writing pass/fail test results to {test_results_file}\")\n with open(test_results_file, 'w') as f:\n f.write('pass,fail,comments\\n')\n f.write(f'{self.successes},{self.failures},{\",\".join(self.comments)}')\n\n self.debug(\"Killing processes...\")\n for index,host in enumerate(subscribers):\n out = host.cmd(f'kill %1')\n\n for index,host in enumerate(publishers):\n out = host.cmd(f'kill %1')\n\n self.debug(f\"Processes killed. 
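# --- Added sketch (not part of the original record) ---------------------------
# `data_file_written_successfully` is inherited from PerformanceTest and is not
# shown in this record; a plausible minimal version of the check it performs
# (header line plus one CSV row per received event) might be:
def data_file_written_successfully_sketch(filename, num_events=50):
    try:
        with open(filename) as f:
            n_lines = sum(1 for _ in f)
    except FileNotFoundError:
        return False, f'{filename} was never written'
    if n_lines == num_events + 1:
        return True, f'{filename} complete'
    return False, f'{filename} has {n_lines} lines, expected {num_events + 1}'
# ------------------------------------------------------------------------------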
Stopping network '{network_name}'...\")\n # Stop the network\n network.stop()\n self.debug(f\"Network '{network_name}' stopped!\")\n\n return {\n 'successes': self.successes,\n 'failures': self.failures,\n 'comments': self.comments\n }\n else:\n self.logger.error(\"Need to pass network and num_hosts to initialize_network()\")\n return {\n 'successes': self.successes,\n 'failures': self.failures,\n 'comments': self.comments,\n 'error': \"Need to pass network and num_hosts to initialize_network()\"\n }\n","sub_path":"src/performance_tests/decentralized.py","file_name":"decentralized.py","file_ext":"py","file_size_in_byte":7139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"88493555","text":"import pytest\nfrom fastapi.testclient import TestClient\n\n\n@pytest.fixture(autouse=True)\ndef app(monkeypatch):\n \"\"\"patch in our env vars required for pydantic settings\"\"\"\n monkeypatch.setenv(\"GRAPHQL_KEY\", \"test\")\n monkeypatch.setenv(\"BASIC_USERNAME\", \"test\")\n monkeypatch.setenv(\"BASIC_PASSWORD\", \"test\")\n\n # import app after patching env vars\n from app.main import app\n\n return app\n\n\n@pytest.fixture()\ndef client(app):\n return TestClient(app)\n\n\ndef test_root(client):\n response = client.get(\"/\")\n assert response.status_code == 404\n assert response.json() == {\"detail\": \"Not Found\"}\n\n\ndef test_ready(client):\n response = client.get(\"/ready\")\n assert response.status_code == 200\n assert \"Hello\" in response.json().keys()\n","sub_path":"test/test_main.py","file_name":"test_main.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"160868698","text":"import pandas as pd\nimport matplotlib.pyplot as plt\n\ndef plot_history(history):\n hist = pd.DataFrame(history.history)\n hist['epoch'] = history.epoch\n\n plt.figure()\n plt.xlabel('Epoch')\n plt.ylabel('Mean Abs Error [MPG]')\n plt.plot(hist['epoch'], hist['mean_absolute_error'],\n label='Train Error')\n plt.plot(hist['epoch'], hist['val_mean_absolute_error'],\n label = 'Val Error')\n plt.ylim([0,5])\n plt.legend()\n\n plt.figure()\n plt.xlabel('Epoch')\n plt.ylabel('Mean Square Error [$MPG^2$]')\n plt.plot(hist['epoch'], hist['mean_squared_error'],\n label='Train Error')\n plt.plot(hist['epoch'], hist['val_mean_squared_error'],\n label = 'Val Error')\n plt.ylim([0,20])\n plt.legend()\n plt.show()\n","sub_path":"Regression/history_plotter.py","file_name":"history_plotter.py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"178785511","text":"# Managed Objects implementation\nfrom pysnmp.smi import builder\n\n# MIB Builder is normally pre-created by SNMP engine\nmibBuilder = builder.MibBuilder()\n\n#\n# This may be done in a stand-alone file and then loaded up\n# by SNMP Agent\n#\n\n# A base class for a custom Managed Object\nMibScalarInstance, = mibBuilder.importSymbols(\n 'SNMPv2-SMI', 'MibScalarInstance'\n )\n\n# Managed object specification\nsysLocation, = mibBuilder.importSymbols('SNMPv2-MIB', 'sysLocation')\n\n# Custom Managed Object\nclass MySysLocationInstance(MibScalarInstance):\n def readGet(self, name, *args):\n # Just return a custom value\n return name, self.syntax.clone('The Leaky Cauldron')\n \nsysLocationInstance = MySysLocationInstance(\n sysLocation.name, (0,), sysLocation.syntax\n )\n\n# Register Managed Object with a MIB 
tree\nmibBuilder.exportSymbols(\n # '__' prefixed MIB modules take precedence on indexing\n '__MY-LOCATION-MIB', sysLocationInstance=sysLocationInstance\n )\n\nif __name__ == '__main__':\n #\n # This is what is done internally by Agent.\n #\n from pysnmp.smi import instrum, exval\n\n mibInstrum = instrum.MibInstrumController(mibBuilder)\n\n print('Remote manager read access to MIB instrumentation (table walk)')\n oid, val = (), None\n while 1:\n oid, val = mibInstrum.readNextVars(((oid, val),))[0]\n if exval.endOfMib.isSameTypeWith(val):\n break\n print(oid, val.prettyPrint())\n","sub_path":"examples/smi/agent/custom-managed-object.py","file_name":"custom-managed-object.py","file_ext":"py","file_size_in_byte":1446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"128032487","text":"from binascii import hexlify, unhexlify\nimport base64\nimport operator\nimport random\n\ndef hex_to_base64(hex_string):\n byte_seq = unhexlify(hex_string)\n return base64.b64encode(byte_seq)\n\ndef fixed_xor(hex_str1, hex_str2):\n if len(hex_str1) != len(hex_str2):\n raise ValueError(\"arguments different length\")\n byte_seq1 = unhexlify(hex_str1)\n byte_seq2 = unhexlify(hex_str2)\n\n res_list_ints = [a ^ b for a,b in zip(byte_seq1, byte_seq2)]\n res_bytes_seq = bytearray(res_list_ints)\n return hexlify(res_bytes_seq)\n\n# is plaintext likely English text or gibberish?\ndef score(plaintext_hex):\n plaintext = unhexlify(plaintext_hex)\n score = 0\n most_common = list(map(ord, \" etaoinshr\")) # https://en.wikipedia.org/wiki/Letter_frequency\n vowels = list(map(ord, \"aeiouAEIOU \"))\n ascii_chars = list(range(65,91)) + list(range(97,127)) # A-Z, a-z\n\n for c in plaintext:\n if c in most_common:\n score += 1\n return score\n\ndef int_to_two_char_hex(n):\n hexn = hex(n)[2:]\n if len(hexn) == 1:\n return '0' + hexn\n elif len(hexn) == 2:\n return hexn\n else:\n raise ValueError(\"wrong length integer\")\n\ndef decode_single_key_xor(ciphertext, key):\n key_len = len(ciphertext) // 2\n long_key = int_to_two_char_hex(key) * key_len\n plaintext_hex = fixed_xor(ciphertext, long_key)\n return plaintext_hex\n\ndef find_xor_cipher_key_scores(ciphertext):\n scores = {}\n\n for key in range(32, 127):\n plaintext = decode_single_key_xor(ciphertext, key)\n scores[key] = score(plaintext)\n\n # items are scores, keys are... 
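# --- Added end-to-end toy run (not part of the original record) ---------------
# The frequency-scoring key search above, condensed into a few stdlib-only
# lines (same idea as find_xor_cipher_key, not the exact helpers in this file):
def xor_bytes(data, key):
    return bytes(b ^ key for b in data)

secret = xor_bytes(b"Cooking MC's like a pound of bacon", 88)
best_key = max(range(256),
               key=lambda k: sum(c in b' etaoinshr' for c in xor_bytes(secret, k)))
print(best_key, xor_bytes(secret, best_key))   # should recover key 88
# ------------------------------------------------------------------------------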
keys\n sorted_scores = sorted(scores.items(), key=operator.itemgetter(1))\n return sorted_scores\n\ndef find_xor_cipher_key_and_score(ciphertext):\n sorted_scores = find_xor_cipher_key_scores(ciphertext)\n return sorted_scores[-1]\n\ndef find_xor_cipher_key(ciphertext):\n best_key = find_xor_cipher_key_and_score(ciphertext)[0]\n return best_key\n\ndef solve_set1_challenge3(ciphertext):\n best_key = find_xor_cipher_key(ciphertext)\n plaintext = unhexlify(decode_single_key_xor(ciphertext, best_key))\n return plaintext\n\ndef repeating_key_xor(plaintext_byte_seq, short_key_byte_seq):\n long_key_length = 1 + (len(plaintext_byte_seq) // len(short_key_byte_seq))\n long_key_byte_seq = (short_key_byte_seq * long_key_length)[:len(plaintext_byte_seq)]\n\n res_list_ints = [a ^ b for a,b in zip(plaintext_byte_seq, long_key_byte_seq)]\n res_bytes_seq = bytearray(res_list_ints)\n return hexlify(res_bytes_seq)\n\ndef hamming_distance(bs1, bs2):\n print(len(bs1), len(bs2))\n if len(bs1) != len(bs2):\n raise ValueError(\"arguments not same length\")\n xord_bytes = [bin(el1 ^ el2)[2:] for el1, el2 in zip(bs1, bs2)]\n num_of_ones = sum(x.count('1') for x in xord_bytes)\n return num_of_ones\n\ndef pkcs7_padding(block, block_length):\n n = block_length - len(block)\n if n == 0:\n return block\n elif n < 0:\n raise ValueError(\"block length too short\")\n return block + bytes(n * [n])\n\n# Write a function to generate a random AES key; that's just 16 random bytes.\ndef generate_random_AES_key():\n keysize = 16\n random_AES_key = b''\n for i in range(0, keysize):\n random_AES_key += bytes([random.randrange(256)])\n return random_AES_key\n\ndef five_to_ten_random_bytes():\n num_random_bytes = random.randrange(5,11)\n random_bytes = b''\n for i in range(0, num_random_bytes):\n random_bytes += bytes([random.randrange(256)])\n return random_bytes\n","sub_path":"natas/lib.py","file_name":"lib.py","file_ext":"py","file_size_in_byte":3533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"65380909","text":"import numpy as np\nfrom astropy.io import fits\nfrom astropy.io import ascii\nfrom astropy.table import Table\nfrom astropy.visualization import SqrtStretch\nfrom astropy.visualization.mpl_normalize import ImageNormalize\nimport glob\nfrom photutils import daofind\nimport matplotlib.pyplot as plt\nfrom scipy import ndimage\nfrom scipy import stats\n\n################################# PART 1 ######################################\n###############################################################################\nprint('PART 1')\n#Loading FITS data for ROXs 42b and ROXs 12\nsource_dir42b = \"/Users/zvander/KOA_5096/NIRC2/calibrated\"\nsource_dir12 = \"/Users/zvander/KOA_6199/NIRC2/calibrated\"\nfile_list42b = glob.glob(source_dir42b + '/*.fits') #ROXs 42b data\nfile_list12 = glob.glob(source_dir12 + '/*.fits') #ROXs 12 data\nobj_names42b = [x[41:] for x in file_list42b] #File names w/o source directory\nobj_names12 = [x[41:] for x in file_list12] #File names w/o source directory\n\n#Retrieves header info and data for each file\nhdu_list42b = [fits.open(x) for x in file_list42b] \nhdu_list12 = [fits.open(x) for x in file_list12]\n\n#Separate image data from header info\nimage_data42b = [x[0].data for x in hdu_list42b]\nimage_data12 = [x[0].data for x in hdu_list12]\nimage_header42b = [x[0].header for x in hdu_list42b]\nimage_header12 = [x[0].header for x in hdu_list12]\n\n################################### PART 2 
####################################\n###############################################################################\nprint('PART 2')\n#Python function to measure x/y positions of the star in each image\ndef getcentroids(imagedata,box):\n nx = len(imagedata)\n xmin,xmax,ymin,ymax = box #box borders, eye-balled\n \n if nx < 50:\n rows = np.arange(0,nx)\n centroids = np.zeros((nx,2))\n for i in rows:\n #Index image data within defined box\n image_box = imagedata[i][ymin:ymax, xmin:xmax] \n #Using DAOFIND to locate the star within the defined box \n sources = daofind(image_box,100.,2.126, exclude_border=True)\n #Add xmin and ymin values onto the found sources for true pixel value\n centroids[i,:] = [sources['xcentroid']+xmin, sources['ycentroid']+ymin]\n else: #Condition applied to part 8\n image_box = imagedata[ymin:ymax, xmin:xmax]\n sources = daofind(image_box,100.,2.216, exclude_border=True)\n centroids = [sources['xcentroid']+xmin,sources['ycentroid']+ymin]\n return centroids\n\n#Get centroids of the star for each image by calling the above function\nbox1 = [604.,620.,462.,482.]\ncentroids42b = getcentroids(image_data42b,box1)\ncentroids12 = getcentroids(image_data12,box1)\n\n################################### PART 3 ####################################\n###############################################################################\nprint('PART 3')\n#A python implementation of \"fshift.pro\" which shifts image according to\n#desired pixel value\ndef fshiftp(imagedata,centroids):\n transdata = list(imagedata)\n nx = len(centroids[:,0])\n rows = np.arange(0,nx)\n for i in rows:\n delx = 512.0 - centroids[i,0] #Using 512 shifts image to center\n dely = 512.0 - centroids[i,1]\n fracx,intx = np.modf(delx)\n fracy,inty = np.modf(dely)\n #Shift image by the integer part of delx and dely\n rollx = np.roll(imagedata[i], int(intx), axis=1)\n rollxy = np.roll(rollx, int(inty), axis=0)\n #Use bi-linear interpolation between four pixels to estimate\n #pixel value at the fractional location of delx and dely\n transdata[i] = rollxy*((1-fracx)*(1-fracy)) + \\\n np.roll(rollxy,1,axis=0)*((1-fracx)*fracy) + \\\n np.roll(rollxy,1,axis=1)*(fracx*(1-fracy)) + \\\n np.roll(np.roll(rollxy,1,axis=0),1,axis=1)*fracx*fracy\n return transdata\n\n#Shift data so all centroids on centered on the same x,y position\ntranslated42b = fshiftp(image_data42b,centroids42b)\ntranslated12 = fshiftp(image_data12 ,centroids12)\nbox2 = [507.,522.,507.,522.]\ntcentroids42b = getcentroids(translated42b,box2)\ntcentroids12 = getcentroids(translated12,box2)\n\n\n#Write data to file\ndef datafile(obj_names,centroids,tcentroids,outfile):\n names = ['File Name','x-centroid','y-centroid','x-aligned','y-aligned']\n data = [obj_names,centroids[:,0],centroids[:,1],tcentroids[:,0],tcentroids[:,1]]\n table = Table(data, names=names)\n ascii.write(table, outfile, formats={'x-centroid': '%.3f',\\\n 'y-centroid': '%.3f','x-aligned': '%.3f','y-aligned': '%.3f'})\n return\n\n#Summed and median-combined Images\nimage_sum42b = np.sum(translated42b, axis=0)\nimage_sum12 = np.sum(translated12 , axis=0)\nimage_med42b = np.median(translated42b, axis=0)\nimage_med12 = np.median(translated12 , axis=0)\n\n################################## PART 4 #####################################\n###########################################s####################################\nprint('PART 4')\ndef img_rotate(imagedata,headers):\n nx = len(imagedata)\n rows = np.arange(0,nx)\n rotated_data = list(imagedata)\n par = np.asarray([headers[x]['PARANG'] for x in rows])\n 
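# --- Added sanity check (not part of the original record) ---------------------
# An intensity-weighted centre of mass is a cheap cross-check for the daofind
# box centroids computed in Part 2; for a single bright star inside the box the
# two estimates should agree to a fraction of a pixel.
import numpy as np

def com_centroid(image, box):
    xmin, xmax, ymin, ymax = box
    sub = image[ymin:ymax, xmin:xmax].astype(float)
    yy, xx = np.mgrid[ymin:ymax, xmin:xmax]
    return (xx * sub).sum() / sub.sum(), (yy * sub).sum() / sub.sum()
# ------------------------------------------------------------------------------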
rot = np.asarray([headers[x]['ROTPPOSN'] for x in rows])\n el = np.asarray([headers[x]['EL'] for x in rows])\n inst = np.asarray([headers[x]['INSTANGL'] for x in rows])\n PA = par+rot-el-inst\n #PA angle is negative in the rotation call to make it go clockwise\n for i in rows:\n rotated_data[i] = ndimage.interpolation.rotate(imagedata[i],-PA[i],reshape=False)\n\n return rotated_data\n \nrot_data42b = img_rotate(translated42b,image_header42b)\nrot_data12 = img_rotate(translated12 ,image_header12)\n\nrot_sum42b = np.sum(rot_data42b, axis=0)\nrot_med42b = np.median(rot_data42b, axis=0)\nrot_sum12 = np.sum(rot_data12 , axis=0)\nrot_med12 = np.median(rot_data12 , axis=0)\n\n################################## PART 5 #####################################\n###############################################################################\nprint('PART 5')\n#Need to figure out the median value in each concentric ring away from\n#the central star\n\ndef azimed(imagedata,centroids):\n dring = 2.0 #Width of the ring\n maxring = 515.0 #Maximum size of the rings, approximate full image size\n y,x = np.mgrid[0:1024,0:1024] #Defining grids of x & y values\n x = x.astype(float) #Convert to float to avoid arithmetic errors\n y = y.astype(float)\n nx = len(imagedata)\n newimg = list(imagedata)\n #For loops will sequentially find the pixel values within the rings of \n #various radii and subtract the median pixel value of the from from every \n #pixel within it.\n for i in range (0,nx):\n rsep = np.sqrt((x-centroids[i,0])**2 + (y-centroids[i,1])**2)\n #Iterate from smallest to biggest ring\n for k in range(0,int(maxring/dring)):\n #Find values within the ring\n ringind = np.where((rsep >= k*dring) & (rsep < (k+1)*dring))\n #Index the image within the ring\n ringdata = imagedata[i][ringind]\n #Find median pixel value within the ring\n ringmed = np.median(ringdata)\n #Subtract median pixel from values within the ring\n newimg[i][ringind] = ringdata - ringmed \n return newimg\n \nazimage42b = azimed(translated42b,tcentroids42b)\nazimage12 = azimed(translated12 ,tcentroids12)\n#Sum and median combined images for the brightness profile corrected images\nazsum42b = np.sum(azimage42b,axis=0)\nazsum12 = np.sum(azimage12 ,axis=0)\nazmed42b = np.median(azimage42b,axis=0)\nazmed12 = np.median(azimage12 ,axis=0)\n\n#Rotating and median/sum combining the brightness profile corrected images \nrot_azimage42b = img_rotate(azimage42b,image_header42b)\nrot_azimage12 = img_rotate(azimage12 ,image_header12)\n#Sums and Medians\nrot_azsum42b = np.sum(rot_azimage42b, axis=0)\nrot_azmed42b = np.median(rot_azimage42b, axis=0)\nrot_azsum12 = np.sum(rot_azimage12 , axis=0)\nrot_azmed12 = np.median(rot_azimage12 , axis=0)\n\n################################## PART 6 #####################################\n###############################################################################\nprint('PART 6')\n#Function \ndef medratio(imagedata,medimg,centroids):\n y,x = np.mgrid[0:1024,0:1024] #Defining grids of x & y values\n x = x.astype(float) #Convert to float to avoid errors\n y = y.astype(float)\n rmin = 10.0 #Inner edge of the annulus\n rmax = 35.0 #Outer edge of the annulus\n nx = len(imagedata)\n new_imgs = list(imagedata)\n for i in range(0,nx):\n #Find where x/y separations fall within the defined annulus\n rsep = np.sqrt((x-centroids[i,0])**2 + (y-centroids[i,1])**2)\n annulus = np.where((rsep >= rmin) & (rsep <= rmax))\n #Divide the science and calibration(median) images to get correction ratio\n ratio = 
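# --- Added numeric check (not part of the original record) --------------------
# After azimed() subtracts each 2-pixel annulus median (Part 5), the median
# within any ring should be ~0; a tiny synthetic test of that invariant:
import numpy as np

img = np.random.default_rng(1).normal(10.0, 1.0, (64, 64))
yy, xx = np.mgrid[0:64, 0:64]
r = np.hypot(xx - 32, yy - 32)
for k in range(0, 16):
    ring = (r >= 2 * k) & (r < 2 * (k + 1))
    img[ring] -= np.median(img[ring])
    assert abs(np.median(img[ring])) < 1e-6
# ------------------------------------------------------------------------------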
#Subtract the flux-corrected median image from the science image\n new_imgs[i] = imagedata[i] - ratio*medimg\n return new_imgs\n \n#Median subtracting the translated images \nmensub_img42b = medratio(translated42b,image_med42b,tcentroids42b)\nmensub_img12 = medratio(translated12 ,image_med12 ,tcentroids12)\n\n#Correcting for the brightness profiles of median subtracted images \nmensub_azimage42b = azimed(mensub_img42b,tcentroids42b)\nmensub_azimage12 = azimed(mensub_img12 ,tcentroids12)\n\n#Rotating the median subtracted images \nrot_mensub_azimage42b = img_rotate(mensub_azimage42b,image_header42b)\nrot_mensub_azimage12 = img_rotate(mensub_azimage12 ,image_header12)\n\n#Summing and median-combining the median subtracted images\nADI_sum42b = np.sum(rot_mensub_azimage42b , axis=0)\nADI_med42b = np.median(rot_mensub_azimage42b, axis=0)\nADI_sum12 = np.sum(rot_mensub_azimage12 , axis=0)\nADI_med12 = np.median(rot_mensub_azimage12 , axis=0)\n\n################################### PART 7 ####################################\n###############################################################################\n#print('PART 7')\ndef imgratios(imagedata1,imagedata2,filenames2,source):\n y,x = np.mgrid[0:1024,0:1024] #Defining grids of x & y values\n x = x.astype(float) #Convert to float to avoid arithmetic errors\n y = y.astype(float)\n rsep = np.sqrt(x**2 + y**2)\n rmin = 10.0 #Inner edge of the annulus\n rmax = 50.0 #Outer edge of the annulus\n nx1 = len(imagedata1)\n nx2 = len(imagedata2)\n new_imgs = list(imagedata1)\n match_list = []\n print('Matching Best-PSF Files for ' + source)\n for i in range(0,nx1):\n chisqrs = list(np.zeros(nx2))\n for k in range(0,nx2):\n #Find image indices within the annulus\n annulus = np.where((rsep >= rmin) & (rsep <= rmax))\n #Divide science image annulus by calibrator image annulus and get\n #the median value of this ratio\n img_ratio = np.median(imagedata1[i][annulus]/imagedata2[k][annulus])\n #Multiply calibrator image by the correction ratio\n cal_img = imagedata2[k] * img_ratio\n #Subtract calibrator from science within the annulus\n res_img = np.abs(imagedata1[i][annulus] - cal_img[annulus])\n #Chi-squared test of the residuals\n chi,p = stats.chisquare(res_img)\n chisqrs[k]= np.sum(chi)\n #Find index of minimum chi-squared (argmin avoids indexing the image\n #list with an array, which would raise a TypeError)\n minind = int(np.argmin(chisqrs))\n #Subtract best-fitting calibrator image from the science image\n new_imgs[i][annulus] = imagedata1[i][annulus] - imagedata2[minind][annulus]\n print(filenames2[minind]) \n match_list.append(filenames2[minind])\n return new_imgs\n \nprint('Image Ratios 1...')\nimgratios42b = imgratios(translated42b,translated12 ,obj_names12,'ROXs 42b')\nprint('Image Ratios 2...')\nimgratios12 = imgratios(translated12 ,translated42b,obj_names42b,'ROXs 12')\nprint('Median Subtracting...')\n#Median subtract the images\nmed_psf42b = medratio(imgratios42b,image_med42b,tcentroids42b)\nmed_psf12 = medratio(imgratios12 ,image_med12 ,tcentroids12)\nprint('Brightness Profiles...')\n#Brightness profile corrections\naz_med_psf42b = azimed(med_psf42b,tcentroids42b)\naz_med_psf12 = azimed(med_psf12 ,tcentroids12)\nprint('Rotating')\n#Rotating the images\nrot_az_med_psf42b = img_rotate(az_med_psf42b,image_header42b)\nrot_az_med_psf12 = img_rotate(az_med_psf12 ,image_header12)\nprint('Sum and Median...')\n#Sum and median combining the images\nADI_psfsum42b = np.sum(rot_az_med_psf42b , axis=0)\nADI_psfmed42b = np.median(rot_az_med_psf42b, axis=0)\nADI_psfsum12 = 
np.sum(rot_az_med_psf12 , axis=0)\nADI_psfmed12 = np.median(rot_az_med_psf12 , axis=0)\n\n################################### PART 8 ####################################\n###############################################################################\nprint('PART 8')\nboxp1 = [620.,640.,501.,521.] #Box for source 1 in ROXs 42b images\nboxp2 = [542.,562.,460.,480.] #Box for source 2 in ROXs 42b images\nboxp3 = [475.,495.,679.,699.] #Box for source in ROXs 12 images\n\n#Part 4 Median-Combined Images\np4centroid42b1 = getcentroids(rot_med42b,boxp1)\np4centroid42b2 = getcentroids(rot_med42b,boxp2)\np4centroid12 = getcentroids(rot_med12,boxp3)\n#part 5 Median-Combined Images\np5centroid42b1 = getcentroids(rot_azmed42b,boxp1)\np5centroid42b2 = getcentroids(rot_azmed42b,boxp2)\np5centroid12 = getcentroids(rot_azmed12 ,boxp3)\n#Part 6 Median Combined Images\np6centroid42b1 = getcentroids(ADI_med42b,boxp1)\np6centroid42b2 = getcentroids(ADI_med42b,boxp2)\np6centroid12 = getcentroids(ADI_med12 ,boxp3)\n#Part 7 Median-Combined Images\np7centroid42b1 = getcentroids(ADI_psfmed42b,boxp1)\np7centroid42b2 = getcentroids(ADI_psfmed42b,boxp2)\np7centroid12 = getcentroids(ADI_psfmed12 ,boxp3)\n\ncentroidarr = [p4centroid42b1,p4centroid42b2,p4centroid12,\\\n p5centroid42b1,p5centroid42b2,p5centroid12,\\\n p6centroid42b1,p6centroid42b2,p6centroid12,\\\n p7centroid42b1,p7centroid42b2,p7centroid12]\n \n#Pixel scale for the narrow FOV is 0.009942 arcsec/pixel \npixscale = 0.009942\nnx = len(centroidarr)\nrsep_2scale = list(centroidarr)\nfor i in range(0,nx):\n sep = np.sqrt((centroidarr[i][0] - 512.0)**2 + (centroidarr[i][1] - 512.0)**2)\n rsep_2scale[i] = sep*pixscale\n\n#Calculating the position angles for the objects in each image\nPAs = list(np.zeros(nx))\nfor i in range(0,nx):\n x = centroidarr[i][0] - 512.0\n y = centroidarr[i][1] - 512.0\n if (x > 0.0 and y >= 0.0): #First quadrant\n PAs[i] = np.arctan(np.abs(x/y))\n elif (x >= 0.0 and y < 0.0): #Second quadrant\n PAs[i] = (np.pi/2.0) + np.arctan(np.abs(y/x))\n elif (x <= 0.0 and y < 0.0): #Third quadrant\n PAs[i] = (np.pi) + np.arctan(np.abs(x/y))\n elif (x < 0.0 and y >= 0.0): #Fourth quadrant\n PAs[i] = ((3.0*np.pi)/2.0) + np.arctan(np.abs(y/x))\n\n#Creating a data table with the projected separation and PA values \nnames = ['Object','Processing','Separation (as)','PA (deg)']\nObject = ['ROXs 42b 1','ROXs 42b 2','ROXs 12','ROXs 42b 1','ROXs 42b 2','ROXs 12',\\\n 'ROXs 42b 1','ROXs 42b 2','ROXs 12','ROXs 42b 1','ROXs 42b 2','ROXs 12']\nProcess = ['Part 4','Part 4','Part 4','Part 5','Part 5','Part 5',\\\n 'Part 6','Part 6','Part 6','Part 7','Part 7','Part 7']\ndata = [Object,Process,rsep_2scale,np.degrees(PAs)]\ntable = Table(data, names=names)\nprint(table)\n\n################################# Plotting ####################################\nprint('Plotting...')\n\n#Uncomment sections of plotting code for plots you'd like to see:\n\n##Part 2 Figures Demonstrating centroids\n#plt.figure(0)\n#norm = ImageNormalize(stretch=SqrtStretch())\n#plt.imshow(image_data42b[1], cmap='Greys', origin='lower',norm=norm)\n#plt.plot([centroids12[1,0]],[centroids12[1,1]],'g+',markersize=10,markeredgewidth=2)\n#plt.colorbar()\n#plt.figure(1)\n#norm = ImageNormalize(stretch=SqrtStretch())\n#plt.imshow(image_data12[1], cmap='Greys', origin='lower',norm=norm)\n#plt.plot([centroids12[1,0]],[centroids12[1,1]],'g+',markersize=10,markeredgewidth=2)\n#plt.colorbar()\n#plt.show()\n\n##Part 3 Figures Demonstrating Shifting - Sum and Median Images for both objects\n#plt.figure(2)\n#norm 
= ImageNormalize(stretch=SqrtStretch())\n#plt.imshow(image_sum42b, cmap='Greys', origin='lower',norm=norm)\n#plt.plot([tcentroids42b[1,0]],[tcentroids42b[1,1]],'g+',markersize=10,markeredgewidth=2)\n#plt.colorbar()\n#plt.figure(3)\n#norm = ImageNormalize(stretch=SqrtStretch())\n#plt.imshow(image_med42b, cmap='Greys', origin='lower',norm=norm)\n#plt.plot([tcentroids42b[1,0]],[tcentroids42b[1,1]],'g+',markersize=10,markeredgewidth=2)\n#plt.colorbar()\n#plt.show()\n#plt.figure(4)\n#norm = ImageNormalize(stretch=SqrtStretch())\n#plt.imshow(image_sum12, cmap='Greys', origin='lower',norm=norm)\n#plt.plot([tcentroids12[1,0]],[tcentroids12[1,1]],'g+',markersize=10,markeredgewidth=2)\n#plt.colorbar()\n#plt.figure(5)\n#norm = ImageNormalize(stretch=SqrtStretch())\n#plt.imshow(image_med12, cmap='Greys', origin='lower',norm=norm)\n#plt.plot([tcentroids12[1,0]],[tcentroids12[1,1]],'g+',markersize=10,markeredgewidth=2)\n#plt.colorbar()\n#plt.show()\n\n##Part 4 Figures Demonstrating Rotation - Median Images only\n#plt.figure(6)\n#norm = ImageNormalize(stretch=SqrtStretch())\n#plt.imshow(rot_med42b, cmap='Greys', origin='lower',norm=norm)\n#plt.plot([tcentroids42b[1,0]],[tcentroids42b[1,1]],'g+',markersize=10,markeredgewidth=2)\n#plt.colorbar()\n#plt.figure(7)\n#norm = ImageNormalize(stretch=SqrtStretch())\n#plt.imshow(rot_med12, cmap='Greys', origin='lower',norm=norm)\n#plt.plot([tcentroids12[1,0]],[tcentroids12[1,1]],'g+',markersize=10,markeredgewidth=2)\n#plt.colorbar()\n#plt.show()\n\n##Part 5 figures demonstrating brightness profile correction, median images only\n#plt.figure(8)\n#norm = ImageNormalize(stretch=SqrtStretch())\n#plt.imshow(rot_azmed42b, cmap='Greys', origin='lower',norm=norm)\n#plt.plot([tcentroids42b[1,0]],[tcentroids42b[1,1]],'g+',markersize=10,markeredgewidth=2)\n#plt.colorbar()\n#plt.figure(9)\n#norm = ImageNormalize(stretch=SqrtStretch())\n#plt.imshow(rot_azmed12, cmap='Greys', origin='lower',norm=norm)\n#plt.plot([tcentroids12[1,0]],[tcentroids12[1,1]],'g+',markersize=10,markeredgewidth=2)\n#plt.colorbar()\n#plt.show()\n\n#Part 6 Figures demonstrating median image subtraction, median images only\n#plt.figure(10)\n#norm = ImageNormalize(stretch=SqrtStretch())\n#plt.imshow(ADI_med42b, cmap='Greys', origin='lower',norm=norm)\n#plt.plot([tcentroids42b[1,0]],[tcentroids42b[1,1]],'g+',markersize=10,markeredgewidth=2)\n#plt.colorbar()\n#plt.figure(11)\n#norm = ImageNormalize(stretch=SqrtStretch())\n#plt.imshow(ADI_med12, cmap='Greys', origin='lower',norm=norm)\n#plt.plot([tcentroids12[1,0]],[tcentroids12[1,1]],'g+',markersize=10,mew=2)\n#plt.colorbar()\n#plt.show()\n\n#Part 7 Figures demonstrating best-PSF subtraction, median images only\n#plt.figure(12)\n#norm = ImageNormalize(stretch=SqrtStretch())\n#plt.imshow(ADI_psfmed42b, cmap='Greys', origin='lower',norm=norm)\n#plt.plot([tcentroids42b[1,0]],[tcentroids42b[1,1]],'g+',markersize=10,markeredgewidth=2)\n#plt.colorbar()\n#plt.figure(13)\n#norm = ImageNormalize(stretch=SqrtStretch())\n#plt.imshow(ADI_psfmed12, cmap='Greys', origin='lower',norm=norm)\n#plt.plot([tcentroids12[1,0]],[tcentroids12[1,1]],'g+',markersize=10,markeredgewidth=2)\n#plt.colorbar()\n#plt.show()\n","sub_path":"exoimgprocess.py","file_name":"exoimgprocess.py","file_ext":"py","file_size_in_byte":19473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"276183491","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Oct 28 21:09:05 2018\n\n@author: steven\n\ncam_cal: Takes glob of images located in 
../camera_cal\nfinds points from chessboard image to calculate the distortion correction\ncoefficients for the camera\n\nmeant for a 9,6 chessboard pattern\n\"\"\"\nimport numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport glob\nimport pickle\n\n# Read in and make a list of calibration images\nimages = glob.glob('../camera_cal/calibration*.jpg')\n\n# Arrays to store object points and image points from all the images\n\nobjpoints = [] # 3D points in real world space\nimgpoints = [] # 2D points in image space\n\n# prepare object points\nnx = 9# the number of inside corners in x\nny = 6# the number of inside corners in y\n\n# Prepare object points, like (0,0,0), (1,0,0),(2,0,0) ...,(8,5,0)\nobjp = np.zeros((ny*nx,3), np.float32)\nobjp[:,:2] = np.mgrid[0:nx,0:ny].T.reshape(-1,2) # x, y coordinates\n\n\nfor idx, fname in enumerate(images):\n img = cv2.imread(fname)\n \n # Convert to grayscale\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n \n # Find the chessboard corners\n ret, corners = cv2.findChessboardCorners(gray,(nx, ny), None)\n \n # If found, add object points, image points\n if ret == True:\n print('working on ', fname)\n imgpoints.append(corners)\n objpoints.append(objp)\n \n # Draw and display the corners\n cv2.drawChessboardCorners(img, (nx, ny), corners, ret)\n write_name = '../camera_cal/corners_found/corners_found'+str(idx)+'.jpg'\n cv2.imwrite(write_name,img)\n\n# load image for size reference\nimg = cv2.imread('../camera_cal/calibration1.jpg')\nimg_size = (img.shape[1],img.shape[0])\n\n# find camera cal from cal images objpoint and imgpoints\nret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, img_size,None,None)\n\n# save the camera calibration results for later\ndist_pickle = {}\ndist_pickle[\"mtx\"] = mtx\ndist_pickle[\"dist\"] = dist\npickle.dump(dist_pickle, open('./calibration_pickle.p','wb'))","sub_path":"src/cam_cal.py","file_name":"cam_cal.py","file_ext":"py","file_size_in_byte":2054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"158567581","text":"import time\nfrom hoe.connectSql import outCont\n\n\noutInput = 1\nwhile int(outInput)==1:\n st1 = outCont(0)[0]\n st2 = outCont(1)[0]\n st3 = st1 + st2\n print('\\r总用户数:%d------空闲中:%d位。抢单中:%d位。' % (st3, st1, st2), end=\"\")\n time.sleep(0.1)\n\n","sub_path":"checkDataTest/viewApay.py","file_name":"viewApay.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"501666929","text":"# -*- coding: utf-8 -*-\n\n# This file is part of CRAY ONLINE.\n#\n# CRAY ONLINE is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# CRAY ONLINE is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with CRAY ONLINE; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA\n\nimport wx, sys, os, webbrowser\nfrom wx import richtext\nimport engine, network, dialogs, gameframe, room, updater\nfrom printer import DeckPrinter\n\nID_NEW = 10001\nID_OPEN = 10002\nID_SAVE = 10003\nID_SAVEAS = 10004\nID_PRINT = 10005\nID_CLOSE = 10006\nID_ABOUT = 10007\nID_ADD = 10008\nID_POPUP_REMOVE = 10009\nID_REMOVE = 10010\nID_POPUP_ADD = 10011\nID_CONNECT = 10013\nID_LISTEN = 10014\nID_PLAY = 10015\nID_ADVANCED = 10016\nID_ROOMS = 10017\nID_WEB = 10018\nID_SETTINGS = 10019\nID_UPDATE = 10020\n\n\nclass MainFrame(wx.Frame):\n def __init__(self, engine, *args, **kwargs):\n \n wx.Frame.__init__(self, *args, **kwargs)\n\n self.Centre() # Centre the frame on the screen\n self.Engine = engine # Keep a reference to the engine\n self.SelectedFromDeck = ''\n self.SelectedFromSide = False\n self.panel = wx.Panel(self) # Panel that will hold the controls and the sizers\n self.vbox1 = wx.BoxSizer(wx.VERTICAL) # Vertical sizer no. 1\n self.vbox2 = wx.BoxSizer(wx.VERTICAL) # Vertical sizer no. 2\n self.vbox3 = wx.BoxSizer(wx.VERTICAL) # Vertical sizer no. 3\n self.vbox4 = wx.BoxSizer(wx.VERTICAL) # Vertical sizer no. 4\n self.vbox5 = wx.BoxSizer(wx.VERTICAL) # Vertical sizer no. 5\n self.hbox = wx.BoxSizer(wx.HORIZONTAL) # Horizontal sizer that contains the vertical ones\n self.hmbox1 = wx.BoxSizer(wx.HORIZONTAL) # Sizers for the central area\n self.hmbox2 = wx.BoxSizer(wx.HORIZONTAL) # *\n self.hmbox3 = wx.BoxSizer(wx.HORIZONTAL) # *\n self.hmbox4 = wx.BoxSizer(wx.HORIZONTAL) # *\n self.hmbox5 = wx.BoxSizer(wx.HORIZONTAL) # *\n self.hmbox6 = wx.BoxSizer(wx.HORIZONTAL) # *\n self.hmbox7 = wx.BoxSizer(wx.HORIZONTAL) # *\n self.hmbox8 = wx.BoxSizer(wx.HORIZONTAL) # *\n self.hmbox9 = wx.BoxSizer(wx.HORIZONTAL) # *\n self.hmbox10 = wx.BoxSizer(wx.HORIZONTAL) # *\n self.hmbox11 = wx.BoxSizer(wx.VERTICAL) # *\n self.hvbox1 = wx.BoxSizer(wx.HORIZONTAL)\n #self.Bind(wx.EVT_SHOW, self.OnUpdate)\n self.Bind(wx.EVT_CLOSE, self.OnClose)\n # Menu\n self.Menu = wx.MenuBar()\n \n self.CardCount = wx.StaticText(self.panel, -1, 'Cards Total: 0')\n \n # Status Bar\n self.StatusBar = wx.StatusBar(self,-1) \n self.SetStatusBar(self.StatusBar) \n self.StatusBar.SetStatusText(self.Engine.GetNameVersion(), 0) \n # End Status Bar\n\n # CardSearch Control\n self.CardSearchCtrl = wx.TextCtrl(self.panel, -1, \"\") \n self.CardSearchCtrl.Bind(wx.EVT_TEXT, self.OnSearchInput, self.CardSearchCtrl) \n # End CardSearch Control\n\n # CardList Control\n li = self.Engine.GetAllCards() \n self.DatabaseCardCount = len(li)\n self.CardListCtrl = wx.ListCtrl(parent = self.panel, style = wx.LC_REPORT | wx.LC_SINGLE_SEL | wx.LC_NO_HEADER | wx.LC_HRULES ) \n \n self.CardListCtrl.InsertColumn(0, 'Name') \n self.CardListCtrl.InsertColumn(1, 'CardID')\n n = 0 \n for c in li:\n idx = self.CardListCtrl.InsertStringItem(n, c.Name)\n self.CardListCtrl.SetStringItem(idx, 1, c.CardID)\n n += 1\n self.CardCount.SetLabel('Cards Total: ' + str(n))\n self.CardListCtrl.SetColumnWidth(0, 175) \n self.CardListCtrl.SetColumnWidth(1, 0) \n self.CardListCtrl.Bind(wx.EVT_LIST_ITEM_SELECTED, self.OnCardSelected)\n self.CardListCtrl.Bind(wx.EVT_LIST_ITEM_RIGHT_CLICK, self.OnCardListItemRClick)\n self.CardListCtrl.Bind(wx.EVT_LEFT_DCLICK, self.OnAddCard)\n # End CardList Control\n \n
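#Deck list controls: one ListCtrl for normal units and one for trigger\n #units; the hidden second column stores each row's CardID, which the\n #selection handlers use to look the card up in the database.\n 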
self.MonsterHeaderText = wx.StaticText(self.panel, -1, 'Normal Units: 0')\n self.MonsterListCtrl = wx.ListCtrl(self.panel, -1, style = wx.LC_REPORT | wx.LC_SINGLE_SEL | wx.LC_NO_HEADER | wx.LC_HRULES)\n self.MonsterListCtrl.InsertColumn(0, 'Normal Units')\n self.MonsterListCtrl.SetColumnWidth(0, 175)\n self.MonsterListCtrl.InsertColumn(1, 'CardID')\n self.MonsterListCtrl.SetColumnWidth(1, 0)\n self.MonsterListCtrl.Bind(wx.EVT_LIST_ITEM_SELECTED, self.OnMonsterCardSelected)\n self.MonsterListCtrl.Bind(wx.EVT_LIST_ITEM_RIGHT_CLICK, self.OnMonsterListItemRClick)\n self.MonsterListCtrl.Bind(wx.EVT_LEFT_DCLICK, self.OnRemoveCard)\n \n self.TriggerHeaderText = wx.StaticText(self.panel, -1, 'Trigger Units: 0')\n self.TriggerListCtrl = wx.ListCtrl(self.panel, -1, style = wx.LC_REPORT | wx.LC_SINGLE_SEL | wx.LC_NO_HEADER | wx.LC_HRULES)\n self.TriggerListCtrl.InsertColumn(0, 'Trigger Units')\n self.TriggerListCtrl.SetColumnWidth(0, 175)\n self.TriggerListCtrl.InsertColumn(1, 'CardID')\n self.TriggerListCtrl.SetColumnWidth(1, 0)\n self.TriggerListCtrl.Bind(wx.EVT_LIST_ITEM_SELECTED, self.OnTriggerCardSelected)\n self.TriggerListCtrl.Bind(wx.EVT_LIST_ITEM_RIGHT_CLICK, self.OnTriggerListItemRClick)\n self.TriggerListCtrl.Bind(wx.EVT_LEFT_DCLICK, self.OnRemoveCard)\n \n self.DeckCountText = wx.StaticText(self.panel, -1, 'Deck: 0')\n \n self.G0CountText = wx.StaticText(self.panel, -1, 'G0: 0')\n self.G1CountText = wx.StaticText(self.panel, -1, 'G1: 0')\n self.G2CountText = wx.StaticText(self.panel, -1, 'G2: 0')\n self.G3CountText = wx.StaticText(self.panel, -1, 'G3: 0')\n \n self.HTCountText = wx.StaticText(self.panel, -1, 'HT: 0')\n self.STCountText = wx.StaticText(self.panel, -1, 'ST: 0')\n self.CTCountText = wx.StaticText(self.panel, -1, 'CT: 0')\n self.DTCountText = wx.StaticText(self.panel, -1, 'DT: 0')\n \n #Monsterlist Popup\n self.MonsterListPopupMenu = wx.Menu()\n item = wx.MenuItem(self.MonsterListPopupMenu,ID_POPUP_REMOVE,self.Engine.GetLangString('Remove'))\n item.SetBitmap(self.Engine.GetSkinImage('Totrunk'))\n self.Bind(wx.EVT_MENU, self.OnRemoveCard, item)\n self.MonsterListPopupMenu.AppendItem(item)\n # End Monsterlist Popup\n\n self.TriggerListPopupMenu = wx.Menu()\n item = wx.MenuItem(self.TriggerListPopupMenu,ID_POPUP_REMOVE,self.Engine.GetLangString('Remove'))\n item.SetBitmap(self.Engine.GetSkinImage('Totrunk'))\n self.Bind(wx.EVT_MENU, self.OnRemoveCard, item)\n self.TriggerListPopupMenu.AppendItem(item)\n \n self.CardNameCtrl = wx.StaticText(self.panel, -1, style=wx.ALIGN_CENTRE)\n self.CardImageCtrl = wx.StaticBitmap(self.panel, -1, size=(165,240))\n self.CardDescriptionCtrl = wx.TextCtrl(self.panel, -1,size=(330,140), style=wx.TE_MULTILINE | wx.TE_READONLY | wx.TE_BESTWRAP)\n # End\n \n self.BuildUI()\n\n # Layout\n \n self.vbox1.Add(self.hvbox1, 0, wx.ALL | wx.EXPAND, 2) # Add the search row to vbox1 (control, proportion, style, margin)\n self.hvbox1.Add(self.CardSearchCtrl, 1, wx.ALL | wx.EXPAND, 2) # Add the CardSearchCtrl to the search row\n self.hvbox1.Add(self.CardReloadCtrl, 0, wx.ALL | wx.EXPAND, 2)\n self.vbox1.Add(self.CardListCtrl, 1, wx.ALL | wx.EXPAND, 0) # Add the CardListCtrl to vbox1\n self.vbox1.Add(self.CardCount, 0, wx.ALL | wx.EXPAND, 2)\n \n self.vbox2.Add(self.hmbox1, 0, wx.ALL | wx.EXPAND, 2) # Central part of the layout\n self.vbox2.Add(self.hmbox2, 1, wx.ALL | wx.EXPAND, 2) # *\n self.vbox3.Add(self.hmbox3, 0, wx.ALL | wx.EXPAND, 2) # *\n self.vbox3.Add(self.hmbox4, 1, wx.ALL | wx.EXPAND, 2) # *\n 
self.vbox4.Add(self.hmbox5, 0, wx.ALL | wx.EXPAND, 2) # *\n self.vbox4.Add(self.hmbox6, 1, wx.ALL | wx.EXPAND, 2) # *\n #self.vbox2.Add(self.hmbox7, 0, wx.ALL | wx.EXPAND, 2) # *\n #self.vbox2.Add(self.hmbox8, 1, wx.ALL | wx.EXPAND, 2) # *\n #self.vbox3.Add(self.hmbox9, 0, wx.ALL | wx.EXPAND, 2) # *\n #self.vbox3.Add(self.hmbox10, 1, wx.ALL | wx.EXPAND, 2) # *\n self.vbox4.Add(self.hmbox11, 1, wx.ALL | wx.EXPAND, 2) # *\n self.vbox4.AddSpacer(25,0) # *\n self.vbox4.AddSpacer(25,0) # *\n self.vbox4.AddSpacer(25,0) # *\n self.hmbox1.Add(self.MonsterHeaderText, 1, wx.ALL | wx.EXPAND, 2) # *\n self.hmbox2.Add(self.MonsterListCtrl, 1, wx.ALL | wx.EXPAND, 2) # *\n self.hmbox3.Add(self.TriggerHeaderText, 1, wx.ALL | wx.EXPAND, 2) # *\n self.hmbox4.Add(self.TriggerListCtrl, 1, wx.ALL | wx.EXPAND, 2) # *\n self.hmbox11.Add(self.DeckCountText, 0, wx.ALL | wx.EXPAND, 2) # *\n self.hmbox11.Add(self.G0CountText, 0, wx.ALL | wx.EXPAND, 2) # *\n self.hmbox11.Add(self.G1CountText, 0, wx.ALL | wx.EXPAND, 2) # *\n self.hmbox11.Add(self.G2CountText, 0, wx.ALL | wx.EXPAND, 2) # *\n self.hmbox11.Add(self.G3CountText, 0, wx.ALL | wx.EXPAND, 2) # *\n \n self.hmbox11.Add(self.HTCountText, 0, wx.ALL | wx.EXPAND, 2) # *\n self.hmbox11.Add(self.STCountText, 0, wx.ALL | wx.EXPAND, 2) # *\n self.hmbox11.Add(self.CTCountText, 0, wx.ALL | wx.EXPAND, 2) # *\n self.hmbox11.Add(self.DTCountText, 0, wx.ALL | wx.EXPAND, 2) # *\n\n self.vbox5.Add(self.CardNameCtrl, 0, wx.ALL | wx.ALIGN_CENTER , 4)\n self.vbox5.Add(self.CardImageCtrl, 0, wx.ALL | wx.ALIGN_CENTER, 4)\n self.vbox5.Add(self.CardDescriptionCtrl, 1, wx.ALL | wx.EXPAND | wx.ALIGN_CENTER, 4)\n \n self.hbox.Add(self.vbox1, 1, wx.EXPAND | wx.ALL, 2) # Add vbox1 to the hbox (control, proportion, style, margin)\n self.hbox.Add(self.vbox2, 1, wx.EXPAND | wx.ALL, 2) # Add vbox2 to the hbox\n self.hbox.Add(self.vbox3, 1, wx.EXPAND | wx.ALL, 2) # Add vbox3 to the hbox\n self.hbox.Add(self.vbox4, 0, wx.EXPAND | wx.ALL, 2) # Add vbox4 to the hbox\n self.hbox.Add(self.vbox5, 0, wx.EXPAND | wx.ALL, 2) # Add vbox5 to the hbox\n self.panel.SetSizer(self.hbox) # Set the panel's sizer\n self.panel.Layout() # Lay out the panel\n \n # End Layout\n \n self.AdvancedSearchFrame = dialogs.AdvancedSearch(self)\n self.CardSearchCtrl.SetFocus()\n\n def BuildUI(self, changes=0):\n if changes:\n self.CardReloadCtrl.Destroy()\n self.GetToolBar().Destroy()\n self.CardListPopupMenu.Destroy()\n self.MonsterListPopupMenu.Destroy()\n \n # Menu\n self.Menu = wx.MenuBar()\n\n self.mFile = wx.Menu()\n self.mGame = wx.Menu()\n self.mTools = wx.Menu()\n self.mHelp = wx.Menu()\n\n # File Menu\n item = wx.MenuItem(self.mFile,ID_NEW,self.Engine.GetLangString('New'))\n item.SetBitmap(self.Engine.GetSkinImage('New'))\n self.Bind(wx.EVT_MENU, self.OnNew, item)\n self.mFile.AppendItem(item)\n\n item = wx.MenuItem(self.mFile,ID_OPEN,self.Engine.GetLangString('Open'))\n item.SetBitmap(self.Engine.GetSkinImage('Open'))\n self.Bind(wx.EVT_MENU, self.OnOpen, item)\n self.mFile.AppendItem(item)\n\n item = wx.MenuItem(self.mFile,ID_SAVE,self.Engine.GetLangString('Save'))\n item.SetBitmap(self.Engine.GetSkinImage('Save'))\n self.Bind(wx.EVT_MENU, self.OnSave, item)\n self.mFile.AppendItem(item)\n\n item = wx.MenuItem(self.mFile,ID_SAVEAS,self.Engine.GetLangString('SaveAs'))\n item.SetBitmap(self.Engine.GetSkinImage('SaveAs'))\n 
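#Each File-menu entry follows the same pattern: create the MenuItem,\n #attach a skin bitmap, bind its handler, then append it to the menu.\n 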
self.Bind(wx.EVT_MENU, self.OnSaveAs, item)\n self.mFile.AppendItem(item)\n \n item = wx.MenuItem(self.mFile,ID_ADVANCED,self.Engine.GetLangString('Advanced Search'))\n item.SetBitmap(self.Engine.GetSkinImage('Find'))\n self.Bind(wx.EVT_MENU, self.OnAdvancedSearchMenu, item)\n self.mFile.AppendItem(item)\n\n item = wx.MenuItem(self.mFile,ID_PRINT,self.Engine.GetLangString('Print'))\n item.SetBitmap(self.Engine.GetSkinImage('Print'))\n self.Bind(wx.EVT_MENU, self.OnPrint, item)\n self.mFile.AppendItem(item)\n\n item = wx.MenuItem(self.mFile,ID_CLOSE,self.Engine.GetLangString('Close'))\n item.SetBitmap(self.Engine.GetSkinImage('Close'))\n self.Bind(wx.EVT_MENU, self.OnMenuClose, item)\n self.mFile.AppendItem(item)\n # End File Menu \n \n # Game Menu\n \n item = self.mGame.Append(ID_CONNECT, text = self.Engine.GetLangString('Connect'))\n self.Bind(wx.EVT_MENU, self.OnConnectMenu, item)\n \n item = self.mGame.Append(ID_LISTEN, text = self.Engine.GetLangString('Listen'))\n self.Bind(wx.EVT_MENU, self.OnListenMenu, item)\n\n item = self.mGame.Append(ID_PLAY, text = self.Engine.GetLangString('Test'))\n self.Bind(wx.EVT_MENU, self.OnPlayMenu, item)\n\n item = wx.MenuItem(self.mGame,-1,self.Engine.GetLangString('Rooms'))\n item.SetBitmap(self.Engine.GetSkinImage('Chat'))\n self.Bind(wx.EVT_MENU, self.OnRoomsMenu, item)\n self.mGame.AppendItem(item)\n # End Game Menu\n\n # Help\n item = wx.MenuItem(self.mHelp,ID_SETTINGS,self.Engine.GetLangString('Preferences'))\n item.SetBitmap(self.Engine.GetSkinImage('Preferences'))\n self.Bind(wx.EVT_MENU, self.OnSettings, item)\n self.mHelp.AppendItem(item)\n\n item = wx.MenuItem(self.mHelp,ID_UPDATE,self.Engine.GetLangString('Update'))\n item.SetBitmap(self.Engine.GetSkinImage('Update'))\n self.Bind(wx.EVT_MENU, self.OnUpdate, item)\n self.mHelp.AppendItem(item)\n #TO DO: Fix images updater\n #item = wx.MenuItem(self.mHelp,-1,self.Engine.GetLangString('Images Update'))\n #item.SetBitmap(self.Engine.GetSkinImage('Update'))\n #self.Bind(wx.EVT_MENU, self.OnImagesUpdate, item)\n #self.mHelp.AppendItem(item)\n \n item = wx.MenuItem(self.mHelp,ID_WEB,self.Engine.GetLangString('CRAY ONLINE.Web'))\n item.SetBitmap(self.Engine.GetSkinImage('Web'))\n self.Bind(wx.EVT_MENU, self.OnWeb, item)\n self.mHelp.AppendItem(item)\n\n\n item = wx.MenuItem(self.mHelp,ID_ABOUT,self.Engine.GetLangString('About'))\n item.SetBitmap(self.Engine.GetSkinImage('About'))\n self.Bind(wx.EVT_MENU, self.OnAbout, item)\n self.mHelp.AppendItem(item)\n # End Help Menu\n\n self.Menu.Append(self.mFile, self.Engine.GetLangString('File'))\n self.Menu.Append(self.mGame, self.Engine.GetLangString('Game'))\n self.Menu.Append(self.mHelp, self.Engine.GetLangString('Help'))\n self.SetMenuBar(self.Menu)\n \n # ToolBar\n self.toolbar = self.CreateToolBar()\n self.toolbar.SetToolBitmapSize((16,16))\n self.toolbar.AddLabelTool(ID_NEW, 'New', self.Engine.GetSkinImage('New'), shortHelp = self.Engine.GetLangString('New'), longHelp = self.Engine.GetLangString('Create a new deck'))\n self.toolbar.AddLabelTool(ID_OPEN, 'Open', self.Engine.GetSkinImage('Open'), shortHelp = self.Engine.GetLangString('Open'), longHelp = self.Engine.GetLangString('Open an existent deck'))\n self.toolbar.AddLabelTool(ID_SAVE, 'Save', self.Engine.GetSkinImage('Save'), shortHelp = self.Engine.GetLangString('Save'), longHelp = self.Engine.GetLangString('Save current deck'))\n self.toolbar.AddLabelTool(ID_SAVEAS, 'Save As...', self.Engine.GetSkinImage('SaveAs'), shortHelp = self.Engine.GetLangString('Save As...'), longHelp = 
self.Engine.GetLangString('Save current deck to a new path'))\n self.toolbar.AddLabelTool(ID_PRINT, 'Print', self.Engine.GetSkinImage('Print'), shortHelp = self.Engine.GetLangString('Print'), longHelp = self.Engine.GetLangString('Print current deck'))\n self.toolbar.AddSeparator()\n self.toolbar.AddLabelTool(ID_ADVANCED, 'Advanced Search', self.Engine.GetSkinImage('Find'), shortHelp = self.Engine.GetLangString('Advanced Search'), longHelp = self.Engine.GetLangString('Open the Advanced Search Form'))\n self.toolbar.Realize()\n # End ToolBar\n \n # CardReload Control\n self.CardReloadCtrl = wx.BitmapButton(self.panel, -1, self.Engine.GetSkinImage('Reload'))\n self.CardReloadCtrl.SetToolTipString(self.Engine.GetLangString('Reload'))\n self.CardReloadCtrl.Bind(wx.EVT_BUTTON, self.OnCardReload)\n # EndCardRefresh Control\n \n # CardList Popup\n self.CardListPopupMenu = wx.Menu()\n item = wx.MenuItem(self.CardListPopupMenu,ID_POPUP_ADD,self.Engine.GetLangString('Add'))\n item.SetBitmap(self.Engine.GetSkinImage('Todeck'))\n self.Bind(wx.EVT_MENU, self.OnAddCard, item)\n self.CardListPopupMenu.AppendItem(item)\n \n if changes:\n self.hvbox1.Add(self.CardReloadCtrl, 0, wx.ALL | wx.EXPAND, 2)\n\n def RefreshCardList(self):\n self.MonsterListCtrl.DeleteAllItems()\n self.TriggerListCtrl.DeleteAllItems()\n mo = self.Engine.Deck.GetMonsters()\n mo.sort(lambda x, y: cmp(x.Name, y.Name))\n for c in mo:\n idx = self.MonsterListCtrl.InsertStringItem(self.MonsterListCtrl.GetItemCount(), c.Name)\n self.MonsterListCtrl.SetStringItem(idx, 1, c.CardID)\n sp = self.Engine.Deck.GetTrigger()\n sp.sort(lambda x, y: cmp(x.Name, y.Name))\n for c in sp:\n idx = self.TriggerListCtrl.InsertStringItem(self.TriggerListCtrl.GetItemCount(), c.Name)\n self.TriggerListCtrl.SetStringItem(idx, 1, c.CardID)\n self.MonsterListCtrl.SetColumnWidth(0, 200)\n self.TriggerListCtrl.SetColumnWidth(0, 200)\n self.MonsterHeaderText.SetLabel('Normal Units: ' + str(self.MonsterListCtrl.GetItemCount()))\n self.TriggerHeaderText.SetLabel('Trigger Units: ' + str(self.TriggerListCtrl.GetItemCount()))\n self.DeckCountText.SetLabel('Deck: %s' % str(self.MonsterListCtrl.GetItemCount()+self.TriggerListCtrl.GetItemCount()))\n g0 = self.Engine.Deck.GetG0()\n self.G0CountText.SetLabel('G0: %s' % str(g0))\n g1 = self.Engine.Deck.GetG1()\n self.G1CountText.SetLabel('G1: %s' % str(g1))\n g2 = self.Engine.Deck.GetG2()\n self.G2CountText.SetLabel('G2: %s' % str(g2))\n g3 = self.Engine.Deck.GetG3()\n self.G3CountText.SetLabel('G3: %s' % str(g3))\n \n ht = self.Engine.Deck.CheckHT2()\n self.HTCountText.SetLabel('HT: %s' % str(ht))\n st = self.Engine.Deck.CheckST()\n self.STCountText.SetLabel('ST: %s' % str(st))\n ct = self.Engine.Deck.CheckCT()\n self.CTCountText.SetLabel('CT: %s' % str(ct))\n dt = self.Engine.Deck.CheckDT()\n self.DTCountText.SetLabel('DT: %s' % str(dt))\n\n def ShowDialog(self, message, title, style, parent=None):\n if parent == None:\n parent = self\n dialog = wx.MessageDialog(parent,message,title,style)\n return dialog.ShowModal()\n \n def OnConnectMenu(self, event):\n tu = self.TriggerListCtrl.GetItemCount()\n nu = self.MonsterListCtrl.GetItemCount()\n if tu !=16 or nu !=34:\n self.ShowDialog('Your deck is not ready!', '', wx.OK | wx.ICON_ERROR, self)\n self.OkButton.Enable()\n else:\n self.Engine.GameFrame = gameframe.GameFrame(self.Engine)\n self.Engine.Game = self.Engine.GameFrame.Game\n self.Engine.Network = network.Network(self.Engine.Game)\n dialog = dialogs.ConnectionDialog(self)\n dialog.ShowModal()\n\n \n def OnListenMenu(self, 
event):\n tu = self.TriggerListCtrl.GetItemCount()\n nu = self.MonsterListCtrl.GetItemCount()\n if tu !=16 or nu !=34:\n self.ShowDialog('Your deck is not ready!', '', wx.OK | wx.ICON_ERROR, self)\n self.OkButton.Enable()\n else:\n self.Engine.GameFrame = gameframe.GameFrame(self.Engine)\n self.Engine.Game = self.Engine.GameFrame.Game\n self.Engine.Network = network.Network(self.Engine.Game)\n dialog = dialogs.ListenDialog(self)\n dialog.ShowModal()\n\n def OnPlayMenu(self, event):\n self.Engine.GameFrame = gameframe.GameFrame(self.Engine)\n self.Engine.Game = self.Engine.GameFrame.Game\n self.Engine.Network = network.Network(self.Engine.Game)\n self.Engine.Game._nick = self.Engine.GetSetting('Nick')\n self.Engine.Game.Shuffle()\n self.Engine.GameFrame.Show()\n\n def OnRoomsMenu(self, event):\n tu = self.TriggerListCtrl.GetItemCount()\n nu = self.MonsterListCtrl.GetItemCount()\n if tu !=16 or nu !=34:\n self.ShowDialog('Your deck is not ready!', '', wx.OK | wx.ICON_ERROR, self)\n self.OkButton.Enable()\n else:\n self.Engine.GameFrame = gameframe.GameFrame(self.Engine)\n self.Engine.Game = self.Engine.GameFrame.Game\n self.Engine.Network = network.Network(self.Engine.Game)\n dialog = room.Login(self)\n dialog.ShowModal()\n try: dialog.EndTimer()\n except: pass \n \n def OnAdvancedSearchMenu(self, event):\n self.AdvancedSearchFrame.Show()\n \n def OnCardListItemRClick(self,event):\n self.panel.PopupMenu(self.CardListPopupMenu)\n \n def OnMonsterListItemRClick(self,event):\n self.panel.PopupMenu(self.MonsterListPopupMenu)\n \n def OnTriggerListItemRClick(self,event):\n self.panel.PopupMenu(self.TriggerListPopupMenu)\n\n def OnSearchInput(self, event):\n input = self.CardSearchCtrl.GetValue()\n if len(input) < 3: #Optimal choice is 3 letters. Less can slow down pc.\n return\n li = self.Engine.FindCardByNameLike(input)\n self.CardListCtrl.DeleteAllItems()\n n=0\n for c in li:\n idx = self.CardListCtrl.InsertStringItem(n,c.Name)\n self.CardListCtrl.SetStringItem(idx, 1, c.CardID)\n n+=1\n \n def OnCardReload(self, event):\n li = self.Engine.GetAllCards()\n self.CardListCtrl.DeleteAllItems()\n n=0\n for c in li:\n idx = self.CardListCtrl.InsertStringItem(n,c.Name)\n self.CardListCtrl.SetStringItem(idx, 1, c.CardID)\n n+=1\n self.CardListCtrl.SetColumnWidth(0, 200)\n self.CardSearchCtrl.SetValue('')\n\n def OnCardSelected(self, event):\n cardID = self.CardListCtrl.GetItem(event.m_itemIndex, 1).GetText()\n card = self.Engine.FindCardByCardID(cardID)\n self.SelectedFromDeck = cardID\n self.ShowCardInfo(card)\n\n def OnMonsterCardSelected(self, event):\n cardID = self.MonsterListCtrl.GetItem(event.m_itemIndex, 1).GetText()\n card = self.Engine.FindCardByCardID(cardID)\n self.SelectedFromDeck = cardID\n self.ShowCardInfo(card)\n \n def OnTriggerCardSelected(self, event):\n cardID = self.TriggerListCtrl.GetItem(event.m_itemIndex, 1).GetText()\n card = self.Engine.FindCardByCardID(cardID)\n self.SelectedFromDeck = cardID\n self.ShowCardInfo(card)\n\n # TO DO: \n def ShowCardInfo(self,card):\n self.CardNameCtrl.SetLabel(card.Name)\n self.CardImageCtrl.SetBitmap(self.Engine.GetBigCardImage(card))\n desc = card.Class + '/'\n desc += card.Nation + '/'\n desc += card.Clan + '/'\n desc += card.Race + '\\n'\n desc += 'GRADE:' + card.Grade + '\\n'\n if card.Skill != '':\n desc += 'SKILL: ' + card.Skill + '\\n'\n if card.Class == 'Trigger Unit':\n desc += 'TRIGGER: ' + card.Trigger + '\\n'\n desc += 'POWER:' + card.Power + ' CRITICAL:' + card.Critical +' SHIELD:' + card.Shield + '\\n'\n if card.Effect != '':\n 
desc += '\\n' + card.Effect + '\\n'\n if card.Illustrator != '?' and card.Illustrator != '':\n desc += '\\n' + 'Illustrator: ' + card.Illustrator\n if card.Text != '?' and card.Text != '':\n desc += '\\n' + 'Card text: ' + card.Text + '\\n'\n desc += '\\n'+card.CardID + '\\n'\n self.CardDescriptionCtrl.SetValue(desc)\n self.panel.SendSizeEvent()\n\n def OnAddCard(self, event):\n if self.SelectedFromDeck == '':\n return\n c = self.Engine.FindCardByCardID(self.SelectedFromDeck)\n cc = self.Engine.Deck.CheckCard(self.SelectedFromDeck)\n ht = self.Engine.Deck.CheckHT(self.SelectedFromDeck)\n #ct = self.Engine.Deck.CheckTrigers(self.SelectedFromDeck)\n if cc < 4:\n if c.Trigger == '+5000 Power, Heal':\n if ht < 4:\n self.Engine.Deck.Add(c)\n else:\n self.Engine.Deck.Add(c)\n self.RefreshCardList()\n\n def OnRemoveCard(self, event):\n if self.SelectedFromDeck == '':\n return\n self.Engine.Deck.RemoveCardID(self.SelectedFromDeck)\n self.SelectedFromDeck = ''\n self.RefreshCardList()\n\n def OnNew(self, event):\n self.Engine.NewDeck()\n self.SelectedFromDeck = ''\n self.RefreshCardList()\n\n def OnOpen(self, event=None, path=''):\n if path == '':\n dialog = wx.FileDialog(self, self.Engine.GetLangString('Open...'), \"\", \"\", \"XML Deck files (*.xml)|*.xml\", wx.FD_OPEN)\n dialog.SetPath(os.path.join(self.Engine.DecksDirectory,'deck.xml'))\n if dialog.ShowModal() != wx.ID_OK:\n dialog.Destroy()\n return\n else:\n name = dialog.GetFilename()\n dir = dialog.GetDirectory()\n path = os.path.join(dir,name)\n dialog.Destroy()\n self.Engine.OpenDeck(path)\n self.Engine.DeckPath = path\n self.RefreshCardList()\n\n def OnSave(self, event):\n if self.Engine.DeckPath != '':\n self.Engine.SaveDeck(self.Engine.Deck,self.Engine.DeckPath)\n else:\n self.OnSaveAs(event)\n\n def OnSaveAs(self, event):\n dialog = wx.FileDialog(self, self.Engine.GetLangString(\"Save As...\"), \"\", \"\", \"XML Deck files (*.xml)|*.xml\", wx.FD_SAVE)\n dialog.SetPath(os.path.join(self.Engine.DecksDirectory,'deck.xml'))\n if dialog.ShowModal() == wx.ID_OK:\n name = dialog.GetFilename()\n dir = dialog.GetDirectory()\n path = os.path.join(dir,name)\n if not path.endswith('.xml'):\n path += '.xml'\n self.Engine.SaveDeck(self.Engine.Deck,path)\n self.Engine.DeckPath = path\n dialog.Destroy()\n\n def OnPrint(self, event):\n self.printData = wx.PrintData()\n self.printData.SetPaperId(wx.PAPER_A4)\n self.printData.SetPrintMode(wx.PRINT_MODE_PRINTER)\n pdd = wx.PrintDialogData(self.printData)\n pdd.SetToPage(1)\n printer = wx.Printer(pdd)\n printout = DeckPrinter(self.Engine.Deck)\n if not printer.Print(self, printout, True):\n pass\n else:\n self.printData = wx.PrintData( printer.GetPrintDialogData().GetPrintData() )\n printout.Destroy()\n\n def OnMenuClose(self, event):\n self.Close()\n\n def OnClose(self, event):\n if self.ShowDialog(self.Engine.GetLangString('Are you sure to quit?'), '?', wx.YES_NO | wx.NO_DEFAULT | wx.ICON_QUESTION) == wx.ID_YES:\n try: self.Engine.GameFrame.Close()\n except: pass\n self.Engine.SaveSettings({'LastDeckPath':self.Engine.DeckPath})\n sys.exit()\n\n def OnWeb(self, event=None):\n try:\n webbrowser.open_new_tab('http://vanguard.jproject.xz.lt/')\n except: pass\n\n def OnSettings(self, event=None):\n dialogs.SettingsDialog(self).ShowModal()\n self.BuildUI(1)\n\n def OnImportDeck(self, event=None):\n dialogs.ImportDeckDialog(self).ShowModal()\n\n # Update Check\n def OnUpdate(self, event=None):\n ud = updater.UpdateDialog(self, self.Engine)\n toupdate = updater.CheckUpdate(self.Engine.BaseDirectory)\n ''''if 
len(toupdate) > 0:\n if self.ShowDialog(self.Engine.GetLangString('An update is avaible, would you like to update now?'),'',wx.YES_NO | wx.ICON_QUESTION | wx.YES_DEFAULT) == wx.ID_YES:\n updater.Update(self.Engine.BaseDirectory,toupdate)\n self.ShowDialog(self.Engine.GetLangString('Now you can restart the application.'),'',wx.OK | wx.ICON_INFORMATION)\n sys.exit()\n else:\n self.ShowDialog(self.Engine.GetLangString('No update needed.'), '', wx.OK | wx.ICON_INFORMATION)'''\n\n def OnImagesUpdate(self, event=None):\n '''Images Updating'''\n updater.ImagesUpdateDialog(self, self.Engine)\n\n # Mostra la finestra About\n def OnAbout(self, event = None):\n info = wx.AboutDialogInfo()\n info.SetName(self.Engine.GetName())\n info.SetWebSite('http://code.google.com/p/cardfight-vanguard-vds/')\n info.SetWebSite('http://vanguard.jproject.xz.lt')\n info.SetVersion(self.Engine.GetVersion())\n info.SetDescription('CRAY ONLINE is a multi-platform Cardfight!! Vanguard Dueling and Deck Building application written in Python and using wxPython as GUI Library.')\n info.SetLicense(\"\"\"CRAY ONLINE is free software; you can redistribute it and/or modify it \nunder the terms of the GNU General Public License as published by the Free Software Foundation; \neither version 2 of the License, or (at your option) any later version.\n\nCRAY ONLINE is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; \nwithout even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. \nSee the GNU General Public License for more details. You should have received a copy of \nthe GNU General Public License along with J_PROJECT; if not, write to \nthe Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\"\"\")\n info.AddDeveloper(\"J_BYYX (code, database, images), FelGrand (database)\")\n info.AddArtist(\"J_BYYX, Jovan Dmitrović\")\n wx.AboutBox(info)","sub_path":"mainform.py","file_name":"mainform.py","file_ext":"py","file_size_in_byte":30740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"276116186","text":"from PyQt5.QtCore import pyqtSignal, Qt, QRegExp\nfrom PyQt5.QtGui import QFontDatabase, QSyntaxHighlighter, QTextCharFormat, QFont, QColor\nfrom PyQt5.QtWidgets import QMessageBox, QDialog\n\nfrom gui.code_edit import Ui_CodeEditDialog\nfrom src.gui.file_transfer_dialog import FileTransferDialog\nfrom src.utility.settings import Settings\n\n\nclass CodeEditDialog(QDialog, Ui_CodeEditDialog):\n mcu_file_saved = pyqtSignal()\n\n def __init__(self, parent, connection):\n super(CodeEditDialog, self).__init__(None, Qt.WindowCloseButtonHint)\n self.setupUi(self)\n\n geometry = Settings().retrieve_geometry(\"editor\")\n if geometry:\n self.restoreGeometry(geometry)\n\n self._connection = connection\n\n self.saveLocalButton.clicked.connect(self._save_local)\n self.saveMcuButton.clicked.connect(self._save_to_mcu)\n #self.runButton.clicked.connect(self._run_file)\n self.runButton.hide()\n\n fixed_font = QFontDatabase.systemFont(QFontDatabase.FixedFont)\n self.codeEdit.setFont(fixed_font)\n\n self.highlighter = Highlighter(self.codeEdit.document())\n\n if connection and connection.is_connected():\n self.connected(connection)\n else:\n self.disconnected()\n\n def closeEvent(self, event):\n Settings().update_geometry(\"editor\", self.saveGeometry())\n super(CodeEditDialog, self).closeEvent(event)\n\n def disconnected(self):\n self._connection = None\n self.saveMcuButton.setEnabled(False)\n\n def 
connected(self, connection):\n self._connection = connection\n self.saveMcuButton.setEnabled(True)\n\n def _save_local(self):\n path = self.localPathEdit.text()\n if not path:\n QMessageBox.warning(self, \"Invalid path\", \"Enter correct path for local file.\")\n return\n\n try:\n with open(path, \"w\") as file:\n file.write(self.codeEdit.toPlainText())\n except IOError:\n QMessageBox.critical(self, \"Save operation failed\", \"Couldn't save the file. Check path and permissions.\")\n\n def _save_to_mcu(self):\n name = self.remotePathEdit.text()\n if not name:\n QMessageBox.warning(self, \"Invalid name\", \"Enter correct name for remote file.\")\n return\n\n content = self.codeEdit.toPlainText()\n if not content:\n QMessageBox.warning(self, \"Empty file\", \"Can't write empty file.\")\n return\n\n progress_dlg = FileTransferDialog(FileTransferDialog.UPLOAD)\n progress_dlg.finished.connect(self.mcu_file_saved.emit)\n progress_dlg.show()\n self._connection.write_file(name, content, progress_dlg.transfer)\n\n def set_code(self, local_path, remote_path, code):\n self.codeEdit.clear()\n self.codeEdit.setPlainText(code)\n self.localPathEdit.setText(local_path if local_path else \"\")\n self.remotePathEdit.setText(remote_path if remote_path else \"\")\n\n\ndef format(color, style=''):\n \"\"\"Return a QTextCharFormat with the given attributes.\n \"\"\"\n _color = QColor()\n _color.setNamedColor(color)\n\n _format = QTextCharFormat()\n _format.setForeground(_color)\n if 'bold' in style:\n _format.setFontWeight(QFont.Bold)\n if 'italic' in style:\n _format.setFontItalic(True)\n\n return _format\n\n\n# Syntax styles that can be shared by all languages\nSTYLES = {\n 'keyword': format('blue'),\n 'operator': format('red'),\n 'brace': format('darkGray'),\n 'defclass': format('black', 'bold'),\n 'string': format('magenta'),\n 'string2': format('darkMagenta'),\n 'comment': format('darkGreen', 'italic'),\n 'self': format('darkRed', 'italic'),\n 'numbers': format('brown'),\n}\n\nclass Highlighter(QSyntaxHighlighter):\n\n # Python keywords\n keywords = [\n 'and', 'assert', 'break', 'class', 'continue', 'def',\n 'del', 'elif', 'else', 'except', 'exec', 'finally',\n 'for', 'from', 'global', 'if', 'import', 'in',\n 'is', 'lambda', 'not', 'or', 'pass', 'print',\n 'raise', 'return', 'try', 'while', 'yield',\n 'None', 'True', 'False']\n\n # Python operators\n operators = [\n '=',\n # Comparison\n '==', '!=', '<', '<=', '>', '>=',\n # Arithmetic\n '\\+', '-', '\\*', '/', '//', '\\%', '\\*\\*',\n # In-place\n '\\+=', '-=', '\\*=', '/=', '\\%=',\n # Bitwise\n '\\^', '\\|', '\\&', '\\~', '>>', '<<']\n\n # Python braces\n braces = ['\\{', '\\}', '\\(', '\\)', '\\[', '\\]']\n\n def __init__(self, parent=None):\n super(Highlighter, self).__init__(parent)\n\n keywordFormat = QTextCharFormat()\n keywordFormat.setForeground(Qt.darkBlue)\n keywordFormat.setFontWeight(QFont.Bold)\n\n\n\n keywordPatterns = [\"\\\\bchar\\\\b\", \"\\\\bclass\\\\b\", \"\\\\bconst\\\\b\",\n \"\\\\bdouble\\\\b\", \"\\\\benum\\\\b\", \"\\\\bexplicit\\\\b\", \"\\\\bfriend\\\\b\",\n \"\\\\binline\\\\b\", \"\\\\bint\\\\b\", \"\\\\blong\\\\b\", \"\\\\bnamespace\\\\b\",\n \"\\\\boperator\\\\b\", \"\\\\bprivate\\\\b\", \"\\\\bprotected\\\\b\",\n \"\\\\bpublic\\\\b\", \"\\\\bshort\\\\b\", \"\\\\bsignals\\\\b\", \"\\\\bsigned\\\\b\",\n \"\\\\bslots\\\\b\", \"\\\\bstatic\\\\b\", \"\\\\bstruct\\\\b\",\n \"\\\\btemplate\\\\b\", \"\\\\btypedef\\\\b\", \"\\\\btypename\\\\b\",\n \"\\\\bunion\\\\b\", \"\\\\bunsigned\\\\b\", \"\\\\bvirtual\\\\b\", 
\"\\\\bvoid\\\\b\",\n \"\\\\bvolatile\\\\b\", \"\\\\bfrom\\\\b\", \"\\\\bimport\\\\b\"]\n\n self.highlightingRules = [] #[(QRegExp(pattern), 0, keywordFormat) for pattern in keywordPatterns]\n\n #classFormat = QTextCharFormat()\n #classFormat.setFontWeight(QFont.Bold)\n #classFormat.setForeground(Qt.darkMagenta)\n #self.highlightingRules.append((QRegExp(\"\\\\bQ[A-Za-z]+\\\\b\"), classFormat))\n\n #singleLineCommentFormat = QTextCharFormat()\n #singleLineCommentFormat.setForeground(Qt.red)\n #self.highlightingRules.append((QRegExp(\"//[^\\n]*\"), singleLineCommentFormat))\n\n #self.multiLineCommentFormat = QTextCharFormat()\n #self.multiLineCommentFormat.setForeground(Qt.red)\n\n #quotationFormat = QTextCharFormat()\n #quotationFormat.setForeground(Qt.darkGreen)\n #self.highlightingRules.append((QRegExp(\"\\\".*\\\"\"), quotationFormat))\n\n #functionFormat = QTextCharFormat()\n #functionFormat.setFontItalic(True)\n #functionFormat.setForeground(Qt.blue)\n #self.highlightingRules.append((QRegExp(\"\\\\b[A-Za-z0-9_]+(?=\\\\()\"), functionFormat))\n\n self.commentStartExpression = QRegExp(\"/\\\\*\")\n self.commentEndExpression = QRegExp(\"\\\\*/\")\n\n# self.highlightingRules = [QRegExp(\"'''\"), STYLES['string2']]\n# self.highlightingRules.append((QRegExp(\"'''\"), STYLES['string2']))\n# self.highlightingRules.append((QRegExp('\"\"\"'), STYLES['string2']))\n\n rules = []\n # Keyword, operator, and brace rules\n\n rules += [(r'\\b%s\\b' % w, STYLES['keyword'])\n for w in self.keywords]\n\n rules += [(r'%s' % o, STYLES['operator'])\n for o in self.operators]\n\n rules += [(r'%s' % b, STYLES['brace'])\n for b in self.braces]\n\n # All other rules\n\n rules += [\n # 'self'\n (r'\\bself\\b', STYLES['self']),\n # Double-quoted string, possibly containing escape sequences\n (r'\"[^\"\\\\]*(\\\\.[^\"\\\\]*)*\"', STYLES['string']),\n # Single-quoted string, possibly containing escape sequences\n (r\"'[^'\\\\]*(\\\\.[^'\\\\]*)*'\", STYLES['string']),\n # 'def' followed by an identifier\n (r'\\bdef\\b\\s*(\\w+)', STYLES['defclass']),\n # 'class' followed by an identifier\n (r'\\bclass\\b\\s*(\\w+)', STYLES['defclass']),\n # From '#' until a newline\n (r'#[^\\n]*', STYLES['comment']),\n # Numeric literals\n (r'\\b[+-]?[0-9]+[lL]?\\b', STYLES['numbers']),\n (r'\\b[+-]?0[xX][0-9A-Fa-f]+[lL]?\\b', STYLES['numbers']),\n (r'\\b[+-]?[0-9]+(?:\\.[0-9]+)?(?:[eE][+-]?[0-9]+)?\\b', STYLES['numbers']),\n ]\n\n rules += [(\"'''\", STYLES['string2'])]\n rules += [('\"\"\"', STYLES['string2'])]\n\n self.highlightingRules = [(QRegExp(pat), fmt)\n for (pat, fmt) in rules]\n\n def highlightBlock(self, text):\n for pattern, format in self.highlightingRules:\n expression = QRegExp(pattern)\n index = expression.indexIn(text)\n while index >= 0:\n length = expression.matchedLength()\n self.setFormat(index, length, format)\n index = expression.indexIn(text, index + length)\n\n self.setCurrentBlockState(0)\n\n startIndex = 0\n if self.previousBlockState() != 1:\n startIndex = self.commentStartExpression.indexIn(text)\n\n while startIndex >= 0:\n endIndex = self.commentEndExpression.indexIn(text, startIndex)\n\n if endIndex == -1:\n self.setCurrentBlockState(1)\n commentLength = len(text) - startIndex\n else:\n commentLength = endIndex - startIndex + self.commentEndExpression.matchedLength()\n\n self.setFormat(startIndex, commentLength, self.multiLineCommentFormat)\n startIndex = self.commentStartExpression.indexIn(text, startIndex + 
commentLength);","sub_path":"src/gui/code_edit_dialog.py","file_name":"code_edit_dialog.py","file_ext":"py","file_size_in_byte":9310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"3173459","text":"import datetime\nimport itertools\nimport time\nimport logging\nimport enum\nfrom typing import Callable, Iterable, Collection, Any\n\nfrom . import timing\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\n\n\ndef try_loop(\n *functions_to_run: Collection[Callable[[Any], None]],\n wait_after_success: datetime.timedelta = datetime.timedelta(hours = 1),\n wait_after_failure: datetime.timedelta = datetime.timedelta(minutes = 1),\n begin_text: str = 'Beginning loop',\n complete_text: str = 'Completed loop',\n):\n \"\"\"\n Run the given functions in a loop.\n\n Parameters\n ----------\n functions_to_run\n Call these functions in order during each loop\n wait_after_success\n How long to wait after a loop completes before trying again if the loop succeeds\n wait_after_failure\n How long to wait after a loop fails (i.e., raises an exception) before trying again\n begin_text\n A string to print at the beginning of the loop\n complete_text\n A string to print at the end of the loop\n \"\"\"\n while True:\n logger.info(begin_text)\n\n with timing.BlockTimer() as timer:\n failed = False\n for f in functions_to_run:\n try:\n f()\n except Exception as e:\n logger.exception(f'Exception encountered while executing loop function: {f}')\n failed = True\n\n logger.info(f'{complete_text}. Elapsed time: {timer.wall_time_elapsed}')\n\n if failed:\n s, wait = 'failed', wait_after_failure\n else:\n s, wait = 'succeeded', wait_after_success\n\n logger.info(f'Loop cycle {s}, next cycle in {wait.total_seconds()} seconds')\n time.sleep(wait.total_seconds())\n\n\ndef grouper(iterable: Iterable, n: int, fill_value = None) -> Iterable:\n \"\"\"\n\n Parameters\n ----------\n iterable\n An iterable to chunk\n n\n The size of the chunks\n fill_value\n A value to fill with when iterable has run out of values, but the last chunk isn't full\n\n Returns\n -------\n Iterable\n An iterator over length ``n`` groups of the input iterable\n \"\"\"\n args = [iter(iterable)] * n\n return itertools.zip_longest(*args, fillvalue = fill_value)\n\n\nclass StrEnum(str, enum.Enum):\n \"\"\"An :class:`enum.Enum` whose members are also strings.\"\"\"\n\n def __repr__(self):\n return f'{self.__class__.__name__}.{self.value.upper()}'\n\n def __str__(self):\n return self.value\n","sub_path":"src/simulacra/utils/misc.py","file_name":"misc.py","file_ext":"py","file_size_in_byte":2529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"328211467","text":"from django.db import models\n# from enum import Enum\nfrom django.contrib.auth.models import User\nfrom django.utils.translation import gettext as _\nfrom model_utils import Choices\n\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\n\n\n# class State(models.Model):\n# state = models.CharField(max_length=20)\n\n# def __str__(self):\n# return self.state\n\n# class Profile(AbstractUser):\n# address = models.CharField(max_length=100)\n# city = models.CharField(max_length=100)\n# state = models.ForeignKey(State, on_delete=models.CASCADE, related_name=\"customer\")\n# zipcode = models.CharField(max_length=10, blank = True)\n\nclass Profile(models.Model):\n user = models.OneToOneField(User, on_delete=models.CASCADE)\n address = 
models.CharField(max_length=100)\n city = models.CharField(max_length=100)\n # state = models.CharField(max_length=40)\n # state = models.ForeignKey(State, on_delete=models.CASCADE, related_name=\"customer\")\n zipcode = models.CharField(max_length=10, blank = True)\n STATE = Choices(('AL', 'Alabama'), \n ('AK', 'Alaska'),\n ('AZ', 'Arizona'), \n ('AR', 'Arkansas'), \n ('CA', 'California'),\n ('CO', 'Colorado'), \n ('CT', 'Connecticut'),\n ('DE', 'Delaware'),\n ('DC', 'District of Columbia'), \n ('FL', 'Florida'), \n ('GA', 'Georgia'), \n ('HI', 'Hawaii'), \n ('ID', 'Idaho'), \n ('IL', 'Illinois'), \n ('IN', 'Indiana'), \n ('IA', 'Iowa'), \n ('KS', 'Kansas'), \n ('KY', 'Kentucky'), \n ('LA', 'Louisiana'), \n ('ME', 'Maine'), \n ('MD', 'Maryland'), \n ('MA', 'Massachusetts'), \n ('MI', 'Michigan'), \n ('MN', 'Minnesota'), \n ('MS', 'Mississippi'), \n ('MO', 'Missouri'), \n ('MT', 'Montana'), \n ('NE', 'Nebraska'), \n ('NV', 'Nevada'), \n ('NH', 'New Hampshire'), \n ('NJ', 'New Jersey'), \n ('NM', 'New Mexico'), \n ('NY', 'New York'), \n ('NC', 'North Carolina'), \n ('ND', 'North Dakota'), \n ('OH', 'Ohio'), \n ('OK', 'Oklahoma'), \n ('OR', 'Oregon'), \n ('PA', 'Pennsylvania'), \n ('RI', 'Rhode Island'), \n ('SC', 'South Carolina'), \n ('SD', 'South Dakota'), \n ('TN', 'Tennessee'), \n ('TX', 'Texas'), \n ('UT', 'Utah'), \n ('VT', 'Vermont'), \n ('VA', 'Virginia'), \n ('WA', 'Washington'), \n ('WV', 'West Virginia'), \n ('WI', 'Wisconsin'), \n ('WY', 'Wyoming'))\n state = models.CharField(choices=STATE, max_length=30)\n \n \n def __str__(self):\n return self.user.first_name\n\n@receiver(post_save, sender=User)\ndef update_user_profile(sender, instance, created, **kwargs):\n if created:\n Profile.objects.create(user=instance)\n instance.profile.save()\n\nclass Product(models.Model):\n # Profile\n profile = models.ForeignKey(Profile, on_delete=models.CASCADE, related_name=\"product\")\n product_name = models.CharField(max_length = 100)\n description = models.TextField()\n qty = models.PositiveSmallIntegerField()\n price = models.DecimalField(max_digits=6, decimal_places=2)\n image = models.ImageField()\n CONDITION = Choices(('Brand new', _('Brand new')), ('Used', _('Used')), ('Like new', _('Like new')))\n # condition = models.CharField(max_length=10, choices=[(tag, tag.value) for tag in ConditionChoice])\n condition = models.CharField(choices=CONDITION, default=CONDITION['Brand new'], max_length=10)\n CATEGORY = Choices(\n ('clothes', _('Clothes')),\n ('shoes', _('Shoes')),\n ('handbags', _('Handbags')),\n ('baby_stuff', _('Baby Stuff')),\n ('household', _('Household')),\n ('electronics', _('Electronics')),\n ('furniture', _('Furniture')),\n ('misc', _('Miscellaneous'))\n )\n category = models.CharField(choices=CATEGORY, max_length=11)\n STATUS = Choices(\n ('sold', _('Sold')),\n ('avaialable', _('Available'))\n )\n sellingStatus = models.CharField(choices=STATUS, max_length=10)\n \n\n def __str__(self):\n return self.product_name\n\n\n\nclass Message(models.Model):\n profile = models.ForeignKey(Profile, on_delete=models.CASCADE, related_name=\"message\")\n content = models.TextField()\n\n def __str__(self):\n return self.content\n\n \n \n# insert into sellnbuy_profile(id, address, city, zipcode, state, user_id)\n# VALUES(1, \"1234 14th street\", \"Gaithersburg\", \"20889\", \"Maryland\", 2);\n\n","sub_path":"sellnbuy/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":5160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} 
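A usage note for the sellnbuy/models.py record above: because of the post_save receiver, saving a new User transparently creates and attaches a Profile. A minimal sketch (assuming migrations are applied; the username and field values here are made up for illustration):

from django.contrib.auth.models import User
user = User.objects.create_user("alice", password="s3cret")  # post_save fires and creates the Profile
user.profile.city = "Gaithersburg"  # the receiver has already attached user.profile
user.profile.state = "MD"           # must be one of the STATE choice keys
user.profile.save()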
+{"seq_id":"444802205","text":"import torch\nfrom models import Actor, Critic\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\n\nclass TD3(object):\n \"\"\" TD3 plus the Ensemble of Critics as an agent object\n to act and update the networkweights, save and laod the weights\n \"\"\"\n def __init__(self, state_dim, action_dim, max_action, args):\n self.actor = Actor(state_dim, action_dim, max_action).to(args.device)\n self.actor_target = Actor(state_dim, action_dim, max_action).to(args.device)\n self.actor_target.load_state_dict(self.actor.state_dict())\n self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), args.lr_actor)\n self.critic = Critic(state_dim, action_dim).to(args.device)\n self.critic_optimizer = torch.optim.Adam(self.critic.parameters(), args.lr_critic)\n self.target_critic = Critic(state_dim, action_dim).to(args.device)\n self.target_critic.load_state_dict(self.target_critic.state_dict())\n self.max_action = max_action\n self.batch_size = args.batch_size\n self.discount = args.discount\n self.tau = args.tau \n self.policy_noise = args.policy_noise\n self.noise_clip = args.noise_clip\n self.policy_freq = args.policy_freq\n self.device = args.device\n self.step = 0\n self.beta = 0\n \n def select_action(self, state):\n state = torch.Tensor(state.reshape(1, -1)).to(self.device)\n return self.actor(state).cpu().data.numpy().flatten()\n \n def compute_beta(self, replay_buffer):\n batch_states, batch_next_states, batch_actions, batch_rewards, batch_dones = replay_buffer.get_last_k_trajectories()\n #state = torch.Tensor(batch_states).to(self.device)\n #action = torch.Tensor(batch_actions).to(self.device)\n store_batch = []\n # create a R_i for the k returns from the buffer\n i = 0\n for idx in range(len(batch_states)):\n if batch_dones[idx][0] != 0:\n i = 0\n print(\"done \", batch_dones[idx])\n R_i = self.discount**i * batch_rewards[idx][0]\n store_batch.append((batch_states[idx], batch_actions[idx], R_i))\n i += 1\n #print(store_batch)\n delta = 0\n for b in store_batch:\n s ,a ,r = b[0], b[1], b[2]\n a = torch.Tensor(a).to(self.device).unsqueeze(0) \n s = torch.Tensor(s).to(self.device).unsqueeze(0) \n Q1, Q2 = self.critic(a, s)\n Q = 0.5 * (Q1 + Q2)\n delta += Q -r \n delta *= (1. / len(batch_states))\n print(\"delta \", delta)\n beta = torch.clamp(delta, 0, 1)\n\n \n\n\n\n\n \n def train(self, replay_buffer, writer, iterations):\n \"\"\" Update function for the networkweights of the Actor and Critis \n current and Target by useing the 3 new features of the TD3 paper\n to the DDPG implementation\n 1. Delay the policy Updates \n 2. Two crtitc networks take the min Q value\n 3. 
Target Policy Smoothing\n            Additionally, an Ensemble Approach of delayed updated critics is used\n            \n        \"\"\"\n        self.step += 1\n        \n        for it in range(iterations):\n            \n            # Step 1: Sample a batch of transitions (s, s', a, r) from the memory\n            batch_states, batch_next_states, batch_actions, batch_rewards, batch_dones = replay_buffer.sample(self.batch_size)\n            # convert the numpy arrays to tensor objects\n            # if cuda is available send data to gpu\n            state = torch.Tensor(batch_states).to(self.device)\n            next_state = torch.Tensor(batch_next_states).to(self.device)\n            action = torch.Tensor(batch_actions).to(self.device)\n            reward = torch.Tensor(batch_rewards).to(self.device)\n            done = torch.Tensor(batch_dones).to(self.device)\n            \n            # Step 2: use the Target Actor to create the action of the next\n            # state (part of the TD-Target)\n            next_action = self.actor_target(next_state)\n            \n            # Step 3: Add Gaussian noise to the action for exploration\n            # clip the action value in case it is outside the boundaries\n            noise = torch.Tensor(batch_actions).data.normal_(0, self.policy_noise).to(self.device)\n            noise = noise.clamp(-self.noise_clip, self.noise_clip)\n            next_action = (next_action + noise).clamp(-self.max_action, self.max_action)\n            \n            # Step 4: Use the delayed-update Target Critics and take the min of the\n            # two Q targets from TD3 to get a single target value\n            target_Q1, target_Q2 = self.target_critic(next_state, next_action)\n            target_Q_min = torch.min(target_Q1, target_Q2)\n            \n            \n            # Step 5: Create the update based on the bellman equation\n            target_Q = reward + ((1 - done) * self.discount * target_Q_min).detach()\n            \n            # Step 6: Use the critic to compute the Q estimate for the current state and action\n            current_Q1, current_Q2 = self.critic(state, action)\n            \n            # Step 7: Compute the critic loss with the mean squared error\n            # loss function\n            critic_loss = F.mse_loss(current_Q1, target_Q) + F.mse_loss(current_Q2, target_Q)\n            writer.add_scalar('critic_loss', critic_loss, self.step)\n            \n\n            # Step 8: Backpropagate this Critic loss and update the parameters\n            # of the two Critic models using the adam optimizer\n            \n            self.critic_optimizer.zero_grad()\n            critic_loss.backward()\n            self.critic_optimizer.step()\n            \n            # Step 9: Delayed update of the Actor model\n            if it % self.policy_freq == 0:\n                actor_loss = -self.critic.Q1(state, self.actor(state)).mean()\n                writer.add_scalar('actor_loss', actor_loss, self.step)\n                self.actor_optimizer.zero_grad()\n                actor_loss.backward()\n                self.actor_optimizer.step()\n                \n                # Step 10: update the weights of the Actor and Critic targets by polyak averaging;\n                # the hyperparameter tau determines the combination of current and\n                # target weights\n                for param, target_param in zip(self.actor.parameters(), self.actor_target.parameters()):\n                    target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)\n                \n                for param, target_param in zip(self.critic.parameters(), self.target_critic.parameters()):\n                    target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)\n    \n    # Making a save method to save a trained model\n    def save(self, filename, directory):\n        torch.save(self.actor.state_dict(), '%s/%s_actor.pth' % (directory, filename))\n        torch.save(self.critic.state_dict(), '%s/%s_critic.pth' % (directory, filename))\n    \n    # Making a load method to load a pre-trained model\n    def load(self, filename, directory):\n        self.actor.load_state_dict(torch.load('%s/%s_actor.pth' % (directory, filename)))\n        
self.critic.load_state_dict(torch.load('%s/%s_critic.pth' % (directory, filename)))\n","sub_path":"BTD3/agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":7374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"432594934","text":"\"\"\"\nConsider the numbers 6969 and 9116. When you rotate them 180 degrees (upside down), these numbers remain the same. To clarify, if we write them down on a paper and turn the paper upside down, the numbers will be the same. Try it and see! Some numbers such as 2 or 5 don't yield numbers when rotated.\n\nGiven a range, return the count of upside down numbers within that range. For example, solve(0,10) = 3, because there are only 3 upside down numbers >= 0 and < 10. They are 0, 1, 8.\n\nMore examples in the test cases.\n\nGood luck!\n\nIf you like this Kata, please try\n\nSimple Prime Streaming\n\nLife without primes\n\nPlease also try the performance version of this kata at Upside down numbers - Challenge Edition\n\n\"\"\"\nud = {\"0\": \"0\", \"1\": \"1\", \"6\": \"9\", \"8\": \"8\", \"9\": \"6\"}\n\n\ndef solve(a, b):\n cnt = 0\n for i in range(a, b):\n n = str(i)\n ans = []\n for j in n:\n if j not in ud.keys():\n ans.append(\"NaN\")\n break\n else:\n ans.append(ud[j])\n n2 = \"\".join(ans)\n if n[::-1] == n2:\n cnt += 1\n\n return cnt\n","sub_path":"6kyu_Upside_down_numbers.py","file_name":"6kyu_Upside_down_numbers.py","file_ext":"py","file_size_in_byte":1116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"585899967","text":"from PIL import Image, ImageDraw, ImageFont\ndef fontsize(mes,position,styl,matn,color,size):\n\tchat_id = mes.from_user.id\n\tim = Image.open(str(mes.from_user.id)+\".jpg\")\n\tWidth,Height = im.size\n\tdraw = ImageDraw.Draw(im)\n\tfont = ImageFont.truetype(styl, size)\n\ttext_w, text_h = draw.textsize(str(matn),font=font)\n\tif position == \"tepa_chap\":\n\t\tx = 0\n\t\ty = 5\n\telif position == \"tepa\":\n\t\tx = (Width-text_w)/2\n\t\ty = 5\n\telif position == \"tepa,_ong\":\n\t\tx = (Width-text_w)-5\n\t\ty = 5\n\telif position == \"orta_chap\":\n\t\tx = 0\n\t\ty = (Height-text_h)/2\n\telif position == \"orta\":\n\t\tx = (Width-text_w)/2\n\t\ty = (Height-text_h)/2\n\telif position == \"orta_ong\":\n\t\tx = (Width-text_w)\n\t\ty = (Height-text_h)/2\n\telif position == \"past_chap\":\n\t\tx = Width-text_w-5\n\t\ty = Height-text_h-5\n\telif position == \"past\":\n\t\tx = (Width-text_w)/2-5\n\t\ty = Height-text_h-5\n\telif position == \"past_ong\":\n\t\tx = Width-text_w-5\n\t\ty = Height-text_h-10-5\n\tdraw.text((x,y), str(matn), fill=color,font=font)\n\tok = im.save(str(chat_id)+'water.jpg', \"JPEG\")\n\tif ok:\n\t\treturn \"true\"","sub_path":"fontsize.py","file_name":"fontsize.py","file_ext":"py","file_size_in_byte":1031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"437105066","text":"###통계학도로서 나는 기본적으로 불린저밴드기법은 말이 안된다고 생각함\n###하지만 구현을 해보는 목적으로 프로젝트를 해보겠음\nimport pandas as pd\nimport numpy as np\nimport sys\nimport matplotlib as pt\nimport matplotlib.pyplot as plt\npt.use(\"Qt5Agg\") ##플랏\nsys.path.append(r'C:\\Users\\pc\\Desktop\\stock\\Modeling\\Basic\\wrapper')\nfrom wrapper import data_pred\nfrom wrapper import visualization\nfrom wrapper import trading\n\npath = 'C:/Users/pc/Desktop/stock/Modeling/Basic/data/'\ncd = 'S&P 500'\nfile_name = path+cd+' Historical Data.csv'\ndf = pd.read_csv(file_name, index_col='Date')\n#print(df)\n\n###전처리\nld = 
data_pred.LoadData()\ndf = ld.date_formatting(df)\ndf = ld.price_df_trimming(df, cd) ##시세만 때서 처리\n#print(df)\n\n##볼린저 밴드 계산\nn = 20\nsigma = 2\n\n##센터라인\ndf['center'] = df[cd].rolling(n).mean()\n##어퍼 bound\ndf['ub'] = df['center'] + sigma*df[cd].rolling(n).std()\n##로워 bound\ndf['lb'] = df['center'] - sigma*df[cd].rolling(n).std()\n#print(df)\n\n\n####데이터 샘플링\nbase_date = '2018-01-08'\nsample = df[base_date:].copy()\n#print(sample[10:30])\n\n## trading book 생성\ntrd = trading.Trade()\ntrd.create_trade_book(sample,cd)\n\n\n#print(trd.book)\n#print(sample)\n## 조건 생성 ##trading bollinger_band로 생성\ntrd.BB_trade(df, base_date,trd.book,cd,n,sigma)\n#print(trd.book.tail())\n###수익률\ntrd.returns(trd.book, cd)\nprint(trd.book['2018-02-01':'2018-03-20'])\n\n###벤치마크 수익률\nbm_rtn = round( trd.book[cd].iloc[-1] / trd.book[cd].iloc[0], 4)\n#print( \"BM return : \", bm_rtn )\n##초과수익률\nACC_rtn = trd.book['acc return'].iloc[-1]\nexs_rtn = ACC_rtn - bm_rtn\n#print( 'Excess return : ', round(exs_rtn, 4))\n\n###시각화 해보자\nv = visualization.Visualize()\n#v.BB_trend_view(sample, cd)\n###plt.show()\n\n### 언제 포지션 들고 있었는지\n#v.position_view(trd.book, cd)\n#plt.show()\n\n\n\n","sub_path":"Basic/Mean_rvt_Bollinger.py","file_name":"Mean_rvt_Bollinger.py","file_ext":"py","file_size_in_byte":1901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"65017688","text":"import argparse\nimport numpy as np\nimport os\nimport pandas as pd\n\ndef load_air_data(data_path, steps=1, threshold=0.005):\n air_csv = np.genfromtxt(\n data_path, dtype=np.float32, delimiter=',',\n skip_header=1, usecols=(1, 2, 3, 4, 5, 6),\n )\n print('air_csv data shape:', air_csv.shape)\n # air_data = np.zeros([1, air_csv.shape[0], air_csv.shape[1]], dtype=np.float32)\n ground_truth = np.zeros([1, air_csv.shape[0]], dtype=np.float32)\n\n for row in range(air_csv.shape[0]):\n if row > steps - 1:\n after = air_csv[row][-1]\n before = air_csv[row - steps][-1]\n rate = (after - before) / before\n # if rate > 0:\n # ground_truth[index][row] = 1\n # else:\n # ground_truth[index][row] = 0\n if rate < -threshold:\n ground_truth[0][row] = 0\n elif rate > threshold:\n ground_truth[0][row] = 2\n else:\n ground_truth[0][row] = 1\n\n air_data = air_csv[np.newaxis, :, :]\n print('air_data', air_data.shape, 'ground_truth', ground_truth.shape)\n return air_data, ground_truth\n\n\ndef load_EOD_data(data_path, market_name, tickers, steps=1, threshold=0.005):\n eod_data = []\n ground_truth = []\n for index, ticker in enumerate(tickers):\n single_EOD = np.genfromtxt(\n os.path.join(data_path, market_name + '_' + ticker + '_1.csv'),\n dtype=np.float32, delimiter=',', skip_header=False\n )\n if market_name == 'NASDAQ':\n single_EOD = single_EOD[:-1, :]\n if index == 0:\n print('single EOD data shape:', single_EOD.shape)\n eod_data = np.zeros([len(tickers), single_EOD.shape[0],\n single_EOD.shape[1] - 5], dtype=np.float32)\n ground_truth = np.zeros([len(tickers), single_EOD.shape[0]],\n dtype=np.float32)\n\n for row in range(single_EOD.shape[0]):\n if row > steps - 1:\n after = single_EOD[row][-1]\n before = single_EOD[row - steps][-1]\n rate = (after - before) / before\n # if rate > 0:\n # ground_truth[index][row] = 1\n # else:\n # ground_truth[index][row] = 0\n if rate < -threshold:\n ground_truth[index][row] = 0\n elif rate > threshold:\n ground_truth[index][row] = 2\n else:\n ground_truth[index][row] = 1\n\n eod_data[index, :, :] = single_EOD[:, 5:]\n return eod_data, ground_truth\n\ndef load_EOD_data_(data_path, market_name, 
tickers, steps=1, threshold=0.005):\n eod_data = []\n ground_truth = []\n for index, ticker in enumerate(tickers):\n single_EOD = np.genfromtxt(\n os.path.join(data_path, market_name + '_' + ticker + '_1.csv'),\n dtype=np.float32, delimiter=',', skip_header=False\n )\n if market_name == 'NASDAQ':\n single_EOD = single_EOD[:-1, :]\n if index == 0:\n print('single EOD data shape:', single_EOD.shape)\n eod_data = np.zeros([len(tickers), single_EOD.shape[0],\n single_EOD.shape[1] - 5], dtype=np.float32)\n ground_truth = np.zeros([len(tickers), single_EOD.shape[0]],\n dtype=np.float32)\n\n for row in range(single_EOD.shape[0] - steps):\n if row > steps - 1:\n after = 0\n for i in range(row, row+steps):\n after += single_EOD[i][-1]\n after /= steps\n before = single_EOD[row-1][-1]\n rate = (after - before) / before\n # if rate > 0:\n # ground_truth[index][row] = 1\n # else:\n # ground_truth[index][row] = 0\n if rate < -threshold:\n ground_truth[index][row] = 0\n elif rate > threshold:\n ground_truth[index][row] = 2\n else:\n ground_truth[index][row] = 1\n\n eod_data[index, :, :] = single_EOD[:, 5:]\n return eod_data, ground_truth\n\ndef get_batch(eod_data, gt_data, offset, seq_len):\n return eod_data[:, offset:offset+seq_len, :], \\\n gt_data[:, offset+seq_len]\n\n\nif __name__ == '__main__':\n desc = 'train a rank lstm model'\n parser = argparse.ArgumentParser(description=desc)\n parser.add_argument('-p', help='path of EOD data',\n default='../data/2013-01-01')\n parser.add_argument('-m', help='market name', default='NASDAQ')\n parser.add_argument('-t', help='fname for selected tickers')\n parser.add_argument('-l', default=4,\n help='length of historical sequence for feature')\n parser.add_argument('-u', default=64,\n help='number of hidden units in lstm')\n parser.add_argument('-s', default=1,\n help='steps to make prediction')\n parser.add_argument('-r', default=0.001,\n help='learning rate')\n parser.add_argument('-a', default=1,\n help='alpha, the weight of ranking loss')\n parser.add_argument('-g', '--gpu', type=int, default=0, help='use gpu')\n args = parser.parse_args()\n\n if args.t is None:\n args.t = args.m + '_tickers_qualify_dr-0.98_min-5_smooth.csv'\n args.gpu = (args.gpu == 1)\n tickers = np.genfromtxt(os.path.join(args.p, '..', args.t),\n dtype=str, delimiter='\\t', skip_header=False)\n\n eod_data, ground_truth = load_EOD_data(data_path=args.p, market_name=args.m, tickers=tickers, steps=1)\n print(eod_data.shape, ground_truth.shape)\n","sub_path":"training/load_data.py","file_name":"load_data.py","file_ext":"py","file_size_in_byte":5817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"182422726","text":"#!/usr/local/bin/python3.7\nr\"\"\"Script to create a Python Dict of Windows UBR versions.\"\"\"\n\nimport requests\nimport bs4\nimport pprint\n\nurl_top_level = \"https://docs.microsoft.com/en-us/windows/release-information/\"\nurl = \"https://winreleaseinfoprod.blob.core.windows.net/winreleaseinfoprod/en-US.html\"\nhtml_file = \"/u/admin/tmp/win_build_ubr.html\"\n\n\ndef dwnload_html(url, html_file):\n r\"\"\"Download the HTML page to a file.\"\"\"\n\n with open(html_file, \"w\") as f:\n h = (requests.get(url)).text\n f.write(h)\n return (0)\n\n\ndef parse_win_table_v1(html_file):\n r\"\"\"Parse the HTML document, one tag at a time.\"\"\"\n # Parse the downloaded file to reduce HTTP calls.\n with open(html_file, \"r\") as f:\n sauce = f.read()\n\n soup = bs4.BeautifulSoup(sauce, \"html.parser\")\n\n update = 
(soup.find('h3').find_next_sibling('p').next_sibling).split()[2]\n print (update)\n\n for x in soup.find_all('h4'):\n t = x.find_next_sibling('table')\n os_details = x.text.replace(')','').split()\n os_ver = os_details[2]\n os_build = os_details[5]\n if (os_ver == '1507'):\n os_build = os_details[-1]\n for t_row in t.find_all(\"tr\")[1::]:\n t_data = t_row.find_all(\"td\")\n (build, ubr) = t_data[0].text.split('.')\n release_date = t_data[1].text\n k = (t_data[3].text)\n if len(k) > 0:\n kb = k.split()[-1]\n else:\n kb = t_data[3].text\n print (os_ver, os_build, build, ubr, release_date, kb)\n exit()\n\n\ndef parse_win_table(html_file):\n r\"\"\"Parse the HTML document, one tag at a time.\"\"\"\n # Parse the downloaded file to reduce HTTP calls.\n with open(html_file, \"r\") as f:\n sauce = f.read()\n\n soup = bs4.BeautifulSoup(sauce, \"html.parser\")\n\n update = (soup.find('h3').find_next_sibling('p').next_sibling).split()[2]\n print (update)\n\n build_dict = {}\n for x in soup.find_all('h4'):\n t = x.find_next_sibling('table')\n os_details = x.text.replace(')','').split()\n os_ver = os_details[2]\n os_build = os_details[5]\n if (os_ver == '1507'):\n os_build = os_details[-1]\n all_kbs = []\n ubr_dict = {}\n for t_row in t.find_all(\"tr\")[1::]:\n t_data = t_row.find_all(\"td\")\n (build, ubr) = t_data[0].text.split('.')\n release_date = t_data[1].text\n k = (t_data[3].text)\n if len(k) > 0:\n kb = k.split()[-1]\n else:\n kb = t_data[3].text\n all_kbs.append(kb)\n ubr_dict[ubr] = [release_date, kb, build]\n # print (os_ver, os_build, build, ubr, release_date, kb)\n ubr_dict[\"kbs\"] = all_kbs\n build_dict[os_ver] = ubr_dict\n # pprint.pprint(build_dict)\n # exit()\n # pprint.pprint(build_dict)\n # exit()\n return (build_dict)\n\n\ndef create_dict():\n build_ubr_dict_template = {\n \"build\": {\n \"ubr\": [\"release_date\", \"kb\", \"os_build\"],\n \"kbs\": [\"all_kbs\"]\n },\n \"1909\": {\n \"116\": ['2019-05-21', \"4505057\", \"18362\"],\n \"kbs\": [\"4505057\", \"4497935\", \"4503293\"]\n }\n }\n\ndef main():\n r\"Fabled main where it all begins.\"\n\n # Enable as needed html_file = dwnload_html(url)\n os_ver_dict = parse_win_table(html_file)\n pprint.pprint(os_ver_dict)\n exit()\n\n ubr = \"/u/tmp/16299.lst\"\n win10_ubr = create_dict()\n build_dict = {}\n ubr_dict = {}\n all_kbs = []\n with open (ubr, 'r') as f:\n for line in f:\n data = line.split()\n (build, ubr) = data[0].split('.')\n release_date = data[1]\n kb = data[-1]\n all_kbs.append(kb)\n ubr_dict[ubr] = [release_date, kb]\n build_dict[build] = ubr_dict\n build_dict[\"kbs\"] = all_kbs\n # print (build_dict)\n print (build_dict[\"kbs\"])\n print(build_dict[\"16299\"][\"431\"])\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"python/admin/w10_ubr.py","file_name":"w10_ubr.py","file_ext":"py","file_size_in_byte":4017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"521965296","text":"from proteus import *\nfrom proteus.default_p import *\nfrom tank import *\nfrom proteus.mprans import Dissipation\nfrom proteus import Context\n\nct = Context.get()\n\nLevelModelType = Dissipation.LevelModel\nif useOnlyVF:\n RD_model = None\n LS_model = None\n dissipation_model = 3\n ME_model = 2\nelse:\n RD_model = 3\n LS_model = 2\n ME_model = 6\n kappa_model = 5\n\ndissipation_model_flag = 1\nif ct.useRANS >= 2:\n dissipation_model_flag=2\ncoefficients = Dissipation.Coefficients(V_model=0+int(ct.movingDomain),\n ME_model=ME_model+int(ct.movingDomain),\n 
LS_model=LS_model+int(ct.movingDomain),\n RD_model=RD_model+int(ct.movingDomain),\n kappa_model=kappa_model+int(ct.movingDomain),\n dissipation_model_flag=dissipation_model_flag+int(ct.movingDomain),#1 -- K-epsilon, 2 -- K-omega\n useMetrics=useMetrics,\n rho_0=rho_0,nu_0=nu_0,\n rho_1=rho_1,nu_1=nu_1,\n g=g,\n c_mu=ct.opts.Cmu,sigma_e=ct.opts.sigma_e,\n sc_uref=dissipation_sc_uref,\n sc_beta=dissipation_sc_beta)\n\nkInflow=ct.kInflow\n\ndissipationInflow=ct.dissipationInflow\n\n\ndirichletConditions = {0: lambda x, flag: domain.bc[flag].dissipation_dirichlet.init_cython()}\n\nadvectiveFluxBoundaryConditions = {0: lambda x, flag: domain.bc[flag].dissipation_advective.init_cython()}\n\ndiffusiveFluxBoundaryConditions = {0: {},\n 1: {1: lambda x, flag: domain.bc[flag].dissipation_diffusive.init_cython()},\n }\n\n \nclass ConstantIC:\n def __init__(self,cval=0.0):\n self.cval=cval\n def uOfXT(self,x,t):\n return self.cval\n\ninitialConditions = {0:ConstantIC(cval=dissipationInflow)}\n","sub_path":"2d/sediment/pipe_scour/dissipation_p.py","file_name":"dissipation_p.py","file_ext":"py","file_size_in_byte":2043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"62235395","text":"from pathlib import Path\n\nimport pytest\n\n\ndef pytest_addoption(parser):\n parser.addoption(\n \"--validate\", action=\"store_true\", help=\"validate JSON from previous test run against exported schema\"\n )\n\n\n@pytest.fixture(scope=\"session\", autouse=True)\ndef set_up_overall(request):\n # in all pytest runs except --validate (which uses the files), clear away the JSON examples and generate fresh\n if not request.config.getoption(\"--validate\", default=False):\n _data_path = Path(__file__).parent.resolve() / \"tests\" / \"qcschema_instances\"\n for fl in _data_path.rglob(\"*.json\"):\n fl.unlink()\n\n\ndef pytest_runtest_setup(item):\n # there's a bug where can only set options if specify path in call, so needs to be ``pytest qcelemental/ --validate``\n\n # skip the validate-generated-instances-against-exported-schema tests on most ``pytest`` runs.\n # run only the validate-generated-instances-against-exported-schema tests on ``pytest --validate`` runs.\n if not item.config.getoption(\"--validate\", default=False) and item.name.startswith(\"test_qcschema\"):\n pytest.skip(\"can't run with --validate option\")\n elif item.config.getoption(\"--validate\", default=False) and not item.name.startswith(\"test_qcschema\"):\n pytest.skip(\"need --validate option to run\")\n\n\n# Uncomment below to probe for tests needing `@using_web`\n\n# import socket\n#\n# class block_network(socket.socket):\n# def __init__(self, *args, **kwargs):\n# raise Exception(\"Network call blocked\")\n#\n# socket.socket = block_network\n","sub_path":"qcelemental/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":1558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"350431544","text":"from PyQt4.QtCore import *\nfrom PyQt4.QtGui import *\nfrom pyquery import PyQuery \nimport urllib,os.path,urllib.parse,re,base64,string,sys\n\ndef format_filename(s):\n \"\"\"Take a string and return a valid filename constructed from the string.\nUses a whitelist approach: any characters not present in valid_chars are\nremoved. 
Also spaces are replaced with underscores.\n \nNote: this method may produce invalid filenames such as ``, `.` or `..`\nWhen I use this method I prepend a date string like '2009_01_15_19_46_32_'\nand append a file extension like '.txt', so I avoid the potential of using\nan invalid filename.\n \n\"\"\"\n valid_chars = \"-_.() %s%s\" % (string.ascii_letters, string.digits)\n filename = ''.join(c for c in s if c in valid_chars)\n # filename = filename.replace(' ','_') # I don't like spaces in filenames.\n return filename\n\nclass CaptureThread(QThread):\n data_downloaded = pyqtSignal(object)\n def __init__(self, linkTags, mainApp ):\n QThread.__init__(self)\n self.linkTags = linkTags\n self.mainApp = mainApp\n\n def run(self):\n savedLinks = []\n for tag in self.mainApp.linkTags:\n link = (self.mainApp.pq(tag).attr(\"href\"))\n if(link in savedLinks):\n continue\n if not link.startswith(\"http://www.autismspeaks.org\"):\n continue\n savedLinks.append(link)\n pq1 = PyQuery(url=link)\n title = pq1(\"h1.page-title\").text()\n content = pq1(\".main .body.node-content.prose\")\n if content.html():\n safefilename = format_filename(title)\n fileurl = os.path.join(self.mainApp.downloadsFolder,safefilename+\".html\")\n self.mainApp.displayMsg(fileurl)\n if(os.path.exists(fileurl)):\n self.mainApp.displayMsg(\"skip \"+title)\n else:\n with open(fileurl, \"w+\") as f:\n f.write(self.mainApp.template.format(title,content))\n\n self.data_downloaded.emit('downloaded')\n\nclass HelloWorld(QDialog):\n def __init__(self):\n QDialog.__init__(self)\n \n # layout\n layout = QVBoxLayout()\n\n urlLayout = QHBoxLayout()\n pathLabel = QLabel(\"Target URL:\")\n self.urlEdit = QLineEdit()\n urlLayout.addWidget(pathLabel)\n urlLayout.addWidget(self.urlEdit)\n layout.addLayout(urlLayout)\n\n\n outputurlLayout = QHBoxLayout()\n outputPathLabel = QLabel(\"Save Path:\")\n self.outputurlEdit = QLineEdit()\n outputurlLayout.addWidget(outputPathLabel)\n outputurlLayout.addWidget(self.outputurlEdit)\n layout.addLayout(outputurlLayout)\n\n\n self.btn = QPushButton(\"Capture\")\n self.statusLabel = QLabel()\n layout.addWidget(self.btn)\n layout.addWidget(self.statusLabel)\n self.setLayout(layout)\n\n # event\n self.btn.clicked.connect(self.capture)\n # default\n userhome = os.path.expanduser('~')\n self.downloadsFolder = os.path.abspath(userhome + '/Downloads/Captures')\n self.urlEdit.setText(\"http://www.autismspeaks.org/research\")\n self.outputurlEdit.setText(self.downloadsFolder)\n self.template = \"\"\"\n \n {}\n {}\n \n \"\"\"\n\n\n def capture(self):\n self.btn.setEnabled(False)\n self.downloadsFolder = self.outputurlEdit.text()\n self.displayMsg(\"start capturing\")\n if not os.path.exists(self.downloadsFolder):\n os.makedirs(self.downloadsFolder)\n\n\n\n self.pq = PyQuery(url=self.urlEdit.text())\n self.linkTags = self.pq(\"a[href]\")\n \n self.downloader = CaptureThread(self.linkTags, self)\n self.downloader.data_downloaded.connect(self.downloadComplete)\n self.downloader.start()\n\n def downloadComplete(self):\n self.displayMsg(\"done\")\n self.btn.setEnabled(True)\n\n def displayMsg(self,msg):\n self.statusLabel.setText(msg)\n print(msg)\n\nif __name__ == \"__main__\":\n\n\n\n app = QApplication(sys.argv)\n dialog = HelloWorld()\n dialog.setFixedSize(450,120)\n dialog.setWindowTitle(\"Site Capturer\")\n dialog.show()\n 
sys.exit(app.exec_())","sub_path":"SiteCapturer/SiteCapturer.py","file_name":"SiteCapturer.py","file_ext":"py","file_size_in_byte":4282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"250302525","text":"# The Three Musketeers Game\r\n\\\r\n# In all methods,\r\n# A 'location' is a two-tuple of integers, each in the range 0 to 4.\r\n# The first integer is the row number, the second is the column number.\r\n# A 'direction' is one of the strings \"up\", \"down\", \"left\", or \"right\".\r\n# A 'board' is a list of 5 lists, each containing 5 strings: \"M\", \"R\", or \"-\".\r\n# \"M\" = Musketeer, \"R\" = Cardinal Richleau's man, \"-\" = empty.\r\n# Each list of 5 strings is a \"row\"\r\n# A 'player' is one of the strings \"M\" or \"R\" (or sometimes \"-\").\r\n#\r\n# For brevity, Cardinal Richleau's men are referred to as \"enemy\".\r\n# 'pass' is a no-nothing Python statement. Replace it with actual code.\r\n\r\ndef create_board():\r\n global board\r\n \"\"\"Creates the initial Three Musketeers board and makes it globally\r\n available (That is, it doesn't have to be passed around as a\r\n parameter.) 'M' represents a Musketeer, 'R' represents one of\r\n Cardinal Richleau's men, and '-' denotes an empty space.\"\"\"\r\n m = 'M'\r\n r = 'R'\r\n board = [ [r, r, r, r, m],\r\n [r, r, r, r, r],\r\n [r, r, m, r, r],\r\n [r, r, r, r, r],\r\n [m, r, r, r, r] ]\r\n\r\ndef set_board(new_board):\r\n \"\"\"Replaces the global board with new_board.\"\"\"\r\n global board\r\n board = new_board\r\n\r\ndef get_board():\r\n \"\"\"Just returns the board. Possibly useful for unit tests.\"\"\"\r\n return board\r\n\r\n\r\ndef all_locations():\r\n \"\"\"Returns a list of all 25 locations on the board.\"\"\"\r\n return [(0,0),(0,1),(0,2),(0,3),(0,4),(1,0),(1,1),(1,2),(1,3),(1,4),(2,0),(2,1),(2,2),(2,3),(2,4),(3,0),(3,1),(3,2),(3,3),(3,4),(4,0),(4,1),(4,2),(4,3),(4,4)]\r\n\r\n\r\ndef string_to_location(s):\r\n \"\"\"Given a two-character string (such as 'A5'), returns the designated\r\n location as a 2-tuple (such as (0, 4)).\r\n The function should raise ValueError exception if the input\r\n is outside of the correct range (between 'A' and 'E' for s[0] and\r\n between '1' and '5' for s[1]\r\n \"\"\"\r\n all_loc = all_locations()\r\n letters = list('ABCDE')\r\n numbers = [i for i in range(1, 6)]\r\n locs = [x+str(y) for x in letters for y in numbers]\r\n list_loc = dict(zip(locs, all_loc))\r\n if s not in list_loc:\r\n raise ValueError\r\n else:\r\n return list_loc[s]\r\n\r\n#def string_to_location(s):\r\n \r\n # all_locations_ = all_locations()\r\n # list_locations = [[all_locations_[0], 'A1'], [all_locations_[1], 'A2'], [all_locations_[2], 'A3'], [all_locations_[3], 'A4'], [all_locations_[4], 'A5'], [all_locations_[5], 'B1'], [all_locations_[6], 'B2'], [all_locations_[7], 'B3'], [all_locations_[8], 'B4'], [all_locations_[9], 'B5'], [all_locations_[10], 'C1'], [all_locations_[11], 'C2'], [all_locations_[12], 'C3'], [all_locations_[13], 'C4'], [all_locations_[14], 'C5'], [all_locations_[15], 'D1'], [all_locations_[16], 'D2'], [all_locations_[17], 'D3'], [all_locations_[18], 'D4'], [all_locations_[19], 'D5'], [all_locations_[20], 'E1'], [all_locations_[21], 'E2'], [all_locations_[22], 'E3'], [all_locations_[23], 'E4'], [all_locations_[24], 'E5']] \r\n # strings = [x[1] for x in list_locations]\r\n #if all(x!=s for x in strings)== True:\r\n # raise ValueError\r\n #for i in range(len(list_locations)):\r\n # if list_locations[i][1] == s:\r\n # return 
list_locations[i][0]\r\n \r\ndef location_to_string(location):\r\n \"\"\"Returns the string representation of a location.\r\n Similarly to the previous function, this function should raise\r\n ValueError exception if the input is outside of the correct range\r\n \"\"\"\r\n all_loc = all_locations()\r\n letters = list('ABCDE')\r\n numbers = [i for i in range(1, 6)]\r\n locs = [x+str(y) for x in letters for y in numbers]\r\n list_loc = dict(zip(all_loc, locs))\r\n if location not in list_loc:\r\n raise ValueError\r\n else:\r\n return list_loc[location]\r\n\r\n#def location_to_string(location):\r\n # \"\"\"Returns the string representation of a location.\r\n # Similarly to the previous function, this function should raise\r\n # ValueError exception if the input is outside of the correct range\r\n # \"\"\" \r\n # all_locations_ = all_locations()\r\n # list_locations = [[all_locations_[0], 'A1'], [all_locations_[1], 'A2'], [all_locations_[2], 'A3'], [all_locations_[3], 'A4'], [all_locations_[4], 'A5'], [all_locations_[5], 'B1'], [all_locations_[6], 'B2'], [all_locations_[7], 'B3'], [all_locations_[8], 'B4'], [all_locations_[9], 'B5'], [all_locations_[10], 'C1'], [all_locations_[11], 'C2'], [all_locations_[12], 'C3'], [all_locations_[13], 'C4'], [all_locations_[14], 'C5'], [all_locations_[15], 'D1'], [all_locations_[16], 'D2'], [all_locations_[17], 'D3'], [all_locations_[18], 'D4'], [all_locations_[19], 'D5'], [all_locations_[20], 'E1'], [all_locations_[21], 'E2'], [all_locations_[22], 'E3'], [all_locations_[23], 'E4'], [all_locations_[24], 'E5']] \r\n # if all(x!=location for x in all_locations_):\r\n # raise ValueError\r\n #for i in range(len(list_locations)):\r\n # if list_locations[i][0] == location:\r\n # return list_locations[i][1]\r\n\r\ndef at(location):\r\n \"\"\"Returns the contents of the board at the given location.\r\n You can assume that input will always be in correct range.\"\"\"\r\n try:\r\n return board[location[0]][location[1]]\r\n except Exception:\r\n return \"None\"\r\n \r\n\r\ndef adjacent_location(location, direction):\r\n \"\"\"Return the location next to the given one, in the given direction.\r\n Does not check if the location returned is legal on a 5x5 board.\r\n You can assume that input will always be in correct range.\"\"\"\r\n if direction == \"right\":\r\n tup = (location[0], location[1]+1)\r\n return tup\r\n elif direction == \"left\":\r\n tup = (location[0], location[1]-1)\r\n return tup\r\n elif direction == \"up\":\r\n tup = (location[0]-1, location[1])\r\n return tup\r\n elif direction == \"down\":\r\n tup = (location[0]+1, location[1])\r\n return tup\r\n\r\n#def is_legal_move_by_musketeer(location, direction):\r\n \"\"\"Tests if the Musketeer at the location can move in the direction.\r\n You can assume that input will always be in correct range. Raises\r\n ValueError exception if at(location) is not 'M'\"\"\"\r\n # if at(location) != 'M':\r\n # raise ValueError\r\n # if at(location) == 'M' and at(adjacent_location(location, direction)) == 'R'\r\n # and 0 <= adjacent_location(location, direction)[0] <= 4\r\n # and 0 <= adjacent_location(location, direction)[1] <= 4:\r\n # return True\r\n #else:\r\n # return False\r\n \r\ndef is_legal_move_by_musketeer(location, direction):\r\n \"\"\"Tests if the Musketeer at the location can move in the direction.\r\n You can assume that input will always be in correct range. 
Raises\r\n ValueError exception if at(location) is not 'M'\"\"\"\r\n if at(location) != 'M':\r\n raise ValueError\r\n else:\r\n return at(adjacent_location(location, direction)) == 'R' and 0 <= adjacent_location(location, direction)[0] <= 4 and 0 <= adjacent_location(location, direction)[1] <= 4\r\n\r\n\r\n\r\ndef is_legal_move_by_enemy(location, direction):\r\n \"\"\"Tests if the enemy at the location can move in the direction.\r\n You can assume that input will always be in correct range. Raises\r\n ValueError exception if at(location) is not 'R'\"\"\"\r\n if at(location) != 'R':\r\n raise ValueError\r\n else:\r\n return at(adjacent_location(location, direction)) == '-' and 0 <= adjacent_location(location, direction)[0] <= 4 and 0 <= adjacent_location(location, direction)[1] <= 4\r\n \r\n\r\ndef is_legal_move(location, direction):\r\n \"\"\"Tests whether it is legal to move the piece at the location\r\n in the given direction.\r\n You can assume that input will always be in correct range.\"\"\"\r\n if at(location) == 'R':\r\n return at(adjacent_location(location, direction)) == '-' and 0 <= adjacent_location(location, direction)[0] <= 4 and 0 <= adjacent_location(location, direction)[1] <= 4\r\n elif at(location) == 'M':\r\n return at(adjacent_location(location, direction)) == 'R' and 0 <= adjacent_location(location, direction)[0] <= 4 and 0 <= adjacent_location(location, direction)[1] <= 4\r\n \r\n\r\ndef can_move_piece_at(location):\r\n \"\"\"Tests whether the player at the location has at least one move available.\r\n You can assume that input will always be in correct range.\r\n You can assume that input will always be in correct range.\"\"\"\r\n directions = [\"right\", \"left\", \"up\", \"down\"]\r\n for i in directions:\r\n if is_legal_move(location, i):\r\n return True\r\n return False\r\n \r\n\r\n\r\ndef has_some_legal_move_somewhere(who):\r\n \"\"\"Tests whether a legal move exists for player \"who\" (which must\r\n be either 'M' or 'R'). Does not provide any information on where\r\n the legal move is.\r\n You can assume that input will always be in correct range.\"\"\"\r\n var = 0\r\n for i in range(len(board)):\r\n for j in range(len(board[i])):\r\n if board[i][j] == who and can_move_piece_at((i,j)):\r\n return True\r\n return False\r\n\r\ndef possible_moves_from(location):\r\n \"\"\"Returns a list of directions ('left', etc.) in which it is legal\r\n for the player at location to move. 
If there is no player at\r\n location, returns the empty list, [].\r\n You can assume that input will always be in correct range.\"\"\"\r\n directions = [\"up\", \"right\", \"down\", \"left\"]\r\n right_directions = []\r\n for i in directions:\r\n if is_legal_move(location, i):\r\n right_directions.append(i)\r\n return right_directions\r\n\r\ndef is_legal_location(location):\r\n \"\"\"Tests if the location is legal on a 5x5 board.\r\n You can assume that input will always be a pair of integers.\"\"\"\r\n return 0 <= location[0] < 5 and 0 <= location[1] < 5\r\n \r\n \r\ndef is_within_board(location, direction):\r\n \"\"\"Tests if the move stays within the boundaries of the board.\r\n You can assume that input will always be in correct range.\"\"\"\r\n return is_legal_location(adjacent_location(location, direction))\r\n\r\n \r\ndef all_possible_moves_for(player):\r\n \"\"\"Returns every possible move for the player ('M' or 'R') as a list\r\n (location, direction) tuples.\r\n You can assume that input will always be in correct range.\"\"\"\r\n directions = [\"up\", \"right\", \"down\", \"left\"]\r\n possible_moves = []\r\n for i in range(len(board)):\r\n for j in range(len(board[i])):\r\n for k in directions:\r\n if board[i][j] == player and is_legal_move((i,j), k):\r\n possible_moves.append([(i,j),k])\r\n return possible_moves\r\n\r\ndef make_move(location, direction):\r\n \"\"\"Moves the piece in location in the indicated direction.\r\n Doesn't check if the move is legal. You can assume that input will always\r\n be in correct range.\"\"\"\r\n player = board[location[0]][location[1]]\r\n board[adjacent_location(location, direction)[0]][adjacent_location(location, direction)[1]] = player\r\n board[location[0]][location[1]] = '-'\r\n\r\ndef choose_computer_move(who):\r\n \"\"\"The computer chooses a move for a Musketeer (who = 'M') or an\r\n enemy (who = 'R') and returns it as the tuple (location, direction),\r\n where a location is a (row, column) tuple as usual.\r\n You can assume that input will always be in correct range.\"\"\"\r\n from random import randint\r\n all_possible_moves = all_possible_moves_for(who)\r\n i = randint(0, len(all_possible_moves)-1)\r\n return (all_possible_moves[i][0], all_possible_moves[i][1])\r\n \r\n\r\ndef is_enemy_win():\r\n \"\"\"Returns True if all 3 Musketeers are in the same row or column.\"\"\"\r\n row_positions = []\r\n column_positions = []\r\n for i in range(len(board)):\r\n for j in range(len(board[i])):\r\n if board[i][j] == 'M':\r\n row_positions.append(i)\r\n column_positions.append(j)\r\n return all(x==row_positions[0] for x in row_positions) or all(x==column_positions[0] for x in column_positions)\r\n \r\n \r\n\r\n#---------- Communicating with the user ----------\r\n\r\ndef print_board():\r\n print(\" 1 2 3 4 5\")\r\n print(\" ---------------\")\r\n ch = \"A\"\r\n for i in range(0, 5):\r\n print(ch, \"|\", end = \" \")\r\n for j in range(0, 5):\r\n print(board[i][j] + \" \", end = \" \")\r\n print()\r\n ch = chr(ord(ch) + 1)\r\n print()\r\n\r\ndef print_instructions():\r\n print()\r\n print(\"\"\"To make a move, enter the location of the piece you want to move,\r\nand the direction you want it to move. Locations are indicated as a\r\nletter (A, B, C, D, or E) followed by an integer (1, 2, 3, 4, or 5).\r\nDirections are indicated as left, right, up, or down (or simply L, R,\r\nU, or D). 
For example, to move the Musketeer from the top right-hand\r\ncorner to the row below, enter 'A5 left' (without quotes).\r\nFor convenience in typing, you may use lowercase letters.\"\"\")\r\n print()\r\n\r\ndef choose_users_side():\r\n \"\"\"Returns 'M' if user is playing Musketeers, 'R' otherwise.\"\"\"\r\n user = \"\"\r\n while user != 'M' and user != 'R':\r\n answer = input(\"Would you like to play Musketeer (M) or enemy (R)? \")\r\n answer = answer.strip()\r\n if answer != \"\":\r\n user = answer.upper()[0]\r\n return user\r\n\r\ndef get_users_move():\r\n \"\"\"Gets a legal move from the user, and returns it as a\r\n (location, direction) tuple.\"\"\" \r\n directions = {'L':'left', 'R':'right', 'U':'up', 'D':'down'}\r\n move = input(\"Your move? \").upper().replace(' ', '')\r\n if (len(move) >= 3\r\n and move[0] in 'ABCDE'\r\n and move[1] in '12345'\r\n and move[2] in 'LRUD'):\r\n location = string_to_location(move[0:2])\r\n direction = directions[move[2]]\r\n if is_legal_move(location, direction):\r\n return (location, direction)\r\n print(\"Illegal move--'\" + move + \"'\")\r\n return get_users_move()\r\n\r\ndef move_musketeer(users_side):\r\n \"\"\"Gets the Musketeer's move (from either the user or the computer)\r\n and makes it.\"\"\"\r\n if users_side == 'M':\r\n (location, direction) = get_users_move()\r\n if at(location) == 'M':\r\n if is_legal_move(location, direction):\r\n make_move(location, direction)\r\n describe_move(\"Musketeer\", location, direction)\r\n else:\r\n print(\"You can't move there!\")\r\n return move_musketeer(users_side)\r\n else: # Computer plays Musketeer\r\n (location, direction) = choose_computer_move('M') \r\n make_move(location, direction)\r\n describe_move(\"Musketeer\", location, direction)\r\n \r\ndef move_enemy(users_side):\r\n \"\"\"Gets the enemy's move (from either the user or the computer)\r\n and makes it.\"\"\"\r\n if users_side == 'R':\r\n (location, direction) = get_users_move()\r\n if at(location) == 'R':\r\n if is_legal_move(location, direction):\r\n make_move(location, direction)\r\n describe_move(\"Enemy\", location, direction)\r\n else:\r\n print(\"You can't move there!\")\r\n return move_enemy(users_side)\r\n else: # Computer plays enemy\r\n (location, direction) = choose_computer_move('R') \r\n make_move(location, direction)\r\n describe_move(\"Enemy\", location, direction)\r\n return board\r\n\r\ndef describe_move(who, location, direction):\r\n \"\"\"Prints a sentence describing the given move.\"\"\"\r\n new_location = adjacent_location(location, direction)\r\n print(who, 'moves', direction, 'from',\\\r\n location_to_string(location), 'to',\\\r\n location_to_string(new_location) + \".\\n\")\r\n\r\ndef start():\r\n \"\"\"Plays the Three Musketeers Game.\"\"\"\r\n users_side = choose_users_side()\r\n board = create_board()\r\n print_instructions()\r\n print_board()\r\n while True:\r\n if has_some_legal_move_somewhere('M'):\r\n board = move_musketeer(users_side)\r\n print_board()\r\n if is_enemy_win():\r\n print(\"Cardinal Richleau's men win!\")\r\n break\r\n else:\r\n print(\"The Musketeers win!\")\r\n break\r\n if has_some_legal_move_somewhere('R'):\r\n board = move_enemy(users_side)\r\n print_board()\r\n else:\r\n print(\"The Musketeers win!\")\r\n break\r\n \r\nif __name__ == \"__main__\":\r\n start()\r\n\r\n","sub_path":"three_musketeers.py","file_name":"three_musketeers.py","file_ext":"py","file_size_in_byte":16507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} 
+{"seq_id":"236821060","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\nfrom sklearn.cluster import KMeans\r\n\r\nnp.random.seed(87654)\r\n\r\ndat = []\r\n\r\nt = 0.5\r\n\r\nfor i in range(20):\r\n\r\n# dat.append(np.random.uniform(size=2))\r\n c = np.random.randint(3)\r\n a = np.random.uniform() * 2 * 3.14\r\n r = t * np.sqrt(np.random.uniform())\r\n\r\n x = r * np.cos(a)\r\n y = r * np.sin(a)\r\n\r\n\r\n dat.append([c/4+x, c/4+y])\r\n\r\nplt.figure()\r\nfor i in range(1, 5):\r\n np.random.seed(98765432)\r\n\r\n inits = np.array([[0.95,0.95],[0.95,0.95],[0.95,0.95]\r\n\r\n ])\r\n km = KMeans(n_clusters=3, init=inits, max_iter=i, n_init=1)\r\n plt.subplot(2, 2, i)\r\n plt.xticks([])\r\n plt.yticks([])\r\n km.fit(dat)\r\n km.cluster_centers_ = np.sort(km.cluster_centers_, axis=0)\r\n c = km.predict(dat)\r\n plt.scatter(*zip(*dat), c=c)\r\n c = km.fit_predict(km.cluster_centers_)\r\n plt.scatter(*zip(*km.cluster_centers_), c='w', marker='*', s=240, edgecolors='r')\r\n plt.title('Iteration: %d'%i)\r\n print(km.cluster_centers_)\r\n\r\n","sub_path":"Chapter08/kmeans_intro.py","file_name":"kmeans_intro.py","file_ext":"py","file_size_in_byte":1023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"113145178","text":"import math\r\nimport sys\r\nsys.setrecursionlimit(1000000)\r\ninf = open(\"in.in\", \"r\")\r\nouf = open('out.out','w')\r\n\r\ndef close_files():\r\n inf.close()\r\n ouf.close()\r\n\r\ndef precount():\r\n pass\r\n\r\nprintcounter = 0\r\ndef printstr(a):\r\n global printcounter\r\n printcounter +=1\r\n print ('Case #%d: %s' % (printcounter,a), file=ouf)\r\n\r\nfull = set(range(10))\r\ndef solvetest():\r\n s = inf.readline().strip()\r\n a = [0]*255\r\n for c in s:\r\n a[ord(c)]+=1\r\n b = [0]*10\r\n b[0] = a[ord('Z')]\r\n b[2] = a[ord('W')]\r\n b[3] = a[ord('H')]-a[ord('G')]\r\n b[4] = a[ord('U')]\r\n b[5] = a[ord('F')]-a[ord('U')]\r\n b[6] = a[ord('X')]\r\n b[7] = a[ord('V')]-b[5]\r\n b[8] = a[ord('G')]\r\n b[1] = a[ord('O')]-b[0]-b[2]-b[4]\r\n b[9] = a[ord('I')]-b[5]-b[6]-b[8]\r\n ans = [str(digit)*col for (digit,col) in enumerate(b)]\r\n #~ print(ans)\r\n #~ b[3] = \r\n printstr(''.join(ans))\r\n#precount()\r\ntestnum = int(inf.readline())\r\nfor test in range(testnum):\r\n solvetest()\r\nclose_files()\r\n\r\n","sub_path":"solutions_5648941810974720_0/Python/progiv/digits.py","file_name":"digits.py","file_ext":"py","file_size_in_byte":1117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"254096390","text":"#3.3 Write a program to prompt for a score \r\n#between 0.0 and 1.0. If the score is out of \r\n#range, print an error. If the score is \r\n#between 0.0 and 1.0, print a grade using the \r\n#following table:\r\n#Score Grade\r\n#>= 0.9 A\r\n#>= 0.8 B\r\n#>= 0.7 C\r\n#>= 0.6 D\r\n#< 0.6 F\r\n#If the user enters a value out of range, \r\n#print a suitable error message and exit. 
\r\n#For the test, enter a score of 0.85.\r\n\r\n\r\ngrade = input(\"What is your grade point:\")\r\n\r\ntry:\r\n gr = float(grade)\r\nexcept:\r\n print(\"Not a number\")\r\n \r\nif gr > 1:\r\n print(\"Out of range\")\r\n quit()\r\nelif gr >= .9:\r\n print(\"A\")\r\nelif gr >= .8:\r\n print(\"B\")\r\nelif gr >= .7:\r\n print(\"C\")\r\nelif gr >= .6:\r\n print(\"D\")\r\nelse:\r\n print(\"F\")","sub_path":"PY4E assigment 3.2.py","file_name":"PY4E assigment 3.2.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"526031829","text":"with open(\"input.txt\") as input:\n raw_claims = [line.strip('\\n').split(\" \") for line in input.readlines()]\n claims = {}\n for claim in raw_claims:\n claims[claim[0]] = [[int(c) for c in claim[2].strip(':').split(',')], [int(c) for c in claim[3].split('x')]]\nsquare_inches = [[False for y in range(1000)] for x in range(1000)]\ncount = 0\nfor claim in claims:\n Claim = claims[claim]\n for y in range(Claim[0][1], Claim[0][1] + Claim[1][1]):\n for x in range(Claim[0][0], Claim[0][0] + Claim[1][0]):\n sq = square_inches[x][y]\n if sq == None: continue\n elif sq == False: square_inches[x][y] = True\n else:\n count+=1\n square_inches[x][y] = None\nprint(str(count))\n","sub_path":"18/3/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"96934600","text":"from django.conf.urls import url\nfrom . import views\nfrom .models import HouseInspectionFaq\n\nurlpatterns = [\n url(r'^faq/$', views.houseInspectionFaq, name='house_inspection_faq'),\n url(r'^manage_inspectors/$', views.manageInspectors, name='manage_inspectors'),\n url(r'^manage_inspectable_houses/$', views.manageInspectableHouses, name='manage_inspectable_houses'),\n url(r'^json/$', views.QuestionRequestJSON.as_view(), name='house_inspection_faq-json'),\n url(r'^question_list/$', views.QuestionList.as_view(), name='question_list'),\n url(r'^detail/(?P\\d+)$', views.FaqDetail.as_view(), name='house_inspection_faq-detail'),\n url(r'^create$', views.FaqCreate.as_view(model=HouseInspectionFaq), name='faq-create'),\n url(r'^update/(?P\\d+)$', views.FaqUpdate.as_view(), name='faq-update'),\n url(r'^delete/(?P\\d+)$', views.FaqDelete.as_view(), name='faq-delete'),\n url(r'^answer/(?P\\d+)$', views.InspectorAnswer.as_view(), name='inspector-answer'),\n url(r'^(?P[APFDS])/(?P\\d+)$', views.modify_status, name='modify-status'),\n url(r'^ta/update/(?P\\d+)$', views.FaqAnswer.as_view(), name='house_inspection-update-answer'),\n url(r'^ta/comment/(?P\\d+)$', views.FaqTaComment.as_view(), name='house_inspection-faq-comment'),\n]","sub_path":"ap/house_inspection/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"171029412","text":"#!/usr/bin/env python\n\nimport sys\n\nfileTime = open(\"Logs/logTime_Set.log\", \"r\")\nfileFor = open(\"Logs/logTime_For.log\", \"r\")\n\nsumTime_set=0.0\nsumCycle_set=0\n\nsumTime_for=0.0\nsumCycle_for=0\n\nfor line in fileTime:\n array = line.strip()\n array2 = array.split(\" \")\n sumTime_set += float(array2[3])\n sumCycle_set += int(array2[1])\n\nfor line in fileFor:\n array = line.strip()\n array2 = array.split(\" \")\n sumTime_for += float(array2[3])\n sumCycle_for += int(array2[1])\n\n\nprint (\"Time\\nSet: \"+ str(sumTime_set) + \" \\t For: \" + 
str(sumTime_for))\nprint (\"\\n\")\nprint (\"Cycle\\nSet: \"+ str(sumCycle_set) + \" \\t For: \" + str(sumCycle_for))\n\n\n","sub_path":"logTime.py","file_name":"logTime.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"37481046","text":"from django.http import HttpResponse, HttpResponseNotFound, HttpResponseRedirect, Http404\nfrom django.shortcuts import render\nfrom django.urls import reverse\nfrom django.template.loader import render_to_string\n\n# Create your views here.\n\ndays_week = {\n \"saturday\": \"Today is Saturday\",\n \"sunday\": \"Today is Sunday\",\n \"monday\": \"Today is Monday\",\n \"tuesday\": \"Today is Tuesday\",\n \"wednesday\": \"Today is Wednesday\",\n \"thursday\": \"Today is Thursday\",\n \"friday\": \"Today is Friday\",\n \"holiday\": None\n}\n\n\ndef index(request):\n return HttpResponse(render_to_string('challenges/index.html'))\n\n\ndef days(request, day):\n if days_week.get(day) is not None:\n context = {\n \"data\": days_week.get(day)\n }\n else:\n # response = render_to_string('404.html')\n # return HttpResponseNotFound(response)\n raise Http404()\n return render(request, \"challenges/days.html\", context)\n\n\ndef fun(request, string):\n if string == \"404\":\n return HttpResponseNotFound(\"It's not valid.\")\n return HttpResponse(f'
wow {string}
')\n\n\ndef fun_by_number(request, num):\n return HttpResponseRedirect(reverse(\"fun\", args=[f'{num}redirect']))\n\n\ndef days_of_week(request):\n days_name = list(days_week.keys())\n\n addresses = [reverse(\"days_of_the_week\", args=[day_name]) for day_name in days_name]\n addresses.reverse()\n context = {\n 'days': days_name,\n 'addresses': addresses\n }\n print(addresses)\n return render(request, \"challenges/listDays.html\", context)","sub_path":"challenges/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"411888307","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Oct 7 14:21:27 2020\r\n\r\n@author: Xu\r\n\r\n案例5:门店盈利能力对比图设计\r\n图表设计:\r\n1. 数据查询:\r\n 门店盈利能力对比分布数据查询程序,\r\n 负责从数据准备中生成的不同日期下门店盈利能力的表中,\r\n 查询门店的营业额,然后返回给调用程序。\r\n2. 图表创建:Bar 簇状图\r\n \r\n\r\n\"\"\"\r\nfrom pyecharts import options as opts\r\nfrom pyecharts.charts import Bar\r\nimport pymysql\r\n\r\n\r\n\r\n# 不同门店的月营业额\r\ndef month_store_query():\r\n # 数据连接\r\n connection = pymysql.connect(host='localhost',\r\n port=3306,\r\n user='root',\r\n password='123456',\r\n db='sakila',\r\n charset='utf8',\r\n cursorclass=pymysql.cursors.DictCursor)\r\n try:\r\n with connection.cursor() as cursor:\r\n # SQL 查询语句\r\n sql = \"select * from dm_month_store_amount \"\r\n try:\r\n # 执行 SQL 语句,返回影响的行数\r\n row_count = cursor.execute(sql)\r\n print(\"%d row(s) affected\" % (row_count))\r\n # 获取所有记录列表\r\n results = cursor.fetchall()\r\n dataX = []\r\n dataY1 = []\r\n dataY2 = []\r\n for row in results:\r\n # 此处不可以用索引访问:row[0]\r\n dataX.append(row[\"payment_date\"])\r\n dataY1.append(row[\"store_1\"])\r\n dataY2.append(row[\"store_2\"])\r\n # 打印结果\r\n print(\"支付时间:%s,门店1的月营业额:%.2f,门店2的月营业额:%.2f\" % (row[\"payment_date\"], row[\"store_1\"], row[\"store_2\"]))\r\n return dataX, dataY1, dataY2\r\n except:\r\n print(\"错误:数据查询操作失败\")\r\n finally:\r\n connection.close()\r\n\r\n\r\n\r\n\r\n\r\n# 执行主函数\r\nif __name__ == '__main__':\r\n dataX, dataY1, dataY2 = month_store_query()\r\n bar = Bar()\r\n bar.add_xaxis(dataX)\r\n bar.add_yaxis(\"Store 1\", dataY1)\r\n bar.add_yaxis(\"Store 2\", dataY2)\r\n bar.set_global_opts(xaxis_opts=opts.AxisOpts(type_=\"category\"),\r\n yaxis_opts=opts.AxisOpts(type_=\"value\"),\r\n title_opts={\"text\":\"商店盈利能力分析图\",\"subtext\":\"单位(美元)\"})\r\n bar.render('store_amount.html')\r\n","sub_path":"PyDataVisualFlask/model/data_5.py","file_name":"data_5.py","file_ext":"py","file_size_in_byte":2593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"613854382","text":"#!/usr/local/bin/python3\n\n\nclass LinkedList:\n def __init__(self, head):\n self.head = Node(head)\n self.head.next = self.head\n\n def add(self, data):\n tmp = self.head\n\n while tmp.next != self.head:\n tmp = tmp.next\n\n node = Node(data)\n tmp.next = node\n node.next = self.head\n\n\n def remove(self):\n pass\n\n def traversal(self):\n print(self.head.data)\n tmp = self.head.next\n while tmp != self.head:\n print(tmp.data)\n tmp = tmp.next\n\n\nclass Node:\n def __init__(self, data):\n self.data = data\n self.next = None\n\n\nmy_list = LinkedList(1)\nmy_list.add(2)\nmy_list.add(3)\nmy_list.add(4)\nmy_list.add(5)\nmy_list.traversal()\n","sub_path":"linked_list.py","file_name":"linked_list.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"442847050","text":"# Bubble 
Sort\n\nfrom typing import List\nimport pytest\n\n\nclass Solution():\n    def bubbleSortOne(self, nums: List) -> List:\n        length = len(nums)\n        count = 0\n        for i in range(length):\n            for l in range(0, length-1):\n                count += 1\n                if nums[l] > nums[l+1]:\n                    nums[l], nums[l+1] = nums[l+1], nums[l]\n        print(count)\n        return nums\n\n    def bubbleSortTwo(self, nums: List) -> List:\n        length = len(nums)\n        count = 0\n        for i in range(length):\n            flag = False\n            for l in range(0, length-1-i):\n                count += 1\n                if nums[l] > nums[l+1]:\n                    nums[l], nums[l+1] = nums[l+1], nums[l]\n        print(count)\n        return nums\n\n    def bubbleSortThree(self, nums: List) -> List:\n        length = len(nums)\n        count = 0\n        for i in range(length):\n            flag = False\n            for l in range(0, length-1-i):\n                count += 1\n                if nums[l] > nums[l+1]:\n                    nums[l], nums[l+1] = nums[l+1], nums[l]\n                    flag = True\n            if flag is False:\n                break\n        print(count)\n        return nums\n\n    def insertSord(self, nums: List) -> List:\n        length = len(nums)\n        count = 0\n        for i in range(1, length):\n            key = nums[i] # the value being inserted (the \"card\" in hand)\n            j = i - 1 # index of the largest element of the sorted prefix not yet compared\n            while j >= 0 and nums[j] > key:\n                count += 1\n                # nums[i], nums[j] = nums[j], nums[i]\n                nums[j+1] = nums[j]\n                j -= 1\n            nums[j+1] = key\n        print(count)\n        return nums\n\n\nclass TestBubbleSort():\n\n    test_data_one = ([4, 5, 6, 3, 2, 1], [1, 2, 3, 4, 5, 6])\n\n    @pytest.mark.parametrize('input_data, assert_data', [test_data_one])\n    def test_bubble_sort(self, input_data, assert_data):\n        assert Solution().bubbleSortThree(input_data) == assert_data\n\n    @pytest.mark.parametrize('input_data, assert_data', [test_data_one])\n    def test_insert_sort(self, input_data, assert_data):\n        assert Solution().insertSord(input_data) == assert_data\n\n\nif __name__ == '__main__':\n    import random\n    import time\n    data = [random.randint(0, 1000) for _ in range(10000)]\n    \n    # start = time.time()\n    # result = Solution().bubbleSortOne(data)\n    # print(result)\n    # end = time.time()\n    # print(f'spend time is {end-start}' )\n\n    # result = Solution().bubbleSortTwo(data)\n    # print(result)\n\n    # start = time.time()\n    # result = Solution().bubbleSortThree(data)\n    # end = time.time()\n    # print(f'spend time is {end-start}')\n\n    start = time.time()\n    result = Solution().insertSord(data)\n    end = time.time()\n    print(f'spend time is {end-start}')","sub_path":"tmp/sorted.py","file_name":"sorted.py","file_ext":"py","file_size_in_byte":2855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"190843516","text":"dictionary = {\n    'time': 'Thời gian',\n    'people': 'Con người',\n    'road': 'Con đường',\n    'school': 'Trường học',\n    'family': 'Gia đình',\n    'room': 'Căn phòng' \n    }\nenglish_word = input(\"Enter the word you need to translate: \")\n\n\ndef translate(dictionary):\n    global english_word\n    for item in dictionary:\n        if english_word == item:\n            return item + \" : \" + dictionary[item]\n\ntranslate_word = translate(dictionary)\nif translate_word:\n    print(translate_word)\nelse:\n    print(english_word, \" not in the dictionary\")\n\n","sub_path":"dictionary.py","file_name":"dictionary.py","file_ext":"py","file_size_in_byte":626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"384609522","text":"##create experimental design for single.object.1000 experiment\nimport glob\nfrom scipy.io import loadmat\nimport os\nfrom numpy import random\nfrom experimental_design.src.experiments import fmri_experiment, make_design_matrix\nbase_directory = 
'/Users/tnaselar/Data/Presentation/single.object.boxes/pics/single.object.400/'\nimagery_run_directories = [base_directory+'imagery_001'] #glob.glob(base_directory+'imagery_*')\nblank_screen = 'rest_frame_%d.png'\nnumber_of_blank_screens = 4 #there are several jittered versions of the background to combat retinal fatigue\nseconds_per_state = 2 ##a \"state\" is either show a cue, show an image, or show a blank \nvols_per_state = 1\nframes_per_state = 20 ##100 msec per frame\nnumber_of_cue_frames = frames_per_state ##per active state\nnumber_of_blank_frames = frames_per_state ##per blank state\nnumber_of_stim_frames_pcp = frames_per_state ##to accomadate flashing stim will be broken up with one blank so we split into two\nnumber_of_unique_stimuli = 64 # 8 images @ 8 locations\nnumber_of_reps_per_unique_stimulus = 2 #each image shown 1 time at each location\nblank_states = [5, .4, 5] #5 blanks at start, ~1 blank per isi, 5 blanks at end\n\ndef imagery_state(cue):\n return [cue]*number_of_cue_frames #+[location]*number_of_stim_frames_img\n \ndef pcp_state(image):\n return [image]*number_of_stim_frames_pcp\n\n ##for each run we have\n ##folder ~ cannonical\n\t\t##cannoncial images of selected objects\n\t\t##text files giving label name for cannonical object\n ##folder ~ frame_images\n\t ##the various frames containning scaled/offset pictures of cannonical objects, their cues, and a blank.\n ##designmatrix: a .mat file with a design matrix in it.\n\ndef blank_state():\n from numpy import random\n return [blank_screen % (0)]*number_of_blank_frames\n #return [blank_screen % (random.randint(0,high=number_of_blank_screens)) for ii in range(number_of_blank_frames)]\n\n##create design matrix\ndesign_matrix = make_design_matrix(number_of_unique_stimuli, number_of_reps_per_unique_stimulus, blank_states = [5, .3, 5], seconds_per_state = 2)\n\n##for each run folder\nwhile imagery_run_directories:\n current_dir = imagery_run_directories.pop()\n img_stim_list = []\n img_stim_list.append(blank_state())\n pcp_stim_list = []\n pcp_stim_list.append(blank_state())\n image_frames = glob.glob(current_dir+\"/frame_files/*image*\")\n image_frames.sort()\n cue_frames = glob.glob(current_dir+\"/frame_files/*cue*\")\n cue_frames.sort()\n dx = list(random.permutation(len(image_frames)))\n while dx:\n random_index = dx.pop()\n current_image = os.path.split(image_frames[random_index])[1] ##this is equal to the image_id of the parent image for this object\n current_cue = os.path.split(cue_frames[random_index])[1]\n img_stim_list.append(imagery_state(current_cue))\n pcp_stim_list.append(pcp_state(current_image))\n ##instaniate an fmri_experiment object\n img_run = fmri_experiment(design_matrix, img_stim_list, seconds_per_state, vols_per_state)\n pcp_run = fmri_experiment(design_matrix, pcp_stim_list, seconds_per_state, vols_per_state)\n ##use object to build frame file and write it\n # img_run.print_frame_list(current_dir+'/frame_files/img_frame_list.txt')\n # pcp_run.print_frame_list(current_dir+'/frame_files/pcp_frame_list.txt')","sub_path":"scripts/experimental_design_for_boxes_experiment.py","file_name":"experimental_design_for_boxes_experiment.py","file_ext":"py","file_size_in_byte":3268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"652049679","text":"import discord\r\nfrom discord.ext import commands\r\nimport asyncio\r\nfrom discord.utils import get\r\nimport random\r\nimport json\r\nimport sys\r\nfrom discord.ext.commands import Bot, has_permissions, 
CheckFailure, BadArgument, MemberConverter\r\n\r\nwith open(r\".\\\\jenova\\\\cells\\\\moogles.json\", \"r\") as moogleslist:\r\n moogles = json.load(moogleslist)\r\n \r\nwith open(r\".\\\\jenova\\\\cells\\\\settings.json\", \"r\") as settingsjson:\r\n settings = json.load(settingsjson)\r\n \r\nwith open(r\".\\\\jenova\\\\cells\\\\itemslist.json\", \"r\") as itemsList:\r\n items = json.load(itemsList)\r\nwith open(r\".\\\\jenova\\\\cells\\\\cities.json\", \"r\") as citilist:\r\n citiesDict = json.load(citilist)\r\n\r\nlogchannel = int(settings['LogChannelID'])\r\n\r\n\r\nclass Moderation(commands.Cog):\r\n def __init__ (self, mog):\r\n self.mog = mog\r\n \r\n @commands.command(hidden=True)\r\n @commands.has_permissions(kick_members=True)\r\n async def setlvlchannel(self,ctx, channel_id: int = None):\r\n if channel_id == None:\r\n await ctx.send('Missing Channel ID')\r\n else:\r\n guild = str(ctx.author.guild)\r\n settings['Guilds'][guild]['LevelUpMessagesChannelID'] = channel_id\r\n channel_new = self.mog.get_channel(channel_id)\r\n await ctx.send(f'Level Up Notifications will be send to: **{channel_new}**')\r\n with open(r\".\\\\jenova\\\\cells\\\\settings.json\", \"w\") as settingsjson:\r\n json.dump(settings, settingsjson, indent=4)\r\n \r\n \r\n @commands.command(hidden= True)\r\n @commands.has_permissions(kick_members=True)\r\n async def setlogchannel(self, ctx, channel_id: int = None):\r\n if channel_id == None:\r\n await ctx.send('Missing Channel ID')\r\n else:\r\n guild = str(ctx.author.guild)\r\n settings['Guilds'][guild]['LogChannelID'] = channel_id\r\n channel_new = self.mog.get_channel(channel_id)\r\n await ctx.send(f'Events will be logged in: **{channel_new}**')\r\n with open(r\".\\\\jenova\\\\cells\\\\settings.json\", \"w\") as settingsjson:\r\n json.dump(settings, settingsjson, indent=4)\r\n \r\n \r\n @commands.command(hidden= True)\r\n @commands.has_permissions(kick_members=True)\r\n async def setwchannel(self, ctx, channel_id: int = None):\r\n if channel_id == None:\r\n await ctx.send('Missing Channel ID')\r\n else:\r\n guild = str(ctx.author.guild)\r\n settings['Guilds'][guild]['Welcome Channel ID'] = channel_id\r\n channel_new = self.mog.get_channel(channel_id)\r\n await ctx.send(f'Welcome messages will be sent to: **{channel_new}**')\r\n with open(r\".\\\\jenova\\\\cells\\\\settings.json\", \"w\") as settingsjson:\r\n json.dump(settings, settingsjson, indent=4) \r\n \r\n\r\n @commands.command(hidden= True)\r\n @commands.has_permissions(kick_members=True)\r\n async def kick(self, ctx, member: discord.Member, *, reason=\"No reason\"):\r\n await member.kick(reason=reason)\r\n embed = discord.Embed(colour=0xf3e24f,title=\"User Kicked\" ,description=f'{member.mention} was kicked by {ctx.author.mention}. [{reason}]')\r\n embed.set_author(name=f\"Moogle:\", icon_url=\"https://cdn.discordapp.com/attachments/642540375365386250/643017953604009985/smallkupo.png\")\r\n await ctx.send(embed=embed)\r\n \r\n channel = self.mog.get_channel(id=628590630464913440)\r\n\r\n await channel.send(embed=embed)\r\n log = self.mog.get_channel(id=logchannel)\r\n await log.send(f'User {member.name} was kicked by {ctx.author}.')\r\n\r\n @commands.command(hidden= True)\r\n @commands.has_permissions(ban_members=True)\r\n async def ban(self, ctx, member: discord.Member, *, reason=\"No reason\"):\r\n await member.ban(reason=reason)\r\n embed = discord.Embed(colour=0xf3e24f,title=\"User Banned\" ,description=f'{member.mention} was banned by {ctx.author.mention}. 
[{reason}]')\r\n        embed.set_author(name=f\"Moogle:\", icon_url=\"https://cdn.discordapp.com/attachments/642540375365386250/643017953604009985/smallkupo.png\")\r\n await ctx.send(embed=embed)\r\n \r\n channel = self.mog.get_channel(id=628590630464913440)\r\n\r\n await channel.send(embed=embed)\r\n log = self.mog.get_channel(id=logchannel)\r\n await log.send(f'User {member.name} was banned by {ctx.author}.')\r\n\r\n @commands.command(aliases=['purge','clear'], hidden=True)\r\n @commands.has_permissions(manage_messages=True)\r\n async def delete(self, ctx, amount=2):\r\n '''Purges the given number of messages (2 by default).'''\r\n await ctx.channel.purge(limit=amount + 1)\r\n embed = discord.Embed(colour=0xf3e24f,title=\"Clean up\" ,description=f'{amount} messages got deleted.')\r\n embed.set_author(name=f\"Moogle:\", icon_url=\"https://cdn.discordapp.com/attachments/642540375365386250/643017953604009985/smallkupo.png\")\r\n error_msg = await ctx.send(embed=embed)\r\n channel_id = error_msg.channel.id\r\n message_id = error_msg.id\r\n await asyncio.sleep(7) \r\n await self.mog.http.delete_message(channel_id, message_id)\r\n \r\n #log = self.mog.get_channel(id=641074528923156490)\r\n #embed = discord.Embed(colour=0xf3e24f,title=\"Message Delete\" ,description=f'Message deleted by member at channel X')\r\n #embed.set_author(name=f\"Moogle:\", icon_url=\"https://cdn.discordapp.com/attachments/642540375365386250/643017953604009985/smallkupo.png\")\r\n #await log.send(embed=embed)\r\n\r\n @delete.error\r\n async def delete_error(self, ctx, error):\r\n if isinstance(error, commands.MissingRequiredArgument):\r\n await ctx.send(\"You need to specify an amount.\")\r\n if isinstance(error,commands.BadArgument):\r\n await ctx.send(\"Give an integer.\")\r\n raise error\r\n \r\n # @commands.command()\r\n # @commands.has_permissions(manage_messages=True)\r\n # async def addrole(self, ctx, member : discord.Member):\r\n # '''Adds a role to a member'''\r\n # await user.add_roles(role)\r\n # await ctx.send(f\"hey {ctx.author.name}, {user.name} has been given a role called: {role.name}\")\r\n \r\n @commands.command(hidden=True)\r\n @commands.bot_has_permissions(manage_nicknames = True)\r\n async def nickname(self, ctx, *name):\r\n '''Change nickname of the bot (BOT OWNER ONLY)'''\r\n nickname = ' '.join(name)\r\n await ctx.me.edit(nick=nickname)\r\n if nickname:\r\n msg = f':ok: **{nickname}**'\r\n else:\r\n msg = f':ok: **{ctx.me.name}**'\r\n await ctx.send(msg)\r\n \r\n @commands.command(aliases=['restart', 'kill'], hidden=True)\r\n async def disconnect(self, ctx, code: int = None):\r\n \"\"\"Restart/kill the bot.\r\n Optionally set exit code for custom handling.\r\n \"\"\"\r\n shinras = discord.utils.get(ctx.author.guild.roles, id=int('628591050285645844'))\r\n if shinras in ctx.author.roles:\r\n \r\n codes = {'restart': 2, 'kill': 1}\r\n code = codes.get(ctx.invoked_with, code)\r\n if code is None:\r\n await ctx.send('Invalid exit code.')\r\n return\r\n self.mog.exit_status = code\r\n embed = discord.Embed(colour=0xf3e24f,title=\"Disconnecting...\" ,description=f'Moogle is leaving the server.')\r\n embed.set_author(name=f\"Moogle:\", icon_url=\"https://cdn.discordapp.com/attachments/642540375365386250/643017953604009985/smallkupo.png\")\r\n await ctx.send(embed=embed)\r\n await self.mog.logout()\r\n else:\r\n with open(r\".\\\\jenova\\\\cells\\\\stamps.json\", \"r\") as stampList:\r\n stamps = json.load(stampList)\r\n embed = discord.Embed(colour=0xf3e24f, description=f\"You do not have permission to do 
that.\")\r\n embed.set_thumbnail(url=f\"{stamps[0]['Sorry Kupo']['Image']}\")\r\n error_msg = await ctx.send(embed=embed)\r\n channel_id = error_msg.channel.id\r\n message_id = error_msg.id\r\n await asyncio.sleep(7) \r\n await self.mog.http.delete_message(channel_id, message_id)\r\n return\r\n \r\n # @disconnect.error\r\n # async def disconnect_error(self, error, ctx):\r\n # if isinstance(error, MissingPermissions):\r\n # text = 'Sorry {}, you do not have permissions to do that!'.format(ctx.message.author)\r\n # await self.mog.send_message(ctx.message.channel, text)\r\n \r\n @commands.command(hidden=True)\r\n async def id(self, ctx, text: str = None):\r\n '''Get Discord ids for channels, users or roles.'''\r\n first_class_role = discord.utils.get(ctx.author.guild.roles, id=634549759549505548)\r\n second_class_role = discord.utils.get(ctx.author.guild.roles, id=634549699159916573)\r\n shinras = discord.utils.get(ctx.author.guild.roles, id=628591050285645844)\r\n turks = discord.utils.get(ctx.author.guild.roles, id=628694622381670410)\r\n if first_class_role in ctx.author.roles or second_class_role in ctx.author.roles or shinras in ctx.author.roles or turks in ctx.author.roles or ctx.author.id == 132306444245532672:\r\n try:\r\n if text == str(ctx.author.name):\r\n await ctx.send(content=f\"Your id is `{ctx.author.id}`\")\r\n return\r\n except:\r\n pass\r\n \r\n try:\r\n text = text.lower()\r\n channel = discord.utils.get(self.mog.get_all_channels(), name=f'{text}')\r\n if channel == None:\r\n pass\r\n else:\r\n channel_id = str(channel.id)\r\n await ctx.send(content=f\"{channel} id is `{channel_id}`\")\r\n return\r\n except:\r\n pass\r\n \r\n try:\r\n member = discord.utils.get(self.mog.get_all_members(), name=f'{text}')\r\n if member == None:\r\n pass\r\n else:\r\n member_id = str(member.id)\r\n await ctx.send(content=f\"{member.name} id is `{member_id}`\")\r\n return\r\n except:\r\n pass\r\n \r\n await ctx.send(content=f\"Could not find anything with that name.\")\r\n else:\r\n with open(r\".\\\\jenova\\\\cells\\\\stamps.json\", \"r\") as stampList:\r\n stamps = json.load(stampList)\r\n embed = discord.Embed(colour=0xf3e24f, description=f\"You do not have permission to do that.\")\r\n embed.set_thumbnail(url=f\"{stamps[0]['Sorry Kupo']['Image']}\")\r\n error_msg = await ctx.send(embed=embed)\r\n channel_id = error_msg.channel.id\r\n message_id = error_msg.id\r\n await asyncio.sleep(7) \r\n await self.mog.http.delete_message(channel_id, message_id)\r\n return\r\n \r\n @commands.command()\r\n async def addreaction(self, ctx, messageid: int = None, reaction: str = None):\r\n first_class_role = discord.utils.get(ctx.author.guild.roles, id=634549759549505548)\r\n second_class_role = discord.utils.get(ctx.author.guild.roles, id=634549699159916573)\r\n shinras = discord.utils.get(ctx.author.guild.roles, id=628591050285645844)\r\n turks = discord.utils.get(ctx.author.guild.roles, id=628694622381670410)\r\n if first_class_role in ctx.author.roles or second_class_role in ctx.author.roles or shinras in ctx.author.roles or turks in ctx.author.roles or ctx.author.id == 132306444245532672:\r\n if messageid == None:\r\n await ctx.send('Missing ID. Must be int')\r\n elif reaction == None:\r\n await ctx.send('Missing reaction. 
Must be string')\r\n else:\r\n \r\n msg = await ctx.channel.fetch_message(messageid)\r\n await msg.add_reaction(f'{reaction}')\r\n await ctx.message.delete()\r\n else:\r\n with open(r\".\\\\jenova\\\\cells\\\\stamps.json\", \"r\") as stampList:\r\n stamps = json.load(stampList)\r\n embed = discord.Embed(colour=0xf3e24f, description=f\"You do not have permission to do that.\")\r\n embed.set_thumbnail(url=f\"{stamps[0]['Sorry Kupo']['Image']}\")\r\n error_msg = await ctx.send(embed=embed)\r\n channel_id = error_msg.channel.id\r\n message_id = error_msg.id\r\n await asyncio.sleep(7) \r\n await self.mog.http.delete_message(channel_id, message_id)\r\n return\r\n \r\n @commands.command(hidden=True)\r\n async def spawncard(self, ctx):\r\n channel_id = ctx.message.channel.id\r\n message_id = ctx.message.id\r\n await self.mog.http.delete_message(channel_id, message_id)\r\n first_class_role = discord.utils.get(ctx.author.guild.roles, id=634549759549505548)\r\n second_class_role = discord.utils.get(ctx.author.guild.roles, id=634549699159916573)\r\n shinras = discord.utils.get(ctx.author.guild.roles, id=628591050285645844)\r\n turks = discord.utils.get(ctx.author.guild.roles, id=628694622381670410)\r\n if first_class_role in ctx.author.roles or second_class_role in ctx.author.roles or shinras in ctx.author.roles or turks in ctx.author.roles or ctx.author.id == 132306444245532672:\r\n card_chance_to_spawn = 1\r\n if card_chance_to_spawn == 1:\r\n with open(r\".\\\\jenova\\\\cells\\\\triadcards.json\", \"r\") as cardlist:\r\n cards = json.load(cardlist)\r\n card_length = len(cards)\r\n card_loot = random.randrange(0,card_length)\r\n \r\n embed = discord.Embed(colour=0xf3e24f,title=\"A Triad Card has appeared!\" ,description=f'`$collect` it before someone else takes it!')\r\n embed.add_field(name=f\"Name:\", value=f\"{cards[card_loot]['Name']}\", inline=True)\r\n embed.set_thumbnail(url=f\"{cards[card_loot]['Image']}\")\r\n msg = await ctx.send(embed=embed)\r\n settings['cardID'] = str(cards[card_loot]['Name'])\r\n settings['lastCardSpawnedAtMSG'] = msg.id\r\n settings['lastCardSpawnedAtCHANNEL'] = msg.channel.id\r\n settings['lastCardImage'] = str(cards[card_loot]['Image'])\r\n with open(r\".\\\\jenova\\\\cells\\\\settings.json\", \"w\") as settingsjson:\r\n json.dump(settings, settingsjson, indent=4)\r\n else:\r\n with open(r\".\\\\jenova\\\\cells\\\\stamps.json\", \"r\") as stampList:\r\n stamps = json.load(stampList)\r\n embed = discord.Embed(colour=0xf3e24f, description=f\"You do not have permission to do that.\")\r\n embed.set_thumbnail(url=f\"{stamps[0]['Sorry Kupo']['Image']}\")\r\n error_msg = await ctx.send(embed=embed)\r\n channel_id = error_msg.channel.id\r\n message_id = error_msg.id\r\n await asyncio.sleep(7) \r\n await self.mog.http.delete_message(channel_id, message_id)\r\n return\r\n \r\n @commands.command(hidden=True)\r\n async def ytannounce(self, ctx, *,text: str = None):\r\n channel_id = ctx.message.channel.id\r\n message_id = ctx.message.id\r\n await self.mog.http.delete_message(channel_id, message_id)\r\n first_class_role = discord.utils.get(ctx.author.guild.roles, id=634549759549505548)\r\n second_class_role = discord.utils.get(ctx.author.guild.roles, id=634549699159916573)\r\n shinras = discord.utils.get(ctx.author.guild.roles, id=628591050285645844)\r\n turks = discord.utils.get(ctx.author.guild.roles, id=628694622381670410)\r\n if first_class_role in ctx.author.roles or second_class_role in ctx.author.roles or shinras in ctx.author.roles or turks in ctx.author.roles:\r\n 
yourStuffChannelID = int(settings['YourStuffChannelID']) #ORIGINAL\r\n yourStuffChannel = self.mog.get_channel(id=yourStuffChannelID) #ORIGINAL\r\n # yourStuffChannel = self.mog.get_channel(id=652670184259190794) #TEST\r\n Youtube = discord.utils.get(ctx.author.guild.roles, id=643858354946572318) #ORIGINAL\r\n # Youtube = discord.utils.get(ctx.author.guild.roles, id=int('651163230630051849')) #TEST\r\n await Youtube.edit(mentionable=True, reason=f'Pingrole') \r\n yt_msg = await yourStuffChannel.send(f\" {text}\\n{Youtube.mention} *To unsubscribe to these alerts please visit the Roles channel and choose the YouTube reactions.*\")\r\n await Youtube.edit(mentionable=False, reason=f'Pingrole')\r\n await yt_msg.add_reaction('<:ff7yeah:640202144821018674>')\r\n await yt_msg.add_reaction('<:ff7miss:640202144842252288>')\r\n await yt_msg.add_reaction('<:ff7hit:640031263788367882>')\r\n else:\r\n await ctx.send(content=f\"You do not have permission to do that.\")\r\n \r\n \r\n \r\n \r\n \r\nasync def check_permission_roles(self, ctx):\r\n first_class_role = discord.utils.get(ctx.author.guild.roles, id=int('634549759549505548'))\r\n second_class_role = discord.utils.get(ctx.author.guild.roles, id=int('634549699159916573'))\r\n shinras = discord.utils.get(ctx.author.guild.roles, id=int('628591050285645844'))\r\n turks = discord.utils.get(ctx.author.guild.roles, id=int('628694622381670410'))\r\n # deny only when the author holds none of these roles (every role must be absent)\r\n if first_class_role not in ctx.author.roles and second_class_role not in ctx.author.roles and shinras not in ctx.author.roles and turks not in ctx.author.roles:\r\n await ctx.send('You do not have permission to do that.')\r\n return\r\n\r\n\r\ndef setup(mog):\r\n mog.add_cog(Moderation(mog))\r\n print('Moderation loaded.')\r\n","sub_path":"cogs/moderation.py","file_name":"moderation.py","file_ext":"py","file_size_in_byte":17745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"380154002","text":"# Copyright 2017 The Fuchsia Authors. 
All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\n\"\"\"Recipe for checking licenses in the repo hosting third-party Rust crates.\"\"\"\n\nfrom recipe_engine.config import ReturnSchema, Single\nfrom recipe_engine.recipe_api import Property\n\n\nDEPS = [\n 'infra/jiri',\n 'recipe_engine/context',\n 'recipe_engine/path',\n 'recipe_engine/properties',\n 'recipe_engine/raw_io',\n 'recipe_engine/step',\n]\n\nPROPERTIES = {\n 'category': Property(kind=str, help='Build category', default=None),\n 'patch_gerrit_url': Property(kind=str, help='Gerrit host', default=None),\n 'patch_project': Property(kind=str, help='Gerrit project', default=None),\n 'patch_ref': Property(kind=str, help='Gerrit patch ref', default=None),\n 'patch_storage': Property(kind=str, help='Patch location', default=None),\n 'patch_repository_url': Property(kind=str, help='URL to a Git repository',\n default=None),\n}\n\n\ndef RunSteps(api, category, patch_gerrit_url, patch_project, patch_ref,\n patch_storage, patch_repository_url):\n api.jiri.ensure_jiri()\n\n with api.context(infra_steps=True):\n api.jiri.init()\n api.jiri.import_manifest('runtimes/rust',\n 'https://fuchsia.googlesource.com/manifest')\n api.jiri.import_manifest('build',\n 'https://fuchsia.googlesource.com/manifest')\n api.jiri.update()\n revision = api.jiri.project(['rust-crates']).json.output[0]['revision']\n api.step.active_result.presentation.properties['got_revision'] = revision\n\n cmd = [\n api.path['start_dir'].join('scripts', 'check_rust_licenses.py'),\n '--verify',\n '--directory',\n api.path['start_dir'].join('third_party', 'rust-crates', 'vendor'),\n ]\n api.step('verify licenses', cmd)\n\n\ndef GenTests(api):\n yield api.test('basic')\n yield api.test('patch') + api.properties(\n patch_ref='abcd1234',\n patch_gerrit_url='https://abcd.com/1234',\n )\n","sub_path":"recipes/third_party_rust_crates.py","file_name":"third_party_rust_crates.py","file_ext":"py","file_size_in_byte":2035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"351796838","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n\n\ntrain=pd.read_csv('./data/dacon/comp1/train.csv',header=0,index_col=0) # row 0 is the header, column 0 is the index / both header and index are present\ntest=pd.read_csv('./data/dacon/comp1/test.csv',header=0, index_col=0)\n# submission=pd.read_csv('./data/dacon/comp1/sample_submission.csv',header=0,index_col=0)\n\nprint(\"train.shape:\",train.shape) # (10000, 75) # x_train , x_test , y_train , y_test / evaluation also uses train\nprint(\"test.shape:\",test.shape) # (10000, 71) # becomes x_predict # has no y values\n# print(\"submission.shape:\",submission.shape) # (10000, 4) # becomes y_predict\n\n# test + submission = train\n# test has no y values\n\n# outliers cannot be detected here, but missing values can\nprint(train.isnull().sum())\n\ntrain=train.interpolate() # interpolation // linear // not perfect but decent (~85%) // fits a line per column and fills the gaps along it // column-wise interpolation\ntrain=train.fillna(method='bfill')\nprint(train.isnull().sum())\nprint(\"train:\",train.head())\nprint(test.isnull().sum())\ntest=test.interpolate()\ntest=test.fillna(method='bfill')\nprint(\"test:\",test.head())\n\nnp.save('./data/comp1_train.npy',arr=train)\nnp.save('./data/comp1_test.npy',arr=test)\n\n# 1. data\ntrain=np.load('./data/comp1_train.npy')\ntest=np.load('./data/comp1_test.npy')\n\nfrom sklearn.model_selection import train_test_split\nfrom keras.layers import Dense, LSTM, Conv2D, MaxPooling2D, Flatten, Input\nfrom keras.models import Sequential, Model\nfrom sklearn.preprocessing import StandardScaler, MinMaxScaler, RobustScaler\nfrom sklearn.decomposition import PCA\nfrom sklearn.model_selection import KFold, cross_val_score, RandomizedSearchCV\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.neighbors import KNeighborsRegressor\nfrom sklearn.svm import SVC\n\nx=train[0:,0:71]\ny=train[0:,71:]\nprint(\"x.shape:\",x.shape) # (10000, 71)\nprint(\"y.shape:\",y.shape) # (10000, 4)\n\nkfold=KFold(n_splits=3,shuffle=True) \n\nx_train,x_test,y_train,y_test=train_test_split(x,y,random_state=60,test_size=0.2)\n\nprint(\"x_train.shape:\",x_train.shape)\nprint(\"x_test.shape:\",x_test.shape)\n\nx_train=MinMaxScaler().fit_transform(x_train)\nx_test=MinMaxScaler().fit_transform(x_test)\ntest=MinMaxScaler().fit_transform(test)\n\n\n# parameters provided by RandomForest\n# parameters={\n# 'n_estimators':[10,100],\n# 'max_depth':[6,8,10,12],\n# 'min_samples_leaf':[10,20,30],\n# 'min_samples_split':[10,20,30],\n# }\n\nmodel=RandomForestRegressor()\nmodel.fit(x_train,y_train)\n\ny_pred=model.predict(x_test)\nprint(\"y_pred:\",y_pred)\n\n# print(\"best parameters:\",model.best_estimator_)\n\nprint(\"x_train\",x_train)\nprint(\"x_test\",x_test)\n\nfrom sklearn.metrics import mean_absolute_error \nmae=mean_absolute_error(y_test,y_pred)\nprint(\"mae:\",mae)\n","sub_path":"data/dacon/comp1/dacon1_5.py","file_name":"dacon1_5.py","file_ext":"py","file_size_in_byte":2828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"233076154","text":"import subprocess\nimport random\n\ndef take_picture():\n n = random.randint(0, 100000) \n path = \"/tmp/\" + str(n) + \".jpg\"\n cmd = \"raspistill -o \" + path\n print(cmd)\n subprocess.call(cmd, shell=True)\n return path\n\nif __name__ == \"__main__\":\n take_picture()\n","sub_path":"take_picture.py","file_name":"take_picture.py","file_ext":"py","file_size_in_byte":274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"553430198","text":"from Model import classifier\nfrom DataProcessor import filepath\nimport pickle as pkl\n\n\ndef run():\n rel2id = pkl.load(open(filepath.rel2id_file('baidu'), 'rb'))\n problem = []\n print('Begin to run...')\n for rel in rel2id.values():\n try:\n classifier.stacking_model_fit_for_rel('baidu', rel)\n except Exception as e:\n print(rel, e)\n problem.append(rel)\n continue\n\n pkl.dump(problem, open('baidu_problem', 'wb'))\n print('End')","sub_path":"run/baidu.py","file_name":"baidu.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"445678695","text":"'''\n MODIFIED BY: Thomas Weathers - May 2020\n Created by: Erich Kramer - April 2017\n Apache License\n If using this code please cite creator.\n\n'''\n\nfrom Board import Board\n\n\nx = Board(15, 15);\n\n\nx.set_cell( 4, 4, 'x')\n\nx.set_cell( 1, 3, 'B')\n\n\nx.display()\n\ny = x.cloneBoard();\ny.display();\n","sub_path":"assignment_2/demoBoard.py","file_name":"demoBoard.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"34585378","text":"import os, sys\nfrom 
JMTucker.Tools.BasicAnalyzer_cfg import cms, process, add_analyzer\n\n#process.MessageLogger.cerr.FwkReport.reportEvery = 1\nprocess.maxEvents.input = 100\nprocess.source.fileNames = ['file:pat.root']\nprocess.TFileService.fileName = 'resolutions_histos.root'\n\n########################################################################\n\nfrom HLTrigger.HLTfilters.hltHighLevel_cfi import hltHighLevel\nprocess.goodDataFilter = hltHighLevel.clone()\nprocess.goodDataFilter.TriggerResultsTag = cms.InputTag('TriggerResults', '', 'PAT')\nprocess.goodDataFilter.HLTPaths = ['eventCleaningAll'] # can set to just 'goodOfflinePrimaryVertices', for example\nprocess.goodDataFilter.andOr = False # = AND\n\nprocess.triggerFilter = hltHighLevel.clone()\nprocess.triggerFilter.HLTPaths = ['HLT_QuadJet50_v*']\nprocess.triggerFilter.andOr = True # = OR\n\n########################################################################\n\nprocess.load('JMTucker.Tools.ResolutionsHistogrammer_cfi')\n\nfor x in ['WithTrigger']:\n setattr(process, 'histos' + x, process.histos.clone())\n\n########################################################################\n\nprocess.p0 = cms.Path( process.histos)\nprocess.p1 = cms.Path(process.triggerFilter * process.histosWithTrigger)\n\nif 'debug' in sys.argv:\n from JMTucker.Tools.CMSSWTools import file_event_from_argv\n file_event_from_argv(process)\n process.MessageLogger.cerr.FwkReport.reportEvery = 1\n process.histos.print_info = True\n process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')\n process.printList = cms.EDAnalyzer('ParticleListDrawer',\n maxEventsToPrint = cms.untracked.int32(100),\n src = cms.InputTag('genParticles'),\n printOnlyHardInteraction = cms.untracked.bool(False),\n useMessageLogger = cms.untracked.bool(False)\n )\n process.p0.insert(0, process.printList)\n\ndef run_on_data(dataset=None, datasets=None):\n if 'debug' in sys.argv:\n process.p0.remove(process.printList) # printList was inserted into p0 above\n\n add_analyzer('EventIdRecorder')\n\n if dataset and datasets:\n veto_filter = cms.EDFilter('VetoOtherDatasets', datasets_to_veto = cms.vstring(*[d for d in datasets if d != dataset]))\n setattr(process, 'dataset%sOnly' % dataset, veto_filter)\n for path_name, path in process.paths_().iteritems():\n path.insert(0, veto_filter)\n\n#run_on_data('MultiJet', ['MultiJet', 'JetHT', 'MuHad', 'ElectronHad'])\n\nif __name__ == '__main__' and hasattr(sys, 'argv') and 'submit' in sys.argv:\n if 'debug' in sys.argv:\n raise RuntimeError('refusing to submit jobs in debug (verbose print out) mode')\n\n from JMTucker.Tools.Samples import background_samples, smaller_background_samples, mfv_signal_samples, data_samples\n from JMTucker.Tools.CRABSubmitter import CRABSubmitter\n\n def pset_adder(sample):\n to_add = []\n if not sample.is_mc:\n to_add.append('run_on_data()')\n return to_add\n\n cs = CRABSubmitter('ResolutionsHistos',\n total_number_of_events = -1,\n events_per_job = 10000,\n use_ana_dataset = True,\n CMSSW_use_parent = 1,\n pset_modifier = pset_adder\n )\n\n samples = mfv_signal_samples + background_samples + [s for s in smaller_background_samples if s.name not in 'ttgjets ttwjets ttzjets']\n cs.submit_all(samples)\n","sub_path":"Tools/test/resolutions_histos.py","file_name":"resolutions_histos.py","file_ext":"py","file_size_in_byte":3580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"67871465","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.4 (62061)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 
8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/ice/adverlet/vocabulary.py\n# Compiled at: 2008-12-22 07:00:12\n__license__ = 'GPL v.3'\nfrom zope.component import getUtilitiesFor\nfrom zope.schema.vocabulary import SimpleVocabulary\n\ndef tinyMcePluginsVocabulary(context):\n plugins = [\n 'style', 'layer', 'table', 'save', 'advhr', 'advimage', 'advlink', 'emotions', 'iespell', 'insertdatetime', 'preview', 'zoom', 'media', 'searchreplace', 'print', 'contextmenu', 'paste', 'directionality', 'fullscreen', 'noneditable', 'visualchars', 'nonbreaking', 'xhtmlxtras']\n return SimpleVocabulary.fromValues(plugins)","sub_path":"pycfiles/ice.adverlet-0.2.3-py2.4/vocabulary.py","file_name":"vocabulary.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"621441625","text":"from app.Named import Named\nfrom app.optimizers.IterativeOptimizer import IterativeOptimizer\nfrom model.SolutionCandidate import SolutionCandidate\nfrom model.Vector import Vector\nimport numpy as np\nfrom optimize.snopt7 import SNOPT_solver\nfrom config.Config import Config\n\n\nclass AugmentedLagrangianOptimizer(IterativeOptimizer, Named):\n def __init__(self, max_iter=100, tolerance=1e-06,\n teta=0.5, delta=10):\n IterativeOptimizer.__init__(self, max_iter, tolerance)\n self.teta = teta\n self.delta = delta\n self.model = None\n self.rho = None\n self.gamma = None\n self.gamma_hat = None\n self.snopt_sol = None\n\n def obj_f(self, status, x, needF, needG, cu, iu, ru):\n x_var = x[0:self.n]\n y_var = x[self.n:self.n * 2]\n\n Q = self.problem.Q.as_numpy_array()\n gamma = self.gamma.as_numpy_array()\n rho = self.rho.as_numpy_array()\n mu = self.problem.mu.as_numpy_array()\n\n obj = np.dot(np.dot(Q, x_var), x_var)\n gamma_obj = np.dot(gamma, x_var * y_var)\n rho_obj = 0.5 * np.dot(rho, x_var * x_var * y_var * y_var)\n\n f = [obj + gamma_obj + rho_obj, np.dot(x_var, mu), np.sum(x_var), np.sum(y_var)]\n g = [np.append(2 * np.dot(Q, x_var) + gamma * y_var + rho * x_var * y_var * y_var,\n gamma * x_var + rho * x_var * x_var * y_var)]\n return status, f, g\n\n def snopt_optimize(self):\n inf_bnd = 1.1e20\n x0 = self.candidate.to_standard_vector().as_numpy_array()\n x_low = np.array([0] * self.n * 2)\n x_upp = np.append(self.problem.u.as_numpy_array(), np.array([1] * self.n))\n f_low = np.array([-inf_bnd, self.problem.alpha, -inf_bnd, self.n - self.chi])\n f_upp = np.array([inf_bnd, inf_bnd, 1, inf_bnd])\n\n obj_row = 1\n snopt = SNOPT_solver()\n\n A = np.array([np.zeros(self.n * 2),\n np.append(self.problem.mu.as_numpy_array(), np.zeros(self.n)),\n np.append(np.ones(self.n), np.zeros(self.n)),\n np.append(np.zeros(self.n), np.ones(self.n))\n ])\n\n G = np.array([np.ones(self.n * 2),\n np.zeros(self.n * 2),\n np.zeros(self.n * 2),\n np.zeros(self.n * 2)\n ])\n #TODO: parameterize\n snopt.setOption(\"Specs file\", Config.get_specs_file())\n snopt.snopta(name='PAL', x0=x0, xlow=x_low, xupp=x_upp,\n Flow=f_low, Fupp=f_upp, A=A, G=G, ObjRow=obj_row,\n usrfun=self.obj_f)\n self.snopt_sol = snopt.x\n\n def _real_optimize(self):\n self.rho = Vector([10 for _ in range(0, self.n)])\n self.max_gamma = 1e20\n self.min_gamma = -1e20\n self.gamma = Vector.zeros(self.n)\n self.gamma_hat = Vector.zeros(self.n)\n while not self._stopping_condition():\n self.snopt_optimize()\n self.update_candidate()\n self._print_iteration_aftermath()\n self.update_rho()\n self.update_gamma()\n self.candidate = self.next_candidate\n self.iteration += 1\n for 
monitor in self.monitors:\n monitor.on_iteration_ending(self)\n\n def _print_header_columns(self):\n print(\"{:20} {:20} {:20} {:20}\".format(\"Iteration\", \"Objective\", \"PAL-Objective\", \"Maximum violation\"))\n\n def _print_iteration_aftermath(self):\n print(\n \"{:<20d} {:<20.2f} {:<20.2f} {:<20.2e}\".format(self.iteration,\n self.problem.objective(self.next_candidate.x),\n self.problem.pal_objective(self.next_candidate, self.gamma,\n self.rho),\n max(abs(self.next_candidate.x.elementwise_multiplication(\n self.next_candidate.y).as_numpy_array()))))\n\n def update_rho(self):\n rho_array = self.rho.as_numpy_array()\n for i in range(0, len(rho_array)):\n if abs(self.next_candidate.x.get(i) * self.next_candidate.y.get(i)) > self.teta * abs(\n self.candidate.x.get(i) * self.candidate.y.get(i)):\n rho_array[i] *= self.delta\n self.rho = Vector(rho_array)\n\n def update_gamma(self):\n self.gamma = self.gamma_hat + self.next_candidate.x_elementwise_multiplied_per_y().elementwise_multiplication(\n self.rho)\n gamma_list = []\n for i in range(0, self.gamma.length()):\n if self.gamma.get(i) > self.max_gamma:\n gamma_list.append(self.max_gamma)\n elif self.gamma.get(i) < self.min_gamma:\n gamma_list.append(self.min_gamma)\n else:\n gamma_list.append(self.gamma.get(i))\n self.gamma_hat = Vector(gamma_list)\n\n def update_candidate(self):\n x_array = []\n y_array = []\n for i in range(0, self.n):\n x_array.append(self.snopt_sol[i] if self.snopt_sol[i] >= 0 else 0)\n y_array.append(self.snopt_sol[self.n + i] if self.snopt_sol[self.n + i] >= 0 else 0)\n self.next_candidate = SolutionCandidate(Vector(x_array), Vector(y_array))\n\n def get_name(self):\n return \"Snopt\"\n","sub_path":"PortfolioTest/app/optimizers/augmented_lagrangian/AugmentedLagrangianOptimizer.py","file_name":"AugmentedLagrangianOptimizer.py","file_ext":"py","file_size_in_byte":5468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"595791374","text":"# -*- coding: utf-8 -*-\n# Copyright (c) 2020. 
Distributed under the terms of the MIT License.\nfrom typing import Dict\n\nimport numpy as np\nfrom pydefect.analyzer.band_edge_states import EdgeCharacters, EdgeCharacter\nfrom pymatgen import Spin, Structure\nfrom pymatgen.io.vasp import Procar, Vasprun, Outcar\nfrom vise.analyzer.vasp.band_edge_properties import eigenvalues_from_vasprun\n\n\nclass MakeEdgeCharacters:\n    def __init__(self,\n procar: Procar,\n vasprun: Vasprun,\n outcar: Outcar,\n neighboring_atom_indices):\n self.orbs = procar.data\n self.structure = vasprun.final_structure\n self.eigenvalues = eigenvalues_from_vasprun(vasprun)\n self.nelect = outcar.nelect\n self.mag = outcar.total_mag if outcar.total_mag else 0.0\n self.neighboring_atom_indices = neighboring_atom_indices\n\n @property\n def edge_characters(self):\n chars = []\n for spin, eigenvalues in self.eigenvalues.items():\n if spin == Spin.up:\n vbm_band_idx = int(round((self.nelect + self.mag) / 2)) - 1\n else:\n vbm_band_idx = int(round((self.nelect - self.mag) / 2)) - 1\n cbm_band_idx = vbm_band_idx + 1\n\n vbm = np.amax(eigenvalues[:, vbm_band_idx], axis=0)\n vbm_kpt_idx = np.argwhere(eigenvalues[:, vbm_band_idx] == vbm)[0][0]\n hob_bottom_e = np.amin(eigenvalues[:, vbm_band_idx], axis=0)\n\n cbm = np.amin(eigenvalues[:, cbm_band_idx], axis=0)\n cbm_kpt_idx = np.argwhere(eigenvalues[:, cbm_band_idx] == cbm)[0][0]\n lub_top_e = np.amax(eigenvalues[:, cbm_band_idx], axis=0)\n\n vbm_character = calc_orbital_character(self.orbs, self.structure, spin, vbm_kpt_idx, vbm_band_idx)\n cbm_character = calc_orbital_character(self.orbs, self.structure, spin, cbm_kpt_idx, cbm_band_idx)\n\n if self.neighboring_atom_indices:\n vbm_participation_ratio = calc_participation_ratio(self.orbs, spin, vbm_kpt_idx, vbm_band_idx, self.neighboring_atom_indices)\n cbm_participation_ratio = calc_participation_ratio(self.orbs, spin, cbm_kpt_idx, cbm_band_idx, self.neighboring_atom_indices)\n else:\n vbm_participation_ratio = None\n cbm_participation_ratio = None\n\n chars.append(EdgeCharacter(hob_bottom_e, lub_top_e, vbm, cbm, vbm_character, cbm_character, vbm_participation_ratio, cbm_participation_ratio))\n\n return EdgeCharacters(chars)\n\n\ndef calc_participation_ratio(orbitals: Dict[Spin, np.ndarray],\n spin: Spin,\n kpt_index: int,\n band_index: int,\n atom_indices: list) -> float:\n \"\"\" Returns sum of participation ratios at atom_indices sites\n\n The PROCAR data has the form below. Note that VASP uses 1-based indexing,\n but all indices are converted to 0-based here.::\n { spin: np.ndarray accessed with (k-point index, band index,\n ion index, orbital index) }\n\n Note that the k-point weight is not considered, so all the k-points are\n treated equally.\n\n Return (float):\n float of the participation ratio.\n \"\"\"\n sum_per_atom = np.sum(orbitals[spin][kpt_index, band_index, :, :], axis=1)\n return np.sum(sum_per_atom[atom_indices]) / np.sum(sum_per_atom)\n\n\ndef calc_orbital_character(orbitals,\n structure: Structure,\n spin: Spin,\n kpt_index: int,\n band_index: int):\n \"\"\" Consider the two patterns of orbitals.\n\n LORBIT 10 -> consider only \"s\", \"p\", \"d\" orbitals\n LORBIT >=11 -> consider \"s\", \"px\", \"py\", \"pz\",.. 
orbitals\n\n    \"\"\"\n\n    def projection_sum(atom_indices: tuple, first: int, last: int):\n end = last + 1\n procar_sum = np.sum(orbitals[spin]\n [kpt_index, band_index, atom_indices, first:end])\n return float(procar_sum)\n\n orbital_components = {}\n azimuthal = len(orbitals[Spin.up][0, 0, 0]) > 5\n\n for element in structure.symbol_set:\n # get list of index\n indices = structure.indices_from_symbol(element)\n if azimuthal:\n orbital_components[element] = \\\n [round(projection_sum(indices, 0, 0), 3),\n round(projection_sum(indices, 1, 3), 3),\n round(projection_sum(indices, 4, 8), 3)]\n try:\n orbital_components[element].append(round(projection_sum(indices, 9, 16), 3))\n except KeyError:\n pass\n else:\n orbital_components[element] = \\\n [round(projection_sum(indices, 0, 0), 3),\n round(projection_sum(indices, 1, 1), 3),\n round(projection_sum(indices, 2, 2), 3)]\n return orbital_components\n\n\n\n\n","sub_path":"pydefect/cli/vasp/make_edge_characters.py","file_name":"make_edge_characters.py","file_ext":"py","file_size_in_byte":4968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"230799564","text":"import os\nimport bpy \nimport bmesh\nimport scipy.io as sio\nimport sys\nimport numpy as np\n\n\n# this whole file is to make manageable obj files from overly large or small ones \n# this is done for the latent loss calculations only\n\n\ndef triangulate_edit_object(obj):\n\tme = obj.data\n\tbm = bmesh.from_edit_mesh(me)\n\tbmesh.ops.triangulate(bm, faces=bm.faces[:], quad_method=0, ngon_method=0)\n\tbmesh.update_edit_mesh(me, True)\n\n\n\n# import arguments\nmodel = sys.argv[-3]\nlocation_info = sys.argv[-2]\nlocation_obj = sys.argv[-1]\n\n#import object\nbpy.ops.import_scene.obj(filepath=model ) \nscene = bpy.context.scene\n\n\n# join components of mesh\nobs = []\nfor ob in scene.objects:\n\tif ob.type == 'MESH':\n\t\tobs.append(ob)\nctx = bpy.context.copy()\nctx['active_object'] = obs[0]\nctx['selected_objects'] = obs\nctx['selected_editable_bases'] = [scene.object_bases[ob.name] for ob in obs]\nbpy.ops.object.join(ctx)\no = bpy.context.selected_objects[0]\n\n\n# removes split normal, helps with decimation\nbpy.context.scene.objects.active = o \nbpy.ops.object.editmode_toggle()\nbpy.ops.mesh.select_all(action='SELECT')\nbpy.ops.mesh.customdata_custom_splitnormals_clear()\nbpy.ops.mesh.remove_doubles()\nbpy.ops.object.editmode_toggle()\n\n\n\n# shrinking the mesh to be a uniform size \n# no idea if this actually helps with training\n# it does make objects smaller, which makes them much quicker to load during training \n# ideally all objects will be between 500 and 600 verts, but I allow between 400 and 700 verts \n# the object is used during training regardless, but not for the latent loss \nnot_possible = False\nnum = float(len(o.data.vertices))\nnew_num = num \norig = num\nfull_ratio = .01\nif num<30: \n\tnot_possible = True # if it's too small then subsampling doesn't work well \n\n# if large then decimate to make the right size\n# or at least try to \nelif num> 550:\n\tfor i in range(5): \t\n\t\tmod = o.modifiers.new(name='decimate', type='DECIMATE')\n\t\tmod.ratio = max(550./num, full_ratio)\n\t\tfull_ratio /= (mod.ratio) \n\t\tbpy.ops.object.modifier_apply(modifier = mod.name)\n\t\to.modifiers.clear()\n\t\tif float(len(o.data.vertices)) < 550:\n\t\t\tnew_num = float(len(o.data.vertices))\n\t\t\tbreak\n\t\telse: \n\t\t\tnum = float(len(o.data.vertices)) \n\t\tif i == 4 and float(len(o.data.vertices)) > 700: # if it can't be made small enough then don't convert it \n\t\t\tnot_possible = True\n\t\tnew_num = float(len(o.data.vertices))\n# if small then try to make it larger \nelif num < 400: \n\tmod = o.modifiers.new(name=\"Remesh\", type='REMESH')\n\tmod.octree_depth = 6\n\tmod.use_remove_disconnected = False\n\tbpy.ops.object.modifier_apply(modifier = mod.name)\n\to.modifiers.clear()\n\tnum = float(len(o.data.vertices))\n\t# and then shrink it again \n\tif num> 500:\n\t\tfor i in range(5): \t\n\t\t\tmod = o.modifiers.new(name='decimate', type='DECIMATE')\n\t\t\tmod.ratio = 550./num \n\t\t\tbpy.ops.object.modifier_apply(modifier = mod.name)\n\t\t\to.modifiers.clear()\n\t\t\tif float(len(o.data.vertices)) < 600:\n\t\t\t\tbreak\n\t\t\telse: \n\t\t\t\tnum = float(len(o.data.vertices)) \n\telse:\n\t\tnot_possible = True \nprint ('-------------------------------------------------------------------------')\nprint ( float(len(o.data.vertices)) )\nprint ('-------------------------------------------------------------------------')\nif not_possible: \n\texit()\n\n# triangulate the object \nbpy.ops.object.editmode_toggle()\nbpy.ops.mesh.dissolve_limited()\ntriangulate_edit_object(o)\nbpy.ops.object.editmode_toggle()\n\n\n\n# now we record the object info\n\n# get initial face info\nme = o.data\nadj_new = np.zeros((600,600))\nmax_len = 0\nfaces = []\nfor poly in me.polygons:\n\tvs = []\n\tfor loop_index in range(poly.loop_start, poly.loop_start + poly.loop_total):\n\t\tvs.append(me.loops[loop_index].vertex_index)\n\tfaces.append(vs)\n\t\t\n\t\n\n# get initial vertex info, and normal info if you want it (I don't)\nbm = bmesh.new()\nbm.from_mesh(me)\nverts, normals = [0 for i in range(len(bm.verts))],[0 for i in range(len(bm.verts))]\nfor e,v in enumerate(bm.verts):\n\tverts[v.index] = v.co \n\tnormals[v.index] = v.normal\n\n\n# calculate adjacency matrix and final face, vertex and normal info \nverts_map = {}\ncount = 0\nfor face in faces: \n\tv1, v2, v3 = face \n\tfor v in face: \n\t\tif v not in verts_map: \n\t\t\tverts_map[v] = [count, verts[v], normals[v] ]\n\t\t\tcount += 1 \nadj = np.zeros((len(verts_map), len(verts_map)))\ntrue_verts = np.zeros((len(verts_map), 3))\ntrue_normals = np.zeros((len(verts_map), 3))\nfor e,face in enumerate(faces): \n\tv1, v2, v3 = face \n\tadj[verts_map[v1][0], verts_map[v1][0]] = 1 \n\tadj[verts_map[v2][0], verts_map[v2][0]] = 1 \n\tadj[verts_map[v3][0], verts_map[v3][0]] = 1 \n\tadj[verts_map[v1][0], verts_map[v2][0]] = 1 \n\tadj[verts_map[v2][0], verts_map[v1][0]] = 1 \n\tadj[verts_map[v1][0], verts_map[v3][0]] = 1 \n\tadj[verts_map[v3][0], verts_map[v1][0]] = 1 \n\tadj[verts_map[v2][0], verts_map[v3][0]] = 1 \n\tadj[verts_map[v3][0], verts_map[v2][0]] = 1 \n\tfaces[e] = [verts_map[v1][0], verts_map[v2][0], verts_map[v3][0]]\n\nfor _ , info in verts_map.items(): \n\tspot, position, normal = info \n\ttrue_verts[spot] = position \n\ttrue_normals[spot] = normal\n\nfor obj in bpy.data.objects:\n obj.select = False\no.select = True\n\n\n\n\n# save updated object, and object info \nbpy.ops.export_scene.obj(filepath=location_obj)\nsio.savemat(location_info, {'verts':np.array(true_verts), \n\t\t\t\t'normals': np.array(true_normals), \n\t\t\t\t'faces': np.array(faces), \n\t\t\t\t'orig_adj': adj\n\t\t\t\t}\n\t\t\t\t)\n\n","sub_path":"old_GEOMetrics/scripts/blender_convert.py","file_name":"blender_convert.py","file_ext":"py","file_size_in_byte":5337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"21649519","text":"import sys\nfrom 
shapely.geometry import Polygon, box\nimport csv \nimport pandas as pd\nfrom shapely import wkt\nimport geopandas as gdp\nimport geoplot\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\n\ndef polygon(p):\n \n poly = list(map(float, p))\n xy =()\n for i in range(len(poly)):\n if(i%2 == 0):\n xy+= (((poly[i+1]),poly[i]), )\n return xy\n\ndef main(csv_file, year):\n\n with open(csv_file) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n line_count =0\n accq_start_dict= {'date' :[], 'tile':[], 'cloud':[], 'geometry': []}\n \n for row in csv_reader:\n if(line_count == 0):\n # print(f'Column names are \\n {\", \".join(row)}')\n line_count += 1\n first_row = row[57:67]\n else:\n geo = row[58:68] \n # print(row[0][10:16])\n # return\n gg = polygon(geo)\n gg= gg[0:3] + (gg[4],) + (gg[3],)\n pp = Polygon(gg[1:]) \n accq_start_dict['date'].append(row[2])\n accq_start_dict['tile'].append(row[0][10:16])\n accq_start_dict['cloud'].append(row[15])\n accq_start_dict['geometry'].append(pp)\n\n df = pd.DataFrame(data=accq_start_dict)\n df['date'] = pd.to_datetime(df['date'], format=\"%Y-%m-%d\")\n df['cloud'] = df['cloud'].astype(float)\n\n df['month'] = df['date'].dt.month\n df['year'] = df['date'].dt.year\n months = [0] * 12\n for i in range(1, 13):\n m = df.loc[(df['month'] == i) & (df['year'] == year)]\n m['geometry'] = m['geometry'].astype(str)\n m = m.groupby(['tile', 'geometry'])['cloud'].mean().to_frame().reset_index()\n m['geometry'] = m['geometry'].apply(wkt.loads)\n m = gdp.GeoDataFrame(m.drop(['geometry'], axis=1), crs={'init': 'epsg:4326'}, geometry=m.geometry)\n months[i-1] = m\n norm = mpl.colors.Normalize(vmin=0, vmax=100)\n cmap = mpl.cm.ScalarMappable(norm=norm, cmap='Reds').cmap\n\n fig, axarr = plt.subplots(4, 3, figsize=(30, 30))\n mon_name = ['January', 'February', 'March',\n 'April', 'May', 'June', 'July', \n 'August', 'September', 'October', 'November', 'December' ]\n\n fig.suptitle(\"Ethiopia: Landsat Cloud Cover percentage Choropleth \" + str(year), fontsize=35)\n # print(months[0])\n for i in range(12):\n x = (i) // 3\n y = (i) % 3\n geoplot.choropleth(\n months[i], hue= months[i]['cloud'],\n cmap=cmap, norm=norm, legend=True, ax=axarr[x, y]\n ) \n axarr[x, y].set_title(mon_name[i], fontsize=30)\n fig.savefig(\"landsat7_coverage_\" + str(year), dpi=None, facecolor='w', edgecolor='w',\n orientation='portrait')\n\n# count the arguments\narguments = len(sys.argv) - 1\n\n# check the argument count before indexing into sys.argv\nif(arguments < 2):\n print(\"Too few arguments, exit\")\n exit(0)\n\n# output argument-wise\nposition = 1\ncsv_file = sys.argv[1]\nyear = int(sys.argv[2])\n\n\n\n\nmain(csv_file, year)\n","sub_path":"landsat7.py","file_name":"landsat7.py","file_ext":"py","file_size_in_byte":3020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"45452958","text":"import numpy as np\nfrom numba import njit\nfrom collections import OrderedDict\nfrom numba.experimental import jitclass\nfrom numba import deferred_type, optional, int64, float64, boolean\nfrom lsnms.util import (\n distance_to_hypersphere,\n englobing_sphere,\n max_spread_axis,\n split_along_axis,\n rdist,\n)\n\n\nspecs = OrderedDict()\nnode_type = deferred_type()\nspecs[\"data\"] = float64[:, :]\nspecs[\"centroid\"] = float64[:]\nspecs[\"indices\"] = optional(int64[:])\nspecs[\"radius\"] = float64\nspecs[\"is_leaf\"] = boolean\nspecs[\"leaf_size\"] = int64\nspecs[\"dimensionality\"] = int64\nspecs[\"left\"] = optional(node_type)\nspecs[\"right\"] = optional(node_type)\n\n\n@jitclass(specs)\nclass Node:\n \"\"\"\n Main object for 
the node class.\n\n    Note that the tree building process is peculiar:\n Since jit classes methods can not be recursive (node.method can not call node.method), the\n tree building process (recursive node splitting and children instantiation) can not be done\n inside the Node.__init__ method (which is the natural way to do so).\n However, jit classes methods can call recursive functions: hence, the tree building process is\n delegated to an independent function (see `build` function).\n Consequently, this class must be used in the following way:\n ```\n # Instantiate the root node\n node = Node(data)\n # Recursively attach children to each node\n node.build()\n ```\n\n For convenience, a wrapper `BallTree` class was implemented, encapsulating this process:\n ```\n tree = BallTree(data)\n tree.query_radius(...)\n ```\n \"\"\"\n\n def __init__(self, data, leaf_size=16, indices=None):\n # Stores the data\n self.data = data\n # Record the dimensionality; query_radius checks query points against it\n self.dimensionality = data.shape[1]\n\n if len(self.data) == 0:\n raise ValueError(\"Empty data\")\n\n # Stores indices of each data point\n if indices is None:\n self.indices = np.arange(len(data))\n else:\n self.indices = indices\n\n self.leaf_size = leaf_size\n\n # Is it a leaf\n if len(data) <= leaf_size:\n self.is_leaf = True\n else:\n self.is_leaf = False\n # Determine centroid and radius\n centroid, radius = englobing_sphere(self.data)\n self.centroid = centroid\n self.radius = radius\n\n # Pre-assign empty children for typing\n self.left = None\n self.right = None\n\n def split(self):\n \"\"\"\n Splits a node into two children nodes.\n\n Returns\n -------\n Tuple[Node]\n Left children and right children\n \"\"\"\n splitdim = max_spread_axis(self.data)\n left_indices, right_indices = split_along_axis(self.data, splitdim)\n # Simply reference the data in the children, do not copy arrays\n left_node = Node(self.data[left_indices], self.leaf_size, self.indices[left_indices])\n right_node = Node(self.data[right_indices], self.leaf_size, self.indices[right_indices])\n return left_node, right_node\n\n def assign_left(self, node):\n \"\"\"\n Assigns the left node.\n Strangely enough, this needs to be delegated to an explicit method.\n \"\"\"\n self.left = node\n\n def assign_right(self, node):\n \"\"\"\n Assigns the right node.\n Strangely enough, this needs to be delegated to an explicit method.\n \"\"\"\n self.right = node\n\n def build(self):\n \"\"\"\n Recursively build the children.\n Jit methods can not be explicitly recursive:\n `self.build` can not call `self.build`, but it can call a function\n which calls itself, the workaround used here.\n \"\"\"\n # Recursively attach children to the parent\n build(self)\n\n def query_radius(self, X, max_radius):\n \"\"\"\n Return the set of points in the dataset at distance less than `radius` from the query point X.\n\n Parameters\n ----------\n X : np.array\n Query point (single point only)\n radius : float\n max radius\n\n Returns\n -------\n np.array\n Indices of points within that radius from the query point\n\n Raises\n ------\n ValueError\n This function works on single query point only.\n \"\"\"\n if X.ndim > 1:\n raise ValueError(\"query_radius only works on single query point.\")\n if X.shape[-1] != self.dimensionality:\n raise ValueError(\"Tree and query dimensionality do not match\")\n # Initialize empty list of int64\n # Needs to be typed\n buffer = [0][:0]\n\n # Query recursive\n # Fills in-place the neighbors list\n query_radius(self, X, max_radius ** 2, buffer)\n\n # return np array for convenience\n return 
np.array(buffer)\n\n\nnode_type.define(Node.class_type.instance_type)\n\n\n@njit\ndef build(current):\n \"\"\"\n Recursive building process.\n Since jit methods can not be recursive, it has to be a detached function.\n Otherwise, it would just be included inside the Node.__init__ method.\n\n Parameters\n ----------\n current : Node\n Current node to split if needed\n \"\"\"\n if not current.is_leaf:\n left, right = current.split()\n current.assign_left(left)\n current.assign_right(right)\n build(current.left)\n build(current.right)\n\n\n@njit\ndef query_radius(node, X, max_radius, buffer, dist_LB=0.0, is_root=True):\n \"\"\"\n This function should not be used as-is: jitted-class methods can not be recursive.\n The recursive query process is delegated here.\n\n This is a depth-first search: by ensuring that one chooses the closest\n node first, the algorithm will consequently first go to the node containing the point (if any)\n and then go backward in neighbors node, trimming each node too far from the `max_radius` given.\n\n Parameters\n ----------\n node: Node\n Currently visited node\n X : np.array\n Query point (one point).\n max_radius : float\n Max radius\n buffer : list\n List of currently-gathered neighbors. Stores in-place the neighbors along the search process\n dist_LB : float, optional\n Distance lower bound: distance from the query point to the currently visited node's\n hypersphere, by default 0.0\n is_root : bool, optional\n Whether the currently visited node is root, by default True\n \"\"\"\n if is_root:\n # If first call, no lower bound distance has already been computed\n dist_LB = distance_to_hypersphere(X, node.centroid, node.radius)\n\n # if query is outside the radius, then trim this node out\n if dist_LB > max_radius:\n return\n\n # If it's a leaf: check points inside\n elif node.is_leaf:\n for i, y in zip(node.indices, node.data):\n d = rdist(X, y)\n if d <= max_radius:\n buffer.append(i)\n\n # Else, continue search\n # Going for the closest node first ensures a depth-first search\n else:\n left_LB = distance_to_hypersphere(X, node.left.centroid, node.left.radius)\n right_LB = distance_to_hypersphere(X, node.right.centroid, node.right.radius)\n\n if left_LB < right_LB:\n query_radius(node.left, X, max_radius, buffer, left_LB, False)\n query_radius(node.right, X, max_radius, buffer, right_LB, False)\n else:\n query_radius(node.right, X, max_radius, buffer, right_LB, False)\n query_radius(node.left, X, max_radius, buffer, left_LB, False)\n\n\nspecs = OrderedDict()\nspecs[\"_root\"] = node_type\nspecs[\"data\"] = float64[:, :]\n\n\n@jitclass(specs)\nclass BallTree:\n \"\"\"\n Simple wrapper class to build the tree, it encapsulates root node instantiation,\n followed by the recursive building process, which can not be called in node.__init__\n because jitted methods can not be recursive.\n\n \"\"\"\n\n def __init__(self, data, leaf_size=16):\n self.data = data\n self._root = Node(data, leaf_size, None)\n self._root.build()\n\n def query_radius(self, X, radius):\n \"\"\"\n Return the set of points in the dataset at distance less than `radius` from the query point X.\n\n Parameters\n ----------\n X : np.array\n Query point (single point only)\n radius : float\n max radius\n\n Returns\n -------\n np.array\n Indices of points within that radius from the query point\n \"\"\"\n return self._root.query_radius(X, 
radius)\n","sub_path":"lsnms/balltree.py","file_name":"balltree.py","file_ext":"py","file_size_in_byte":8444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"16632281","text":"from twilio.rest import Client\nimport requests\n\naccount_sid = 'AC7c6db64a68ca2877249bf0a2b25e9052'\nauth_token = '10852a7ac73b9563adf35663da0d80af'\nclient = Client(account_sid, auth_token)\n\n\ndef send():\n message = client.messages \\\n .create(\n body='Yo you know this Dude/Dudette, homie?',\n from_='+12073897132',\n media_url='https://images.pexels.com/photos/356378/pexels-photo-356378.jpeg?auto=compress&cs=tinysrgb&h=350',\n to='+13148256058'\n )\n\n print(message.sid)\n\nsend()","sub_path":"RPi/messenger.py","file_name":"messenger.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"581343519","text":"from django.urls import path\r\nfrom . import views\r\n\r\nurlpatterns = [\r\n path('', views.index, name=\"index\"),\r\n path('algebra/', views.algebra, name=\"algebra\"),\r\n path('simplex/', views.simplex, name=\"simplex\"),\r\n path('algebra/equations', views.algebra_equations, name=\"algebra_equations\"),\r\n path('algebra/equations/solve-equation', views.algebra_equations_solve_equation, name=\"algebra_equations_solve_equation\"),\r\n path('algebra/equations/system', views.algebra_equations_system, name=\"algebra_equations_system\"),\r\n path('algebra/equations/evaluate-expression', views.algebra_equations_expressions, name=\"algebra_equations_expressions\"),\r\n path('algebra/slope', views.algebra_slope, name=\"algebra_slope\"),\r\n path('algebra/graph', views.algebra_graph, name=\"algebra_graph\"),\r\n path('algebra/function-composition', views.algebra_composition, name=\"algebra_composition\"),\r\n path('algebra/partial-fractions', views.algebra_partial_fractions, name=\"algebra_partial_fractions\"),\r\n path('calculus', views.calculus, name=\"calculus\"),\r\n path('calculus/derivatives', views.calculus_derivatives, name=\"calculus_derivatives\"),\r\n path('calculus/derivatives/standard', views.calculus_derivatives_standard, name=\"calculus_derivatives_standard\"),\r\n path('calculus/derivatives/partial', views.calculus_derivatives_partial, name=\"calculus_derivatives_partial\"),\r\n path('calculus/integrals', views.calculus_integrals, name=\"calculus_integrals\"),\r\n path('calculus/integrals/indefinite', views.calculus_integrals_indefinite, name=\"calculus_integrals_indefinite\"),\r\n path('calculus/integrals/definite', views.calculus_integrals_definite, name=\"calculus_integrals_definite\"),\r\n path('calculus/graphical', views.calculus_graphical, name=\"calculus_graphical\"),\r\n path('calculus/graphical/arc-length', views.calculus_graphical_arc_length, name=\"calculus_graphical_arc_length\"),\r\n path('calculus/graphical/tangent-line', views.calculus_graphical_tangent_line, name=\"calculus_graphical_tangent_line\"),\r\n path('calculus/graphical/critical-points', views.calculus_graphical_critical_points, name=\"calculus_graphical_critical_points\"), \r\n path('calculus/limits', views.calculus_limits, name=\"calculus_limits\"),\r\n path('ordinary-differential-equation', views.ODE, name=\"ODE\"),\r\n path('ordinary-differential-equation/first-order', views.ODE_first, name=\"ODE_first\"), \r\n path('ordinary-differential-equation/first-order/linear', views.ODE_first_linear, name=\"ODE_first_linear\"),\r\n 
path('ordinary-differential-equation/first-order/separable', views.ODE_first_separable, name=\"ODE_first_separable\"),\r\n path('ordinary-differential-equation/first-order/exact', views.ODE_first_exact, name=\"ODE_first_exact\"),\r\n path('ordinary-differential-equation/first-order/euler', views.ODE_first_euler, name=\"ODE_first_euler\"),\r\n path('ordinary-differential-equation/second-order', views.ODE_second, name=\"ODE_second\"),\r\n path('ordinary-differential-equation/second-order/homgenous', views.ODE_second_homogenous, name=\"ODE_second_homogenous\"),\r\n path('linear-algebra', views.linear_algebra, name=\"linear_algebra\"), \r\n path('linear-algebra/matrix-on-matrix', views.linear_algebra_matrix_ops, name=\"linear_algebra_matrix_ops\"),\r\n path('linear-algebra/matrix-on-matrix/multiplication', views.linear_algebra_matrix_ops_multiplication, name=\"linear_algebra_matrix_ops_multiplication\"), \r\n path('linear-algebra/matrix-on-matrix/addition', views.linear_algebra_matrix_ops_addition, name=\"linear_algebra_matrix_ops_addition\"), \r\n path('linear-algebra/matrix-on-matrix/matrix-equation', views.linear_algebra_matrix_ops_equation, name=\"linear_algebra_matrix_ops_equation\"),\r\n path('linear-algebra/system', views.linear_algebra_system, name=\"linear_algebra_system\"),\r\n path('linear-algebra/matrix-info', views.linear_algebra_info, name=\"linear_algebra_info\"),\r\n path('linear-algebra/matrix-info/reduced-row-echelon-form', views.linear_algebra_info_RREF, name=\"linear_algebra_info_RREF\"), \r\n path('linear-algebra/matrix-info/determinant', views.linear_algebra_info_determinant, name=\"linear_algebra_info_determinant\"),\r\n path('linear-algebra/matrix-info/transpose', views.linear_algebra_info_transpose, name=\"linear_algebra_info_transpose\"), \r\n path('linear-algebra/matrix-info/inverse', views.linear_algebra_info_inverse, name=\"linear_algebra_info_inverse\"), \r\n path('linear-algebra/matrix-info/eigenvalues', views.linear_algebra_info_eigenvalues, name=\"linear_algebra_info_eigenvalues\"), \r\n path('geometry', views.geometry, name=\"geometry\"), \r\n path('geometry/area', views.geometry_area, name=\"geometry_area\"), \r\n path('geometry/perimeter', views.geometry_Perimeter, name=\"geometry_Perimeter\")\r\n]\r\n","sub_path":"MathCalculator/calculator/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":4784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"557803981","text":"from django.conf.urls import url\n\nfrom restcalls.core import views\n\nurlpatterns = [\n # path('admin/', admin.site.urls),\n url(r'calls', views.post_record_call, name='post_record_call'),\n url(r'bill/(?P\\d+)?/(?P\\d+)-(?P\\d+)$', views.get_bill, name='get_bill'),\n url(r'bill/(?P\\d+)?/$', views.get_bill, name='get_bill'),\n url(r'price-policy', views.PricePolicyView.as_view(), name='price_policy')\n\n]\n","sub_path":"restcalls/restcalls/core/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"473484778","text":"from sqlalchemy import *\nfrom migrate import *\n\n\ndef upgrade(migrate_engine):\n # Upgrade operations go here. 
Don't create your own engine; bind\n # migrate_engine to your metadata\n meta = MetaData(bind=migrate_engine)\n t = Table('proc_DetectorIntercalibrationTable', meta, autoload=True)\n c = Column('detector_id', Integer)\n c.create(t)\n\n t = Table('proc_DetectorIntercalibrationTable', meta, autoload=True)\n t.c.user_value.alter(type=Float(32))\n t.c.user_error.alter(type=Float(32))\n\n t = Table('proc_BlanksTable', meta, autoload=True)\n t.c.user_value.alter(type=Float(32))\n t.c.user_error.alter(type=Float(32))\n\n t = Table('proc_BackgroundsTable', meta, autoload=True)\n t.c.user_value.alter(type=Float(32))\n t.c.user_error.alter(type=Float(32))\n\n\n t = Table('proc_FitTable', meta, autoload=True)\n c = Column('filter_outliers', Boolean)\n c.create(t)\n c = Column('filter_outlier_iterations', Integer)\n c.create(t)\n c = Column('filter_outlier_std_devs', Integer)\n c.create(t)\n\n\ndef downgrade(migrate_engine):\n # Operations to reverse the above upgrade go here.\n meta = MetaData(bind=migrate_engine)\n t = Table('proc_DetectorIntercalibrationTable', meta, autoload=True)\n t.c.detector_id.drop()\n\n t = Table('proc_FitTable', meta, autoload=True)\n t.c.filter_outliers.drop()\n t.c.filter_outlier_iterations.drop()\n t.c.filter_outlier_std_devs.drop()\n","sub_path":"src/database/migrate/isotopedb/versions/044_intercalibration_updates.py","file_name":"044_intercalibration_updates.py","file_ext":"py","file_size_in_byte":1433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"490442877","text":"import json\n\nimport re\nfrom functools import lru_cache\nfrom urllib.parse import urlencode\nfrom os.path import abspath, dirname, join, pardir\nimport requests\nfrom indra.util import read_unicode_csv\n\nMESH_URL = 'https://id.nlm.nih.gov/mesh/'\nHERE = dirname(abspath(__file__))\nRESOURCES = join(HERE, pardir, 'resources')\nMESH_FILE = join(RESOURCES, 'mesh_id_label_mappings.tsv')\nMESH_REV_LOOKUPS = join(RESOURCES, 'mesh_name_id_maps.json')\n\n\nmesh_id_to_name = {}\nmesh_name_to_id = {}\nfor mesh_id, mesh_label in read_unicode_csv(MESH_FILE, delimiter='\\t'):\n mesh_id_to_name[mesh_id] = mesh_label\n mesh_name_to_id[mesh_label] = mesh_id\n\nwith open(MESH_REV_LOOKUPS, 'r') as f:\n mesh_name_to_id_name = json.load(f)\n\n\n@lru_cache(maxsize=1000)\ndef get_mesh_name_from_web(mesh_id):\n \"\"\"Get the MESH label for the given MESH ID using the NLM REST API.\n\n Parameters\n ----------\n mesh_id : str\n MESH Identifier, e.g. 'D003094'.\n\n Returns\n -------\n str\n Label for the MESH ID, or None if the query failed or no label was\n found.\n \"\"\"\n url = MESH_URL + mesh_id + '.json'\n resp = requests.get(url)\n if resp.status_code != 200:\n return None\n mesh_json = resp.json()\n try:\n label = mesh_json['@graph'][0]['label']['@value']\n except (KeyError, IndexError) as e:\n return None\n return label\n\n\ndef get_mesh_name(mesh_id, offline=False):\n \"\"\"Get the MESH label for the given MESH ID.\n\n Uses the mappings table in `indra/resources`; if the MESH ID is not listed\n there, falls back on the NLM REST API.\n\n Parameters\n ----------\n mesh_id : str\n MESH Identifier, e.g. 'D003094'.\n offline : bool\n Whether to allow queries to the NLM REST API if the given MESH ID is not\n contained in INDRA's internal MESH mappings file. 
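The migration above follows the usual sqlalchemy-migrate shape: upgrade() applies additive changes against a MetaData bound to migrate_engine, and downgrade() reverses them exactly. A minimal sketch of that round trip with made-up table and column names:

from sqlalchemy import Column, Integer, MetaData, Table

def upgrade(migrate_engine):
    meta = MetaData(bind=migrate_engine)
    t = Table('example_table', meta, autoload=True)
    Column('example_id', Integer).create(t)   # add the new column

def downgrade(migrate_engine):
    meta = MetaData(bind=migrate_engine)
    t = Table('example_table', meta, autoload=True)
    t.c.example_id.drop()                     # exact reverse of upgrade()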
Default is False\n (allows REST API queries).\n\n Returns\n -------\n str\n Label for the MESH ID, or None if the query failed or no label was\n found.\n \"\"\"\n indra_mesh_mapping = mesh_id_to_name.get(mesh_id)\n if offline or indra_mesh_mapping is not None:\n return indra_mesh_mapping\n # Look up the MESH mapping from NLM if we don't have it locally\n return get_mesh_name_from_web(mesh_id)\n\n\ndef get_mesh_id_name(mesh_term, offline=False):\n \"\"\"Get the MESH ID and name for the given MESH term.\n\n Uses the mappings table in `indra/resources`; if the MESH term is not\n listed there, falls back on the NLM REST API.\n\n Parameters\n ----------\n mesh_term : str\n MESH Descriptor or Concept name, e.g. 'Breast Cancer'.\n offline : bool\n Whether to allow queries to the NLM REST API if the given MESH term is\n not contained in INDRA's internal MESH mappings file. Default is False\n (allows REST API queries).\n\n Returns\n -------\n tuple of strs\n Returns a 2-tuple of the form `(id, name)` with the ID of the\n descriptor corresponding to the MESH label, and the descriptor name\n (which may not exactly match the name provided as an argument if it is\n a Concept name). If the query failed, or no descriptor corresponding to\n the name was found, returns a tuple of (None, None).\n \"\"\"\n indra_mesh_id = mesh_name_to_id.get(mesh_term)\n if indra_mesh_id is not None:\n return indra_mesh_id, mesh_term\n\n indra_mesh_id, new_term = \\\n mesh_name_to_id_name.get(mesh_term, (None, None))\n if indra_mesh_id is not None:\n return indra_mesh_id, new_term\n\n if offline:\n return None, None\n\n # Look up the MESH mapping from NLM if we don't have it locally\n return get_mesh_id_name_from_web(mesh_term)\n\n\n@lru_cache(maxsize=1000)\ndef get_mesh_id_name_from_web(mesh_term):\n \"\"\"Get the MESH ID and name for the given MESH term using the NLM REST API.\n\n Parameters\n ----------\n mesh_term : str\n MESH Descriptor or Concept name, e.g. 'Breast Cancer'.\n\n Returns\n -------\n tuple of strs\n Returns a 2-tuple of the form `(id, name)` with the ID of the\n descriptor corresponding to the MESH label, and the descriptor name\n (which may not exactly match the name provided as an argument if it is\n a Concept name). 
If the query failed, or no descriptor corresponding to\n the name was found, returns a tuple of (None, None).\n \"\"\"\n url = MESH_URL + 'sparql'\n query = \"\"\"\n PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>\n PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>\n PREFIX owl: <http://www.w3.org/2002/07/owl#>\n PREFIX meshv: <http://id.nlm.nih.gov/mesh/vocab#>\n PREFIX mesh: <http://id.nlm.nih.gov/mesh/>\n PREFIX mesh2019: <http://id.nlm.nih.gov/mesh/2019/>\n PREFIX mesh2018: <http://id.nlm.nih.gov/mesh/2018/>\n PREFIX mesh2017: <http://id.nlm.nih.gov/mesh/2017/>\n\n SELECT ?d ?dName ?c ?cName \n FROM <http://id.nlm.nih.gov/mesh>\n WHERE {\n ?d a meshv:Descriptor .\n ?d meshv:concept ?c .\n ?d rdfs:label ?dName .\n ?c rdfs:label ?cName\n FILTER (REGEX(?dName,'^%s$','i') || REGEX(?cName,'^%s$','i'))\n }\n ORDER BY ?d\n \"\"\" % (mesh_term, mesh_term)\n args = {'query': query, 'format': 'JSON', 'inference': 'true'}\n # Interestingly, the following call using requests.get to package the\n # query does not work:\n # resp = requests.get(url, data=args)\n # But if the query string is explicitly urlencoded using urllib, it works:\n query_string = '%s?%s' % (url, urlencode(args))\n resp = requests.get(query_string)\n # Check status\n if resp.status_code != 200:\n return None, None\n\n try:\n # Try to parse the json response (this can raise exceptions if we\n # got no response).\n mesh_json = resp.json()\n\n # Choose the first entry (should usually be only one)\n id_uri = mesh_json['results']['bindings'][0]['d']['value']\n name = mesh_json['results']['bindings'][0]['dName']['value']\n except (KeyError, IndexError, json.decoder.JSONDecodeError) as e:\n return None, None\n\n # Strip the MESH prefix off the ID URI\n m = re.match('http://id.nlm.nih.gov/mesh/([A-Za-z0-9]*)', id_uri)\n assert m is not None\n id = m.groups()[0]\n return id, name\n\n\n","sub_path":"indra/databases/mesh_client.py","file_name":"mesh_client.py","file_ext":"py","file_size_in_byte":6377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"34973921","text":"__author__ = 'jbjose'\n\n\"\"\"\nWe will be making some simple 8x8 bitmap pictures. You will be given 8 hex values that can be 0-255 in decimal value (so\n1 byte). Each value represents a row. So 8 rows of 8 bits so a 8x8 bitmap picture.\n\"\"\"\n\nimport sys\nimport StringIO\nimport re\n\ndef get_bits(n):\n \"\"\"Given an integer, returns a bit representation of the integer\"\"\"\n def _next_bit(n):\n if (int(n,16)<=1):\n return str(int(n,16))\n return _next_bit(str(int(n,16)/2)) + str(int(n,16)%2)\n\n return \"{:0>4}\".format(_next_bit(n))\n\nsys.stdin = StringIO.StringIO(\"93 93 93 F3 F3 93 93 93\")\n\nbitmap = \"\"\nfor chunk in sys.stdin.readline().strip().split(\" \"):\n bitmap += (get_bits(chunk[0]) + get_bits(chunk[1])) + '\\n'\n\nbitmap = re.sub('0',' ', bitmap)\nbitmap = re.sub('1','x', bitmap)\nprint(bitmap)\n\n# OMG. Amazing solution by wenderen2\n# for hexvalue in raw_input().split():\n# for char in bin(int(hexvalue, 16))[2:].rjust(8, '0'):\n# print ' *'[int(char)],\n# print\n# lessons... didn't know bin() existed. Duh. Also, rjust is pretty cool. 
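A short usage sketch of the MeSH lookup helpers above; D017209 is the standard MeSH descriptor ID for 'Apoptosis', and the web fallback assumes network access to the NLM endpoint:

print(get_mesh_name('D017209'))                # 'Apoptosis', from the local table
print(get_mesh_id_name('Apoptosis'))           # ('D017209', 'Apoptosis')
print(get_mesh_name('D017209', offline=True))  # never touches the REST API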
The replacement solution they use (using\n# indexes is pretty cool.","sub_path":"171.Easy.hexToBitmap.py","file_name":"171.Easy.hexToBitmap.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"202963794","text":"class Solution:\n\t# @param {integer} n\n\t# @param {integer} k\n\t# @return {string}\n\tdef getPermutation(self, n, k):\n\t\tk -= 1\n\t\tnums = []\n\t\tm = 1\n\t\tfor i in range(0, n):\n\t\t\tnums.append(i + 1)\n\t\t\tif(i != 0):\n\t\t\t\tm *= i\n\t\tans = ''\n\t\ti = n\n\t\twhile(i > 1):\n\t\t\ttmp = int(k / m)\n\t\t\t# print('k = ', k, ', m =', m, ', tmp =', tmp, ', num =', nums[tmp])\n\t\t\tans = ans + str(nums[tmp])\n\t\t\tk = k % m\n\t\t\tm = int(m / (i - 1))\n\t\t\tnums.remove(nums[tmp])\n\t\t\ti -= 1\n\t\tans = ans + str(nums[0])\n\t\treturn ans\n\ns = Solution()\nprint(s.getPermutation(3, 6))","sub_path":"Python/060M_Permutation_Sequence.py","file_name":"060M_Permutation_Sequence.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"180540100","text":"# Copyright 2020 StrongDM Inc\n# \n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# \n# http://www.apache.org/licenses/LICENSE-2.0\n# \n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# \n# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!\nimport grpc\n\nfrom . import nodes_pb2 as nodes__pb2\n\n\nclass NodesStub(object):\n \"\"\"Nodes make up the strongDM network, and allow your users to connect securely to your resources.\n There are two types of nodes:\n 1. **Relay:** creates connectivity to your datasources, while maintaining the egress-only nature of your firewall\n 1. **Gateways:** a relay that also listens for connections from strongDM clients\n \"\"\"\n\n def __init__(self, channel):\n \"\"\"Constructor.\n\n Args:\n channel: A grpc.Channel.\n \"\"\"\n self.Create = channel.unary_unary(\n '/v1.Nodes/Create',\n request_serializer=nodes__pb2.NodeCreateRequest.SerializeToString,\n response_deserializer=nodes__pb2.NodeCreateResponse.FromString,\n )\n self.Get = channel.unary_unary(\n '/v1.Nodes/Get',\n request_serializer=nodes__pb2.NodeGetRequest.SerializeToString,\n response_deserializer=nodes__pb2.NodeGetResponse.FromString,\n )\n self.Update = channel.unary_unary(\n '/v1.Nodes/Update',\n request_serializer=nodes__pb2.NodeUpdateRequest.SerializeToString,\n response_deserializer=nodes__pb2.NodeUpdateResponse.FromString,\n )\n self.Delete = channel.unary_unary(\n '/v1.Nodes/Delete',\n request_serializer=nodes__pb2.NodeDeleteRequest.SerializeToString,\n response_deserializer=nodes__pb2.NodeDeleteResponse.FromString,\n )\n self.List = channel.unary_unary(\n '/v1.Nodes/List',\n request_serializer=nodes__pb2.NodeListRequest.SerializeToString,\n response_deserializer=nodes__pb2.NodeListResponse.FromString,\n )\n\n\nclass NodesServicer(object):\n \"\"\"Nodes make up the strongDM network, and allow your users to connect securely to your resources.\n There are two types of nodes:\n 1. 
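The getPermutation solution above decodes k in the factorial number system: the leading digit is chosen by k // (n-1)!, and the remainder indexes the rest. A commented sketch of the same idea:

import math

def get_permutation(n, k):
    digits = list(range(1, n + 1))
    k -= 1                                   # switch to a 0-based rank
    out = []
    for i in range(n, 0, -1):
        f = math.factorial(i - 1)
        out.append(str(digits.pop(k // f)))  # pick the (k // (i-1)!)-th remaining digit
        k %= f
    return ''.join(out)

assert get_permutation(3, 6) == '321'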
**Relay:** creates connectivity to your datasources, while maintaining the egress-only nature of your firewall\n 1. **Gateways:** a relay that also listens for connections from strongDM clients\n \"\"\"\n\n def Create(self, request, context):\n \"\"\"Create registers a new Node.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def Get(self, request, context):\n \"\"\"Get reads one Node by ID.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def Update(self, request, context):\n \"\"\"Update patches a Node by ID.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def Delete(self, request, context):\n \"\"\"Delete removes a Node by ID.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def List(self, request, context):\n \"\"\"List gets a list of Nodes matching a given set of criteria.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n\ndef add_NodesServicer_to_server(servicer, server):\n rpc_method_handlers = {\n 'Create': grpc.unary_unary_rpc_method_handler(\n servicer.Create,\n request_deserializer=nodes__pb2.NodeCreateRequest.FromString,\n response_serializer=nodes__pb2.NodeCreateResponse.SerializeToString,\n ),\n 'Get': grpc.unary_unary_rpc_method_handler(\n servicer.Get,\n request_deserializer=nodes__pb2.NodeGetRequest.FromString,\n response_serializer=nodes__pb2.NodeGetResponse.SerializeToString,\n ),\n 'Update': grpc.unary_unary_rpc_method_handler(\n servicer.Update,\n request_deserializer=nodes__pb2.NodeUpdateRequest.FromString,\n response_serializer=nodes__pb2.NodeUpdateResponse.SerializeToString,\n ),\n 'Delete': grpc.unary_unary_rpc_method_handler(\n servicer.Delete,\n request_deserializer=nodes__pb2.NodeDeleteRequest.FromString,\n response_serializer=nodes__pb2.NodeDeleteResponse.SerializeToString,\n ),\n 'List': grpc.unary_unary_rpc_method_handler(\n servicer.List,\n request_deserializer=nodes__pb2.NodeListRequest.FromString,\n response_serializer=nodes__pb2.NodeListResponse.SerializeToString,\n ),\n }\n generic_handler = grpc.method_handlers_generic_handler(\n 'v1.Nodes', rpc_method_handlers)\n server.add_generic_rpc_handlers((generic_handler,))\n","sub_path":"strongdm/nodes_pb2_grpc.py","file_name":"nodes_pb2_grpc.py","file_ext":"py","file_size_in_byte":5291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"570658490","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jan 21 08:35:23 2017\n\n\nProblem 3\n15/15 points (graded)\nAssume s is a string of lower case characters.\n\nWrite a program that prints the longest substring of s in which the letters occur in \nalphabetical order. For example, if s = 'azcbobobegghakl', then your program should print\n\nLongest substring in alphabetical order is: beggh\nIn the case of ties, print the first substring. 
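A hedged sketch of how a generated stub like NodesStub is normally consumed on the client side (the server address is an assumption; only the stub and message types come from the generated modules):

import grpc
from . import nodes_pb2, nodes_pb2_grpc

with grpc.insecure_channel('localhost:50051') as channel:
    stub = nodes_pb2_grpc.NodesStub(channel)
    response = stub.Get(nodes_pb2.NodeGetRequest())  # unary-unary RPC over the channel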
For example, if s = 'abcbcd', then your program should print\n\nLongest substring in alphabetical order is: abc\n\n@author: z001hmb\n\"\"\"\ns = 'azcbobobegghakl'\nlongstr1 = s[0]\nlongstr2 = ''\nfor i in range(1,(len(s)),1):\n if s[i] >= s[i-1]:\n longstr1 = longstr1 + s[i]\n #print(\"Longstr1 value: \" + longstr1)\n else:\n if len(longstr1) > len(longstr2):\n longstr2 = longstr1\n #print(\"Longstr2 value: \" + longstr2)\n longstr1 = s[i]\n\nif len(longstr1) > len(longstr2):\n print(\"Longest substring in alphabetical order is: \", longstr1)\nelse:\n print(\"Longest substring in alphabetical order is: \", longstr2)\n\n","sub_path":"Programs/longeststring.py","file_name":"longeststring.py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"52014715","text":"# For path editing\r\nimport os\r\n# For XML parsing\r\nimport xml.etree.ElementTree as ET\r\n# For benchmarking\r\nimport time\r\n\r\nfrom Command import Command\r\n\r\n# TODO: refactor\r\n\r\n# These must be in the Input directory.\r\nregistryFile = \"gl.xml\"\r\nfunctionsFile = \"FunctionList.txt\"\r\n\r\noutputHeader = \"FunctionDeclarations.hpp\"\r\noutputSource = \"FunctionLoaders.inl\"\r\n\r\n\r\n# First set up the directories.\r\nrelPath = os.path.dirname(__file__)\r\ninDir = os.path.join(relPath, \"Input\")\r\noutDir = os.path.join(relPath, \"Output\")\r\n\r\nregistryFile = os.path.join(inDir, registryFile)\r\n\r\nif not os.path.isfile(registryFile):\r\n raise Exception(\"OpenGL function registry file: \" + registryFile)\r\n\r\nprint(\"Reading OpenGL API registry...\")\r\nbeginTiming = time.perf_counter()\r\ntree = ET.parse(registryFile)\r\n\r\nroot = tree.getroot()\r\n\r\nprint(\"Finished parsing registry after\",\r\n round(time.perf_counter() - beginTiming, 3), \"seconds.\\n\")\r\n\r\nfunctionsFile = os.path.join(inDir, functionsFile)\r\n\r\nprint(\"Reading list of requested functions...\")\r\nbeginTiming = time.perf_counter()\r\n\r\nrequested = set()\r\n\r\nwith open(functionsFile) as f:\r\n for line in f.readlines():\r\n line = line.strip()\r\n if len(line) > 0 and line[0] != '#':\r\n requested.add(line)\r\n\r\nprint(len(requested), \"functions to be loaded.\")\r\nprint(\"Finished processing requested functions list after\",\r\n round(time.perf_counter() - beginTiming, 3), \"seconds.\\n\")\r\n\r\n\r\n# Get the commands.\r\ncmds = root.find(\"commands\")\r\n\r\n\r\ndef getParamNames(cmd):\r\n params = cmd.findall(\"param\")\r\n\r\n prLst = []\r\n\r\n for param in params:\r\n prLst.append(param.find(\"name\").text)\r\n\r\n return prLst\r\n\r\n\r\noutputHeader = os.path.join(outDir, outputHeader)\r\noutputSource = os.path.join(outDir, outputSource)\r\n\r\nprint(\"Starting to generate headers...\")\r\nbeginTiming = time.perf_counter()\r\n\r\nloaderList = []\r\n\r\nwith open(outputHeader, 'w') as hdr, open(outputSource, 'w') as src:\r\n # Write the first part of the header.\r\n includeOnce = \"#pragma once \\n\\n\"\r\n hdr.write(includeOnce)\r\n src.write(includeOnce)\r\n\r\n # Write notices for the files.\r\n generatedMsg = \"/// Warning: do not edit this file, it is automatically generated.\\n\\n\"\r\n \r\n hdr.write(generatedMsg)\r\n src.write(generatedMsg)\r\n \r\n for cmd in cmds:\r\n name = cmd.find(\"proto\").find(\"name\").text.strip()[2:]\r\n\r\n if name in requested:\r\n command = Command(cmd)\r\n header = command.toHeader()\r\n \r\n # First write the declaration.\r\n hdr.write(header)\r\n hdr.write(\";\\n\\n\")\r\n \r\n # 
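The two-buffer scan above can also be written as a single linear pass; a compact sketch (strict '>' preserves the first substring on ties, as the problem requires):

def longest_alpha_run(s):
    best = cur = s[:1]
    for prev, ch in zip(s, s[1:]):
        cur = cur + ch if ch >= prev else ch   # extend the run or restart it
        if len(cur) > len(best):
            best = cur
    return best

assert longest_alpha_run('azcbobobegghakl') == 'beggh'
assert longest_alpha_run('abcbcd') == 'abc'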
First the function pointer.\r\n ptrName = \"PFN_\" + name\r\n\r\n src.write(\"static decltype(&\" + name + \") \" + ptrName + \";\\n\\n\")\r\n\r\n loaderList.append(ptrName\r\n + \" = reinterpret_cast<decltype(\" + ptrName + \")>(getFunctionPointer(\\\"gl\"\r\n + name + \"\\\"));\\n\")\r\n \r\n # Now write the loader.\r\n src.write(header)\r\n\r\n returnLine = \"\"\"\\n{{\\n\\treturn {}({});\\n}}\\n\\n\"\"\".format(\r\n ptrName,\r\n command.getParametersNames()\r\n )\r\n\r\n src.write(returnLine)\r\n\r\n requested.remove(name)\r\n\r\n src.write(\"void loadFunctions()\\n{\\n\\t\")\r\n src.write(\"\\n\\t\".join(loaderList))\r\n src.write(\"}\\n\")\r\n\r\nif len(requested) != 0:\r\n raise Exception(\"Could not find functions: \" + \", \".join(requested))\r\n \r\nprint(\"Finished generating headers after\",\r\n round(time.perf_counter() - beginTiming, 3), \"seconds\")\r\n","sub_path":"Generate.py","file_name":"Generate.py","file_ext":"py","file_size_in_byte":3597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"640098948","text":"from flask import Blueprint, jsonify, request\nfrom sima_web_api.api.users.utils import token_required\nfrom sima_web_api.api.sale.models import Sale, SaleList\nfrom sima_web_api.api.product.models import Product\nfrom sima_web_api.api import db\nimport datetime\n\nsale = Blueprint(\n \"sale\",\n __name__,\n url_prefix=\"/sale\",\n)\n\n\n@sale.route(\"/hello\")\ndef hello():\n return jsonify({\"message\": \"Sale blueprint working\"})\n\n\n# ----- Sale -----\n@sale.route(\"/all\", methods=[\"GET\"])\n@token_required\ndef sale_get_all(current_user):\n \"\"\"\n sale_get_all(current_user)\n\n HTTP Methods - GET\n\n Returns all sales\n \"\"\"\n sales = Sale.query.all()\n\n sales_json = [\n {\n \"id\": sale.id,\n \"product\": Product.query.filter_by(id=sale.product_id),\n \"selling_price\": sale.selling_price,\n \"quantity\": sale.quantity,\n \"created_on\": sale.created_on,\n }\n for sale in sales\n ]\n\n return jsonify(sales_json)\n\n\n@sale.route(\"/sale_list/<sale_list_id>\", methods=[\"GET\"])\n@token_required\ndef sales_get_all_by_sale_list_id(current_user, sale_list_id):\n \"\"\"\n sales_get_all_by_sale_list_id(current_user, sale_list_id)\n\n HTTP Methods - GET\n\n Returns all sales in the given sale list\n \"\"\"\n sales_by_sale_list_id = Sale.query.filter_by(sale_list_id=sale_list_id)\n\n sales_by_sale_list_id_json = [\n {\n \"id\": sale.id,\n \"product\": Product.query.filter_by(id=sale.product_id),\n \"selling_price\": sale.selling_price,\n \"quantity\": sale.quantity,\n \"created_on\": sale.created_on,\n }\n for sale in sales_by_sale_list_id\n ]\n\n return jsonify(sales_by_sale_list_id_json)\n\n\n@sale.route(\"/<sale_id>\", methods=[\"GET\"])\n@token_required\ndef sale_get_by_id(current_user, sale_id):\n \"\"\"\n sale_get_by_id(current_user, sale_id)\n\n HTTP Methods - GET\n\n Returns a single sale\n \"\"\"\n sale = Sale.query.filter_by(id=sale_id).first()\n sale_json = {\n \"id\": sale.id,\n \"product\": Product.query.filter_by(id=sale.product_id),\n \"quantity\": sale.quantity,\n \"selling_price\": sale.selling_price,\n \"created_on\": sale.created_on,\n }\n return jsonify(sale_json), 200\n\n\n@sale.route(\"/<sale_id>\", methods=[\"DELETE\"])\n@token_required\ndef sale_delete_by_id(current_user, sale_id):\n \"\"\"\n sale_delete_by_id(current_user, sale_id)\n\n HTTP Methods - DELETE\n\n Deletes resource\n \"\"\"\n sale = Sale.query.filter_by(id=sale_id).first()\n\n if sale:\n db.session.delete(sale)\n db.session.commit()\n return jsonify({\"message\": \"Sale deleted successfully\"}), 200\n else:\n return jsonify({\"message\": \"Could not delete sale\"})\n\n\n@sale.route(\"/<sale_id>\", methods=[\"PUT\"])\n@token_required\ndef sale_update_by_id(current_user, sale_id):\n \"\"\"\n sale_update_by_id(current_user, sale_id)\n\n HTTP Methods - PUT\n\n Updates existing resources\n \"\"\"\n sale = Sale.query.filter_by(id=sale_id).first()\n\n data = request.get_json()\n\n try:\n if data[\"quantity\"]:\n sale.quantity = data[\"quantity\"]\n\n if data[\"sellingPrice\"]:\n sale.selling_price = data[\"sellingPrice\"]\n\n except KeyError:\n return jsonify({\"message\": \"Wrong data passed\"})\n\n db.session.commit()\n return jsonify({\"message\": \"Sale of product updated successfully\"}), 200\n\n\n# ----- SaleList -----\n@sale.route(\"/list\", methods=[\"POST\"])\n@token_required\ndef sale_list_create_new(current_user):\n \"\"\"\n sale_list_create_new(current_user)\n\n HTTP Methods - POST\n\n Creates a sale list with its sales\n \"\"\"\n data = request.get_json()\n\n new_sale_list = SaleList(\n created_on=str(datetime.date.today()),\n customer_name=data[\"customer_details\"][\"customer_name\"],\n customer_contact=data[\"customer_details\"][\"customer_contact\"],\n business_id=data[\"business_id\"],\n )\n db.session.add(new_sale_list)\n db.session.commit()\n\n for sale in data[\"sale_list\"]:\n new_sale = Sale(\n quantity=sale[\"quantity\"],\n selling_price=sale[\"selling_price\"],\n created_on=str(datetime.date.today()),\n product_id=sale[\"product_id\"],\n sale_list_id=new_sale_list.id,\n )\n db.session.add(new_sale)\n db.session.commit()\n return jsonify({\"message\": \"Sale created successfully\"}), 201\n\n\n@sale.route(\"/sale_list\", methods=[\"GET\"])\n@token_required\ndef sale_list_get_all(current_user):\n \"\"\"\n sale_list_get_all(current_user)\n\n HTTP Methods - GET\n\n Returns all sale lists\n \"\"\"\n sales_list = SaleList.query.all()\n\n product_sales_list_json = [\n {\n \"name\": sale_list.name,\n \"customer_name\": sale_list.customer_name,\n \"customer_contact\": sale_list.customer_contact,\n }\n for sale_list in sales_list\n ]\n return jsonify(product_sales_list_json), 200\n\n\n@sale.route(\"/list/<sale_list_id>\", methods=[\"GET\"])\n@token_required\ndef sale_list_get_by_id(current_user, sale_list_id):\n \"\"\"\n sale_list_get_by_id(current_user, sale_list_id)\n\n HTTP Methods - GET\n\n Returns a single sale list\n \"\"\"\n sale_list = SaleList.query.filter_by(id=sale_list_id).first()\n\n sale_list_json = {\n \"id\": sale_list.id,\n \"name\": sale_list.name,\n \"created_on\": sale_list.created_on,\n \"customer_name\": sale_list.customer_name,\n \"customer_contact\": sale_list.customer_contact,\n }\n\n return jsonify(sale_list_json)\n\n\n@sale.route(\"/list/<sale_list_id>\", methods=[\"DELETE\"])\n@token_required\ndef sale_list_delete_by_id(current_user, sale_list_id):\n \"\"\"\n sale_list_delete_by_id(current_user, sale_list_id)\n\n HTTP Methods - DELETE\n\n Deletes resource\n \"\"\"\n sale_list = SaleList.query.filter_by(id=sale_list_id).first()\n\n if sale_list:\n db.session.delete(sale_list)\n db.session.commit()\n return jsonify({\"message\": \"Sale list deleted successfully\"})\n else:\n return jsonify({\"message\": \"Could not delete sale list\"})\n\n\n# TODO: Implement later\n@sale.route(\"/sale_list\", methods=[\"DELETE\"])\n@token_required\ndef sale_list_delete_all(current_user):\n \"\"\"\n sale_list_delete_all(current_user)\n\n HTTP Methods - DELETE\n\n Deletes resource\n \"\"\"\n SaleList.query.delete()\n db.session.commit()\n return jsonify({\"message\": \"All salelists deleted\"})\n\n\n@sale.route(\"/list/<sale_list_id>\", methods=[\"PUT\"])\n@token_required\ndef sale_list_update_by_id(current_user, sale_list_id):\n \"\"\"\n sale_list_update_by_id(current_user, sale_list_id)\n\n HTTP Methods - PUT\n\n Updates existing resources\n \"\"\"\n sale_list = SaleList.query.filter_by(id=sale_list_id).first()\n data = request.get_json()\n\n try:\n if data[\"name\"]:\n sale_list.name = data[\"name\"]\n\n if data[\"customer_name\"]:\n sale_list.customer_name = data[\"customer_name\"]\n\n if data[\"customer_contact\"]:\n sale_list.customer_contact = data[\"customer_contact\"]\n\n except KeyError:\n return jsonify({\"message\": \"Wrong data passed\"})\n\n db.session.commit()\n return jsonify({\"message\": \"Product Sale list updated successfully\"}), 200\n","sub_path":"sima_web_api/api/sale/controllers.py","file_name":"controllers.py","file_ext":"py","file_size_in_byte":7284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"524316308","text":"#################################################################################################################################################### \n# #\n# This file has been generated by Amit Patankar: #\n# Created by : amit.patankar # \n# Created on : 11-07-2013 #\n# Directory : /Desktop/ #\n# Purpose : This structure holds the attributes of each section in a test. #\n# #\n#################################################################################################################################################### \n\nimport csv\nfrom Values import *\nfrom Key import *\nfrom Data import *\n\nclass Scored_Test(object):\n\n #This is the default constructor with all variables defined.\n def __init__(self, test_id):\n self.test_id = test_id\n self.sections = {}\n self.missed_questions_index = {}\n self.missed_questions = {}\n self.missed_questions[WRITING_TYPE] = []\n self.missed_questions[MATH_TYPE] = []\n self.missed_questions[READING_TYPE] = []\n self.test_summary = None\n self.score_summary = None\n self.data = Data_Holder()\n self.qtypedict = {}\n self.essay = 7\n self.date = ''\n self.type = FULL_TEST\n\n #This sets the test summary upon creation and recreation.\n def set_summary(self, s):\n self.test_summary = s\n\n #This returns the section.\n def get_section(self, section_number):\n return self.sections[section_number]\n\n #This adds the scored section to the test.\n def add_section(self, section):\n assert(type(section) == Scored_Section)\n self.sections[section.index] = section\n\n #This returns the scored test id.\n def get_id(self):\n return self.test_id\n\n def make_sections(self, missed_array, section_type):\n current_section = Scored_Section(0, 0, 0, 0)\n for entry in missed_array:\n section_index = int(entry[0].split('_')[-2]) \n index = int(entry[0].split('_')[-1])\n q_id = entry[0]\n attempt = entry[1]\n if index != current_section.index:\n if current_section.is_valid():\n current_section.qa = current_section.size - current_section.qb - current_section.qm\n current_section.score = current_section.qa - round_rem(float(current_section.qm)/4)\n self.add_section(current_section)\n current_section = self.sections[section_index] \n\n #fill section summary\n if attempt == BLANK_ENTRY:\n self.missed_questions_index[section_index][index] = attempt\n self.test_summary.reports[section_type].add_blank()\n current_section.add_blank()\n else:\n self.missed_questions_index[section_index][index] = attempt \n 
self.test_summary.reports[section_type].add_miss()\n current_section.add_miss() \n self.test_summary.reports[section_type].incorrect_questions.append(entry)\n\n q = Scored_Question(current_section, q_id, attempt)\n self.qtypedict[q_id[6:]]= q.question_type\n current_section.add_question(q)\n self.missed_questions[section_type].append(entry)\n self.test_summary.reports[section_type].incorrect_questions.append(entry)\n\n #add last section\n if current_section.is_valid():\n current_section.qa = current_section.size - current_section.qb - current_section.qm\n current_section.score = current_section.qa - round_rem(float(current_section.qm)/4)\n self.add_section(current_section)\n\n self.test_summary.reports[section_type].qa = section_size(section_type) - self.test_summary.reports[section_type].qm - self.test_summary.reports[section_type].qb\n self.score_summary = Score_Summary(self.test_summary)\n\n #This recreates a scored test when given section information for missed question tuples.\n def recreate(self, sections):\n #sections will be passed in a dictionary indexed by type\n key = {}\n #key[index] = (section_type, size)\n #read from keyfile and create section shells for scored test based on type and index\n with open(test_directory(self.test_id) + DIR_SEP + KEYFILE, 'rU') as f:\n reader = csv.reader(f)\n for row in reader:\n if row != KEY_VECTOR or row[0] == '':\n index = int(row[0])\n section_type = int(row[1])\n size = int(row[2])\n if section_type == TRIAL_TYPE:\n continue \n key[index] = (section_type, size)\n if section_type == WRITING_TYPE:\n self.missed_questions_index[index] = {}\n self.sections[index] = Scored_Section(self, index, section_type, size)\n elif section_type == MATH_TYPE:\n self.missed_questions_index[index] = {}\n self.sections[index] = Scored_Section(self, index, section_type, size)\n elif section_type == READING_TYPE:\n self.missed_questions_index[index] = {}\n self.sections[index] = Scored_Section(self, index, section_type, size)\n\n #from sections missed questions, recreate into this scored test\n #current_section = Scored_Section(0, 0, 0, 0)\n\n #create Test Summary for Scored Test\n self.test_summary = Test_Summary(self.test_id)\n self.test_summary.essay = self.essay\n\n #WRITING RECONSTRUCTION\n self.make_sections(sections[WRITING_TYPE], WRITING_TYPE)\n\n #MATH RECONSTRUCTION\n self.make_sections(sections[MATH_TYPE], MATH_TYPE)\n\n #READING RECONSTRUCTION\n self.make_sections(sections[READING_TYPE], READING_TYPE)\n\n\n #String override method.\n def __str__(self):\n output = endl + \"TEST_ID: \" + self.test_id + endl\n output += endl\n output += (\"TYPE: \" + str(self.type) + endl)\n output += (\"DATE: \" + self.date + endl)\n output += (\"ESSAY: \" + str(self.essay) + endl) \n output += \"WRITING:\" + endl\n output += \"Missed Questions:\"\n output += str(self.missed_questions[WRITING_TYPE])\n output += endl\n output += endl\n output += \"MATH:\" + endl\n output += \"Missed Questions:\"\n output += str(self.missed_questions[MATH_TYPE])\n output += endl\n output += endl\n output += \"READING:\" + endl\n output += \"Missed Questions:\"\n output += str(self.missed_questions[READING_TYPE])\n output += endl\n output += endl\n output += SECTION_SEP\n output += endl\n return output\n\nclass Scored_Section(object):\n\n #This is the default constructor with all variables defined.\n def __init__(self, scored_test, index, section_type, size):\n self.scored_test = scored_test\n self.index = index\n self.type = section_type\n #self.section_summary = None\n 
self.missed_questions = []\n self.size = size\n self.score = 0 \n self.qa = 0\n self.qm = 0\n self.qb = 0\n\n #This sets the score for the section\n def set_score(self, score):\n self.score = score\n\n #This adds a missed question.\n def add_miss(self):\n self.qm += 1\n\n #This adds a blank question.\n def add_blank(self):\n self.qb += 1\n\n def calc_qa(self):\n self.qa = self.size - self.qm - self.qb\n\n def calc_score(self):\n self.score = self.qa - self.qm/4 - round_rem(float(self.qm)/4) \n\n #This adds the section summary for the section.\n def add_summary(self, qa, qm, qb):\n self.question_summary = Section_Summary(qa, qm, qb)\n\n #This adds a question.\n def add_question(self, q):\n self.missed_questions.append(q)\n\n #This returns the scored section id.\n def get_id(self):\n return self.scored_test.get_id() + FIELD_SEP + str(self.index)\n\n #This returns if the section is valid. Used in section recreation.\n def is_valid(self):\n return self.index != 0\n\n #String override method.\n def __str__(self):\n output = self.get_id()\n output += endl\n output += (section_name(self.type) + endl)\n output += (\"QA: \" + str(self.qa) + endl)\n output += (\"QM: \" + str(self.qm) + endl)\n output += (\"QB: \" + str(self.qb) + endl)\n output += (\"Raw Score: \" + str(self.score) + endl)\n return output\n\n#Base Scored Question Class\nclass Scored_Question(object):\n\n #This is the default constructor with all variables defined.\n def __init__(self, scored_section, q_id, incorrect_answer):\n self.scored_section = scored_section\n self.index = None\n self.correct_answer = None\n self.incorrect_answer = incorrect_answer\n self.difficulty = None\n self.question_type = None\n self.make_by_id(q_id)\n\n #This is the default constructor with all variables defined.\n def make_by_id(self, q_id):\n array = q_id.split('_')\n #test_id = array[0] + '_' + array[1]\n test_id = fuse_id_array(array[:-2])\n filename = test_id + DIR_SEP + \"Section \" + array[-2] + \".csv\"\n number = array[-1]\n with open(test_directory(filename), 'rU') as f:\n reader = csv.reader(f)\n for row in reader:\n if row[NUMBER_INDEX] == number:\n break\n\n self.index = int(row[NUMBER_INDEX])\n self.correct_answer = row[ANSWER_INDEX]\n self.difficulty = int(row[DIFFICULTY_INDEX])\n self.question_type = row[TYPE_INDEX]\n\n\n #This returns the scored question id.\n def get_id(self):\n return self.scored_section.get_id() + FIELD_SEP + str(num)\n\n#class to make question objects for class analytics, questions missed by majority of class\nclass Class_Question(object):\n def __init__(self, q, freq):\n self.question = q\n self.frequency = freq\n\n\n\n","sub_path":"userauth/Library/Scored.py","file_name":"Scored.py","file_ext":"py","file_size_in_byte":10919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"361519294","text":"# $Id: Alignment.py,v 1.1.2.1 2007/01/28 11:24:30 marcusva Exp $\n#\n# Copyright (c) 2007, Marcus von Appen\n# All rights reserved.\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT 
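For orientation, the scoring rule used in make_sections above is qa - round_rem(qm / 4): one point per correct answer minus a quarter point per miss, with blanks ignored. A sketch under the assumption that round_rem() (imported from Values) rounds half up to the nearest integer:

def raw_score(correct, missed):
    # +1 per correct answer, -1/4 per wrong answer, blanks ignored;
    # round-half-up stands in for the assumed round_rem() behavior.
    return correct - int(missed / 4.0 + 0.5)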
HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\n\"\"\"A widget which controls the alignment of its child.\"\"\"\n\nfrom Bin import Bin\nfrom Constants import *\nimport base\n\nclass Alignment (Bin):\n \"\"\"Alignment (width, height) -> Alignment\n\n A Bin widget class which controls the alignment of its child.\n\n The Alignment widget allows its child to be aligned at its edges\n using the 'align' attribute and set_align() method. Dependant on the\n alignment type (see also ALIGN_TYPES) the child will be placed\n differently within the Alignment.\n\n alignment.align = ALIGN_TOP\n alignment.set_align (ALIGN_TOP)\n\n However, not every alignment make sense, so a ALIGN_TOP | ALIGN_BOTTOM\n would cause the widget to be placed at the top. The priority\n order for the alignment follows. The lower the value, the higher the\n priority.\n\n Alignment Priority\n -----------------------\n ALIGN_TOP 0\n ALIGN_BOTTOM 1\n ALIGN_LEFT 0\n ALIGN_RIGHT 1\n ALIGN_NONE 2\n\n Default action (invoked by activate()):\n None\n \n Mnemonic action (invoked by activate_mnemonic()):\n None\n\n Attributes:\n align - Alignment of the child.\n \"\"\"\n def __init__ (self, width, height):\n Bin.__init__ (self)\n self._align = ALIGN_NONE\n self.minsize = width, height\n\n def set_align (self, align):\n \"\"\"A.set_align (...) 
-> None\n\n Sets the alignment for the child.\n\n Raises a TypeError, if the passed argument is not a value from\n ALIGN_TYPES.\n \"\"\"\n if not constants_is_align (align):\n raise TypeError (\"align must be a value from ALIGN_TYPES\")\n self._align = align\n self.dirty = True\n\n def draw_bg (self):\n \"\"\"A.draw_bg () -> Surface\n\n Draws the Alignment background surface and returns it.\n\n Creates the visible surface of the Alignment and returns it to the\n caller.\n \"\"\"\n return base.GlobalStyle.engine.draw_alignment (self)\n\n def draw (self):\n \"\"\"B.draw () -> None\n\n Draws the Alignment surface and places its child on it.\n \"\"\"\n Bin.draw (self)\n\n rect = self.image.get_rect ()\n if self.child:\n self.child.center = rect.center\n\n if self.align & ALIGN_TOP == ALIGN_TOP:\n self.child.top = rect.top + self.padding\n elif self.align & ALIGN_BOTTOM == ALIGN_BOTTOM:\n self.child.bottom = rect.bottom - self.padding\n if self.align & ALIGN_LEFT == ALIGN_LEFT:\n self.child.left = rect.left + self.padding\n elif self.align & ALIGN_RIGHT == ALIGN_RIGHT:\n self.child.right = rect.right - self.padding\n\n self.image.blit (self.child.image, self.child.rect)\n\n align = property (lambda self: self._align,\n lambda self, var: self.set_align (var),\n doc = \"The alignment to use for the child.\")\n","sub_path":"ocempgui/widgets/Alignment.py","file_name":"Alignment.py","file_ext":"py","file_size_in_byte":4349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"400521287","text":"import nltk, PyPDF2, docx, re, pathlib, sys, os\nfrom nltk.tokenize import word_tokenize\nfrom nltk.corpus import wordnet\nfrom collections import Counter\n\nclass Doc_Review:\n\n def __init__(self):\n print(\"\\nPress CTRL + C to quit\")\n try:\n self.input_file = input(\"\\nFile to search: \")\n self.tag = input(\"Phrase to match: \").lower() # Use lower case for comparison\n self.main()\n except KeyboardInterrupt:\n print('\\n\\nQuitting program . . .')\n print(\"Goodbye!\\n\")\n try:\n sys.exit(0)\n except SystemExit:\n os._exit(0)\n\n def main(self):\n global existing_file\n self.file_check()\n if(existing_file == True):\n result = self.search(self.tokenize(self.get_text()), self.synonyms())\n self.to_export_yes_no(result)\n\n\n ##################################\n # CHECK FOR VALID/SUPPORTED FILE #\n ##################################\n\n def file_check(self):\n global existing_file\n if((pathlib.Path(self.input_file).suffix == \".pdf\") or (pathlib.Path(self.input_file).suffix == \"\")): # Filter out non-PDF, default (no extension) is PDF\n if(os.path.isfile(self.input_file+\".pdf\")): # Error handling for nonexistent files\n existing_file = True\n else:\n print(\"Invalid file. Please enter an existing PDF file.\")\n existing_file = False\n else:\n print(\"Only PDF files are supported. 
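A hedged usage sketch of the Alignment widget described above, in the style of the class docstring (Renderer and Button are assumed to come from the same ocempgui.widgets package):

from ocempgui.widgets import Renderer, Alignment, Button
from ocempgui.widgets.Constants import ALIGN_TOP, ALIGN_LEFT

re = Renderer()
re.create_screen(200, 100)
alignment = Alignment(180, 80)
alignment.child = Button('#OK')
alignment.align = ALIGN_TOP | ALIGN_LEFT  # combined flags resolve by the priority table above
re.add_widget(alignment)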
Please enter a PDF file.\")\n existing_file = False\n\n\n #############################\n # IMPORT TEXT FROM DOCUMENT #\n #############################\n\n def get_text(self):\n pdfReader = PyPDF2.PdfFileReader(open(self.input_file+\".pdf\", 'rb'), strict=False)\n master_string = \"\" # Master string for storing all PDF text\n a = 1\n while(a < pdfReader.numPages): # Loop through all pages in PDF, append to master string\n master_string += pdfReader.getPage(a).extractText()\n a += 1\n return master_string\n\n #########################\n # RETURN TOKENIZED LIST #\n #########################\n\n def tokenize(self, master_string):\n tokenize_input = \" \".join(re.findall(r\"\\w+(?=n't)|n't|\\w+(?=')|'\\w+|\\w+\", master_string, re.IGNORECASE | re.DOTALL)) # Replace contractions with words (e.g. \"n't\" -> \"not\"), convert result (result is list) to string for tokenization\n words = [word for word in word_tokenize(tokenize_input) if word.isalpha()] # Use isalpha() to remove non-words (anything that is not FULLY composed of letters)\n return words\n\n ##################################\n # RETURN LIST OF SYNONYMS OF TAG #\n ##################################\n\n def synonyms(self):\n syns = wordnet.synsets(self.tag) # Get synonyms / related words\n synonyms = [] # List of synonyms\n for s in syns:\n for l in s.lemmas():\n if(l.name() not in synonyms):\n synonyms.append(l.name().lower()) # Add synonyms to list of relevant words\n return synonyms\n\n #######################\n # RETURN OUTPUT/COUNT #\n #######################\n\n def search(self, words, synonyms):\n relevant = [] # Count incidence of tag or synonyms\n for i in words:\n if(i.lower() == self.tag): # Match lower case for comparison\n relevant.append(i.capitalize()) # Add tag to relevant, sentence case for consistency\n else:\n for s in synonyms:\n if(i.lower() == s): # Match lower case for comparison\n relevant.append(i.capitalize()) # Add synonym to relevant, sentence case for consistency\n return Counter(relevant).most_common() # List of unique count (of relevant) sorted by frequency\n\n ##########################\n # EXPORT RESULTS TO WORD #\n ##########################\n\n def to_export_yes_no(self, result): # Checks whether to export (don't export if no results)\n if(len(result) == 0): # If no matches, don't export to word\n print(\"\\nNo matches for \"+\"\\\"\"+self.tag.capitalize()+\"\\\"\")\n else:\n self.export_to_word(result)\n\n def export_to_word(self, output): # Exports the file\n output_file = docx.Document() # Create cover page with doc review info\n output_file.add_paragraph(\"DOC REVIEW RESULTS\", \"Title\") # Title with style\n for i in range(len(output)): # List output with number of instances for each word\n if(output[i][0].lower() == self.tag): # Match lowercase for comparison purposes\n output_file.add_paragraph(\"KEYWORD:\", \"Heading1\")\n output_file.add_paragraph(output[i][0]+': '+str(output[i][1])+\" instances\")\n for i in range(len(output)): # List output with number of synonyms\n if(output[i][0].lower() != self.tag): # Match lowercase for comparison purposes\n output_file.add_paragraph(\"SYNONYMS OF KEYWORD:\", \"Heading1\")\n output_file.add_paragraph(output[i][0]+': '+str(output[i][1])+\" instances\")\n output_file.save(self.input_file+'-RESULTS.docx')\n print(\"\\nJob complete.\"+\"\\n\"+\"Results are available here: \"+self.input_file+'-RESULTS.docx')\n\n\n##############\n# RUN SCRIPT #\n##############\n\nwhile True:\n 
Doc_Review()\n","sub_path":"NLP_doc_review.py","file_name":"NLP_doc_review.py","file_ext":"py","file_size_in_byte":5391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"602140360","text":"from flask_script import Manager\n\nfrom indoor_position import create_app, db\n\nmanager = Manager(create_app)\n\n@manager.command\ndef createdb(drop_first=False):\n if drop_first:\n db.drop_all()\n db.create_all()\n\n\nif __name__ == '__main__':\n manager.run()\n","sub_path":"manage.py","file_name":"manage.py","file_ext":"py","file_size_in_byte":266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"257793090","text":"#!/usr/bin/env python\r\n\r\nimport feedparser\r\ndef news():\r\n try:\r\n url = 'http://feeds.bbci.co.uk/news/world/rss.xml'\r\n #url = 'https://news.google.com/news?pz=1&cf=all&ned=en_ph&hl=en&topic=snc&output=rss'\r\n rss = feedparser.parse(url)\r\n #rss = feedparser.parse('http://feeds.bbci.co.uk/news/world/rss.xml')\r\n #rss = feedparser.parse('http://news.feedzilla.com/en_us/headlines/oddly-enough/top-stories.rss')\r\n\r\n newsfeed = rss.entries[0]['title'] + '. ' + rss.entries[0]['description'] + '. ' + rss.entries[1]['title'] + '. ' + rss.entries[1]['description'] + '. ' + rss.entries[2]['title'] + '. ' + rss.entries[2]['description'] + '. ' + rss.entries[3]['title'] + '. ' + rss.entries[3]['description'] + '. '\r\n\r\n # print newsfeed\r\n\r\n # Today's news from BBC\r\n news = 'And now, the latest stories from the World section of the BBC News. ' + newsfeed\r\n\r\n except (IndexError, KeyError):\r\n news = 'Failed to reach BBC News'\r\n\r\n # print news\r\n return news\r\n","sub_path":"get_url_news8.py","file_name":"get_url_news8.py","file_ext":"py","file_size_in_byte":1020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"321550381","text":"import unittest\nfrom mycode import *\n\nclass MyFirstTest(unittest.TestCase):\n \n# def setUp(self):\n# print(\"We are about to start\")\n# \n# def tearDown(self):\n# print(\"We have just finished\")\n \n def test_hello(self):\n self.assertEqual(hello_world(), \"hello world\")\n \n def test_list(self):\n self.assertEqual(len(create_list(10)), 10)\n a = create_list(10)\n self.assertGreater(a[9], a[0])\n \n\nif __name__ == '__main__':\n unittest.main()","sub_path":"basic_tdd_example/mytests.py","file_name":"mytests.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"483662467","text":"import os\nimport datetime\nfrom utils.timer import Timer\n\nimport numpy as np\nimport cv2\n\nimport tensorflow as tf\n\nfrom config.configadapters import EFusionConfig\nfrom model.model import Model\nfrom model.cnn.yolov2tiny import YoloTiny\n\nfrom model.utils.utils import sigmoid, bbox_iou, softmax\n\n#slim = tf.contrib.slim\n\nfrom keras.models import Model as KModel\nfrom keras.layers import Reshape, Activation, Conv2D, Input, MaxPooling2D, BatchNormalization, Flatten, Dense, Lambda\nfrom keras.layers.merge import Maximum\nfrom keras.layers.advanced_activations import LeakyReLU\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard\nfrom keras.optimizers import SGD, Adam, RMSprop\n\nfrom model.utils.preprocessing2 import BatchGenerator\nfrom model.utils.utils import BoundBox\n\n\nclass E_FUSION2MLP(Model):\n\n def __init__(self):\n config = EFusionConfig()\n self.classes = config.classes\n 
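A compact sketch of the same feed logic as news() above, with the headline string built in a loop rather than by manual indexing (URL kept; the entry count becomes a parameter):

import feedparser

def top_stories(url='http://feeds.bbci.co.uk/news/world/rss.xml', n=4):
    rss = feedparser.parse(url)
    if rss.bozo and not rss.entries:
        return 'Failed to reach BBC News'
    return ' '.join('%s. %s.' % (e['title'], e['description'])
                    for e in rss.entries[:n])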
self.num_class = len(self.classes)\n self.image_size_width = config.image_size_width\n self.image_size_height = config.image_size_height\n self.grid_h = config.grid_h\n self.grid_w = config.grid_w\n self.boxes_per_cell = config.boxes_per_cell\n self.output_size = (self.grid_h * self.grid_w) * (self.num_class + self.boxes_per_cell * 5)\n self.scale = 1.0 * ((self.image_size_width + self.image_size_height)/2) / ((self.grid_h + self.grid_w)/2)\n self.num_cams = config.num_cameras\n self.object_scale = config.object_scale\n self.no_object_scale = config.no_object_scale\n self.class_scale = config.class_scale\n self.coord_scale = config.coord_scale\n\n self.learning_rate = config.learning_rate\n self.batch_size = config.batch_size\n self.alpha = config.alpha\n self.decay_steps = config.decay_steps\n self.decay_rate = config.decay_rate\n self.staircase = config.staircase\n self.weight_file = config.weight_file\n self.output_file = config.output_file\n # Eman - From the internet\n self.nb_box = 5\n max_box_per_image = 10\n self.class_wt = np.ones(self.num_class, dtype='float32')\n self.warmup_bs = 250\n\n self.images = [Input(shape=(self.image_size_height, self.image_size_width, 3)) for _ in range(self.num_cams)]\n self.true_boxes = [Input(shape=(1, 1, 1, max_box_per_image, 4)) for _ in range(self.num_cams)]\n\n self.model = KModel(inputs=[self.images[0], self.images[1], self.images[2], self.true_boxes[0], self.true_boxes[1], self.true_boxes[2]], outputs=self.build_network())\n\n yoloModel = YoloTiny()\n yoloModel.model.load_weights(\"D:\\\\Files\\\\Box Sync\\\\research\\\\source code\\\\E_System\\\\weights\\\\yolov2tinypretrained.h5\")\n\n model_layer_start_index = 0\n for index in range(len(yoloModel.model.layers)):\n if index == 31:\n break\n self.model.layers[model_layer_start_index].set_weights(yoloModel.model.layers[index].get_weights())\n self.model.layers[model_layer_start_index + 1].set_weights(yoloModel.model.layers[index].get_weights())\n self.model.layers[model_layer_start_index + 2].set_weights(yoloModel.model.layers[index].get_weights())\n model_layer_start_index = model_layer_start_index + 3\n\n # # initialize the weights of the detection layer\n layer = self.model.layers[-(4*self.num_cams)]\n weights = layer.get_weights()\n\n new_kernel = np.random.normal(size=weights[0].shape) / (self.grid_h * self.grid_w)\n new_bias = np.random.normal(size=weights[1].shape) / (self.grid_h * self.grid_w)\n\n layer.set_weights([new_kernel, new_bias])\n #self.model.load_weights(\"D:\\\\Files\\\\Box Sync\\\\research\\\\source code\\\\E_System\\\\weights\\\\esystempretrained.h5\")\n\n # print a summary of the whole model\n self.model.summary()\n\n \"\"\"\n For training, a loss function will be ran for each camera output layer.\n Each camera output layer is used for each camera.\n \"\"\"\n\n self.anchors = config.anchors\n self.n_anchors = config.n_anchors\n\n self.epoch = config.epoch\n self.save_iter = config.save_iter\n self.max_iter = config.max_iter\n self.summary_iter = config.summary_iter\n\n def build_network(self):\n \"\"\"\n Build individual yolo network that each camera would consist of.\n \"\"\"\n nets = [self.build_net(self.images[i], i) for i in range(self.num_cams)]\n \"\"\"\n Combine filtered images from each yolo network using element-wise maximum.\n \"\"\"\n fused_net = None\n\n if self.num_cams < 2:\n fused_net = nets[0]\n else:\n fused_net = Maximum()([nets[index] for index in range(self.num_cams)])\n\n fused_net = Dense(1024, activation='sigmoid')(fused_net)\n #x = 
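The fusion step above merges the per-camera convolutional branches with an element-wise Maximum before the shared dense layers; a toy sketch of the same pattern with small dense branches (shapes are arbitrary):

from keras.models import Model
from keras.layers import Input, Dense
from keras.layers.merge import Maximum

inputs = [Input(shape=(64,)) for _ in range(3)]
branches = [Dense(32, activation='relu')(x) for x in inputs]
fused = Maximum()(branches)              # element-wise max across the three branches
output = Dense(1, activation='sigmoid')(fused)
model = Model(inputs=inputs, outputs=output)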
Dropout(0.5)(x)\n fused_net = Dense(512, activation='sigmoid')(fused_net)\n #x = Dropout(0.5)(x)\n fused_net = Dense(256, activation='sigmoid')(fused_net)\n\n output_layers = [self.build_object_detector_layer(fused_net, cam_index) for cam_index in range(self.num_cams)]\n\n return output_layers\n\n def build_object_detector_layer(self, fused_net, cam_index):\n # Layer 9 - Object detection layer\n object_detector_net = Conv2D(self.nb_box * (4 + 1 + self.num_class),\n (1, 1), strides=(1, 1),\n padding='same',\n name='conv_9_' + str(cam_index),\n kernel_initializer='lecun_normal')(fused_net) # Eman changed here.\n object_detector_net = Reshape((self.grid_h, self.grid_w, self.nb_box, 4 + 1 + self.num_class))(object_detector_net)\n object_detector_net = Lambda(lambda args: args[0])([object_detector_net, self.true_boxes[cam_index]])\n\n return object_detector_net\n\n\n @staticmethod\n def build_net(input_image, index):\n # Layer 1\n x = Conv2D(16, (3, 3), strides=(1, 1), padding='same', name='conv_1_' + str(index), use_bias=False)(input_image)\n x = BatchNormalization(name='norm_1_' + str(index))(x)\n x = LeakyReLU(alpha=0.1)(x)\n x = MaxPooling2D(pool_size=(2, 2))(x)\n\n # Layer 2 - 5\n for i in range(0, 4):\n x = Conv2D(32 * (2 ** i), (3, 3), strides=(1, 1), padding='same', name='conv_' + str(i + 2) + '_' + str(index),\n use_bias=False)(x)\n x = BatchNormalization(name='norm_' + str(i + 2) + '_' + str(index))(x)\n x = LeakyReLU(alpha=0.1)(x)\n x = MaxPooling2D(pool_size=(2, 2))(x)\n\n # Layer 6\n x = Conv2D(512, (3, 3), strides=(1, 1), padding='same', name='conv_6_' + str(index), use_bias=False)(x)\n x = BatchNormalization(name='norm_6_' + str(index))(x)\n x = LeakyReLU(alpha=0.1)(x)\n x = MaxPooling2D(pool_size=(2, 2), strides=(1, 1), padding='same')(x)\n\n # Layer 7 - 8\n for i in range(0, 2):\n x = Conv2D(1024, (3, 3), strides=(1, 1), padding='same', name='conv_' + str(i + 7) + '_' + str(index), use_bias=False)(x)\n x = BatchNormalization(name='norm_' + str(i + 7) + '_' + str(index))(x)\n x = LeakyReLU(alpha=0.1)(x)\n\n return x\n\n def loss_layer1(self, y_true, y_pred):\n return self.loss_layer(y_true, y_pred, 0)\n\n def loss_layer2(self, y_true, y_pred):\n return self.loss_layer(y_true, y_pred, 1)\n\n def loss_layer3(self, y_true, y_pred):\n return self.loss_layer(y_true, y_pred, 2)\n\n def loss_layer(self, y_true, y_pred, cam_index):\n mask_shape = tf.shape(y_true)[:4]\n cell_x = tf.to_float(\n tf.reshape(tf.tile(tf.range(int(self.grid_w)), [int(self.grid_h)]), (1, int(self.grid_h), int(self.grid_w), 1, 1)))\n cell_y = tf.transpose(cell_x, (0, 2, 1, 3, 4))\n\n cell_grid = tf.tile(tf.concat([cell_x, cell_y], -1), [self.batch_size, 1, 1, 5, 1])\n\n coord_mask = tf.zeros(mask_shape)\n conf_mask = tf.zeros(mask_shape)\n class_mask = tf.zeros(mask_shape)\n\n seen = tf.Variable(0.)\n\n total_recall = tf.Variable(0.)\n\n \"\"\"\n Adjust prediction\n \"\"\"\n ### adjust x and y\n pred_box_xy = tf.sigmoid(y_pred[..., :2]) + cell_grid\n\n ### adjust w and h\n pred_box_wh = tf.exp(y_pred[..., 2:4]) * np.reshape(self.anchors, [1, 1, 1, self.nb_box, 2])\n\n ### adjust confidence\n pred_box_conf = tf.sigmoid(y_pred[..., 4])\n\n ### adjust class probabilities\n pred_box_class = y_pred[..., 5:]\n\n \"\"\"\n Adjust ground truth\n \"\"\"\n ### adjust x and y\n true_box_xy = y_true[..., 0:2] # relative position to the containing cell\n\n ### adjust w and h\n true_box_wh = y_true[..., 2:4] # number of cells accross, horizontally and vertically\n\n ### adjust confidence\n true_wh_half = true_box_wh / 2.\n 
true_mins = true_box_xy - true_wh_half\n true_maxes = true_box_xy + true_wh_half\n\n pred_wh_half = pred_box_wh / 2.\n pred_mins = pred_box_xy - pred_wh_half\n pred_maxes = pred_box_xy + pred_wh_half\n\n intersect_mins = tf.maximum(pred_mins, true_mins)\n intersect_maxes = tf.minimum(pred_maxes, true_maxes)\n intersect_wh = tf.maximum(intersect_maxes - intersect_mins, 0.)\n intersect_areas = intersect_wh[..., 0] * intersect_wh[..., 1]\n\n true_areas = true_box_wh[..., 0] * true_box_wh[..., 1]\n pred_areas = pred_box_wh[..., 0] * pred_box_wh[..., 1]\n\n union_areas = pred_areas + true_areas - intersect_areas\n iou_scores = tf.truediv(intersect_areas, union_areas)\n\n true_box_conf = iou_scores * y_true[..., 4]\n\n ### adjust class probabilities\n true_box_class = tf.to_int32(y_true[..., 5])\n\n \"\"\"\n Determine the masks\n \"\"\"\n ### coordinate mask: simply the position of the ground truth boxes (the predictors)\n coord_mask = tf.expand_dims(y_true[..., 4], axis=-1) * self.coord_scale\n\n ### confidence mask: penelize predictors + penalize boxes with low IOU\n # penalize the confidence of the boxes, which have IOU with some ground truth box < 0.6\n true_xy = self.true_boxes[cam_index][..., 0:2]\n true_wh = self.true_boxes[cam_index][..., 2:4]\n\n true_wh_half = true_wh / 2.\n true_mins = true_xy - true_wh_half\n true_maxes = true_xy + true_wh_half\n\n pred_xy = tf.expand_dims(pred_box_xy, 4)\n pred_wh = tf.expand_dims(pred_box_wh, 4)\n\n pred_wh_half = pred_wh / 2.\n pred_mins = pred_xy - pred_wh_half\n pred_maxes = pred_xy + pred_wh_half\n\n intersect_mins = tf.maximum(pred_mins, true_mins)\n intersect_maxes = tf.minimum(pred_maxes, true_maxes)\n intersect_wh = tf.maximum(intersect_maxes - intersect_mins, 0.)\n intersect_areas = intersect_wh[..., 0] * intersect_wh[..., 1]\n\n true_areas = true_wh[..., 0] * true_wh[..., 1]\n pred_areas = pred_wh[..., 0] * pred_wh[..., 1]\n\n union_areas = pred_areas + true_areas - intersect_areas\n iou_scores = tf.truediv(intersect_areas, union_areas)\n\n best_ious = tf.reduce_max(iou_scores, axis=4)\n conf_mask = conf_mask + tf.to_float(best_ious < 0.6) * (1 - y_true[..., 4]) * self.no_object_scale\n\n # penalize the confidence of the boxes, which are reponsible for corresponding ground truth box\n conf_mask = conf_mask + y_true[..., 4] * self.object_scale\n\n ### class mask: simply the position of the ground truth boxes (the predictors)\n class_mask = y_true[..., 4] * tf.gather(self.class_wt, true_box_class) * self.class_scale\n\n \"\"\"\n Warm-up training\n \"\"\"\n no_boxes_mask = tf.to_float(coord_mask < self.coord_scale / 2.)\n seen = tf.assign_add(seen, 1.)\n\n true_box_xy, true_box_wh, coord_mask = tf.cond(tf.less(seen, self.warmup_bs),\n lambda: [true_box_xy + (0.5 + cell_grid) * no_boxes_mask,\n true_box_wh + tf.ones_like(true_box_wh) * np.reshape(\n self.anchors,\n [1, 1, 1, self.nb_box, 2]) * no_boxes_mask,\n tf.ones_like(coord_mask)],\n lambda: [true_box_xy,\n true_box_wh,\n coord_mask])\n\n \"\"\"\n Finalize the loss\n \"\"\"\n nb_coord_box = tf.reduce_sum(tf.to_float(coord_mask > 0.0))\n nb_conf_box = tf.reduce_sum(tf.to_float(conf_mask > 0.0))\n nb_class_box = tf.reduce_sum(tf.to_float(class_mask > 0.0))\n\n loss_xy = tf.reduce_sum(tf.square(true_box_xy - pred_box_xy) * coord_mask) / (nb_coord_box + 1e-6) / 2.\n loss_wh = tf.reduce_sum(tf.square(true_box_wh - pred_box_wh) * coord_mask) / (nb_coord_box + 1e-6) / 2.\n loss_conf = tf.reduce_sum(tf.square(true_box_conf - pred_box_conf) * conf_mask) / (nb_conf_box + 1e-6) / 2.\n loss_class = 
tf.nn.sparse_softmax_cross_entropy_with_logits(labels=true_box_class, logits=pred_box_class)\n loss_class = tf.reduce_sum(loss_class * class_mask) / (nb_class_box + 1e-6)\n\n loss = loss_xy + loss_wh + loss_conf + loss_class\n\n nb_true_box = tf.reduce_sum(y_true[..., 4])\n nb_pred_box = tf.reduce_sum(tf.to_float(true_box_conf > 0.5) * tf.to_float(pred_box_conf > 0.3))\n\n current_recall = nb_pred_box / (nb_true_box + 1e-6)\n total_recall = tf.assign_add(total_recall, current_recall)\n\n loss = tf.Print(loss, [tf.zeros((1))], message='\\nLoss for cam index: ' + str(cam_index) + '\\t', summarize=1000)\n loss = tf.Print(loss, [loss_xy], message='Loss XY \\t', summarize=1000)\n loss = tf.Print(loss, [loss_wh], message='Loss WH \\t', summarize=1000)\n loss = tf.Print(loss, [loss_conf], message='Loss Conf \\t', summarize=1000)\n loss = tf.Print(loss, [loss_class], message='Loss Class \\t', summarize=1000)\n loss = tf.Print(loss, [loss], message='Total Loss \\t', summarize=1000)\n loss = tf.Print(loss, [current_recall], message='Current Recall \\t', summarize=1000)\n loss = tf.Print(loss, [total_recall / seen], message='Average Recall \\t', summarize=1000)\n\n return loss\n\n def decode_netout(self, netout, obj_threshold=0.35, nms_threshold=0.3):\n grid_h, grid_w, nb_box = netout.shape[:3]\n\n boxes = []\n\n # decode the output by the network\n netout[..., 4] = sigmoid(netout[..., 4])\n netout[..., 5:] = netout[..., 4][..., np.newaxis] * softmax(netout[..., 5:])\n netout[..., 5:] *= netout[..., 5:] > obj_threshold\n\n for row in range(grid_h):\n for col in range(grid_w):\n for b in range(nb_box):\n # from 4th element onwards are confidence and class classes\n classes = netout[row, col, b, 5:]\n\n if np.sum(classes) > 0:\n # first 4 elements are x, y, w, and h\n x, y, w, h = netout[row, col, b, :4]\n\n x = (col + sigmoid(x)) / grid_w # center position, unit: image width\n y = (row + sigmoid(y)) / grid_h # center position, unit: image height\n w = self.anchors[2 * b + 0] * np.exp(w) / grid_w # unit: image width\n h = self.anchors[2 * b + 1] * np.exp(h) / grid_h # unit: image height\n confidence = netout[row, col, b, 4]\n\n box = BoundBox(x, y, w, h, confidence, classes)\n\n boxes.append(box)\n\n # suppress non-maximal boxes\n for c in range(1): # 1 class (label) Eman needs to be config\n sorted_indices = list(reversed(np.argsort([box.classes[c] for box in boxes])))\n\n for i in range(len(sorted_indices)):\n index_i = sorted_indices[i]\n\n if boxes[index_i].classes[c] == 0:\n continue\n else:\n for j in range(i + 1, len(sorted_indices)):\n index_j = sorted_indices[j]\n\n if bbox_iou(boxes[index_i], boxes[index_j]) >= nms_threshold:\n boxes[index_j].classes[c] = 0\n\n # remove the boxes which are less likely than a obj_threshold\n boxes = [box for box in boxes if box.get_score() > obj_threshold]\n\n return boxes\n\n def predict(self, image):\n self.model.load_weights(\"D:\\\\Files\\\\Box Sync\\\\research\\\\source code\\\\E_System\\\\weights\\\\model.h5\")\n\n input_image1 = self.adjust_image(image[0])\n input_image2 = self.adjust_image(image[1])\n input_image3 = self.adjust_image(image[2])\n\n dummy_array = dummy_array = np.zeros((1, 1, 1, 1, 10, 4)) # 10 is should be config\n (netout1, netout2, netout3) = self.model.predict([input_image1, input_image2, input_image3, dummy_array, dummy_array, dummy_array])\n\n netout1 = netout1[0]\n netout2 = netout2[0]\n netout3 = netout3[0]\n\n boxes1 = self.decode_netout(netout1)\n boxes2 = self.decode_netout(netout2)\n boxes3 = self.decode_netout(netout3)\n\n 
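# --- editor's note (assumption) ---------------------------------------------
# decode_netout() above calls sigmoid(), softmax() and bbox_iou(), which are
# not defined anywhere in this excerpt. A minimal, self-contained sketch that
# is consistent with how they are used here (element-wise activations on
# numpy arrays; IoU between two BoundBox-style objects carrying centre
# coordinates x, y and sizes w, h) -- a guess at the missing helpers, not the
# author's original code:
import numpy as np

def sigmoid(x):
    # plain logistic function, applied element-wise
    return 1. / (1. + np.exp(-x))

def softmax(x, axis=-1):
    # subtract the max for numerical stability before exponentiating
    e_x = np.exp(x - np.max(x, axis=axis, keepdims=True))
    return e_x / e_x.sum(axis=axis, keepdims=True)

def bbox_iou(box1, box2):
    # boxes are assumed to expose centre (x, y) and size (w, h) attributes
    x1_min, x1_max = box1.x - box1.w / 2., box1.x + box1.w / 2.
    y1_min, y1_max = box1.y - box1.h / 2., box1.y + box1.h / 2.
    x2_min, x2_max = box2.x - box2.w / 2., box2.x + box2.w / 2.
    y2_min, y2_max = box2.y - box2.h / 2., box2.y + box2.h / 2.
    iw = max(0., min(x1_max, x2_max) - max(x1_min, x2_min))
    ih = max(0., min(y1_max, y2_max) - max(y1_min, y2_min))
    inter = iw * ih
    union = box1.w * box1.h + box2.w * box2.h - inter
    return inter / union if union > 0 else 0.
# -----------------------------------------------------------------------------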
return boxes1, boxes2, boxes3\n\n def adjust_image(self, image):\n image = cv2.resize(image, (self.image_size_width, self.image_size_height))\n image = self.normalize(image)\n\n input_image = image[:, :, ::-1]\n input_image = np.expand_dims(input_image, 0)\n return input_image\n\n def train(self, train_imgs, valid_imgs):\n train_times = 1\n valid_times = 1\n saved_weights_name = 'esystempretrained.h5'\n ############################################\n # Compile the model\n ############################################\n losses = {\n \"lambda_1\": self.loss_layer1,\n \"lambda_2\": self.loss_layer2,\n \"lambda_3\": self.loss_layer3,\n }\n\n optimizer = Adam(lr=self.learning_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)\n self.model.compile(loss=losses, optimizer=optimizer, metrics=['accuracy'])\n\n ############################################\n # Make train and validation generators\n ############################################\n\n generator_config = {\n 'IMAGE_H': self.image_size_height,\n 'IMAGE_W': self.image_size_width,\n 'GRID_H': self.grid_h,\n 'GRID_W': self.grid_w,\n 'BOX': self.nb_box,\n 'LABELS': ['person'],\n 'CLASS': 1,\n 'ANCHORS': self.anchors,\n 'BATCH_SIZE': self.batch_size,\n 'TRUE_BOX_BUFFER': 10,\n }\n\n # batch generater\n train_batch = BatchGenerator(train_imgs, generator_config, norm=self.normalize)\n valid_batch = BatchGenerator(valid_imgs, generator_config, 0, norm=self.normalize)\n\n # early stopping\n early_stop = EarlyStopping(\n monitor='val_loss',\n min_delta=0.001,\n patience=4,\n mode='min',\n verbose=1)\n # checkpoint\n saved_weights_name = \"D:\\\\Files\\\\Box Sync\\\\research\\\\source code\\\\E_System\\\\weights\\\\\" + saved_weights_name.replace(\" \", \"\")\n checkpoint = ModelCheckpoint(\n saved_weights_name,\n monitor='val_loss',\n verbose=1,\n save_best_only=True,\n mode='min',\n period=1)\n\n # TensorBoard counter\n dir_name = \"yolo_tiny_yo\"\n # tb_counter = len([log for log in os.listdir(\"logs\") if dir_name in log]) + 1\n log_dir = \"logs/\" + dir_name + \"_\" + str(1)\n # TensorBoard dir\n if not os.path.exists(log_dir):\n os.makedirs(log_dir)\n # TensorBoard\n tensorboard = TensorBoard(log_dir=log_dir,\n histogram_freq=0,\n write_graph=True,\n write_images=False)\n\n # model fit\n self.model.fit_generator(generator=train_batch,\n steps_per_epoch=len(train_batch) * train_times,\n epochs=self.epoch,\n verbose=1,\n validation_data=valid_batch,\n validation_steps=len(valid_batch) * valid_times,\n callbacks=[early_stop, checkpoint, tensorboard],\n workers=2,\n max_queue_size=8)\n\n def normalize(self, image):\n return image / 255.\n\n\ndef leaky_relu(alpha):\n def op(inputs):\n return tf.nn.relu(inputs) - alpha * tf.nn.relu(-inputs)\n return op\n\n\ndef slice_tensor(x, start, end=None):\n y = None\n if not end is None:\n if end < 0:\n y = x[..., start:]\n\n else:\n if end is None:\n end = start\n y = x[..., start:end + 1]\n\n return y\n\n\ndef resize_image(w, h, im):\n imsz = cv2.resize(im, (w, h))\n imsz = imsz / 255.\n imsz = imsz[:,:,::-1]\n return imsz\n","sub_path":"model/cnn/efusion2mlp.py","file_name":"efusion2mlp.py","file_ext":"py","file_size_in_byte":21406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"407501314","text":"from random import random\n\nn = random() * 9000 + 1000\nn = int(n)\nprint(n)\n\narr = []\n\ndef sum1(n):\n\tif (n < 10):\n\t\treturn n;\n\treturn n % 10 + sum1(int(n / 10));\n\ndef my_sum(n):\n\tprint(\"n = \", n)\n\tif n > 
10:\n\t\tmy_sum(int(n/10))\n\t\tarr.append(int(n%10))\n\telse:\n\t\tarr.append(n)\n \nmy_sum(n)\nprint(sum1(n))\nprint(arr, sum(arr))","sub_path":"python-advanced-materials/potoki/test1/ttt.py","file_name":"ttt.py","file_ext":"py","file_size_in_byte":323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"139738894","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\n@author: abhilash\r\n\"\"\"\r\n#load the csv file using read_csv function of pandas library\r\nfrom pandas import read_csv\r\nfrom sklearn.model_selection import KFold\r\nfrom sklearn.model_selection import cross_val_score\r\nfrom sklearn.linear_model import LogisticRegression\r\n\r\nfilename = 'pima-indians-diabetes.csv'\r\n#url = 'https://myfilecsv.com/test.csv'\r\nnames = ['preg', 'plas', 'pres', 'skin', 'test', 'mass', 'pedi', 'age', 'class']\r\ndataframe = read_csv(filename, names=names)\r\n\r\narray = dataframe.values\r\n\r\n#splitting the array to input and output\r\nX = array[:,0:8]\r\nY = array[:,8]\r\n\r\n\r\n\r\nkfold = KFold(n_splits=10, random_state = 7)\r\nmodel = LogisticRegression(solver='liblinear')\r\nscoring = 'roc_auc'\r\n\r\nresults = cross_val_score(model, X, Y, cv=kfold, scoring=scoring)\r\nprint(\"AUC : %.3f (%.3f) \" % (results.mean(), results.std()))\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"algorithm_evaluation_metric_auc.py","file_name":"algorithm_evaluation_metric_auc.py","file_ext":"py","file_size_in_byte":875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"461630933","text":"import sys\nimport heapq\nimport math\nfrom config import INF\n\nclass DijkstraQueue:\n def create_priority_queue(self, source_index, counter, const_of_sort_with_heap):\n queue = []\n counter += const_of_sort_with_heap\n heapq.heappush(queue, (0, source_index))\n return queue\n\n def find_shortest_path(self, graph, source_index, destination_index):\n const_of_sort_with_heap = int(len(graph) * math.log(len(graph)))\n distances = []\n visitedNodes = []\n length = len(graph)\n counter = 0\n \n for i in range(length):\n distances.append(INF)\n visitedNodes.append(False)\n \n distances[source_index] = 0\n\n queue = self.create_priority_queue(source_index, counter, const_of_sort_with_heap)\n\n while (len(queue) != 0):\n counter += 1\n evaNode = heapq.heappop(queue)\n visitedNodes.append(evaNode)\n evaNodeName = evaNode[1]\n #evaluate neighbours\n for i in range(1, length):\n if not visitedNodes[i]:\n if graph[evaNodeName][i] != INF:\n weight = graph[evaNodeName][i]\n newWeight = weight + distances[evaNodeName]\n if newWeight < distances[i]:\n distances[i] = newWeight\n heapq.heappush(queue, (newWeight, i))\n counter += const_of_sort_with_heap\n \n if visitedNodes[destination_index]:\n return distances[destination_index], counter\n\n return distances[destination_index], counter\n\n ","sub_path":"dijkstra_prio_que.py","file_name":"dijkstra_prio_que.py","file_ext":"py","file_size_in_byte":1674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"249095447","text":"\"\"\"\ncovered for loop and while loop to iterate the list\n\"\"\"\n\nfav_movies=[\"The holy grill\",\"the life of brain\"]\n# using for loop\nfor each_movies in fav_movies:\n print(each_movies)\n\n# using while loop:\ncount=0\nwhile(count\n

Batch Calculation of Iteration {0!s}:
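# --- editor's note (assumption) ---------------------------------------------
# Extraction stripped the HTML markup from the record around this point; only
# the heading text above and the trailing \"\"\".format((iter)) call below
# survived. The original most likely built a per-iteration section header,
# roughly like this (the tag is a guess, not the author's original markup):
def batch_header(iteration):
    # wrap the surviving heading text in a minimal HTML fragment
    return '<h2>Batch Calculation of Iteration {0!s}:</h2>'.format(iteration)

# usage: batch_header(2) -> '<h2>Batch Calculation of Iteration 2:</h2>'
# -----------------------------------------------------------------------------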
\n \n \"\"\".format((iter))\n\n html_temp = terrplant_tables.table_all(terr)\n\n out_html_temp = batch_header + html_temp\n out_html_all[iter]=out_html_temp\n\n \ndef loop_html(thefile):\n reader = csv.reader(thefile.file.read().splitlines())\n header = reader.next()\n # logger.info(header)\n i=1\n ####Create a job queue and add each row of batch temeplate file as a task into it\n for row in reader:\n job_q.put([row, i])\n i=i+1\n\n all_threads = [Thread(target=html_table, args=(job_q, )) for j in range(thread_count)]\n for x in all_threads:\n x.start()\n for x in all_threads:\n job_q.put(None)\n for x in all_threads:\n x.join()\n\n html_timestamp = terrplant_tables.timestamp(\"\", jid_batch[0])\n out_html_all_sort = OrderedDict(sorted(out_html_all.items()))\n sum_html = terrplant_tables.table_all_sum(terrplant_tables.sumheadings, terrplant_tables.tmpl, application_rate, incorporation_depth, runoff_fraction, drift_fraction, ec25_nonlisted_seedling_emergence_monocot, ec25_nonlisted_seedling_emergence_dicot, noaec_listed_seedling_emergence_monocot, noaec_listed_seedling_emergence_dicot, \n rundry_out, runsemi_out, spray_out, totaldry_out, totalsemi_out, \n nms_rq_dry_out, nms_rq_semi_out, nms_rq_spray_out, \n lms_rq_dry_out, lms_rq_semi_out, lms_rq_spray_out, \n nds_rq_dry_out, nds_rq_semi_out, nds_rq_spray_out, \n lds_rq_dry_out, lds_rq_semi_out, lds_rq_spray_out)\n\n return html_timestamp + sum_html + \"\".join(out_html_all_sort.values())\n\n\n@require_POST\ndef terrplantBatchOutputPage(request):\n thefile = request.FILES['upfile']\n iter_html=loop_html(thefile)\n\n return iter_html, terr_all, jid_batch","sub_path":"models/terrplant/terrplant_batchoutput_old.py","file_name":"terrplant_batchoutput_old.py","file_ext":"py","file_size_in_byte":7603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"380839501","text":"from tabulate import tabulate\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom kappalib.plot import heatmap,linear_regression\n\n#class ANOVA():\n\nclass Descriptive():\n __slots__ = ['_n', '_missing', '_mean', '_median', '_variance', \n '_minimum', '_maximum', '_header', '_body']\n\n def __init__(self,result):\n for key, value in result.items():\n self.__setattr__('_' + key, value)\n self._set_table()\n \n def __getattr__(self, attr):\n raise ValueError(f'Oops, I Caught An Error! 
{attr.upper()} was not defined.')\n \n def n(self):\n return self._n\n \n def missing(self): \n return self._missing\n \n def mean(self): \n return self._mean\n \n def median(self): \n return self._median\n \n def variance(self): \n return self._variance\n \n def minimum(self): \n return self._minimum\n \n def maximum(self): \n return self._maximum\n\n def _set_table(self):\n self._header = ['', 'x']\n self._body = [['Number', self._n],\n ['Missing', self._missing],\n ['Mean', self._mean],\n ['Median', self._median],\n ['Variance', self._variance],\n ['Minimum', self._minimum],\n ['Maximum', self._maximum]]\n\n def summary(self):\n print('Descriptive')\n print(tabulate(self._body, self._header, tablefmt=\"psql\", floatfmt=\".3f\", stralign='center'))\n\nclass Correlation(): \n __slots__ = ['_names', '_r', '_stats', '_v', '_pvalue', '_header',\n '_body','_alternative', '_CI_l', '_CI_u', '_alpha', '_data']\n\n def __init__(self,result, confidence_interval):\n for key, value in result.items():\n self.__setattr__('_' + key, value)\n self._names = list(result['data'].keys())\n self._set_table(confidence_interval)\n \n def CI(self):\n return (self._CI_l, self._CI_u) \n \n def r(self):\n return self._r\n\n def pvalue(self):\n return self._pvalue\n\n def degrees_fredom(self):\n return self._v\n\n def __getattr__(self, attr):\n raise ValueError(f'Oops, I Caught An Error! {attr.upper()} was not defined.')\n \n def _set_table(self, confidence_interval):\n \n _r = np.tril(self._r)\n _p = np.tril(self._pvalue)\n\n self._body = list()\n self._header = ['',''] + self._names\n \n if confidence_interval:\n stats_descript = 'Pearson\\'s R\\np-value\\n{0}% CI Upper\\n{0}% CI Lower\\n'.format((1-self._alpha)*100)\n _cil = np.tril(self._CI_l)\n _ciu = np.tril(self._CI_u)\n \n for j,i in enumerate(self._names):\n data = list()\n k=0\n for r,p,cu,cl in zip(_r[j],_p[j],_ciu[j],_cil[j]):\n pvalue = 0 if p == 0 else '<0.001' if p < 0.001 else round(p,3)\n data.append('{:.3f}\\n{}\\n{:.3f}\\n{:.3f}\\n'.format(r,pvalue,cu,cl) if j>k else '-\\n-\\n-\\n-\\n')\n k += 1\n self._body.append([i,stats_descript]+data)\n \n else:\n stats_descript = 'Pearson\\'s R\\np-value\\n'\n \n for j,i in enumerate(self._names):\n data = list()\n k=0\n for r,p in zip(_r[j],_p[j]):\n pvalue = 0 if p == 0 else '<0.001' if p < 0.001 else round(p,3)\n data.append('{:.3f}\\n{}\\n'.format(r,pvalue) if j>k else '-\\n-\\n')\n k += 1\n self._body.append([i,stats_descript]+data)\n \n def summary(self):\n print('Correlation Matrix')\n print(tabulate(self._body, self._header, tablefmt=\"grid\", floatfmt=\".3f\", stralign='center'))\n if self._alternative == 'less':\n print(r'Note. H0: negative correlation')\n elif self._alternative == 'greater': \n print(r'Note. 
H0: positive correlation')\n\n def matrix(self):\n heatmap(self._r,vmin=-1,vmax=1,annot=True,cbar=True,xticklabels=self._names,yticklabels=self._names)\n \n def scatter(self,figsize=(10,10)):\n n = len(self._names)\n fig = plt.figure(figsize=figsize)\n\n z = 1\n for k,i in enumerate(self._names):\n for l,j in enumerate(self._names):\n if l<=k:\n ax = fig.add_subplot(n,n,z)\n linear_regression(self._data[i],self._data[j],plot=True,ax=ax)\n ax.set_xticks([])\n ax.set_yticks([])\n if l==0:\n ax.set_ylabel(self._names[k],rotation=0,labelpad=10, fontweight='bold')\n if k==n-1:\n ax.set_xlabel(self._names[l],labelpad=10, fontweight='bold')\n z+=1\n\n def summary_convert(self, format='latex'):\n \"\"\"\n Convert the summary table into others formats.\n \n\n Parameters\n ----------\n format : {'latex', 'html'}\n\n Returns\n -------\n Table string on the required format\n\n \"\"\"\n \n if format == 'latex':\n print('Correlation Matrix')\n print(tabulate(self._body, self._header, tablefmt=\"latex\", floatfmt=\".3f\", stralign='center'))\n \n if self._alternative == 'less':\n print(r'Note. $H_0$: negative correlation')\n elif self._alternative == 'greater': \n print(r'Note. $H_0$: positive correlation')\n \n elif format == 'html':\n print('' + tabulate(self._body, self._header, tablefmt=\"html\", floatfmt=\".3f\", stralign='center'))\n \n else:\n raise Exception(r'Error. {} not in [latex, html].'.format(format))\n\nclass TTest(): \n __slots__ = ['_x', '_y', '_ttest', '_stats', '_v', '_pvalue','_alpha',\n '_mean_difference', '_CI_l', '_CI_u', '_CohensD', \n '_method','_header','_body','_mu', '_alternative']\n\n def __init__(self,result, effect_size, mean_difference, confidence_interval):\n results = result.copy()\n for key, value in results.items():\n self.__setattr__('_' + key, value)\n del results['mu']\n del results['alternative']\n del results['method']\n del results['alpha']\n self._set_table(results, effect_size, mean_difference, confidence_interval)\n \n def stats(self):\n return self._stats\n\n def d(self):\n return self._CohensD\n\n def pvalue(self):\n return self._pvalue\n\n def mean_difference(self):\n return self._mean_difference\n\n def degrees_fredom(self):\n return self._v\n\n def CI(self):\n return (self._CI_l, self._CI_u)\n\n def __getattr__(self, attr):\n raise ValueError(f'Oops, I Caught An Error! {attr.upper()} was not defined.')\n\n def _set_table(self,results, effect_size, mean_difference, confidence_interval):\n if not effect_size:\n del results['CohensD']\n if not mean_difference:\n del results['mean_difference']\n if not confidence_interval:\n del results['CI_l']\n del results['CI_u']\n if 'paired' not in self._method.lower():\n del results['y']\n \n header_keys = {'x':'','y':'','ttest':'','stats':'statistic','v':'df',\n 'pvalue':'p','mean_difference':'Mean Difference',\n 'CohensD':'Cohen\\'s d', 'CI_l':'Lower', 'CI_u':'Upper'}\n \n\n self._header = [header_keys[i] for i in results.keys()]\n \n self._body = list(results.values())\n index = list(results).index('pvalue')\n self._body[index] = '<0.001' if self._body[index] < 0.001 else self._body[index]\n \n def summary(self):\n print(self._method)\n print(tabulate([self._body], self._header, tablefmt=\"psql\", floatfmt=\".3f\", stralign='center'))\n if self._mu is not None and self._mu != 0:\n print('Note. ' + chr(956)+ '=' + str(self._mu))\n if self._alternative == 'less':\n print('Note. H0: Group 1 < Group 2')\n elif self._alternative == 'greater': \n print('Note. 
H0: Group 1 > Group 2')\n \n def summary_convert(self, format='latex'):\n \"\"\"\n Convert the summary table into others formats.\n \n\n Parameters\n ----------\n format : {'latex', 'html'}\n\n Returns\n -------\n Table string on the required format\n\n \"\"\"\n if format == 'latex':\n print(self._method)\n print(tabulate([self._body], self._header, tablefmt=\"latex\", floatfmt=\".3f\", stralign='center'))\n \n if self._mu is not None and self._mu != 0:\n print('Note.' + r'$\\mu = $' + str(self._mu))\n if self._alternative == 'less':\n print(r'Note. $H_0$: Group 1 < Group 2')\n elif self._alternative == 'greater': \n print(r'Note. $H_0$: Group 1 > Group 2')\n \n elif format == 'html':\n print('
' + tabulate([self._body], self._header, tablefmt=\"html\", floatfmt=\".3f\", stralign='center'))\n \n else:\n raise Exception(r'Error. {} not in [latex, html].'.format(format))","sub_path":"build/lib/kappalib/stats/_summary.py","file_name":"_summary.py","file_ext":"py","file_size_in_byte":9268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"650625604","text":"from django import forms\nfrom django.conf import settings\nfrom django.template.defaultfilters import filesizeformat\nfrom django.utils import timezone\nfrom form_utils.forms import BetterModelForm\nfrom django.core.exceptions import ObjectDoesNotExist\n\nfrom app.mixins import OverwriteOnlyModelFormMixin\nfrom app.utils import validate_url\nfrom applications import models\n\n\ndef calculate_age(born):\n today = timezone.now()\n return today.year - born.year - ((today.month, today.day) < (born.month, born.day))\n\nclass ApplicationForm(OverwriteOnlyModelFormMixin, BetterModelForm):\n github = forms.CharField(required=False, widget=forms.TextInput(\n attrs={'class': 'form-control',\n 'placeholder': 'https://github.com/johnBiene'}))\n devpost = forms.CharField(required=False, widget=forms.TextInput(\n attrs={'class': 'form-control',\n 'placeholder': 'https://devpost.com/JohnBiene'}))\n linkedin = forms.CharField(required=False, widget=forms.TextInput(\n attrs={'class': 'form-control',\n 'placeholder': 'https://www.linkedin.com/in/john_biene'}))\n site = forms.CharField(required=False, widget=forms.TextInput(\n attrs={'class': 'form-control', 'placeholder': 'https://biene.space'}))\n\n phone_number = forms.CharField(required=True, widget=forms.TextInput(\n attrs={'class': 'form-control', 'placeholder': '+#########'}))\n\n country = forms.CharField(required=True,\n widget=forms.TextInput(attrs={'class': 'form-control', 'autocomplete': 'off'}))\n\n university = forms.CharField(required=True,\n label='What university (or high school) do you study at?',\n help_text='Current or most recent school you attended.',\n widget=forms.TextInput(\n attrs={'class': 'typeahead-schools', 'autocomplete': 'off'}))\n\n major = forms.CharField(required=False, label='What is your major?',\n help_text='Your major of study at university (e.g. Computer Science or Mathematics)',\n widget=forms.TextInput(\n attrs={'class': 'typeahead-majors', 'autocomplete': 'off'}))\n\n degree = forms.CharField(required=False, label='What is your current or most recent level of study?',\n help_text='Current or most recent degree you\\'ve received '\n '(e.g. Bachelor\\'s or Master\\'s degree)',\n widget=forms.TextInput(\n attrs={'class': 'typeahead-degrees', 'autocomplete': 'off'}))\n\n hear_about = forms.CharField(\n required=True,\n label='Where did you first hear about %s?' % settings.HACKATHON_NAME,\n widget=forms.TextInput(attrs={'class': 'typeahead-marketing_types', 'autocomplete': 'off'})\n )\n\n ambassador = forms.CharField(\n required=False,\n label='Do you have a secret code?',\n help_text='This should be blank unless you were told a secret code, in which case enter it here.',\n widget=forms.TextInput(attrs={'autocomplete': 'off'})\n )\n\n first_timer = forms.TypedChoiceField(\n required=True,\n label='Will %s be your first hackathon?' 
% settings.HACKATHON_NAME,\n coerce=lambda x: x == 'True',\n choices=((False, 'No'), (True, 'Yes')),\n widget=forms.RadioSelect\n )\n\n reimb = forms.TypedChoiceField(\n required=False,\n label='Do you need travel reimbursement to attend?',\n coerce=lambda x: x == 'True',\n choices=((False, 'No'), (True, 'Yes')),\n initial=False,\n widget=forms.RadioSelect\n )\n\n visas = forms.TypedChoiceField(\n required=False,\n label='Do you need an Invitation letter for visa?',\n coerce=lambda x: x == 'True',\n choices=((False, 'No'), (True, 'Yes')),\n initial=False,\n widget=forms.RadioSelect\n )\n\n code_conduct = forms.BooleanField(required=False,\n label='I have read and agree to the '\n '%s MLH Code of Conduct.\\\n\t\t\t\t\t\t\t\t\t\t\tI further agree to the terms of both the \\\n\t\t\t\t\t\t\t\t\t\t\tMLH Contest Terms and Conditions and the \\\n\t\t\t\t\t\t\t\t\t\t\tMLH Privacy Policy\\\n *' % (\n getattr(settings, 'CODE_CONDUCT_LINK', '/code_conduct'),\n 'MLH'\n ))\n privacy_policy = forms.BooleanField(required=False,\n label='I have read and accept '\n '%s Privacy Policy\\\n *' % (\n getattr(settings, 'PRIVACY_POLICY_LINK', '/privacy_policy'),\n settings.HACKATHON_NAME), )\n application_sharing = forms.BooleanField(\n required=False,\n label='I authorize you to share my application/registration information with Major League Hacking for event administration, \\\n ranking, MLH administration, and with my authorization email in-line with the MLH Privacy Policy\\\n *'\n )\n\n\n data_sharing = forms.BooleanField(required=False,label='I authorize Major League Hacking to send me occasional messages about hackathons.*')\n\n partners_permission = forms.BooleanField(\n required=False,\n label='I authorize you to share my application/registration information, including the CV, '\n 'with partners of %s. We, or our partners, may perform processing on and make use of the data '\n 'in your application for recruiting purposes, for example, sponsors may send you a job offer. '\n '*' % settings.HACKATHON_NAME\n )\n media_permission = forms.BooleanField(\n required=False,\n label='Photos will be taken at the event by the %s \\\n organisers and/or by an external party such as MLH. I agree \\\n that photos from the event can be taken, used for internal \\\n and marketing purposes, shared with our sponsors and partners, \\\n including MLH. 
I also grant %s, MLH and other partners \\\n the permission to record and publish photos and video \\\n of the event and the exclusive right to produce commercial \\\n video content *\\\n ' % (settings.HACKATHON_NAME, settings.HACKATHON_NAME)\n )\n\n guardian_name = forms.CharField(label='Full name of Parent/Legal guardian',\n max_length=255, required=False)\n guardian_birth_day = forms.DateField(label='Date of birth of Parent/Legal guardian',\n required=False,\n widget=forms.DateInput(attrs={'type': 'date'}))\n guardian_code_conduct = forms.BooleanField(required=False,\n label='I have read and agree to the '\n '%s MLH Code of Conduct.\\\n I further agree to the terms of both the \\\n MLH Contest Terms and Conditions and the \\\n MLH Privacy Policy\\\n *' % (\n getattr(settings, 'CODE_CONDUCT_LINK', '/code_conduct'),\n 'MLH'\n ))\n guardian_privacy_policy = forms.BooleanField(required=False,\n label='I have read and accept '\n '%s Privacy Policy\\\n *' % (\n getattr(settings, 'PRIVACY_POLICY_LINK', '/privacy_policy'),\n settings.HACKATHON_NAME), )\n guardian_application_sharing = forms.BooleanField(\n required=False,\n label='I authorize you to share application/registration information with Major League Hacking for event administration, \\\n ranking, MLH administration, and with atendee\\'s authorization email in-line with the MLH Privacy Policy\\\n *'\n )\n \n guardian_partners_permission = forms.BooleanField(\n required=False,\n label='I authorize you to share the application/registration information, including the CV, '\n 'with partners of %s. We, or our partners, may perform processing on and make use of the data '\n 'in the application for recruiting purposes, for example, sponsors may send the attendee a job offer. '\n '*' % settings.HACKATHON_NAME\n )\n guardian_media_permission = forms.BooleanField(\n required=False,\n label='Photos will be taken at the event by the %s \\\n organisers and/or by an external party such as MLH. I agree \\\n that photos from the event can be taken, used for internal \\\n and marketing purposes, shared with our sponsors and partners, \\\n including MLH. I also grant %s, MLH and other partners \\\n the permission to record and publish photos and video \\\n of the event and the exclusive right to produce commercial \\\n video content *\\\n ' % (settings.HACKATHON_NAME, settings.HACKATHON_NAME)\n )\n\n def clean_resume(self):\n resume = self.cleaned_data['resume']\n size = getattr(resume, '_size', 0)\n if size > settings.MAX_UPLOAD_SIZE:\n raise forms.ValidationError(\"Please keep resume size under %s. 
Current filesize %s\" % (\n filesizeformat(settings.MAX_UPLOAD_SIZE), filesizeformat(size)))\n return resume\n\n def clean_code_conduct(self):\n cc = self.cleaned_data.get('code_conduct', False)\n # Check that if it's the first submission hackers checks code of conduct checkbox\n # self.instance.pk is None if there's no Application existing before\n # https://stackoverflow.com/questions/9704067/test-if-django-modelform-has-instance\n if not cc and not self.instance.pk:\n raise forms.ValidationError(\n \"To attend %s you must abide by our code of conduct\" % settings.HACKATHON_NAME)\n return cc\n\n def clean_privacy_policy(self):\n pc = self.cleaned_data.get('privacy_policy', False)\n # Check that if it's the first submission hackers checks privacy policy checkbox\n # self.instance.pk is None if there's no Application existing before\n # https://stackoverflow.com/questions/9704067/test-if-django-modelform-has-instance\n if not pc and not self.instance.pk:\n raise forms.ValidationError(\n \"To attend %s you must abide by our privacy policy\" % settings.HACKATHON_NAME)\n return pc\n\n def clean_application_sharing(self):\n aps = self.cleaned_data.get('application_sharing', False)\n # Check if hackers agreed with application sharing\n if not aps and not self.instance.pk:\n raise forms.ValidationError(\n \"To attend %s you must give us permission to share your application with MLH\" % settings.HACKATHON_NAME)\n return aps\n\n def clean_data_sharing(self):\n ds = self.cleaned_data.get('data_sharing', False)\n # Check if hackers agreed with data sharing\n if not ds and not self.instance.pk:\n raise forms.ValidationError(\n \"To attend %s you must give us permission to share your data with MLH\" % settings.HACKATHON_NAME)\n return ds\n\n def clean_media_permission(self):\n mp = self.cleaned_data.get('media_permission', False)\n # Check if hackers give us permission to publish media files asociated with them\n if not mp and not self.instance.pk:\n raise forms.ValidationError(\n \"To attend %s you must agree with that photos from the event can be used as described below\" % settings.HACKATHON_NAME)\n return mp\n\n\n def clean_partners_permission(self):\n pp = self.cleaned_data.get('partners_permission', False)\n # Check if hackers give us permission to publish media files asociated with them\n if not pp and not self.instance.pk:\n raise forms.ValidationError(\n \"To attend %s you must give us permission to share your data with partners of %s\" % (settings.HACKATHON_NAME, settings.HACKATHON_NAME))\n return pp\n\n\n def is_guardian_required(self):\n applicant_bd = self.cleaned_data.get('birth_day', None)\n if applicant_bd and calculate_age(applicant_bd) < 18:\n return True\n return False\n\n def clean_guardian_birth_day(self):\n bd = self.cleaned_data.get('guardian_birth_day', None)\n if not self.instance.pk and self.is_guardian_required():\n if not bd:\n raise forms.ValidationError(\"Please specify your birth day\")\n # Check that the guardian is at least 18\n if calculate_age(bd) < 18:\n raise forms.ValidationError(\"Parent/Legal guardian must be over 18\")\n return bd\n\n def clean_guardian_code_conduct(self):\n cc = self.cleaned_data.get('guardian_code_conduct', False)\n # Check that if it's the first submission hackers checks code of conduct checkbox\n # self.instance.pk is None if there's no Application existing before\n # https://stackoverflow.com/questions/9704067/test-if-django-modelform-has-instance\n if not cc and not self.instance.pk and self.is_guardian_required():\n raise 
forms.ValidationError(\n \"As a parent or legal guardian you must abide by our code of conduct\")\n return cc\n\n def clean_guardian_privacy_policy(self):\n pc = self.cleaned_data.get('guardian_privacy_policy', False)\n # Check that if it's the first submission hackers checks privacy policy checkbox\n # self.instance.pk is None if there's no Application existing before\n # https://stackoverflow.com/questions/9704067/test-if-django-modelform-has-instance\n if not pc and not self.instance.pk and self.is_guardian_required():\n raise forms.ValidationError(\n \"As a parent or legal guardian you must abide by our privacy policy\")\n return pc\n\n def clean_guardian_application_sharing(self):\n aps = self.cleaned_data.get('guardian_application_sharing', False)\n # Check if hackers agreed with application sharing\n if not aps and not self.instance.pk and self.is_guardian_required():\n raise forms.ValidationError(\n \"As a parent or legal guardian you must give us permission to share the application with MLH\")\n return aps\n\n def clean_guardian_media_permission(self):\n mp = self.cleaned_data.get('guardian_media_permission', False)\n # Check if hackers give us permission to publish media files asociated with them\n if not mp and not self.instance.pk and self.is_guardian_required():\n raise forms.ValidationError(\n \"As a parent or legal guardian you must agree with that photos from the event can be used \"\n \"as described below\")\n return mp\n\n def clean_guardian_partners_permission(self):\n pp = self.cleaned_data.get('guardian_partners_permission', False)\n # Check if hackers give us permission to publish media files asociated with them\n if not pp and not self.instance.pk and self.is_guardian_required():\n raise forms.ValidationError(\n \"As a parent or legal guardian you must give us permission to share the data \"\n \"with partners of %s\" % settings.HACKATHON_NAME)\n return pp\n\n\n def clean_github(self):\n data = self.cleaned_data['github']\n validate_url(data, 'github.com')\n return data\n\n def clean_devpost(self):\n data = self.cleaned_data['devpost']\n validate_url(data, 'devpost.com')\n return data\n\n def clean_linkedin(self):\n data = self.cleaned_data['linkedin']\n validate_url(data, 'linkedin.com')\n return data\n\n def clean_projects(self):\n data = self.cleaned_data['projects']\n first_timer = self.cleaned_data['first_timer']\n if not first_timer and not data:\n raise forms.ValidationError(\"Please fill this in order for us to know you a bit better\")\n return data\n\n def clean_reimb_amount(self):\n data = self.cleaned_data['reimb_amount']\n reimb = self.cleaned_data.get('reimb', False)\n if reimb and not data:\n raise forms.ValidationError(\"To apply for reimbursement please set a valid amount\")\n deadline = getattr(settings, 'REIMBURSEMENT_DEADLINE', False)\n if data and deadline and deadline <= timezone.now():\n raise forms.ValidationError(\"Reimbursement applications are now closed. Trying to hack us?\")\n return data\n\n def clean_reimb(self):\n reimb = self.cleaned_data.get('reimb', False)\n deadline = getattr(settings, 'REIMBURSEMENT_DEADLINE', False)\n if reimb and deadline and deadline <= timezone.now():\n raise forms.ValidationError(\"Reimbursement applications are now closed. 
Trying to hack us?\")\n return reimb\n\n def clean_other_diet(self):\n data = self.cleaned_data['other_diet']\n diet = self.cleaned_data['diet']\n if diet == 'Others' and not data:\n raise forms.ValidationError(\"Please tell us your specific dietary requirements\")\n return data\n\n def clean_other_race(self):\n data = self.cleaned_data['other_race']\n race = self.cleaned_data['race']\n if race == 'O' and not data:\n raise forms.ValidationError(\"Please specify your race/ethnicity\")\n return data\n\n def clean_ambassador(self):\n data = self.cleaned_data['ambassador']\n if data:\n try:\n ambassador = models.Ambassador.objects.get(secret_code=data)\n except(models.Ambassador.DoesNotExist):\n raise forms.ValidationError(\"Ambassador with this secret code does not exist. \\\n Leave blank instead if you don't have ambassador\")\n else:\n return ambassador\n else:\n return None\n\n def clean_job_type(self):\n data = self.cleaned_data['job_type']\n interest = self.cleaned_data['job_interest']\n if not interest == 'No' and not data:\n raise forms.ValidationError(\"Please specify what type of job do you prefer\")\n return data\n\n def __getitem__(self, name):\n item = super(ApplicationForm, self).__getitem__(name)\n item.field.disabled = not self.instance.can_be_edit()\n return item\n\n def fieldsets(self):\n # Fieldsets ordered and with description\n self._fieldsets = [\n ('Personal Info',\n {'fields': ('gender', 'race', 'other_race', 'birth_day',\n 'phone_number', 'country', 'university', 'major', 'degree', 'graduation_year',\n 'tshirt_size', 'diet', 'other_diet'),\n 'description': 'Hey there, before we begin, we need to know some basics about you.', }),\n ('Job preferences',\n {'fields': ('job_interest', 'job_type'),\n 'description': 'What if our sponsors offers you some job?', }),\n ('Let us get to know you better',\n {'fields': ('spirit_animal',),\n 'description': 'We would like to know a little more about you. ;)', }),\n ('Hackathons?', {'fields': ('description', 'hear_about', 'first_timer', 'projects'), }),\n ('Show us what you\\'ve built',\n {'fields': ('resume', 'site', 'github', 'devpost', 'linkedin'),\n 'description': 'Some of our sponsors may use this information for recruitment purposes, '\n 'so please include as much as you can.'}),\n ]\n deadline = getattr(settings, 'REIMBURSEMENT_DEADLINE', False)\n r_enabled = getattr(settings, 'REIMBURSEMENT_ENABLED', False)\n if r_enabled and deadline and deadline <= timezone.now() and not self.instance.pk:\n self._fieldsets.append(('Traveling',\n {'fields': ('origin', 'visas',),\n 'description': 'Reimbursement applications are now closed. '\n 'Sorry for the inconvenience.',\n }))\n elif self.instance.pk and r_enabled:\n self._fieldsets.append(('Traveling',\n {'fields': ('origin', 'visas',),\n 'description': 'If you applied for reimbursement, check out the Travel tab. '\n 'Email us at %s for any change needed on reimbursements.' 
%\n settings.HACKATHON_CONTACT_EMAIL,\n }))\n elif not r_enabled:\n self._fieldsets.append(('Traveling',\n {'fields': ('origin', 'visas',)}), )\n else:\n self._fieldsets.append(('Traveling',\n {'fields': ('origin', 'visas', 'reimb', 'reimb_amount'), }), )\n\n # Fields that we only need the first time the hacker fills the application\n # https://stackoverflow.com/questions/9704067/test-if-django-modelform-has-instance\n # if not self.instance.pk:\n # self._fieldsets.append(('Secret code', {'fields': ('ambassador',),}))\n\n self._fieldsets.append(('Other', {'fields': ('comment',),}))\n\n if not self.instance.pk:\n self._fieldsets.append(\n ('Permissions',\n {'fields': (\n 'code_conduct',\n 'privacy_policy',\n 'application_sharing',\n\t\t\t\t\t\t\t'data_sharing',\n 'partners_permission',\n 'media_permission',\n ),\n 'description': 'We need these permissions to provide you better experience'}),\n )\n self._fieldsets.append(\n ('Permissions of Parent/Legal guardian',\n {'fields': (\n 'guardian_name',\n 'guardian_birth_day',\n 'guardian_code_conduct',\n 'guardian_privacy_policy',\n 'guardian_application_sharing',\n 'guardian_partners_permission',\n 'guardian_media_permission',\n ),\n 'description': 'If you are under 18, your parent or legal guardian must agree with the '\n 'permissions below.
The fields below must be filled in by your parent or '\n 'legal guardian.'}),\n )\n return super(ApplicationForm, self).fieldsets\n\n class Meta:\n model = models.Application\n help_texts = {\n 'gender': 'This is for demographic purposes. You can skip this '\n 'question if you want',\n 'race': 'This is just for statistic purposes. You can skip this '\n 'question if you want',\n 'graduation_year': 'What year have you graduated on or when will '\n 'you graduate',\n 'degree': 'What\\'s your major?',\n 'birth_day': 'When you click on the field, you should see a pop-up where you can \\\n choose a date.',\n 'other_diet': 'Please fill here in your dietary requirements. We want to make sure we have food for you!',\n 'resume': 'Uploading your resume significantly increases your chances of being invited to %s' % settings.HACKATHON_NAME,\n 'projects': 'You can talk about about past hackathons, personal projects, awards etc. '\n '(we love links) Show us your passion! :D',\n 'reimb_amount': '

' + settings.REIMBURSEMENT_AMOUNTS + ''\n + settings.REIMBURSEMENT_REQUIREMENTS + '
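# --- editor's note (assumption) ---------------------------------------------
# The 'reimb_amount' help text above concatenates two settings strings; the
# HTML list markup between them was lost in extraction. The same pattern in
# isolation (the REIMBURSEMENT_* values here are made-up stand-ins, not the
# project's real Django settings):
REIMBURSEMENT_AMOUNTS = 'Travel reimbursements are capped per region.'
REIMBURSEMENT_REQUIREMENTS = 'Original receipts are required for payment.'

reimb_help = REIMBURSEMENT_AMOUNTS + ' ' + REIMBURSEMENT_REQUIREMENTS
print(reimb_help)  # rendered as the field's help_text in the form
# -----------------------------------------------------------------------------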
',\n 'comment': 'If there is anything more you want us to know, put it here (e.g. special needs).'\n }\n\n widgets = {\n 'origin': forms.TextInput(attrs={'autocomplete': 'off'}),\n 'birth_day': forms.DateInput(attrs={'type': 'date'}),\n 'description': forms.Textarea(attrs={'rows': 3, 'cols': 40}),\n 'spirit_animal': forms.Textarea(attrs={'rows': 3, 'cols': 40}),\n 'projects': forms.Textarea(attrs={'rows': 3, 'cols': 40}),\n 'tshirt_size': forms.RadioSelect(),\n 'graduation_year': forms.RadioSelect(),\n 'comment': forms.Textarea(attrs={'rows': 3, 'cols': 40}),\n }\n\n labels = {\n 'comment': 'Do you have any additional comments?',\n 'race': 'What is your race/ethnicity?',\n 'other_race': 'Please specify your race/ethnicity',\n 'graduation_year': 'What is your graduation year?',\n 'tshirt_size': 'What\\'s your t-shirt size?',\n 'diet': 'Dietary requirements',\n 'birth_day': 'What is your date of birth?',\n 'job_interest': 'Are you looking for a job?',\n 'job_type': 'What type of job would you prefer?',\n 'spirit_animal': 'What\\'s your spirit animal and why?',\n 'origin': 'Where will you be during the hackathon?',\n 'description': 'Why are you excited about %s?' % settings.HACKATHON_NAME,\n 'projects': 'What projects have you worked on?',\n 'resume': 'Upload your resume',\n 'reimb_amount': 'How much money (%s) would you need to afford traveling to %s?' % (\n getattr(settings, 'CURRENCY', '$'), settings.HACKATHON_NAME),\n }\n\n exclude = ['user', 'uuid', 'invited_by', 'submission_date', 'status_update_date', 'status', 'under_age']\n\n\nclass AmbassadorForm(OverwriteOnlyModelFormMixin, BetterModelForm):\n university = forms.CharField(required=True,\n label='What university (or high school) do you study at?',\n help_text='Current or most recent school you attended.',\n widget=forms.TextInput(\n attrs={'class': 'typeahead-schools', 'autocomplete': 'off'}))\n\n privacy_policy = forms.BooleanField(required=False,\n label='I have read and accept '\n '%s Privacy Policy' % (\n getattr(settings, 'PRIVACY_POLICY_LINK', '/privacy_policy'),\n settings.HACKATHON_NAME), )\n\n def clean_privacy_policy(self):\n pc = self.cleaned_data.get('privacy_policy', False)\n # Check that if it's the first submission hackers checks privacy policy checkbox\n # self.instance.pk is None if there's no Application existing before\n # https://stackoverflow.com/questions/9704067/test-if-django-modelform-has-instance\n if not pc and not self.instance.pk:\n raise forms.ValidationError(\n \"To attend %s you must abide by our privacy policy\" % settings.HACKATHON_NAME)\n return pc\n\n def fieldsets(self):\n self._fieldsets = [\n ('Personal Info',\n {'fields': ('origin', 'phone_number', 'university', 'tshirt_size'),\n 'description': 'Hey there, before we begin, we need to know some basics about you.', }),\n ]\n\n # Fields that we only need the first time the hacker fills the application\n # https://stackoverflow.com/questions/9704067/test-if-django-modelform-has-instance\n if not self.instance.pk:\n self._fieldsets.append(('Permissions',\n {'fields': (\n 'privacy_policy',\n ),\n 'description': 'We need this permissions to make you our\\'s ambassador'}\n ))\n\n return super(AmbassadorForm, self).fieldsets\n\n class Meta:\n model = models.Ambassador\n\n widgets = {\n 'origin': forms.TextInput(attrs={'autocomplete': 'off'}),\n 'tshirt_size': forms.RadioSelect(),\n 'phone_number': forms.TextInput(attrs={'class': 'form-control', 'placeholder': '+#########'})\n }\n\n labels = {\n 'origin': 'Where will you be during the hackathon?',\n 
'tshirt_size': 'What\\'s your t-shirt size?',\n }\n\n exclude = ['user', 'secret_code', 'created_date']\n","sub_path":"applications/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":30292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"303244186","text":"import nltk\nnltk.download('punkt')\n# sentence = \"At eight o'clock on Thursday morning, Arthur didn't feel very good.\"\nfile = open('ehr.txt','r')\nlines = file.readlines()\n\nfrequency={}\ntotalLen = 0\nfor line in lines:\n\ttoken_line = nltk.word_tokenize(line)\n\tfor word in token_line:\n\t\tif ord(word[0].lower())<=122 and ord(word[0].lower())>=97:\n\t\t\ttotalLen += 1\n\t\t\tif word not in frequency:\n\t\t\t\tfrequency[word.lower()] = 0\n\t\t\tfrequency[word.lower()] += 1\nprint(frequency)\n\nstopwords = open('stoplist.txt', 'r')\nstopwordsLst = stopwords.read().splitlines()\n\n\n##a\n\nstopwrdsTotal = 0\nfor i in list(frequency):\n\tif i in stopwordsLst:\n\t\tstopwrdsTotal += frequency[i]\nanswerA = stopwrdsTotal/totalLen\nprint('A:',answerA)\n\n##b\ncharacNumber = 0\ncapitalNumber = 0\nfor line in lines:\n\ttoken_line = nltk.word_tokenize(line)\n\tfor word in token_line:\n\t\tif ord(word[0].lower())<=122 and ord(word[0].lower())>=97:\n\t\t\tcharacNumber += len(word)\n\t\t\tfor i in word:\n\t\t\t\tif ord(i)<=90 and ord(i)>=65:\n\t\t\t\t\tcapitalNumber+= 1\nprint('B:',capitalNumber/characNumber)\n\n##c\ncharacNumber = 0\naccu = 0\nfor i in list(frequency):\n\tcharacNumber += len(i)*frequency[i]\n\taccu += frequency[i]\nanswerC = characNumber/accu\nprint('C:',answerC)\n\n\n##d\nnltk.download('averaged_perceptron_tagger')\nnoun = 0\nverb = 0\nadverb = 0\npronouns = 0\nadj = 0\ntotalLen = 0\nfor line in lines:\n\ttoken_line = nltk.word_tokenize(line)\n\tfor item in nltk.pos_tag(token_line):\n\t\tif ord(item[0][0].lower())<=122 and ord(item[0][0].lower())>=97:\n\t\t\ttotalLen += 1\n\t\t\tif item[1][0:2] == 'NN':\n\t\t\t\tnoun += 1\n\t\t\tif item[1][0:2] == 'VB':\n\t\t\t\tverb += 1\n\t\t\tif item[1][0:2] == 'RB':\n\t\t\t\tadverb += 1\n\t\t\tif item[1][0:2] == 'JJ':\n\t\t\t\tadj += 1\n\t\t\tif item[1][0:3] == 'PRP':\n\t\t\t\tpronouns += 1\nprint('D:',noun/totalLen, verb/totalLen, adverb/totalLen, pronouns/totalLen, adj/totalLen)\n\n##e\nword_type = []\ntypes = ''\nfor line in lines:\n\ttoken_line = nltk.word_tokenize(line)\n\tfor item in nltk.pos_tag(token_line):\n\t\tif ord(item[0][0].lower())<=122 and ord(item[0][0].lower())>=97:\n\t\t\tif item[0].lower() not in stopwordsLst:\n\t\t\t\tif item[1][0:2] == 'NN':\n\t\t\t\t\ttypes = 'noun'\n\t\t\t\tif item[1][0:2] == 'VB':\n\t\t\t\t\ttypes = 'verb'\n\t\t\t\tif item[1][0:2] == 'RB':\n\t\t\t\t\ttypes = 'adverb'\n\t\t\t\tif item[1][0:2] == 'JJ':\n\t\t\t\t\ttypes = 'adj'\n\t\t\t\tword_type.append([item[0], types])\n\nimport pandas as pd\nimport numpy as np\ndf = pd.DataFrame(word_type)\ngrouped = df.reset_index().groupby([0,1]).count().reset_index()\nnounForm = grouped.loc[grouped[1] == 'noun'].sort_values(by='index',ascending=False).head(10)\nprint('e:',nounForm)\n\n####TF_IDF\n\ndef calcu_TF(term, doc_frequency, docuLen):\n\tctd = doc_frequency[term]/docuLen\n\treturn np.log10(ctd+1)\ndef calcu_IDF(term):\n\tN = len(lines)\n\tk = 1\n\tfor line in lines:\n\t\tif term.lower() in nltk.word_tokenize(line.lower()):\n\t\t\tk += 1\n\treturn 1+np.log10(N/k)\nfor line in lines[0:10]:\n\ttokenized = nltk.word_tokenize(line)\n\tdocuLen = len(tokenized)\n\tdoc_frequency = {}\n\tfor i in tokenized:\n\t\tif ord(i[0].lower())<=122 
and ord(i[0].lower())>=97:\n\t\t\tif i not in doc_frequency:\n\t\t\t\tdoc_frequency[i.lower()] = 0\n\t\t\tdoc_frequency[i.lower()] += 1\n\tfor item in doc_frequency:\n\t\tTFIDF = calcu_IDF(item)*calcu_TF(item, doc_frequency, docuLen)\n\t\tdoc_frequency[item] = TFIDF\n\t\t# print(doc_frequency)\n\tcalTFIDF = pd.Series(doc_frequency).to_frame('tfidf')\n\tsortedTFIDF = calTFIDF.sort_values(by='tfidf', ascending=False).head(10)\n\tprint(sortedTFIDF)\nfile.close()\n","sub_path":"textDataAnalysis2.py","file_name":"textDataAnalysis2.py","file_ext":"py","file_size_in_byte":3385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"609341751","text":"import os\nfrom os.path import exists\n\nfrom core.annotations import read_annotations\nfrom core.constants import data_path, upstream_length\nfrom core.data_reading import read_dict\n\npath_to_features = '../../res/ml_log_mc1-noncds_long_del/'\npath_to_dict = data_path + 'dictionaries_indels/'\ndict_name = 'Walker_dictionary_broken_genes'\n\nout_path = '../../res/selected_features_stat/ml_log_mc1-noncds_long_del/'\n\n\ndef filter_selected():\n for (dirpath, dirnames, filenames) in os.walk(path_to_features):\n for filename in filenames:\n i = filename.find('.log')\n if i != -1:\n drug = filename[0: i]\n with open(path_to_features + drug + '.selected', 'w') as f:\n for line in open(path_to_features + filename, 'r').readlines()[1:]:\n s = line.strip().split('\\t')\n if float(s[-1]) != 0:\n f.write(line)\n\n\ndef find_missing_from_dict():\n if not exists(out_path):\n os.makedirs(out_path)\n name, drug_to_mut_list = read_dict(path_to_dict, dict_name)\n for drug, mut_list in drug_to_mut_list.items():\n with open(out_path + drug + '.stat', 'w') as f:\n broken = set()\n changed = set()\n all = set()\n total = 0\n for line in open(path_to_features + drug+ '.selected').readlines():#\n s = line.strip().split('\\t')\n total += 1\n all.add('\\t'.join(s[:-1]))\n if s[-2] == 'broken':\n broken.add(s[1])\n elif s[-2] == 'changed':\n changed.add(s[1])\n\n in_broken = 0\n in_changed = 0\n point = 0\n missing = 0\n missing_set = []\n for m in mut_list:\n if m in all:\n point += 1\n else:\n s = m.split('\\t')\n if s[1] in broken:\n in_broken += 1\n elif s[1] in changed:\n in_changed += 1\n else:\n missing += 1\n missing_set.append(m)\n f.write('total selected %d\\n' % (total))\n f.write('broken %d\\n' % (len(broken)))\n f.write('changed %d\\n' % (len(changed)))\n f.write('Walker total %d\\n' % (len(mut_list)))\n f.write('Walker point hit %d\\n' % (point))\n f.write('Walker changed hit %d\\n' % (in_changed))\n f.write('Walker broken hit %d\\n' % (in_broken))\n f.write('Walker missing %d\\n' % (missing))\n f.write('missing list:\\n')\n f.write('\\n'.join(missing_set))\n\n\ndef print_annotation_for_selected():\n cds_list = read_annotations(upstream_length, filter_by_gene_len=False)\n name_to_cds = {}\n for cds in cds_list:\n name_to_cds[cds.name] = cds\n for (dirpath, dirnames, filenames) in os.walk(path_to_features):\n for filename in filenames:\n i = filename.find('.selected')\n if i != -1:\n drug = filename[0: i]\n with open(out_path + drug + '.gene_annotation', 'w') as f:\n genes = set()\n for line in open(path_to_features + filename, 'r').readlines():\n s = line.strip().split('\\t')\n if s[0] != 'non_cds':\n genes.add(s[1])\n f.write('name\\tpseudogene\\thypothetical\\tin_proteom\\tproduct\\n')\n for gene in genes:\n cds = name_to_cds[gene]\n f.write(gene + '\\t')\n if cds.is_pseudogene:\n f.write('1\\t')\n else:\n 
f.write('0\\t')\n if cds.is_hypothetical:\n f.write('1\\t')\n else:\n f.write('0\\t')\n if cds.exists_in_proteom:\n f.write('1\\t')\n else:\n f.write('0\\t')\n if cds.product is None:\n f.write('-\\n')\n else:\n f.write(cds.product)\n f.write('\\n')\n\n\nif __name__ == '__main__':\n filter_selected()\n find_missing_from_dict()\n print_annotation_for_selected()","sub_path":"ml_methods/explore_selected_features.py","file_name":"explore_selected_features.py","file_ext":"py","file_size_in_byte":4513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"288053851","text":"from collections import deque\nimport re\nfrom pyparsing import *\nfrom geosolver.text2.ontology import function_signatures, FunctionSignature, VariableSignature, FormulaNode\nfrom geosolver.text2.rule import TagRule\n\n__author__ = 'minjoon'\n\nclass AnnotationNode(object):\n def __init__(self, content, children):\n self.syntax_parse = content.syntax_parse\n self.content = content\n self.children = children\n self.valence = len(children)\n\n def __repr__(self):\n if len(self.children) == 0:\n return repr(self.content)\n else:\n args_string = \", \".join(repr(child) for child in self.children)\n return \"%r(%s)\" % (self.content, args_string)\n\n def __iter__(self):\n queue = deque()\n queue.appendleft(self)\n while len(queue) > 0:\n current = queue.pop()\n for child in current.children:\n queue.appendleft(child)\n yield current\n\n def to_formula(self):\n args = [child.to_formula() for child in self.children]\n return FormulaNode(self.content.signature, args)\n\n\n\ndef get_annotation_node(syntax_parse, annotation_string):\n words = syntax_parse.words\n def span_f(a, b, c):\n if len(c) == 1:\n if c[0] == 'i':\n return 'i'\n else:\n return int(c[0]), int(c[0])+1\n else:\n return int(c[0]), int(c[1])+1\n\n def tag_f(a, b, c):\n assert len(c) == 1\n if c[0][0].isupper() or re.match('^\\d+(\\.\\d+)?', c[0][0]):\n return 'function', c[0]\n else:\n return 'variable', c[0]\n\n def expr_f(a, b, c):\n local_span = c[1]\n children = c[2:]\n type_, s = c[0]\n name = \"_\".join(words[idx] for idx in range(*local_span))\n if s in function_signatures:\n signature = function_signatures[s]\n elif type_ == 'function' and len(children) == 0:\n # Constant number\n signature = FunctionSignature(name, 'number', [], name=name)\n elif type_ == 'variable' and len(children) == 0:\n signature = VariableSignature(local_span, s, name=name)\n else:\n raise Exception()\n content = TagRule(syntax_parse, local_span, signature)\n return AnnotationNode(content, children)\n\n current = Word(alphanums)\n span = (Word(nums) + Literal(\":\").suppress() + Word(nums)) | Word(nums)\n string = Literal(\"[\").suppress() + Word(alphanums+\"_\") + Literal(\"]\").suppress()\n tag = current.setParseAction(tag_f) + Literal(\"@\").suppress() + span.setParseAction(span_f) + Optional(string).suppress()\n expr = Forward()\n expr << (tag + Optional(Literal(\"(\").suppress() + expr +\n ZeroOrMore(Literal(\",\").suppress() + expr) + Literal(\")\").suppress()))\n tokens = expr.setParseAction(expr_f).parseString(annotation_string)\n return tokens[0]\n\ndef is_valid_annotation(syntax_parse, annotation_string):\n try:\n get_annotation_node(syntax_parse, annotation_string)\n return True\n except:\n return 
False\n\n\n\n\n\n","sub_path":"geosolver/text2/get_annotation_node.py","file_name":"get_annotation_node.py","file_ext":"py","file_size_in_byte":3099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"304124029","text":"# coding: utf-8\nimport json\nimport uuid\n\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom sympy import *\nfrom sympy.abc import x,y,z\nfrom result_cluster import clater\nfrom T2to3 import T2to3\nimport math\nimport random\nfrom LLS import LLs,drawlines,growprocess\nfrom math import sqrt\nfrom numpy.linalg import matrix_power\nimport tkinter\nfrom draw import drawMatches\nimport matplotlib\nimport pyk4a\nfrom pyk4a import Config, PyK4A, ColorResolution\nimport numpy as np\nnp.set_printoptions(suppress=True)\nfrom matplotlib import pyplot as plt\nmatplotlib.use('TkAgg')\nimport cv2\nglobal rotatevalue1\nglobal rotatevalue2\n\n\n\ndef MCL(matrix,e,r):\n\n global qb\n global targ\n l=len(matrix)\n for i in range(l):\n matrix[i][i]=1\n print(matrix)\n yi=matrix/matrix.sum(axis=0)\n yi2=yi\n past=np.zeros((len(matrix), len(matrix)))\n sum=0\n while((past==yi2).all()==False and sum<10000):\n past=yi2\n yi2 =matrix_power(yi2,e)\n yi2 = np.power(yi2, r)\n yi2 = yi2 / yi2.sum(axis=0)\n sum=sum+1\n print(sum)\n\n clustr=[]\n print(clustr)\n for i in range(len(yi2)):\n yi2[i][i]=0\n for i in range(len(yi2)):\n a=[]\n for j in range(len(yi2)):\n if(yi2[j][i]>0):\n a.append(j)\n clustr.append(a)\n targ = [0] * len(clustr)\n final=[]\n for i in range(len(clustr)):\n if(targ[i]==0 and clustr[i]!=None):\n qb=[]\n findcluster(clustr,i,qb)\n final.append(qb)\n return final\ndef findcluster(clustr,i,qb):\n global targ\n if(targ[i]==0):\n qb.append(i)\n targ[i] = 1\n if (clustr[i] != None):\n for j in clustr[i]:\n findcluster(clustr, j, qb)\n for u in range(len(clustr)):\n if j in clustr[u]:\n findcluster(clustr,u,qb)\ndef ore(a):\n return math.atan((a[1])/(a[0]))/np.pi*360\ndef ang2(a,b):\n return abs(a.angle-b.angle)%180\ndef ang(a,b):\n if a[0]-b[0]==0:\n return 0\n else:\n return math.atan((a[1]-b[1])/(a[0]-b[0]))/np.pi*360\n\ndef bgr_rgb(img):\n (r, g, b) = cv2.split(img)\n return cv2.merge([b, g, r])\ndef distan(a,b):\n x=pow(abs(a[0]-b[0]),2)\n y=pow(abs(a[1]-b[1]),2)\n return sqrt(x+y)\n# def neighbor(p1,q1,p2,q2):\n# global grapy_p\n# global grapy_q\n# s1=0\n# s2=0\n# d1=distan(p1.pt,p2.pt)\n# d2=distan(q1.pt,q2.pt)\n# for i in range(len(grapy_q)):\n# if(grapy_q[p1.class_id][i] 0 and t < step:\n #print(throsld)\n grapy_all[i][j] = 1\n grapy_all[j][i] = 1\n elif t > step:\n grapy_all[i][j] = 1 / (int(t / step) * step)\n grapy_all[j][i] = 1 / (int(t / step) * step)\n print(grapy_q)\n print(grapy_p)\n # a=[]\n # for i in good:\n # sum=0\n # for j in range(len(grapy_all)):\n # c=kp1[i[0].queryIdx].class_id\n # if(grapy_all[c][j]==1):\n # sum=sum+1\n # if(sum>=1):\n # a.append(i)\n ######\n final=[]\n bob = MCL(grapy_all, 2, 2) ##get cluster\n lens=[]\n long=2\n for i in bob:\n if len(i)>=2:\n long=3\n if(len(bob)>1):\n for j in bob:\n lens.append(len(j))\n for i in range(20):\n for q in range(len(lens)):\n if lens[q] == max(lens):\n if(len(bob[q])>=long):\n final.append(bob[q])\n lens[q] = 0\n break\n else:\n for i in bob:\n if len(i)>2:\n final.append(i)\n\n apple=[]\n for u in range(len(final)):\n pp=[]\n for i in good:\n c = kp1[i[0].queryIdx].class_id\n print(c)\n if(c in final[u]):\n pp.append(i)\n apple.append(pp)\n print(\"final\",apple)\n # cv2.drawMatchesKnn 
expects list of list s as matches.\n if len(apple)<1:\n print(\"not good\")\n # global grapy_p\n # global grapy_q\n # sift = cv2.xfeatures2d.SIFT_create()\n #\n # # find the keypoints and descriptors with SIFT\n # kp1, des1 = sift.detectAndCompute(img1, None)\n # kp2, des2 = sift.detectAndCompute(img2, None)\n # FLANN_INDEX_KDTREE = 0\n # # BFMatcher with default params\n # FLANN_INDEX_KDTREE = 1\n # index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=6)\n # search_params = dict(checks=50) # or pass empty dictionary\n # flann = cv2.FlannBasedMatcher(index_params, search_params)\n # bf = cv2.BFMatcher()\n # matches = bf.knnMatch(des1, des2, k=2)\n #\n # # Apply ratio test\n #\n # good = [[m] for m, n in matches if m.distance < 0.7 * n.distance]\n # # build grapy\n # grapy_all = np.zeros((len(good), len(good)))\n # grapy_p = np.zeros((len(good), len(good)))\n # grapy_q = np.zeros((len(good), len(good)))\n # for i in range(len(good)):\n # for j in range(len(good)):\n # p1 = kp1[good[i][0].queryIdx]\n # q1 = kp2[good[i][0].trainIdx]\n # kp1[good[i][0].queryIdx].class_id = i\n # kp2[good[i][0].trainIdx].class_id = i\n # p2 = kp1[good[j][0].queryIdx]\n # q2 = kp2[good[j][0].trainIdx]\n # grapy_p[i][j] = distan(p1.pt, p2.pt)\n # grapy_q[i][j] = distan(q1.pt, q2.pt)\n # grapy_p[j][i] = distan(p1.pt, p2.pt)\n # grapy_q[j][i] = distan(q1.pt, q2.pt)\n # for i in range(len(good)):\n # for j in range(len(good)):\n # p1 = kp1[good[i][0].queryIdx]\n # q1 = kp2[good[i][0].trainIdx]\n # p2 = kp1[good[j][0].queryIdx]\n # q2 = kp2[good[j][0].trainIdx]\n # if (neighbor(p1, q1, p2, q2)):\n # if (scale(p1, q1, p2, q2)):\n # if (orientation(p1, q1, p2, q2)):\n # grapy_all[i][j] = 1\n # grapy_all[j][i] = 1\n # print(grapy_q)\n # print(grapy_p)\n # a = []\n # for i in good:\n # sum = 0\n # for j in range(len(grapy_all)):\n # c = kp1[i[0].queryIdx].class_id\n # if (grapy_all[c][j] == 1):\n # sum = sum + 1\n # if (sum >= 1):\n # a.append(i)\n # ######\n # final = []\n # bob = MCL(grapy_all, 2, 2) # get cluster\n # lens = []\n # long = 2\n # for i in bob:\n # if len(i) >= 2:\n # long =2\n # if (len(bob) > 1):\n # for j in bob:\n # lens.append(len(j))\n # for i in range(18):\n # for q in range(len(lens)):\n # if lens[q] == max(lens):\n # if (len(bob[q]) >= long):\n # final.append(bob[q])\n # lens[q] = 0\n # break\n # else:\n # for i in bob:\n # if len(i) > 2:\n # final.append(i)\n #\n # apple = []\n # for u in range(len(final)):\n # pp = []\n # for i in good:\n # c = kp1[i[0].queryIdx].class_id\n # print(c)\n # if (c in final[u]):\n # pp.append(i)\n # apple.append(pp)\n # print(\"final\", apple)\n # # cv2.drawMatchesKnn expects list of lists as matches.\n # if len(apple) < 1:\n # print(\"not good\")\n ori_out=[]\n process_out=[]\n for n in apple:\n\n out_d,out_o=findring(n,kp1,kp2,imgdo,imgd)\n if out_d!= 0 and out_o!=0:\n ori_out.append(out_o)\n process_out.append(out_d)\n\n #################################find circul\n #bigest=rangeofring2.index((max(rangeofring2)))\n\n imgg =drawMatches(img1, kp1, img2, kp2, apple)\n img2 = cv2.drawMatchesKnn(img1, kp1, img2, kp2, good, (0, 0, 255), (255, 0, 0), None, flags=2)\n return bgr_rgb(imgg/255),process_out,ori_out\n\ndef findring(area,kp1,kp2,imgd1,imgd2):\n\n def three(p1, p2, p3):\n k = (p1[1] - p2[1]) / (p1[0] - p2[0])\n b = p1[1] - k * p1[0]\n return abs((k * p3[0] - p3[1] + b) / sqrt(pow(k, 2) + 1))\n def mdcircle(p1, p2):\n center = []\n center.append((p1[0] + p2[0]) / 2)\n center.append((p1[1] + p2[1]) / 2)\n #R = sqrt(pow(p1[0] - p2[0], 2) + pow(p1[1] - p2[1], 
2)) / 2\n R=30\n return center, R\n\n def vector1(p1, p2, p3):\n a = ((p2[1] - p1[1]) * (p3[2] - p1[2]) - (p2[2] - p1[2]) * (p3[1] - p1[1]))\n b = ((p2[2] - p1[2]) * (p3[0] - p1[0]) - (p2[0] - p1[0]) * (p3[2] - p1[2]))\n c = ((p2[0] - p1[0]) * (p3[1] - p1[1]) - (p2[1] - p1[1]) * (p3[0] - p1[0]))\n l = sqrt(pow(a, 2) + pow(b, 2) + pow(c, 2))\n if c < 0:\n c = c * (-1)\n a = a * (-1)\n b = b * (-1)\n if l != 0:\n return [round(a / l,4), round(b / l,4), round(c / l,4)]\n else:\n return [0, 0, 0]\n def vector(p1,p2,p3):\n a= ((p2[1]-p1[1])*(p3[2]-p1[2])-(p2[2]-p1[2])*(p3[1]-p1[1]))\n b=( (p2[2]-p1[2])*(p3[0]-p1[0])-(p2[0]-p1[0])*(p3[2]-p1[2]))\n c=( (p2[0]-p1[0])*(p3[1]-p1[1])-(p2[1]-p1[1])*(p3[0]-p1[0]))\n l=sqrt(pow(a,2)+pow(b,2)+pow(c,2))\n if c<0:\n c=c*(-1)\n a=a*(-1)\n b=b*(-1)\n q=np.array([a,b,c])\n if l!=0:\n return np.around(q/l, decimals=3)\n else:\n return [0,0,0]\n def circle(p1, p2, p3):\n x1 = p1[0]\n x2 = p2[0]\n x3 = p3[0]\n y1 = p1[1]\n y2 = p2[1]\n y3 = p3[1]\n a = x1 - x2\n b = y1 - y2\n c = x1 - x3\n d = y1 - y3\n a1 = ((x1 * x1 - x2 * x2) + (y1 * y1 - y2 * y2)) / 2.0\n a2 = ((x1 * x1 - x3 * x3) + (y1 * y1 - y3 * y3)) / 2.0\n theta = b * c - a * d\n if abs(theta) < 1e-7:\n return -1\n x0 = (b * a2 - d * a1) / theta\n y0 = (c * a1 - a * a2) / theta\n r = np.sqrt(pow((x1 - x0), 2) + pow((y1 - y0), 2))\n e = []\n e.append(x0)\n e.append(y0)\n return e, r\n\n\n # maxlen = 0\n # area = []\n # for i in apple:\n # if len(i)>maxlen:\n # maxlen=len(i)\n # area=i\n #if in the circle range\n pointset1=[]\n pointset2=[]\n for q in area:\n pointset2.append(kp2[q[0].trainIdx].pt)\n pointset1.append(kp1[q[0].queryIdx].pt)\n\n def processpointset(pointset,depimg,flag):\n sign = [0] * len(pointset)\n poinsetx=[]\n poinsety=[]\n for i in pointset:\n poinsetx.append(i[0])\n poinsety.append(i[1])\n # print(depimg[int(i[1])][int(i[0])])\n # print(\"++++++++++++\", pointset)\n # ###distance matirx\n # grapy_distance = np.zeros((len(pointset), len(pointset)))\n # for i in range(len(grapy_distance)):\n # for j in range(len(grapy_distance)):\n # grapy_distance[i][j] = distan(pointset[i], pointset[j])\n # # print(grapy_distance)\n # # find longest distance\n # longest = 0\n # poin1 = poin2 = -1\n # for i in range(len(grapy_distance)):\n # for j in range(len(grapy_distance)):\n # if grapy_distance[i][j] > longest:\n # longest = grapy_distance[i][j]\n # poin1 = i\n # poin2 = j\n # sign[poin1] = 1\n # sign[poin2] = 2\n # print(poin1, poin2)\n centerr=[np.mean(poinsetx),np.mean(poinsety)]\n r=30\n # centerr, r = mdcircle(pointset[poin1], pointset[poin2])\n # print(\"ringg\", centerr, r)\n # for i in range(len(pointset)):\n # if distan(pointset[i], centerr) <= r:\n # sign[i] = 1\n # if min(sign) == 0:\n # one_max = 0\n # taget = -1\n # for j in range(len(sign)):\n # if sign[j] == 0:\n # if three(pointset[poin1], pointset[poin2], pointset[j]) > one_max:\n # one_max = three(pointset[poin1], pointset[poin2], pointset[j])\n # taget = j\n # print(\"007\")\n # # print(pointset[poin1],pointset[poin2],pointset[taget])\n # centerr, r = circle(pointset[poin1], pointset[poin2], pointset[taget])\n # print(centerr, r)\n finalset = [] # depth image pixel\n cx = 2044.08\n cy = 1550.39\n fx = 1955.83\n fy = 1955.42\n xaxis = int(centerr[1])##\n yaxis = int(centerr[0])##inverse x,y pixel\n zw1 = depimg[xaxis][yaxis]\n print(zw1,\"zwwwwwwwwwwwwww\")\n while zw1 == 0:\n xaxis += 1\n yaxis += 1\n zw1 = depimg[xaxis][yaxis]\n if flag==1:\n maxx,mixx,dirsin,distnn,maxy,miny=growprocess([[xaxis,yaxis]],depimg)\n else:\n 
maxx=mixx=maxy=miny=0\n dirsin=[0,0,0]\n distnn=0\n\n\n xw1 = (yaxis - cx) * zw1 / fx\n yw1 = (xaxis - cy) * zw1 / fy\n ccord = [xw1,yw1,zw1]\n print(ccord, 'qqqqqqqqqqqqqqqqqqqqmmmmmmmmmmmmmmmm')\n black = []\n\n for i in range(int(centerr[0] - r+1), int(centerr[0] + r-1)):\n for j in range(int(centerr[1] - r+1), int(centerr[1] + r-1)):\n if depimg[j][i] != 0:\n zw = depimg[j][i]\n xw = (i - cx) * zw / fx\n yw = (j - cy) * zw / fy\n yes = [xw, yw, zw]\n finalset.append(yes)\n # j1=int(sqrt(pow(r,2)-pow(centerr[0]-i,2))+centerr[1])\n # j2=int(centerr[1]-sqrt(pow(r,2)-pow(centerr[0]-i,2)))\n # if depimg[j1][i] != 0:\n # zw = int(depimg[j1][i])\n # xw = int((j1 - cx) * zw / fx)\n # yw = int((i - cy) * zw / fy)\n # yes = [xw, yw, zw]\n # finalset.append(yes)\n # if depimg[j2][i] != 0:\n # zw2 = int(depimg[j2][i])\n # xw2 = int((j2 - cx) * zw2 / fx)\n # yw2 = int((i - cy) * zw2 / fy)\n # yes2 = [xw2, yw2, zw2]\n # finalset.append(yes2)\n\n\n # print(\"yes\",finalset)\n if finalset==[]:\n return 0\n # chiek = []\n # for i in finalset:\n # print(2223)\n # if i not in chiek:\n # print(222)\n # chiek.append(i)\n # print(\"------------\", chiek)\n\n vecooo = []\n LLs_x=[]\n LLs_y=[]\n LLs_z=[]\n for i in range(len(finalset)):\n if finalset[i][2]!=0:\n LLs_x.append(finalset[i][0])\n LLs_y.append(finalset[i][1])\n LLs_z.append(finalset[i][2])\n LLs_result=LLs(LLs_x,LLs_y,LLs_z,len(LLs_z))\n print(\"this is Ls result\",round(LLs_result[0][0],3),round(LLs_result[1][0],3),round(LLs_result[2][0],3))\n sock3=[round(LLs_result[0][0],3),round(LLs_result[1][0],3),-1]\n d3=round(LLs_result[2][0],3)\n\n for i in range(200):\n index = random.sample(range(0, len(finalset)),3)\n\n directn=vector1(finalset[index[0]], finalset[index[1]],finalset[index[2]])\n if directn!=[0,0,0]:\n vecooo.append( directn)\n print( directn)\n print(\"vvvvvvvvvvvvvv\", vecooo)\n sup = np.array(vecooo)\n sock = sup.mean(axis=0)\n vecooo2=[]\n for i in vecooo:\n #if angelsurface(i,sock)<10:\n vecooo2.append(i)\n sup2=np.array(vecooo2)\n sock1=sup2.mean(axis=0)\n print(sock1)\n sock2 = []\n for i in sock1:\n #i=i / sqrt(pow(sock[0],2)+pow(sock[1],2)+pow(sock[2],2))\n j=np.around(i,decimals=3)\n sock2.append(j)\n print(sock2)\n cv2.circle(depimg,(int(centerr[0]),int(centerr[1])),int(r),(0,0,255),thickness=3)\n # for i in black:\n # depimg[i[1]][i[0]] = 0\n # out=clater(depimg,sock3,ccord,r,d3)\n out = clater(depimg, dirsin, ccord, r, distnn,maxx,mixx,[xaxis,yaxis],maxy,miny)\n return out\n\n out_d=processpointset(pointset2,imgd2,1)\n print(\"second\")\n out_o= processpointset(pointset1, imgd1,0)\n print(\"first\")\n return out_d,out_o\n\ndef ini():\n btn1.pack_forget()\n btnx.pack()\n btnx2.pack()\n btnx3.pack()\n btnx4.pack()\n btnx5.pack()\n btnx6.pack()\n\n ax.set_xlim3d(-300, 300)\n ax.set_xlabel(\"x\")\n ax.set_ylabel(\"y\")\n\n ax.set_ylim3d(-300, 300)\n ax.set_zlim3d(0, 600)\n\n ax.cla()\n zc=0\n for i in range(len(ori_out)):\n X = []\n Y = []\n Z = []\n X.append(ori_out[i].locate[0])\n Y.append(ori_out[i].locate[1])\n Z.append(ori_out[i].locate[2])\n X.append(ori_out[i].locate[0]+4*ori_out[i].direct[0])\n Y.append(ori_out[i].locate[1] + 4*ori_out[i].direct[1])\n Z.append(ori_out[i].locate[2] + 4*ori_out[i].direct[2])\n ax.plot(X, Y, Z, 'bo--', color=\"black\",linestyle=\":\")\n for i in range(len(ori_out)):\n for j in range(i,len(ori_out)):\n X = []\n Y = []\n Z = []\n X.append(ori_out[i].locate[0])\n Y.append(ori_out[i].locate[1])\n Z.append(ori_out[i].locate[2])\n X.append(ori_out[j].locate[0])\n Y.append(ori_out[j].locate[1])\n 
Z.append(ori_out[j].locate[2])\n ax.plot(X, Y, Z, 'bo--', color=aric[zc])\n zc+=1\n\n canvs.draw()\n\n\n\ndef oriform():\n\n #ax.set_xlim3d(-300, 300)\n ax.set_xlabel(\"x\")\n ax.set_ylabel(\"y\")\n ax.set_zlabel(\"z\")\n # ax.set_ylim3d(-300, 300)\n # ax.set_zlim3d(0, 400)\n\n zc = 0\n ax.cla()\n for i in range(len(ori_out)):\n X = []\n Y = []\n Z = []\n X.append(ori_out[i].locate[0])\n Y.append(ori_out[i].locate[1])\n Z.append(ori_out[i].locate[2])\n X.append(ori_out[i].locate[0]+4*ori_out[i].direct[0])\n Y.append(ori_out[i].locate[1] + 4*ori_out[i].direct[1])\n Z.append(ori_out[i].locate[2] + 4*ori_out[i].direct[2])\n ax.plot(X, Y, Z, 'bo--', color=\"black\",linestyle=\":\")\n for i in range(len(ori_out)):\n for j in range(i, len(ori_out)):\n X = []\n Y = []\n Z = []\n X.append(ori_out[i].locate[0])\n Y.append(ori_out[i].locate[1])\n Z.append(ori_out[i].locate[2])\n X.append(ori_out[j].locate[0])\n Y.append(ori_out[j].locate[1])\n Z.append(ori_out[j].locate[2])\n ax.plot(X, Y, Z, 'bo--', color=aric[zc])\n zc += 1\n\n canvs.draw()\ndef randomcolor():\n colora=[\"1\",'2','3','4','5','6','7','8','9','A','B','C','D','E','F']\n color=\"\"\n for i in range(6):\n color+=colora[random.randint(0,14)]\n return \"#\"+color\n\ndef proform():\n\n ax.set_xlim3d(-300, 300)\n ax.set_xlabel(\"x\")\n ax.set_ylabel(\"y\")\n ax.set_zlabel(\"z\")\n ax.set_ylim3d(-300, 300)\n ax.set_zlim3d(0, 300)\n zc=0\n ax.cla()\n for i in range(len(process_out)):\n X = []\n Y = []\n Z = []\n X.append(process_out[i].locate[0])\n Y.append(process_out[i].locate[1])\n Z.append(process_out[i].locate[2])\n X.append(process_out[i].locate[0] + 4 * process_out[i].direct[0])\n Y.append(process_out[i].locate[1] + 4 * process_out[i].direct[1])\n Z.append(process_out[i].locate[2] + 4 * process_out[i].direct[2])\n ax.plot(X, Y, Z, 'bo--', color=\"black\", linestyle=\":\")\n for i in range(len(process_out)):\n for j in range(i, len(process_out)):\n X = []\n Y = []\n Z = []\n X.append(process_out[i].locate[0])\n Y.append(process_out[i].locate[1])\n Z.append(process_out[i].locate[2])\n X.append(process_out[j].locate[0])\n Y.append(process_out[j].locate[1])\n Z.append(process_out[j].locate[2])\n ax.plot(X, Y, Z, 'bo--', color=aric[zc])\n zc += 1\n\n canvs.draw()\n\ndef rt1():\n global rotatevalue1\n global rotatevalue2\n rotatevalue1 +=5\n ax.view_init(rotatevalue1, rotatevalue2)\ndef rt2():\n global rotatevalue1\n global rotatevalue2\n rotatevalue1 -= 5\n ax.view_init(rotatevalue1, rotatevalue2)\ndef rt3():\n global rotatevalue1\n global rotatevalue2\n rotatevalue2 += 5\n ax.view_init(rotatevalue1, rotatevalue2)\ndef rt4():\n global rotatevalue1\n global rotatevalue2\n rotatevalue2 -= 5\n ax.view_init(rotatevalue1, rotatevalue2)\n\ndef angelsurface(a,b):\n cosang=(a[0]*b[0]+a[1]*b[1]+a[2]*b[2])/(sqrt(pow(a[0],2)+pow(a[1],2)+pow(a[2],2))*sqrt(pow(b[0],2)+pow(b[1],2)+pow(b[2],2)))\n\n angofsur=math.acos(round(cosang,3))\n res=angofsur*180/np.pi\n return round(res,3)\ndef cluster_distance(a,b):\n aa=a.direct\n bb=b.direct\n res=pow(aa[0]-bb[0],2)+pow(aa[1]-bb[1],2)+pow(aa[2]-bb[2],2)\n return round(res,3)\n\ndef check(coordinate,imagd):\n # global seterror\n cx = 2044.08\n cy = 1550.39\n fx = 1955.83\n fy = 1955.42\n xw=coordinate[0]\n yw=coordinate[1]\n zw=coordinate[2]\n # print(coordinate,\"point\")\n y=int(xw*fx/zw+cx)\n x=int(yw*fy/zw+cy)\n if x>3071 or y>4095 or x<0 or y<0:\n return 100000000\n z=imagd[x][y]\n # print(x, y, z,xw,yw,zw, 'xy')\n # print(z)\n xx= (y - cx) * z / fx\n yy= (x - cy) * z / fy\n # print(xx, yy, z, xw, yw, zw, 
'xy')\n #\n errors=sqrt(pow(xw-xx,2)+pow(yw-yy,2)+pow(zw-z,2))\n # errors = abs(xw - xx) * abs(yw - yy) * abs(zw - z)\n # errors=abs(zw-z)\n print(errors, \"erors\")\n # print(xx,yy,errors,\"errors\")\n return errors\n # if errors<1:\n # return True\n # else:\n # return False\ndef check2(coordinate,imagd,plane):\n # global seterror\n dir=plane.direct\n cx = 2044.08\n cy = 1550.39\n fx = 1955.83\n fy = 1955.42\n xw=coordinate[0]\n yw=coordinate[1]\n zw=coordinate[2]\n # print(coordinate,\"point\")\n y=int(xw*fx/zw+cx)\n x=int(yw*fy/zw+cy)\n if x>3071 or y>4095 or x<0 or y<0:\n return 100000000\n z=imagd[x][y]\n # print(x, y, z,xw,yw,zw, 'xy')\n # print(z)\n xx= (y - cx) * z / fx\n yy= (x - cy) * z / fy\n print(xx, yy, z, xw, yw, zw, 'xy')\n #\n errors=abs(xx*dir[0]+yy*dir[1]+z*dir[2]+plane.d)\n # errors=sqrt(pow(xw-xx,2)+pow(yw-yy,2)+pow(zw-z,2))\n # errors = abs(xw - xx) * abs(yw - yy) * abs(zw - z)\n # errors=abs(zw-z)\n print(errors, \"erors\")\n # print(xx,yy,errors,\"errors\")\n return errors\n # if errors<1:\n # return True\n # else:\n # return False\ndef bending(c,d):\n a=c.direct\n ap = c.locate\n b=d.direct\n # print(a,b,\"two\")\n bp = d.locate\n # da=np.dot(np.array(a),np.array(ap))\n # db=np.dot(np.array(b),np.array(bp))\n da=c.d*(-1)\n db=d.d*(-1)\n x1 = a[1] * b[2] - b[1] * a[2]\n y2 = a[2] * b[0] - a[0] * b[2]\n z3 = a[0] * b[1] - a[1] * b[0]\n t = [x1, y2, z3]\n #print(da,db,a,b,t,\"ddd\")\n px=1\n aa = solve([px*a[0] + y*a[1] +z*a[2]- da, px * b[0] + b[1] * y +z*b[2]- db], [y, z])\n #print(aa.keys())\n gap=[1]\n if y in aa.keys():\n #print(\"111\")\n gap.append(aa[y])\n else:\n gap.append(0)\n if z in aa.keys():\n gap.append(aa[z])\n else:\n gap.append(0)\n #print(gap)\n return np.array(t),np.array(gap)\n#t is the direction, gap is the\ndef slide(t,gap,start,end,planea,planeb,imgd,flag):\n alpa =3\n der = sqrt(pow(t[0], 2) + pow(t[1], 2) + pow(t[2], 2))\n if flag==0:\n ders=int(alpa*abs(t[0])/der)\n if ders==0:\n ders=1\n print(der,ders)\n else:\n ders = int(alpa* abs(t[1]) / der)\n if ders==0:\n ders=1\n print(der, ders)\n gapa=planea.direct\n gapb=planeb.direct\n # da=np.dot(np.array(planea.direct),np.array(planea.locate))\n # db=np.dot(np.array(planeb.direct),np.array(planeb.locate))\n da=planea.d*(-1)\n db=planeb.d*(-1)\n def cal(point):\n state=(point-gap[0])/t[0]\n y=state*t[1]+gap[1]\n z=state*t[2]+gap[2]\n return [point,y,z]\n def cal2(point):\n state=(point-gap[1])/t[1]\n x=state*t[0]+gap[0]\n z=state*t[2]+gap[2]\n return [x,point,z]\n print(start, end-alpa)\n sum=[0]*4\n tree=10\n step=end-start-80\n time=[0]*4\n switch=[0]*4\n for i in range(start,end-alpa,ders):\n print(1)\n if flag==0:\n point1=cal(i)\n point2=cal(i+ders)\n else:\n point1 = cal2(i)\n point2 = cal2(i + ders)\n if max(sum) >(step/ders)/2:\n # print(point1, point2)\n drawlines(point1, point2, imgd)\n return True\n # print(\"p1p2-------------------------------\",check(point1, imgd),check(point2, imgd),point1,point2)\n if check(point1, imgd)tree and p1>tree) or (q1>tree and q2>tree):\n sum=[0]*4\n time=[0]*4\n switch=[0]*4\n else:\n sum=[0]*4\n time = [0] * 4\n switch = [0] * 4\n else:\n sum=[0]*4\n time = [0] * 4\n switch = [0] * 4\n\n return False\n\n\ndef cldis(a,b):\n dira=a.direct\n da=a.d\n cena=a.locate\n dirb=b.direct\n db=b.d\n cenb=b.locate\n error1=abs(cenb[0]*dira[0]+cenb[1]*dira[1]+cenb[2]*dira[2]+a.d)\n error2=abs(cena[0]*dirb[0]+cena[1]*dirb[1]+cena[2]*dirb[2]+b.d)\n print(error1,error2,\"12\")\n return abs(error2+error1)\ndef pullback(seed,imad):\n cx = 2044.08\n cy = 1550.39\n fx = 
1955.83\n fy = 1955.42\n xw = seed.locate[0]\n yw = seed.locate[1]\n zw = seed.locate[2]\n y = int(xw * fx / zw + cx)\n x = int(yw * fy / zw + cy)\n seedq=[[x,y]]\n return growprocess(seedq,imad)\n\n# on the plane, distance, vertical\n\n\n\n\n\n\n\nif __name__ == \"__main__\":\n # load image\n image_a = cv2.imread('/home/hexin/桌面/deform/s4.png')\n image_b = cv2.imread('/home/hexin/桌面/dataset/tea2.png')\n image_d = np.load('/home/hexin/桌面/deform/s4.npy')\n image_tes1 = cv2.imread('/home/hexin/桌面/cool2.png')\n #image_tes2 = np.load('/home/hexin/桌面/dep2.npy')\n ###########################################################\n # k4a = PyK4A(Config(color_resolution=ColorResolution.RES_3072P,\n # depth_mode=pyk4a.DepthMode.WFOV_UNBINNED,\n # synchronized_images_only=True,\n # camera_fps=pyk4a.FPS.FPS_5, ))\n # k4a.connect()\n # k4a.exposure_mode_auto=True\n # k4a.whitebalance_mode_auto=True\n # k4a.sharpness=4\n # k4a.backlight_compensation=1\n # k4a.gain=100\n\n # getters and setters directly get and set on device\n\n #k4a.whitebalance = 4510\n #assert k4a.whitebalance == 4510\n #a = json.loads(k4a.get_calibra())\n #inner=a[\"CalibrationInformation\"]['Cameras'][0]['Intrinsics'][\"ModelParameters\"]\n\n while 1:\n img_color =cv2.imread('/home/hexin/桌面/FINAL/s4d.png')\n img_depth=np.load('/home/hexin/桌面/FINAL/S4.npy')\n # img_color = k4a.get_capture(color_only=True)\n # img_color, img_depth = k4a.get_capture() # Would also fetch the depth image\n im = np.asarray(img_depth, np.uint16)\n #im2=np.asarray(image_d, np.uint16)\n cv2.imshow('k4a2', img_color)\n if cv2.waitKey(1) & 0xff == 27:\n cv2.destroyAllWindows()\n #cv2.imwrite('/home/hexin/桌面/cool2.png', img_color)\n print(len(image_a), len(img_color[:, :, :3]), len(img_depth),len(image_d))\n cv2.imwrite('/home/hexin/桌面/cool2.png', img_color[:, :, :3])\n np.save('/home/hexin/桌面/dep2.npy', im)\n #img2,xxx,s,center= sift_detect(image_a, img_color[:, :, :3], im)\n img2, process_out,ori_out = sift_detect(image_a, img_color[:, :, :3], im,image_d)\n # img2, process_out, ori_out = sift_detect(image_a, image_tes1, image_tes2, image_d)\n for q in range(len(process_out)):\n print(\"origin\",ori_out[q].direct,ori_out[q].locate)\n print(\"box\",process_out[q].direct, process_out[q].locate)\n grapy_angel = np.zeros((len(process_out), len(process_out)))\n orig_angel = np.zeros((len(process_out), len(process_out)))\n grapy_distance = np.zeros((len(process_out), len(process_out)))\n grapy_bend = np.zeros((len(process_out), len(process_out)))\n grapy_ture=np.zeros((len(process_out), len(process_out)))\n process_sort=[0]*len(process_out)\n max_out=[0]*len(process_out)\n min_out=[0]*len(process_out)\n grapy_twe = np.zeros((len(process_out), len(process_out)))\n\n for i in range(len(process_out)):\n for j in range(i+1,len(process_out)):\n if cldis(process_out[i],process_out[j])<19:\n # cluster_distance(process_out[i], process_out[j]) < 0.01 or\n # if angelsurface(process_out[i].direct, process_out[j].direct)<10:\n grapy_twe[i][j]=1\n grapy_twe[j][i]=1\n # grapy_twe[i][j] = cldis(process_out[i],process_out[j])\n # grapy_twe[j][i] = cldis(process_out[i],process_out[j])\n bob2 = MCL(grapy_twe, 2, 2)\n #\n # for i in range(len(bob2)):\n # if len(bob2[i])>1:\n # center = []\n # d = []\n # driec = []\n # for j in bob2[i]:\n # center.append(process_out[j].locate)\n # d.append(process_out[j].d)\n # driec.append(process_out[j].direct)\n # np.array(center)\n # np.array(d)\n # np.array(driec)\n # print(process_out[bob2[i][0]].direct, process_out[bob2[i][0]].d, 
process_out[bob2[i][0]].locate)\n # process_out[bob2[i][0]].direct = np.sum(driec, axis=0) / len(driec)\n # process_out[bob2[i][0]].d = np.sum(d) / len(d)\n # # process_out[bob2[i][0]].locate = np.sum(center, axis=0) / len(center)\n # print(process_out[bob2[i][0]].direct, process_out[bob2[i][0]].d, process_out[bob2[i][0]].locate)\n\n\n\n\n\n # for i in range(len(process_out)):\n # if process_sort[i]==0:\n # a,b=pullback(process_out[i],im)\n # max_out[i]=a\n # min_out[i]=b\n for i in range(len(bob2)):\n for j in range(i+1,len(bob2)):\n # if process_sort[i]==0 and process_sort[j]==0:\n ai=bob2[i][0]\n bi=bob2[j][0]\n print(\"aibi\",ai,bi)\n t, gap = bending(process_out[ai], process_out[bi])\n # st = process_out[ai].locate[0]\n if abs(t[0]) > abs(t[1]):\n if slide(t, gap, int(min(process_out[ai].minx, process_out[bi].minx)) - 40,\n int(max(process_out[ai].maxx, process_out[bi].maxx)) + 40, process_out[ai],\n process_out[bi], im,0):\n grapy_ture[ai][bi] = 1\n grapy_ture[bi][ai] = 1\n print(\"the bending is true\", ai, bi)\n else:\n if slide(t, gap, int(min(process_out[ai].miny, process_out[bi].miny)) - 40,\n int(max(process_out[ai].maxy, process_out[bi].maxy)) + 40, process_out[ai],\n process_out[bi], im,1):\n grapy_ture[ai][bi] = 1\n grapy_ture[bi][ai] = 1\n print(\"the bending is true\", ai, bi)\n\n # print(i,j,bending(process_out[i],process_out[j]))\n for i in range(len(process_out)):\n for j in range(i+1,len(process_out)):\n grapy_angel[i][j] = angelsurface(process_out[i].direct, process_out[j].direct)\n grapy_angel[j][i] = angelsurface(process_out[i].direct, process_out[j].direct)\n grapy_distance[i][j] = cluster_distance(process_out[i], process_out[j])\n grapy_distance[j][i] = cluster_distance(process_out[i], process_out[j])\n\n\n # for i in range(len(process_out)):\n # for j in range(i + 1, len(process_out)):\n # orig_angel[i][j] = angelsurface(ori_out[i].direct, ori_out[j].direct)\n # orig_angel[j][i] = angelsurface(ori_out[i].direct, ori_out[j].direct)\n\n print(grapy_angel)\n print(grapy_distance)\n print(grapy_ture)\n print(orig_angel)\n print(process_sort)\n print(bob2, \"grapays\")\n if len(process_out)>0:\n xxx=process_out[-1].dep\n ooo=ori_out[-1].dep\n else:\n xxx=im\n ooo=image_d\n aric=[]\n for i in range(int((len(ori_out)-1)*len(ori_out))):\n aric.append(randomcolor())\n rotatevalue1=5\n rotatevalue2=5\n fig = plt.figure()\n #ax = Axes3D(fig)\n ax2 = fig.add_subplot(221)\n ax2.imshow(img2)\n ax3 = fig.add_subplot(222)\n ax3.imshow(xxx)\n ax4 = fig.add_subplot(223)\n ax4.imshow(ooo)\n ax=fig.add_subplot(224,projection=\"3d\")\n win = tkinter.Tk()\n frame = tkinter.Frame(win, width=400, height=400)\n btn1 = tkinter.Button(frame, text='start', command=ini)\n btnx = tkinter.Button(frame, text='origin', command=oriform)\n btnx2 = tkinter.Button(frame, text='Deform', command=proform)\n btnx3 = tkinter.Button(frame, text='rotate1', command=rt1)\n btnx4 = tkinter.Button(frame, text='rotate2', command=rt2)\n btnx5 = tkinter.Button(frame, text='rotate3', command=rt3)\n btnx6 = tkinter.Button(frame, text='rotate4', command=rt4)\n\n btn1.pack()\n canvs = FigureCanvasTkAgg(fig, win)\n canvs.get_tk_widget().pack(side=tkinter.TOP, fill=tkinter.BOTH, expand=1)\n frame.focus_set() # 必须获取焦点\n frame.pack()\n win.mainloop()\n\n\n\n\n\n#########################################################################\n # for i in image_d:\n # for j in i:\n # print(j)\n # for i in image_d:\n # if i.any()>2 and i.any()<255:\n # print(i)\n # print(image_a.size,image_b.size,image_d.size)\n # # SIFT or SURF\n 
# img2= sift_detect(image_a, image_b,image_d)\n #\n # plt.imshow(img2)\n #\n # plt.show()","sub_path":"sssft.py","file_name":"sssft.py","file_ext":"py","file_size_in_byte":44894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"255095863","text":"from selenium import webdriver\nimport time\nimport datetime\nfrom selenium.webdriver.common.action_chains import ActionChains\nimport urlparse\nimport pickle\nimport sys\nimport os\nimport re\ndirname, filename = os.path.split(os.path.abspath(__file__))\ntoday = datetime.datetime.now().strftime('%Y%m%d%H%M%S')\ntransactions = 10\n# # necessary ingredients.\nprofiles = []\ndriver = webdriver.Chrome('./chromedriver')\n\ndef print_message(msg):\n print('=========================')\n print(' ' + msg + ' ')\n\n\n# grab the users messaged.\nwith open('profiles.data', 'rb') as filehandle:\n profiles = pickle.load(filehandle)\n print_message('Number of swipes so far : {}'.format(str(len(profiles))))\n\n\ndef check_backup_needed():\n global transactions\n if transactions >= 5:\n f_name = today + '.data'\n with open(dirname + '/backups/' + f_name, 'wb') as f:\n pickle.dump(profiles, f)\n transactions = 0\n\n\ndef login(username, password):\n driver.get('https://pof.com/login')\n time.sleep(1)\n driver.find_element_by_id('onetrust-accept-btn-handler').click()\n driver.find_element_by_id('login-username').send_keys(username)\n driver.find_element_by_id('login-password').send_keys(password)\n driver.find_element_by_id('login-submit').click()\n time.sleep(3)\n\n\ndef go_to_meet_me():\n driver.find_element_by_id('top-nav-meetme').click()\n time.sleep(2)\n\ndef race_ok(race_text):\n races = ['black']\n for race in races:\n if race in race_text:\n return False\n return True\n\ndef height_ok(height_text):\n try:\n height_in_cm = int(re.compile(\"([0-9]+cm)\").search(height_text).groups()[0][:3])\n except:\n return True\n return height_in_cm < 172\n\ndef body_ok(body_text):\n if 'BBW body' in body_text or 'A Few Extra Pounds' in body_text:\n return False\n return True\n\ndef send_message_or_swipe_left():\n global transactions\n transactions = transactions + 1\n actions = ActionChains(driver)\n time.sleep(2)\n element = driver.find_element_by_css_selector('.card-1 .meetmeimage')\n a_element = driver.find_element_by_css_selector(\n '.card-1 .meetmeimage a').get_attribute('href')\n parsed = urlparse.urlparse(a_element)\n current_profile = urlparse.parse_qs(parsed.query)['profile_id']\n if current_profile not in profiles:\n actions.move_to_element(element).click().perform()\n profiles.append(current_profile)\n time.sleep(2)\n race = driver.find_element_by_id('attributelist-item-ethnicity')\n height = driver.find_element_by_id('attributelist-item-height')\n body = driver.find_element_by_id('attributelist-item-bodyType')\n # replace with biased messaging flag.\n if race_ok(race.text) and height_ok(height.text) and body_ok(body.text):\n driver.find_element_by_id(\n 'text-area-element').send_keys(\"You're cute. \\n How's it going! 
:)\")\n driver.find_element_by_id('profile-message-submit').click()\n # update profile database.\n with open('profiles.data', 'wb') as f:\n pickle.dump(profiles, f)\n else:\n cross = driver.find_element_by_id('meetmevotebutton-no')\n actions.move_to_element(cross).click().perform()\n\n\ntry:\n login(sys.argv[1], sys.argv[2])\nexcept:\n driver.quit()\n\nwhile True:\n time.sleep(2)\n go_to_meet_me()\n driver.refresh()\n time.sleep(1)\n send_message_or_swipe_left()\n time.sleep(1)","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":3477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"618539149","text":"from django.conf.urls import url\nfrom TweeTest import views\n\nurlpatterns = [\n url(r'^$', views.view_home, name='home'),\n url(r'^about/$', views.AboutPageView.as_view()),\n url(r'^contact/$', views.ContactPageView.as_view()),\n url(r'^result/$', views.ResultPageView.as_view()),\n\n]\n\n","sub_path":"TweeTest/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"468897080","text":"import cv2\r\nimport os\r\n\r\nread_path = \"/home/compu/ymh/drawing/save/num_003/Drawing/character/refine.mkv\"\r\nsave_path = \"/home/compu/ymh/drawing/save/num_003/Drawing/character/refine.mp4\"\r\n\r\nfps = 30\r\n\r\nvidcap = cv2.VideoCapture(read_path)\r\nsuccess, image = vidcap.read()\r\ncount = 0\r\n\r\nframe_array = []\r\n\r\ncnt = 0\r\nwhile success:\r\n frame_array.append(image)\r\n \r\n success, image = vidcap.read()\r\n print('working: %d' % cnt)\r\n cnt += 1\r\n \r\nout = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'FMP4'), fps, (1920, 1080))\r\n\r\ncnt = 0\r\nfor i in range(len(frame_array)):\r\n out.write(frame_array[i])\r\n print('working: %d' % cnt)\r\n cnt += 1\r\n\r\nout.release()\r\n","sub_path":"mkv2mp4.py","file_name":"mkv2mp4.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"351357844","text":"import json\nrecipes_dictionary={}\nfridge_list=[]\nwith open(\"fridge.txt\") as fridge:\n for food_name in fridge.readlines():\n fridge_list.append(food_name.strip())\nwith open(\"sort_later.txt\", \"w\") as garbage:\n with open(\"cleaned_recipes.json\") as json_file:\n data = json.load(json_file)\n for recipe in data:\n try:\n ingredients_raw = data[recipe][\"ingredients\"]\n ingredients_mask = [\"0\" for i in range(len(fridge_list))]\n beaten=0\n for product in ingredients_raw:\n flag=1\n for abstraction_index in range(len(fridge_list)):\n if fridge_list[abstraction_index] in product.lower():\n ingredients_mask[abstraction_index]=\"1\"\n flag=0\n break\n if flag:\n beaten=1\n try:\n garbage.write(product+\"\\n\")\n except UnicodeEncodeError:\n print(product)\n if not beaten:\n bin_recipe=int(''.join(ingredients_mask), 2)\n recipes_dictionary[recipe]=bin_recipe\n except KeyError:\n pass\n\nprint(len(recipes_dictionary))\n# print(recipes_dictionary)\n\nwith open('adapted_recipes.json', 'w') as fp:\n json.dump(recipes_dictionary, fp)\n","sub_path":"middle_versions/recipes_adaptation.py","file_name":"recipes_adaptation.py","file_ext":"py","file_size_in_byte":1460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"455188866","text":"from django.shortcuts import render\nimport json\n# Create your views here.\nfrom django.http import 
HttpResponse\nfrom history.models import foodorders,date,new1\nfrom history.forms import new\nfrom matplotlib import pylab\nfrom pylab import *\nimport PIL,PIL.Image\nimport io\nfrom io import BytesIO\n\n\n\ndef history(request):\n    return render(request,'history/history.html',{'title':'history'})\n\ndef users(request):\n\n    form=new()\n    if request.method == \"POST\":\n        form=new(request.POST)\n\n        if form.is_valid():\n\n            form.save(commit=True)\n            form=new()\n\n            dict1={}\n            all_date=date.objects.all();\n            all_foodorders=foodorders.objects.all();\n\n            for d in all_date:\n                d1=d.FROM;\n                d2=d.TO;\n                print(d1)\n                print(d2)\n                for fo in all_foodorders:\n                    if fo.date>=d1 and fo.date<=d2:\n                        dict1[fo.foodname]=0;\n                for fo in all_foodorders:\n                    if fo.date>=d1 and fo.date<=d2:\n                        dict1[fo.foodname]=dict1[fo.foodname]+fo.quantity;\n\n            print(dict1)\n            emp=new1.objects.all()\n            emp.delete()\n            emp1=date.objects.all()\n            emp1.delete()\n            for key,value in dict1.items():\n                print(key,value)\n                p=new1(item=key,frequency=value)\n                p.save()\n\n\n            return render(request,'history/history.html',context={'d':dict1,'form':form})\n\n        else:\n\n\n            print(\"ERROR\");\n            return render(request,'history/history.html',context={'form':form})\n    # GET request: just render the empty form\n    return render(request,'history/history.html',context={'form':form})\n","sub_path":"history/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"305454820","text":"from math import ceil\n\n\ndef PgCreator(QuoteCount, currentPage, perPage):\n    currentPage = int(currentPage)\n    pageCount = ceil(float(QuoteCount / perPage))\n    nextPage = currentPage+1 if currentPage + 1 <= pageCount else None\n    prevPage = currentPage-1 if currentPage - \\\n        1 > 0 and currentPage - 1 < pageCount else None\n\n    return {\"pagination\": {\"nextpage\": nextPage,\n                           \"currentPage\": currentPage, \"prevPage\": prevPage, \"pages\": pageCount}}\n","sub_path":"utils/PaginationCreator.py","file_name":"PaginationCreator.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"383152927","text":"# coding=utf-8\nfrom time import sleep\n\nfrom selenium.webdriver import ActionChains\n\nfrom SRC.common.decorator import codeException_dec\nfrom SRC.unittest.case import TestCase\n\n\nclass EasyCase(TestCase):\n\tdef __init__(self, webDriver,paramsList):\n\t\t# Do not modify this method\n\t\tsuper(EasyCase, self).__init__(webDriver,paramsList)\n\n\t@codeException_dec('3')\n\tdef runTest(self):\n\t\tdriver = self.getDriver()\n\n\t\tjs = '$(\".classify\").show()'\n\t\tdriver.execute_script(js)\n\t\tjs1 = '$(\"#category > ul > li:nth-child(1) > div\").show()'\n\t\tdriver.execute_script(js1)\n\t\tdriver.find_element_by_xpath('//*[@id=\"category\"]/ul/li[1]/div/ul/li[1]/a').click() # click the art-paper category\n\t\tdriver.find_element_by_xpath('//*[@id=\"product_list\"]/div[2]/ul/li[1]/div/div[1]/a/img').click() # click the HK direct-mail-to-US product\n\t\tdriver.close()\n\t\tdriver.switch_to.window(driver.window_handles[0])\n\t\tdriver.find_element_by_link_text('立即订购').click() # click the Order Now link\n\t\t# driver.find_element_by_xpath('/html/body/div/div[4]/div[2]/div[2]/div/div[9]/div[2]/div[2]/div/input').send_keys('老王')\n\t\tsleep(3)\n\t\tdriver.find_element_by_css_selector('body > div > div.row > div.col-xs-12.main > div:nth-child(2) > div > div.osubmit > div > button').click() # click submit order\n\t\tdriver.find_element_by_xpath('//*[@id=\"body1\"]/dl/dd[1]/ul/li[4]/div[1]/input').click() # select the stored-value card\n\t\tdriver.find_element_by_xpath('//*[@id=\"body1\"]/dl/dd[2]/span/button').click() # click confirm payment\n\t\tsleep(3)\n","sub_path":"database/schemes/501/script/testCase/U商城项目/U商城前端/购物车/退货流程/退货流程3/下单付款1.py","file_name":"下单付款1.py","file_ext":"py","file_size_in_byte":1462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"374396348","text":"import random\r\nimport datetime\r\n\r\ndef quicksort(array):\r\n    _quicksort(array, 0, len(array) - 1)\r\n\r\ndef _quicksort(array, start, stop):\r\n    if stop - start > 0:\r\n        pivot, left, right = array[int((start + stop) / 2)], start, stop\r\n        while left <= right:\r\n            while array[left] < pivot:\r\n                left += 1\r\n            while array[right] > pivot:\r\n                right -= 1\r\n            if left <= right:\r\n                array[left], array[right] = array[right], array[left]\r\n                left += 1\r\n                right -= 1\r\n        _quicksort(array, start, right)\r\n        _quicksort(array, left, stop)\r\n\r\nn = 1000\r\ntomb = []\r\n\r\nfor i in range(n):\r\n    tomb.append(random.randint(0, n))\r\n\r\ntimeBefore = datetime.datetime.now()\r\n\r\nquicksort(tomb)\r\n\r\ntimeAfter = datetime.datetime.now()\r\n\r\nprint(timeAfter - timeBefore)\r\n\r\n# for i in tomb:\r\n#     print(i)\r\n","sub_path":"quicksort.py","file_name":"quicksort.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"543759826","text":"from django.contrib import admin\nfrom django.urls import path,include\nfrom . import views\n\nurlpatterns = [\n    path('/',views.home,name=\"homepage\"),\n    path('/addservice',views.AddService,name=\"addservice\"),\n    path('/allservices',views.Allservice,name=\"allservice\"),\n    path('/gettimeinterval',views.GetTimeInterval,name=\"gettimeinterval\"),\n    path('/settimeinterval',views.SetTimeInterval, name=\"settimeinterval\"),\n    path('/removeservice',views.RemoveService,name=\"removeservice\"),\n]","sub_path":"Grafana_Dashboard/api/url.py","file_name":"url.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"453601189","text":"\"\"\" Dictionary Class Object\n\nCreates a Dictionary Class that imports words from a given dictionary file. \nUser can also add, delete and find words. Python script also contains a Statistics \nClass that measures the run-time and collisions of various hash_bases and table_sizes. \n\"\"\"\n__author__ = 'Nicholas Chua'\n__docformat__ = 'reStructuredText'\n__modified__ = '05/06/2020'\n__since__ = '25/05/2020'\n\nfrom hash_table import LinearProbeHashTable, T\nfrom timeit import default_timer as timer\n\nclass Dictionary(LinearProbeHashTable[T]):\n    def __init__(self, hash_base:int, table_size:int) -> None:\n        '''Initiates a hash_table instance based on given hash_base and table_size\n        \n        :complexity: O(N), where N is the table_size\n        '''\n        self.hash_table = LinearProbeHashTable(hash_base, table_size)\n\n    def load_dictionary(self, filename:str, time_limit:int=None) -> int:\n        '''Loads words from file name into hash_table. Each line is a word.\n        \n        :complexity: O(N), where N is the length of lines in a file\n        '''\n        word_count = 0 \n        with open(filename, 'r', encoding = 'utf-8') as file:\n            start_time = timer() \n            for word in file:\n                if time_limit != None and timer() - start_time > time_limit:\n                    raise TimeoutError(\"Time limit exceeded. Load dictionary failed.\")\n                self.add_word(word.strip(\"\\n\"))\n                word_count += 1\n        return word_count\n    \n    def add_word(self, word:str) -> None:\n        '''Adds a given word and stores into hash_table. 
Word is paired with 1 as the value-pair.\n :complexity best: O(K) where the first searched position is empty, using linear probe,\n where K is the size of the key\n :complexity worst: O(N^2) when we've searched the entire table, using linear probe, and \n the table is rehashed, where N is the table_size\n '''\n self.hash_table[word.lower()] = 1\n \n def find_word(self, word:str) -> bool:\n '''Returns true if given word exists in dictionary. Otherwise, returns false.\n :complexity best: O(K) where the given word is the first searched position, using linear\n probe, where K is the size of the key\n :complexity worst: O(N) where the given word is the last searched position, using linear\n probe, where N is the size of the hash_table\n :raises KeyError: When a position can't be found\n '''\n return self.hash_table[word.lower()] == 1\n\n def delete_word(self, word:str) -> None:\n '''Deletes the given word from the dictionary. Otherwise, raises KeyError.\n :complexity best: O(K) finds the position straight away and doesn't have to rehash\n where K is the size of the key\n :complexity worst: O(K + N) when it has to rehash all items in the hash table\n where N is the table size\n :raises KeyError: When a position can't be found\n '''\n self.hash_table.__delitem__(word.lower())\n \n def menu(self) -> None:\n '''Initiates a menu in terminal. Uses methods in Dictionary class. Runs for as long as \n user uses the menu, in other words, while exit_boolean is false. Terminates when exit_boolean \n is true.'''\n exit_boolean = False\n while not exit_boolean:\n print(\"Select option: \")\n print(\"1 - Read file\")\n print(\"2 - Add a word\")\n print(\"3 - Find a word\")\n print(\"4 - Delete a word\")\n print(\"5 - Exit\")\n\n try:\n option = int(input())\n if option < 1 or option > 5:\n raise ValueError\n except ValueError:\n print(\"Input given is invalid, try again!\") \n else:\n if option == 1:\n # handles ValueError for time_limit. Sets strings and empty inputs to None\n try:\n filename = input(\"What file would you like to import? \")\n time_limit = int(input(\"How long do you want to wait? (For no time limit, just press enter)\"))\n except ValueError:\n time_limit = None\n\n # handles FileNotFoundError. Returns to menu if not found. \n try:\n total_words = self.load_dictionary(filename, time_limit)\n except FileNotFoundError:\n print(\"File not found. Input was not given the correct directory. Returning to menu.\")\n else:\n print(\"File successfully imported \"+str(total_words)+\" words!\")\n elif option == 2:\n word = input(\"What word would you like to add? \")\n self.add_word(word)\n elif option == 3:\n try:\n word = input(\"What word do you want to find? \")\n self.find_word(word)\n except KeyError as e:\n print(\"Key doesn't exist: \"+str(e))\n else:\n print(\"Key found!\")\n elif option == 4:\n try:\n word = input(\"What word that exists in the dictionary would you like to delete? \")\n self.delete_word(word)\n except KeyError as e:\n print(\"Key doesn't exist: \"+str(e))\n else:\n print(\"Key successfully deleted!\")\n elif option == 5:\n print(\"Exiting program. \")\n exit_boolean = True\n\nclass Statistics:\n def load_statistics(self, hash_base:int, table_size:int, filename:str, max_time:int) -> tuple:\n '''Imports a given dictionary and measures run-time based on hash_base & table_size. \n If max_time has been exceeded, load_dictionary will abort. Returns dictionary and collision\n statistics. 
\n\n :complexity: O(N), where N is the length of lines in a file\n '''\n dictionary = Dictionary(hash_base, table_size)\n try:\n start_time = timer()\n words = dictionary.load_dictionary(filename, max_time) # returns the word count of filename\n final_time = timer() - start_time\n except TimeoutError:\n words = \"TIMEOUT\"\n final_time = \"TIMEOUT\"\n \n return (words, final_time, dictionary.hash_table.collision_count, dictionary.hash_table.probe_total, dictionary.hash_table.probe_max, \n dictionary.hash_table.rehash_count)\n\n def table_load_statistics(self, max_time) -> None:\n '''Outputs a CSV file named 'output_task2.csv' based from dictionary statistics given by \n load_statistics. Each dictionary is tested for run-time based on differing hash_base & \n table_size.\n\n Run-time WILL differ based on table_load_limit, which may cause table to rehash. See __setitem__ in \n hash_table.py\n \n :complexity best: O(N*C) as it iterates through each hash combination and imports each line in a file,\n where N is the length of lines in a file and C is the length of each combination.\n :complexity worst: O(N*C*H) when it needs to rehash the table, where H is the length of the already\n existing hash table\n '''\n dictionaries_array = [\"english_small.txt\", \"english_large.txt\", \"french.txt\"]\n bases_array = [1, 27183, 250726]\n table_size_array = [250727, 402221, 1000081]\n statistics_array = []\n\n # setting up combinations for hashing to minimise O-complexity\n hash_combinations = []\n for base in bases_array:\n for table_size in table_size_array:\n hash_combinations.append((base, table_size))\n\n for dictionary in dictionaries_array:\n for combo in hash_combinations:\n words, time, collision_count, probe_total, probe_max, rehash_count = self.load_statistics(combo[0], combo[1], \n dictionary, max_time)\n\n if time == \"TIMEOUT\":\n # uses max_time when time has exceed max_time\n data = [dictionary, combo[0], combo[1], words, collision_count, probe_total, probe_max, rehash_count, max_time]\n statistics_array.append(data)\n else:\n # otherwise uses actual time\n data = [dictionary, combo[0], combo[1], words, collision_count, probe_total, probe_max, rehash_count, time]\n statistics_array.append(data)\n\n with open(\"output_task2.csv\", \"w\") as file:\n file.write(\"Dictionary, Hash Base, Table Size, Total Words, Collision Count, Probe Total, Probe Max, Rehash Count, Time \\n\")\n for data_line in statistics_array:\n file.write(str(data_line[0])+\", \"+str(data_line[1])+\", \"+str(data_line[2])+\", \"+str(data_line[3])+\", \"+str(data_line[4])\n +\", \"+str(data_line[5])+\", \"+str(data_line[6])+\", \"+str(data_line[7])+\", \"+str(data_line[8])+\" \\n\")\n\nif __name__ == '__main__':\n # dictionary = Dictionary(31, 17)\n # dictionary.menu()\n\n Statistics().table_load_statistics(10)","sub_path":"Dictionary Word Finder/dictionary.py","file_name":"dictionary.py","file_ext":"py","file_size_in_byte":9409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"339134459","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n#\n# Complete the 'gradingStudents' function below.\n#\n# The function is expected to return an INTEGER_ARRAY.\n# The function accepts INTEGER_ARRAY grades as parameter.\n#\n\ndef gradingStudents(grades):\n # Write your code here\n # gradeStr=[]\n # finalGrades=[]\n # for i in grades:\n # gradeStr.append(str(i))\n # for i in gradeStr:\n # if int(i)>=38:\n # if int(i[1])<5:\n # if ((int(i[0])*10) 
+ 5) - int(i) <3:\n # finalGrades.append((int(i[0])*10) + 5)\n # else:\n # finalGrades.append((i))\n # elif int(i[1])>5:\n # if ((int(i[0])+1)*10) - int(i) <3:\n # finalGrades.append((int(i[0])+1)*10)\n # else:\n # finalGrades.append(int(i))\n # else:\n # finalGrades.append(int(i))\n # else:\n # finalGrades.append(int(i))\n \n # return finalGrades\n x=[]\n for i in grades:\n if i<38 or i%5<3:\n x.append(i)\n else:\n x.append((i-(i%5))+5)\n return x\n\n \n\n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n grades_count = int(input().strip())\n\n grades = []\n\n for _ in range(grades_count):\n grades_item = int(input().strip())\n grades.append(grades_item)\n\n result = gradingStudents(grades)\n\n fptr.write('\\n'.join(map(str, result)))\n fptr.write('\\n')\n\n fptr.close()\n","sub_path":"GradingStudent.py","file_name":"GradingStudent.py","file_ext":"py","file_size_in_byte":1555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"404301113","text":"from __future__ import unicode_literals\n\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.core import urlresolvers\nfrom django.core.exceptions import ImproperlyConfigured\nfrom django.db import models\nfrom django.test import TestCase\nfrom django.utils.encoding import python_2_unicode_compatible\n\nfrom ..permalinks import expand, resolve, PermalinkError\n\n\n@python_2_unicode_compatible\nclass TestPermalinkModel(models.Model):\n\n def __str__(self):\n return 'Foo'\n\n def get_absolute_url(self):\n return '/foo/'\n\n\nclass PermalinksTest(TestCase):\n\n def test_resolve(self):\n obj = TestPermalinkModel.objects.create()\n\n url = resolve('/r/{}-{}/'.format(\n ContentType.objects.get_for_model(TestPermalinkModel).pk,\n obj.pk\n ))\n\n self.assertEqual(url, obj)\n\n with self.assertRaises(PermalinkError):\n # A valid URL, but not a permalink.\n resolve('/admin/')\n\n original_urlconf = urlresolvers.get_urlconf()\n with self.assertRaises(ImproperlyConfigured):\n urlresolvers.set_urlconf('cms.tests.urls')\n resolve('/r/')\n\n urlresolvers.set_urlconf(original_urlconf)\n\n def test_expand(self):\n obj = TestPermalinkModel.objects.create()\n\n self.assertEqual(obj.__str__(), 'Foo')\n\n url = expand('/r/{}-{}/'.format(\n ContentType.objects.get_for_model(TestPermalinkModel).pk,\n obj.pk\n ))\n\n self.assertEqual(url, '/foo/')\n","sub_path":"cms/tests/test_permalinks.py","file_name":"test_permalinks.py","file_ext":"py","file_size_in_byte":1518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"605234542","text":"print('=' * 40)\nprint('{:^40}'.format('Troco DojoPuzzle'))\nprint('=' * 40)\nnotamaior = 100\ntotalnotas = 0\nvalorcompra = float(input('Digite o valor da compra: '))\nvalordin = float(input('Digite o valor pago: '))\nif valordin > valorcompra:\n troco = valordin - valorcompra\n print('O Troco é de: ',troco)\n while True:\n if notamaior <= troco:\n totalnotas += 1\n troco -= notamaior\n else:\n if totalnotas > 0:\n print(f'{totalnotas} nota(s) de R${notamaior}')\n if notamaior == 100:\n notamaior = 50\n elif notamaior == 50:\n notamaior = 10\n elif notamaior == 10:\n notamaior = 5\n elif notamaior == 5:\n notamaior = 1\n elif notamaior == 1:\n notamaior = 0.50\n elif notamaior == 0.50:\n notamaior = 0.10\n elif notamaior == 0.10:\n notamaior = 0.05\n elif notamaior == 0.05:\n notamaior = 0.01\n totalnotas = 0\n if notamaior == 0:\n break\n\nelse:\n if valordin == valorcompra:\n 
print('Não é necessário troco!')\n    else:\n        print('O valor dado não é suficiente para efetuar a compra!')\n        print('Ainda faltam R$',valorcompra - valordin)\n\n\n","sub_path":"caixa.py","file_name":"caixa.py","file_ext":"py","file_size_in_byte":1356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"529117781","text":"import random\n\nimport pygame\nfrom Image import *\n\nclass Dice:\n    def __init__(self, x, y, sx, sy, I):\n        self.x = x\n        self.y = y\n        self.click = False\n        self.I = I\n        self.I = pygame.transform.scale(self.I, (int(sx), int(sy)))\n        self.rect = pygame.Rect((x, y), (sx, sy))\n        self.srect = pygame.Surface((int(sx), int(sy)))\n        self.rI = [ID1, ID2, ID3, ID4, ID5, ID6]\n        self.cI = random.choice(self.rI)\n        self.DiceRolled = 0\n\n        self.vxy = 0\n        self.vx = 0\n        self.vy = 0\n        self.rvx = 0\n        self.rvy = 0\n        self.vc = 0\n        self.rc = 0\n        self.rcc = 0\n        self.vcc = 0\n        self.vcc2 = 0\n\n        self.rotate = self.rvx\n        self.endrotate = random.randint(0, 360)\n\n    def check_click(self, pos):\n        if self.rect.collidepoint(pos):\n            self.click = True\n            pygame.mouse.get_rel()\n            self.rc = 1\n\n    def vel(self, width, height):\n        self.vxy = pygame.mouse.get_rel()\n        self.vx = self.vxy[0]\n        self.vy = self.vxy[1]\n\n        self.vc += 1\n        if self.vxy == (0,0):\n            self.vc = 0\n        if self.rc == 2:\n            self.rcc += 1\n            if self.rcc == 3:\n                self.rc = 3\n                self.rcc = 0\n        if self.rc == 2 and self.rcc == 2:\n            self.rvx = self.vx\n            self.rvy = self.vy\n            self.vcc = abs(self.rvx) + abs(self.rvy)\n        if self.rect.left == 0 or self.rect.right == width:\n            self.rvx = -self.rvx\n        if self.rect.top == 0 or self.rect.bottom == height:\n            self.rvy = -self.rvy\n\n    def update(self, screen_rect):\n        self.vcc2 = abs(self.rvx) + abs(self.rvy)\n        self.rotate = self.rvx\n        if self.click:\n            self.rect.move_ip(pygame.mouse.get_rel())\n            self.rect.clamp_ip(screen_rect)\n        if self.rc == 3:\n            if self.rvx > 0:\n                self.rvx -= 1\n            if self.rvx < 0:\n                self.rvx += 1\n            if self.rvy > 0:\n                self.rvy -= 1\n            if self.rvy < 0:\n                self.rvy += 1\n            if self.vcc > 30:\n                self.I = random.choice(self.rI)\n                self.I = pygame.transform.rotate(self.I, int(self.rotate))\n                self.DiceRolled = 1\n            if self.DiceRolled == 1 and self.vcc2 < 30:\n                self.I = self.cI\n                self.I = pygame.transform.rotate(self.I, int(self.endrotate))\n            if self.rc == 3 and self.vcc < 30:\n                self.rc = 0\n                self.vcc = 0\n            self.rect.move_ip(self.rvx, self.rvy)\n            self.rect.clamp_ip(screen_rect)\n\n    def draw(self, surface):\n        surface.blit(self.I, (self.rect))\n","sub_path":"Old versions/Opseilen 0.031/Dice.py","file_name":"Dice.py","file_ext":"py","file_size_in_byte":2718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"595315426","text":"\"\"\"\nHouse Price Start!!\nEDA (exploratory data analysis)\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom scipy.stats import norm\nfrom scipy import stats\nfrom scipy.special import boxcox1p\n# Normalizing the data helps the model converge faster and improves accuracy (in distance-based computations)\nfrom sklearn.preprocessing import StandardScaler\n\nfrom sklearn.linear_model import Lasso, BayesianRidge\nfrom sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor\nfrom sklearn.model_selection import cross_val_score, KFold\nfrom sklearn.metrics import mean_squared_error\nimport xgboost as xgb\nimport math\nfrom xgboost import XGBRegressor\nfrom sklearn.preprocessing import LabelEncoder\n\ntrain = pd.read_csv(\"train.csv\")\ntest = pd.read_csv(\"test.csv\")\n# show all columns\npd.set_option('display.max_columns', None)\n# set the display width for printed output
\npd.set_option('display.width', 170)\n# print(\"train set:\", train.shape, \"test set:\", test.shape)\n# print(train.head())\n# print(train.info())\n# print(train.describe())\n# merge and preprocess the two datasets\ntrain['source'] = 'train'\ntest['source'] = 'test'\ntrain.drop('Id', axis=1, inplace=True)\ntest.drop('Id', axis=1, inplace=True)\n# print(source.shape)\n\n# visualize the SalePrice distribution\n# the plot shows a deviation from the normal distribution, with clear positive skew and high peakedness\nsns.distplot(train['SalePrice'], fit=norm)\n# measures of deviation from normality: skewness and kurtosis\n# (a normal distribution has skewness 0 and kurtosis 3)\n# print(train['SalePrice'].skew(), train['SalePrice'].kurt())\n# use norm.fit to estimate the mean and standard deviation of the data\n(mu, sigma) = norm.fit(train['SalePrice'])\nplt.legend(['norm dist:($\\\mu=${:.2f}and $\\\sigma=${:.2f})'.format(mu, sigma)], loc='best')\nplt.ylabel('frequency')\nplt.title('SalePrice distribution')\n# plt.show()\n# also draw a Q-Q plot to show how the target's quantiles compare with a normal distribution\n# it reflects whether the data is normal; common transforms are the z-transform, log, etc.\nstats.probplot(train['SalePrice'], plot=plt)\n# plt.show()\n\n# because the target is right-skewed, and ordinary linear models predict better on normally distributed targets,\n# we transform the target towards a standard normal distribution\ntrain['SalePrice'] = np.log1p(train['SalePrice'])\n(mu, sigma) = norm.fit(train['SalePrice'])\nsns.distplot(train['SalePrice'], fit=norm)\nplt.legend(['Norm dist:($\\\mu=${:.2f}and $\\\sigma=${:.2f})'.format(mu, sigma)], loc='best')\n# plt.show()\n# also draw a Q-Q plot to show how the target compares with a normal distribution\nstats.probplot(train['SalePrice'], plot=plt)\n# plt.show()\n\n# next, explore the relationship between the target and the features further\n# why use the GrLivArea feature for outlier removal??\n# because this feature is important, and the official data description mentions it has large outliers, so analyse it separately\nfig, ax = plt.subplots()\nax.scatter(x=train['GrLivArea'], y=train['SalePrice'])\nplt.xlabel('GrLivArea')\nplt.ylabel('SalePrice')\n# plt.show()\n# first remove some outliers\n# use the scatter function to draw a scatter plot\ntrain = train.drop(train[(train['GrLivArea'] > 4000) & (train['SalePrice'] < 13)].index)\nfig, ax = plt.subplots()\nax.scatter(x=train['GrLivArea'], y=train['SalePrice'])\nplt.xlabel('GrLivArea')\nplt.ylabel('SalePrice')\n# plt.show()\n\n# extract the sale price separately\ny_train = train['SalePrice']\ny_train = train.SalePrice.values\n# concatenate the two datasets\ntotal1 = pd.concat([train, test], axis=0, join='outer', ignore_index=True)\ntotal1.drop(['SalePrice'], axis=1, inplace=True)\n\n# correlation analysis with the sns.heatmap() function\n# data.corr() is the correlation matrix, i.e. the correlation between every pair of variables\n# data.corr()['col'] shows only the correlations between column 'col' and the other variables\ncorrmat = train.corr()\nplt.subplots(figsize=(12, 9))\nsns.heatmap(corrmat, vmax=0.9, square=True, linecolor='white')\n# plt.show()\n\n# take the ten variables with the largest correlation coefficients\nk = 10\ncols = corrmat.nlargest(k, 'SalePrice')['SalePrice'].index  # run step by step, indexing one step at a time\ncm = np.corrcoef(train[cols].values.T)\nplt.subplots(figsize=(12, 9))\nsns.heatmap(cm, vmax=0.9, annot=True, square=True, annot_kws={'size': 10}, xticklabels=cols.values,\n            yticklabels=cols.values)\n# plt.show()\n\n# looking at the nine correlated variables, some of them can be merged into one\n# PCA would help a lot here: no need to hand-pick the top features, and it avoids having too few, highly inter-correlated features\nsns.set()\ncols = ['SalePrice', 'OverallQual', 'GrLivArea', 'GarageCars', 'TotalBsmtSF', 'FullBath', 'YearBuilt']\nsns.pairplot(train[cols], size=2.5)\n# plt.show() # adding this line of code prevents an extra figure from appearing above\n# the plots show that six of the variables are roughly positively correlated with 'SalePrice'\n\n# Missing data handling: a typical missing-value problem; line up the missing counts, missing ratios and data types together\nmissing_data = total1.isnull().sum().sort_values(ascending=False)\nmissing_precent = ((total1.isnull().sum()) / (total1.isnull().count())).sort_values(ascending=False)\nmissing_type = total1.dtypes\nmissing_all = pd.concat([missing_data, missing_precent, missing_type], axis=1,\n                        keys=['missing_data', 'missing_precent', 'missing_type'])\nmissing_all.drop(missing_all[missing_data == 0].index, inplace=True)\nmissing_all.sort_values(by='missing_data', ascending=False)\nprint(missing_all)\n\n# Missing-value handling: first look at the features with more than one missing value; these include none of our TOP-10 correlated features,\n# and low correlation plus missing values means the feature is unimportant, so drop it\n# this deletes a lot of features, but many of them are low-correlation with missing values and hard to handle anyway
\n# features with few missing values (0-100) can be filled with neighbouring or other low-impact values\n# for features with many missing values, analyse each feature and choose a suitable method\n# finally, principal component analysis (PCA) can be used to reduce the features\ntotal1.drop(missing_all[missing_data > 1].index, axis=1, inplace=True)\n\n# re-check the remaining missing values\nprint(total1.isnull().sum().sort_values(ascending=False))\n\n# simple checks by inspecting the feature columns\nprint(total1['BsmtUnfSF'].isnull().sum())\nprint(total1['KitchenQual'].mode()[0])\nprint(total1['SaleType'].value_counts())\nprint(total1['KitchenQual'].unique())\n\n# handle the remaining missing values one by one\n# fill GarageCars and GarageArea with 0\nfor col in ('GarageCars', 'GarageArea'):\n    total1[col] = total1[col].fillna(0)\n# BsmtFinSF2, BsmtFinSF1 and TotalBsmtSF are all basement areas, fill them with 0\nfor col in ('BsmtFinSF2', 'BsmtFinSF1', 'TotalBsmtSF', 'BsmtUnfSF'):\n    total1[col] = total1[col].fillna(0)\n# handle KitchenQual: 'TA' appears most often, so fill with the mode\ntotal1['KitchenQual'] = total1['KitchenQual'].fillna(total1['KitchenQual'].mode()[0])\n# handle Electrical the same way, fill with the mode\ntotal1['Electrical'] = total1['Electrical'].fillna(total1['Electrical'].mode()[0])\n# handle Exterior2nd and Exterior1st the same way, fill with the mode\ntotal1['Exterior2nd'] = total1['Exterior2nd'].fillna(total1['Exterior2nd'].mode()[0])\ntotal1['Exterior1st'] = total1['Exterior1st'].fillna(total1['Exterior1st'].mode()[0])\n# handle SaleType the same way, fill with the mode\ntotal1['SaleType'] = total1['SaleType'].fillna(total1['SaleType'].mode()[0])\n# all of the missing values above are now handled\n# one final check of missing_data\nmissing_data = total1.isnull().sum().sort_values(ascending=False)\nmissing_precent = ((total1.isnull().sum()) / (total1.isnull().count())).sort_values(ascending=False)\nmissing_type = total1.dtypes\nmissing_all = pd.concat([missing_data, missing_precent, missing_type], axis=1,\n                        keys=['missing_data', 'missing_precent', 'missing_type'])\nprint(missing_all.sort_values(by='missing_data', ascending=False))\n\n# more feature processing\n# transforming some numerical variables that are really categorical\n# 'MSSubClass' = the building class; it represents the type of house being sold, so convert it to a categorical variable\ntotal1['MSSubClass'] = total1['MSSubClass'].apply(str)\n# total1['MSSubClass'].value_counts()\n# 'OverallCond' rates the overall condition of the house and should be categorical\ntotal1['OverallCond'] = total1['OverallCond'].astype(str)\n# 'MoSold'/'YrSold' are the month and year the house was sold and should be categorical\ntotal1['MoSold'] = total1['MoSold'].astype(str)\ntotal1['YrSold'] = total1['YrSold'].astype(str)\n\n# Label Encoding some categorical variables that may contain information in their ordering set\n# label-encode some of the categorical variables, then check all columns again\n# label encoding with LabelEncoder\n# purpose: LabelEncoder() maps discontinuous numbers or text to consecutive integer codes\ncols = ['BldgType', 'CentralAir', 'Condition1', 'Condition2', 'Electrical', 'ExterCond', 'ExterQual', 'Exterior1st',\n        'Exterior2nd', 'Foundation', 'Heating', 'HeatingQC', 'HouseStyle', 'KitchenQual', 'LandContour', 'LandSlope',\n        'LotConfig', 'LotShape', 'Neighborhood', 'PavedDrive', 'RoofMatl', 'PavedDrive', 'RoofStyle', 'SaleCondition',\n        'SaleType', 'Street']\nle = LabelEncoder()\nfor i in cols:\n    le.fit(list(total1[i].values))\n    total1[i] = le.transform(list(total1[i].values))\n\n# add an important variable: the total living area is a very important indicator for a house\n# Adding total sqfootage feature\ntotal1['TotalSF'] = total1['TotalBsmtSF'] + total1['1stFlrSF'] + total1['2ndFlrSF']\n\n# Log transform skewed features\n# handle skewed data with boxcox1p, in basically the same way as the target variable earlier\n# Skewed features: handle the skewed data\nnumer_feat = total1.dtypes[total1.dtypes != 'object'].index\nskewed_feat = total1[numer_feat].apply(lambda x: (x.dropna()).skew())\nskewed_feat = skewed_feat.sort_values(ascending=False)\nskewness = pd.DataFrame({'Skew': skewed_feat})\nskewness.head(10)\n\nskewness = skewness[abs(skewness) > 
\n# More feature processing\n# Transform some numerical variables that are really categorical\n# 'MSSubClass' is the building class, a code for the type of dwelling sold, so make it categorical\ntotal1['MSSubClass'] = total1['MSSubClass'].apply(str)\n# total1['MSSubClass'].value_counts()\n# 'OverallCond' rates the overall condition of the house and should be categorical\ntotal1['OverallCond'] = total1['OverallCond'].astype(str)\n# 'MoSold' and 'YrSold' are the month and year of the sale and should be categorical\ntotal1['MoSold'] = total1['MoSold'].astype(str)\ntotal1['YrSold'] = total1['YrSold'].astype(str)\n\n# Label-encode some categorical variables that may contain information in their ordering\n# LabelEncoder maps discrete numbers or text onto consecutive integer codes\ncols = ['BldgType', 'CentralAir', 'Condition1', 'Condition2', 'Electrical', 'ExterCond', 'ExterQual', 'Exterior1st',\n        'Exterior2nd', 'Foundation', 'Heating', 'HeatingQC', 'HouseStyle', 'KitchenQual', 'LandContour', 'LandSlope',\n        'LotConfig', 'LotShape', 'Neighborhood', 'PavedDrive', 'RoofMatl', 'RoofStyle', 'SaleCondition',\n        'SaleType', 'Street']\nle = LabelEncoder()\nfor i in cols:\n    le.fit(list(total1[i].values))\n    total1[i] = le.transform(list(total1[i].values))\n\n# Add an important variable: the total living area matters a great deal for a house\n# Adding total sqfootage feature\ntotal1['TotalSF'] = total1['TotalBsmtSF'] + total1['1stFlrSF'] + total1['2ndFlrSF']\n\n# Log transform skewed features\n# Skewed numeric features get boxcox1p, the same treatment as the target above\nnumer_feat = total1.dtypes[total1.dtypes != 'object'].index\nskewed_feat = total1[numer_feat].apply(lambda x: (x.dropna()).skew())\nskewed_feat = skewed_feat.sort_values(ascending=False)\nskewness = pd.DataFrame({'Skew': skewed_feat})\n# skewness.head(10)\n\n# Keep features whose absolute skew exceeds 0.75 and transform each with scipy's\n# boxcox1p, a log(1+x)-family transform\nskewness = skewness[abs(skewness) > 0.75].dropna()\nskewness_feature = skewness.index\nlam = 0.15\nfor i in skewness_feature:\n    total1[i] = boxcox1p(total1[i], lam)\n\n# Handle categorical features\n# Finally run get_dummies over the categoricals for one-hot encoding\npreditors = ['1stFlrSF', '2ndFlrSF', '3SsnPorch', 'BedroomAbvGr', 'BldgType',\n             'BsmtFinSF1', 'BsmtFinSF2', 'BsmtUnfSF', 'CentralAir', 'Condition1',\n             'Condition2', 'Electrical', 'EnclosedPorch', 'ExterCond', 'ExterQual',\n             'Exterior1st', 'Exterior2nd', 'Fireplaces', 'Foundation', 'FullBath',\n             'GarageArea', 'GarageCars', 'GrLivArea', 'HalfBath', 'Heating',\n             'HeatingQC', 'HouseStyle', 'KitchenAbvGr', 'KitchenQual', 'LandContour',\n             'LandSlope', 'LotArea', 'LotConfig', 'LotShape', 'LowQualFinSF',\n             'MSSubClass', 'MiscVal', 'MoSold', 'Neighborhood', 'OpenPorchSF',\n             'OverallCond', 'OverallQual', 'PavedDrive', 'PoolArea', 'RoofMatl',\n             'RoofStyle', 'SaleCondition', 'SaleType', 'ScreenPorch', 'Street',\n             'TotRmsAbvGrd', 'TotalBsmtSF', 'WoodDeckSF', 'YearBuilt',\n             'YearRemodAdd', 'YrSold', 'TotalSF']\ntemp1 = total1['source']\n# Ideally train and test would be split around this step, with get_dummies as the final pass\n# One-hot encode the categoricals with get_dummies\ntotal1 = pd.get_dummies(total1[preditors])\ntotal1['source'] = temp1\n# total1.shape\n\n# Preprocessing is finished; split the data back into train/test\ntrain = total1[total1['source'] == 'train']\ntest = total1[total1['source'] == 'test']\ntrain.drop(['source'], axis=1, inplace=True)\ntest.drop(['source'], axis=1, inplace=True)\n# train.shape, test.shape\n\n# Model prediction\n# Start with a few representative single models: a linear model, random forest,\n# gradient boosting (GBR) and the currently popular XGBoost.\n# The evaluation metric is the one specified by Kaggle: root mean squared error.\nlass = Lasso(alpha=0.1)\nbayes = BayesianRidge(n_iter=300, tol=0.001, alpha_1=1e-06, alpha_2=1e-06, lambda_1=1e-06)\nregr = RandomForestRegressor(max_depth=2)\ngbr = GradientBoostingRegressor(loss='ls', learning_rate=0.1, n_estimators=100, subsample=1.0, min_samples_split=2,\n                                min_samples_leaf=1, min_weight_fraction_leaf=0.0, max_depth=3, alpha=0.9, )\nmodel_xgb = xgb.XGBRegressor(colsample_bytree=0.4603, gamma=0.0468, learning_rate=0.05, max_depth=5,\n                             min_child_weight=1, n_estimators=2200, reg_alpha=0.4640, reg_lambda=0.8571,\n                             subsample=0.5213, silent=False, nthread=-1)\n
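\n# The competition metric is RMSE on the log of the price; since SalePrice was\n# log1p-transformed above, plain RMSE against y_train matches it. A helper along\n# these lines can compare the models (a sketch; rmse_cv is not a name from the\n# original file, and cross_val_score/np are assumed imported as elsewhere here):\ndef rmse_cv(model):\n    # neg_mean_squared_error returns negated MSE, so flip the sign before sqrt\n    mse = -cross_val_score(model, train, y_train, scoring='neg_mean_squared_error', cv=5)\n    return np.sqrt(mse)\n# e.g. rmse_cv(lass).mean() gives one comparable score per model.\n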
\n# Fit each model on the training data\nlass.fit(train, y_train)\nbayes.fit(train, y_train)\nregr.fit(train, y_train)\ngbr.fit(train, y_train)\nmodel_xgb.fit(train, y_train)\n\n# Predict on the training set and compute the scores\nlass_predict = lass.predict(train)\nbayes_predict = bayes.predict(train)\nregr_predict = regr.predict(train)\ngbr_predict = gbr.predict(train)\nmodel_xgb_predict = model_xgb.predict(train)\n\n# Cross-validation with cross_val_score (scored against the true targets,\n# not against the models' own training predictions)\ncv_score_lass = cross_val_score(lass, train, y_train, scoring='neg_mean_squared_error', cv=5)\ncv_score_bayes = cross_val_score(bayes, train, y_train, scoring='neg_mean_squared_error', cv=5)\ncv_score_regr = cross_val_score(regr, train, y_train, scoring='neg_mean_squared_error', cv=5)\ncv_score_gbr = cross_val_score(gbr, train, y_train, scoring='neg_mean_squared_error', cv=5)\ncv_score_model_xgb = cross_val_score(model_xgb, train, y_train, scoring='neg_mean_squared_error', cv=5)\n\nlass_score = np.min(cv_score_lass), np.max(cv_score_lass), np.std(cv_score_lass), np.mean(cv_score_lass)\nbayes_score = np.min(cv_score_bayes), np.max(cv_score_bayes), np.std(cv_score_bayes), np.mean(cv_score_bayes)\nregr_score = np.min(cv_score_regr), np.max(cv_score_regr), np.std(cv_score_regr), np.mean(cv_score_regr)\ngbr_score = np.min(cv_score_gbr), np.max(cv_score_gbr), np.std(cv_score_gbr), np.mean(cv_score_gbr)\nxgb_score = np.min(cv_score_model_xgb), np.max(cv_score_model_xgb), np.std(cv_score_model_xgb), np.mean(\n    cv_score_model_xgb)\n\ntotal_score = pd.DataFrame(\n    {'la_score': lass_score, 'bayes_score': bayes_score, 're_score': regr_score, 'gbr_score': gbr_score,\n     'xgb_score': xgb_score}, index=['min', 'max', 'std', 'mean'])\nprint(total_score)\n\n# Submit each model's predictions to Kaggle to see its score\nlass_test_predict = lass.predict(test)\nbayes_test_predict = bayes.predict(test)\nregr_test_predict = regr.predict(test)\ngbr_test_predict = gbr.predict(test)\nmodel_xgb_test_predict = model_xgb.predict(test)\ntest_org = pd.read_csv('test.csv')\n\n# Lasso output (math.exp(x) - 1 undoes the earlier log1p transform)\nsub = pd.DataFrame()\nsub['Id'] = test_org['Id']\nsub['SalePrice'] = lass_test_predict\nsub['SalePrice'] = sub['SalePrice'].apply(lambda x: math.exp(x) - 1)\nsub.to_csv('lass_test_predict.csv', index=False)\n# Bayesian ridge output\nsub = pd.DataFrame()\nsub['Id'] = test_org['Id']\nsub['SalePrice'] = bayes_test_predict\nsub['SalePrice'] = sub['SalePrice'].apply(lambda x: math.exp(x) - 1)\nsub.to_csv('bayes_test_predict.csv', index=False)\n# Random forest output\nsub = pd.DataFrame()\nsub['Id'] = test_org['Id']\nsub['SalePrice'] = regr_test_predict\nsub['SalePrice'] = sub['SalePrice'].apply(lambda x: math.exp(x) - 1)\nsub.to_csv('regr_test_predict.csv', index=False)\n# GBR output\nsub = pd.DataFrame()\nsub['Id'] = test_org['Id']\nsub['SalePrice'] = gbr_test_predict\nsub['SalePrice'] = sub['SalePrice'].apply(lambda x: math.exp(x) - 1)\nsub.to_csv('gbr_test_predict3.csv', index=False)\n# XGBoost output\nsub = pd.DataFrame()\nsub['Id'] = test_org['Id']\nsub['SalePrice'] = model_xgb_test_predict\nsub['SalePrice'] = sub['SalePrice'].apply(lambda x: math.exp(x) - 1)\nsub.to_csv('model_xgb_test_predict.csv', index=False)\n","sub_path":"housePrice.py","file_name":"housePrice.py","file_ext":"py","file_size_in_byte":17479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"217413039","text":"import os\n\nfrom django.core.management.base import BaseCommand\nfrom django.conf import settings\n\nfrom core import client\nfrom core import models\n\n\nbing = 
settings.ENDPOINTS['bing_image']\n\n\nclass Command(BaseCommand):\n def handle(self, *args, **options):\n randoms = models.Char.objects.order_by('?')[:100]\n untouch = models.Char.objects.order_by('updated_at')[:100]\n blanks = models.Char.objects.filter(bust=0).order_by('?')[:100]\n\n upsert(randoms)\n upsert(untouch)\n upsert(blanks)\n\n\ndef upsert(qs):\n for obj in qs:\n query = bing['char_query'].replace('[[[query]]]', obj.name)\n\n js = client.json(query, auth=bing['auth'])\n if js and js['d'] and len(js['d']['results']) < 1:\n continue\n try:\n infos = sorted(js['d']['results'], key=lambda x: -int(x['Height']))\n except TypeError:\n continue\n\n info = infos[0]\n dt, _ = models.CharThumb.objects.get_or_create(assoc=obj)\n\n dt.src = info['MediaUrl']\n dt.name = info['Title']\n dt.width = info['Width']\n dt.height = info['Height']\n dt.mime = info['ContentType']\n _, dt.ext = os.path.splitext(dt.src)\n dt.save()\n","sub_path":"scrape/char/management/commands/bing_image_char.py","file_name":"bing_image_char.py","file_ext":"py","file_size_in_byte":1231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"547026646","text":"import asyncio\n\nimport openai\nfrom wandb.integration.openai import autolog as openai_autolog\n\n\ndef main():\n openai_autolog(init=dict(project=\"openai_logging\"))\n request_kwargs = dict(\n model=\"gpt-3.5-turbo\",\n messages=[\n {\"role\": \"system\", \"content\": \"You are a helpful assistant.\"},\n {\"role\": \"user\", \"content\": \"Who won the world series in 2020?\"},\n {\n \"role\": \"assistant\",\n \"content\": \"The Los Angeles Dodgers won the World Series in 2020.\",\n },\n {\"role\": \"user\", \"content\": \"Where was it played?\"},\n ],\n )\n\n _ = asyncio.run(openai.ChatCompletion.acreate(**request_kwargs))\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"tests/functional_tests/t0_main/openai/t8_openai_chat_completion_async.py","file_name":"t8_openai_chat_completion_async.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"195040165","text":"from utilities import file_reader\n\n\ndef major_el(arr):\n best_count = 0\n best = None\n elements = set(arr)\n for el in elements:\n count = arr.count(el)\n if count > best_count:\n best = el\n best_count = count\n if best_count < len(arr) / 2:\n best = -1\n return best\n\n\nif __name__ == \"__main__\":\n input_list = file_reader(\"rosalind_maj.txt\")\n k, n = list(map(int, input_list[0].split()))\n results = []\n assert len(input_list[1:]) == k\n for i in range(1, k+1):\n array = list(map(int, input_list[i].split()))\n result = major_el(array)\n results.append(result)\n print(\" \".join(list(map(str, results))))","sub_path":"homework3/maj.py","file_name":"maj.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"615624785","text":"import random\nprint('ask your question')\ninput()\nmessages = ['it is certain',\n 'it is decidedly so',\n 'yes',\n 'reply hazy, try again',\n 'ask again later',\n 'concentrate and ask again',\n 'my reply is no',\n 'it does not look good',\n 'very doubtful']\nprint(messages[random.randint(0, len(messages) - 1)])\n","sub_path":"magic8ball.py","file_name":"magic8ball.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} 
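A note on the majority-element helper in maj.py above: calling arr.count once per distinct value makes it roughly O(n*k). The Boyer-Moore majority vote is a linear-time alternative; the sketch below is not from the original file (major_el_bm is an illustrative name) and it tests for a strict majority, slightly stricter than the >= n/2 rule used there:

def major_el_bm(arr):
    # One pass: only a value occurring more than n/2 times can survive as candidate.
    candidate, count = None, 0
    for el in arr:
        if count == 0:
            candidate = el
        count += 1 if el == candidate else -1
    # Verification pass, since the vote alone does not prove a majority exists.
    return candidate if candidate is not None and arr.count(candidate) * 2 > len(arr) else -1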
+{"seq_id":"119170557","text":"from __future__ import absolute_import, division, print_function\n\nimport os\n\nfrom scipy import constants\n\nfrom vivarium.compartment.process import Process\nfrom vivarium.compartment.composition import (\n simulate_process_with_environment,\n plot_simulation_output,\n flatten_timeseries,\n save_timeseries,\n load_timeseries,\n REFERENCE_DATA_DIR,\n TEST_OUT_DIR,\n assert_timeseries_close,\n)\nfrom vivarium.utils.kinetic_rate_laws import KineticFluxModel\nfrom vivarium.utils.dict_utils import tuplify_port_dicts\nfrom vivarium.utils.units import units\n\nEMPTY_ROLES = {\n 'internal': [],\n 'external': []}\n\nEMPTY_STATES = {\n 'internal': {},\n 'external': {}}\nNAME = 'convenience_kinetics'\n\n\nclass ConvenienceKinetics(Process):\n\n def __init__(self, initial_parameters={}):\n self.nAvogadro = constants.N_A * 1 / units.mol\n\n # retrieve initial parameters\n self.reactions = initial_parameters.get('reactions', {})\n self.initial_state = initial_parameters.get('initial_state', EMPTY_STATES)\n kinetic_parameters = initial_parameters.get('kinetic_parameters', {})\n ports = initial_parameters.get('ports', EMPTY_ROLES)\n\n # make the kinetic model\n self.kinetic_rate_laws = KineticFluxModel(self.reactions, kinetic_parameters)\n\n # ports\n # fluxes port is used to pass constraints\n # exchange is equivalent to external, for lattice_compartment\n ports.update({\n 'fluxes': self.kinetic_rate_laws.reaction_ids,\n 'exchange': ports['external'],\n 'global': ['mmol_to_counts']})\n\n # parameters\n parameters = {}\n parameters.update(initial_parameters)\n\n super(ConvenienceKinetics, self).__init__(ports, parameters)\n\n def default_settings(self):\n\n # default state\n default_state = self.initial_state\n\n # default emitter keys\n emit_ports = ['internal', 'external']\n default_emitter_keys = {\n port: state_list\n for port, state_list in self.ports.items() if port in emit_ports}\n\n # schema\n schema = {\n 'fluxes': {\n flux_id : {\n 'updater': 'set'}\n for flux_id in self.kinetic_rate_laws.reaction_ids}}\n\n default_settings = {\n 'process_id': 'convenience_kinetics',\n 'state': default_state,\n 'emitter_keys': default_emitter_keys,\n 'schema': schema,\n 'time_step': 1.0}\n\n return default_settings\n\n def next_update(self, timestep, states):\n\n # get mmol_to_counts for converting flux to exchange counts\n mmol_to_counts = states['global']['mmol_to_counts'] * units.L / units.mmol\n\n # kinetic rate law requires a flat dict with ('port', 'state') keys.\n flattened_states = tuplify_port_dicts(states)\n\n # get flux\n fluxes = self.kinetic_rate_laws.get_fluxes(flattened_states)\n\n # make the update\n # add fluxes to update\n update = {port: {} for port in self.ports.keys()}\n update.update({'fluxes': fluxes})\n\n # get exchange\n for reaction_id, flux in fluxes.items():\n stoichiometry = self.reactions[reaction_id]['stoichiometry']\n for port_state_id, coeff in stoichiometry.items():\n for port_id, state_list in self.ports.items():\n # separate the state_id and port_id\n if port_id in port_state_id:\n state_id = port_state_id[1]\n state_flux = coeff * flux * timestep\n\n if port_id == 'external':\n # convert exchange fluxes to counts with mmol_to_counts\n # TODO -- use deriver to get exchanges\n delta_counts = int((state_flux * mmol_to_counts).magnitude)\n update['exchange'][state_id] = (\n update['exchange'].get(state_id, 0)\n + delta_counts\n )\n else:\n update[port_id][state_id] = (\n update[port_id].get(state_id, 0)\n + state_flux\n )\n\n # note: external 
and internal ports update change in mmol.\n return update\n\n\n\n# functions\ndef get_glc_lct_config():\n \"\"\"\n Convenience kinetics configuration for simplified glucose and lactose transport.\n Glucose updake simplifies the PTS/GalP system to a single uptake kinetic\n with glc__D_e_external as the only cofactor.\n \"\"\"\n transport_reactions = {\n 'EX_glc__D_e': {\n 'stoichiometry': {\n ('internal', 'g6p_c'): 1.0,\n ('external', 'glc__D_e'): -1.0,\n ('internal', 'pep_c'): -1.0, # TODO -- PEP requires homeostasis mechanism to avoid depletion\n ('internal', 'pyr_c'): 1.0},\n 'is reversible': False,\n 'catalyzed by': [('internal', 'PTSG')]},\n 'EX_lac__D_e': {\n 'stoichiometry': {\n ('external', 'lac__D_e'): -1.0,\n ('external', 'h_e'): -1.0,\n ('internal', 'lac__D_c'): 1.0,\n ('internal', 'h_c'): 1.0},\n 'is reversible': False,\n 'catalyzed by': [('internal', 'LacY')]}}\n\n transport_kinetics = {\n 'EX_glc__D_e': {\n ('internal', 'PTSG'): {\n # k_m for external [glc__D_e]\n ('external', 'glc__D_e'): 1e-1,\n # Set k_m = None to make a reactant non-limiting\n ('internal', 'pep_c'): None,\n 'kcat_f': 3e5}}, # kcat for the forward direction\n 'EX_lac__D_e': {\n ('internal', 'LacY'): {\n ('external', 'lac__D_e'): 1e-1,\n ('external', 'h_e'): None,\n 'kcat_f': 5e4}}}\n\n transport_initial_state = {\n 'internal': {\n 'PTSG': 1.8e-6, # concentration (mmol/L)\n 'g6p_c': 0.0,\n 'pep_c': 1.8e-1,\n 'pyr_c': 0.0,\n 'LacY': 0.0,\n 'lac__D_c': 0.0,\n 'h_c': 100.0},\n 'external': {\n 'glc__D_e': 12.0,\n 'lac__D_e': 10.0,\n 'h_e': 100.0},\n 'fluxes': { # TODO -- is this needed?\n 'EX_glc__D_e': 0.0,\n 'EX_lac__D_e': 0.0}}\n\n transport_ports = {\n 'internal': [\n 'g6p_c', 'pep_c', 'pyr_c', 'h_c', 'PTSG', 'LacY'],\n 'external': [\n 'glc__D_e', 'lac__D_e', 'h_e']}\n\n return {\n 'reactions': transport_reactions,\n 'kinetic_parameters': transport_kinetics,\n 'initial_state': transport_initial_state,\n 'ports': transport_ports}\n\ndef get_toy_config():\n toy_reactions = {\n 'reaction1': {\n 'stoichiometry': {\n ('internal', 'A'): 1,\n ('external', 'B'): -1},\n 'is reversible': False,\n 'catalyzed by': [('internal', 'enzyme1')]}}\n\n toy_kinetics = {\n 'reaction1': {\n ('internal', 'enzyme1'): {\n ('external', 'B'): 0.2,\n 'kcat_f': 5e1}}}\n\n toy_ports = {\n 'internal': ['A', 'enzyme1'],\n 'external': ['B']}\n\n toy_initial_state = {\n 'internal': {\n 'A': 1.0,\n 'enzyme1': 1e-1},\n 'external': {\n 'B': 10.0},\n 'fluxes': {\n 'reaction1': 0.0}}\n\n return {\n 'reactions': toy_reactions,\n 'kinetic_parameters': toy_kinetics,\n 'initial_state': toy_initial_state,\n 'ports': toy_ports}\n\n\ndef test_convenience_kinetics(end_time=1000):\n config = get_glc_lct_config()\n kinetic_process = ConvenienceKinetics(config)\n\n settings = {\n 'environment_port': 'external',\n 'exchange_port': 'exchange',\n 'environment_volume': 1e-13, # L\n 'timestep': 1,\n 'total_time': end_time}\n\n saved_state = simulate_process_with_environment(kinetic_process, settings)\n return saved_state\n\n\ndef test_convenience_kinetics_correlated_to_reference():\n timeseries = test_convenience_kinetics()\n flattened = flatten_timeseries(timeseries)\n reference_timeseries = load_timeseries(\n os.path.join(REFERENCE_DATA_DIR, NAME + '.csv'))\n assert_timeseries_close(flattened, reference_timeseries)\n\n\nif __name__ == '__main__':\n out_dir = os.path.join(TEST_OUT_DIR, NAME)\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n\n plot_settings = {}\n\n timeseries = test_convenience_kinetics()\n plot_simulation_output(timeseries, 
plot_settings, out_dir)\n save_timeseries(timeseries, out_dir)\n","sub_path":"vivarium/processes/convenience_kinetics.py","file_name":"convenience_kinetics.py","file_ext":"py","file_size_in_byte":8766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"407958646","text":"from flask_sqlalchemy import SQLAlchemy\n\nfrom backend.mixins import OutputMixin\n\n\ndb = SQLAlchemy()\n\n\nclass Category(db.Model, OutputMixin):\n __tablename__ = 'product_category'\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(\n db.String(1000),\n nullable=False,\n unique=True\n )\n products = db.relationship('Product', backref='category', lazy=True)\n\n def __str__(self):\n return self.name\n\n\nclass Type(db.Model, OutputMixin):\n __tablename__ = 'product_type'\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(\n db.String(1000),\n nullable=False,\n unique=True\n )\n products = db.relationship('Product', backref='type', lazy=True)\n\n def __str__(self):\n return self.name\n\n\nclass Product(db.Model, OutputMixin):\n __tablename__ = 'product'\n\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(\n db.String(1000),\n nullable=False,\n unique=True\n )\n sku = db.Column(\n db.String(50),\n nullable=False,\n unique=True\n )\n stocks = db.Column(db.Integer, default=0)\n stock_reserve = db.Column(db.Integer, default=0)\n category_id = db.Column(\n db.Integer,\n db.ForeignKey('product_category.id'),\n nullable=False\n )\n type_id = db.Column(\n db.Integer,\n db.ForeignKey('product_type.id'),\n nullable=False\n )\n","sub_path":"backend/store/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"579097105","text":"# Network.py\n# (C) Jeff Orchard, 2019\n\nimport numpy as np\nfrom copy import deepcopy\n\n\n#============================================================\n#\n# Untility functions\n#\n#============================================================\n# Supplied functions\n\ndef OneHot(z):\n '''\n y = OneHot(z)\n\n Applies the one-hot function to the vectors in z.\n Example:\n OneHot([[0.9, 0.1], [-0.5, 0.1]])\n returns np.array([[1,0],[0,1]])\n\n Input:\n z is a 2D array of samples\n\n Output:\n y is an array the same shape as z\n '''\n y = []\n # Locate the max of each row\n for zz in z:\n idx = np.argmax(zz)\n b = np.zeros_like(zz)\n b[idx] = 1.\n y.append(b)\n y = np.array(y)\n return y\n\ndef Shuffle(inputs, targets):\n '''\n s_inputs, s_targets = Shuffle(inputs, targets)\n\n Randomly shuffles the dataset.\n\n Inputs:\n inputs array of inputs\n targets array of corresponding targets\n\n Outputs:\n s_inputs shuffled array of inputs\n s_targets corresponding shuffled array of targets\n '''\n data = list(zip(inputs,targets))\n np.random.shuffle(data)\n s_inputs, s_targets = zip(*data)\n return np.array(s_inputs), np.array(s_targets)\n\ndef MakeBatches(data_in, data_out, batch_size=10, shuffle=True):\n '''\n batches = MakeBatches(data_in, data_out, batch_size=10)\n\n Breaks up the dataset into batches of size batch_size.\n\n Inputs:\n data_in is a list of inputs\n data_out is a list of outputs\n batch_size is the number of samples in each batch\n shuffle shuffle samples first (True)\n\n Output:\n batches is a list containing batches, where each batch is:\n [in_batch, out_batch]\n\n Note: The last batch might be incomplete (smaller than batch_size).\n '''\n N = len(data_in)\n r = range(N)\n if 
shuffle:\n r = np.random.permutation(N)\n batches = []\n for k in range(0, N, batch_size):\n if k+batch_size<=N:\n din = data_in[r[k:k+batch_size]]\n dout = data_out[r[k:k+batch_size]]\n else:\n din = data_in[r[k:]]\n dout = data_out[r[k:]]\n if isinstance(din, (list, tuple)):\n batches.append( [np.stack(din, dim=0) , np.stack(dout, dim=0)] )\n else:\n batches.append( [din , dout] )\n\n return batches\n\n\n# Cost Functions--------------------------\ndef CrossEntropy(y, t):\n '''\n E = CrossEntropy(y, t)\n\n Evaluates the mean cross entropy loss between outputs y and targets t.\n\n Inputs:\n y is an array holding the network outputs\n t is an array holding the corresponding targets\n\n Outputs:\n E is the mean CE\n '''\n #====== REMOVE ABOVE IF YOU DON'T PLAN TO USE THE SOLUTIONS ======\n # [1] Cross entropy formula\n E = -np.sum(t*np.log(y) + (1.-t)*np.log(1.-y))\n return E / len(t)\n\ndef gradCrossEntropy(y, t):\n '''\n E = gradCrossEntropy(y, t)\n\n Given targets t, evaluates the gradient of the mean cross entropy loss\n with respect to the output y.\n\n Inputs:\n y is the array holding the network's output\n t is an array holding the corresponding targets\n\n Outputs:\n dEdy is the gradient of CE with respect to output y\n '''\n # [1] Compute the gradient of CE w.r.t. output\n dEdy = ( y - t ) / y / (1.-y)\n return dEdy / len(t)\n\ndef MSE(y, t):\n '''\n E = MSE(y, t)\n\n Evaluates the mean squared error loss between outputs y and targets t.\n\n Inputs:\n y is the array holding the network's output\n t is an array holding the corresponding targets\n\n Outputs:\n E is the MSE\n '''\n # [1] MSE formula\n E = np.sum((y-t)**2)/2./len(t)\n return E\n\ndef gradMSE(y, t):\n '''\n E = gradMSE(y, t)\n\n Given targets t, evaluates the gradient of the mean squared error loss\n with respect to the output y.\n\n Inputs:\n y is the array holding the network's output\n t is an array holding the corresponding targets\n\n Outputs:\n dEdy is the gradient of MSE with respect to output y\n '''\n # [1] Compute the gradient of MSE w.r.t. 
output\n return ( y - t ) / len(t)\n\ndef CategoricalCE(y, t):\n return -np.sum(t * np.log(y)) / len(t)\n\ndef gradCategoricalCE(y, t):\n return ( y - t ) / len(t)\n\n\n#==================================================\n#\n# Layer Class\n#\n#==================================================\nclass Layer():\n\n def __init__(self, n_nodes=0, act='logistic'):\n '''\n lyr = Layer(n_nodes, act='logistic')\n\n Creates a layer object.\n\n Inputs:\n n_nodes the number of nodes in the layer\n act specifies the activation function\n Use 'logistic' or 'identity'\n '''\n self.N = n_nodes # number of nodes in this layer\n self.h = [] # node activities\n self.z = []\n self.b = np.zeros(self.N) # biases\n self.SetActivationFunction(act)\n\n\n def SetActivationFunction(self, act):\n if act=='identity':\n self.act_text = 'identity'\n self.sigma = self.Identity\n self.sigma_p = self.Identity_p\n elif act=='softmax':\n self.act_text = 'softmax'\n self.sigma = self.Softmax\n self.sigma_p = None\n elif act=='logistic':\n self.act_text = 'logistic'\n self.sigma = self.Logistic\n self.sigma_p = self.Logistic_p\n else:\n print('Error: Activation function '+act+' not implemented!')\n self.act_text = ''\n\n\n def Save(self, fp):\n np.save(fp, self.N)\n np.save(fp, self.act_text)\n np.save(fp, self.b)\n\n def Load(self, fp):\n self.N = np.asscalar( np.load(fp) )\n act_text = str( np.load(fp) )\n self.b = np.array( np.load(fp) )\n self.SetActivationFunction(act_text)\n\n\n def Logistic(self):\n return 1. / (1. + np.exp(-self.z))\n def Logistic_p(self):\n return self.h * (1.-self.h)\n def Identity(self):\n return self.z\n def Identity_p(self):\n return np.ones_like(self.h)\n def Softmax(self):\n v = np.exp(self.z)\n s = np.sum(v, axis=1)\n return v/np.tile(s[:,np.newaxis], [1,np.shape(v)[1]])\n\n\n\n#==================================================\n#\n# Network Class\n#\n#==================================================\nclass Network():\n\n def __init__(self, sizes, type='classifier'):\n '''\n net = Network(sizes, type='classifier')\n\n Creates a Network and saves it in the variable 'net'.\n\n Inputs:\n sizes is a list of integers specifying the number\n of nodes in each layer\n eg. 
[5, 20, 3] will create a 3-layer network\n                  with 5 input, 20 hidden, and 3 output nodes\n            type can be either 'Bernoulli', 'classifier' or 'regression',\n                  and sets the activation function on the output layer,\n                  as well as the loss function.\n                  'Bernoulli': logistic, cross entropy\n                  'classifier': softmax, categorical cross entropy\n                  'regression': linear, mean squared error\n        '''\n        self.n_layers = 0 #len(sizes)\n        self.lyr = [] # a list of Layers\n        self.W = [] # Weight matrices, indexed by the layer below it\n\n        self.type = type # 'Bernoulli', 'classifier', 'regression'\n        self.output_activation = None\n        self.SetCostFunction()\n\n        self.cost_history = [] # keeps track of the cost as learning progresses\n\n\n        # Create and add Layers (using logistic for hidden layers)\n        for n in sizes[:-1]:\n            self.AddLayer( Layer(n) )\n        # For the top layer, we use the appropriate activation function\n        self.AddLayer( Layer(sizes[-1], act=self.output_activation) )\n\n\n    def AddLayer(self, layer):\n        '''\n        net.AddLayer(layer)\n\n        Adds the layer object to the network and connects it to the preceding layer.\n\n        Inputs:\n          layer is a layer object\n        '''\n        self.lyr.append(layer)\n        self.n_layers += 1\n        # If this isn't our first layer, add connection weights\n        if self.n_layers>=2:\n            m = self.lyr[-1].N\n            n = self.lyr[-2].N\n            temp = np.random.normal(size=[n,m])/np.sqrt(n)\n            self.W.append(temp)\n\n\n    def SetCostFunction(self):\n        if self.type=='Bernoulli':\n            self.Loss = CrossEntropy\n            self.gradLoss = gradCrossEntropy\n            self.output_activation = 'logistic'\n        elif self.type=='classifier':\n            self.Loss = CategoricalCE\n            self.gradLoss = gradCategoricalCE\n            # softmax pairs with the categorical cross-entropy gradient in TopGradient\n            self.output_activation = 'softmax'\n        elif self.type=='regression':\n            self.Loss = MSE\n            self.gradLoss = gradMSE\n            self.output_activation = 'identity'\n        else:\n            self.Loss = None\n            self.gradLoss = None\n            self.output_activation = 'logistic'\n            print('Error: Network type '+self.type+' not implemented!')\n\n    def Save(self, fname):\n        '''\n        net.Save(fname)\n\n        Saves the Network object to a file.\n\n        Input:\n          fname is a string filename. Should probably use the extension \".npy\".\n        '''\n        fp = open(fname, 'wb')\n        np.save(fp, self.n_layers)\n        np.save(fp, self.type)\n        np.save(fp, self.output_activation)\n        for l in self.lyr:\n            l.Save(fp)\n        for w in self.W:\n            np.save(fp, w)\n        fp.close()\n\n    @classmethod\n    def Load(cls, fname):\n        '''\n        net.Load(fname)\n\n        Load a Network object from a file. The object needs to be created already,\n        but Load will alter it. 
For example,\n\n >> net = Network.Network()\n >> net.Load('savednet.npy')\n\n Input:\n fname is a string filename\n '''\n fp = open(fname, 'rb')\n n_layers = np.asscalar( np.load(fp) ) # self.n_layers is incremented as we call AddLayer\n net = cls([1,1])\n net.lyr = []\n net.n_layers = 0\n net.type = str( np.load(fp) )\n net.SetCostFunction()\n net.output_activation = str( np.load(fp) )\n # Load layers, one at a time\n for k in range(n_layers):\n l = Layer()\n l.Load(fp)\n net.AddLayer(l)\n # Load weight matrices, one at a time\n net.W = []\n for k in range(n_layers-1):\n w = np.array( np.load(fp) )\n net.W.append(w)\n fp.close()\n return net\n\n\n def FeedForwardFrom(self, idx, h):\n '''\n y = net.FeedForwardFrom(idx, h)\n\n Sets the state of layer idx to h, and then performs a FeedForward\n pass from that layer to the output layer.\n\n Inputs:\n idx index of layer to set\n h array holding a batch of hidden states\n\n Output:\n y array of outputs corresponding to the hidden states\n '''\n self.lyr[idx].h = h[:]\n for pre,post,W in zip(self.lyr[idx:-1], self.lyr[idx+1:], self.W[idx:]):\n # [1] Calc. (and record) input current to next layer\n post.z = pre.h @ W + post.b\n\n # [1] Use activation function to get activities\n post.h = post.sigma()\n\n # Return activity of output layer\n return self.lyr[-1].h\n\n\n def FeedForward(self, x):\n '''\n y = net.FeedForward(x)\n\n Runs the network forward, starting with x as input.\n Returns the activity of the output layer.\n\n All node use\n Note: The activation function used for the output layer\n depends on what self.Loss is set to.\n '''\n x = np.array(x) # Convert input to array, in case it's not\n\n self.lyr[0].h = x # [1] Set input layer\n\n # Loop over connections...\n for pre,post,W in zip(self.lyr[:-1], self.lyr[1:], self.W):\n\n # [1] Calc. (and record) input current to next layer\n post.z = pre.h @ W + post.b\n\n # [1] Use activation function to get activities\n post.h = post.sigma()\n\n # Return activity of output layer\n return self.lyr[-1].h\n\n def TopGradient(self, t):\n '''\n dEdz = net.TopGradient(targets)\n\n Computes and returns the gradient of the cost with respect to the input current\n to the output nodes.\n\n Inputs:\n targets is a batch of targets corresponding to the last FeedForward run\n\n Outputs:\n dEdz is a batch of gradient vectors corresponding to the output nodes\n '''\n if self.type=='classifier':\n return ( self.lyr[-1].h - t ) / len(t)\n elif self.type=='regression':\n return ( self.lyr[-1].h - t ) / len(t)\n elif self.type=='Bernoulli':\n return ( self.lyr[-1].h - t ) / len(t)\n return self.gradLoss(self.lyr[-1].h, t) * self.lyr[-1].sigma_p() / len(t)\n\n def BackProp(self, t, lrate=0.05):\n '''\n net.BackProp(targets, lrate=0.05)\n\n Given the current network state and targets t, updates the connection\n weights and biases using the backpropagation algorithm.\n\n Inputs:\n t an array of targets (number of samples must match the\n network's output)\n lrate learning rate\n '''\n t = np.array(t) # convert t to an array, in case it's not\n\n # Error gradient for top layer\n dEdz = self.TopGradient(t)\n\n # Loop down through the layers\n # Start second-from-the-top, and go down to layer 0\n for i in range(self.n_layers-2, -1, -1):\n pre = self.lyr[i]\n\n # Gradient w.r.t. weights\n dEdW = pre.h.T @ dEdz\n\n # Gradient w.r.t. 
biases\n dEdb = np.sum(dEdz, axis=0)\n\n # Use Sigma'\n # Project error gradient down to layer below\n dEdz = ( dEdz @ self.W[i].T ) * pre.sigma_p()\n\n # Update weights and biases\n self.W[i] -= lrate*dEdW\n self.lyr[i+1].b -= lrate*dEdb\n\n def Learn(self, inputs, targets, lrate=0.05, epochs=1, progress=True):\n '''\n Network.Learn(data, lrate=0.05, epochs=1, progress=True)\n\n Run through the dataset 'epochs' number of times, incrementing the\n network weights after each epoch.\n\n Inputs:\n data is a list of 2 arrays, one for inputs, and one for targets\n lrate is the learning rate (try 0.001 to 0.5)\n epochs is the number of times to go through the training data\n progress (Boolean) indicates whether to show cost\n '''\n try: Learn\n except NameError:\n\n #========= YOUR IMPLEMENTATION BELOW =========\n\n # [1] Perform multiple epochs\n for k in range(epochs):\n\n self.FeedForward(inputs) # [1] FeedForward pass\n self.BackProp(targets, lrate=lrate) # [1] BackProp pass\n\n # [1] Record cost after each epoch if progress=True\n if progress:\n Error = self.Loss(self.lyr[-1].h, targets)\n self.cost_history.append(Error)\n if np.mod(k, 20)==0:\n print('Epoch '+str(k)+': Cost = '+str(Error))\n\n #========= YOUR IMPLEMENTATION ABOVE =========\n\n else:\n Learn(self, inputs, targets, lrate=lrate, epochs=epochs, progress=progress)\n\n def SGD(self, inputs, targets, lrate=0.05, epochs=1, batch_size=10):\n '''\n progress = net.SGD(inputs, targets, lrate=0.05, epochs=1, batch_size=10)\n\n Performs Stochastic Gradient Descent on the network.\n Run through the dataset in batches 'epochs' number of times, incrementing the\n network weights after each batch. For each epoch, it shuffles the dataset.\n\n Inputs:\n inputs is an array of input samples\n targets is a corresponding array of targets\n lrate is the learning rate (try 0.001 to 5)\n epochs is the number of times to go through the training data\n batch_size is the number of samples for each batch\n\n Outputs:\n progress is an (epochs)x2 array with epoch in the first column, and\n cost in the second column\n '''\n loss_history = []\n for k in range(epochs):\n batches = MakeBatches(inputs, targets, batch_size=batch_size, shuffle=True)\n for mini_batch in batches:\n self.FeedForward(mini_batch[0])\n self.BackProp(mini_batch[1], lrate=lrate)\n\n loss_history.append([k, self.Evaluate(inputs, targets)])\n print('Epoch '+str(k)+': cost '+str(loss_history[-1]))\n\n return np.array(loss_history)\n\n def Evaluate(self, inputs, targets):\n '''\n E = net.Evaluate(data)\n\n Computes the average loss over the supplied dataset.\n\n Inputs\n inputs is an array of inputs\n targets is a list of corresponding targets\n\n Outputs\n E is a scalar, the average loss\n '''\n y = self.FeedForward(inputs)\n return self.Loss(y, targets)\n\n def ClassificationAccuracy(self, inputs, targets):\n '''\n a = net.ClassificationAccuracy(data)\n\n Returns the fraction (between 0 and 1) of correct one-hot classifications\n in the dataset.\n '''\n y = self.FeedForward(inputs)\n yb = OneHot(y)\n n_incorrect = np.sum(yb!=targets) / 2.\n return 1. 
- float(n_incorrect) / len(inputs)\n\n\n\n\n\n# end\n","sub_path":"A4/Network.py","file_name":"Network.py","file_ext":"py","file_size_in_byte":18134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"188450201","text":"###\n# 简介:爬取百度贴吧某帖子中所有图片,并保存到本地\n# 内容点:url请求及请求结果处理、正则匹配、保存文件到本地\n# 2017-04-10\n###\n\n#!/usr/bin/python\n#coding:utf-8\n\nfrom urllib import request\nimport re\n\ndef getHtml(url):\n ## 获取html内容\n html = request.urlopen(url)\n htmlContent = html.read().decode('utf-8')\n #print(htmlContent)\n\n ## 获取图片地址\n # https://imgsa.baidu.com/forum/w%3D580/sign=de1818c7ecdde711e7d243fe97eecef4/5678dab44aed2e7380ae2eee8e01a18b86d6faab.jpg\n reg = r'https://imgsa.baidu.com/forum/w%3D580/sign=[0-9a-zA-Z/]*\\.jpg'\n imgurls = re.findall(reg, htmlContent)\n #print(imgurls)\n return imgurls\n\ndef saveImages(imgs):\n #保存图片到本地\n imgName=1\n for img in imgs:\n request.urlretrieve(img, \"E:/图片/壁纸/%s.jpg\" % imgName)\n imgName += 1\n print(\"total %s images\" % (imgName-1))\n\nif __name__ == \"__main__\":\n url = \"https://tieba.baidu.com/p/5059421348\"\n imgurls = getHtml(url)\n saveImages(imgurls)\n","sub_path":"LittleSpider.py","file_name":"LittleSpider.py","file_ext":"py","file_size_in_byte":1055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"471484323","text":"import csv \n\ndef validZipCode(zip_code):\n return int(zip_code) >= 27006 and int(zip_code) <= 28909\n\n\n\nwith open('../support_files/gaz2016zcta5distancemiles.csv') as csvfile, open('../support_files/nc_distances.csv', 'w') as output:\n reader = csv.DictReader(csvfile)\n fieldnames = ['zip1', 'zip2', 'mi_to_zcta5']\n writer = csv.DictWriter(output, fieldnames=fieldnames)\n writer.writeheader()\n for row in reader:\n if validZipCode(row['zip1']) and validZipCode(row['zip2']):\n writer.writerow(row)\n \n\n","sub_path":"support_scripts/remove_non_nc_zips.py","file_name":"remove_non_nc_zips.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"357302111","text":"# THIS FUNCTION RETURN THE POWER SET BY N\n# THE ACTION- CONVERT EACH VARIABLE TO A BINARY NUMBER WHEN 1 IS TRUE AND 0 FALSE.\n# FOR EXAMPLE- IF YOUR SET IS {\"A\",\"B\",\"C\"} --> {A}=001 , {C}=100 , {AB}=011 , {ABC}=111\ndef powerSetInit(n):\n if n < 0:\n return [\"\"]\n if n == 0:\n return [\"0\"]\n if n == 1:\n return [\"0\", \"1\"]\n\n values = [\"00\", \"01\", \"10\", \"11\"]\n\n addedvalues = []\n i = 0\n numOfIterations = 0\n while (i < n - 2):\n for value in values:\n numOfIterations += 1\n addedvalues.append(\"0\" + value)\n addedvalues.append(\"1\" + value)\n values = addedvalues\n addedvalues = []\n i += 1\n lenOf = len(values)\n return values\n\n\n# THIS FUNCTION RETURN ARRAY OF NUMBERS BY A STRING of number\ndef stringToArrayOfInts(num):\n dig = list(int(d) for d in str(num))\n return dig\n\n\n# THIS FUNCTION RETRUN ARRAY OF ARRAYS OF NUMBER BY A LIST OF STRINGS\ndef stringToArrayConverter(values):\n array = []\n for v in values:\n array.append(stringToArrayOfInts(v))\n return array\n\n\n# THIS FUNCTION GET LIST OF SET OF A NUMBERS AND THE NAMES, AND RETURN THE SET WITH THE NAMES\ndef numberListToNamesList(numList, namesList):\n newList = []\n size = len(namesList)\n for i in range(0, size):\n if numList[i] == 1:\n newList.append(namesList[i])\n return newList\n\n\n# THIS FUNCTION GET LIST AND RETURN THE POWER SET\ndef listToPowerset(list):\n finalList = []\n 
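# Note: the non-empty subsets built below could also come straight from\n    # itertools (a standard-library alternative; this script keeps its own\n    # binary-string method):\n    #   from itertools import chain, combinations\n    #   chain.from_iterable(combinations(list, r) for r in range(1, len(list) + 1))\n    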
listSize = len(list)\n powerSetListAsNumbers = stringToArrayConverter(powerSetInit(listSize))\n for l in powerSetListAsNumbers:\n finalList.append(numberListToNamesList(l, list))\n # remove the empty var\n finalList.remove([])\n return finalList\n\n\n# MAIN RUN PROGRAM\npowerSet = listToPowerset(('name', 'age', 'weight'))\n#print (powerSet)\n","sub_path":"OldFiles/powerSetFinder.py","file_name":"powerSetFinder.py","file_ext":"py","file_size_in_byte":1839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"528513949","text":"# vim: tabstop=4 shiftwidth=4 softtabstop=4\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n'''\nThis file contains auto generated mock classes and functions.\n'''\n\n\nclass LibraryLoader(object):\n\n def __init__(self, instance_id=1, *args, **kwargs):\n self.__instance_id__ = instance_id\n\n @property\n def __id__(self):\n return self.__instance_id__\n\n @property\n def kernel32(self):\n if self.__id__ == 2:\n v = WinDLL(3)\n return v\n elif self.__id__ == 5:\n v = WinDLL(6)\n return v\n elif self.__id__ == 8:\n v = WinDLL(9)\n return v\n elif self.__id__ == 11:\n v = WinDLL(12)\n return v\n elif self.__id__ == 14:\n v = WinDLL(15)\n return v\n elif self.__id__ == 17:\n v = WinDLL(18)\n return v\n elif self.__id__ == 20:\n v = WinDLL(21)\n return v\n elif self.__id__ == 23:\n v = WinDLL(24)\n return v\n elif self.__id__ == 26:\n v = WinDLL(27)\n return v\n elif self.__id__ == 29:\n v = WinDLL(30)\n return v\n elif self.__id__ == 32:\n v = WinDLL(33)\n return v\n\n @kernel32.setter\n def kernel32(self, value):\n pass\n\n\nclass WinDLL(object):\n\n def __init__(self, instance_id=1, *args, **kwargs):\n self.__instance_id__ = instance_id\n\n @property\n def __id__(self):\n return self.__instance_id__\n\n @property\n def IsProcessorFeaturePresent(self):\n if self.__id__ == 3:\n v = _FuncPtr(4)\n return v\n elif self.__id__ == 6:\n v = _FuncPtr(7)\n return v\n elif self.__id__ == 9:\n v = _FuncPtr(10)\n return v\n elif self.__id__ == 12:\n v = _FuncPtr(13)\n return v\n elif self.__id__ == 15:\n v = _FuncPtr(16)\n return v\n elif self.__id__ == 18:\n v = _FuncPtr(19)\n return v\n elif self.__id__ == 21:\n v = _FuncPtr(22)\n return v\n elif self.__id__ == 24:\n v = _FuncPtr(25)\n return v\n elif self.__id__ == 27:\n v = _FuncPtr(28)\n return v\n elif self.__id__ == 30:\n v = _FuncPtr(31)\n return v\n elif self.__id__ == 33:\n v = _FuncPtr(34)\n return v\n\n @IsProcessorFeaturePresent.setter\n def IsProcessorFeaturePresent(self, value):\n pass\n\n\nclass _FuncPtr(object):\n\n def __init__(self, instance_id=1, *args, **kwargs):\n self.__instance_id__ = instance_id\n\n @property\n def __id__(self):\n return self.__instance_id__\n\n def __call__(*p):\n if len(p) == 1 and p[0] == 3:\n return 1\n elif len(p) == 1 and p[0] == 6:\n return 1\n elif len(p) == 1 and p[0] == 7:\n return 0\n elif len(p) == 1 and p[0] == 8:\n return 1\n elif len(p) == 1 and p[0] == 9:\n return 1\n elif len(p) == 1 and p[0] == 10:\n return 1\n elif len(p) 
== 1 and p[0] == 12:\n return 1\n elif len(p) == 1 and p[0] == 13:\n return 1\n elif len(p) == 1 and p[0] == 17:\n return 1\n elif len(p) == 1 and p[0] == 20:\n return 0\n elif len(p) == 1 and p[0] == 21:\n return 0\n","sub_path":"nova/tests/hyperv/stubs/test_hypervapi_HyperVAPITestCase_test_get_available_resource_ctypes.py","file_name":"test_hypervapi_HyperVAPITestCase_test_get_available_resource_ctypes.py","file_ext":"py","file_size_in_byte":4013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"49381525","text":"import requests\n\n\ndef searchName():\n searchName = input('What name would you like to search for? ')\n return searchName\n\n\ndef getData():\n API_KEY = \"c1e412b1-5131-49f9-b7a2-bdfda4371684\"\n data = requests.get(f\"https://api.hypixel.net/player?key={API_KEY}&name={searchName()}\").json()\n print(data)\n return data\n\n\ndef filterData():\n skywarsNames = ['sw', 'skyw', 'skywars', 'swars', 'sky', 'skywar', 'sky wars', 'sky war']\n bedNames = ['bw', 'bedw', 'bedwars', 'bwars', 'bed', 'bedwar', 'bed wars', 'bed war']\n hungerGamesNames = ['hg', 'hunger', 'hungergames', 'hgs', 'survivalgames', 'sg', 'survival games', 'hunger games']\n\n flag = True\n while flag:\n searchGame = input(\"What gamemode would you like to search for? \")\n if searchGame.lower() in skywarsNames:\n return \"SkyWars\"\n if searchGame.lower() in bedNames:\n return \"Bedwars\"\n if searchGame.lower() in hungerGamesNames:\n return \"HungerGames\"\n\n\ndef displayData(allPlayerInfo, selectedGame):\n gamemodeStats = {\n \"SkyWars\": ['kills', 'deaths', 'wins'],\n \"Bedwars\": ['kills_bedwars', 'deaths_bedwars', 'wins_bedwars'],\n \"HungerGames\": ['kills', 'deaths', 'wins']\n }\n\n for counter in gamemodeStats[selectedGame]:\n try:\n print(counter + str(allPlayerInfo['player']['stats'][selectedGame][counter]))\n except:\n print(counter + \"0\")\n\n\ndisplayData(getData(), filterData())\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"376682476","text":"\r\nimport os\r\nfrom amplitude.dict import Dict\r\nfrom jinja2 import Environment, FileSystemLoader, FileSystemBytecodeCache\r\n\r\nclass Context(Dict):\r\n\r\n def __init__(self, engine='jinja2', template_dirs=[], cache_dir=None, auto_reload=False, **args):\r\n Dict.__init__(self, engine=engine, template_dirs=template_dirs, cache_dir=cache_dir, auto_reload=auto_reload, **args)\r\n if engine=='jinja2':\r\n self.loader = self.loader or FileSystemLoader(template_dirs)\r\n if cache_dir is not None:\r\n if not os.path.isdir(cache_dir): os.makedirs(cache_dir)\r\n self.cache = self.cache or FileSystemBytecodeCache(directory=cache_dir)\r\n self.env = self.env or Environment(loader=self.loader, bytecode_cache=self.cache, auto_reload=auto_reload)\r\n\r\n def load_template(self, name):\r\n \"returns a template object that has a render(**args) method.\"\r\n if self.engine=='jinja2':\r\n return self.env.get_template(name)\r\n\r\n\r\nclass Template(Dict):\r\n\r\n def __init__(self, name, **args):\r\n self.context = Context(**args)\r\n self.template = self.context.load_template(name)\r\n\r\n def render(self, **args):\r\n self.template.render(**args)","sub_path":"files/template.py","file_name":"template.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"606041589","text":"import 
asyncio\nimport ssl\nimport threading\nimport time\nimport uuid\n\nimport requests\nimport websockets\nfrom signalrcore_async.helpers import Helpers\nfrom signalrcore_async.messages import (InvocationMessage, MessageType,\n PingMessage, StreamInvocationMessage)\n\nfrom ..protocol.json import JsonHubProtocol\nfrom .connection_state import ConnectionState\nfrom .errors import HubError, UnAuthorizedHubError\nfrom .reconnection import ConnectionStateChecker\n\n\nclass StreamHandler(object):\n def __init__(self, event, invocation_id):\n self.event = event\n self.invocation_id = invocation_id\n self.next_callback = None\n\n def subscribe(self, subscribe_callbacks):\n if subscribe_callbacks is None:\n raise ValueError(\" subscribe object must be {0}\".format({\n \"next\": None\n }))\n self.next_callback = subscribe_callbacks[\"next\"]\n\nclass async_event(asyncio.Event):\n def set(self):\n self._loop.call_soon_threadsafe(super().set)\n\nclass WebSocketsConnection(object):\n\n last_result = None\n last_invocation_id = None\n last_error = None\n event = None\n loop = None\n\n def __init__(self, hubConnection):\n self._hub_connection = hubConnection\n\n async def run(self):\n url = self._hub_connection.url\n headers = self._hub_connection.headers\n max_size = 1_000_000_000\n \n # connect\n self._ws = await websockets.connect(url, max_size=max_size, extra_headers=headers)\n self._hub_connection.logger.debug(\"-- web socket open --\")\n\n # handshake\n msg = self._hub_connection.protocol.handshake_message()\n self._hub_connection._internal_send(msg, self._hub_connection.handshake_protocol)\n response = await self._ws.recv()\n self._hub_connection.evaluate_handshake(response)\n\n if self._hub_connection.on_connect is not None and callable(self._hub_connection.on_connect):\n self._hub_connection.state = ConnectionState.connected\n self._hub_connection.on_connect()\n\n # message loop\n self.loop = asyncio.create_task(self._receive_messages())\n \n async def invoke(self, data, invocationId):\n\n self.event = async_event()\n self.last_invocation_id = invocationId\n \n await self._ws.send(data)\n await self.event.wait()\n\n if (self.last_error is not None):\n raise Exception(self.last_error)\n else:\n return self.last_result\n\n def send(self, data):\n asyncio.create_task(self._ws.send(data))\n\n def handle_completion(self, message):\n if message.invocation_id == self.last_invocation_id:\n if message.error is not None:\n self.last_result = None\n self.last_error = message.error\n self.event.set() \n else:\n self.last_result = message.result\n self.last_error = None\n self.event.set()\n\n self.last_invocation_id = -1\n\n async def close(self):\n self._hub_connection.logger.debug(\"-- web socket close --\")\n\n if self._hub_connection.on_disconnect is not None and callable(self._hub_connection.on_disconnect):\n self._hub_connection.on_disconnect()\n\n if (self._ws is not None):\n await self._ws.close()\n\n self.last_error = \"The connection was closed unexpectedly.\"\n\n if (self.event is not None):\n self.event.set()\n\n if (self.loop is not None):\n self.loop.cancel()\n\n async def _receive_messages(self):\n while (True):\n raw_message = await self._ws.recv()\n self._hub_connection.on_message(raw_message)\n\nclass BaseHubConnection(object):\n def __init__(\n self,\n url,\n protocol,\n headers={},\n keep_alive_interval=15,\n reconnection_handler=None,\n verify_ssl=False,\n skip_negotiation=False):\n self.skip_negotiation = skip_negotiation\n self.logger = Helpers.get_logger()\n self.url = url\n self.protocol 
= protocol\n self.handshake_protocol = JsonHubProtocol()\n self.headers = headers\n self.handshake_received = False\n self.token = None # auth\n self.state = ConnectionState.disconnected\n self.connection_alive = False\n self.handlers = []\n self.stream_handlers = []\n self._thread = None\n self._ws = None\n self.verify_ssl = verify_ssl\n self.connection_checker = ConnectionStateChecker(\n lambda: self._internal_send(PingMessage()),\n keep_alive_interval\n )\n self.reconnection_handler = reconnection_handler\n self.on_connect = None\n self.on_disconnect = None\n\n def negotiate(self):\n negotiate_url = Helpers.get_negotiate_url(self.url)\n self.logger.debug(\"Negotiate url:{0}\".format(negotiate_url))\n\n response = requests.post(negotiate_url, headers=self.headers, verify=self.verify_ssl)\n self.logger.debug(\"Response status code{0}\".format(response.status_code))\n\n if response.status_code != 200:\n raise HubError(response.status_code) if response.status_code != 401 else UnAuthorizedHubError()\n data = response.json()\n if \"connectionId\" in data.keys():\n self.url = Helpers.encode_connection_id(self.url, data[\"connectionId\"])\n\n # Azure\n if 'url' in data.keys() and 'accessToken' in data.keys():\n Helpers.get_logger().debug(\"Azure url, reformat headers, token and url {0}\".format(data))\n self.url = data[\"url\"] if data[\"url\"].startswith(\"ws\") else Helpers.http_to_websocket(data[\"url\"])\n self.token = data[\"accessToken\"]\n self.headers = {\"Authorization\": \"Bearer \" + self.token}\n\n async def start(self):\n if not self.skip_negotiation:\n self.negotiate()\n self.logger.debug(\"Connection started\")\n if self.state == ConnectionState.connected:\n self.logger.warning(\"Already connected unable to start\")\n return\n self.state = ConnectionState.connecting\n self.logger.debug(\"start url:\" + self.url)\n\n self._ws = WebSocketsConnection(self)\n await self._ws.run()\n\n async def stop(self):\n self.logger.debug(\"Connection stop\")\n if self.state == ConnectionState.connected:\n await self._ws.close()\n self.connection_checker.stop()\n self.state == ConnectionState.disconnected\n\n def register_handler(self, event, callback):\n self.logger.debug(\"Handler registered started {0}\".format(event))\n self.handlers.append((event, callback))\n\n def evaluate_handshake(self, message):\n self.logger.debug(\"Evaluating handshake {0}\".format(message))\n msg = self.handshake_protocol.decode_handshake(message)\n if msg.error is None or msg.error == \"\":\n self.handshake_received = True\n self.state = ConnectionState.connected\n if self.reconnection_handler is not None:\n self.reconnection_handler.reconnecting = False\n if not self.connection_checker.running:\n self.connection_checker.start()\n else:\n self.logger.error(msg.error)\n raise ValueError(\"Handshake error {0}\".format(msg.error))\n\n def on_message(self, raw_message):\n\n # self.logger.debug(\"Message received{0}\".format(raw_message))\n self.connection_checker.last_message = time.time()\n messages = self.protocol.parse_messages(raw_message)\n\n for message in messages:\n if message.type == MessageType.invocation_binding_failure:\n self.logger.error(message)\n continue\n if message.type == MessageType.ping:\n continue\n\n if message.type == MessageType.invocation:\n fired_handlers = list(\n filter(\n lambda h: h[0] == message.target,\n self.handlers))\n if len(fired_handlers) == 0:\n self.logger.warning(\n \"event '{0}' hasn't fire any handler\".format(\n message.target))\n for _, handler in fired_handlers:\n 
handler(message.arguments)\n\n if message.type == MessageType.close:\n self.logger.info(\"Close message received from server\")\n asyncio.create_task(self.stop())\n return\n\n if message.type == MessageType.completion:\n self._ws.handle_completion(message)\n\n if message.type == MessageType.stream_item:\n fired_handlers = list(\n filter(\n lambda h: h.invocation_id == message.invocation_id,\n self.stream_handlers))\n if len(fired_handlers) == 0:\n self.logger.warning(\n \"id '{0}' hasn't fire any stream handler\".format(\n message.invocation_id))\n for handler in fired_handlers:\n handler.next_callback(message.item)\n\n if message.type == MessageType.stream_invocation:\n pass\n\n if message.type == MessageType.cancel_invocation:\n pass # not implemented\n\n async def invoke(self, method, arguments):\n if type(arguments) is not list:\n raise HubConnectionError(\"Arguments of a message must be a list\")\n\n if type(arguments) is list:\n invocation_id = str(uuid.uuid4())\n message = InvocationMessage({}, invocation_id, method, arguments)\n return await self._internal_invoke(message)\n\n\n async def _internal_invoke(self, message, protocol=None):\n\n self.logger.debug(\"Sending message.\".format(message))\n\n try:\n protocol = self.protocol if protocol is None else protocol\n invocation_id = message.invocation_id\n result = await self._ws.invoke(protocol.encode(message), invocation_id)\n\n self.connection_checker.last_message = time.time()\n\n if self.reconnection_handler is not None:\n self.reconnection_handler.reset()\n\n return result\n \n except Exception as ex:\n raise ex\n\n def send(self, method, arguments):\n if type(arguments) is not list and type(arguments) is not Subject:\n raise HubConnectionError(\"Arguments of a message must be a list or subject\")\n\n if type(arguments) is list:\n self._internal_send(InvocationMessage(\n {},\n 0,\n method,\n arguments))\n\n if type(arguments) is Subject:\n arguments.connection = self\n arguments.target = method\n arguments.start()\n\n def _internal_send(self, message, protocol=None):\n\n self.logger.debug(\"Sending message {0}\".format(message))\n\n try:\n protocol = self.protocol if protocol is None else protocol\n\n self._ws.send(protocol.encode(message))\n self.connection_checker.last_message = time.time()\n\n if self.reconnection_handler is not None:\n self.reconnection_handler.reset()\n\n except Exception as ex:\n raise ex\n\n def handle_reconnect(self):\n self.reconnection_handler.reconnecting = True\n try:\n self.stop()\n self.start()\n except Exception as ex:\n self.logger.error(ex)\n sleep_time = self.reconnection_handler.next()\n threading.Thread(\n target=self.deferred_reconnect,\n args=(sleep_time,)\n )\n\n def deferred_reconnect(self, sleep_time):\n time.sleep(sleep_time)\n try:\n if not self.connection_alive:\n self._send_ping()\n except Exception as ex:\n self.reconnection_handler.reconnecting = False\n self.connection_alive = False\n\n async def stream(self, event, event_params, on_next_item):\n invocation_id = str(uuid.uuid4())\n stream_obj = StreamHandler(event, invocation_id)\n stream_obj.subscribe({ \"next\": on_next_item })\n self.stream_handlers.append(stream_obj)\n await self._internal_invoke(\n StreamInvocationMessage(\n {},\n invocation_id,\n event,\n event_params))\n\n def on_close(self, callback):\n self.on_disconnect = callback\n\n def on_open(self, callback):\n self.on_connect = callback\n\n def on(self, event, callback_function):\n \"\"\"\n Register a callback on the specified event\n :param event: Event name\n 
:param callback_function: callback function; the message arguments will be bound to it\n        :return:\n        \"\"\"\n        self.register_handler(event, callback_function)\n","sub_path":"signalrcore_async/hub/base_hub_connection.py","file_name":"base_hub_connection.py","file_ext":"py","file_size_in_byte":13095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"383440453","text":"import asana2\nimport asana\nimport bdfunc\nimport sntbot\nimport datetime\nfrom datetime import date\nimport time\nfrom time import strftime\nimport telebot\nclient= asana.Client.access_token('0/9de808358c48f13b938aaf36112e3037')\n\n\n\"\"\"List the UNCOMPLETED tasks from the DATABASE - we need CHATID, TASK NAME, SECTION NAME (receipt name) and the description; the RESULT column in the DB stores the reply when one is needed.\"\"\"\n\"\"\"Loop over every task in the DB and look for a match in Asana. The loop finds the match in the DB list and compares the task for differences; if there are any, it overwrites the copy whose modification date is older.\"\"\"\n\"\"\"Differences: status (completed or not), and assignee - unassigned means the task has not been accepted; assigned to anyone means it is being worked on.\"\"\"\n\"\"\"When something changes, send a message to the relevant ChatIds - for example, tell the sellers that the status changed (accepted by the executor, done).\"\"\"\n\"\"\"Then new tasks that appeared in the DB have to be added to Asana; tasks created directly in Asana need not be added to the DB - they have nothing to do with the sellers...\"\"\"\n\"\"\"Note: tasks must be added to the right section (receipt name).\"\"\"\n\n\"\"\"While we are at it, reminders could be added here - something like 'Time is ticking and the task is still not done!!!' - that would be great.\"\"\"\n\n\"\"\"The script runs once a minute; that should be enough for now.\"\"\"\n\n\n# Convert an Asana timestamp into a plain format\ndef asana_date_to_ok(asanadate):\n\n    asanadate = asanadate[:-5]\n    asanadate = asanadate.replace('T', ' ')\n    asanadate = asanadate.replace('-', ' ')\n    asanadate = asanadate.replace(':', ' ')\n    date1 = asanadate.split(' ')\n    date = {\"h\":date1[3], \"m\":date1[4],\"s\":date1[5],\"Y\":date1[0],\"M\":date1[1],\"D\":date1[2] }\n    return(date)\n\n# Convert a Y-M-D h:m:s datetime into the Asana format\ndef ok_date_to_asana(okdate):\n    okdate = str(okdate)\n    okdate = okdate.replace('-', ' ')\n    okdate = okdate.replace(':', ' ')\n    okdate = okdate.split(' ')\n    asanadate =okdate[0]+\"-\"+okdate[1]+\"-\"+okdate[2]+\"T\"+okdate[3]+\":\"+okdate[4]+\":\"+okdate[5]+\".015Z\"\n    print(asanadate)\n    return str(asanadate)\n\n
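\n# The two hand-rolled converters above can also be written with datetime parsing\n# (a sketch, not from the original file; it assumes the same fixed '.015Z' suffix\n# that asana_date_to_ok strips):\ndef asana_date_parse(asanadate):\n    # '2019-01-02T03:04:05.015Z' -> datetime.datetime(2019, 1, 2, 3, 4, 5)\n    return datetime.datetime.strptime(asanadate[:-5], '%Y-%m-%dT%H:%M:%S')\n\n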
= updatedate, assigned_to = assignee, complete_date = completedate, description = descript)\n\ndef sincr_all():\n #Прогоняем все таски в асане\n\n tasks = client.tasks.find_by_project('1111815872858792',{\"completed\":\"False\"})\n for task in tasks:\n if str(bdfunc.read_db_universe(\"asanabase\", \"task_id\", \"task_id\", task['id'])) != str(task['id']):\n sincr_asana_to_bd(task['id'])\n\n\n y = asana2.get_task(task['id'])\n\n asanaupdatedate = y['modified_at']\n asanaupdatedate =asanaupdatedate[:-5]\n asanaupdatedate = datetime.datetime.strptime(asanaupdatedate, \"%Y-%m-%dT%H:%M:%S\")\n delta = datetime.timedelta(hours = 3)\n asanaupdatedate = asanaupdatedate + delta\n\n\n\n sqlupdatedate = bdfunc.read_db_universe(\"asanabase\", \"sql_update_date\",\"task_id\",task['id'])\n\n\n #Если время обновления в асане больше,чем в базе\n if asanaupdatedate and sqlupdatedate != None:\n\n if asanaupdatedate > sqlupdatedate:\n print(\"Асану в базу \"+ str(y['name']) + str(asanaupdatedate)+ str(sqlupdatedate))\n sincr_asana_to_bd(task['id'])\n else:\n print(\"Базу в асану \"+ str(y['name']) + str(asanaupdatedate)+ str(sqlupdatedate))\n sincr_bd_to_asana(task['id'])\n else:\n print(\"Асану в базу\")\n sincr_asana_to_bd(task['id'])\n\n\n\n #Проверка и добавление в асану созданных в БД тасков.\n tasks = bdfunc.read_dailycheck(\"asanabase\",\"task_name\", task_id= \"\")\n for task in tasks:\n print(task[0])\n taskname = task[0]\n print(taskname)\n sectionname = bdfunc.read_db_universe(\"asanabase\",\"section_name\", \"task_name\", task[0])\n\n print(sectionname)\n asana2.create_task(taskname, sectionname)\n\n bdfunc.deteterow_db_universe(\"asanabase\", \"task_id\",\"\")\n\n","sub_path":"sync.py","file_name":"sync.py","file_ext":"py","file_size_in_byte":8497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"600599025","text":"import asyncio\nimport logging\nfrom uuid import uuid4\n\nimport aiohttp\n\nfrom uz.client import UZClient, ResponseError\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass UZScanner(object):\n\n def __init__(self, success_cb, timeout=60):\n self.success_cb = success_cb\n\n self.loop = asyncio.get_event_loop()\n self.timeout = timeout\n self.session = aiohttp.ClientSession()\n self.client = UZClient(self.session)\n self.__state = dict()\n self.__running = True\n\n def stop(self):\n self.__running = False\n\n def cleanup(self):\n self.session.close()\n\n async def add_item(self, success_cb_id, firstname, lastname, date,\n source, destination, train_num, ct_letter):\n scan_id = uuid4().hex\n self.__state[scan_id] = dict(\n success_cb_id=success_cb_id,\n firstname=firstname,\n lastname=lastname,\n date=date,\n source=source,\n destination=destination,\n train_num=train_num,\n ct_letter=ct_letter,\n attempts=0,\n error=None)\n asyncio.ensure_future(self.scan(scan_id))\n return scan_id\n\n def status(self, scan_id):\n data = self.__state.get(scan_id)\n if data is None:\n raise UknkownScanID(scan_id)\n return data['attempts'], data['error']\n\n def stop_scan(self, scan_id):\n if scan_id in self.__state:\n del self.__state[scan_id]\n\n async def scan(self, scan_id):\n while self.__running:\n data = self.__state.get(scan_id)\n if data is None:\n logger.info(\n 'Scan id {} is not in state anymore. 
Stopping scan'.format(\n scan_id))\n return\n\n data['attempts'] += 1\n\n train = None\n for i in await self.client.list_trains(\n data['date'], data['source'], data['destination']):\n if i.num == data['train_num']:\n train = i\n break\n if train is None:\n error = 'Train {} not found'.format(data['train_num'])\n data['error'] = error\n logger.debug('[{}] {}'.format(scan_id, error))\n await asyncio.sleep(self.timeout)\n continue\n\n if data['ct_letter']:\n ct = None\n for i in train.coach_types:\n if i.letter == data['ct_letter']:\n ct = i\n break\n if ct is None:\n error = 'Coach type {} not found'.format(data['ct_letter'])\n data['error'] = error\n logger.debug('[{}] {}'.format(scan_id, error))\n await asyncio.sleep(self.timeout)\n continue\n coach_types = [ct]\n else:\n coach_types = train.coach_types\n\n with UZClient() as personal_client:\n for ct in coach_types:\n for coach in await self.client.list_coaches(train, ct):\n try:\n seats = await self.client.list_seats(train, coach)\n except ResponseError:\n continue\n for seat in seats:\n try:\n await personal_client.book_seat(\n train, coach, seat,\n data['firstname'], data['lastname'])\n except ResponseError:\n continue\n sid = personal_client.get_session_id()\n await self.success_cb(data['success_cb_id'], sid)\n self.stop_scan(scan_id)\n return\n error = 'No available seats'\n data['error'] = error\n logger.debug('[{}] {}'.format(scan_id, error))\n await asyncio.sleep(self.timeout)\n\n\nclass UknkownScanID(Exception):\n pass\n","sub_path":"uz/scanner.py","file_name":"scanner.py","file_ext":"py","file_size_in_byte":4149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"592410735","text":"#!/bin/env python3\n\n## All data is derived from distrowatch.com and wikipedia.\n\nfrom googletrans import Translator, LANGUAGES\nimport json\n\nLANGFOLDER = \"\"\nt = Translator()\n#for key in LANGUAGES:\n# print(key, \"->\", LANGUAGES[key])\nlcount = 0\nfor item in LANGUAGES.items():\n lcount += 1\n with open(LANGFOLDER+'base.json') as JSONfile:\n DATA = json.load(JSONfile)\n print(f\"Translating NEOS Language File to {item[0]}.\")\n DATA[\"localeCode\"] = item[0]\n kcount = 0\n for key in DATA[\"messages\"].items():\n kcount += 1\n if key[1] != \"\":\n DATA[\"messages\"][key[0]] = t.translate(key[1], src='en', dest=item[0]).text\n if DATA[\"messages\"][key[0]] != key[1]:\n print(f\"[{item[0]} {lcount}/{len(LANGUAGES.items())} {kcount}/{len(DATA['messages'].items())}] {key[0]}: {key[1]} -> \\33[1;32m{DATA['messages'][key[0]]}\\33[m\")\n else:\n print(f\"[{item[0]} {lcount}/{len(LANGUAGES.items())} {kcount}/{len(DATA['messages'].items())}] {key[0]}: \\33[1;31mDid not translate.\\33[m\")\n DATA[\"messages\"][key[0]]=\"\"\n with open(LANGFOLDER+item[0]+\".json\",'w') as jsonfile:\n json.dump(DATA,jsonfile, indent=4, ensure_ascii=False)","sub_path":"neostranslate.py","file_name":"neostranslate.py","file_ext":"py","file_size_in_byte":1224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"13871257","text":"from piece import Piece\n\nclass Pawn(Piece):\n\t#symbol to be displayed by the board when drawn\n\tsymbol = \"P\"\n\n\t#initializes the pawn setting the superclass attribute color\n\tdef __init__(self, color):\n\t\tsuper().__init__(color)\n\n\t#returns allowed moves as a list\n\tdef allowed_moves(self, position, board):\n\t\t#get x,y coordinates from position\n\t\tx = position[0]\n\t\ty = position[1]\n\n\t\t#black moves down (up in y), white 
moves up (down in y)\n\t\tif self.color == 1:\n\t\t\tdirection = -1\n\t\telif self.color == 2:\n\t\t\tdirection = 1\n\n\t\t#stores allowed moves\n\t\tallowed = []\n\n\t\t#check if square in front is free and within bounds\n\t\tif board.squares[y+direction][x] == []:\n\t\t\tallowed.append([[x,y],[x,y+direction]])\n\n\t\t\t#check if second square in front is also free, and piece on starting line\n\t\t\tif direction==1 and y==1 and board.squares[3][x]==[]:\n\t\t\t\tallowed.append([[x,1],[x,3]])\n\t\t\tif direction==-1 and y==6 and board.squares[4][x]==[]:\n\t\t\t\tallowed.append([[x,6],[x,4]])\n\n\t\t#check if squares in diagonal front have enemy piece\n\t\tif x+1 <= 7 and board.squares[y+direction][x+1]!=[] and board.squares[y+direction][x+1].color != self.color:\n\t\t\tallowed.append([[x,y],[x+1,y+direction]])\n\t\tif x-1 >= 0 and board.squares[y+direction][x-1]!=[] and board.squares[y+direction][x-1].color != self.color:\n\t\t\tallowed.append([[x,y],[x-1,y+direction]])\n\n\t\treturn allowed\n\n\n\n\t\t","sub_path":"pawn.py","file_name":"pawn.py","file_ext":"py","file_size_in_byte":1346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"116572817","text":"#!/usr/bin/python3\nimport os\nimport csv\nimport codecs\n\nCSV_FILE = ('csv_sample.csv')\nspace = 2 * ' '\n\ndef create_row(row_data):\n\toutput = space + '<tr>\\n'\n\tfor item in row_data:\n\t\toutput += 2 * space + '<td>' + item + '</td>\\n'\n\treturn output + space + '</tr>\\n'\n\ndef create_table(table_data):\n\toutput = '<table>\\n'\n\tfor row_data in table_data:\n\t\toutput += create_row(row_data)\n\treturn output + '</table>\\
\\n'\n\ndef insert_csv(csv_file):\n\tnew_table = ''\n\tfile = open(csv_file, 'r', encoding=\"utf-8\")\n\tcsv_data = list(csv.reader(file, delimiter=','))\n\tnew_table += create_table(csv_data)\n\t\n\toutput_file = codecs.open('output.htm', 'w', 'utf-8')\n\toutput_file.write(new_table)\n\tprint('Done!')\n\nif __name__ == \"__main__\": insert_csv(CSV_FILE)\n","sub_path":"csv_to_html.py","file_name":"csv_to_html.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"132010755","text":"# coding=UTF-8\nimport imp\nimport lib.indicator as ind; imp.reload(ind); \n############################################################################### \ndef s1(self,PRICE,i,I):\n baseT= 30\n if i==baseT:ind.GetIndicatorByType(I,\"小台贏家15\")\n if i< (baseT) : return\n aa=I.get(\"小台純贏家作為15\")[i-1]\n amax=I.get(\"小台純贏家作為15高通道\")[i-1]\n amin=I.get(\"小台純贏家作為15低通道\")[i-1]\n \n if aaamax : self.EnterLong(PRICE)\n self.CheckDailyExitAll(I.get(\"TIME\")[i],PRICE)\n \n############################################################################### \nimport os\nSTittle=u\"[mc02]小台純贏家作為15通道策略\"\nFName=os.path.split(__file__)[1].split('.')[0]\nif __name__ == '__main__':\n exec(open(os.path.split(os.path.realpath(__file__))[0]+'\\\\init.py').read())","sub_path":"strategy/mc02.py","file_name":"mc02.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"540093290","text":"from django.db import models\nfrom django.utils import timezone\nfrom django.dispatch import receiver\nfrom django.contrib.auth.models import User\n\n\nclass Note(models.Model):\n STATES = [\n ('incomplete', 'Incomplete'),\n ('complete', 'Complete'),\n ]\n\n user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='notes')\n title = models.CharField(max_length=48)\n description = models.CharField(max_length=4096)\n status = models.CharField(choices=STATES, default='incomplete', max_length=48)\n date_created = models.DateTimeField(auto_now_add=True)\n date_modified = models.DateTimeField(auto_now=True)\n date_completed = models.DateTimeField(blank=True, null=True)\n\n def __str__(self):\n return f'Note: {self.title} ({self.status})'\n\n def __repr__(self):\n return f'Note: {self.title} ({self.status})'\n\n\n@receiver(models.signals.post_save, sender=Note)\ndef set_note_complete_date(sender, instance, **kwargs):\n if instance.status == 'complete' and not instance.date_completed:\n instance.date_completed = timezone.now()\n instance.save()\n","sub_path":"class-29-static-media-and-templates/demos/notes_401d9/notes_app/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"74950665","text":"def getAvailableLetters(lettersGuessed):\n\t'''\n\tlettersGuessed: list, what letters have been guessed so far\n\treturns: string, comprised of letters that represents what letters have not\n\tyet been guessed.\n\t'''\n \n\t'''\n\t>>> lettersGuessed = ['e', 'i', 'k', 'p', 'r', 's']\n\t>>> print getAvailableLetters(lettersGuessed)\n\tabcdfghjlmnoqtuvwxyz\n\t\n\tHint: You might consider using string.ascii_lowercase, \n\twhich is a string comprised of all lowercase letters:\n\t\n\t>>> import string\n\t>>> print string.ascii_lowercase\n\tabcdefghijklmnopqrstuvwxyz\n\t'''\n\t\n\tlettersGuessed = sorted(lettersGuessed)\n\t\n\timport string\n\treturn True\n\nc = 
[ 'z', 'x', 'b', 'h']\nprint(sorted(c))\n","sub_path":"Problem_Set_3_4.py","file_name":"Problem_Set_3_4.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"644277836","text":"\"\"\"\nThe drawing below gives an idea of how to cut a given \"true\" rectangle into squares\n(\"true\" rectangle meaning that the two dimensions are different).\n\nCan you translate this drawing into an algorithm?\n\nYou will be given two dimensions\n\na positive integer length (parameter named lng)\na positive integer width (parameter named wdth)\nYou will return an array with the size of each of the squares.\n\n  sqInRect(5, 3) should return [3, 2, 1, 1]\n  sqInRect(3, 5) should return [3, 2, 1, 1]\nNote:\n\nlng == wdth as a starting case would be an entirely different problem and the drawing is planned to be interpreted with lng != wdth.\nSee kata, Square into Squares. Protect trees!.\n\nWhen the initial parameters are so that lng == wdth,\nthe solution [lng] would be the most obvious but not in the spirit of this kata so, in that case, return None/nil/null/Nothing.\nReturn {} with C++.\n\nIn that case the returned structure of C will have its sz component equal to 0.\n(See the \"Solution\" and \"Examples\" tabs)\n\n  sqInRect(5, 5) should return None\n\"\"\"\n\ndef sqInRect(lng, wdth):\n    if lng == wdth:\n        return None\n    elif lng > wdth:\n        big = lng\n        small = wdth\n    else:\n        big = wdth\n        small = lng\n\n    square = []\n    while True:  # repeatedly slice the largest possible square off the rectangle\n        square += [small]\n        if big == small:\n            break\n        if big - small > small:  # the long side still fits another small-sized square\n            big = big - small\n        else:\n            big, small = small, big - small\n\n    return square\n\nfrom unittest import TestCase\n\nclass TestSqInRect(TestCase):\n    def test_sqInRect(test):\n        test.assertEqual(sqInRect(5, 5), None)\n        test.assertEqual(sqInRect(5, 3), [3, 2, 1, 1])\n        test.assertEqual(sqInRect(4, 2), [2, 2])","sub_path":"Codewars/rectangle-into-squares.py","file_name":"rectangle-into-squares.py","file_ext":"py","file_size_in_byte":1695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"329808317","text":"from os import truncate\nimport queue\nimport sys\n\nimport argparse\nimport time\nimport msgpack\nfrom enum import Enum, auto\n\nimport numpy as np\nimport decimal\n\n\n# This file is subject to the terms and conditions defined in\n# file 'LICENSE', which is part of this source code package.\n\nfrom operator import itemgetter\n\nfrom sortedcontainers import SortedDict\nfrom planning_utils import a_star, heuristic, create_grid\nfrom udacidrone import Drone\nfrom udacidrone.connection import MavlinkConnection\nfrom udacidrone.messaging import MsgID\nfrom udacidrone.frame_utils import global_to_local\n\n\nimport matplotlib\n#matplotlib.use('Qt5Agg')\nimport matplotlib.pyplot as plt\nfrom sklearn.neighbors import KDTree\n\nimport networkx as nx\n\nimport matplotlib.pyplot as plt\nfrom networkx import Graph\nimport graphviz\n\n\nfrom IPython import get_ipython\nimport time\n\n#from enum import Enum\nfrom queue import PriorityQueue\n\nimport math\nfrom collections import Counter\n\nget_ipython().run_line_magic('matplotlib', 'inline')\nplt.switch_backend('Qt5agg')\n\nplt.rcParams['figure.figsize'] = 12, 12\n\n\n\nclass RRT:\n\n    x_goal = (30, 750)\n    rrt_goal = ()\n    num_vertices = 1600\n    dt = 18\n    x_init = (20, 150)\n    path = [(20, 30), (40, 50)]\n    \n    path_cost = 0\n    g = graphviz.Digraph('RRT Path', format = 'svg', filename='hello.gv')\n\n    def __init__(self, x_init):\n        # A tree is a
special case of a graph with\n # directed edges and only one path to any vertex.\n self.tree = nx.DiGraph()\n self.tree.add_node(x_init)\n\n self.path_tree = nx.DiGraph()\n self.path_tree.add_node(x_init)\n\n \n def add_vertex(self, x_new):\n self.tree.add_node(tuple(RRT.x_init))\n \n def add_edge(self, x_near, x_new, u):\n self.tree.add_edge(tuple(x_near), tuple(x_new), orientation=u)\n \n @property\n def vertices(self):\n return self.tree.nodes()\n \n @property\n def edges(self):\n return self.tree.edges()\n\n \n def add_rrt_vertex(self, x_new):\n self.path_tree.add_node(tuple(RRT.x_init))\n \n def add_rrt_edge(self, x_near, x_new, u):\n self.path_tree.add_edge(tuple(x_near), tuple(x_new), orientation=u)\n\n \n def rrt_vertices(self):\n return self.path_tree.nodes()\n \n @property\n def rrt_edges(self):\n return self.path_tree.edges()\n\n @property\n def parent(self, x_new):\n return self.tree.predecessors(x_new)\n\n \n def path_nodes(self):\n return list(self.path_tree.nodes)\n\n def get_parent(self, x_new):\n return self.tree.predecessors(x_new)\n\n\n def create_grid(self, data, drone_altitude, safety_distance):\n \"\"\"\n Returns a grid representation of a 2D configuration space\n based on given obstacle data, drone altitude and safety distance\n arguments.\n \"\"\"\n \n\n\n # minimum and maximum north coordinates\n north_min = np.floor(np.min(data[:, 0] - data[:, 3]))\n north_max = np.ceil(np.max(data[:, 0] + data[:, 3]))\n\n # minimum and maximum east coordinates\n east_min = np.floor(np.min(data[:, 1] - data[:, 4]))\n east_max = np.ceil(np.max(data[:, 1] + data[:, 4]))\n\n # given the minimum and maximum coordinates we can\n # calculate the size of the grid.\n north_size = int(np.ceil(north_max - north_min))\n east_size = int(np.ceil(east_max - east_min))\n\n # Initialize an empty grid\n grid = np.zeros((north_size, east_size))\n\n # Populate the grid with obstacles\n for i in range(data.shape[0]):\n north, east, alt, d_north, d_east, d_alt = data[i, :]\n if alt + d_alt + safety_distance > drone_altitude:\n obstacle = [\n int(np.clip(north - d_north - safety_distance - north_min, 0, north_size-1)),\n int(np.clip(north + d_north + safety_distance - north_min, 0, north_size-1)),\n int(np.clip(east - d_east - safety_distance - east_min, 0, east_size-1)),\n int(np.clip(east + d_east + safety_distance - east_min, 0, east_size-1)),\n ]\n grid[obstacle[0]:obstacle[1]+1, obstacle[2]:obstacle[3]+1] = 1\n \n # ~print('INFO', grid, drone_altitude, safety_distance)\n # ~print(grid, int(north_min), int(east_min)) \n \n\n #print(grid, drone_altitude, safety_distance)\n #print(grid, int(north_min), int(east_min))\n return grid, int(north_min), int(east_min)\n \n def sample_state(self, grid):\n x = np.random.uniform(0, grid.shape[0])\n y = np.random.uniform(0, grid.shape[1])\n return (x, y)\n\n\n # ### Nearest Neighbors\n # \n # A critical part of the RRT procedure is finding the closest vertex to the sampled random point. This the most computationally intensive part so be mindful of that. 
Depending on the number of vertices a naive implementation will run into trouble quickly.\n\n\n def nearest_neighbor(self, x_rand, rrt):\n \n \n #wp_radius = np.linalg.norm(x_goal)\n #print ('waypoint radius', wp_radius)\n \n closest_dist = 100000\n closest_vertex = None\n x_rand = np.array(x_rand)\n \n \n\n for v in rrt.vertices:\n d = np.linalg.norm(x_rand - np.array(v[:2]))\n if d < closest_dist:\n closest_dist = d\n closest_vertex = v\n '''\n if np.linalg.norm(x_goal - np.array(v[:2])) < 1.0:\n print(\"Found Goal\") \n sys.exit('Found Goal')\n '''\n \n return closest_vertex\n\n\n # ### Selecting Inputs\n # \n # Select input which moves `x_near` closer to `x_rand`. This should return the angle or orientation of the vehicle.\n\n\n def select_input(self, x_rand, x_near):\n return np.arctan2(x_rand[1] - x_near[1], x_rand[0] - x_near[0])\n\n\n # ### New State\n # \n # \n\n # The new vertex `x_new` is calculated by travelling from the current vertex `x_near` with a orientation `u` for time `dt`.\n\n\n def new_state(self, x_near, u, dt):\n nx = x_near[0] + np.cos(u)*dt\n ny = x_near[1] + np.sin(u)*dt\n return (nx, ny)\n\n\n # ### Putting It All Together\n # \n # Awesome! Now we'll put everything together and generate an RRT.\n\n \n\n def generate_RRT(self, grid, x_init, num_vertices, dt):\n \n \n x_goal = (30, 750)\n \n num_vertices = 1600\n dt = 18\n x_init = (20, 150)\n path = [(20, 30), (40, 50)]\n\n print ('Planning RRT path. It may take a few seconds...')\n rrt = RRT(x_init)\n rrt_path = RRT(x_init)\n \n\n for _ in range(num_vertices):\n\n \n \n x_rand = RRT.sample_state(self, grid)\n # sample states until a free state is found\n while grid[int(x_rand[0]), int(x_rand[1])] == 1:\n x_rand = RRT.sample_state(self, grid)\n \n x_near = RRT.nearest_neighbor(self, x_rand, rrt)\n u = RRT.select_input(self, x_rand, x_near)\n x_new = RRT.new_state(self, x_near, u, dt)\n \n #v_near = np.array([30, 750])\n norm_g = np.array(x_goal)\n norm_n = np.array(x_near)\n #norm_n = np.array(v_near)\n \n \n #print(norm_g, norm_n)\n #print(np.linalg.norm(norm_g - norm_n))\n \n #rrt_cost = np.linalg.norm(np.array(x_new) - np.array(x_goal))\n #rrt_cost = np.linalg.norm(norm_g - norm_n)\n #print(\"edge cost\", rrt_cost)\n\n\n if np.linalg.norm(norm_g - norm_n) < 100:\n\n print (\"Goal Found.\")\n rrt.add_edge(x_near, x_new, u)\n\n # Now let's plot the generated RRT.\n\n \n plt.imshow(grid, cmap='Greys', origin='lower')\n plt.plot(RRT.x_init[1], RRT.x_init[0], 'ro')\n plt.plot(RRT.x_goal[1], RRT.x_goal[0], 'ro')\n \n for (v1, v2) in rrt.edges:\n plt.plot([v1[1], v2[1]], [v1[0], v2[0]], 'y-')\n \n plt.show(block=True)\n \n\n \n current_node = x_new\n\n #pos = nx.spring_layout(rrt)\n\n #nx.draw_networkx_nodes(rrt, pos)\n #nx.draw_networkx_labels(rrt, pos)\n #nx.draw_networkx_edges(rrt, pos, edge_color='r', arrows = True)\n\n #plt.show(block=True)\n #print(\"rrt path\", rrt([0],[1],[2]))\n\n for _ in range(num_vertices):\n\n parent = list(rrt.get_parent(current_node))\n\n current_node = (int(current_node[0]), int(current_node[1]))\n parent_node = tuple(round(int(p1)) for p1 in parent[0])\n \n print(\"current_node\", current_node)\n print(\"parent node\", parent_node)\n\n if parent_node == x_init:\n\n print(\"Path Mapped\")\n RRT.wp_nodes = list(rrt_path.path_tree.nodes)\n print(\"path nodes\", RRT.wp_nodes)\n\n plt.imshow(grid, cmap='Greys', origin='lower')\n plt.plot(RRT.x_init[1], RRT.x_init[0], 'ro')\n plt.plot(RRT.x_goal[1], RRT.x_goal[0], 'ro')\n \n for (v1, v2) in rrt_path.path_tree.edges:\n plt.plot([v1[1], 
v2[1]], [v1[0], v2[0]], 'y-')\n \n plt.show(block=True)\n\n return rrt\n\n else: \n rrt_path.add_rrt_edge(current_node, parent_node, u)\n \n current_node = tuple(parent[0])\n print(\"new parent\", current_node)\n\n \n\n elif grid[int(x_new[0]), int(x_new[1])] == 0:\n # the orientation `u` will be added as metadata to\n # the edge\n rrt.add_edge(x_near, x_new, u)\n #memoize_nodes(grid, rrt_cost, x_init, x_goal, x_new, x_near, rrt, u)\n\n print(\"RRT Path Mapped\") \n return rrt \n \n #States\n \n # Assume all actions cost the same.\n\n def heuristic(position, goal_position):\n return np.linalg.norm(np.array(position) - np.array(goal_position))\n\n\n\n \nclass States(Enum):\n MANUAL = auto()\n ARMING = auto()\n TAKEOFF = auto()\n WAYPOINT = auto()\n LANDING = auto()\n DISARMING = auto()\n PLANNING = auto()\n\n\nclass MotionPlanning(Drone):\n\n def __init__(self, connection):\n super().__init__(connection)\n\n self.target_position = np.array([0.0, 0.0, 0.0])\n self.waypoints = []\n self.in_mission = True\n self.check_state = {}\n\n # initial state\n self.flight_state = States.MANUAL\n\n # register all your callbacks here\n self.register_callback(MsgID.LOCAL_POSITION, self.local_position_callback)\n self.register_callback(MsgID.LOCAL_VELOCITY, self.velocity_callback)\n self.register_callback(MsgID.STATE, self.state_callback)\n\n def local_position_callback(self):\n if self.flight_state == States.TAKEOFF:\n if -1.0 * self.local_position[2] > 0.95 * self.target_position[2]:\n self.waypoint_transition()\n elif self.flight_state == States.WAYPOINT:\n if np.linalg.norm(self.target_position[0:2] - self.local_position[0:2]) < 1.0:\n if len(self.waypoints) > 0:\n self.waypoint_transition()\n else:\n if np.linalg.norm(self.local_velocity[0:2]) < 1.0:\n self.landing_transition()\n\n def velocity_callback(self):\n if self.flight_state == States.LANDING:\n if self.global_position[2] - self.global_home[2] < 0.1:\n if abs(self.local_position[2]) < 0.01:\n self.disarming_transition()\n\n def state_callback(self):\n if self.in_mission:\n if self.flight_state == States.MANUAL:\n self.arming_transition()\n elif self.flight_state == States.ARMING:\n if self.armed:\n self.plan_path()\n elif self.flight_state == States.PLANNING:\n self.takeoff_transition()\n elif self.flight_state == States.DISARMING:\n if ~self.armed & ~self.guided:\n self.manual_transition()\n\n def arming_transition(self):\n self.flight_state = States.ARMING\n print(\"arming transition\")\n self.arm()\n self.take_control()\n\n def takeoff_transition(self):\n self.flight_state = States.TAKEOFF\n print(\"takeoff transition\")\n self.takeoff(self.target_position[2])\n\n def waypoint_transition(self):\n self.flight_state = States.WAYPOINT\n print(\"waypoint transition\")\n self.target_position = self.waypoints.pop(0)\n print('target position', self.target_position)\n self.cmd_position(self.target_position[0], self.target_position[1], self.target_position[2], self.target_position[3])\n\n def landing_transition(self):\n self.flight_state = States.LANDING\n print(\"landing transition\")\n self.land()\n\n def disarming_transition(self):\n self.flight_state = States.DISARMING\n print(\"disarm transition\")\n self.disarm()\n self.release_control()\n\n def manual_transition(self):\n self.flight_state = States.MANUAL\n print(\"manual transition\")\n self.stop()\n self.in_mission = False\n\n def send_waypoints(self):\n print(\"Sending waypoints to simulator ...\")\n data = msgpack.dumps(self.waypoints)\n self.connection._master.write(data)\n\n def 
plan_path(self):\n self.flight_state = States.PLANNING\n print(\"Searching for a path ...\")\n TARGET_ALTITUDE = 100\n SAFETY_DISTANCE = 5\n\n self.target_position[2] = TARGET_ALTITUDE\n\n # TODO: read lat0, lon0 from colliders into floating point values\n \n # TODO: set home position to (lon0, lat0, 0)\n\n # TODO: retrieve current global position\n \n # TODO: convert to current local position using global_to_local()\n \n print('global home {0}, position {1}, local position {2}'.format(self.global_home, self.global_position,\n self.local_position))\n # Read in obstacle map\n data = np.loadtxt('colliders.csv', delimiter=',', dtype='float64', skiprows=2)\n \n # Define a grid for a particular altitude and safety margin around obstacles\n #grid, north_offset, east_offset = create_grid(data, TARGET_ALTITUDE, SAFETY_DISTANCE)\n grid, north_offset, east_offset = RRT.create_grid(self, data, TARGET_ALTITUDE, SAFETY_DISTANCE)\n print(\"North offset = {0}, east offset = {1}\".format(north_offset, east_offset))\n # Define starting point on the grid (this is just grid center)\n grid_start = (-north_offset, -east_offset)\n # TODO: convert start position to current position rather than map center\n \n # Set goal as some arbitrary position on the grid\n grid_goal = (-north_offset + 10, -east_offset + 10)\n \n # TODO: adapt to set goal as latitude / longitude position and convert\n\n # Run A* to find a path from start to goal\n \n self.local_position_callback\n # TODO: add diagonal motions with a cost of sqrt(2) to your A* implementation\n # or move to a different search space such as a graph (not done here)\n print('Local Start and Goal: ', grid_start, grid_goal)\n path, _ = a_star(grid, heuristic, grid_start, grid_goal)\n \n \n # TODO: prune path to minimize number of waypoints\n # TODO (if you're feeling ambitious): Try a different approach altogether!\n \n rrt = RRT.generate_RRT(self, grid, RRT.x_init, RRT.num_vertices, RRT.dt)\n \n\n \n #path, _ = a_star(grid, heuristic, grid_start, grid_goal)\n #print(\"a_star nodes\", path, \"\\n\")\n \n print(\"rrt nodes\", RRT.wp_nodes, \"\\n\") #, rrt.edges\n \n #rrt_path, _= list(rrt.vertices)\n \n\n #print (RRT.vertices)\n # Convert path to waypoints\n \n \n #waypoints = [[p[0] + north_offset, p[1] + east_offset, TARGET_ALTITUDE, 0] for p in path]\n # Set self.waypoints\n #self.waypoints = waypoints\n # TODO: send waypoints to sim (this is just for visualization of waypoints)\n #self.send_waypoints()\n\n waypoints = [[r[0], r[1], TARGET_ALTITUDE, 0] for r in RRT.wp_nodes]\n #waypoints = [[r[0], + north_offset, r[1] + east_offset, TARGET_ALTITUDE, 0] for r in RRT.wp_nodes]\n #Set self.waypoints\n waypoints = list(reversed(waypoints))\n self.waypoints = waypoints\n # TODO: send waypoints to sim (this is just for visualization of waypoints)\n \n print(\"waypoints\", waypoints)\n self.send_waypoints()\n\n def start(self):\n self.start_log(\"Logs\", \"NavLog.txt\")\n\n print(\"starting connection\")\n self.connection.start()\n\n # Only required if they do threaded\n # while self.in_mission:\n # pass\n\n self.stop_log()\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--port', type=int, default=5760, help='Port number')\n parser.add_argument('--host', type=str, default='127.0.0.1', help=\"host address, i.e. 
'127.0.0.1'\")\n \n # ~ from dariemt\n parser.add_argument('--goal_lon', type=str, help=\"Goal longitude\")\n parser.add_argument('--goal_lat', type=str, help=\"Goal latitude\")\n parser.add_argument('--goal_alt', type=str, help=\"Goal altitude\")\n args = parser.parse_args()\n\n conn = MavlinkConnection('tcp:{0}:{1}'.format(args.host, args.port), timeout=240)\n drone = MotionPlanning(conn)\n \n # ~ from dariemt\n #goal_global_position = np.fromstring(f'{args.goal_lon},{args.goal_lat},{args.goal_alt}', dtype='float64', sep=',')\n #drone = MotionPlanning(conn, goal_global_position=goal_global_position)\n \n time.sleep(1)\n drone.start()\n\n \n","sub_path":"motion_planning/rrt_v2_lab.py","file_name":"rrt_v2_lab.py","file_ext":"py","file_size_in_byte":18467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"476323240","text":"# for i in range (1, 11):\r\n# print(i)\r\n#\r\n# string_example= 'Hey There'\r\n# for ch in string_example:\r\n# print(ch)\r\n\r\n# for i in range(12, 0, -2):\r\n# print(i)\r\n#\r\n\r\nnum =5\r\nfor i in range(2, 4):\r\n if num % i == 0:\r\n print(num % i)\r\n print(f\"it's not a prime number and value of num is {num} and i is {i}\")\r\n else:\r\n print(f\"it's a prime number and the value of num is {num} and i is {i}\")\r\n break\r\n\r\ndef check_prime(num):\r\n for i in range(3, num):\r\n if num % i == 0:\r\n print(f\"Not prime and value of i is {i}\")\r\n break\r\n else:\r\n print(f\"prime number and value of i is {i}\")\r\n\r\n\r\ndef is_prime(number):\r\n if number < 2:\r\n return False\r\n # check if number is divisible by 2 to number -1\r\n for divisor in range(2, number):\r\n if number % divisor == 0:\r\n return False\r\n return True\r\n","sub_path":"Section8-Loops/basic_loops.py","file_name":"basic_loops.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"360947823","text":"import gc\nimport numpy as np\nimport pandas as pd\nfrom scipy.signal import savgol_filter\nfrom pandas.tseries.holiday import USFederalHolidayCalendar as calendar\nfrom ashrae.utils import DATA_PATH, timer, load_data, reduce_mem_usage\nfrom ashrae.encoders import GaussianTargetEncoder\n\n\n# define groupings and corresponding priors\ngroups_and_priors = {\n \n # singe encodings\n (\"hour\",): None,\n (\"weekday\",): None,\n (\"month\",): None,\n (\"building_id\",): None,\n (\"primary_use\",): None,\n (\"site_id\",): None, \n (\"meter\",): None,\n \n # second-order interactions\n (\"meter\", \"hour\"): [\"gte_meter\", \"gte_hour\"],\n (\"meter\", \"weekday\"): [\"gte_meter\", \"gte_weekday\"],\n (\"meter\", \"month\"): [\"gte_meter\", \"gte_month\"],\n (\"meter\", \"building_id\"): [\"gte_meter\", \"gte_building_id\"],\n (\"meter\", \"primary_use\"): [\"gte_meter\", \"gte_primary_use\"],\n (\"meter\", \"site_id\"): [\"gte_meter\", \"gte_site_id\"],\n \n # higher-order interactions with building_id\n (\"meter\", \"building_id\", \"hour\"): [\"gte_meter_building_id\", \"gte_meter_hour\"],\n (\"meter\", \"building_id\", \"weekday\"): [\"gte_meter_building_id\", \"gte_meter_weekday\"],\n (\"meter\", \"building_id\", \"month\"): [\"gte_meter_building_id\", \"gte_meter_month\"],\n \n}\n\n\ndef process_timestamp(df): \n df.timestamp = pd.to_datetime(df.timestamp)\n df.timestamp = (df.timestamp - pd.to_datetime(\"2016-01-01\")).dt.total_seconds() // 3600 \n\n\ndef process_weather(df, dataset, fix_timestamps=True, interpolate_na=True, 
add_na_indicators=True):\n if fix_timestamps:\n site_GMT_offsets = [-5, 0, -7, -5, -8, 0, -5, -5, -5, -6, -7, -5, 0, -6, -5, -5]\n GMT_offset_map = {site: offset for site, offset in enumerate(site_GMT_offsets)}\n df.timestamp = df.timestamp + df.site_id.map(GMT_offset_map)\n\n if interpolate_na:\n site_dfs = []\n for site_id in df.site_id.unique():\n # Make sure that we include all possible hours so that we can interpolate evenly\n if dataset == \"train\":\n site_df = df[df.site_id == site_id].set_index(\"timestamp\").reindex(range(8784))\n elif dataset == \"test\":\n site_df = df[df.site_id == site_id].set_index(\"timestamp\").reindex(range(8784, 26304))\n else: \n raise ValueError(f\"dataset={dataset} not recognized\")\n site_df.site_id = site_id\n for col in [c for c in site_df.columns if c != \"site_id\"]:\n if add_na_indicators: site_df[f\"had_{col}\"] = ~site_df[col].isna()\n site_df[col] = site_df[col].interpolate(limit_direction='both', method='spline', order=3,)\n # Some sites are completely missing some columns, so use this fallback\n site_df[col] = site_df[col].fillna(df[col].median())\n site_dfs.append(site_df)\n df = pd.concat(site_dfs).reset_index() # make timestamp back into a regular column\n\n if add_na_indicators:\n for col in df.columns:\n if df[col].isna().any(): df[f\"had_{col}\"] = ~df[col].isna()\n\n return df.fillna(-1) # .set_index([\"site_id\", \"timestamp\"])\n\n\ndef add_lag_feature(df, window=3, group_cols=\"site_id\", lag_cols=[\"air_temperature\"]):\n rolled = df.groupby(group_cols)[lag_cols].rolling(window=window, min_periods=0, center=True)\n lag_mean = rolled.mean().reset_index().astype(np.float16)\n lag_max = rolled.quantile(0.95).reset_index().astype(np.float16)\n lag_min = rolled.quantile(0.05).reset_index().astype(np.float16)\n lag_std = rolled.std().reset_index().astype(np.float16)\n \n for col in lag_cols:\n df[f\"{col}_mean_lag{window}\"] = lag_mean[col]\n df[f\"{col}_max_lag{window}\"] = lag_max[col]\n df[f\"{col}_min_lag{window}\"] = lag_min[col]\n df[f\"{col}_std_lag{window}\"] = lag_std[col]\n \n \ndef add_features(df):\n # time features\n df[\"hour\"] = df.ts.dt.hour\n df[\"weekday\"] = df.ts.dt.weekday\n df[\"month\"] = df.ts.dt.month\n df[\"year\"] = df.ts.dt.year \n \n # time interactions\n df[\"weekday_hour\"] = df.weekday.astype(str) + \"-\" + df.hour.astype(str)\n \n # apply cyclic encoding of periodic features\n df[\"hour_x\"] = np.cos(2*np.pi*df.timestamp/24)\n df[\"hour_y\"] = np.sin(2*np.pi*df.timestamp/24)\n \n df[\"month_x\"] = np.cos(2*np.pi*df.timestamp/(30.4*24))\n df[\"month_y\"] = np.sin(2*np.pi*df.timestamp/(30.4*24))\n \n df[\"weekday_x\"] = np.cos(2*np.pi*df.timestamp/(7*24))\n df[\"weekday_y\"] = np.sin(2*np.pi*df.timestamp/(7*24))\n \n # meta data features\n df[\"year_built\"] = df[\"year_built\"]-1900\n \n # bulding_id interactions\n bm_ = df.building_id.astype(str) + \"-\" + df.meter.astype(str) + \"-\" \n df[\"building_weekday_hour\"] = bm_ + df.weekday_hour\n df[\"building_weekday\"] = bm_ + df.weekday.astype(str)\n df[\"building_month\"] = bm_ + df.month.astype(str)\n df[\"building_hour\"] = bm_ + df.hour.astype(str) \n df[\"building_meter\"] = bm_\n\n # get holidays\n dates_range = pd.date_range(start=\"2015-12-31\", end=\"2019-01-01\")\n us_holidays = calendar().holidays(start=dates_range.min(), end=dates_range.max()) \n df[\"is_holiday\"] = (df.ts.dt.date.astype(\"datetime64\").isin(us_holidays)).astype(np.int8) \n \nif __name__ == \"__main__\":\n\n with timer(\"Loading data\"):\n train, test = 
load_data(\"input\")\n building_meta = load_data(\"meta\")\n train_weather, test_weather = load_data(\"weather\")\n \n with timer(\"Process timestamp\"):\n train[\"ts\"] = pd.to_datetime(train.timestamp)\n test[\"ts\"] = pd.to_datetime(test.timestamp)\n process_timestamp(train)\n process_timestamp(test)\n process_timestamp(train_weather)\n process_timestamp(test_weather)\n\n with timer(\"Process weather\"):\n process_weather(train_weather, \"train\")\n process_weather(test_weather, \"test\")\n \n for window_size in [7, 73]:\n add_lag_feature(train_weather, window=window_size)\n add_lag_feature(test_weather, window=window_size)\n\n with timer(\"Combine data\"):\n train = pd.merge(train, building_meta, \"left\", \"building_id\")\n train = pd.merge(train, train_weather, \"left\", [\"site_id\", \"timestamp\"])\n\n test = pd.merge(test, building_meta, \"left\", \"building_id\")\n test = pd.merge(test, test_weather, \"left\", [\"site_id\", \"timestamp\"]) \n \n with timer(\"Flag bad meter readings\"):\n is_bad_meter_reading = load_data(\"bad_meter_readings\").values\n train[\"is_bad_meter_reading\"] = is_bad_meter_reading\n\n with timer(\"Correct site 0 meter reading\"):\n train.loc[(train.site_id == 0) & (train.meter==0), \"meter_reading\"] *= 0.2931\n\n with timer(\"Add base features to train\"):\n add_features(train)\n \n with timer(\"Add base features to test\"):\n add_features(test)\n \n with timer(\"Free up memory\"):\n del train_weather, test_weather\n gc.collect()\n \n train.info()\n test.info()\n \n with timer(\"Reduce memory usage\"):\n train, _ = reduce_mem_usage(train, skip_cols=['ts', 'timestamp'], verbose=False)\n test, _ = reduce_mem_usage(test, skip_cols=['ts', 'timestamp'], verbose=False)\n \n with timer(\"Add target encoding features\"):\n train[\"target\"] = np.log1p(train.meter_reading)\n test[\"target\"] = np.mean(train[\"target\"])\n \n features = []\n good_train = train[train.is_bad_meter_reading.values==0].copy()\n for group_cols, prior_cols in groups_and_priors.items():\n features.append(f\"gte_{'_'.join(group_cols)}\")\n gte = GaussianTargetEncoder(list(group_cols), \"target\", prior_cols)\n good_train[features[-1]] = gte.fit_transform(good_train)\n train[features[-1]] = gte.transform(train)\n test[features[-1]] = gte.transform(test)\n \n train.info()\n test.info()\n good_train.info()\n \n with timer(\"Remove unnecessary columns\"):\n train.drop([\"ts\", \"target\"], 1, inplace=True)\n test.drop([\"ts\", \"target\"], 1, inplace=True)\n\n with timer(\"Reduce memory usage\"):\n train, _ = reduce_mem_usage(train, skip_cols=['ts', 'timestamp'], verbose=False)\n test, _ = reduce_mem_usage(test, skip_cols=['ts', 'timestamp'], verbose=False)\n gc.collect()\n\n with timer(\"Save as pickle\"):\n train.to_pickle(f\"{DATA_PATH}/preprocessed/train_clean.pkl\")\n del train\n gc.collect()\n test.to_pickle(f\"{DATA_PATH}/preprocessed/test_clean.pkl\")","sub_path":"solutions/rank-1/scripts/02_preprocess_data.py","file_name":"02_preprocess_data.py","file_ext":"py","file_size_in_byte":8594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"298956044","text":"# A* algorithm for finding best path with no obstacles and cost on nodes\n\nimport heapq\n\n\n# The main astar algorithm\ndef run_astar(board, start_node, end_node):\n\n if not isinstance(board, list): # Make sure the board is represented as a list\n return \"Board must be a list\"\n\n board_width = len(board[0]) # Get the width of the board\n board_height = len(board) # Get the 
height of the board\n\n    closed_node = set()  # The closed nodes\n\n    priority_queue = []  # The opened nodes\n    heapq.heapify(priority_queue)\n    heapq.heappush(priority_queue, (start_node.f, start_node))\n\n    came_from = {}  # The current road from start to end\n\n    start_node.g = 0\n    start_node.f = start_node.g + heuristic(start_node, end_node)\n\n    while priority_queue:  # Keep searching while the open list is not empty\n        current_node = heapq.heappop(priority_queue)[1]  # Choose the current node: the node with the lowest f value in the open queue\n\n        if current_node == end_node:  # If the current node is also the end node, then draw the board and stop searching\n            display_path(came_from, current_node, board)\n            break\n\n        closed_node.add(current_node)\n\n        adj_nodes = get_adjacent_nodes(current_node, board_width, board_height, board)  # Find the adjacent nodes to the current node\n\n        for adj_node in adj_nodes:  # If the adjacent node is already closed, then don't calculate it again\n            if adj_node in closed_node:\n                continue\n\n            temp_gscore = current_node.g + adj_node.cost  # Set a tentative g score for the adjacent node\n\n            if adj_node.walkable and adj_node not in came_from:  # Update the node if it is walkable and not already updated\n                came_from[adj_node] = current_node\n                adj_node.g = temp_gscore\n                adj_node.h = heuristic(adj_node, end_node)\n                adj_node.f = adj_node.g + adj_node.h\n\n                heapq.heappush(priority_queue, (adj_node.f, adj_node))  # Add the node to the open queue\n\n            elif temp_gscore >= adj_node.g:  # If the tentative cost is not better than the current g score, skip this node\n                continue\n\n\ndef heuristic(node, end_node):  # Calculate the heuristic, with Manhattan distance\n    return abs(node.x - end_node.x) + abs(node.y - end_node.y)\n\n\ndef get_adjacent_nodes(current_node, board_w, board_h, board):  # Find the adjacent nodes from the current\n    adj_nodes = []\n\n    if current_node.x > 0:\n        adj_nodes.append(board[current_node.x - 1][current_node.y])\n\n    if current_node.y > 0:\n        adj_nodes.append(board[current_node.x][current_node.y - 1])\n\n    if current_node.x < board_h - 1:\n        adj_nodes.append(board[current_node.x + 1][current_node.y])\n\n    if current_node.y < board_w - 1:\n        adj_nodes.append(board[current_node.x][current_node.y + 1])\n\n    return adj_nodes\n\n\ndef display_path(came_from, current, board):  # Make the board with colors and O for the nodes in the path\n    node_path = [current]\n    board_string = \"\"\n    while current in came_from.keys():  # Find all nodes in the path and add to a list\n        current = came_from[current]\n        node_path.append(current)\n\n    for node in node_path:  # Change colors for the path\n        if node.character == \"A\":\n            node.character = color(32, \"A\")\n        elif node.character == \"B\":\n            node.character = color(32, \"B\")\n        else:\n            node.character = color(31, \"O\")\n\n    for nodes in board:  # Change the board to a string instead of a list\n        for node in nodes:\n            board_string += node.character\n        board_string += \"\\n\"\n\n    print(board_string)\n\n\ndef color(colors, string):  # Change colors of nodes\n    return \"\\033[\" + str(colors) + \"m\" + string + \"\\033[0m\"\n\n\ndef read_board(board_id='1-1'):  # Read the board into lists and find the start and end nodes\n    file_name = 'boards/board-%s.txt' % board_id\n    board = []\n    start, end = None, None\n\n    with open(file_name) as f:\n        for x, line in enumerate(f.readlines()):\n            temp_line = []\n            for y, ch in enumerate(line.rstrip()):\n                temp_node = Node(x, y, ch)\n                temp_line.append(temp_node)\n                if temp_node.start:\n                    start = temp_node\n                if temp_node.end:\n                    end = temp_node\n            board.append(temp_line)\n    # print(board)\n    return board, start, end\n\n\nclass Node(object):  # Create a node object\n\n    def __init__(self, x, y, character):  # Set and create different values for object\n        self.x = x\n        self.y = y\n        self.character = character\n        self.start = character == \"A\"\n        self.end = character == \"B\"\n        self.walkable = character != \"#\"\n        self.f = 0\n        self.g = 0\n        self.h = 0\n        self.cost = 0\n        self.set_node_cost()\n\n    def __lt__(self, other):  # Compare the current value with other, less than\n        return self.f < other.f\n\n    def __gt__(self, other):  # Compare the current value with other, greater than\n        return self.f > other.f\n\n    def __str__(self):\n        # return 'Node(%i, %i, %s, %i, %i, %i): %s' % \\\n        #       (self.x, self.y, self.character, self.f, self.g, self.h, self.walkable)\n        return '%s: %i' % (self.character, self.f)\n        # return self.character\n\n    def __repr__(self):\n        return self.__str__()\n\n    def set_node_cost(self):  # Set the cost depending on the character\n        if self.character == \"w\":\n            self.cost = 100\n        elif self.character == \"m\":\n            self.cost = 50\n        elif self.character == \"f\":\n            self.cost = 10\n        elif self.character == \"g\":\n            self.cost = 5\n        elif self.character == \"r\":\n            self.cost = 1\n        else:\n            self.cost = 1\n\nif __name__ == \"__main__\":  # Create board and run a*\n    board_data = read_board(board_id=\"2-2\")  # read_board returns board, start, end as a tuple\n    run_astar(*board_data)  # tuple unpacking, spread tuple as arguments\n","sub_path":"astar/astar_a_2.py","file_name":"astar_a_2.py","file_ext":"py","file_size_in_byte":6069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"373401096","text":"import argparse\n\nfrom pyparsing import Word, alphanums, ZeroOrMore, OneOrMore, Literal, LineEnd, Or, Group, ParserElement, restOfLine, \\\n    QuotedString\n\n\nParserElement.setDefaultWhitespaceChars(' \\t')\n\nEOL = LineEnd().suppress()\nSEMI_COLON = Literal(\";\").suppress()\n\ncomment = Literal('//') + restOfLine\nterminator = Or([OneOrMore(EOL), OneOrMore(SEMI_COLON)])\nname = Word(alphanums + '_')\n\nargument = Or([name, QuotedString('\"')])\ncommand = Group(name('cvar') + Group(ZeroOrMore(argument))('args') + terminator)\n\nconfig = Group(ZeroOrMore(command))('commands').ignore(comment)\n\n\ndef parse_csgo_config(filename):\n    return config.parseFile(filename, parseAll=True)\n\n\ndef minify_argument(argument):\n    return '\"' + argument + '\"'\n\n\ndef minify_command(command):\n    if command['args']:\n        return command['cvar'] + ' ' + ' '.join(minify_argument(arg) for arg in command['args']) + ';'\n    else:\n        return command['cvar'] + ';'\n\n\ndef minify_cfg(cfg):\n    lines = []\n    line = ''\n\n    for cmd in cfg['commands']:\n        minified_cmd = minify_command(cmd)\n\n        if len(line) + len(minified_cmd) >= 255:\n            lines.append(line)\n            line = ''\n\n        line += minified_cmd\n\n    lines.append(line)\n    return '\\n'.join(lines)\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser(description='Minify a complex CS:GO config into one line.')\n    parser.add_argument('input', help='The entry point file for your config', type=argparse.FileType('r'))\n    parser.add_argument('output', help='The output file for the minified config', type=argparse.FileType('w'), nargs='?')\n    args = parser.parse_args()\n\n    with args.input as f:\n        result = parse_csgo_config(f)\n\n    minified = minify_cfg(result)\n\n    if args.output:\n        with args.output as f:\n            f.write(minified)\n    else:\n
print(minified)\n","sub_path":"minify.py","file_name":"minify.py","file_ext":"py","file_size_in_byte":1853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"433712734","text":"# <>\n# Copyright 2022, Lawrence Livermore National Security, LLC.\n# See the top-level COPYRIGHT file for details.\n# \n# SPDX-License-Identifier: BSD-3-Clause\n# <>\n\n\"\"\"\nThis module defines classes for storing particle decay information.\nDecay data are organized as follows::\n\n - particle\n - decayData\n - averageEnergies\n - decayModes\n - decayMode\n - probability\n - Q\n - spectra\n - decayPath\n - decay\n\nAll decay info is stored inside the section.\n\n stores a summary of average outgoing energy for various types of decay radiation,\nsummed over all possible decay modes.\n\n contains a list of , each with a corresponding probability.\nA may also contain a (i.e. the decay Q-value), a (describing the outgoing energy spectrum\nfor each types of decay product), and a (listing each step of the decay along with outgoing products).\n\nIdeally the outgoing spectra would be associated with specific decay products under the , but\noften the spectra are summed over multiple products. For example, in B-nn decay (beta-delayed 2-neutron emission),\nthe spectra of the 2 outgoing neutrons cannot be resolved, so the evaluation usually stores only the summed spectrum\nfor the two neutrons.\n\"\"\"\n\nimport abc # FIXME remove unused import?\n\nfrom LUPY import ancestry as ancestryModule\nfrom fudge import GNDS_formatVersion as GNDS_formatVersionModule\n\n\nfrom .. import suite as suiteModule\nfrom .. import misc as miscModule\n\nfrom ..decays import probability as probabilityModule\nfrom ..decays import Q as QModule\nfrom ..decays import product as productModule\nfrom ..decays import spectrum as spectrumModule\nfrom ..decays import averageEnergy as averageEnergyModule\n\ndecayModesIT = \"isomeric transition\"\ndecayModesSF = \"spontaneous fission\"\ndecayModesParticle = \"\"\n\ndecayModeTypes = [decayModesIT, decayModesSF, decayModesParticle]\n\nclass Decay( miscModule.ClassWithIndexKey ) :\n \"\"\"\n Describes one step in a multi-step decay. For example, in beta-delayed 2-n emission\n the first is for the beta-decay, and the 2nd and 3rd elements describe the two neutrons\n being emitted sequentially.\n \"\"\"\n\n moniker = 'decay'\n\n def __init__( self, index, mode, complete = True ) :\n \"\"\"\n :param index: int, identifies the position of this decay within the decayPath. 
0 = first, 1 = second, etc.\n :param mode: string identifying a decay mode, such as \"isomeric transition\"\n :param complete: boolean, whether all outgoing products are listed\n \"\"\"\n\n miscModule.ClassWithIndexKey.__init__( self, index )\n\n if( not( isinstance( mode, str ) ) ) : raise TypeError( 'decay mode type not an instance of str' )\n if( mode not in decayModeTypes ) : raise ValueError( 'decay mode type invalid' )\n self.__mode = mode\n\n if( not( isinstance( complete, bool ) ) ) : raise TypeError( 'complete not an instance of bool' )\n self.__complete = complete\n\n self.__products = productModule.Suite( )\n self.__products.setAncestor( self )\n\n @property\n def complete( self ) :\n\n return( self.__complete )\n\n @property\n def products( self ) :\n\n return( self.__products )\n\n @property\n def mode( self ) :\n\n return( self.__mode )\n\n def convertUnits( self, unitMap ) :\n \"\"\"See documentation in PoPs.database.convertUnits\"\"\"\n\n self.__products.convertUnits( unitMap )\n\n def copy( self ) :\n\n _decay = self.__class__( self.index, self.mode, self.complete )\n for item in self.__products : _decay.products.add( item.copy( ) )\n return( _decay )\n\n def toXML_strList( self, indent = '', **kwargs ) :\n\n indent2 = indent + kwargs.get( 'incrementalIndent', ' ' )\n\n attributes = ''\n if self.mode != '':\n attrName = 'mode'\n if kwargs.get('formatVersion') == GNDS_formatVersionModule.version_1_10:\n attrName = 'type'\n attributes = ' %s=\"%s\"' % (attrName, self.mode)\n if not self.complete: attributes = ' complete=\"false\"'\n XMLStringList = [ '%s<%s index=\"%s\"%s>' % ( indent, self.moniker, self.index, attributes ) ]\n XMLStringList += self.products.toXML_strList( indent = indent2, **kwargs )\n XMLStringList[-1] += '' % self.moniker\n\n return( XMLStringList )\n\n def parseNode(self, element, xPath, linkData, **kwargs):\n\n xPath.append( element.tag )\n\n self.products.parseNode(element.find(self.products.moniker), xPath, linkData, **kwargs)\n\n xPath.pop()\n return( self )\n\n @classmethod\n def parseNodeUsingClass(cls, element, xPath, linkData, **kwargs):\n\n xPath.append( element.tag )\n\n complete = True\n if element.get('complete') == 'false': complete = False\n mode = element.get('mode','')\n if kwargs['formatVersion'] == GNDS_formatVersionModule.version_1_10:\n mode = element.get('type','')\n self = cls( element.get('index'), mode, complete )\n xPath.pop( )\n self.parseNode(element, xPath, linkData, **kwargs)\n\n return( self )\n\nclass DecayPath( suiteModule.Suite ) :\n \"\"\"\n Stores all steps involved in a decayMode\n \"\"\"\n\n moniker = 'decayPath'\n\n def __init__( self ) :\n\n suiteModule.Suite.__init__( self, ( Decay, ) )\n\n def parseNode(self, element, xPath, linkData, **kwargs):\n\n xPath.append( element.tag )\n\n for child in element :\n self.add(Decay.parseNodeUsingClass(child, xPath, linkData, **kwargs))\n\n xPath.pop( )\n return( self )\n\nclass DecayMode( miscModule.ClassWithLabelKey ) :\n \"\"\"\n Describes one way a particle can decay. The description includes the probability, Q-value, outgoing products\n and spectra. For electro-magnetic decays, may also include a set of internal conversion coefficients\n and/or photon emission probabilities.\n \"\"\"\n\n moniker = 'decayMode'\n\n def __init__( self, label, mode ) :\n \"\"\"\n :param label: string, must be unique within this section\n :param mode: string, describes the decay mode. 
Examples: \"beta+ or e.c.\", \"beta-,n\", \"alpha\", etc.\n \"\"\"\n\n from ..decays import misc as miscDecaysModule\n\n miscModule.ClassWithLabelKey.__init__( self, label )\n\n if not isinstance(mode, str):\n if not isinstance(mode, miscDecaysModule.Mode):\n raise TypeError('mode is not a str or an enum of %s' % miscDecaysModule.Mode)\n self.__mode = mode\n\n self.__probability = probabilityModule.Suite( )\n self.__probability.setAncestor( self )\n\n self.__internalConversionCoefficients = spectrumModule.InternalConversionCoefficients()\n self.__internalConversionCoefficients.setAncestor( self )\n\n self.__photonEmissionProbabilities = spectrumModule.PhotonEmissionProbabilities()\n self.__photonEmissionProbabilities.setAncestor( self )\n\n self.__Q = QModule.Suite( )\n self.__Q.setAncestor( self )\n\n self.__decayPath = DecayPath( )\n self.__decayPath.setAncestor( self )\n\n self.__spectra = spectrumModule.Spectra( )\n self.__spectra.setAncestor( self )\n\n @property\n def mode( self ) :\n\n return( self.__mode )\n\n @property\n def probability( self ) :\n\n return( self.__probability )\n\n @property\n def internalConversionCoefficients( self ):\n\n return( self.__internalConversionCoefficients )\n\n @property\n def photonEmissionProbabilities( self ):\n\n return( self.__photonEmissionProbabilities )\n\n @property\n def decayPath( self ) :\n\n return( self.__decayPath )\n\n @property\n def Q( self ) :\n\n return( self.__Q )\n\n @property\n def spectra( self ) :\n\n return( self.__spectra )\n\n def convertUnits( self, unitMap ) :\n \"\"\"See convertUnits documentation in PoPs.database\"\"\"\n\n self.__probability.convertUnits( unitMap )\n self.__Q.convertUnits( unitMap )\n self.__decayPath.convertUnits( unitMap )\n self.__spectra.convertUnits( unitMap )\n\n def copy(self):\n \"\"\"\n :return: deep copy of self\n \"\"\"\n\n other = self.__class__(self.label, self.mode)\n for item in self.__probability:\n other.probability.add(item.copy())\n for item in self.__photonEmissionProbabilities:\n other.photonEmissionProbabilities.add(item.copy())\n for item in self.__Q:\n other.Q.add(item.copy())\n for item in self.__decayPath:\n other.decayPath.add(item.copy())\n for item in self.__spectra:\n other.spectra.add(item.copy())\n return other\n\n def toXML_strList( self, indent = '', **kwargs ) :\n\n indent2 = indent + kwargs.get( 'incrementalIndent', ' ' )\n\n XMLStringList = [ '%s<%s label=\"%s\" mode=\"%s\">' % ( indent, self.moniker, self.label, self.mode ) ]\n XMLStringList += self.probability.toXML_strList( indent = indent2, **kwargs )\n XMLStringList += self.internalConversionCoefficients.toXML_strList( indent = indent2, **kwargs )\n XMLStringList += self.photonEmissionProbabilities.toXML_strList(indent=indent2, **kwargs)\n XMLStringList += self.Q.toXML_strList( indent = indent2, **kwargs )\n XMLStringList += self.decayPath.toXML_strList( indent = indent2, **kwargs )\n XMLStringList += self.spectra.toXML_strList( indent = indent2, **kwargs )\n XMLStringList[-1] += '' % self.moniker\n\n return( XMLStringList )\n\n def parseNode(self, element, xPath, linkData, **kwargs):\n\n xPath.append( \"%s[@label='%s']\" % (element.tag, element.get('label')) )\n\n for child in element :\n if( child.tag == probabilityModule.Suite.moniker ) :\n self.probability.parseNode(child, xPath, linkData, **kwargs)\n elif( child.tag == spectrumModule.InternalConversionCoefficients.moniker ) :\n self.internalConversionCoefficients.parseNode(child, xPath, linkData, **kwargs)\n elif( child.tag == 
spectrumModule.PhotonEmissionProbabilities.moniker ) :\n self.photonEmissionProbabilities.parseNode(child, xPath, linkData, **kwargs)\n elif( child.tag == QModule.Suite.moniker ) :\n self.Q.parseNode(child, xPath, linkData, **kwargs)\n elif( child.tag == DecayPath.moniker ) :\n self.decayPath.parseNode(child, xPath, linkData, **kwargs)\n elif( child.tag == spectrumModule.Spectra.moniker ) :\n self.spectra.parseNode(child, xPath, linkData, **kwargs)\n else :\n raise ValueError( 'Invalid tag = \"%s\"' % child.tag )\n\n xPath.pop( )\n return( self )\n\n @classmethod\n def parseNodeUsingClass(cls, node, xPath, linkData, **kwargs):\n\n from ..decays import misc as miscDecaysModule\n\n xPath.append( node.tag )\n\n mode = node.get('mode')\n try:\n mode = miscDecaysModule.Mode.fromString(mode)\n except:\n pass\n\n self = cls(node.get('label'), mode)\n self.parseNode(node, xPath, linkData, **kwargs)\n\n xPath.pop( )\n return( self )\n\nclass DecayModes( suiteModule.Suite ) :\n \"\"\"\n Contains a list of decayMode instances\n \"\"\"\n\n moniker = 'decayModes'\n\n def __init__( self ) :\n\n suiteModule.Suite.__init__( self, ( DecayMode, ) )\n\n def parseNode(self, element, xPath, linkData, **kwargs):\n\n xPath.append( element.tag )\n\n for child in element :\n self.add(DecayMode.parseNodeUsingClass(child, xPath, linkData, **kwargs))\n\n xPath.pop( )\n return( self )\n\nclass DecayData(ancestryModule.AncestryIO_bare):\n \"\"\"\n Contains all decay information for a particle, including average energies for decay products\n and a list of decay modes.\n \"\"\"\n\n moniker = 'decayData'\n\n def __init__( self ) :\n\n ancestryModule.AncestryIO.__init__(self)\n\n self.__decayModes = DecayModes( )\n self.__decayModes.setAncestor( self )\n\n self.__averageEnergies = averageEnergyModule.AverageEnergies( )\n self.__averageEnergies.setAncestor( self )\n\n @property\n def decayModes( self ) :\n\n return( self.__decayModes )\n\n @property\n def averageEnergies( self ) :\n\n return( self.__averageEnergies )\n\n def convertUnits( self, unitMap ) :\n \"\"\"See convertUnits documentation in PoPs.database\"\"\"\n\n self.__decayModes.convertUnits( unitMap )\n self.__averageEnergies.convertUnits( unitMap )\n\n def copy( self ) :\n \"\"\"\n :return: deep copy of self\n \"\"\"\n\n _decayData = DecayData( )\n self.copyItems( _decayData )\n return( _decayData )\n\n def copyItems( self, other ) :\n \"\"\"\n Copy all items in self to other\n :param other: decayData instance where contents of self will be copied\n \"\"\"\n\n for item in self.__decayModes : other.decayModes.add( item.copy( ) )\n for item in self.__averageEnergies : other.averageEnergies.add( item.copy( ) )\n\n def toXML_strList( self, indent = '', **kwargs ) :\n\n indent2 = indent + kwargs.get( 'incrementalIndent', ' ' )\n\n XMLStringList_subs = self.__decayModes.toXML_strList( indent = indent2, **kwargs )\n XMLStringList_subs += self.averageEnergies.toXML_strList( indent = indent2, **kwargs )\n if( len( XMLStringList_subs ) == 0 ) : return( [] )\n\n XMLStringList = [ '%s<%s>' % ( indent, self.moniker ) ] + XMLStringList_subs\n XMLStringList[-1] += '' % self.moniker\n\n return( XMLStringList )\n\n def parseNode(self, element, xPath, linkData, **kwargs):\n\n xPath.append( element.tag )\n\n children = { 'decayModes' : self.decayModes, 'averageEnergies' : self.averageEnergies }\n\n for child in element :\n if( child.tag in children ) :\n children[child.tag].parseNode(child, xPath, linkData, **kwargs)\n else:\n if not self.parseExtraXMLElement(child, xPath, 
\n        xPath.pop()\n\n        return self\n","sub_path":"PoPs/decays/decayData.py","file_name":"decayData.py","file_ext":"py","file_size_in_byte":14394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"72147456","text":"from django.shortcuts import render, redirect\nfrom django.views.generic import TemplateView\nfrom .models import Section, Article, SectionMember\nfrom .forms import ArticleForm, SectionMemberForm\n\n\ndef index(request, *args, **kwargs):\n    if \"section_id\" in kwargs:\n        sec_members = SectionMember.objects.filter(section__id=kwargs[\"section_id\"])\n        articles = [sm.article for sm in sec_members]\n    else:\n        articles = Article.objects.all()\n\n    data = {\"sections\": Section.objects.all(), \"articles\": articles}\n    return render(request, 'news/news_list.html', context=data)\n\n\nclass ArticleCreateView(TemplateView):\n\n    def get(self, request, *args, **kwargs):\n        data = {\"form\": ArticleForm(sections=Section.objects.all())}\n        return render(request, 'news/news_create.html', context=data)\n\n    def post(self, request, *args, **kwargs):\n        form = ArticleForm(request.POST, request.FILES)\n\n        if form.is_valid():\n            title = form.cleaned_data['title']\n            text = form.cleaned_data['text']\n            pub_date = form.cleaned_data['pub_date']\n            additional_sections = form.cleaned_data['additional_sections']\n            main_section = form.cleaned_data['main_section']\n            image = form.cleaned_data['image']\n\n            article = Article(title=title, text=text, pub_date=pub_date, image=image)\n            article.save()\n\n            for sec in main_section:\n                sec_member = SectionMember(section=Section.objects.get(pk=sec),\n                                           main=True,\n                                           article=article\n                                           )\n                sec_member.save()\n\n            for sec in additional_sections:\n                sec_member = SectionMember(section=Section.objects.get(pk=sec),\n                                           main=False,\n                                           article=article\n                                           )\n                sec_member.save()\n\n            return redirect(\"home\")\n\n        # the invalid-form branch must also return a response\n        return render(request, 'news/news_create.html', context={'form': form})\n\n\nclass SectionMemberCreateView(TemplateView):\n\n    def get(self, request, *args, **kwargs):\n        data = {\"form\": SectionMemberForm(articles=Article.objects.all(), sections=Section.objects.all())}\n        return render(request, 'news/section_member_add.html', context=data)\n\n    def post(self, request, *args, **kwargs):\n        form = SectionMemberForm(request.POST, request.FILES)\n\n        if form.is_valid():\n            article = form.cleaned_data['article']\n            main = form.cleaned_data['main']\n            section = form.cleaned_data['section']\n\n            sec_member = SectionMember(article=Article.objects.get(pk=article),\n                                       main=main,\n                                       section=Section.objects.get(pk=section)\n                                       )\n            sec_member.save()\n\n            return redirect(\"home\")\n\n        return render(request, 'news/section_member_add.html', context={'form': form})","sub_path":"databases_2/mtm_relations/news/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"149789747","text":"# make a simple message using your name and age\n\n# %-formatting (this file demonstrates %-style placeholders, not actual f-strings)\nname = input(\"name : \")\nage = int(input(\"age : \"))\nmsg = \"my name is %s, I am %s years old.\"%(name,age)\nprint(msg)\n\n\"\"\"\nNOTES\n* use \"%s\" as a placeholder wherever you want to insert a value\n* to insert several values, use the following syntax:\n\t%(var1,var2,var3)\n* do not use a comma between the string and\n\t%(v,v,v).\n\"\"\"\n","sub_path":"Python/Basics/DataTypes/strings/f_string.py","file_name":"f_string.py","file_ext":"py","file_size_in_byte":374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
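A detail worth calling out in view code like ArticleCreateView above: every branch of `post()` must return an HttpResponse, which is why the invalid-form path ends in `return render(...)`. A hedged sketch of that shape (the form class and template path here are placeholders, not the app's real names):

```python
# Sketch of the usual valid/invalid shape for a TemplateView post() handler.
from django import forms
from django.shortcuts import redirect, render
from django.views.generic import TemplateView

class SomeForm(forms.Form):
    """Placeholder form standing in for ArticleForm."""
    title = forms.CharField()

class ExampleCreateView(TemplateView):
    def post(self, request, *args, **kwargs):
        form = SomeForm(request.POST, request.FILES)
        if form.is_valid():
            # ... create model instances here, as in the view above ...
            return redirect("home")
        # Falling through without `return` makes Django raise
        # "didn't return an HttpResponse", so re-render the bound form:
        return render(request, 'news/news_create.html', context={'form': form})
```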
 +{"seq_id":"336192893","text":"import os\n\ndef clean(d):\n    i=0\n    listing = os.listdir(d+\"/\")\n    for file in listing:\n        #print(file)\n        # crude per-character matcher for \".npy\"/\".dat\"-style names;\n        # the parentheses matter because `and` binds tighter than `or`\n        for k in file:\n            if k==\".\":\n                i=1\n            if i>=1 and (k==\"n\" or k==\"d\"):\n                i=i+1\n            if i>=1 and (k==\"p\" or k==\"a\"):\n                i=i+1\n            if i>=1 and (k==\"y\" or k==\"t\"):\n                i=i+1\n            if i==4:\n                os.remove(\"/home/pi/AI_attendence/\"+d+\"/\"+file)\n                #print(\"file removed\")\n                i=0\n","sub_path":"remove.py","file_name":"remove.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"607661518","text":"# -*- coding: utf-8 -*-\n\nimport nltk\nfrom nltk.tokenize import sent_tokenize\nimport sys\n\ndef main(file_name):\n\n    #print(\"\\nStarting preprocessing\\n\")\n\n    count_new_files = 0\n    input_count = 1\n    #input_limit = 50\n\n    condition = True\n\n    #try:\n    while condition:\n        try:\n            #print(\"First while\")\n            #v2_200_news/#0_news/Startup_'+str(input_count)+'.txt'\n            #print(file)\n            #documents = open(str(file)+'/sentence_'+str(input_count)+'.txt', encoding='utf-8').read()\n            documents = open('0_exemplos/'+str(file_name)+str(input_count)+'.txt', encoding='utf-8').read()\n            sentencas = sent_tokenize(documents, language='portuguese')\n\n            qtd_sentencas = 0\n\n            for sentenca in sentencas:\n                qtd_sentencas = qtd_sentencas +1\n\n            #print(str(qtd_sentencas))\n            output_limit = qtd_sentencas\n            output_count = 1\n\n            while output_count <= output_limit:\n                #print(\"Second while\")\n                output_file = open('1_pre_processamento/outputs/sentence_breaker_output/sentence_'+str(input_count)+'_'+str(output_count)+'.txt', 'w', encoding='utf-8')\n                output_file.write(sentencas[output_count-1])\n\n                output_count += 1\n                count_new_files += 1\n\n            input_count += 1\n\n        except Exception as name:\n            #print(\"\\nMain SB Exception: \"+str(name)+\"\\n\")\n            condition = False\n\nif __name__ == \"__main__\":\n\n    try:\n        file_adress = sys.argv[1]\n        #print(file_adress)\n        main(file_adress)\n\n    except IndexError:\n        print(\"\\nThe attached file is wrong\\n\")\n\n    except Exception as name:\n        print(\"\\nSB Exception: \"+str(name))\n","sub_path":"RelpPlus_1_0_0/1_pre_processamento/sentence_breaker.py","file_name":"sentence_breaker.py","file_ext":"py","file_size_in_byte":1793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
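The extension check in remove.py above needs its parentheses because `and` binds tighter than `or`; `str.endswith` with a tuple of suffixes is the idiomatic replacement for the per-character counter. A short sketch (the directory layout is hypothetical):

```python
import os

def clean(directory):
    # endswith accepts a tuple of suffixes, replacing the character counter
    for name in os.listdir(directory):
        if name.endswith(('.npy', '.dat')):
            os.remove(os.path.join(directory, name))

# Why the parentheses in the fixed version matter:
# `i >= 1 and k == "n" or k == "d"` parses as
# `(i >= 1 and k == "n") or k == "d"`, so an unparenthesized version
# matched any "d" even before a dot had been seen.
```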
 +{"seq_id":"188929677","text":"from matplotlib.pylab import date2num\nimport matplotlib.pyplot as plt\nimport matplotlib.finance as mpf\nimport tushare as ts\nimport datetime,pymysql\nfrom matplotlib.dates import DateFormatter, WeekdayLocator,DayLocator, MONDAY\n\ndef get_gu_piao_tu(gu_piao,money_list,chigu_data):\n    hist_data=ts.get_k_data(gu_piao,start='2017-07-01')\n    # convert the data fetched from tushare into the format candlestick_ohlc() can read\n    k_data_list = []\n    k_time_list = []\n    k_money_list = []\n    trade_day=[]\n    for dates,row in hist_data.iterrows():\n        # convert the date to a matplotlib date number\n        date_time = datetime.datetime.strptime(row[0],'%Y-%m-%d')\n        trade_day.append(date_time)\n        t = date2num(date_time)\n        open,close,high,low = row[1:5]\n        datas = (t,open,high,low,close)\n        k_data_list.append(datas)\n        k_time_list.append(t)\n        k_money_list.append(row[5])\n\n\n    # create subplots\n    ax1=plt.subplot2grid((4,1),(0,0),rowspan=3)\n    ax2=plt.subplot2grid((4,1),(3,0))\n\n\n    # show grid\n    ax1.grid()\n    ax1.set_title(\"代码:%sK线图\" %gu_piao,fontsize=25)\n    ax1.set_ylabel(\"股价(元)\",fontsize=20)\n    mpf.candlestick_ohlc(ax1,k_data_list,width=0.5,colorup='r',colordown='green',alpha=1)\n    plt.xticks(fontsize=20)\n    plt.yticks(fontsize=20)\n\n    money_in_time=[]\n    # originally intended for a scatter plot; no longer used\n    money_in_height=[]\n    money_in_s=[]\n    money_out_time=[]\n    money_out_height=[]\n    money_out_s=[]\n    for i in money_list:\n        if i[2]>0:\n            money_in_time.append(i[0])\n            money_in_height.append(i[1])\n            money_in_s.append(i[2])\n        else:\n            money_out_time.append(i[0])\n            money_out_height.append(i[1])\n            money_out_s.append(-i[2])\n\n    ax3=ax1.twinx()\n    ax3.set_ylabel(\"持股市值(元)\",fontsize=20)\n    ax3.plot(chigu_data[0],chigu_data[1],color='orange',alpha=.5,label='in')\n    ax3.legend(fontsize=20)\n    # plt.xticks(fontsize=20)\n    # plt.yticks(fontsize=20)\n\n    ax2.xaxis_date()\n    ax2.bar(k_time_list,k_money_list)\n    plt.xticks(fontsize=20)\n    plt.yticks(fontsize=20)\n    ax4=ax2.twinx()\n    ax4.set_ylabel(\"成交量(元)\",fontsize=20)\n    ax4.bar(money_out_time,money_out_s,color='k',alpha=1)\n    ax4.bar(money_in_time,money_in_s,color='orange',alpha=.5)\n\n    # show a major x-axis label on each Monday\n    mondays = WeekdayLocator(MONDAY)\n    ax4.xaxis.set_major_locator(mondays)\n    # one minor tick per day\n    alldays = DayLocator()\n    ax4.xaxis.set_minor_locator(alldays)\n    # display format of the x-axis labels\n    weekFormatter = DateFormatter('%Y-%m-%d')\n    ax4.xaxis.set_major_formatter(weekFormatter)\n    plt.xticks(fontsize=20)\n    plt.yticks(fontsize=20)\n    # needed so Chinese labels render correctly\n    plt.rcParams['font.sans-serif']=['SimHei']\n    # needed so minus signs render correctly\n    plt.rcParams['axes.unicode_minus']=False\n    # control the width/height spacing between subplots\n    plt.subplots_adjust(wspace=0,hspace=0)\n\n    # attach numeric labels to the inflow/outflow points\n    for m in money_list:\n        plt.text(m[0], m[2], '%.0f' % m[2], ha='center', va= 'bottom',fontsize=17)\n    plt.show()\n\ndef get_one(gu_piao,date):\n    sql='SELECT mai_chu_er,mai_ru_er,date,shou_pan_jia FROM shen_zheng_gushi.shen_gutong where dai_ma=%s AND date>=%s;'\n    cur.execute(sql,(gu_piao,date))\n    result=cur.fetchall()\n    money_data_list=[]\n    for i in result:\n        t = date2num(i[2])\n        jin_er_du=i[1]-i[0]\n        shou_pan_jia=i[3]\n        money_data_list.append((t,shou_pan_jia,jin_er_du))\n    return money_data_list\n\ndef get_chigu(gupiao,date):\n    if gupiao[0]=='6':\n        sql='SELECT date,chi_gu_shizhi,chi_gu_bi_percent FROM chigu.shang_zheng where dai_ma=%s AND date>=%s;'\n        cur.execute(sql,(gupiao,date))\n        result=cur.fetchall()\n        chigu_percent=[]\n        chigu_shizhi=[]\n        chigu_time=[]\n        for i in result:\n            chigu_time.append(i[0])\n            chigu_shizhi.append(i[1])\n            chigu_percent.append(i[2])\n        chigu_data=[]\n        chigu_data.append(chigu_time)\n        chigu_data.append(chigu_shizhi)\n        return chigu_data\n    if gupiao[0]!='6':\n        sql='SELECT date,chi_gu_shizhi,chi_gu_bi_percent FROM chigu.shen_zheng where dai_ma=%s;'\n        cur.execute(sql,(gupiao))\n        result=cur.fetchall()\n        chigu_percent=[]\n        chigu_shizhi=[]\n        chigu_time=[]\n        for i in result:\n            chigu_time.append(i[0])\n            chigu_shizhi.append(i[1])\n            chigu_percent.append(i[2])\n        chigu_data=[]\n        chigu_data.append(chigu_time)\n        chigu_data.append(chigu_shizhi)\n        return chigu_data\n\n\nif __name__ == '__main__':\n    money_in_list=[]\n    money_out_list=[]\n    # 002415 Hikvision, 000651 Gree, 000333 Midea\n    gu_piao='000651'\n    date='2017-07-04'\n    # try:\n    conn=pymysql.connect(host='localhost',user='root',password='java',db='shen_zheng_gushi',port=3306,charset='utf8')\n    cur=conn.cursor()\n    money_list=get_one(gu_piao,date)\n    chigu_data=get_chigu(gu_piao,date)\n    get_gu_piao_tu(gu_piao,money_list,chigu_data)\n    cur.close()  # release the cursor\n    conn.close()  # release the connection\n    # except Exception as e:\n    #     print(e)\n# 139\t000333\t美的集团\t2017-07-20\n# 
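One caveat on the imports above: matplotlib.finance was deprecated and later removed from matplotlib, and the candlestick helpers now live in the separate mplfinance package. A rough sketch of the same chart on the maintained API, assuming the tushare frame has date/open/close/high/low/volume columns as used above:

```python
# Sketch only: mplfinance replaces the removed matplotlib.finance module.
import pandas as pd
import mplfinance as mpf

def plot_candles(hist_data: pd.DataFrame):
    df = hist_data.rename(columns=str.capitalize)  # open -> Open, etc.
    df.index = pd.to_datetime(hist_data['date'])
    mpf.plot(df[['Open', 'High', 'Low', 'Close', 'Volume']],
             type='candle', volume=True)
```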
137\t000858\t五粮液\t2017-07-20\n# 136\t000651\t格力电器\t2017-07-20\n# 136\t002415\t海康威视\t2017-07-20\n# 76\t002304\t洋河股份\t2017-07-12\n# 54\t000725\t京东方A\t2017-07-18\n# 50\t000538\t云南白药\t2017-07-19\n# 34\t002008\t大族激光\t2017-07-13\n# 33\t000423\t东阿阿胶\t2017-07-19\n# 32\t002508\t老板电器\t2017-07-14\n# 31\t000001\t平安银行\t2017-07-20\n# 29\t001979\t招商蛇口\t2017-07-13\n# 28\t002236\t大华股份\t2017-07-20\n# 27\t002027\t分众传媒\t2017-07-07\n# 22\t002241\t歌尔股份\t2017-07-20\n# 22\t000568\t泸州老窖\t2017-07-12\n# 15\t000002\t万科A\t2017-07-20\n# 15\t002230\t科大讯飞\t2017-07-20\n# 14\t300433\t蓝思科技\t2017-07-20","sub_path":"chigu_matplotlib.py","file_name":"chigu_matplotlib.py","file_ext":"py","file_size_in_byte":5904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"586411289","text":"\"\"\"Put all samples in one file.\"\"\"\nimport csv, sys\n\ndef main(args):\n with open(args['outFile'], 'w') as fout:\n ls = ('varKey', 'posKey', 'matchTumorYes', 'matcthTumorNo', 'matchTumorNoCall')\n print('\\t'.join(ls), file=fout)\n for afile in args['statusFiles']:\n with open(afile) as f:\n sample = afile.split('/')[-1].split('.')[0]\n reader = csv.DictReader(f, delimiter='\\t')\n for row in reader:\n posKey = row['var']\n inTumor = row['inTumor']\n varKey = posKey + ':' + sample + ':' + row['alt']\n matchTumorYes = 0\n matchTumorNo = 0\n matchTumorNoCall = 0\n if inTumor == 'NA':\n matchTumorNoCall = 1\n elif inTumor == '1':\n matchTumorYes = 1\n elif inTumor == '0':\n matchTumorNo = 1\n else:\n print(afile)\n print(row)\n i = 1/0\n ls = ( varKey, posKey, str(matchTumorYes),\n str(matchTumorNo), str(matchTumorNoCall) )\n print('\\t'.join(ls), file=fout)\n\nif __name__ == \"__main__\":\n desc = \"\"\"Put tumor confirmed and tumorNoCall cols in one file across samples.\"\"\"\n outFile = sys.argv[-1]\n statusFiles = sys.argv[1:-1]\n args = {'outFile':outFile,\n 'statusFiles':statusFiles}\n main(args)\n","sub_path":"code/collapseTumorStatus.py","file_name":"collapseTumorStatus.py","file_ext":"py","file_size_in_byte":1567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"376676411","text":"from typing import Any, Dict, List\n\nfrom django.db import DEFAULT_DB_ALIAS, connections\nfrom django.test.utils import CaptureQueriesContext\n\nfrom openslides.core.config import config\nfrom openslides.users.models import User\n\n\nclass TConfig:\n \"\"\"\n Cachable, that fills the cache with the default values of the config variables.\n \"\"\"\n\n def get_collection_string(self) -> str:\n return config.get_collection_string()\n\n def get_elements(self) -> List[Dict[str, Any]]:\n elements = []\n config.key_to_id = {}\n for id, item in enumerate(config.config_variables.values()):\n elements.append({'id': id+1, 'key': item.name, 'value': item.default_value})\n config.key_to_id[item.name] = id+1\n return elements\n\n async def restrict_elements(\n self,\n user_id: int,\n elements: List[Dict[str, Any]]) -> List[Dict[str, Any]]:\n return elements\n\n\nclass TUser:\n \"\"\"\n Cachable, that fills the cache with the default values of the config variables.\n \"\"\"\n\n def get_collection_string(self) -> str:\n return User.get_collection_string()\n\n def get_elements(self) -> List[Dict[str, Any]]:\n return [\n {'id': 1, 'username': 'admin', 'title': '', 'first_name': '',\n 'last_name': 'Administrator', 'structure_level': '', 'number': '', 'about_me': '',\n 'groups_id': [4], 'is_present': False, 'is_committee': False, 'email': '',\n 'last_email_send': None, 
'comment': '', 'is_active': True, 'default_password': 'admin',\n 'session_auth_hash': '362d4f2de1463293cb3aaba7727c967c35de43ee'}]\n\n async def restrict_elements(\n self,\n user_id: int,\n elements: List[Dict[str, Any]]) -> List[Dict[str, Any]]:\n return elements\n\n\ndef count_queries(func, *args, **kwargs) -> int:\n context = CaptureQueriesContext(connections[DEFAULT_DB_ALIAS])\n with context:\n func(*args, **kwargs)\n\n print(\"%d queries executed\\nCaptured queries were:\\n%s\" % (\n len(context),\n '\\n'.join(\n '%d. %s' % (i, query['sql']) for i, query in enumerate(context.captured_queries, start=1))))\n return len(context)\n","sub_path":"tests/integration/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":2221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"536105919","text":"# Author : Sky\n# @Time : 9/1/20 3:34 下午\n# @Site :\n# @File : 20200901-test.py\n# @Software: PyCharm\n# -*- coding: utf-8 -*-\nimport redis\nimport json,sys,os\nimport time\nimport socket\nimport subprocess\nimport requests\nhostName = socket.gethostname()\n#uname = hostName.split(\".\")\nuname = hostName.split(\".\")[0].split('ip-')[1].replace('-','.')\nr = redis.Redis(host=uname, port=6379,db=0,decode_responses=True)\ncmd = 'ps -fe | grep tail | grep -v \"grep\"'\na = os.popen(cmd) # 返回一个对象\ntxt = a.readlines()\nif len(txt) != 0:\n for lin in txt:\n lin_ = lin.split()\n pid = lin_[1]\n cmd = 'kill -9 %d' % (int(pid))\n rc = os.system(cmd)\nclass RedisHelper:\n def __init__(self):\n self.__conn = r\n self.chan_sub = 'fm104.5'\n self.chan_pub = 'fm104.5'\n\n def public(self, msg):\n self.__conn.publish(self.chan_pub, msg)\n return True\n\n def subscribe(self):\n pub = self.__conn.pubsub()\n pub.subscribe(self.chan_sub)\n pub.parse_response()\n return pub\n\nobj = RedisHelper()\n\ndef stat_logs(*args):\n bad_list = []\n ip_list = []\n local_time = time.time()\n popen = subprocess.Popen('tail -f ' + '/opt/lnmp/nginx/logs/webUI_'+sys.argv[1]+\"_greate_10\"+\".log\", stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)\n pid = popen.pid\n print('Popen.pid:' + str(pid))\n while True:\n line = popen.stdout.readline().strip()\n if line:\n str_line = bytes.decode(line)\n dict_line = eval(str_line)\n #print(dict_line)\n if dict_line['upstream_response_time'] != '-':\n new_list = dict_line['upstream_response_time'].split(',')\n # print(new_list)\n # print(int(float(new_list[0])))\n try:\n if len(new_list) > 1:\n new_addr = dict_line['upstream_addr'].split(',')\n new_dict = dict(zip(new_addr,new_list))\n print(new_list)\n for k,v in new_dict.items():\n if float(v) > 50:\n obj.public(k)\n time.sleep(50)\n continue\n elif float(new_list[0]) > 50:\n obj.public(dict_line['upstream_addr'])\n time.sleep(50)\n continue\n except Exception as e:\n print(e)\n continue\nstat_logs(sys.argv[1])\n","sub_path":"检查nginx_后端大于20秒/检查大于20秒.py","file_name":"检查大于20秒.py","file_ext":"py","file_size_in_byte":2558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"364584383","text":"__author__ = 'Steve'\n\nfrom auto.ZumayPage import search_page\nfrom auto.Instance import set_instance, get_object_form_csv, dict_iterator, get_object\nfrom auto.BasePage import enter_page\n\ndef user_applies_jobs(user):\n applicant = get_object_form_csv(user_name=user)\n instance = set_instance()\n browser = enter_page(instance)\n browser.login(applicant)\n browser = search_page(instance)\n\n for raw_job in 
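The RedisHelper class in the log-monitoring script above is a thin wrapper over redis-py's pub/sub; the same publish/subscribe round trip fits in a few lines. A self-contained sketch (the host and message payload are placeholders, the channel name is taken from the script):

```python
import redis

r = redis.Redis(host='localhost', port=6379, db=0, decode_responses=True)

pubsub = r.pubsub()
pubsub.subscribe('fm104.5')              # channel name from the script above

r.publish('fm104.5', 'slow upstream: 10.0.0.5:8080')   # placeholder payload

for message in pubsub.listen():          # blocks; first item is the subscribe ack
    if message['type'] == 'message':
        print('received:', message['data'])
        break
```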
dict_iterator(\"demo_jobcampains.csv\"):\n job = get_object(raw_job)\n browser.search_job(job)\n browser.apply_to_job(job)\n browser.close()\n\ndef users_apply_job(campaign_name):\n job_campaign = get_object_form_csv(file_name=\"demo_jobcampains.csv\", user_name=campaign_name)\n instance = set_instance()\n browser = enter_page(instance)\n\n for raw_applicant in dict_iterator():\n applicant = get_object(raw_applicant)\n if applicant.role == \"jobseeker\":\n browser = enter_page(instance)\n browser.login(applicant)\n browser = search_page(instance)\n browser.search_job(job_campaign)\n browser.apply_to_job(job_campaign)\n browser.logout()\n browser.close()\n\ndef load_user_instance(loop_counter):\n\n for raw in dict_iterator(csv_file= \"demo_jobseekers.csv\"):\n user = get_object(raw)\n x = set_instance()\n browser = enter_page(x)\n browser.login(user)\n loop_counter = loop_counter -1\n if loop_counter <= 0:\n break\n\nif __name__ == '__main__':\n # user_applies_jobs(\"Robert\")\n users_apply_job(\"Sr. Programmer\")\n # load_user_instance(3)","sub_path":"tests/processes.py","file_name":"processes.py","file_ext":"py","file_size_in_byte":1608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"27067481","text":"from django.contrib import admin\nfrom .models import Charity\n\n\n# Register your models here.\nclass CharityAdmin(admin.ModelAdmin):\n list_display = (\n 'name',\n 'description',\n 'url',\n 'image'\n )\n\n ordering = ('name',)\n\n\nadmin.site.register(Charity, CharityAdmin)\n","sub_path":"charities/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"90878197","text":"###################################\n# Script : \n# 1) Contains class to write dictionary\n# for jinja2/write HTML files/PDF files\n# and supplementary table\n#\n# ganesans - Salilab - UCSF\n# ganesans@salilab.org\n###################################\n\nimport pytz\nimport jinja2\nimport pandas as pd\nimport sys,os,glob\nimport numpy as np\nimport validation\nfrom validation import excludedvolume,get_input_information\nfrom validation import molprobity\nfrom validation import get_plots,sas,sas_plots\nfrom validation import cx,cx_plots\n#import pdfkit\nimport datetime,time\nimport pickle\nfrom multiprocessing import Process, Queue, Pool, Manager\nfrom collections import Counter\nimport argparse\nimport json\nfrom validation import utility\n\nclass WriteReport(object):\n\tdef __init__(self,mmcif_file):\n\t\tself.mmcif_file = mmcif_file\n\t\tself.I=get_input_information(self.mmcif_file)\n\n\tdef run_entry_composition(self,Template_Dict):\n\t\t'''\n\t\tget entry composition, relies on IHM library\n\t\t'''\n\t\tstart=time.process_time()\n\t\tname=self.mmcif_file.split('.')[0].split('_')[0]\n\t\tif 
self.I.get_ensembles():\n\t\t\tensemble_info=utility.dict_to_JSlist(self.I.get_ensembles())\n\t\telse:\n\t\t\tensemble_info=None\n\t\tTemplate_Dict['ensemble_info']=ensemble_info\n\t\tTemplate_Dict['sphere']=self.I.check_sphere()\n\t\tTemplate_Dict['num_ensembles']=self.I.check_ensembles()\n\t\tRB,flex,RB_nos,all_nos=self.I.get_RB_flex_dict()\n\t\tTemplate_Dict['Rigid_Body']=RB_nos\n\t\tTemplate_Dict['Flexible_Unit']=all_nos-RB_nos\n\t\tTemplate_Dict['RB_list']=utility.dict_to_JSlist_rows(RB,flex)\n\t\tTemplate_Dict['RB']=utility.get_RB(utility.dict_to_JSlist_rows(RB,flex))\n\t\tTemplate_Dict['flex']=utility.get_flex(utility.dict_to_JSlist_rows(RB,flex))\n\t\tTemplate_Dict['ID']=self.I.get_id()\n\t\tTemplate_Dict['ID_w']=self.I.get_id().split()\n\t\tTemplate_Dict['ID_T']=self.I.get_id()[0:6]+'_'+self.I.get_id()[6:]\n\t\tTemplate_Dict['ID_R']=(self.I.get_id()[0:6]+'_'+self.I.get_id()[6:]).split()\n\t\tTemplate_Dict['Molecule']=self.I.get_struc_title()\n\t\tTemplate_Dict['Title']=self.I.get_title()\n\t\tTemplate_Dict['Authors']=self.I.get_authors()\n\t\tTemplate_Dict['Entry_list']=utility.dict_to_JSlist(self.I.get_composition())\n\t\tTemplate_Dict['number_of_molecules']=self.I.get_number_of_models()\n\t\tTemplate_Dict['model_names']=self.I.get_model_names()\n\t\tTemplate_Dict['number_of_software']=self.I.get_software_length()\n\t\tTemplate_Dict['soft_list']=utility.dict_to_JSlist(self.I.get_software_comp())\n\t\tTemplate_Dict['number_of_datasets']=self.I.get_dataset_length()\n\t\tTemplate_Dict['Data']=[i.upper() for i in list(set(self.I.get_dataset_comp()['Dataset type']).difference({'Experimental model','Comparative model'}))]\n\t\tTemplate_Dict['Datasets_list']=utility.dict_to_JSlist(self.I.get_dataset_comp())\n\t\tTemplate_Dict['Protocols_number']=self.I.get_protocol_number()\n\t\tTemplate_Dict['Sampling_list']=utility.dict_to_JSlist(self.I.get_sampling())\n\t\tTemplate_Dict['num_chains']=int(len(self.I.get_composition()['Chain ID']))/int(len(list(Counter(self.I.get_composition()['Model ID']).keys())))\n\t\treturn Template_Dict\n\n\tdef run_model_quality(self,Template_Dict):\n\t\t'''\n\t\tget excluded volume for multiscale models\n\t\tget molprobity info for atomic models\n\t\texception: models with DNA--we need a way to assess models with DNA\n\t\t'''\n\t\tif self.I.check_sphere()<1:\n\t\t\t#global clashscore; global rama; global sidechain;\n\t\t\texv_data=None\n\t\t\tI_mp=molprobity.get_molprobity_information(self.mmcif_file)\n\t\t\tif I_mp.check_for_molprobity():\n\t\t\t\tfilename = os.path.abspath(os.path.join(os.getcwd(), 'static/results/',str(Template_Dict['ID'])+'_temp_mp.txt'))\n\t\t\t\tprint (filename)\n\t\t\t\tif os.path.exists(filename):\n\t\t\t\t\td_mp={}\n\t\t\t\t\tprint (\"Molprobity analysis file already exists...\\n...assuming clashscores, Ramachandran and rotamer outliers have already been calculated\")\n\t\t\t\t\twith open(filename,'rb') as fp:\n\t\t\t\t\t\td_mp['molprobity']=pickle.load(fp)\n\t\t\t\t\tf_rota=os.path.abspath(os.path.join(os.getcwd(), 'static/results/',str(Template_Dict['ID'])+'_temp_rota.txt'))\n\t\t\t\t\twith open(f_rota,'rb') as fp:\n\t\t\t\t\t\td_mp['rota']=pickle.load(fp)\n\t\t\t\t\tf_rama=os.path.abspath(os.path.join(os.getcwd(), 'static/results/',str(Template_Dict['ID'])+'_temp_rama.txt'))\n\t\t\t\t\twith open(f_rama,'rb') as fp:\n\t\t\t\t\t\td_mp['rama']=pickle.load(fp)\n\t\t\t\t\tf_clash=os.path.abspath(os.path.join(os.getcwd(), 'static/results/',str(Template_Dict['ID'])+'_temp_clash.txt'))\n\t\t\t\t\twith open(f_clash,'rb') as 
fp:\n\t\t\t\t\t\td_mp['clash']=pickle.load(fp)\n\t\t\t\telse:\n\t\t\t\t\tprint (\"Molprobity analysis is being calculated...\")\n\t\t\t\t\tmanager = Manager()\n\t\t\t\t\td_mp=manager.dict()\n\t\t\t\t\tutility.runInParallel(I_mp.run_clashscore(d_mp),I_mp.run_ramalyze(d_mp),I_mp.run_rotalyze(d_mp),I_mp.run_molprobity(d_mp))\n\t\t\t\ta,b=I_mp.process_molprobity(d_mp['molprobity'])\n\t\t\t\tTemplate_Dict['bond']=len(a); Template_Dict['angle']=len(b)\n\t\t\t\tglobal clashscore;global rama;global sidechain\n\t\t\t\tprint (I_mp.get_data_for_quality_at_glance(d_mp['molprobity']))\n\t\t\t\tclashscore,rama,sidechain=I_mp.get_data_for_quality_at_glance(d_mp['molprobity'])\n\t\t\t\tTemplate_Dict['molp_b']=utility.dict_to_JSlist(I_mp.molprobity_detailed_table_bonds(a))\n\t\t\t\tTemplate_Dict['molp_a']=utility.dict_to_JSlist(I_mp.molprobity_detailed_table_angles(b))\n\t\t\t\tTemplate_Dict['rotascore']=utility.dict_to_JSlist(I_mp.rota_summary_table(I_mp.process_rota(d_mp['rota'])))\n\t\t\t\tTemplate_Dict['rotalist']=utility.dict_to_JSlist(I_mp.rota_detailed_table(I_mp.process_rota(d_mp['rota'])))\n\t\t\t\tTemplate_Dict['ramascore']=utility.dict_to_JSlist(I_mp.rama_summary_table(I_mp.process_rama(d_mp['rama'])))\n\t\t\t\tTemplate_Dict['ramalist']=utility.dict_to_JSlist(I_mp.rama_detailed_table(I_mp.process_rama(d_mp['rama'])))\n\t\t\t\tclashscores,Template_Dict['tot']=I_mp.clash_summary_table(d_mp['clash'])\n\t\t\t\tTemplate_Dict['clashscore_list']=utility.dict_to_JSlist(clashscores)\n\t\t\t\tTemplate_Dict['clashlist']=I_mp.clash_detailed_table(d_mp['clash'])\n\t\t\t\tTemplate_Dict['assess_atomic_segments']='Clashscore: '+ str(clashscore) + ', Ramachandran outliers: '+ str(rama)+ '%, Sidechain outliers: '+str(sidechain)+'%'\n\t\t\t\tTemplate_Dict['assess_excluded_volume']=['Not applicable']\n\t\t\telse:\n\t\t\t\tif I_mp.check_for_molprobity()==False:\n\t\t\t\t\tself.I.rewrite_mmcif()\n\t\t\t\t\tI_mp=molprobity.get_molprobity_information('test.cif')\n\t\t\t\t\tprint (\"file rewritten\")\n\t\t\t\tif I_mp.check_for_molprobity():\n\t\t\t\t\tprint (\"Molprobity analysis is being calculated...\")\n\t\t\t\t\tmanager = Manager()\n\t\t\t\t\td_mp=manager.dict()\n\t\t\t\t\ttry:\n\t\t\t\t\t\tutility.runInParallel(I_mp.run_clashscore(d_mp),I_mp.run_ramalyze(d_mp),I_mp.run_rotalyze(d_mp),I_mp.run_molprobity(d_mp))\n\t\t\t\t\t\ta,b=I_mp.process_molprobity(d_mp['molprobity'])\n\t\t\t\t\t\tTemplate_Dict['bond']=len(a); Template_Dict['angle']=len(b)\n\t\t\t\t\t\tclashscore,rama,sidechain=I_mp.get_data_for_quality_at_glance(d_mp['molprobity'])\n\t\t\t\t\t\tTemplate_Dict['molp_b']=utility.dict_to_JSlist(I_mp.molprobity_detailed_table_bonds(a))\n\t\t\t\t\t\tTemplate_Dict['molp_a']=utility.dict_to_JSlist(I_mp.molprobity_detailed_table_angles(b))\n\t\t\t\t\t\tTemplate_Dict['rotascore']=utility.dict_to_JSlist(I_mp.rota_summary_table(I_mp.process_rota(d_mp['rota'])))\n\t\t\t\t\t\tTemplate_Dict['rotalist']=utility.dict_to_JSlist(I_mp.rota_detailed_table(I_mp.process_rota(d_mp['rota'])))\n\t\t\t\t\t\tTemplate_Dict['ramascore']=utility.dict_to_JSlist(I_mp.rama_summary_table(I_mp.process_rama(d_mp['rama'])))\n\t\t\t\t\t\tTemplate_Dict['ramalist']=utility.dict_to_JSlist(I_mp.rama_detailed_table(I_mp.process_rama(d_mp['rama'])))\n\t\t\t\t\t\tclashscores,Template_Dict['tot']=I_mp.clash_summary_table(d_mp['clash'])\n\t\t\t\t\t\tTemplate_Dict['clashscore_list']=utility.dict_to_JSlist(clashscores)\n\t\t\t\t\t\tTemplate_Dict['clashlist']=I_mp.clash_detailed_table(d_mp['clash'])\n\t\t\t\t\t\tTemplate_Dict['assess_atomic_segments']='Clashscore: '+ str(clashscore) + ', Ramachandran outliers: '+ str(rama)+ '%, Sidechain outliers: '+str(sidechain)+'%'\n\t\t\t\t\t\tTemplate_Dict['assess_excluded_volume']=['Not applicable']\n
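run_model_quality above loads cached MolProbity results from pickles when they exist and recomputes them otherwise; that compute-or-load idiom generalizes cleanly. A small sketch (the cache path and the expensive function are hypothetical):

```python
import os
import pickle

def compute_or_load(cache_path, compute, *args):
    """Load a pickled result if present, otherwise compute and cache it."""
    if os.path.exists(cache_path):
        with open(cache_path, 'rb') as fp:
            return pickle.load(fp)
    result = compute(*args)
    with open(cache_path, 'wb') as fp:
        pickle.dump(result, fp)
    return result

# e.g. clash = compute_or_load('ID_temp_clash.txt', run_clash_analysis, model)
```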
\t\t\t\t\texcept:\n\t\t\t\t\t\tprint (\"Molprobity cannot be calculated...\")\n\t\t\t\t\t\tclashscore=None\n\t\t\t\t\t\trama=None\n\t\t\t\t\t\tsidechain=None\n\t\telse:\n\t\t\tTemplate_Dict['assess_atomic_segments']='Not applicable'\n\t\t\tfile=os.path.join(os.getcwd(),'Output/results',str(Template_Dict['ID'])+'exv.txt')\n\t\t\tif os.path.exists(file):\n\t\t\t\tprint (\"Excluded volume file already exists...\")\n\t\t\t\twith open(file, 'r+') as inf:\n\t\t\t\t\tline=[ln.replace('[','').replace(']','').replace(',','').split() for ln in inf.readlines()]\n\t\t\t\texv_data={'Models':line[0],'Excluded Volume Satisfaction (%)':line[1], 'Number of violations':line[2]}\n\t\t\telse:\n\t\t\t\tprint (\"Excluded volume is being calculated...\")\n\t\t\t\tI_ev=excludedvolume.get_excluded_volume(self.mmcif_file)\n\t\t\t\tmodel_dict=I_ev.get_all_spheres()\n\t\t\t\texv_data=I_ev.run_exc_vol_parallel(model_dict)\n\n\t\t\tTemplate_Dict['excluded_volume']=utility.dict_to_JSlist(exv_data)\n\t\t\tTemplate_Dict['assess_excluded_volume']=utility.exv_readable_format(exv_data)\n\t\t\tclashscore=None\n\t\t\trama=None\n\t\t\tsidechain=None\n\t\treturn Template_Dict,clashscore,rama,sidechain,exv_data\n\n\tdef run_sas_validation(self,Template_Dict):\n\t\t'''\n\t\tget sas validation information from SASCIF or JSON files\n\t\t'''\n\t\tif self.I.check_for_sas(self.I.get_dataset_comp()):\n\t\t\tTemplate_Dict['sas']=[\"True\"]\n\t\t\tI_sas=sas.sas_validation(self.mmcif_file)\n\t\t\tTemplate_Dict['p_val']=utility.dict_to_JSlist(I_sas.get_pvals())\n\t\t\tTemplate_Dict['sasdb_code']=I_sas.get_SASBDB_code()\n\t\t\ttry:\n\t\t\t\tTemplate_Dict['parameters_volume']=utility.dict_to_JSlist(I_sas.get_parameters_vol_many())\n\t\t\texcept:\n\t\t\t\tTemplate_Dict['parameters_volume']=utility.dict_to_JSlist(I_sas.get_parameters_vol_many_dep())\n\t\t\ttry:\n\t\t\t\tTemplate_Dict['parameters_mw']=utility.dict_to_JSlist(I_sas.get_parameters_mw_many())\n\t\t\texcept:\n\t\t\t\tTemplate_Dict['parameters_mw']=utility.dict_to_JSlist(I_sas.get_parameters_mw_many_dep())\n\t\t\tTemplate_Dict['pddf_info']=utility.dict_to_JSlist(I_sas.get_pddf_info())\n\t\t\tTemplate_Dict['number_of_fits']=I_sas.get_total_fits()\n\t\t\tTemplate_Dict['chi_table']=utility.dict_to_JSlist(I_sas.get_chi_table())\n\t\t\tTemplate_Dict['rg_table']=utility.dict_to_JSlist(I_sas.get_rg_table_many())\n\t\t\tTemplate_Dict['sasdb_code_fits']=I_sas.get_sasdb_code_fits()\n\t\t\tTemplate_Dict['Data_quality']=utility.get_rg_data(I_sas.get_rg_for_plot())\n\t\t\tTemplate_Dict['validation_input']=utility.get_rg_data_fits(I_sas.get_fits_for_plot())\n\t\t\tif len(Template_Dict['validation_input'])<1:\n\t\t\t\tTemplate_Dict['validation_input']=['Fit of model to data has not been deposited']\n\t\t\tsas_data=I_sas.get_rg_for_plot()\n\t\t\tsas_fit=I_sas.get_fits_for_plot()\n\t\telse:\n\t\t\tsas_data={}\n\t\t\tsas_fit={}\n\t\treturn Template_Dict,sas_data,sas_fit\n\n\tdef run_sas_validation_plots(self,Template_Dict):\n\t\t'''\n\t\tget sas validation plots from SASCIF or JSON files\n\t\t'''\n\t\tif 
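The excluded-volume branch above builds its cache path with os.path.join because plain concatenation with os.getcwd() drops the separator. For example:

```python
import os

# Concatenation silently omits the separator:
#   os.getcwd() + 'Output/results/x.txt'  ->  '/homeOutput/results/x.txt'
# os.path.join inserts it and is portable across platforms:
path = os.path.join(os.getcwd(), 'Output', 'results', 'ID_exv.txt')
print(path)
```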
self.I.check_for_sas(self.I.get_dataset_comp()):\n\t\t\tTemplate_Dict['sas']=[\"True\"]\n\t\t\tI_sas=sas.sas_validation(self.mmcif_file)\n\t\t\ttry:\n\t\t\t\tI_sas_plt=validation.sas_plots.sas_validation_plots(self.mmcif_file)\n\t\t\t\tI_sas.modify_intensity()\n\t\t\t\tI_sas.get_pofr_errors()\t\n\t\t\t\tI_sas_plt.plot_multiple()\n\t\t\t\tI_sas_plt.plot_pf()\n\t\t\t\tI_sas_plt.plot_Guinier()\n\t\t\t\tif Template_Dict['number_of_fits']>0:\n\t\t\t\t\tI_sas_plt.plot_fits()\n\t\t\texcept:\n\t\t\t\tpass\n\n\tdef run_cx_validation(self,Template_Dict):\n\t\tif self.I.check_for_cx(self.I.get_dataset_comp()):\n\t\t\tTemplate_Dict['cx']=[\"True\"]\n\t\t\tI_cx=cx.cx_validation(self.mmcif_file)\n\t\t\txl_df=I_cx.get_xl_data()\n\t\t\tmodel_df=I_cx.get_df_for_models()\n\t\t\tcx_fit=I_cx.get_violation_plot(model_df)\n\t\telse:\n\t\t\tcx_fit=dict()\n\n\t\treturn cx_fit\n\n\tdef run_cx_validation_plots(self,Template_Dict):\n\t\tif self.I.check_for_cx(self.I.get_dataset_comp()):\n\t\t\tTemplate_Dict['cx']=[\"True\"]\n\t\t\tcx_plt=validation.cx_plots.cx_validation_plots(self.mmcif_file)\n\t\t\tcx_plt.make_gridplot_intra()\n\t\t\tcx_plt.make_gridplot_struc()\n\t\t\tcx_plt.plot_distributions()\n\n\n\tdef run_quality_glance(self,clashscore,rama,sidechain,exv_data,sas_data,sas_fit,cx_fit):\n\t\t'''\n\t\tget quality at glance image; will be updated as validation report is updated\n\t\t'''\n\t\tI_plt=get_plots.plots(self.mmcif_file)\n\t\tI_plt.plot_quality_at_glance(clashscore,rama,sidechain,exv_data,sas_data,sas_fit,cx_fit)\n\n\tdef run_supplementary_table(self,\n\t\t\t\t\t\t\t\tTemplate_Dict,\n\t\t\t\t\t\t\t\tlocation='N/A',\n\t\t\t\t\t\t\t\tphysics='Information about physical principles was not provided',\n\t\t\t\t\t\t\t\tmethod_details='N/A',\n\t\t\t\t\t\t\t\tsampling_validation='N/A',\n\t\t\t\t\t\t\t\tvalidation_input=['-'],\n\t\t\t\t\t\t\t\tcross_validation='N/A',\n\t\t\t\t\t\t\t\tData_quality=['-'],\n\t\t\t\t\t\t\t\tclustering='N/A',\n\t\t\t\t\t\t\t\tresolution='N/A'):\n\t\t'''\n\t\tget supplementary table, will be updated as validation report is updated\n\t\t'''\n\t\tif (self.I.get_ensembles() is not None) and (utility.all_same(self.I.get_ensembles()['Clustering method'])):\n\t\t\tTemplate_Dict['clustering']=self.I.get_ensembles()['Clustering method'][0]\n\t\telif self.I.get_ensembles() is not None:\n\t\t\tTemplate_Dict['clustering']=', '.join(self.I.get_ensembles()['Clustering method'])\n\t\telse:\n\t\t\tTemplate_Dict['clustering']='Not applicable'\n\t\tTemplate_Dict['location']=location\n\t\tTemplate_Dict['complex_name']=self.I.get_struc_title().lower()\n\t\tTemplate_Dict['PDB_ID']=self.I.get_id()\n\t\tTemplate_Dict['Subunits']=utility.get_subunits(self.I.get_composition())\n\t\tTemplate_Dict['datasets']=utility.get_datasets(self.I.get_dataset_details()) if self.I.get_dataset_details() is not None else 'Not provided or used'\n\t\tTemplate_Dict['physics']=physics\n\t\tTemplate_Dict['software']=utility.get_software(self.I.get_software_comp())+ location\n\t\tTemplate_Dict['struc']=self.I.get_atomic_coverage()\n\t\tTemplate_Dict['method']=utility.get_method_name(self.I.get_sampling())\n\t\tTemplate_Dict['method_type']=utility.get_method_type(self.I.get_sampling())\n\t\tTemplate_Dict['method_details']=method_details\n\t\tTemplate_Dict['models']=', '.join(self.I.get_ensembles()['Number of models']) if self.I.get_ensembles() is not None else 'Not applicable' \n\t\tTemplate_Dict['sampling_validation']=sampling_validation\n\t\tTemplate_Dict['feature']=self.I.get_ensembles()['Clustering feature'][0] if 
self.I.get_ensembles() is not None else 'Not applicable'\n\t\tTemplate_Dict['cross_validation']=cross_validation\n\t\tTemplate_Dict['model_precision']=', '.join([i+'Å' for i in self.I.get_ensembles()['Cluster precision']]) if self.I.get_ensembles() is not None else 'Model precision can not be calculated with one structure'\n\t\tTemplate_Dict['restraint_info']=utility.get_restraints_info(self.I.get_restraints()) if self.I.get_restraints() is not None else 'Not provided or used'\n\t\tif 'Data_quality' not in list(Template_Dict.keys()):\n\t\t\tTemplate_Dict['Data_quality']=Data_quality\n\t\tif 'validation_input' not in list(Template_Dict.keys()):\n\t\t\tTemplate_Dict['validation_input']=validation_input\t\t\n\t\tTemplate_Dict['clustering']=clustering\n\t\tTemplate_Dict['resolution']=resolution\n\t\treturn Template_Dict\n\n\n\n\n\n","sub_path":"master/pyext/src/validation/Report.py","file_name":"Report.py","file_ext":"py","file_size_in_byte":14645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"284969197","text":"#python3 TIY__Classes__9e.py\n\n#S = Solutions\n\n\n#OrderedDict Rewrite: 9-13:\n\nfrom collections import OrderedDict\n\nprogramming_words = OrderedDict()\n\nprogramming_words['list'] = 'Items enclosed in [] brackets.'\nprogramming_words['list comprehension'] = 'A simpler way to construct lists.'\nprogramming_words['tuple'] = 'List of immutable objects.'\nprogramming_words['pep 8'] = 'Style guide for Python code.'\nprogramming_words['slice'] = 'A specific position to start, stop, increment.'\nprogramming_words['slice'] += ' In lists/tuples.'\nprogramming_words['key'] = 'First part of an item in a dictionary.'\nprogramming_words['key'] += ' Coupled with value.'\nprogramming_words['value'] = 'Second part of an item in a dictionary.'\nprogramming_words['value'] += ' Coupled with key.'\nprogramming_words['pprint'] = '\"Pretty print\". 
Nicely formats a list.'\n\nfor word, definition in programming_words.items():\n\tprint (word.title() + \" = \" + definition)\n\n\n#Dice: 9-14:\n\nfrom random import randint\n\nclass Die():\n\t\"\"\"A die with a configurable number of sides.\"\"\"\n\tdef __init__(self, sides=6):\n\t\t\"\"\"Store the number of sides.\"\"\"\n\t\tself.sides = sides\n\n\tdef roll_die(self):\n\t\t\"\"\"Return a random roll between 1 and self.sides.\"\"\"\n\t\treturn randint(1, self.sides) # S (and below)\n\nd6 = Die() #Makes a 6-sided die object.\n\nresults = [] #Creates an empty list.\n\nfor roll_num in range(10):\n\tresult = d6.roll_die()\n\tresults.append(result)\n\nprint(\"\\n10 rolls of a 6-sided die: \")\nprint(results)\n\n# ^S\n\nd10 = Die(10)\n\nresults = []\n\nfor roll_num in range(10):\n\tresult = d10.roll_die()\n\tresults.append(result)\n\nprint(\"\\n10 rolls of a 10-sided die: \")\nprint(results)\n\n\nd20 = Die(20)\n\nresults = []\n\nfor roll_num in range(10):\n\tresult = d20.roll_die()\n\tresults.append(result)\n\nprint(\"\\n10 rolls of a 20-sided die: \")\nprint(results)","sub_path":"Sublime/TIY__Classes__9e.py","file_name":"TIY__Classes__9e.py","file_ext":"py","file_size_in_byte":1693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"499621035","text":"import cv2\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\nevents = [i for i in dir(cv2) if 'EVENT' in i]\n\ndef draw_circle(event, x, y, flags, param):\n    if event == cv2.EVENT_LBUTTONDBLCLK:\n        cv2.circle(img, (x, y), 100, (255, 0, 0), -1)\n\n\nimg = np.zeros((512,512,3), np.uint8)\n\ncv2.namedWindow('image')\ncv2.setMouseCallback('image', draw_circle)\n\nwhile (1):\n    cv2.imshow('image', img)\n    k = cv2.waitKey(20)\n    if k & 0xFF == 27:  # mask to a byte first, then compare with Esc\n        break\ncv2.destroyAllWindows()\n\n\n\n\n\n\"\"\"cv2.imshow('image', img)\n\nk = cv2.waitKey(0)\nif k & 0xFF == 27:\n    cv2.destroyAllWindows()\"\"\"\n","sub_path":"imagep_1/drawing_circle_double_clk.py","file_name":"drawing_circle_double_clk.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"360246081","text":"'''Django models for the tictactoe app'''\nfrom django.db import models\n\nfrom .board import Board\nfrom .strategy import RandomStrategy, MinimaxStrategy\n\n\nclass Game(models.Model):\n    '''A Tic-Tac-Toe game'''\n\n    IN_PROGRESS = 0\n    X_WINS = PLAYER_X = Board.MARK_X\n    O_WINS = PLAYER_O = Board.MARK_O\n    TIE = Board.TIE\n\n    RANDOM_STRATEGY = 0\n    MINIMAX_STRATEGY = 1\n    STRATEGIES = {\n        RANDOM_STRATEGY: RandomStrategy,\n        MINIMAX_STRATEGY: MinimaxStrategy,\n    }\n\n    state = models.IntegerField(\n        help_text='State of the board', default=0)\n    server_player = models.IntegerField(\n        help_text='Which player is the server?', default=PLAYER_X,\n        choices=((PLAYER_X, 'X'), (PLAYER_O, 'O')))\n    winner = models.IntegerField(\n        help_text='Winner of the game', default=IN_PROGRESS,\n        choices=(\n            (IN_PROGRESS, 'In Progress'),\n            (X_WINS, 'X Wins'),\n            (O_WINS, 'O Wins'),\n            (TIE, 'Tie'),\n        ))\n    strategy_type = models.IntegerField(\n        help_text=\"Server's strategy\", default=MINIMAX_STRATEGY,\n        choices=(\n            (RANDOM_STRATEGY, 'Random Strategy'),\n            (MINIMAX_STRATEGY, 'Minimax Strategy'),\n        ))\n\n    @property\n    def other_player(self):\n        '''Return the number identifying the other player'''\n        if self.server_player == self.PLAYER_X:\n            return self.PLAYER_O\n        else:\n            return self.PLAYER_X\n\n    @property\n    def board(self):\n        '''Return the current Board for the Game'''\n        return Board(state=self.state)\n\n    @board.setter\n    def board(self, value):\n        '''Set the current Board for the Game'''\n        
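With the sides argument actually passed (Die(10), Die(20)), a quick way to sanity-check the class is to histogram many rolls with collections.Counter:

```python
from collections import Counter
from random import randint

class Die:
    def __init__(self, sides=6):
        self.sides = sides

    def roll_die(self):
        return randint(1, self.sides)

counts = Counter(Die(20).roll_die() for _ in range(10_000))
assert set(counts) <= set(range(1, 21))   # all rolls fall in 1..20
print(counts.most_common(3))
```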
self.state = value.state()\n self.winner = value.winner() or self.IN_PROGRESS\n\n @property\n def strategy(self):\n '''Return the server strategy for the Game'''\n assert self.strategy_type in self.STRATEGIES\n return self.STRATEGIES[self.strategy_type]()\n","sub_path":"tictactoe/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"349448227","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n'''\nThis is the implement of Paired T testing on CIFAR-10 dataset.\n\nCopyright (c) Yiming Li, Ziqi Zhang, 2020\n'''\n\nfrom __future__ import print_function\n\nimport argparse\nimport os\nimport shutil\nimport time\nimport random\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.utils.data as data\nimport torchvision.transforms as transforms\nimport torchvision.datasets as datasets\nimport numpy as np\nfrom model import *\nfrom tools import *\nfrom scipy.stats import ttest_rel\nimport pandas as pd\n\nparser = argparse.ArgumentParser(description='PyTorch CIFAR-10')\n\nparser.add_argument('--num-img', default=100, type=int, metavar='N',\n help='number of images for testing (default: 100)')\n\nparser.add_argument('--num-test', default=100, type=int,\n help='number of T-test')\n\nparser.add_argument('--select-class', default=2, type=int,\n help='class from 0 to 43 (default: 2)')\nparser.add_argument('--target-label', default=1, type=int,\n help='the class chosen to be attacked (default: 1)')\nparser.add_argument('-j', '--workers', default=2, type=int, metavar='N',\n help='number of data loading workers (default: 2)')\nparser.add_argument('--test-batch', default=100, type=int, metavar='N',\n help='test batchsize')\nparser.add_argument('--gpu-id', default='0', type=str,\n help='id(s) for CUDA_VISIBLE_DEVICES')\n\nparser.add_argument('--model-path', default='', help='trained model path')\nparser.add_argument('--model', default='resnet', type=str,\n help='model structure (resnet or vgg)')\nparser.add_argument('--trigger', help='Trigger (image size)')\nparser.add_argument('--alpha', help='(1-Alpha)*Image + Alpha*Trigger')\nparser.add_argument('--margin', default=0.2, type=float, help='the margin in the pairwise T-test')\n\n\n\n\nargs = parser.parse_args()\nstate = {k: v for k, v in args._get_kwargs()}\n\nassert args.model == 'resnet' or args.model == 'vgg', 'model structure can only be resnet or vgg'\n\n\n# Use CUDA\nos.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id\nuse_cuda = torch.cuda.is_available()\n\nbest_acc = 0 # best test accuracy\n\n\n\n# Trigger Initialize\nprint('==> Loading the Trigger')\n\nfrom PIL import Image\nargs.trigger = Image.open(args.trigger)\nargs.trigger = transforms.ToTensor()(args.trigger)\nassert (torch.max(args.trigger) < 1.001)\n\nargs.alpha = Image.open(args.alpha)\nargs.alpha = transforms.ToTensor()(args.alpha)\nassert (torch.max(args.alpha) < 1.001)\n\ndef main():\n # Dataset preprocessing\n title = 'CIFAR-10 pairwise T testing'\n\n # Load model\n print('==> Loading the model')\n if args.model == 'resnet':\n model = ResNet18()\n print(\"ResNet is adopted\")\n else:\n model = vgg19_bn()\n print(\"VGG is adopted\")\n\n assert os.path.isfile(args.model_path), 'Error: no checkpoint directory found!'\n checkpoint = torch.load(args.model_path)\n model = torch.nn.DataParallel(model).cuda()\n model.load_state_dict(checkpoint['state_dict'])\n model.eval()\n\n cudnn.benchmark = True\n 
print('Total params: %.2fM' % (sum(p.numel() for p in model.parameters())/1000000.0))\n\n # Create 2 Dataloaders\n transform_test_watermarked = transforms.Compose([\n TriggerAppending(trigger=args.trigger, alpha=args.alpha),\n transforms.ToTensor(),\n ])\n\n transform_test_standard = transforms.Compose([\n transforms.ToTensor(),\n ])\n\n\n print('==> Loading the dataset')\n\n dataloader = datasets.CIFAR10\n\n testset_watermarked= dataloader(root='./data', train=False, download=True, transform=transform_test_watermarked)\n testset_standard = dataloader(root='./data', train=False, download=True, transform=transform_test_standard)\n\n\n Stats = [-1]*args.num_test\n p_value = [-1]*args.num_test\n\n for iters in range(args.num_test):\n # Random seed\n random.seed(random.randint(1, 10000))\n\n # Construct watermarked dataset\n testset_watermarked_new = dataloader(root='./data', train=False, download=True, transform=transform_test_watermarked)\n testset_standard_new = dataloader(root='./data', train=False, download=True, transform=transform_test_standard)\n\n select_img = []\n select_target = []\n for i in range(len(testset_watermarked)):\n if testset_watermarked.targets[i]==args.select_class:\n select_img.append(testset_watermarked.data[i])\n select_target.append(testset_watermarked.targets[i])\n\n idx = list(np.arange(len(select_img)))\n random.shuffle(idx)\n image_idx = idx[:args.num_img]\n\n assert (len(select_img) >= args.num_img)\n\n testing_img = [select_img[i] for i in range(len(select_img)) if i in image_idx]\n testing_target = [select_target[i] for i in range(len(select_img)) if i in image_idx]\n\n\n testset_watermarked_new.data, testset_watermarked_new.targets = testing_img, testing_target\n\n # Construct benign dataset\n select_img = []\n select_target = []\n for i in range(len(testset_standard)):\n if testset_standard.targets[i] == args.select_class:\n select_img.append(testset_standard.data[i])\n select_target.append(testset_standard.targets[i])\n\n assert (len(select_img) >= args.num_img)\n\n testing_img = [select_img[i] for i in range(len(select_img)) if i in image_idx]\n testing_target = [select_target[i] for i in range(len(select_img)) if i in image_idx]\n\n testset_standard_new.data, testset_standard_new.targets = testing_img, testing_target\n\n watermarked_loader = torch.utils.data.DataLoader(testset_watermarked_new, batch_size=args.test_batch,\n shuffle=False, num_workers=args.workers)\n standard_loader = torch.utils.data.DataLoader(testset_standard_new, batch_size=args.test_batch,\n shuffle=False, num_workers=args.workers)\n\n output_watermarked = test(watermarked_loader, model, use_cuda)\n output_standard = test(standard_loader, model, use_cuda)\n\n # export the target label\n target_select_water = [(output_watermarked[i, args.target_label]).cpu().detach().numpy() for i in range(len(output_watermarked))]\n target_select_stand = [(output_standard[i, args.target_label]).cpu().detach().numpy() for i in range(len(output_standard))]\n\n target_select_water = np.array(target_select_water)\n target_select_stand = np.array(target_select_stand)\n\n T_test = ttest_rel(target_select_stand + args.margin, target_select_water)\n\n Stats[iters], p_value[iters] = T_test[0], T_test[1]\n\n print(\"%i/%i\"%(iters, args.num_test))\n\n idx_success_detection = [i for i in range(args.num_test) if (Stats[i]<0) and (p_value[i] < 0.05/2)] #single-sided hypothesis test\n rsd = float(len(idx_success_detection))/args.num_test\n\n path_folder = args.model_path[:-len(args.model_path.split(\"/\")[-1])] 
#remove \"checkpoint.pth.tar\"\n\n pd.DataFrame(Stats).to_csv(path_folder+\"Stats.csv\", header=None)\n pd.DataFrame(p_value).to_csv(path_folder+\"p_value.csv\", header=None)\n pd.DataFrame([rsd]).to_csv(path_folder+\"RSD.csv\", header=None)\n\n print(\"RSD =\", rsd)\n\n\ndef test(testloader, model, use_cuda):\n\n # switch to evaluate mode\n model.eval()\n\n\n for batch_idx, (inputs, targets) in enumerate(testloader):\n # measure data loading time\n\n if use_cuda:\n inputs, targets = inputs.cuda(), targets.cuda()\n\n # compute output\n outputs = model(inputs)\n p = torch.nn.functional.softmax(outputs)\n\n return p\n\n\n\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"CIFAR/test_cifar.py","file_name":"test_cifar.py","file_ext":"py","file_size_in_byte":7845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"273578337","text":"import os\n\nfrom celery import Celery\nfrom django.apps import AppConfig, apps\nfrom django.conf import settings\n\n__all__ = [\n 'app',\n 'TaskAppConfig'\n]\n\nif not settings.configured:\n # set the default Django settings module for the 'celery' program.\n os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'config.settings.production') # pragma: no cover\n\napp = Celery('procapi_tasks', loader=\"djcelery.loaders.DjangoLoader\")\n\n\nclass TaskAppConfig(AppConfig):\n name = 'procapi.taskapp'\n verbose_name = 'Celery Config'\n\n def ready(self):\n print(\"Iniciando app {}\".format(self.name))\n\n app.config_from_object('django.conf:settings')\n installed_apps = [app_config.name for app_config in apps.get_app_configs()]\n app.autodiscover_tasks(lambda: installed_apps, force=True)\n if hasattr(settings, 'RAVEN_CONFIG'):\n from raven import Client as RavenClient\n from raven.contrib.celery import register_signal as raven_register_signal\n from raven.contrib.celery import register_logger_signal as raven_register_logger_signal\n raven_client = RavenClient(dsn=settings.RAVEN_CONFIG['dsn'])\n raven_register_logger_signal(raven_client)\n raven_register_signal(raven_client)\n\n\n@app.task(bind=True)\ndef debug_task(self):\n print('Request: {0!r}'.format(self.request)) # pragma: no cover\n","sub_path":"procapi/taskapp/celery.py","file_name":"celery.py","file_ext":"py","file_size_in_byte":1382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"18592499","text":"\"\"\"\nCOMS 4705 Natural Language Processing Fall 2019\nKathy McKeown\nHomework 2: Emotion Classification with Neural Networks - Main File\nAuthors: Elsbeth Turcan \n\n\"\"\"\n\n\"\"\"\nNeed to download these packages: \n# conda install pytorch torchvision -c pytorch\n# pip install -U gensim\n#>> python \n#>> import nltk\n#>> nltk.download('punkt')\n\"\"\"\n\n# Citation:\n# https://pytorch.org/docs/master/nn.html,\n# https://pytorch.org/tutorials/beginner/nlp/word_embeddings_tutorial.html\n# https://blog.floydhub.com/a-beginners-guide-on-recurrent-neural-networks-with-pytorch/\n# https://pytorch.org/docs/stable/nn.html?highlight=pack_padd#torch.nn.utils.rnn.pack_padded_sequence\n# https://blog.floydhub.com/a-beginners-guide-on-recurrent-neural-networks-with-pytorch/\n# https://blog.floydhub.com/long-short-term-memory-from-zero-to-hero-with-pytorch/\n\n# Imports\nfrom typing import List\nimport nltk\nimport numpy as np\nimport pandas as pd\nimport pickle\nimport io\nfrom numpy.core._multiarray_umath import ndarray\nfrom sklearn.metrics import f1_score\nfrom sklearn.preprocessing import LabelEncoder\nimport 
torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.utils.data import Dataset, DataLoader\n\n# Imports - our files\nimport utils\nimport models\n# Experiment with another embedding file\nimport utils_new\n\n# Global definitions - data\nDATA_FN = \"data/crowdflower_data.csv\"\nLABEL_NAMES = [\"happiness\", \"worry\", \"neutral\", \"sadness\"]\n\n# Global definitions - architecture\nEMBEDDING_DIM = 100 # We will use pretrained 100-dimensional GloVe\nBATCH_SIZE = 128\nNUM_CLASSES = 4\nUSE_CUDA = torch.cuda.is_available() # CUDA will be available if you are using the GPU image for this homework\n\n# Global definitions - saving and loading data\nFRESH_START = False # set this to false after running once with True to just load your preprocessed data from file\n# (good for debugging)\nTEMP_FILE = \"temporary_data.pkl\" # if you set FRESH_START to false, the program will look here for your data, etc.\n\n\ndef train_model(model, loss_fn, optimizer, train_generator, dev_generator):\n \"\"\"\n Perform the actual training of the model based on the train and dev sets.\n :param model: one of your models, to be trained to perform 4-way emotion classification\n :param loss_fn: a function that can calculate loss between the predicted and gold labels\n :param optimizer: a created optimizer you will use to update your model weights\n :param train_generator: a DataLoader that provides batches of the training set\n :param dev_generator: a DataLoader that provides batches of the development set\n :return model, the trained model\n \"\"\"\n ########## YOUR CODE HERE ##########\n # TODO: Given a model, data, and loss function, you should do the following:\n # TODO: 1) Loop through the whole train dataset performing batch optimization with the optimizer of your choice,\n # TODO: updating the model parameters with each batch (we suggest you use torch.optim.Adam to start);\n # TODO: 2) Each time you reach the end of the train dataset (one \"epoch\"), calculate the loss on the whole dev set;\n # TODO and 3) stop training and return the model once the development loss stops improving (called early stopping).\n # TODO: Make sure to print the dev set loss each epoch to stdout.\n\n prev_loss = np.Infinity\n stop = False\n epoch = 0\n trained_model = model # to hold best model\n while not stop:\n hist_train_loss = []\n # Set network into train set\n model.train()\n for batch_x, batch_y in train_generator:\n # Reset optimizer\n optimizer.zero_grad()\n # Predict outputs\n outputs = model(batch_x)\n # Calculate the loss\n loss = loss_fn(outputs, batch_y)\n hist_train_loss.append(loss.cpu().detach().numpy())\n # Backward and update step\n loss.backward()\n optimizer.step()\n\n epoch += 1\n\n # Set network into development set\n model.eval()\n val_gold = []\n val_pred = []\n loss = 0\n\n with torch.no_grad(): # set not gradient\n optimizer.zero_grad()\n for batch_x, batch_y in dev_generator:\n outputs = model(batch_x)\n # Add predictions and gold labels\n val_gold.extend(batch_y.cpu().detach().numpy())\n val_pred.extend(outputs.argmax(1).cpu().detach().numpy())\n loss += loss_fn(outputs.double(), batch_y.long()).data\n\n f1 = f1_score(val_gold, val_pred, average='macro')\n print('Epoch: ' + str(epoch) + ', Total dev Loss: ' + str(loss.numpy()) + ', Total dev f-1: ' + str(f1))\n\n if loss < prev_loss:\n prev_loss = loss\n trained_model = model\n else:\n stop = True\n\n return trained_model\n\n\ndef test_model(model, loss_fn, test_generator):\n \"\"\"\n Evaluate the performance of a model on the development 
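train_model above stops the first time the dev loss fails to improve; a patience counter is the usual generalization, and keeping the best state dict avoids returning a worse final epoch. A framework-agnostic sketch (train_one_epoch and evaluate are hypothetical callables; state_dict assumes a torch-style model):

```python
def train_with_patience(model, train_one_epoch, evaluate, patience=3, max_epochs=100):
    """Early stopping with a patience window; keeps the best weights seen."""
    best_loss, best_state, bad_epochs = float('inf'), None, 0
    for epoch in range(max_epochs):
        train_one_epoch(model)
        dev_loss = evaluate(model)
        print('Epoch %d: dev loss %.4f' % (epoch, dev_loss))
        if dev_loss < best_loss:
            best_loss, bad_epochs = dev_loss, 0
            best_state = {k: v.clone() for k, v in model.state_dict().items()}
        else:
            bad_epochs += 1
            if bad_epochs >= patience:   # stop after `patience` bad epochs
                break
    model.load_state_dict(best_state)
    return model
```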
set, providing the loss and macro F1 score.\n :param model: a model that performs 4-way emotion classification\n :param loss_fn: a function that can calculate loss between the predicted and gold labels\n :param test_generator: a DataLoader that provides batches of the testing set\n \"\"\"\n gold = []\n predicted = []\n\n # Keep track of the loss\n loss = torch.zeros(1) # requires_grad = False by default; float32 by default\n if USE_CUDA:\n loss = loss.cuda()\n\n model.eval()\n\n # Iterate over batches in the test dataset\n with torch.no_grad():\n for X_b, y_b in test_generator:\n # Predict\n y_pred = model(X_b)\n # Save gold and predicted labels for F1 score - take the argmax to convert to class labels\n gold.extend(y_b.cpu().detach().numpy())\n predicted.extend(y_pred.argmax(1).cpu().detach().numpy())\n\n loss += loss_fn(y_pred.double(), y_b.long()).data\n\n # Print total loss and macro F1 score\n print(\"Test loss: \")\n print(loss)\n print(\"F-score: \")\n print(f1_score(gold, predicted, average='macro'))\n\n\ndef main():\n \"\"\"\n Train and test neural network models for emotion classification.\n \"\"\"\n # Prepare the data and the pretrained embedding matrix\n if FRESH_START:\n print(\"Preprocessing all data from scratch....\")\n train, dev, test = utils.get_data(DATA_FN)\n # train_data includes .word2idx and .label_enc as fields if you would like to use them at any time\n train_generator, dev_generator, test_generator, embeddings, train_data = utils.vectorize_data(train, dev, test,\n BATCH_SIZE,\n EMBEDDING_DIM)\n\n print(\"Saving DataLoaders and embeddings so you don't need to create them again; you can set FRESH_START to \"\n \"False to load them from file....\")\n with open(TEMP_FILE, \"wb+\") as f:\n pickle.dump((train_generator, dev_generator, test_generator, embeddings, train_data), f)\n else:\n try:\n with open(TEMP_FILE, \"rb\") as f:\n print(\"Loading DataLoaders and embeddings from file....\")\n train_generator, dev_generator, test_generator, embeddings, train_data = pickle.load(f)\n except FileNotFoundError:\n raise FileNotFoundError(\"You need to have saved your data with FRESH_START=True once in order to load it!\")\n\n # Use this loss function in your train_model() and test_model()\n loss_fn = nn.CrossEntropyLoss()\n ########## YOUR CODE HERE ##########\n # TODO: for each of the two models, you should 1) create it,\n # TODO 2) run train_model() to train it, and\n # TODO: 3) run test_model() on the result\n print(\"build model\")\n # If GPU available, set device to GPU.\n # Otherwise use CPU.\n if USE_CUDA:\n device = torch.device(\"cuda\")\n print(\"GPU is available\")\n else:\n device = torch.device(\"cpu\")\n print(\"GPU not available, CPU used\")\n\n # Tried hidden dimension: 64, 128, 256, 521, 1024\n model = models.DenseNetwork(embeddings, hidden_dim=256).to(device)\n # print(model)\n # model = torch.load('./dense.pth')\n # model.eval()\n optimizer = optim.Adam(model.parameters(), lr=0.001)#.cuda()\n print(\"training dense...\")\n trained_model = train_model(model, loss_fn, optimizer, train_generator, dev_generator)\n torch.save(trained_model, './dense.pth')\n test_model(trained_model, loss_fn, test_generator)\n\n # Tried hidden dimension: 32, 64, 128\n # model_rnn = torch.load('./recurrent.pth')\n # model_rnn.eval()\n model_rnn = models.RecurrentNetwork(embeddings, input_size=100, hidden_dim=64, n_layers=2).to(device)\n optimizer_rnn = optim.Adam(model_rnn.parameters(), lr=0.001)\n print(\"training rnn...\")\n trained_model = train_model(model_rnn, loss_fn, 
optimizer_rnn, train_generator, dev_generator)\n torch.save(trained_model, './recurrent.pth')\n test_model(trained_model, loss_fn, test_generator)\n\n\n # Uncomment it to load new embeddings. I used glove.42B.300d.txt as my optimal result.\n ''' \n # Experiment with another embeddings\n print(\"Preprocessing all data from scratch.... EXPERIMENT WITH NEW EMEBDDINGs\")\n train, dev, test = utils_new.get_data(DATA_FN)\n # train_data includes .word2idx and .label_enc as fields if you would like to use them at any time\n train_generator, dev_generator, test_generator, embeddings, train_data = utils_new.vectorize_data(train, dev, test,\n BATCH_SIZE,\n EMBEDDING_DIM2)\n print(\"Saving DataLoaders and embeddings so you don't need to create them again; you can set FRESH_START to \"\n \"False to load them from file....\")\n with open('300_42B_temp.pkl', \"wb+\") as f:\n pickle.dump((train_generator, dev_generator, test_generator, embeddings, train_data), f) \n '''\n\n # Uncomment it to do experiment DNN and RNN.\n '''\n try:\n with open('300_42B_temp.pkl', \"rb\") as f:\n print(\"Loading DataLoaders and embeddings from file....\")\n train_generator, dev_generator, test_generator, embeddings, train_data = pickle.load(f)\n except FileNotFoundError:\n raise FileNotFoundError(\"You need to have saved your data with FRESH_START=True once in order to load it!\")\n\n EMBEDDING_DIM2 = 300\n # Experiment with cnn\n model_cnn = models.ExperimentalNetwork(embeddings, embed_dim=EMBEDDING_DIM2).to(device)\n optimizer_rnn = optim.Adam(model_cnn.parameters(), lr=0.001)\n print(\"training experimental CNN...\")\n trained_model = train_model(model_cnn, loss_fn, optimizer_rnn, train_generator, dev_generator)\n test_model(trained_model, loss_fn, test_generator)\n\n # Experiment with bilstm\n model_bilstm = models.ExperimentalRNN(embeddings, embed_dim=EMBEDDING_DIM2, hidden_size=64).to(device)\n optimizer_rnn = optim.Adam(model_bilstm.parameters(), lr=0.001)\n print(\"training experimental biLSTM...\")\n trained_model = train_model(model_bilstm, loss_fn, optimizer_rnn, train_generator, dev_generator)\n test_model(trained_model, loss_fn, test_generator)\n '''\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"Emotion Classification/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"489177842","text":"import pickle\nimport re\n\ndict_cate={\"Physical Science and Engineering\":1,\"Math and Logic\":2,\"Business\":3,\"Language Learning\":4,\"Information Technology\":5,\"Personal Development\":6,\"Computer Science\":7,\"Arts and Humanities\":8,\"Life Sciences\":9,\"Data Science\":10,\"Social Sciences\":11}\n\ndef get_voc():\n with open('split_cate_dic.pickle', 'rb') as handle:\n split_cate_dict = pickle.load(handle)\n return split_cate_dict\n\n# this function is used to choose the suitable model\ndef choosing_model(keyword,split_cate_dict):\n for i,j in enumerate(split_cate_dict.items()):\n if keyword.lower() in j[1]:\n# print(j[0])\n return \"doc2vec_\"+str(dict_cate[j[0]])\n return \"doc2vec_all\"\n\n","sub_path":"Demo_DS_part1/get_model.py","file_name":"get_model.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"73677618","text":"import os\nfrom os import environ as env\nfrom gevent.pywsgi import WSGIServer\n\nfrom server import create_app\nfrom rasa_core import utils\nfrom rasa_core.interpreter 
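# The train_model() loop in main.py above stops after the first epoch in which
# dev loss fails to improve (a patience of zero). A minimal, self-contained sketch
# of patience-based early stopping -- `train_one_epoch`, `evaluate`, `patience`
# and `max_epochs` are illustrative assumptions, not names from the script:
import numpy as np

def early_stopping_loop(train_one_epoch, evaluate, patience=3, max_epochs=100):
    """Train until evaluate() has not improved for `patience` consecutive epochs."""
    best_loss = np.inf
    bad_epochs = 0
    for epoch in range(max_epochs):
        train_one_epoch()
        dev_loss = evaluate()
        if dev_loss < best_loss:
            best_loss, bad_epochs = dev_loss, 0  # improvement: reset the counter
        else:
            bad_epochs += 1
            if bad_epochs > patience:
                break  # dev loss has stalled; stop training
    return best_loss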
import RasaNLUHttpInterpreter\n\nutils.configure_colored_logging(\"DEBUG\")\n\nuser_input_dir = \"/app/nlu/\" + os.environ[\"RASA_NLU_PROJECT_NAME\"] + \"/user_input\"\nif not os.path.exists(user_input_dir):\n    os.makedirs(user_input_dir)\n\nnlu_interpreter = RasaNLUHttpInterpreter(\n    model_name = env[\"RASA_NLU_MODEL_NAME\"],\n    token = env[\"RASA_NLU_SERVER_TOKEN\"],\n    server = env[\"RASA_NLU_SERVER_ADDRESS\"],\n    project_name = env[\"RASA_NLU_PROJECT_NAME\"])\n\napp = create_app(\n    model_directory = env[\"RASA_CORE_MODEL_PATH\"],\n    cors_origins=\"*\",\n    loglevel = \"DEBUG\",\n    logfile = \"./logs/rasa_core.log\",\n    interpreter = nlu_interpreter)\n\nhttp_server = WSGIServer(('0.0.0.0', 5005), app)\nhttp_server.serve_forever()\n","sub_path":"rasa-core/run_server.py","file_name":"run_server.py","file_ext":"py","file_size_in_byte":881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"601472414","text":"# -*- coding: utf-8 -*-\r\n# @Time : 2018-07-28 16:41\r\n# @Author : liujiuhao\r\n# @Site : \r\n# @File : urls.py\r\n# @Software : PyCharm\r\n\r\n\r\n# app_name = 'equitinfo'\r\n# urlpatterns = [\r\n#     # path('', views.index, name='index'),\r\n#     # path('/', views.detail, name='detail'),\r\n#     # path('/results/', views.results, name='results'),\r\n#\r\n#     # Refactored to class-based views\r\n#     path('', views.IndexView.as_view(), name='index'),\r\n#     path('/', views.DetailView.as_view(), name='detail'),\r\n#     path('/results/', views.ResultsView.as_view(), name='results'),\r\n#     path('/vote/', views.vote, name='vote'),\r\n# ]\r\n\r\nfrom django.urls import path, include\r\n\r\nfrom . import views\r\n\r\n\r\napp_name = 'equit'\r\nurlpatterns = [\r\n    path('', views.login_view, name='login'),\r\n    path('logout_view/', views.logout_view, name='logout'),\r\n    path('index/', views.index_view, name='index'),  # home page v0.1\r\n    # path('/', views.IndexView.as_view(), name='index'),  # home page v0.2\r\n    path('/equitstaff/', views.EquitstaffUpdate.as_view(), name='equitstaff'),\r\n    # path('/equitinfo/', views.Equitinfo.as_view(), name='equitinfo'),\r\n    path('/equitinfo/', views.equitinfo_view, name='equitinfo'),\r\n    # path('/equitinfoupdate/', views.Equitinfoupdate.as_view(), name='equitinfoupdate'),\r\n    # path('/equitinfocreate/', views.equitinfo_create_view, name='equitinfocreate'),\r\n    # path('/equitinfodelete/', views.Equitinfodelete.as_view(), name='equitinfodelete'),\r\n    # path('excelupload/', views.excel_upload, name='excelupload'),\r\n    path('excelexport/', views.excel_export, name='excelexport'),\r\n    path('ajax_captcha/', views.ajax_captcha, name='ajax_captcha'),\r\n]\r\n","sub_path":"equit/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"493508943","text":"#!/usr/bin/env python3\n\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License in the file LICENSE.txt or at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport time\n\n# Masashi Added 01/03/2018\nimport json\nimport requests\nimport datetime\nimport math\nimport sys  # used by the ImportError fallback below (was missing)\ntry:\n    
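# run_server.py above indexes os.environ["RASA_NLU_PROJECT_NAME"] directly, so a
# missing variable surfaces as a bare KeyError. A hedged helper that fails with a
# readable message instead -- require_env itself is an illustration, not part of
# the original script; the variable names are the ones the script uses:
import os
import sys

def require_env(*names):
    """Return the values of the given environment variables, or exit loudly."""
    missing = [name for name in names if name not in os.environ]
    if missing:
        sys.exit("Missing required environment variables: " + ", ".join(missing))
    return [os.environ[name] for name in names]

# model_name, token = require_env("RASA_NLU_MODEL_NAME", "RASA_NLU_SERVER_TOKEN")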
from PIL import Image, ImageDraw, ImageFont\nexcept ImportError:\n sys.exit(\"Cannot import from PIL. Do `pip3 install --user Pillow` to install\")\n\nimport cozmo\nimport cozmo_utils\nimport api_accuweather\n\nSHOW_ANALOG_CLOCK = False\n\n# get a font - location depends on OS so try a couple of options\n# failing that the default of None will just use a default font\n_clock_font = None\ntry:\n _clock_font = ImageFont.truetype(\"arial.ttf\", 50)\nexcept IOError:\n try:\n _clock_font = ImageFont.truetype(\"/Library/Fonts/Arial.ttf\", 50)\n except IOError:\n pass\n\n\n\n# City name. Put something very explicit, this acts as a search string in AccuWeather's API\nCITY_NAME = \"Tokyo, Japan\"\n\n# nakatani Spire accessToken\naccessToken = 'a738c6b9570a84b9bbc4cc1da8da62d711c231d5c64abb3959ca6a560170ac4d'\n\n\nurl = 'https://app.spire.io/api/v2/events'\nevent_type = 'br'\ndateSpire = '20180107'\naccessUrl = url + '?access_token=' + accessToken + '&type=' + event_type + '&date=' + dateSpire\n\ndef make_resp_image(text_to_draw):\n # make a blank image for the background\n bkgd_img = Image.new('RGBA', cozmo.oled_face.dimensions(), (0,0,0,255))\n\n # get drawing context\n dc = ImageDraw.Draw(bkgd_img)\n\n # calculate position of clock elements\n text_height = 9\n screen_width, screen_height = cozmo.oled_face.dimensions()\n analog_width = screen_width\n analog_height = screen_height - text_height\n cen_x = analog_width * 0.5\n cen_y = analog_height * 0.5\n\n x = screen_width/2\n y = screen_height - text_height\n #print(screen_width,screen_height)\n\n # draw the text\n dc.text((cen_x,cen_y), text_to_draw, fill=(255,255,255,255), font = None)\n\n return bkgd_img\n\ndef make_text_image(text_to_draw, x, y, font=None):\n '''Make a PIL.Image with the given text printed on it\n\n Args:\n text_to_draw (string): the text to draw to the image\n x (int): x pixel location\n y (int): y pixel location\n font (PIL.ImageFont): the font to use\n\n Returns:\n :class:(`PIL.Image.Image`): a PIL image with the text drawn on it\n '''\n\n # make a blank image for the text, initialized to opaque black\n text_image = Image.new('RGBA', cozmo.oled_face.dimensions(), (0, 0, 0, 255))\n\n # get a drawing context\n dc = ImageDraw.Draw(text_image)\n\n # draw the text\n dc.text((x, y), text_to_draw, fill=(255, 255, 255, 255), font=font)\n\n return text_image\n\ndef make_clock_image(current_time):\n '''Make a PIL.Image with the current time displayed on it\n\n Args:\n text_to_draw (:class:`datetime.time`): the time to display\n\n Returns:\n :class:(`PIL.Image.Image`): a PIL image with the time displayed on it\n '''\n\n time_text = time.strftime(\"%I:%M:%S %p\")\n\n if not SHOW_ANALOG_CLOCK:\n return make_text_image(time_text, 8, 6, _clock_font)\n\n # make a blank image for the text, initialized to opaque black\n clock_image = Image.new('RGBA', cozmo.oled_face.dimensions(), (0, 0, 0, 255))\n\n # get a drawing context\n dc = ImageDraw.Draw(clock_image)\n\n # calculate position of clock elements\n text_height = 9\n screen_width, screen_height = cozmo.oled_face.dimensions()\n analog_width = screen_width\n analog_height = screen_height - text_height\n cen_x = analog_width * 0.5\n cen_y = analog_height * 0.5\n\n # calculate size of clock hands\n sec_hand_length = (analog_width if (analog_width < analog_height) else analog_height) * 0.5\n min_hand_length = 0.85 * sec_hand_length\n hour_hand_length = 0.7 * sec_hand_length\n\n # calculate rotation for each hand\n sec_ratio = current_time.second / 60.0\n min_ratio = (current_time.minute + 
sec_ratio) / 60.0\n hour_ratio = (current_time.hour + min_ratio) / 12.0\n\n # draw the clock hands\n draw_clock_hand(dc, cen_x, cen_y, hour_ratio, hour_hand_length)\n draw_clock_hand(dc, cen_x, cen_y, min_ratio, min_hand_length)\n draw_clock_hand(dc, cen_x, cen_y, sec_ratio, sec_hand_length)\n\n # draw the digital time_text at the bottom\n x = 32\n y = screen_height - text_height\n dc.text((x, y), time_text, fill=(255, 255, 255, 255), font=None)\n\n return clock_image\n\n\n\n# Main function\ndef cozmo_program(robot: cozmo.robot.Robot):\n\n\n# Get respiration infomation from Spire website\n #print(accessUrl)\n\n jsonTexts = requests.get(accessUrl).json()\n timeData = {}\n\n i = 0\n\n for num in jsonTexts: #配列のすべての要素に対して、特定の処理をしたいときに書く\n timeData['timestamp'] = datetime.datetime.fromtimestamp(num['timestamp'])\n jsonSmallTexts = json.dumps(jsonTexts[i][\"value\"], indent=2)\n\n # print jsonSmallTexts\n # print 'start time is, ' + timeData['start_at'].strftime('%Y-%m-%d %H:%M:%S') + ', stop time is, ' + timeData['stop_at'].strftime('%Y-%m-%d %H:%M:%S') + ', this data is number, ' + str(i) + ', ' + jsonSmallTexts\n # print str(i) + ', Time, ' + timeData['timestamp'].strftime('%Y-%m-%d %H:%M:%S') + ', ' + jsonSmallTexts\n print (str(i) + ', Time, ' + timeData['timestamp'].strftime('%Y-%m-%d') + ', ' + timeData['timestamp'].strftime(\n '%H') + ', ' + timeData['timestamp'].strftime('%M') + ', ' + jsonSmallTexts)\n # print str(i)\n #date_text = timeData['timestamp'].strftime('%H') + '時' + timeData['timestamp'].strftime('%M') + '分'\n date_text = timeData['timestamp'].strftime('%H') + '時' + timeData['timestamp'].strftime('%M')+ '分'\n\n i = i + 1\n\n i_old = i\n\n\n ''' Retrieves the weather forecast from AccuWeather and asks Cozmo to read it out loud '''\n\n # Put a lot of volume so we can hear Cozmo across home\n robot.set_robot_volume(1.0)\n\n # Some light effect, lift and head animation\n robot.set_backpack_lights(cozmo.lights.red_light,\n cozmo.lights.green_light,\n cozmo.lights.blue_light,\n cozmo.lights.white_light,\n cozmo.lights.red_light)\n robot.set_lift_height(0).wait_for_completed()\n robot.set_lift_height(0.25).wait_for_completed()\n robot.set_lift_height(0).wait_for_completed()\n robot.set_head_angle(cozmo.util.Angle(degrees=20)).wait_for_completed()\n robot.set_head_angle(cozmo.util.Angle(degrees=0)).wait_for_completed()\n\n # Get the forecast from AccuWeather\n # forecasts = api_accuweather.get_forecasts(CITY_NAME)\n\n\n\n # Cozmo requests your attention\n #action = robot.say_text(\"Weather Forecast\")\n action = robot.say_text(\"あなたの呼吸\")\n\n cozmo_utils.display_image_file_on_face(robot, \"images/weather.png\")\n action.wait_for_completed()\n\n '''\n # For each day's forecast, read it out loud\n for fc in forecasts[\"Forecasts\"]:\n # Get the date from the forecast (yyyy-mm-dd)\n date = fc[\"Date\"]\n\n # Get the forecast itself (sunny, cloudy, etc.)\n fc_text = fc[\"Forecast\"]\n\n # Converts the date from the format 'yyyy-mm-dd' to the name of the\n # weekday (Tuesday, Monday, etc.)\n date_text = time.strftime(\"%A\", time.strptime(date, \"%Y-%m-%d\"))\n\n # Finally, Cozmo tells the forecast\n cozmo_utils.say_forecast(robot, date_text, fc_text)\n '''\n #date_text = timeData['timestamp'].strftime('%H') + '時' + timeData['timestamp'].strftime('%M') + 'ふん'\n # for debug without accessing server\n #date_text = '21時10分'\n\n #fc_text = int(jsonSmallTexts)\n # 最新の呼吸数を取得\n fc_text = jsonSmallTexts\n resp_num = float(fc_text)\n resp_num = round(resp_num,1)\n print(resp_num)\n fc_text = 
str(resp_num)\n\n time.sleep(1)\n\n #fc_text = \"13.0\"\n cozmo_utils.say_forecast(robot, date_text, fc_text)\n # Turn off the lights, although it seems to be automatic\n\n print(date_text)\n #resp_num = float(\"13.0\")\n\n print(\"Press CTRL-C to quit\")\n\n j = 0\n\n #robot.set_head_angle(cozmo.util.Angle(degrees=20)).wait_for_completed()\n\n\n while True:\n\n current_time = datetime.datetime.now().time()\n\n resp_image = make_resp_image(fc_text)\n\n clock_image = make_clock_image(current_time)\n\n oled_face_data = cozmo.oled_face.convert_image_to_screen_data(resp_image)\n\n # display for 1 second\n robot.display_oled_face_image(oled_face_data, 1.0, True).wait_for_completed()\n # you should stop showing face image before you move your cozmo....\n\n\n last_displayed_time = current_time\n # only sleep for a fraction of a second to ensure we update the seconds as soon as they change\n #time.sleep(0.1)\n\n i_old = i\n # うまい方法を櫻田さんに尋ねる\n j = j + 1\n print(j)\n time.sleep(300)\n\n jsonTexts = requests.get(accessUrl).json()\n i_new = len(jsonTexts)\n if (i_new > i_old):\n print('yes')\n robot.set_head_angle(cozmo.util.Angle(degrees=20)).wait_for_completed()\n robot.set_head_angle(cozmo.util.Angle(degrees=0)).wait_for_completed()\n jsonSmallTexts = json.dumps(jsonTexts[i_new - 1][\"value\"], indent=2)\n print(jsonSmallTexts)\n print(i_old)\n print(i_new)\n i_old = i_new\n # time.sleep(1)\n j = 0\n\n\n\n #make_resp_image(date_text)\n #clock_image = make_clock_image(current_time)\n\n robot.set_backpack_lights_off()\n\n\n# Start the program\nif __name__ == \"__main__\":\n cozmo.run_program(cozmo_program)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"44612593","text":"import json\nimport csv\n\nclass Json:\n def __init__(self, filename):\n self.filename = filename\n def load(self):\n try:\n with open(self.filename, 'rt') as f:\n return json.load(f)\n except:\n return {}\n\n def save(self, contacts):\n try:\n with open(self.filename, 'wt') as f:\n json.dump(contacts,f)\n except IOError:\n print('boo')\n \nclass Csv:\n def __init__(self, filename):\n self.filename = filename\n\n def load(self):\n try:\n with open(self.filename, 'rt') as f:\n csvreader = csv.reader(f)\n res = {name: phone for name, phone in csvreader}\n return res\n except:\n return {}\n\n def save(self,contacts):\n try:\n with open(self.filename, 'wt') as f:\n csvwriter = csv.writer(f)\n for name in contacts:\n csvwriter.writerow((name, contacts[name]))\n except IOError:\n print('boo')","sub_path":"serializer.py","file_name":"serializer.py","file_ext":"py","file_size_in_byte":1064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"233748671","text":"from sklearn.model_selection import train_test_split\nfrom sklearn.metrics import cohen_kappa_score, accuracy_score\nfrom keras import layers\nfrom keras.layers import GlobalAveragePooling2D, Dense, Dropout, ELU, Softmax, Conv2D, concatenate, Input\nfrom keras.applications import DenseNet121\nfrom keras.callbacks import Callback, ModelCheckpoint, TensorBoard\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.models import Sequential, Model\nfrom keras.optimizers import Adam\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import cohen_kappa_score, accuracy_score\nfrom sklearn.utils import 
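# make_clock_image() in the Cozmo script above calls draw_clock_hand(), which is
# never defined in the file as shown. A plausible sketch of the missing helper,
# assuming (cen_x, cen_y) is the clock centre, `ratio` the fraction of a full
# turn, and `length` the hand length in pixels (all assumptions):
import math

def draw_clock_hand(dc, cen_x, cen_y, ratio, length):
    """Draw one clock hand on the PIL ImageDraw context dc."""
    angle = ratio * 2.0 * math.pi - (math.pi / 2.0)  # ratio 0 points straight up
    end_x = cen_x + length * math.cos(angle)
    end_y = cen_y + length * math.sin(angle)
    dc.line([(cen_x, cen_y), (end_x, end_y)], fill=(255, 255, 255, 255), width=1)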
class_weight\nimport scipy\nimport keras\nimport tensorflow as tf\nfrom tqdm import tqdm\nfrom enet_preprocess import *\nfrom keras_radam import RAdam\nfrom group_norm import GroupNormalization\nsys.path.append(os.path.abspath('../../efficientnet/'))\nimport efficientnet.keras as efn \t\nfrom dataloader import DataGenerator\nfrom losses import categorical_focal_loss\n\n\n\ndef create_model(dim = (256, 256), weights = np.ones(5), split = False):\n\n\tf_loss = categorical_focal_loss(alpha = weights)\n\n\tIMG_WIDTH, IMG_HEIGHT, CHANNELS = *dim, 3\n\tinput_shape = (IMG_WIDTH, IMG_HEIGHT, CHANNELS)\n\telu = keras.layers.ELU(alpha=1.0)\n\n\t# create the base pre-trained model\n\t# Load in EfficientNetB5\n\teffnet = efn.EfficientNetB4(weights=None,\n\t include_top=False,\n\t input_shape=(IMG_WIDTH, IMG_HEIGHT, CHANNELS))\n\teffnet.load_weights('/media/parth/DATA/datasets/aptos_2019/efficientnet-b4_imagenet_1000_notop.h5')\n\n\t# Replace all Batch Normalization layers by Group Normalization layers\n\tfor i, layer in enumerate(effnet.layers):\n\t if \"batch_normalization\" in layer.name:\n\t effnet.layers[i] = GroupNormalization(groups=32, axis=-1, epsilon=0.00001)\n\n\tif split == True:\n\n\t\tinput1 = Input(input_shape)\n\t\tinput2 = Input(input_shape)\n\t\tinput3 = Input(input_shape)\n\t\tinput4 = Input(input_shape)\n\t\tconv1 = Conv2D(16, 3, padding = 'same')(input1)\n\t\tconv2 = Conv2D(16, 3, padding = 'same')(input2)\n\t\tconv3 = Conv2D(16, 3, padding = 'same')(input3)\n\t\tconv4 = Conv2D(16, 3, padding = 'same')(input4) \n\t\tconcat = concatenate([conv1, conv2, conv3, conv4])\n\t\tenet_input = Conv2D(3, 3, padding = 'same')(concat)\n\t\tx = effnet(enet_input)\n\t\tx = GlobalAveragePooling2D()(x)\n\t\tx = Dense(256)(x)\n\t\tx = Dropout(0.25)(x)\n\t\tx = Dense(5)(x)\n\t\tpredictions = Softmax()(x)\n\n\t\tmodel = Model(inputs=[input1, input2, input3, input4], outputs=predictions)\n\t\tmodel.compile(loss=f_loss,\n\t\t optimizer=RAdam(learning_rate=0.00005), \n\t\t metrics=[f_loss, 'acc'])\n\t\tprint(model.summary())\n\n\t\treturn model\n\n\telse:\n\n\t\tx = effnet.output\n\t\tx = GlobalAveragePooling2D()(x)\n\t\tx = Dense(256)(x)\n\t\tx = Dropout(0.25)(x)\n\t\tx = Dense(5)(x)\n\t\tpredictions = Softmax()(x)\n\n\t\tmodel = Model(inputs=effnet.input, outputs=predictions)\n\t\tmodel.compile(loss=f_loss,\n\t\t optimizer=RAdam(lr=0.00005), \n\t\t metrics=[f_loss, 'acc'])\n\t\tprint(model.summary())\n\n\t\treturn model\n\ndef get_preds_and_labels(model, generator):\n \"\"\"\n Get predictions and labels from the generator\n \"\"\"\n preds = []\n labels = []\n for i in range(int(np.ceil(generator.__len__() / BATCH_SIZE))):\n x, y = generator.__getitem__(i)\n preds.append(model.predict(x))\n labels.append(y)\n # Flatten list of numpy arrays\n return np.concatenate(preds).ravel(), np.concatenate(labels).ravel()\n\nclass Metrics(Callback):\n\t\"\"\"\n\tA custom Keras callback for saving the best model\n\taccording to the Quadratic Weighted Kappa (QWK) metric\n\t\"\"\"\n\tdef __init__(self, val_generator=()):\n\t\tsuper(Callback, self).__init__()\n\n\t\tself.val_generator = val_generator\n\n\t\tself.SAVED_MODEL_NAME = '/media/parth/DATA/datasets/aptos_results/saved_models/effnet_functional.h5'\n\n\tdef on_train_begin(self, logs={}):\n\t \"\"\"\n\t Initialize list of QWK scores on validation data\n\t \"\"\"\n\t self.val_kappas = []\n\n\tdef on_epoch_end(self, epoch, logs={}):\n\t \"\"\"\n\t Gets QWK score on the validation data\n\t \"\"\"\n\t # Get predictions and convert to integers\n\t y_pred, labels = 
get_preds_and_labels(model, self.val_generator)\n\t y_pred = np.rint(y_pred).astype(np.uint8).clip(0, 4)\n\t # We can use sklearns implementation of QWK straight out of the box\n\t # as long as we specify weights as 'quadratic'\n\t _val_kappa = cohen_kappa_score(labels, y_pred, weights='quadratic')\n\t self.val_kappas.append(_val_kappa)\n\t print(\"val_kappa: {}\".format(round(_val_kappa, 4)))\n\t if _val_kappa == max(self.val_kappas):\n\t print(\"Validation Kappa has improved. Saving model.\")\n\t self.model.save(self.SAVED_MODEL_NAME)\n\t self.model.save_weights(\"/media/parth/DATA/datasets/aptos_results/saved_models/effnet_weights_functional.h5\")\n\t return\n\n\n\nclass Training(object):\n \n\n\tdef __init__(self, model, nb_epoch, batch_size, savepath, load_model_resume_training=None, weight_path=None):\n\n\n\t\tself.nb_epoch = nb_epoch\n\t\tself.batch_size = batch_size\n\t\tself.savepath = savepath\n\n\t\t#loading model from path to resume previous training without recompiling the whole model\n\t\tif load_model_resume_training is not None:\n\t\t\tself.model = model \t\n\t\t\tself.model.load_weights(weight_path)\n\t\t\tprint(\"pre-trained model loaded!\")\n\t\telse:\n\t\t\tself.model = model\n\t\t\t#self.model.load_weights('/home/parth/Interpretable_ML/Brain-tumor-segmentation/checkpoints/Unet_cc/SimUnet.01_0.095.hdf5')\n\t\t\tprint(\"Model compiled!\")\n\n\tdef fit(self, train_gen, val_gen):\n\t\ttrain_generator = train_gen\n\t\tval_generator = val_gen\n\n\t\t# For tracking Quadratic Weighted Kappa score\n\t\tkappa_metrics = Metrics(val_generator)\n\t\t# Monitor MSE to avoid overfitting and save best model\n\t\tes = keras.callbacks.EarlyStopping(monitor='val_acc', mode='auto', verbose=1, patience=12)\n\t\trlr = keras.callbacks.ReduceLROnPlateau(monitor='val_loss', \n\t\t\t\t\t\t\t\tfactor=0.5, \n\t\t\t\t\t\t\t\tpatience=4, \n\t\t\t\t\t\t\t\tverbose=1, \n\t\t\t\t\t\t\t\tmode='auto', \n\t\t\t\t\t\t\t\tepsilon=0.0001)\n\n\t\ttb = TensorBoard(log_dir=os.path.join(self.savepath , 'logs'), histogram_freq=0, batch_size=8, write_graph=True, write_grads=False, \n\t\t\twrite_images=False, embeddings_freq=0, embeddings_layer_names=None, embeddings_metadata=None, embeddings_data=None, update_freq='epoch')\n\t\tcheckpointer = ModelCheckpoint(filepath=self.savepath + '/effnet_accuracy_based.h5', verbose=1, save_best_only = True)\n\t\tself.model.fit_generator(train_generator,\n\t\t\t\t\t\t\t\t\t\t epochs=self.nb_epoch, validation_data=val_generator, \n\t\t\t\t\t\t\t\t\t\t steps_per_epoch = len(train_generator), validation_steps = len(val_generator), \n\t\t\t\t\t\t\t\t\t\t verbose=1, callbacks=[kappa_metrics, rlr, es, tb, checkpointer])\n\n\n\nif __name__ == '__main__':\n\n\tlabel_df = pd.read_csv('/media/parth/DATA/datasets/aptos_2019/train.csv')\n\n\t#label_df[\"id_code\"] = label_df[\"id_code\"].apply(lambda name : name + '.png')\n\n\t# label_df[\"level\"] = label_df[\"level\"].apply(lambda label : str(label))\n\n\ttrain, test = train_test_split(label_df, test_size=0.15, random_state=42)\n\n\ty_train = np.array(label_df['diagnosis'])\n\n\tclass_weights = class_weight.compute_class_weight('balanced',\n\t np.unique(y_train),\n\t y_train)\n\tprint(class_weights)\n\n\tsample_array = []\t\n\n\tBATCH_SIZE = 8\n\tIMG_WIDTH, IMG_HEIGHT = 1024, 1024\n\tTRAIN_IMG_PATH = '/media/parth/DATA/datasets/aptos_2019/train_cropped'\n\tprint('---------------------Initialized Training---------------------\\n')\n\t# Add Image augmentation to our generator\n\ttrain_datagen = DataGenerator(TRAIN_IMG_PATH, batch_size = 
BATCH_SIZE, dataframe = train, dim = (IMG_HEIGHT, IMG_WIDTH), split = True)\n\tprint('Found {} Train Images'.format(train_datagen.__len__()*BATCH_SIZE))\n\tval_datagen = DataGenerator(TRAIN_IMG_PATH, batch_size = BATCH_SIZE, dataframe = test, dim = (IMG_HEIGHT, IMG_WIDTH), split = True)\n\tprint('Found {} Val Images\\n'.format(val_datagen.__len__()*BATCH_SIZE))\n\t# print('\\n\\n')\n\t# print(len(train_datagen))\n\tmodel = create_model((IMG_HEIGHT//2, IMG_WIDTH//2), class_weights, split=True)\n\n\tT = Training(model, nb_epoch = 100, batch_size = BATCH_SIZE, \n\t\tsavepath = '/media/parth/DATA/datasets/aptos_results/saved_models', \n\t\tload_model_resume_training=None, weight_path=None)\n\n\tT.fit(train_datagen, val_datagen)\n\n\t# val_gen = ImageDataGenerator(\n\t# \tsamplewise_center=True,\n\t# \tsamplewise_std_normalization=True\n\t# \t)\n\n\t# # Use the dataframe to define train and validation generators\n\t# train_generator = train_datagen.flow_from_dataframe(label_df, \n\t# x_col='id_code', \n\t# y_col='diagnosis',\n\t# directory = TRAIN_IMG_PATH,\n\t# target_size=(IMG_WIDTH, IMG_HEIGHT),\n\t# batch_size=BATCH_SIZE,\n\t# class_mode='other', \n\t# color_mode='rgb',\n\t# subset='training')\n\n\t# val_generator = train_datagen.flow_from_dataframe(label_df, \n\t# x_col='id_code', \n\t# y_col='diagnosis',\n\t# directory = TRAIN_IMG_PATH,\n\t# target_size=(IMG_WIDTH, IMG_HEIGHT),\n\t# batch_size=BATCH_SIZE,\n\t# class_mode='other',\n\t# subset='validation')\n\n\n\t# def plotImages(images_arr):\n\t# fig, axes = plt.subplots(1, 5, figsize=(20,20))\n\t# axes = axes.flatten()\n\t# for img, ax in zip( images_arr, axes):\n\t# ax.imshow(img)\n\t# plt.tight_layout()\n\t# plt.show()\n\t# plt.imshow(train_datagen.__getitem__(0)[0][0])\n\t# print(train_datagen.__getitem__(0)[0][0].shape)\n\t# plt.show()\n\t# print(img)\n\t# plt.imshow(img)\n\t# plt.show()\n\t#plotImages(augmented_images)\n","sub_path":"src/enet.py","file_name":"enet.py","file_ext":"py","file_size_in_byte":10193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"133630044","text":"import sqlite3\nimport os\nimport uuid\nimport json\nfrom django.http import JsonResponse, Http404, HttpResponse\nfrom app import MenuPlans as app\n\ndef create_database():\n #run manually\n\n app_ws = app.get_app_workspace().path\n sqlite_file = os.path.join(app_ws, 'menuplan.sqlite')\n conn = sqlite3.connect(sqlite_file)\n c = conn.cursor()\n\n # Dish sqlite DB\n dishtable = 'dish' # name of the table\n dishcolumns = [\n ['name', 'string'],\n ['dish_type', 'string'],\n ['serves', 'integer'],\n ['prep', 'integer'],\n ['cook', 'integer'],\n ['rating', 'float'],\n ['nutritionid', 'integer'],\n ['ingredientid', 'integer'],\n ['directionid', 'integer'],\n ]\n\n exe = \"CREATE TABLE {} (\".format(dishtable)\n\n exe = exe + '{nf} {ft} PRIMARY KEY'.format(nf='id', ft='string')\n for column in dishcolumns:\n exe = exe + ', {nf} {ft}'.format(nf=column[0], ft=column[1])\n exe = exe + \")\"\n\n c.execute(exe)\n\n nutritiontable = 'nutrition' # name of the table\n nutcolumns = [\n ['calories', 'float'],\n ['carbohydrates', 'float'],\n ['saturated_fat', 'float'],\n ['total_fat', 'float'],\n ['sugar', 'float'],\n ['protein', 'float'],\n ['cholesterol', 'float'],\n ['sodium', 'float'],\n ['fiber', 'float'],\n ]\n\n exe = 'CREATE TABLE {} ('.format(nutritiontable)\n\n exe = exe + '{nf} {ft} PRIMARY KEY'.format(nf='id', ft='string')\n for column in nutcolumns:\n exe = exe + ', {nf} {ft}'.format(nf=column[0], 
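# The Metrics callback in enet.py above scores the dev set with quadratic
# weighted kappa (QWK) by rounding soft outputs to integer grades. A minimal
# standalone illustration with sklearn; the toy arrays are made up:
import numpy as np
from sklearn.metrics import cohen_kappa_score

y_true = np.array([0, 1, 2, 4, 3])
y_soft = np.array([0.2, 1.4, 1.8, 3.6, 3.1])          # regression-style outputs
y_pred = np.rint(y_soft).astype(np.uint8).clip(0, 4)  # same rounding as the callback
print(cohen_kappa_score(y_true, y_pred, weights='quadratic'))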
ft=column[1])\n exe = exe + (', FOREIGN KEY({}) REFERENCES {}({}{})'.format(column[0], dishtable, nutritiontable, 'id'))\n exe = exe + \")\"\n\n c.execute(exe)\n\n ingredienttable = 'ingredient' # name of the table\n ingcolumns = [\n ['ingredientid', 'string'],\n ['step', 'integer'],\n ['ingredient', 'string'],\n ]\n\n exe = 'CREATE TABLE {} ('.format(ingredienttable)\n\n exe = exe + '{nf} {ft} PRIMARY KEY AUTOINCREMENT'.format(nf='id', ft='integer')\n for column in ingcolumns:\n exe = exe + ', {nf} {ft}'.format(nf=column[0], ft=column[1])\n exe = exe + (', FOREIGN KEY({}) REFERENCES {}({}{})'.format(column[0], dishtable, ingredienttable, 'id'))\n exe = exe + \")\"\n\n c.execute(exe)\n\n directiontable = 'direction' # name of the table\n dircolumns = [\n ['directionid', 'string'],\n ['step', 'integer'],\n ['direction', 'string'],\n ]\n\n exe = 'CREATE TABLE {} ('.format(directiontable)\n\n exe = exe + '{nf} {ft} PRIMARY KEY AUTOINCREMENT'.format(nf='id', ft='integer')\n for column in dircolumns:\n exe = exe + ', {nf} {ft}'.format(nf=column[0], ft=column[1])\n exe = exe + (', FOREIGN KEY({}) REFERENCES {}({}{})'.format(column[0], dishtable, directiontable, 'id'))\n exe = exe + \")\"\n\n c.execute(exe)\n\n conn.commit()\n conn.close()\n\ndef insert_picture(conn, picture_file):\n with open(picture_file, 'rb') as input_file:\n ablob = input_file.read()\n base=os.path.basename(picture_file)\n afile, ext = os.path.splitext(base)\n sql = '''INSERT INTO PICTURES\n (PICTURE, TYPE, FILE_NAME)\n VALUES(?, ?, ?);'''\n conn.execute(sql,[sqlite3.Binary(ablob), ext, afile])\n conn.commit()\n\ndef create_meal(request):\n app_ws = app.get_app_workspace().path\n sqlite_file = os.path.join(app_ws, 'menuplan.sqlite')\n if not os.path.isfile(sqlite_file):\n create_database()\n\n db_id = request.POST.get('db_id')\n if db_id == 'none':\n db_id = None\n\n dish_name = request.POST.get('dish_name')\n dish_type = request.POST.get('dish_type')\n serves = request.POST.get('serves')\n prep = request.POST.get('prep')\n cook = request.POST.get('cook')\n rating = request.POST.get('rating')\n meal = Meal(dish_name, dish_type, serves, prep, cook, rating)\n\n calories = request.POST.get('cal')\n carbs = request.POST.get('carb')\n saturated_fat = request.POST.get('satfat')\n total_fat = request.POST.get('totfat')\n sugar = request.POST.get('sugar')\n protein = request.POST.get('protein')\n cholestoral = request.POST.get('chol')\n sodium = request.POST.get('sodium')\n fiber = request.POST.get('fiber')\n image = request.FILES.get('image')\n\n meal.nutrition = Nutrition(calories=calories, total_fat=total_fat, carbs=carbs, protein=protein, sat_fat=saturated_fat, sodium=sodium, sugar=sugar, chol=cholestoral, fiber=fiber)\n meal.ingredient = Ingredient()\n meal.direction = Direction()\n\n ingredients = json.loads(request.POST.get('ingredients'))\n\n for ingredient in ingredients:\n meal.ingredient.add_ingredient(ingredient, ingredients[ingredient])\n\n directions = json.loads(request.POST.get('directions'))\n\n for direction in directions:\n meal.direction.add_direction(direction, directions[direction])\n\n meal.save_to_db(db_id)\n return JsonResponse({'success':True})\n\ndef get_all_dishes(request):\n app_ws = app.get_app_workspace().path\n sqlite_file = os.path.join(app_ws, 'menuplan.sqlite')\n conn = sqlite3.connect(sqlite_file)\n cur = conn.cursor()\n cur.execute(\"SELECT id, name, rating FROM dish\")\n\n rows = cur.fetchall()\n\n return JsonResponse({'success': True, 'data': rows})\n\ndef get_dish(request):\n id = 
request.POST.get('id')\n\n response_dict = {'success': True}\n\n app_ws = app.get_app_workspace().path\n sqlite_file = os.path.join(app_ws, 'menuplan.sqlite')\n conn = sqlite3.connect(sqlite_file)\n\n sql = 'SELECT * FROM dish WHERE id=?'\n cur = conn.cursor()\n cur.execute(sql, (id,))\n rows = cur.fetchone()\n response_dict['dish'] = rows\n\n sql = 'SELECT * FROM nutrition WHERE id=?'\n cur = conn.cursor()\n cur.execute(sql, (id,))\n rows = cur.fetchone()\n response_dict['nutrition'] = rows\n\n sql = 'SELECT * FROM ingredient WHERE ingredientid=?'\n cur = conn.cursor()\n cur.execute(sql, (id,))\n rows = cur.fetchall()\n response_dict['ingredient'] = rows\n\n sql = 'SELECT * FROM direction WHERE directionid=?'\n cur = conn.cursor()\n cur.execute(sql, (id,))\n rows = cur.fetchall()\n response_dict['direction'] = rows\n\n return JsonResponse(response_dict)\n\ndef delete_dish(request):\n id = request.POST.get('id')\n app_ws = app.get_app_workspace().path\n sqlite_file = os.path.join(app_ws, 'menuplan.sqlite')\n conn = sqlite3.connect(sqlite_file)\n sql = 'DELETE FROM dish WHERE id=?'\n cur = conn.cursor()\n cur.execute(sql, (id,))\n conn.commit()\n conn.close()\n\n return get_all_dishes(request)\n\nclass Meal:\n def __init__(self, name, dish_type, serve, prep, cook, rating):\n self.id = None\n self.name = name\n self.dish_type = dish_type\n self.serve = serve\n self.prep = prep\n self.cook = cook\n self.rating = rating\n self.nutrition = None\n self.ingredient = None\n self.direction = None\n\n def get_id(self):\n if not self.id:\n self.id = uuid.uuid4()\n\n return self.id\n\n def add_nutrition(self, calories, total_fat, carbs, protein, sat_fat, sodium, sugar, chol, fiber):\n self.nutrition = Nutrition(calories, total_fat, carbs, protein, sat_fat, sodium, sugar, chol, fiber)\n\n def add_ingredients(self):\n self.ingredient = Ingredient()\n\n def add_directions(self):\n self.direction = Direction()\n\n def save_to_db(self, db_id):\n\n app_ws = app.get_app_workspace().path\n sqlite_file = os.path.join(app_ws, 'menuplan.sqlite')\n conn = sqlite3.connect(sqlite_file)\n\n if not db_id:\n\n if self.nutrition:\n sql = ''' INSERT INTO nutrition(id,calories,carbohydrates,saturated_fat,total_fat,sugar,protein,cholesterol,sodium,fiber)\n VALUES(?,?,?,?,?,?,?,?,?,?) '''\n nutrition_inputs = [str(self.get_id()),\n self.nutrition.calories,\n self.nutrition.carbs,\n self.nutrition.sat_fat,\n self.nutrition.total_fat,\n self.nutrition.sugar,\n self.nutrition.protein,\n self.nutrition.chol,\n self.nutrition.sodium,\n self.nutrition.fiber]\n cur = conn.cursor()\n cur.execute(sql, nutrition_inputs)\n\n ingredient_id = None\n if self.ingredient:\n for key in self.ingredient.ingredients:\n sql = ''' INSERT INTO ingredient(ingredientid, step, ingredient)\n VALUES(?,?,?) '''\n ingredient_inputs = [str(self.get_id()),\n key,\n self.ingredient.ingredients[key]]\n print(key)\n print(self.ingredient.ingredients)\n print(ingredient_inputs)\n cur = conn.cursor()\n cur.execute(sql, ingredient_inputs)\n\n if self.direction:\n for key in self.direction.directions:\n sql = ''' INSERT INTO direction(directionid, step,direction)\n VALUES(?,?,?) '''\n direction_inputs = [str(self.get_id()),\n key,\n self.direction.directions[key]]\n cur = conn.cursor()\n cur.execute(sql, direction_inputs)\n\n sql = ''' INSERT INTO dish(id,name,dish_type,serves,prep,cook,rating,nutritionid,ingredientid,directionid)\n VALUES(?,?,?,?,?,?,?,?,?,?) 
'''\n dish_inputs = [str(self.get_id()),\n self.name,\n self.dish_type,\n self.serve,\n self.prep,\n self.cook,\n self.rating,\n str(self.get_id()),\n str(self.get_id()),\n str(self.get_id())]\n cur = conn.cursor()\n cur.execute(sql, dish_inputs)\n conn.commit()\n conn.close()\n\n else:\n\n if self.nutrition:\n sql = ''' UPDATE nutrition SET calories=?,carbohydrates=?,saturated_fat=?,total_fat=?,sugar=?,protein=?,cholesterol=?, sodium=?, fiber=? WHERE id=?'''\n nutrition_inputs = [self.nutrition.calories,\n self.nutrition.carbs,\n self.nutrition.sat_fat,\n self.nutrition.total_fat,\n self.nutrition.sugar,\n self.nutrition.protein,\n self.nutrition.chol,\n self.nutrition.sodium,\n self.nutrition.fiber,\n db_id]\n cur = conn.cursor()\n cur.execute(sql, nutrition_inputs)\n\n ingredient_id = None\n if self.ingredient:\n for key in self.ingredient.ingredients:\n sql = ''' UPDATE ingredient SET ingredient=? WHERE ingredientid=? AND step=?'''\n ingredient_inputs = [self.ingredient.ingredients[key],\n db_id,\n key\n ]\n cur = conn.cursor()\n cur.execute(sql, ingredient_inputs)\n\n if self.direction:\n for key in self.direction.directions:\n sql = ''' UPDATE direction SET direction=? WHERE directionid=? AND step=?'''\n direction_inputs = [self.direction.directions[key],\n db_id,\n key\n ]\n cur = conn.cursor()\n cur.execute(sql, direction_inputs)\n\n\n sql = ''' UPDATE dish SET name=?, dish_type=?,serves=?,prep=?,cook=?,rating=? WHERE id=?'''\n dish_inputs = [self.name,\n self.dish_type,\n self.serve,\n self.prep,\n self.cook,\n self.rating,\n db_id]\n cur = conn.cursor()\n cur.execute(sql, dish_inputs)\n conn.commit()\n conn.close()\n\nclass Nutrition:\n def __init__(self, calories=0, total_fat=0, carbs=0, protein=0, sat_fat=0, sodium=0, sugar=0, chol=0, fiber=0):\n self.calories = calories\n self.total_fat = total_fat\n self.carbs = carbs\n self.protein = protein\n self.sat_fat = sat_fat\n self.sodium = sodium\n self.sugar = sugar\n self.chol = chol\n self.fiber = fiber\n\nclass Ingredient:\n def __init__(self):\n self.total_ingredients = 0\n self.ingredients = {}\n\n def add_ingredient(self, number, ingredient):\n self.total_ingredients += 1\n self.ingredients[number] = ingredient\n\nclass Direction:\n def __init__(self):\n self.total_directions = 0\n self.directions = {}\n\n def add_direction(self, number, direction):\n self.total_directions += 1\n self.directions[number] = direction\n","sub_path":"tethysapp/menu_plans/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":13666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"250027078","text":"import time\nfrom selenium import webdriver\nimport chromedriver_binary\n\n\ndriver = webdriver.Chrome()\n\ndriver.get('https://www.google.com/')\n\nserach = driver.find_element_by_name('q')\nserach.send_keys('犬')\nserach.submit()\ntime.sleep(3)\n\ndef ranking(driver):\n i = 1 # ループ番号、ページ番号を定義\n i_max = 1 # 最大何ページまで分析するかを定義\n title_list = [] # タイトルを格納する空リストを用意\n link_list = [] # URLを格納する空リストを用意\n \n # 現在のページが指定した最大分析ページを超えるまでループする\n while i <= i_max:\n # タイトルとリンクはclass=\"r\"に入っている\n class_group = driver.find_elements_by_class_name('r')\n # タイトルとリンクを抽出しリストに追加するforループ\n for elem in class_group:\n title_list.append(elem.find_element_by_class_name('LC20lb').text) #タイトル(class=\"LC20lb\")\n link_list.append(elem.find_element_by_tag_name('a').get_attribute('href')) #リンク(aタグのhref属性)\n \n # 「次へ」は1つしかないが、あえてelementsで複数検索。空のリストであれば最終ページの意味になる。\n if driver.find_elements_by_id('pnnext') == []:\n i 
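# model.py above ties the dish, nutrition, ingredient and direction rows
# together by reusing one uuid per Meal. A small usage sketch of those classes
# (the values are illustrative, and save_to_db() expects the app workspace and
# sqlite file from the module above to be in place):
meal = Meal("Pancakes", "breakfast", serve=4, prep=10, cook=15, rating=4.5)
meal.add_nutrition(calories=350, total_fat=9, carbs=60, protein=8,
                   sat_fat=2, sodium=500, sugar=12, chol=45, fiber=2)
meal.add_ingredients()
meal.ingredient.add_ingredient(1, "2 cups flour")
meal.add_directions()
meal.direction.add_direction(1, "Mix the batter and fry on a hot griddle.")
meal.save_to_db(db_id=None)  # None takes the INSERT path; an existing id takes UPDATE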
= i_max + 1\n        else:\n            # the URL of the next page is in the href attribute of id=\"pnnext\"\n            next_page = driver.find_element_by_id('pnnext').get_attribute('href')\n            driver.get(next_page) # go to the next page\n            i = i + 1 # update i\n            time.sleep(3) # wait 3 seconds\n    return title_list, link_list # return the title and link lists\ntitle, link = ranking(driver)\nprint(title)\n\ndriver.quit()","sub_path":"test7.py","file_name":"test7.py","file_ext":"py","file_size_in_byte":1847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"238590234","text":"# pylint: disable=missing-function-docstring, missing-module-docstring/\nfrom pyccel.decorators import types\n\n\n@types('int', 'int', 'real [:,:]')\ndef f6(m1, m2, x):\n    x[:,:] = 0.\n    for i in range(0, m1):\n        for j in range(0, m2):\n            x[i,j] = (2*i+j) * 1.\n\n@types('real [:]')\ndef h(x):\n    x[2] = 8.\n","sub_path":"tests/epyccel/modules/Module_2.py","file_name":"Module_2.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"509983548","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Jul 17 08:26:39 2020\r\n\r\n@author: Sashka\r\n\"\"\"\r\n\r\nimport tensorflow as tf\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom mpl_toolkits.mplot3d import Axes3D\r\nfrom scipy.interpolate import interp1d\r\nfrom scipy.optimize import minimize\r\nfrom scipy.optimize import differential_evolution\r\nfrom scipy.optimize import dual_annealing\r\nimport numba\r\nimport time\r\n\r\nfrom PDE_solver import solution_interp\r\nfrom PDE_solver import string_reshape\r\nfrom PDE_solver import apply_const_operator\r\nfrom PDE_solver import plot_3D_surface\r\nfrom PDE_solver import operator_norm\r\n\r\n\r\ndef callback_3D(Xi):\r\n    X, T = np.meshgrid(t, x)\r\n    fig1 = plt.figure()\r\n    ax = fig1.gca(projection='3d')\r\n    surf = Xi.reshape([len(grid[0]), len(grid[1])])\r\n    error = np.abs(wolfram_interp - surf)\r\n    wolfram_MAE = np.mean(error)\r\n    plt.title('Wolfram MAE= ' + '{:.9f}'.format(wolfram_MAE))\r\n    surfplot = ax.plot_surface(X, T, surf, vmin=-1, vmax=1)\r\n\r\n\r\ndef heatmap2d(arr: np.ndarray):\r\n    plt.imshow(arr, cmap='coolwarm')\r\n    plt.colorbar()\r\n    plt.show()\r\n\r\n\r\ndef callback_wolfram_comp(Xi, convergence=1):\r\n    surf = Xi.reshape([len(grid[0]), len(grid[1])])\r\n    f = plt.figure()\r\n    f.add_subplot(1, 2, 1)\r\n    plt.imshow(surf, cmap='coolwarm')\r\n    plt.colorbar()\r\n    f.add_subplot(1, 2, 2)\r\n    plt.imshow(wolfram_interp, cmap='coolwarm')\r\n    plt.colorbar()\r\n    plt.show(block=True)\r\n    return False\r\n\r\n\r\ndef callback_wolfram_error(Xi, convergence=1):\r\n    surf = Xi.reshape([len(grid[0]), len(grid[1])])\r\n    error = np.abs(wolfram_interp - surf)\r\n    max_err_val = np.max(error)\r\n    mean_err_val = np.mean(error)\r\n    max_err_list.append(max_err_val)\r\n    # mean_err_list.append(mean_err_val)\r\n    print('max err= ',max_err_val,' mean err= ',mean_err_val)\r\n    plt.imshow(error, cmap='coolwarm',vmin=0,vmax=max_err)\r\n    plt.title(\"Error\")\r\n    plt.colorbar()\r\n    # f = plt.figure()\r\n    # f.add_subplot(1, 2, 1)\r\n    # plt.plot(max_err_list)\r\n    # f.add_subplot(1, 2, 2)\r\n    # plt.plot(mean_err_list, c='orange')\r\n    plt.show()\r\n    return False\r\n\r\n\r\ndef callback_wolfram_error_anneal(Xi, f, stat):\r\n    surf = Xi.reshape([len(grid[0]), len(grid[1])])\r\n    error = np.abs(wolfram_interp - surf)\r\n    max_err_val = np.max(error)\r\n    mean_err_val = np.mean(error)\r\n    max_err_list.append(max_err_val)\r\n    mean_err_list.append(mean_err_val)\r\n    # print('max err= ',max_err_val,' mean err= 
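# The pyccel functions in Module_2.py above mutate their array arguments in
# place (the @types decorators only annotate signatures for compilation). A
# quick pure-Python check of f6 and h with NumPy:
import numpy as np

x = np.zeros((2, 3))
f6(2, 3, x)            # fills x[i, j] with 2*i + j
assert x[1, 2] == 4.0

y = np.zeros(5)
h(y)
assert y[2] == 8.0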
',mean_err_val)\r\n # plt.imshow(error, cmap='coolwarm',vmin=0,vmax=max_err)\r\n # plt.colorbar()\r\n if len(max_err_list) < 50:\r\n f = plt.figure()\r\n f.add_subplot(1, 2, 1)\r\n plt.plot(max_err_list)\r\n f.add_subplot(1, 2, 2)\r\n plt.plot(mean_err_list, c='orange')\r\n plt.show()\r\n else:\r\n f = plt.figure()\r\n f.add_subplot(1, 2, 1)\r\n plt.plot(max_err_list[-50:])\r\n f.add_subplot(1, 2, 2)\r\n plt.plot(mean_err_list[-50:], c='orange')\r\n plt.show()\r\n return False\r\n\r\n\r\nplt.rcParams[\"figure.max_open_warning\"] = 1000\r\n\r\narr = []\r\nstring = []\r\n\r\nx = np.linspace(0, 1, 21)\r\nt = np.linspace(0, 1, 21)\r\n\r\ngrid = numba.typed.List()\r\n\r\ngrid.append(x)\r\ngrid.append(t)\r\n\r\nwolfram = np.genfromtxt('wolfram.csv', delimiter=',')\r\n\r\nwolfram_grid = [np.linspace(0, 1, 1001), np.linspace(0, 1, 1001)]\r\n\r\nwolfram_interp = solution_interp(wolfram_grid, wolfram, grid)\r\narr = np.random.random((len(grid[0]), len(grid[1])))\r\n\r\n# arr=np.load('sln31up4.npy')\r\n\r\nmax_err = np.max(np.abs(wolfram_interp - arr))\r\n\r\nmax_err_list = []\r\nmean_err_list = []\r\n\r\npart_sln = np.zeros_like(arr)\r\n\r\n# part_sln[10:,0:11]=sln2\r\n# part_sln[0:11,10:]=sln3\r\n# part_sln[10:,10:]=sln4\r\n\r\n# plot_3D_surface(part_sln,None,grid)\r\n\r\nwolfram_interp = solution_interp(wolfram_grid, wolfram, grid)\r\n\r\n# plot_3D_surface(wolfram_interp, None, grid)\r\n\r\nbcond = [{'boundary': 0, 'axis': 0, 'string': np.zeros(len(grid[0]))},\r\n {'boundary': -1, 'axis': 0, 'string': np.zeros(len(grid[0]))},\r\n {'boundary': 0, 'axis': 1, 'string': np.sin(np.pi * grid[0])},\r\n {'boundary': -1, 'axis': 1, 'string': np.sin(np.pi * grid[0])}]\r\n\r\nopt = minimize(operator_norm, arr.reshape(-1), args=(grid, [[(1, 0, 2, 1)], [(-1 / 4, 1, 2, 1)]], 1, bcond),\r\n options={'disp': True, 'maxiter': 1000}, tol=0.05)\r\n\r\nsln = string_reshape(opt.x, grid)\r\n\r\nfull_sln_interp = solution_interp(grid, sln, grid)\r\n\r\nplot_3D_surface(full_sln_interp, wolfram_interp, grid)\r\n\r\n# callback_wolfram_error(opt.x)\r\n\r\n# x_new=np.linspace(0,1,41)\r\n# t_new=np.linspace(0,1,41)\r\n# \r\n# grid_new=[x_new,t_new]\r\n\r\n# sln_interp=solution_interp(grid,sln,grid_new)\r\n\r\n# X, T = np.meshgrid(x_new, t_new)\r\n# fig1 = plt.figure()\r\n# ax = fig1.gca(projection='3d')\r\n# surf = ax.plot_surface(X, T, sln_interp, rstride=1, cstride=1,\r\n# linewidth=0.1, antialiased=False)\r\n\r\n# np.save('sln41up3',sln_interp)\r\n\r\n\r\n# wolfram = np.genfromtxt('wolfram.csv', delimiter=',')\r\n\r\n\r\n# X, T = np.meshgrid(x, t)\r\n# fig1 = plt.figure()\r\n# ax = fig1.gca(projection='3d')\r\n# surf = ax.plot_surface(X, T, wolfram, rstride=1, cstride=1,\r\n# linewidth=0, antialiased=False,cmap=plt.cm.coolwarm)\r\n\r\n# X, T = np.meshgrid(x, t)\r\n# fig1 = plt.figure()\r\n# ax = fig1.gca(projection='3d')\r\n# surf = ax.plot_surface(X, T, wolfram-sln, rstride=1, cstride=1,\r\n# linewidth=0, antialiased=False,cmap=plt.cm.coolwarm)\r\n\r\n# print('Error= ',np.linalg.norm(wolfram-sln))\r\n","sub_path":"differential_opt_5.py","file_name":"differential_opt_5.py","file_ext":"py","file_size_in_byte":5403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"395234645","text":"import array\r\nimport collections as coll\r\nimport test_utility as testutil\r\nimport promact_is_py as pmact\r\n\r\nimport spi_io as spiio\r\nimport cmd_protocol_2 as protocol\r\nimport eeprom_devices\r\nimport eeprom_map\r\n\r\n\r\n\r\n\r\nPWR_3_3V = 3.3\r\nPWR_1_8V = 
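# differential_opt_5.py above compares its coarse-grid solution against a
# 1001x1001 Wolfram reference through the imported solution_interp() helper,
# whose implementation is not shown. One plausible way to resample a fine
# reference onto a coarse grid with SciPy -- an assumption, not the actual
# PDE_solver code:
import numpy as np
from scipy.interpolate import RegularGridInterpolator

def resample(src_grid, src_values, dst_grid):
    """Interpolate src_values (sampled on src_grid) onto dst_grid."""
    interp = RegularGridInterpolator(src_grid, src_values)
    xx, tt = np.meshgrid(dst_grid[0], dst_grid[1], indexing='ij')
    points = np.stack([xx.ravel(), tt.ravel()], axis=-1)
    return interp(points).reshape(xx.shape)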
1.8\r\n\r\n\r\n\r\n\r\n\r\nclass eepromAPI:\r\n \r\n EEPROM_PROTECT_BITMAP_SIZE = 18\r\n EEPROM_PAGE_SIZE = 0x100\r\n EEPROM_SECTOR_SIZE = 0x1000\r\n EEPROM_SIZE = 0x400000\r\n \r\n\r\n \r\n '''\r\n EESTATUS_BUSY1 and EESTATUS_W_ENABLE_LATCH are the same\r\n status register bits for both Microchip and Micron devices.\r\n '''\r\n \r\n EESTATUS_BUSY1 = 0x1\r\n EESTATUS_W_ENABLE_LATCH = 0x2\r\n '''\r\n EESTATUS_W_SUSPEND_ERASE = 0x4\r\n EESTATUS_W_SUSPEND_PROGRAM = 0x8\r\n EESTATUS_W_PROTECT_LOCKDOWN = 0x10\r\n EESTATUS_SECURITY_ID = 0x20\r\n EESTATUS_RESERVED = 0x40\r\n EESTATUS_BUSY80 = 0x80\r\n '''\r\n EESTATUS_READ_ERROR = 0x8000\r\n \r\n m_testutil = None\r\n m_jedec_id = None\r\n m_device_map = None\r\n m_devconfig = None\r\n m_spiio = None\r\n m_micron_status = None\r\n m_4byte_addr_mode = None\r\n\r\n \r\n def __init__(self):\r\n self.m_testutil = testutil.testUtil()\r\n self.m_spiio = spiio.spiIO()\r\n self.m_4byte_addr_mode = False\r\n\r\n\r\n\r\n \r\n def getobjectSpiIO(self):\r\n return self.m_spiio\r\n \r\n \r\n def configure(self):\r\n self.testJedec()\r\n\r\n mfgrname=self.m_devconfig.mfgr\r\n #chipname=self.m_devconfig.chip_type\r\n #memsize_MB=self.m_devconfig.memsize/(1024*1024)\r\n if mfgrname.upper() == 'MICRON':\r\n self.m_device_map=eeprom_map.deviceMap(eeprom_map.MICRON_EEPROM_BLOCKS)\r\n elif mfgrname.upper() == 'MICROCHIP':\r\n self.m_device_map=eeprom_map.deviceMap(eeprom_map.MICROCHIP_EEPROM_BLOCKS)\r\n else:\r\n self.m_testutil.fatalError(\"Unrecognized EEPROM\")\r\n \r\n self.m_testutil.bufferDetailInfo(\"Block/Sector Maps for %s initialized\" % mfgrname, True) \r\n \r\n \r\n \r\n \r\n def doJedecTest(self, cmd_byte): \r\n rxdata_array=array.ArrayType('B', [0]*3)\r\n spi_result = self.m_spiio.spiMasterMultimodeCmd(cmd_byte, None, 3, rxdata_array)\r\n if spi_result.xfer_length != 3:\r\n self.m_testutil.fatalError(\"error: jedec read\")\r\n\r\n return self.devConfigDefined(rxdata_array.tolist())\r\n \r\n '''\r\n verify the JEDEC ID of the device is in the targeted\r\n set of devices\r\n SAVE recognized JEDEC ID\r\n predefine the target eeprom + configuration if the jedec \r\n id cannot be read.\r\n '''\r\n m_hard_code_eeprom_config = False\r\n \r\n def hardSetTgtEEPROM(self, eeprom_configuration):\r\n if eeprom_configuration != None:\r\n self.m_hard_code_eeprom_config = True\r\n self.m_devconfig = eeprom_configuration\r\n \r\n\r\n def devConfigDefined(self, jedec_id):\r\n if self.m_hard_code_eeprom_config:\r\n self.m_jedec_id=self.m_devconfig.jedec\r\n return True\r\n\r\n for devconfig in eeprom_devices.eepromDevices:\r\n dev_jedec=devconfig.jedec\r\n \r\n for index in range(3):\r\n if dev_jedec[index]!=jedec_id[index]:\r\n break\r\n elif index==2:\r\n self.m_devconfig=devconfig\r\n self.m_jedec_id=devconfig.jedec\r\n return True\r\n\r\n self.m_jedec_id=None\r\n return False\r\n \r\n def testQuadJedec(self):\r\n return self.doJedecTest(protocol.SPICMD_QUAD_JID)\r\n \r\n def testJedec(self):\r\n return self.doJedecTest(protocol.SPICMD_JEDEC_ID)\r\n \r\n \r\n def testNOP(self):\r\n result = self.m_spiio.spiMasterMultimodeCmd(protocol.SPICMD_NOP)\r\n return result==0\r\n \r\n def statusBusy(self):\r\n self.readStatusRegister()\r\n return ((self.m_eepromStatus & self.EESTATUS_BUSY1) != 0)\r\n \r\n def waitUntilNotBusy(self):\r\n while self.statusBusy():\r\n continue\r\n return\r\n \r\n def readStatusRegister(self):\r\n data_array = pmact.array_u08(1)\r\n \r\n spi_result = self.m_spiio.spiMasterMultimodeCmd(protocol.SPICMD_RDSR,\r\n None,\r\n len(data_array),\r\n 
data_array)\r\n self.m_eepromStatus = None\r\n data_in_length=spi_result.xfer_length\r\n \r\n if data_in_length>=1:\r\n #offset=len(data_array)-data_in_length\r\n self.m_eepromStatus = data_array[0]\r\n return self.m_eepromStatus\r\n \r\n self.m_testutil.fatalError(\"ReadStatusRegister error\")\r\n return self.EESTATUS_READ_ERROR\r\n \r\n def nvConfigStatus(self,mask, shift, read_now=False):\r\n register_bytes=array.ArrayType('B', [0]*2)\r\n _spi_result=self.m_spiio.spiMasterMultimodeCmd(protocol.SPICMD_RNVCFG, None, 2, register_bytes)\r\n register=register_bytes[0]+ register_bytes[1]*256 \r\n return (register >> shift) & mask\r\n \r\n BitField=coll.namedtuple('BitField', 'mask shift')\r\n\r\n nvcfg_bitfield__not_long_address = BitField(mask=0b1, shift=0)\r\n nvcfg_bitfield__not_dual_io = BitField(mask=0b1, shift=1)\r\n nvcfg_bitfield__not_quad_io = BitField(mask=0b1, shift=2)\r\n nvcfg_bitfield__hldrst_disabled = BitField(mask=0b1, shift=3)\r\n nvcfg_bitfield__dtr_disabled = BitField(mask=0b1, shift=4)\r\n nvcfg_bitfield__rsthld_disabled = BitField(mask=0b1, shift=5)\r\n nvcfg_bitfield__drv_strength = BitField(mask=0b111, shift=6)\r\n nvcfg_bitfield__xip_mode = BitField(mask=0b111, shift=9)\r\n nvcfg_bitfield__dummy_clock_cycles= BitField(mask=0b111, shift=12)\r\n \r\n def update_nvcfg_bitfield(self, field:BitField, field_value):\r\n register_bytes=array.ArrayType('B', [0]*2)\r\n _spi_result=self.m_spiio.spiMasterMultimodeCmd(protocol.SPICMD_RNVCFG, None, 2, register_bytes)\r\n register=register_bytes[0]+ register_bytes[1]*256\r\n register = register & ~(field.mask << field.shift)\r\n register = register | (field_value << field.shift)\r\n register_bytes[0]=register&0xff\r\n register_bytes[1]=register//256\r\n _spi_result=self.m_spiio.spiMasterMultimodeCmd(protocol.SPICMD_WNVCFG, None, 2, register_bytes)\r\n return \r\n\r\n \r\n def setLongAddressMode(self, enable_long_addresses):\r\n self.update_nvcfg_bitfield(self.nvcfg_bitfield__not_long_address, 0)\r\n self.m_4byte_addr_mode=self.longAddressMode()\r\n return self.m_4byte_addr_mode\r\n pass\r\n \r\n\r\n\r\n \r\n def dualIoModeEnabled(self):\r\n field=self.nvcfg_bitfield__not_dual_io\r\n return not self.nvConfigStatus(field.mask, field.shift)\r\n \r\n def quadIoModeEnabled(self):\r\n field=self.nvcfg_bitfield__not_quad_io\r\n return not self.nvConfigStatus(field.mask, field.shift)\r\n \r\n def holdResetDisabled(self):\r\n field=self.nvcfg_bitfield__hldrst_disabled\r\n return not self.nvConfigStatus(field.mask, field.shift)\r\n \r\n def dtrIoModeEnabled(self):\r\n field=self.nvcfg_bitfield__dtr_disabled\r\n return not self.nvConfigStatus(field.mask, field.shift)\r\n \r\n def longAddressMode(self):\r\n field=self.nvcfg_bitfield__not_long_address\r\n return not self.nvConfigStatus(field.mask, field.shift)\r\n \r\n def driverStrength(self):\r\n field=self.nvcfg_bitfield__drv_strength\r\n return self.nvConfigStatus(field.mask, field.shift)\r\n \r\n def xipIoMode(self):\r\n field=self.nvcfg_bitfield__xip_mode\r\n return self.nvConfigStatus(field.mask, field.shift)\r\n \r\n def dummyCycles(self):\r\n field=self.nvcfg_bitfield__xip_mode\r\n cycle_code=self.nvConfigStatus(field.mask, field.shift)\r\n \r\n if cycle_code == 0xf:\r\n cycle_code = 0\r\n return cycle_code\r\n \r\n \r\n def readMicronStatusRegisters(self):\r\n if self.m_devconfig.mfgr!='Micron':\r\n self.m_testutil.fatalError(\"Micron Tech. 
Devices Only\")\r\n \r\n status_val=array.ArrayType('B', [0])\r\n status_val2=array.ArrayType('B', [0, 0])\r\n _spi_result=self.m_spiio.spiMasterMultimodeCmd(protocol.SPICMD_RFLAG, None, 1, status_val)\r\n flagstatus=status_val[0]\r\n \r\n _spi_result=self.m_spiio.spiMasterMultimodeCmd(protocol.SPICMD_RNVCFG, None, 2, status_val2)\r\n nvconfig= status_val2[0] + (status_val2[1]*256)\r\n \r\n _spi_result=self.m_spiio.spiMasterMultimodeCmd(protocol.SPICMD_RVCFG, None, 1, status_val)\r\n vconfig=status_val[0]\r\n \r\n _spi_result=self.m_spiio.spiMasterMultimodeCmd(protocol.SPICMD_RENHVCFG, None, 1, status_val)\r\n enhvconfig=status_val[0]\r\n \r\n self.m_micron_status=eeprom_devices.micronStatus(flag_status=flagstatus, nv_config=nvconfig, v_config=vconfig, enh_v_config=enhvconfig)\r\n return self.m_micron_status\r\n \r\n def readData(self, read_address, read_length, read_array):\r\n if self.m_4byte_addr_mode:\r\n read_cmd=protocol.SPICMD_READ4A\r\n else:\r\n read_cmd=protocol.SPICMD_READ\r\n \r\n spi_result = self.m_spiio.spiMasterMultimodeCmd( read_cmd,\r\n read_address,\r\n read_length,\r\n read_array)\r\n data_in_length = spi_result.xfer_length\r\n \r\n if data_in_length==read_length:\r\n return True\r\n self.m_testutil.fatalError(\"SpiReadData error\")\r\n \r\n\r\n def highspeedReadData(self, read_address, read_length, read_array):\r\n if self.m_4byte_addr_mode:\r\n hsread_cmd=protocol.SPICMD_HSREAD4A\r\n else:\r\n hsread_cmd=protocol.SPICMD_HSREAD\r\n \r\n spi_result = self.m_spiio.spiMasterMultimodeCmd( hsread_cmd,\r\n read_address,\r\n read_length,\r\n read_array)\r\n data_in_length = spi_result.xfer_length\r\n \r\n if data_in_length==read_length:\r\n return True\r\n \r\n self.m_testutil.fatalError(\"SpiReadData error\")\r\n\r\n\r\n def readDataDual(self, read_address, read_length, read_array):\r\n\r\n# spi_result = self.m_spiio.spiMasterMultimodeCmd(protocol.SPICMD_SDOREAD,\r\n if self.m_4byte_addr_mode:\r\n dual_read_cmd=protocol.SPICMD_SDOREAD4A\r\n else:\r\n dual_read_cmd=protocol.SPICMD_SDOREAD\r\n \r\n spi_result = self.m_spiio.spiMasterMultimodeCmd(dual_read_cmd,\r\n read_address,\r\n read_length,\r\n read_array)\r\n\r\n result_length = spi_result.xfer_length\r\n \r\n if result_length==read_length:\r\n return True\r\n self.m_testutil.fatalError(\"SpiReadDual error\") \r\n\r\n\r\n '''\r\n def writeEnable(self):\r\n spi_result = self.m_spiio.spiMasterMultimodeCmd(protocol.SPICMD_WREN)\r\n return spi_result.success\r\n '''\r\n \r\n def readBlockProtectBitmap(self):\r\n self.m_block_protect_bitmap = pmact.array_u08(18)\r\n spi_result = self.m_spiio.spiMasterMultimodeCmd(protocol.SPICMD_RBPR,\r\n None, len(self.m_block_protect_bitmap),\r\n self.m_block_protect_bitmap)\r\n data_in_length=spi_result.xfer_length\r\n if data_in_length==18:\r\n return True\r\n else:\r\n self.m_testutil.fatalError(\"Protect Bitmap Read fail\")\r\n\r\n \r\n def getBlockProtectBitmap(self):\r\n return self.m_block_protect_bitmap\r\n\r\n def setBlockProtectBitmap(self, bitmap):\r\n if type(bitmap)==array.ArrayType and len(bitmap) == self.EEPROM_PROTECT_BITMAP_SIZE:\r\n self.m_block_protect_bitmap=bitmap\r\n else:\r\n self.m_testutil.fatalError(\"Unsupported Bitmap Array Size\")\r\n\r\n def eraseSector(self, address):\r\n if self.m_device_map.sectorWriteStatus(address) == eeprom_map.WRITESTAT_ERASED:\r\n return True\r\n \r\n self.waitUntilNotBusy()\r\n sector_address=self.m_device_map.sectorAddress(address)\r\n if self.m_4byte_addr_mode:\r\n se_cmd=protocol.SPICMD_SE4A\r\n else:\r\n 
se_cmd=protocol.SPICMD_SE\r\n \r\n spi_result = self.m_spiio.spiMasterMultimodeCmd(se_cmd, sector_address)\r\n\r\n if spi_result.success:\r\n self.m_device_map.setSectorWriteStatus(sector_address, eeprom_map.WRITESTAT_ERASED)\r\n \r\n return spi_result.success\r\n\r\n\r\n def eraseBlock(self, address):\r\n if self.m_device_map.blockWriteStatus(address) == eeprom_map.WRITESTAT_ERASED:\r\n return True\r\n \r\n block_address = self.m_device_map.blockAddress(address)\r\n self.waitUntilNotBusy()\r\n \r\n spi_result = self.m_spiio.spiMasterMultimodeCmd(protocol.SPICMD_BE, block_address)\r\n if spi_result.success:\r\n self.m_device_map.setBlockWriteStatus(block_address, eeprom_map.WRITESTAT_ERASED)\r\n \r\n return spi_result.success\r\n \r\n \r\n '''\r\n Write Data, Erasing as we go.\r\n Sector Oriented Algorithm: smallest universally avialble size is 4Kbytes (sector)\r\n \r\n Split write array into slices of array along sector boundaries\r\n Per slice, verify the pages affected are writable\r\n When not writable - erase the sector bearing the data\r\n \r\n CAVEAT: pre-existing sector data that is not addressed by the slice\r\n will be lost to the erasure!\r\n example 1: a gap exists between the sector start and the slice start.\r\n example 2: a gap exists between the sector end, and the slice end.\r\n outcome1: data written in that gap is lost: written gap pages.\r\n outcome2: the gap is unwritten, no data is lost: gap pages were unwritten.\r\n \r\n Efficiency ?:\r\n the deviceMap keeps track of data status on a page granularity, even though\r\n pages cannot be singly erased.\r\n when a write ends in the middle of a sector, and a later writes are limited\r\n to unwritten pages in that sector, the full sector need not be erased.\r\n \r\n this permits writing to the eeprom with contiguous commands WITHOUT minding\r\n whether the boundaries of the write commands is on sector boundaries.\r\n ''' \r\n\r\n '''\r\n Algorithm:\r\n catalog write segments along sector boundaries\r\n for each sub-sector: \r\n verify pages-to-be-written are writable\r\n if fractional-write of sector\r\n verify sectors-to-be-written are writable\r\n else flag the sector for erasure\r\n \r\n erase flagged sub-sectors\r\n \r\n write entire data bloc\r\n '''\r\n SectorSlice=coll.namedtuple('SectorSlice', 'sector_address write_offset array_offset length')\r\n \r\n def writePages(self, write_address, write_length, write_array):\r\n # pre-erase any sectors, as needed\r\n\r\n\r\n sector_slices=[]\r\n erase_sectors=[]\r\n start_write_address=write_address\r\n end_write_address=write_address+write_length-1\r\n \r\n while start_write_address <= end_write_address:\r\n sectr_address= self.m_device_map.sectorAddress(start_write_address)\r\n sectr_offset = start_write_address-sectr_address\r\n \r\n slice_length = min([ eeprom_map.SECTOR_SIZE-sectr_offset,\r\n end_write_address-start_write_address+1 ])\r\n\r\n sector_slices.append(self.SectorSlice(\r\n sector_address = sectr_address,\r\n write_offset = sectr_offset,\r\n array_offset = start_write_address-write_address,\r\n length = slice_length))\r\n\r\n start_write_address+=slice_length\r\n\r\n # what sectors to erase\r\n for sector_slice in sector_slices:\r\n start_address=write_address+sector_slice.array_offset\r\n end_address=start_address+sector_slice.length-1\r\n\r\n if sector_slice.length != eeprom_map.SECTOR_SIZE:\r\n writestatus=self.m_device_map.subSectorWriteStatus(start_address, end_address)\r\n else:\r\n 
writestatus=self.m_device_map.sectorWriteStatus(sector_slice.sector_address)\r\n\r\n            if writestatus!=eeprom_map.WRITESTAT_ERASED:\r\n                erase_sectors.append(sector_slice.sector_address)\r\n            \r\n        # erase WRITTEN sectors\r\n        for sector_address in erase_sectors:\r\n            if self.eraseSector(sector_address):\r\n                continue\r\n            else:\r\n                return False\r\n\r\n\r\n        '''\r\n        All erasure is complete\r\n        Write data out page-by-page\r\n        '''\r\n        written_length=0\r\n        for sector_slice in sector_slices:\r\n            sector_write_address=sector_slice.sector_address+sector_slice.write_offset\r\n            sector_write_length=sector_slice.length\r\n            sector_array=write_array[written_length:written_length+sector_write_length]\r\n            \r\n            # writeWithinSector() returns True on success, so abort on a False result\r\n            if not self.writeWithinSector(sector_write_address, sector_write_length, sector_array):\r\n                return False\r\n            \r\n            written_length+=sector_write_length\r\n\r\n        return True\r\n\r\n    \r\n    def writeWithinSector(self, write_address, write_length, write_array):\r\n        # Mark every page touched by this write as dirty in the device map\r\n        page_address = self.m_device_map.pageAddress(write_address)\r\n        end_page_address = self.m_device_map.pageAddress(write_address+write_length-1)\r\n        \r\n        while page_address <= end_page_address:\r\n            self.m_device_map.setPageDirty(page_address)\r\n            page_address+=eeprom_map.PAGE_SIZE\r\n        \r\n        self.waitUntilNotBusy()\r\n        if self.m_4byte_addr_mode:\r\n            page_program_cmd=protocol.SPICMD_PP4A\r\n        else:\r\n            page_program_cmd=protocol.SPICMD_PP\r\n        \r\n        spi_result =self.m_spiio.spiMasterMultimodeCmd(page_program_cmd,\r\n                                                       write_address,\r\n                                                       write_length,\r\n                                                       write_array)\r\n\r\n        result_length=spi_result.xfer_length\r\n        return (result_length == write_length)\r\n\r\n    \r\n    \r\n    def writeBlockProtectBitmap(self):\r\n        if ( type(self.m_block_protect_bitmap) == array.ArrayType and \r\n            len(self.m_block_protect_bitmap) == self.EEPROM_PROTECT_BITMAP_SIZE):\r\n            spi_result = self.m_spiio.spiMasterMultimodeCmd(protocol.SPICMD_WBPR,\r\n                                None, len(self.m_block_protect_bitmap),\r\n                                self.m_block_protect_bitmap)\r\n            if spi_result.xfer_length>=len(self.m_block_protect_bitmap):\r\n                return True\r\n        \r\n        self.m_testutil.fatalError(\"protect bitmap write failure\")\r\n        \r\n\r\n    def unlockDevice(self):\r\n        if self.m_devconfig.mfgr=='Micron':\r\n            return self.unlockMicronDevice()\r\n        \r\n        if self.m_devconfig.mfgr=='Microchip':\r\n            return self.unlockMicrochipDevice()\r\n        \r\n        self.m_testutil.fatalError('unrecognized device')\r\n        \r\n    def unlockMicronDevice(self):\r\n\r\n        if self.globalUnlock() == False:\r\n            self.m_testutil.fatalError(\"Global Unlock Command Failed\")\r\n        \r\n        return True\r\n\r\n    def globalUnlock(self):\r\n        spi_result = self.m_spiio.spiMasterMultimodeCmd(protocol.SPICMD_ULBPR)\r\n        return spi_result.success\r\n        \r\n    def unlockMicrochipDevice(self):\r\n\r\n        debug=False\r\n        \r\n        if self.readBlockProtectBitmap() == False:\r\n            self.m_testutil.fatalError(\"Protect Bitmap Read Failed\")\r\n\r\n        block_protect_bitmap=self.getBlockProtectBitmap()\r\n        \r\n        if debug:\r\n            self.m_testutil.printArrayHexDump(\"Initial Protect Bitmap Array\", block_protect_bitmap)\r\n\r\n        if self.globalUnlock() == False:\r\n            self.m_testutil.fatalError(\"Global Unlock Command Failed\")\r\n        \r\n        if self.readBlockProtectBitmap() == False:\r\n            self.m_testutil.fatalError(\"Protect Bitmap Read Failed\")\r\n        \r\n        # refresh the local reference; readBlockProtectBitmap() allocates a new array,\r\n        # so summing the old one would test the pre-unlock state\r\n        block_protect_bitmap = self.getBlockProtectBitmap()\r\n        bitmap_sum = 0\r\n\r\n        for entry in block_protect_bitmap:\r\n            bitmap_sum += entry\r\n        \r\n        # a zero sum means every block-protect bit read back cleared, i.e. the whole array is writable\r\n        eeprom_unlocked= (bitmap_sum == 0)\r\n        \r\n        if not eeprom_unlocked:\r\n            #self.m_testutil.fatalError(\"Global Unlock 
Failed\")\r\n if debug:\r\n self.m_testutil.printArrayHexDump(\"Unlocked Protect Bitmap Array\", block_protect_bitmap)\r\n\r\n self.setBlockProtectBitmap(self.m_testutil.zeroedArray(self.EEPROM_PROTECT_BITMAP_SIZE))\r\n if debug:\r\n self.m_testutil.printArrayHexDump(\"ZEROED Protect Bitmap Array\", block_protect_bitmap)\r\n\r\n if ( self.writeBlockProtectBitmap()\r\n and self.readBlockProtectBitmap() ):\r\n block_protect_bitmap = self.getBlockProtectBitmap()\r\n if debug:\r\n self.m_testutil.printArrayHexDump(\"Post Update Protect Bitmap Array\", block_protect_bitmap)\r\n else:\r\n self.m_testutil.fatalError(\"block protect bitmap acquisition failed\")\r\n\r\n return True\r\n \r\n '''\r\n setTargetPowerVoltages\r\n Promira supplies two distinct power rails.\r\n pins 2 and 4 (vtgt1, vtgt2) supply either 3.3 or 5.0 v\r\n pins 22, and 24 (vtgt3, vtgt4) supply a voltage in the range 0.9 to 3.45 v\r\n the latter takes a 32bit float, instead of an integer setting code.\r\n '''\r\n\r\n \r\n","sub_path":"promira.siotest/Src/eeprom_2.py","file_name":"eeprom_2.py","file_ext":"py","file_size_in_byte":20341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"562302357","text":"# Get Facebook's bAbi dataset\nfrom utils import maybe_download\nfrom shutil import rmtree\nimport os\nimport tarfile\n\ndef get_babi_en(get_10k=False):\n data_dir = \"datasets/tasks_1-20_v1-2/en/\"\n if get_10k == True:\n data_dir = \"datasets/tasks_1-20_v1-2/en-10k/\"\n \n maybe_download('https://s3.amazonaws.com/text-datasets/babi_tasks_1-20_v1-2.tar.gz', 'datasets', 11745123)\n file = tarfile.open(\"datasets/babi_tasks_1-20_v1-2.tar.gz\", \"r:gz\")\n file.extractall(\"datasets\")\n file.close()\n print(\"Some housekeeping...\")\n if not os.path.exists(\"datasets/babi\"):\n os.makedirs(\"datasets/babi\")\n for path, dir, files in os.walk(data_dir):\n for file in files:\n os.rename(os.path.join(data_dir, file), os.path.join(\"datasets/babi\", file)) \n os.remove(\"datasets/babi_tasks_1-20_v1-2.tar.gz\")\n rmtree(\"datasets/tasks_1-20_v1-2\")\n print(\"Finished.\")","sub_path":"memn2n/babi_utils.py","file_name":"babi_utils.py","file_ext":"py","file_size_in_byte":925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"18475228","text":"from flask import Blueprint, render_template, redirect, url_for\n\nfrom . 
import admin, utils, inspect_services\n\n\nbp = Blueprint(\"home\", __name__)\n\n\n@bp.route('/')\ndef home():\n    crsid = utils.auth.principal\n\n    try:\n        mem = utils.get_member(crsid)\n    except KeyError:\n        return redirect(url_for('signup.signup'))\n    if not mem.user:\n        return redirect(url_for('member.reactivate'))\n\n    inspect_services.lookup_all(mem, fast=True)\n    for soc in mem.societies:\n        inspect_services.lookup_all(soc, fast=True)\n\n    job_counts = None\n    if utils.is_admin(mem):\n        job_counts = [(key, count) for key, count in admin.job_counts()\n                      if key in (\"unapproved\", \"queued\", \"running\") and count > 0]\n\n    return render_template(\"home.html\", member=mem, job_counts=job_counts)\n\n@bp.route('/logout')\ndef logout():\n    utils.auth.logout()\n    return redirect(utils.DOMAIN_WEB, code=303)\n","sub_path":"control/webapp/home.py","file_name":"home.py","file_ext":"py","file_size_in_byte":922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"614055783","text":"\"\"\"Task 1: Hexadecimal to binary (using a function)\n\nWrite a program that converts a number (possibly negative) written in\nhexadecimal notation into binary.\n\nInput\nThe input line contains the hexadecimal representation of an integer.\n\nOutput\nThe program must print this number in binary notation.\n\n\"\"\"\n\n\ndef hex_in_bin(N):\n    \"\"\"Convert a hexadecimal string (possibly negative) to a binary string.\"\"\"\n    if N[0] == '-':\n        # bin() of a negative int yields '-0b...', so keep the sign and drop '-0b'\n        return '-' + bin(int(N, 16))[3:]\n    # bin() of a non-negative int yields '0b...', so drop the '0b' prefix\n    return bin(int(N, 16))[2:]\n\n\nN = input()\n\nprint(hex_in_bin(N))\n","sub_path":"Foxford 8-10 class/lesson6_task2.py","file_name":"lesson6_task2.py","file_ext":"py","file_size_in_byte":976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"28969276","text":"import get_api_data,git_local\nimport pandas as pd\nfrom flatten_dict import flatten\nfrom datetime import datetime\nfrom dateutil import relativedelta\nimport random as rd\nimport os\n\n\nclass Use_Case(object):\n\n    def __init__(self,repo_owner,git_url,repo_name):\n        self.repo_owner = repo_owner\n        self.git_url = git_url\n        self.repo_name = repo_name\n        self.local = git_local.git_local(self.git_url,self.repo_name)\n        self.local.clone_repo()\n        self.get_commits()\n        self.get_committed()\n        self.get_diffs()\n        self.generate_file_timeline()\n\n\n    def get_commits(self):\n        self.commits = self.local.get_commits()\n        self.commit_df = pd.DataFrame(self.commits, columns = ['commit_id','commit_time','author','author_email',\n                                     'committer','committer_email','comments','commit_parent','bug_fixing'])\n\n    def get_committed(self):\n        self.committed_files = self.local.get_committed_files()\n        self.committed_files_df = pd.DataFrame(self.committed_files, columns = ['commit_id','file_id','modification_type',\n                                      'file_name','bug_fixing'])\n    \n    def get_diffs(self):\n        self.diffs = self.local.get_diffs(self.commit_df.commit_id.values.tolist())\n\n    def generate_file_timeline(self):\n        self.latest_commit_time = {}\n        self.oldest_commit_time = {}\n        for commit in self.commits:\n            if commit[0] == 'ea6bdef57db2906323deeaa648cabab69ebbdefc':\n                continue\n            try:\n                for file_id in self.diffs[commit[0]]['files'].keys():\n                    if self.diffs[commit[0]]['files'][file_id]['file_path'] not in self.latest_commit_time:\n                        self.latest_commit_time[self.diffs[commit[0]]['files'][file_id]['file_path']] = commit[1]\n                        
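# First sighting of this path: it starts out as both the newest and the oldest\n                        # known change time for the file; later commits adjust either bound below.\n                        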
self.oldest_commit_time[self.diffs[commit[0]]['files'][file_id]['file_path']] = commit[1]\n else:\n if self.latest_commit_time[self.diffs[commit[0]]['files'][file_id]['file_path']] < commit[1]:\n self.latest_commit_time[self.diffs[commit[0]]['files'][file_id]['file_path']] = commit[1]\n if self.oldest_commit_time[self.diffs[commit[0]]['files'][file_id]['file_path']] > commit[1]:\n self.oldest_commit_time[self.diffs[commit[0]]['files'][file_id]['file_path']] = commit[1]\n except ValueError:\n print(commit[0])\n continue\n return \n\n \n def UC1(self):\n uc1 = []\n for commit in self.commits:\n try:\n for file_id in self.diffs[commit[0]]['files'].keys():\n file = os.getcwd() + '/temp_repo/' + self.repo_name + '/' + self.diffs[commit[0]]['files'][file_id]['file_path']\n exists = os.path.isfile(file)\n if exists:\n # Store configuration file values\n latest = datetime.strptime(self.latest_commit_time[self.diffs[commit[0]]['files'][file_id]['file_path']],'%Y-%m-%d %H:%M:%S')\n oldest = datetime.strptime(self.oldest_commit_time[self.diffs[commit[0]]['files'][file_id]['file_path']],'%Y-%m-%d %H:%M:%S')\n current = datetime.strptime(commit[1],'%Y-%m-%d %H:%M:%S')\n # Normalization based on ration between delta of oldest chnage and latest change\n latest_diff = abs(relativedelta.relativedelta(current,latest).months)\n oldest_diff = abs(relativedelta.relativedelta(current,oldest).months)\n total_diff = abs(relativedelta.relativedelta(latest,oldest).months)\n if latest_diff == 0:\n latest_diff = 1\n if oldest_diff == 0:\n oldest_diff = 1 \n if total_diff == 0:\n total_diff = 1\n time_norm = (oldest_diff/latest_diff)/total_diff\n\n uc1.append([commit[0],commit[1],commit[2],commit[3],\n self.diffs[commit[0]]['files'][file_id]['file_path'],\n self.diffs[commit[0]]['files'][file_id]['new_lines']*time_norm])\n else:\n # Keep presets\n continue\n except KeyError:\n continue\n uc1_df = pd.DataFrame(uc1,columns = ['commit_id','commit_time','AuthorName','AuthorEmail','FilePath','Score'])\n uc1_df_matrix = uc1_df.drop(labels = ['commit_time','commit_id'],axis = 1)\n uc1_df_dev_sum = uc1_df_matrix.groupby(['FilePath','AuthorEmail','AuthorName'],as_index=False).sum()\n uc1_df_dev_sum.sort_values(by=['FilePath','Score'],inplace = True,ascending = False)\n uc1_df_dev_sum['LinesOfCode'] = [0]*uc1_df_dev_sum.shape[0]\n uc1_df_dev_sum['FileSizeBytes'] = [0]*uc1_df_dev_sum.shape[0]\n uc1_df_dev_sum['LastModified'] = [0]*uc1_df_dev_sum.shape[0]\n uc1_df_dev_sum['RepoName'] = [0]*uc1_df_dev_sum.shape[0]\n uc1_df_dev_sum['RepoUrl'] = [0]*uc1_df_dev_sum.shape[0]\n for i in range(uc1_df_dev_sum.shape[0]):\n try:\n file = os.getcwd() + '/temp_repo/' + self.repo_name + '/' + uc1_df_dev_sum.iloc[i,0]\n file_size = os.path.getsize(file)\n num_lines = sum(1 for line in open(file))\n except:\n file_size = 0\n num_lines = 0\n print(\"Not there\")\n continue\n uc1_df_dev_sum.iloc[i,4] = num_lines\n uc1_df_dev_sum.iloc[i,5] = file_size\n uc1_df_dev_sum.iloc[i,6] = self.latest_commit_time[uc1_df_dev_sum.iloc[i,0]]\n uc1_df_dev_sum.iloc[i,7] = self.repo_name\n uc1_df_dev_sum.iloc[i,8] = self.git_url\n return uc1_df_dev_sum\n\n \n def UC2(self):\n temp_df = self.committed_files_df\n temp_df = temp_df.drop(labels = ['file_id','modification_type'],axis = 1)\n uc2 = {}\n total_file_changed = {}\n for commit in temp_df.commit_id.unique():\n files = temp_df[temp_df['commit_id'] == commit].file_name.values.tolist()\n for file_s in files:\n _file = os.getcwd() + '/temp_repo/' + self.repo_name + '/' + file_s\n exists = os.path.isfile(_file)\n 
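# Co-change illustration: if file_s was modified in 10 commits and file_d was\n                # touched in 4 of those, uc2[file_s][file_d] becomes 4 and the resulting\n                # NormalizedChange is 4/10 = 0.4 (see the matrix assembly below).\n                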
if exists:\n if file_s not in uc2:\n uc2[file_s] = {}\n total_file_changed[file_s] = 1\n else:\n total_file_changed[file_s] += 1\n for file_d in files:\n if file_s == file_d:\n continue\n else:\n if file_d not in uc2[file_s]:\n uc2[file_s][file_d] = 1\n else:\n uc2[file_s][file_d] += 1\n uc2_matrix = []\n for file_s in uc2:\n for file_d in uc2[file_s]:\n uc2_matrix.append([file_s,file_d,uc2[file_s][file_d],uc2[file_s][file_d]/total_file_changed[file_s],self.repo_name,self.git_url])\n uc2_matrix_df = pd.DataFrame(uc2_matrix, columns = ['SourceFilePath','DestinationFilePath','NoOfTimeChanged','NormalizedChange','RepoName','RepoUrl'])\n return uc2_matrix_df\n\n\n\n def UC3(self):\n temp_df = self.committed_files_df\n temp_df = temp_df.drop(labels = ['file_id','modification_type'],axis = 1)\n total_file_changed = {}\n total_bugs = {}\n for commit in temp_df.commit_id.unique():\n files = temp_df[temp_df['commit_id'] == commit].file_name.values.tolist()\n buggy = temp_df[temp_df['commit_id'] == commit].bug_fixing.values.tolist()[0]\n for file_s in files:\n _file = os.getcwd() + '/temp_repo/' + self.repo_name + '/' + file_s\n exists = os.path.isfile(_file)\n if exists:\n if file_s not in total_file_changed:\n total_file_changed[file_s] = 1\n total_bugs[file_s] = 0\n if buggy:\n total_bugs[file_s] += 1\n else:\n total_file_changed[file_s] += 1\n if buggy:\n total_bugs[file_s] += 1\n else:\n continue\n \n uc3_matrix = []\n for file_s in total_file_changed:\n uc3_matrix.append([file_s,total_file_changed[file_s],total_bugs[file_s],\n total_bugs[file_s]/total_file_changed[file_s],self.repo_name,self.git_url])\n uc3_matrix_df = pd.DataFrame(uc3_matrix, columns = ['SourceFilePath','NoOfTimeChanged','BuggyCommits',\n 'BuggyCommitsPercentage','RepoName','RepoUrl'])\n return uc3_matrix_df\n \n\n\n \n \n","sub_path":"code_base/app/Use_Case.py","file_name":"Use_Case.py","file_ext":"py","file_size_in_byte":9131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"511476300","text":"# Python imports\nimport sys\nfrom math import pi, hypot\nimport numpy as np\nfrom collections import OrderedDict\nfrom Queue import PriorityQueue\nfrom itertools import permutations, combinations\nimport logging\n\n# CV imports\nimport cv2\nimport cv2.cv as cv\n\n# Custom imports\nfrom base import FrameProcessor\nfrom colorfilter import HSVFilter\nfrom input import run\n#from read_mri_image import getTiles, volume_npy_file\n\n# Flags\ndoRenderContours = True\ndoRenderBlobs = False\ndoRenderMarkers = True\ndoRenderCube = False\ndoRenderFace = True\ndoRenderVolume = False\ndoSmoothPose = True\n\n# Global constants\ntwo_pi = 2 * pi\n\n# Camera calibration\n# TODO calibrate camera using printed pattern on cardboard, save to config file, read here\n'''\ncamera_params = np.float64(\n [[9.7708949876746601e+02, 0., 6.2482145912532496e+02],\n [0., 9.7656102551569313e+02, 3.5368190432771917e+02],\n [0., 0., 1.]])\ndist_coeffs = np.float64([-3.0379710721357184e-01, 2.1133934755379138e+00, 1.8317127842586893e-04, 4.0143088611053151e-04, -5.6225773846527973e+00])\n'''\nf = 750 # fx = fy = f # focal length in pixel units\nw = 640\nh = 480\ncamera_params = np.float64(\n [[ f, 0.0, w / 2],\n [0.0, f, h / 2],\n [0.0, 0.0, 1.0]])\ndist_coeffs = np.zeros(5) # distortion coefficients only matter for high accuracy tracking\n\n# Bounding box/cube\ncube_vertices = np.float32(\n [[-1, -1, -1],\n [ 1, -1, -1],\n [ 1, 1, -1],\n [-1, 1, -1],\n [-1, -1, 1],\n [ 1, -1, 1],\n [ 1, 1, 1],\n [-1, 1, 
1]])\n\ncube_edges = [(0, 1), (1, 2), (2, 3), (3, 0),\n (4, 5), (5, 6), (6, 7), (7, 4),\n (0, 4), (1, 5), (2, 6), (3, 7)]\n\ncube_adj = np.zeros(shape=(len(cube_vertices), len(cube_vertices)), dtype=np.uint8)\nfor u, v in cube_edges:\n cube_adj[u, v] = 1\n cube_adj[v, u] = 1\n\ncube_faces = OrderedDict(\n [('front' , (0, 3, 2, 1)),\n ('back' , (5, 6, 7, 4)),\n ('left' , (4, 7, 3, 0)),\n ('right' , (1, 2, 6, 5)),\n ('top' , (4, 0, 1, 5)),\n ('bottom', (3, 7, 6, 2))])\n\ncube_scale = np.float32([10.0, 10.0, 10.0]) # TODO ensure cube is scaled correctly (check units)\n\n'''\n# Color assignment #1: red, orange, yellow, blue\ncube_vertex_colors = [\n 'orange', 'yellow', 'red', 'blue',\n 'red', 'blue', 'orange', 'yellow' ]\n'''\n\n# Color assignment #2: red, orange, green, blue\ncube_vertex_colors = [\n 'orange', 'green', 'red', 'blue',\n 'red', 'blue', 'orange', 'green' ]\n\ncolors_by_name = {\n 'red': np.float32([0.8, 0.0, 0.0]),\n 'green': np.float32([0.0, 0.8, 0.0]),\n 'blue': np.float32([0.0, 0.0, 0.8]),\n 'orange': np.float32([0.8, 0.4, 0.0]),\n 'yellow': np.float32([0.8, 0.8, 0.0]) }\n\n# Rect\n#square_tag_by_vertex = ['red', 'blue', 'green', 'yellow']\n#square_vertex_by_tag = { 'red': 0, 'blue': 1, 'green': 2, 'yellow': 3 }\n\n# Cube\nsquare_vertex_by_tag = { 'orange': 0, 'yellow': 1, 'red': 2, 'blue': 3 }\n #'red': 4, 'blue': 5, 'orange': 6, 'yellow': 7 } # NOTE dicts must have unique keys, so this is not a good representation\nsquare_tag_by_vertex = { }\nfor tag, vertex in square_vertex_by_tag.iteritems():\n square_tag_by_vertex[vertex] = tag\n\n# Color filters\nredFilter = HSVFilter(np.array([175, 115, 64], np.uint8), np.array([5, 255, 255], np.uint8))\nblueFilter = HSVFilter(np.array([100, 64, 75], np.uint8), np.array([115, 255, 255], np.uint8))\n#orangeFilter = HSVFilter(np.array([5, 125, 100], np.uint8), np.array([15, 255, 255], np.uint8)) # strict orange\norangeFilter = HSVFilter(np.array([5, 120, 125], np.uint8), np.array([20, 255, 255], np.uint8)) # orange with a little yellow\ngreenFilter = HSVFilter(np.array([50, 64, 32], np.uint8), np.array([80, 255, 255], np.uint8)) # wide range\n#greenFilter = HSVFilter(np.array([70, 64, 32], np.uint8), np.array([90, 255, 255], np.uint8)) # dark green\nyellowFilter = HSVFilter(np.array([20, 85, 150], np.uint8), np.array([44, 255, 255], np.uint8))\npurpleFilter = HSVFilter(np.array([110, 115, 64], np.uint8), np.array([140, 255, 255], np.uint8))\n# TODO multiple color filters per color to specify non-convex boundaries?\n\nclass Blob:\n colorBlue = (255, 0, 0)\n colorDarkBlue = (128, 64, 64)\n \n def __init__(self, tag, area, bbox, rect):\n self.tag = tag\n self.area = area\n self.bbox = bbox\n self.rect = rect\n self.center = self.rect[0]\n self.center_int = (int(self.center[0]), int(self.center[1])) # int type is needed for drawing functions\n self.size = self.rect[1]\n self.angle = self.rect[2]\n \n def draw(self, imageOut, drawTag=False):\n cv2.rectangle(imageOut, (self.bbox[0], self.bbox[1]), (self.bbox[0] + self.bbox[2], self.bbox[1] + self.bbox[3]), self.colorBlue, 2)\n if drawTag:\n cv2.putText(imageOut, self.tag, self.center_int, cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255), 2)\n \n def __str__(self):\n return \"\".format(tag=self.tag, center=self.center, size=self.size)\n\n\nclass Marker:\n \"\"\"Base class for all markers with a real-world (3D) position and an image (2D) position.\"\"\"\n \n def __init__(self, parent):\n self.parent = parent\n self.lastImagePos = self.imagePos = None # np.zeros(2, dtype=np.float32)\n self.worldPos 
= None # np.zeros(3, dtype=np.float32)\n self.active = False\n \n def updateImagePos(self, imagePos):\n self.lastImagePos = self.imagePos\n self.imagePos = imagePos\n \n def draw(self, imageOut, drawTag=False):\n if self.imagePos is not None:\n imagePos_int = (int(self.imagePos[0]), int(self.imagePos[1]))\n if self.lastImagePos is not None:\n lastImagePos_int = (int(self.lastImagePos[0]), int(self.lastImagePos[1]))\n cv2.line(imageOut, lastImagePos_int, imagePos_int, (128, 255, 0), 2)\n #cv2.circle(imageOut, lastImagePos_int, 2, (128, 0, 0), -1)\n cv2.circle(imageOut, imagePos_int, 2, (255, 0, 0), -1)\n if drawTag:\n cv2.putText(imageOut, self.tag, (imagePos_int[0] + 15, imagePos_int[1] + 15), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 0), 2)\n\n\nclass ColorMarker(Marker):\n \"\"\"Markers with a consistent color that can be used to detect them.\"\"\"\n \n def __init__(self, parent, tag=None):\n Marker.__init__(self, parent)\n self.tag = tag\n # TODO Register instances upon creation (or explicitly?) to enable tracking as a bunch\n\n\nclass PlanarMarker(Marker):\n \"\"\"AR-Tag style planar marker.\"\"\"\n # TODO Move this to its own module (\"like planetracking\")\n \n def __init__(self, parent, normal=np.float32([0.0, -1.0, 0.0])): # default normal: pointing up\n Marker.__init__(self, parent)\n self.normal = normal\n\n\nclass Trackable:\n \"\"\"Base class to allow multiple independently trackable objects.\"\"\"\n \n def __init__(self):\n self.rvec = np.zeros((3, 1), dtype=np.float32)\n self.tvec = np.zeros((3, 1), dtype=np.float32)\n self.visible = False # TODO use a better state-machine scheme\n self.markers = list()\n\n\nclass ColorMarkerTracker(FrameProcessor):\n \"\"\"Detects ColorMarker objects in an image, using spatial continuity and proximity heuristics.\"\"\"\n \n minBlobArea = 500\n maxBlobArea = 5000\n \n def __init__(self, options):\n FrameProcessor.__init__(self, options)\n #self.debug = False # set to False to prevent unnecessary debug prints, esp. 
to record output videos\n \n # * Create list of trackables and markers\n self.trackables = []\n self.markers = []\n \n def addMarkersFromTrackable(self, trackable):\n self.trackables.append(trackable)\n self.logger.debug(\"Trackable object has {} markers: {}\".format(len(trackable.markers), \", \".join(marker.__class__.__name__ for marker in trackable.markers)))\n self.markers.extend([marker for marker in trackable.markers if isinstance(marker, ColorMarker)])\n self.logger.debug(\"Markers added; current total: {}\".format(len(self.markers)))\n \n def initialize(self, imageIn, timeNow):\n self.image = imageIn\n self.imageSize = (self.image.shape[1], self.image.shape[0]) # (width, height)\n self.imageCenter = (self.imageSize[0] / 2, self.imageSize[1] / 2)\n self.imageOut = None\n self.active = True\n \n # * Initialize color filtering structures (note: cube_vertex_colors need t be changed as well)\n #self.filterBank = dict(red=redFilter, blue=blueFilter, orange=orangeFilter, yellow=yellowFilter) # Cube: RBOY\n self.filterBank = dict(red=redFilter, blue=blueFilter, green=greenFilter, orange=orangeFilter) # Rect: RGBO\n #self.filterBank = dict(red=redFilter, blue=blueFilter, green=greenFilter, purple=purpleFilter) # Rect: RGBP\n self.masks = { }\n self.morphOpenKernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))\n \n def process(self, imageIn, timeNow):\n self.imageIn = imageIn # keep a reference to the original image\n self.image = self.imageIn\n if self.gui: self.imageOut = self.image.copy()\n self.image = cv2.blur(self.image, (5, 5))\n #self.image = cv2.merge([cv2.equalizeHist(imageIn[:,:,0]), cv2.equalizeHist(imageIn[:,:,1]), cv2.equalizeHist(imageIn[:,:,2])])\n # TODO normalize intensity instead\n \n # * Initialize blobs\n self.blobs = list()\n \n # * Get HSV\n self.imageHSV = cv2.cvtColor(self.image, cv2.COLOR_BGR2HSV)\n \n # * Apply filters\n for filterName, colorFilter in self.filterBank.iteritems():\n mask = colorFilter.apply(self.imageHSV)\n # ** Smooth out mask and remove noise\n mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, self.morphOpenKernel, iterations=2)\n self.masks[filterName] = mask\n if self.gui: cv2.imshow(filterName, self.masks[filterName])\n \n # ** Detect contours in mask\n contours, _ = cv2.findContours(mask, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\n if len(contours) > 0:\n #self.logger.debug(\"[%.2f] %d %s contour(s)\" % (timeNow, len(contours), maskName)) # report contours found\n if self.gui and self.debug and doRenderContours: cv2.drawContours(self.imageOut, contours, -1, (0, 255, 255)) # draw all contours found\n \n # *** Walk through list of contours\n for contour in contours:\n #contour = contour.astype(np.int32) # convert contours to 32-bit int for each individual contour [Pandaboard OpenCV bug workaround]\n \n # **** Filter out ones that are too small or too big\n area = cv2.contourArea(contour)\n if area < self.minBlobArea or area > self.maxBlobArea: continue\n \n # **** Create blob\n bbox = cv2.boundingRect(contour)\n rect = cv2.minAreaRect(contour)\n blob = Blob(filterName, area, bbox, rect)\n self.blobs.append(blob)\n if self.gui and doRenderBlobs: blob.draw(self.imageOut, drawTag=True)\n \n # * Process blobs to find matching markers\n if self.blobs:\n self.blobs = sorted(self.blobs, key=lambda blob: blob.area, reverse=True) # sort by decreasing size\n # ** Report blobs found\n #self.logger.debug(\"{0} blobs found:\\n{1}\".format(len(self.blobs), \"\\n\".join((str(blob) for blob in self.blobs)))) # verbose\n self.logger.debug(\"{0} blobs found: 
{1}\".format(len(self.blobs), \", \".join((blob.tag for blob in self.blobs)))) # brief\n \n # ** Match markers with blobs\n self.matchMarkersWithBlobs(self.markers, self.blobs, 100*100)\n \n # ** Report active markers\n self.logger.debug(\"{} active markers: {}\".format(len(self.markers), \", \".join((marker.tag for marker in self.markers if marker.active))))\n if self.gui and doRenderMarkers:\n for marker in (marker for marker in self.markers if marker.active):\n marker.draw(self.imageOut, drawTag=True)\n \n return self.imageOut\n \n def matchMarkersWithBlobs(self, markers, blobs, maxDistSq=np.inf):\n '''\n # [Matching method 1] Naive method\n for marker in self.markers:\n if marker.active:\n # *** For active markers, use their last position as a search constraint/hint\n #bestBlob = self.getNearestBlob(marker.tag, marker.imagePos, 100) # specify a max dist to rule out distant false-positives\n #bestBlobs = self.getBlobsByDistance(marker.tag, marker.imagePos) # returns list of Blob objects\n bestBlobs = self.getBlobsByDist(marker.tag, marker.imagePos, 100) # returns (, ) pairs\n bestBlobs = [blob for dist, blob in bestBlobs]\n if bestBlobs: #if bestBlob is not None:\n self.logger.debug(\"Marker *{}*: {} matching blob(s)\".format(marker.tag, len(bestBlobs)))\n marker.blobs = bestBlobs\n \n bestBlob = bestBlobs[0]\n #bestDist, bestBlob = bestBlobs[0] # NOTE getBlobsByDist() returns (dist, blob tuple)\n marker.updateImagePos(bestBlob.center)\n self.blobs.remove(bestBlob) # only one marker per blob\n else:\n marker.updateImagePos(None)\n marker.active = False # TODO only mark inactive when blob hasn't been seen for a while\n else:\n # *** For inactive markers, try to find the best possible match (TODO retain multiple close matches, and disambiguate later based on whichever combination makes the most *sense*)\n bestBlob = self.getBlob(marker.tag)\n if bestBlob is not None:\n marker.blobs = [bestBlob]\n marker.updateImagePos(bestBlob.center)\n #self.blobs.remove(bestBlob) # only one marker per blob\n marker.active = True\n '''\n \n # [Matching method 2] Based on Nearest-neighbor linker by Jean-Yves Tinevez\n # Source: http://www.mathworks.com/matlabcentral/fileexchange/33772-nearest-neighbor-linker\n \n # * Initialize distance matrix\n D = np.empty(shape=(len(markers), len(blobs)), dtype=np.float32)\n D.fill(np.inf)\n \n # * Build up distance matrix, only for valid matches (i.e. 
marker and blob must have same tag)\n        # TODO use richer features, such as color histogram of the image region\n        for i in xrange(len(markers)):\n            #self.logger.debug(\"[Distance matrix loop] i: {}\".format(i))\n            markers[i].active = False\n            for j in xrange(len(blobs)):\n                #self.logger.debug(\"[Distance matrix loop] j: {}, blob.center: {}, imageCenter: {}\".format(j, blobs[j].center, self.imageCenter))\n                if not markers[i].tag == blobs[j].tag: continue\n                if markers[i].active:\n                    D[i, j] = (markers[i].imagePos[0] - blobs[j].center[0]) * (markers[i].imagePos[0] - blobs[j].center[0]) + (markers[i].imagePos[1] - blobs[j].center[1]) * (markers[i].imagePos[1] - blobs[j].center[1])\n                    if D[i, j] > maxDistSq: D[i, j] = np.inf # distances beyond maxDist will not be considered\n                else:\n                    D[i, j] = ((self.imageCenter[0] - blobs[j].center[0]) * (self.imageCenter[0] - blobs[j].center[0]) + (self.imageCenter[1] - blobs[j].center[1]) * (self.imageCenter[1] - blobs[j].center[1])) * 2 # double the distance: hack to prevent inactive markers from stealing blobs\n        \n        #self.logger.debug(\"Computed distance matrix D:\\n{}\".format(D))\n        \n        # * Iteratively find matchings for markers\n        markersMatched = 0\n        blobsProcessed = 0\n        while markersMatched <= len(markers) and blobsProcessed <= len(blobs):\n            minIdx = D.argmin()\n            i, j = minIdx / len(blobs), minIdx % len(blobs)\n            if np.isinf(D[i, j]): break # no more non-inf. distance pairs left\n            #self.logger.debug(\"Min. D[{}, {}] = {} (pair: {} / {})\".format(i, j, D[i, j], markers[i].tag, blobs[j].tag))\n            markers[i].imagePos = blobs[j].center\n            markers[i].active = True\n            D[i, :] = np.inf # mark whole row as infinity (i.e. remove marker from further calculations)\n            D[:, j] = np.inf # mark whole column as infinity (i.e. remove blob from further calculations)\n            #self.logger.debug(\"D:\\n{}\".format(D))\n            markersMatched += 1\n            blobsProcessed += 1\n    \n    def getBlobs(self, tag=None):\n        \"\"\"Return a generator/list of blobs that match given tag (or all, if not given).\"\"\"\n        if tag is not None:\n            return (blob for blob in self.blobs if blob.tag == tag)\n        else:\n            return self.blobs\n    \n    def getBlob(self, tag=None):\n        \"\"\"Return a single blob that matches tag (if given).\"\"\"\n        for blob in self.getBlobs(tag):\n            return blob # return the first one that matches tag\n        return None\n    \n    def getBlobsByDistance(self, tag=None, point=None):\n        if point is None: point = self.imageCenter\n        nearestBlobs = sorted((blob for blob in self.getBlobs(tag)), key=lambda blob: hypot(blob.center[0] - point[0], blob.center[1] - point[1]))\n        return nearestBlobs\n    \n    def getBlobsByDist(self, tag=None, point=None, maxDist=np.inf):\n        if point is None: point = self.imageCenter\n        nearestBlobs = PriorityQueue()\n        for blob in self.getBlobs(tag):\n            dist = hypot(blob.center[0] - point[0], blob.center[1] - point[1])\n            if dist <= maxDist:\n                nearestBlobs.put((dist, blob)) # insert into priority queue\n        return nearestBlobs.queue # return internal list, not to be modified\n    \n    def getNearestBlob(self, tag=None, point=None, maxDist=np.inf):\n        if point is None: point = self.imageCenter\n        minDist = maxDist\n        nearestBlob = None\n        for blob in self.getBlobs(tag):\n            dist = hypot(blob.center[0] - point[0], blob.center[1] - point[1])\n            if dist < minDist:\n                minDist = dist\n                nearestBlob = blob\n        return nearestBlob\n    \n    '''\n    # Alt. 
implementation (might not be as efficient)\n for blob in getBlobsByDistance(tag, point):\n if hypot(blob.center[0] - point[0], blob.center[1] - point[1]) < maxDist:\n return blob\n break\n return None\n '''\n\n\nclass CubeTracker(ColorMarkerTracker):\n inactiveThreshold = 1.2 # how many seconds to allow the cube to remain invisible before marking it inactive\n cube_origin = np.float32([[0.0], [1.72], [60.92]]) # expected cube origin\n tvec_maxdiff_origin = 40.0 # maximum distance from origin for a valid detection\n tvec_maxdiff_last = 5.0 # maximum distance from last known position for a valid detection\n rvec_maxdiff_last = pi # maximum combined (L2-norm) angle difference from last known pose\n max_line_blob_distance_sq = 50 * 50 # squared distance between line end points and blob centers for a match\n num_smooth_samples = 5 # number of sample transformations to use for smoothing (to reduce jitter)\n \n def __init__(self, options):\n ColorMarkerTracker.__init__(self, options)\n \n def initialize(self, imageIn, timeNow):\n ColorMarkerTracker.initialize(self, imageIn, timeNow)\n \n # * Initialize members needed for cube-tracking\n #self.cubeEdgeFilter = HSVFilter(np.array([0, 40, 100], np.uint8), np.array([179, 100, 255], np.uint8)) # bare wooden sticks\n self.cubeEdgeFilter = HSVFilter(np.array([0, 0, 200], np.uint8), np.array([179, 30, 255], np.uint8)) # white sticks\n \n # * Initialize 3D projection params\n self.rvecRaw = np.zeros((3, 1), dtype=np.float32)\n self.tvecRaw = np.zeros((3, 1), dtype=np.float32)\n self.active = False\n self.lastSeen = timeNow\n self.smoothReset()\n \n # * Read in camera parameters\n # NOTE defined as module objects\n \n # * Initialize cube model and detection square/rect\n '''\n #scale = [375, 225, -100] # negative Z-scale will make the model pop up and out of the screen (relative to base)\n scale = [145, 145, 145]\n #scale = [self.imageSize[0] / 4, self.imageSize[1] / 4, -(self.imageSize[1] / 8)]\n #scale = [1, 1, 1]\n shift = [self.imageSize[0] / 2 - scale[0] / 2, self.imageSize[1] / 2 - scale[1] / 2, -self.imageSize[1]] # Z-shift doesn't really matter\n #shift = [-(scale[0] / 2), -(scale[1] / 2), -self.imageSize[1]]\n #shift = [0, 0, -self.imageSize[1]]\n self.cube_vertices = cube_vertices * scale + shift # scaled and shifted\n \n self.cube_vertices = cube_vertices * cube_scale\n #self.cube_vertices = cube_vertices\n self.cube_edges = cube_edges\n self.base_vertices = self.cube_vertices[:4] # first 4 vertices of cube form the base square\n self.square_vertex_by_tag = square_vertex_by_tag\n \n self.logger.debug(\"Camera params:\\n{}\".format(camera_params))\n self.logger.debug(\"Cube vertices:\\n{}\".format(self.cube_vertices))\n self.logger.debug(\"Cube edges:\\n{}\".format(self.cube_edges))\n '''\n \n # * Read in volume/point-cloud/model\n if doRenderVolume:\n model_volume_data = np.load(volume_npy_file)\n sampleStep = 500 # pick every nth point to reduce computational load\n self.model_volume_points = np.float32(model_volume_data[0::sampleStep, 0:3]) # resample, convert to float32 (needed by projectPoints)\n self.model_volume_intensities = model_volume_data[0::sampleStep, 3]\n self.logger.debug(\"Loaded model volume points; shape: {}, dtype: {}\".format(self.model_volume_points.shape, self.model_volume_points.dtype))\n \n '''\n minX, minY, minZ = np.amin(self.model_volume_points, axis=0)\n maxX, maxY, maxZ = np.amax(self.model_volume_points, axis=0)\n model_scale = [1., 1., 1.5]\n model_shift = [self.imageSize[0] / 2 - (maxX - minX) / 2, 
self.imageSize[1] / 2 - (maxY - minY) / 2, self.cube_vertices[4, 2]] # center in volume\n self.model_volume_points = self.model_volume_points * model_scale + model_shift\n self.logger.debug(\"Ranges:- x: [{}, {}], y: [{}, {}], z: [{}, {}]\".format(minX, maxX, minY, maxY, minZ, maxZ))\n '''\n \n mins = np.amin(self.model_volume_points, axis=0)\n maxs = np.amax(self.model_volume_points, axis=0)\n # TODO compute best fit box possible, maintaining aspect-ratio, center inside cube\n model_scale = (self.cube_vertices[6] - self.cube_vertices[0]) / (maxs - mins)\n model_shift = self.cube_vertices[0]\n self.logger.debug(\"model_scale: {}, model_shift: {}\".format(model_scale, model_shift))\n self.model_volume_points = (self.model_volume_points - mins) * model_scale + model_shift\n \n def process(self, imageIn, timeNow):\n ColorMarkerTracker.process(self, imageIn, timeNow)\n #if not self.blobs:\n # return self.imageOut # nothing more to do, bail out\n \n # * For each trackable\n for trackable in self.trackables:\n activeMarkers = [marker for marker in trackable.markers if marker.active]\n self.logger.debug(\"Tracking a {}: #active markers = {}\".format(trackable.__class__.__name__, len(activeMarkers)))\n trackable.visible = False # assume not visible, and proceed\n \n # ** If this trackable is a cube, use specialized tracking method\n if trackable.__class__.__name__ == 'Cube':\n found = False\n # *** Try finding the cube using a combination of observed blobs and line segments (connecting edges)\n #if not found and len(self.blobs) >= 4: found = self.trackCubeWithEdges(trackable, activeMarkers) # NOTE does not use markers, uses blobs directly instead\n \n if len(activeMarkers) < 4:\n continue # not enough markers for this trackable\n \n # *** Try finding the cube using faces (most stable, but requires all 4 vertices of a face to be seen)\n if not found: found = self.trackCubeMultiface2(trackable, activeMarkers) # NOTE uses activeMarkers\n \n # *** Try matching an arbitrary set of markers (at least 4)\n if not found: found = self.trackVerticesRansac(trackable, activeMarkers) # NOTE uses activeMarkers\n \n # *** If cube is visible, copy rotation and translation vectors for global use\n if trackable.visible: # alt.: found\n if self.updateTransform(trackable.rvec, trackable.tvec): # update self transform, filtering and rejecting outliers\n self.active = True\n self.lastSeen = timeNow\n else:\n if timeNow - self.lastSeen > self.inactiveThreshold:\n self.active = False\n # NOTE we can simply use the cube's vectors when projecting data/models inside it by parenting them to it\n \n # **** Project a cube overlayed on top of video stream\n if self.gui and doRenderCube:\n cube_points, jacobian = cv2.projectPoints(trackable.vertices, trackable.rvec, trackable.tvec, camera_params, dist_coeffs)\n cube_points = cube_points.reshape(-1, 2) # remove nesting\n #self.logger.debug(\"Projected cube points:\\n{}\".format(cube_points))\n for u, v in trackable.edges:\n if u < len(cube_points) and v < len(cube_points) and cube_points[u] is not None and cube_points[v] is not None: # sanity check\n cv2.line(self.imageOut, (int(cube_points[u][0]), int(cube_points[u][1])), (int(cube_points[v][0]), int(cube_points[v][1])), (255, 255, 0), 2)\n else:\n if timeNow - self.lastSeen > self.inactiveThreshold:\n self.active = False\n else:\n self.trackVerticesRansac(trackable, activeMarkers)\n \n # * If we have a valid transform, project a visualization/model overlayed on top of video stream\n if self.rvecRaw is not None and self.tvecRaw 
is not None:\n self.logger.debug(\"Transform [final]:-\\nrvec:\\n{}\\ntvec:\\n{}\".format(self.rvecRaw, self.tvecRaw))\n if self.gui and doRenderVolume:\n volume_points, jacobian = cv2.projectPoints(self.model_volume_points, self.rvecRaw, self.tvecRaw, camera_params, dist_coeffs)\n volume_points = volume_points.reshape(-1, 2) # remove nesting\n for point, intensity in zip(volume_points, self.model_volume_intensities):\n if 0 <= point[0] < self.imageSize[0] and 0 <= point[1] < self.imageSize[1]:\n self.imageOut[point[1], point[0]] = (intensity, intensity, intensity)\n \n return self.imageOut\n \n def trackCubeFace(self, trackable, activeMarkers, faceIdx=0):\n # TODO clean up and refactor to use trackable.* instead of self.*\n # [Tracking method 1] Try to match a single cube face (indicated by faceIdx)\n # * Map blob centers to base vertex points\n self.base_points = [None] * len(self.base_vertices)\n for blob in self.blobs:\n self.base_points[self.square_vertex_by_tag[blob.tag]] = blob.center # NOTE last blob of a particular tag overwrites any previous blobs with same tag\n \n foundBasePoints = True\n for point in self.base_points:\n if point is None:\n foundBasePoints = False\n self.logger.debug(\"Warning: Base point not detected\")\n return self.imageOut # skip\n #break # keep using last transform; TODO set rvec, tvec to None if tracking is lost for too long\n \n # * If all base points are found, compute 3D projection/transform (as separate rotation and translation vectors: rvec, tvec)\n if foundBasePoints:\n self.base_points = np.float32(self.base_points)\n #self.logger.debug(\"Base points:\\n{}\".format(self.base_points))\n \n retval, trackable.rvec, trackable.tvec = cv2.solvePnP(self.base_vertices, self.base_points, camera_params, dist_coeffs)\n self.logger.debug(\"\\nretval: {}\\nrvec: {}\\ntvec: {}\".format(retval, trackable.rvec, trackable.tvec))\n \n def trackCubeMultiface(self, trackable, activeMarkers):\n # [Tracking method 2-1] Try to match *any* face of the cube that might be visible\n # * For each (named) cube face\n for name, face in cube_faces.iteritems():\n # ** Obtain face vertices, vertex colors, and vertex color to index mapping (NOTE color == tag)\n face_vertices = np.float32([trackable.vertices[vertex_idx] for vertex_idx in face])\n face_markers = (trackable.markers[vertex_idx] for vertex_idx in face)\n face_vertex_colors = [cube_vertex_colors[vertex_idx] for vertex_idx in face]\n #print \"face:\", face, \"\\nface_vertices:\", face_vertices, \"\\nface_vertex_colors:\", face_vertex_colors\n face_vertex_idx_by_color = OrderedDict(zip(face_vertex_colors, range(len(face_vertex_colors))))\n #print \"face_vertex_idx_by_color:\", face_vertex_idx_by_color\n \n # ** Map blob centers to face vertex points, keeping a list of all candidate blobs for each vertex point\n face_points = [None] * len(face)\n face_vertex_markers = [[]] * len(face)\n for marker in face_markers:\n if not marker.active: continue\n face_vertex_markers[face_vertex_idx_by_color[marker.tag]].append(marker)\n face_points[face_vertex_idx_by_color[marker.tag]] = marker.imagePos # NOTE last marker of a particular tag overwrites any previous markers with same tag; to get other marker positions, keep popping from face_vertex_markers and copying in imagePos\n \n # ** Check if all face points have been found\n isFaceComplete = np.all([point is not None for point in face_points])\n if not isFaceComplete:\n self.logger.debug(\"Incomplete face: {}\".format(name))\n continue # this face is incomplete, maybe some other 
face will match\n \n # ** Ensure this is a valid face using known topology\n face_points = np.float32(face_points)\n #print \"face_points:\\n{}\".format(face_points)\n \n # ** Iterate over all candidate face-marker mappings\n # TODO use a different scheme to generate combinations; this one is flawed!\n haveCandidate = True\n while haveCandidate:\n self.logger.debug(\"Candidate face: {}\".format(name))\n # *** If a valid face is found, compute 3D projection/transform\n if self.isCubeFaceValid(name, face_points):\n self.logger.debug(\"Valid face: {}\".format(name))\n \n retval, trackable.rvec, trackable.tvec = cv2.solvePnP(face_vertices, face_points, camera_params, dist_coeffs)\n #self.logger.debug(\"Transform:-\\nretval: {}\\nrvec:\\n{}\\ntvec:\\n{}\".format(retval, trackable.rvec, trackable.tvec))\n trackable.visible = True\n return True # use the first complete face that is found, and skip the rest\n # TODO compute multiple transforms and pick best one (closest to last transform, cube center within bounds)\n \n # *** Else try a different vertex-blob combination, if available\n haveCandidate = False\n for i in xrange(len(face_points)):\n if len(face_vertex_markers[i]) > 1: # if there are more than one possible markers still left\n face_vertex_markers[i].pop() # first, remove the current one\n face_points[i] = face_vertex_markers[i][0].imagePos # then copy in next's position\n haveCandidate = True\n break\n \n return False\n \n def trackCubeMultiface2(self, trackable, activeMarkers):\n # [Tracking method 2-2] Try to find *all* faces that are visible and use the \"best\"\n \n # * Iterate over all 4-element (ordered) subsets of activeMarkers\n face_marker_seqs = [] # list of (name, face, marker_seq, face_points) tuples\n marker_sets = list(combinations(activeMarkers, 4))\n #self.logger.debug(\"len(marker_sets) = {}\".format(len(marker_sets)))\n #total_marker_seqs = 0\n #valid_marker_seqs = 0\n for marker_set in marker_sets:\n if marker_set[0].tag == marker_set[1].tag or marker_set[0].tag == marker_set[2].tag or marker_set[0].tag == marker_set[3].tag or \\\n marker_set[1].tag == marker_set[2].tag or marker_set[1].tag == marker_set[3].tag or \\\n marker_set[2].tag == marker_set[3].tag:\n continue # skip sets with duplicate colors (NOTE this assumes each face has 4 distinct colors)\n marker_seqs = list(permutations(marker_set, 4))\n #total_marker_seqs += len(marker_seqs)\n for marker_seq in marker_seqs:\n for name, face in cube_faces.iteritems():\n if np.any([marker.tag != cube_vertex_colors[vertex_idx] for marker, vertex_idx in zip(marker_seq, face)]):\n continue # marker tags don't match face vertex colors\n face_points = np.float32([marker.imagePos for marker in marker_seq])\n if self.isCubeFaceValid(name, face_points):\n #valid_marker_seqs += 1\n face_marker_seqs.append((name, face, marker_seq, face_points))\n \n #self.logger.debug(\"total_marker_seqs = {}, valid_marker_seqs = {}\".format(total_marker_seqs, valid_marker_seqs))\n if not face_marker_seqs:\n self.logger.debug(\"No matching (face, marker-sequence) pairs\")\n return False\n \n # TODO compute scores for different faces based on whether there are edge pixels between vertices where expected (and what fraction of total length); also after reprojecting remaining points, whether actual image pixels match in color\n \n transforms = [] # TODO make this a named tuple\n for name, face, marker_seq, face_points in face_marker_seqs:\n face_vertices = np.float32([trackable.vertices[vertex_idx] for vertex_idx in face])\n retval, rvec, tvec = 
cv2.solvePnP(face_vertices, face_points, camera_params, dist_coeffs)\n if retval:\n transforms.append((name, rvec, tvec))\n \n if not transforms:\n self.logger.debug(\"No valid transforms could be computed\")\n return False\n \n #self.logger.debug(\"{} transforms:\\n{}\".format(len(transforms), \"\\n\".join(str(transform) for transform in transforms)))\n final_transform = transforms[0]\n if len(transforms) > 1:\n mean_tvec = np.mean([tvec for _, _, tvec in transforms], axis=0)\n #self.logger.debug(\"mean_tvec = {} ({} transforms)\".format(mean_tvec, len(transforms)))\n consistent_transforms = [transform for transform in transforms if np.linalg.norm(transform[2] - mean_tvec, ord=2) < 10.0]\n #self.logger.debug(\"{} consistent transforms:\\n{}\".format(len(consistent_transforms), \"\\n\".join(str(transform) for transform in consistent_transforms)))\n if len(consistent_transforms) == 0:\n if self.active:\n final_transform = sorted(transforms, key=lambda transform: np.linalg.norm(transform[2] - trackable.tvec, ord=2))[0]\n else:\n final_transform = (\"/\".join(transform[0] for transform in consistent_transforms), consistent_transforms[0][1], consistent_transforms[0][2]) # use rvec from the first consistent transform, and mean tvec (?)\n \n self.logger.debug(\"{} candidate transforms, final_transform:\\n{}\".format(len(transforms), \"\\n\".join(str(x) for x in final_transform)))\n trackable.rvec = final_transform[1]\n trackable.tvec = final_transform[2]\n trackable.visible = True\n return True\n \n def isCubeFaceValid(self, face_name, face_points):\n # * Verify that these face points satisfy a known topographical structure (order)\n # NOTE Topographical structure is inherent in the order of face points (should be counter-clockwise)\n \n # ** Compute face centroid\n face_centroid = np.mean(face_points, axis=0)\n #print \"face_centroid: {}\".format(face_centroid)\n \n # ** [2D] Compute heading angles to each point assuming face normal is pointing outward through the screen\n heading_vecs = face_points - face_centroid\n headings = np.float32([ np.arctan2(vec[1], vec[0]) for vec in heading_vecs ])\n \n # ** [3D] Compute face plane normal and heading angles to each point around the normal (TODO)\n #face_normal = np.cross(face_vertices[1] - face_vertices[0], face_vertices[3] - face_vertices[0])\n #print \"face_normal: {}\".format(face_normal)\n \n # ** These headings should be in decreasing order (since they are counter-clockwise, and Y-axis is downwards)\n heading_diffs = headings - np.roll(headings, 1)\n heading_diffs = ((heading_diffs + pi) % (2 * pi)) - pi # ensures angle wraparound is handled correctly\n #print \"heading_diffs: {}\".format(heading_diffs)\n if np.any(heading_diffs > 0):\n #self.logger.debug(\"Failed ordering check\")\n return False # if any heading angle difference is positive, skip this face\n \n # * Check if face points form a convex quadrilateral facing us\n # NOTE this eliminates the need for heading-based check above, doesn't it?\n #print \"Face: {}\".format(name)\n #print \"face_points:\\n{}\".format(face_points)\n edge_vectors = face_points - np.roll(face_points, 1, axis=0) # vectors between adjacent vertices (i.e. 
along edges)\n #print \"edge_vectors:\\n{}\".format(edge_vectors)\n cross_prods = np.cross(edge_vectors, np.roll(edge_vectors, 1, axis=0)) # cross products of consecutive edge vectors\n #print \"cross_prods: {}\".format(cross_prods)\n if np.any(cross_prods < 0):\n #self.logger.debug(\"Failed convex quad, front-face check\")\n return False # if any cross product is negative, then quad is either non-convex or back-facing\n \n # * Check if opposite heading vector lengths and angles are almost equal (i.e. the quad is almost a rhombus)\n '''\n heading_lens = np.float32([np.hypot(vec[0], vec[1]) for vec in heading_vecs])\n #print \"heading_lens: {}\".format(heading_lens)\n if not (abs(heading_lens[0] - heading_lens[2]) / max(heading_lens[0], heading_lens[2]) < 0.5 \\\n and abs(heading_lens[1] - heading_lens[3]) / max(heading_lens[1], heading_lens[3]) < 0.5 \\\n and abs(heading_diffs[0] - heading_diffs[2]) / max(heading_diffs[0], heading_diffs[2]) < 0.1 \\\n and abs(heading_diffs[1] - heading_diffs[3]) / max(heading_diffs[1], heading_diffs[3]) < 0.1):\n return False\n '''\n #print face_name\n #print \"heading_vecs:\\n{}\".format(heading_vecs)\n heading_vecs = [vec / np.linalg.norm(vec, ord=2) for vec in heading_vecs]\n #print \"norm. heading_vecs:\\n{}\".format(heading_vecs)\n #print \"{} dot {} = {}\".format(heading_vecs[0], heading_vecs[2], np.dot(heading_vecs[0], heading_vecs[2]))\n #print \"{} dot {} = {}\".format(heading_vecs[1], heading_vecs[3], np.dot(heading_vecs[1], heading_vecs[3]))\n if not (np.dot(heading_vecs[0], heading_vecs[2]) < -0.95 and np.dot(heading_vecs[1], heading_vecs[3]) < -0.95):\n #self.logger.debug(\"Failed rhombus check\")\n return False\n \n if self.gui and doRenderFace:\n face_centroid_int = (int(face_centroid[0]), int(face_centroid[1]))\n cv2.circle(self.imageOut, face_centroid_int, 10, (0, 128, 0), -1)\n for point, heading in zip(face_points, headings):\n cv2.line(self.imageOut, face_centroid_int, (int(point[0]), int(point[1])), (0, 128, 0), 2)\n #label_pos = (face_centroid_int[0] + int(100 * np.cos(heading)), face_centroid_int[1] + int(100 * np.sin(heading)))\n #cv2.putText(self.imageOut, \"{:.2f}\".format(heading * 180 / pi), label_pos, cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 255), 1)\n cv2.putText(self.imageOut, face_name, face_centroid_int, cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 255), 2)\n \n return True\n \n def trackVerticesRansac(self, trackable, activeMarkers):\n # NOTE len(activeMarkers) >= 4\n # [Tracking method 3] Try to match a set of (at least four) vertices that may or may not form a face\n '''\n # * Find different possible mappings between markers and blobs\n for blob in self.blobs: blob.used = False\n def findMappings(markers, i=0, pairs=[], mappings=[]):\n #self.logger.debug(\"i = {}, #markers = {}, #pairs = {}, #mappings = {}\".format(i, len(markers), len(pairs), len(mappings)))\n if i == len(markers):\n mappings.append(pairs)\n #self.logger.debug(\"Mapping ({} pairs): \".format(len(pairs)) + \", \".join((\"{} @ {}\".format(pair[0].tag, pair[1].center) for pair in pairs)))\n else:\n for blob in markers[i].blobs:\n blobAppended = False\n if not blob.used:\n pair = (markers[i], blob)\n pairs.append(pair)\n blob.used = True\n blobAppended = True\n findMappings(markers, i + 1, list(pairs), mappings)\n if blobAppended:\n pairs.pop()\n blob.used = False\n return mappings\n \n mappings = []\n findMappings(activeMarkers, i=0, pairs=[], mappings=mappings)\n self.logger.debug(\"Mappings ({}):-\".format(len(mappings)))\n for pairs in mappings:\n 
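# Note on this disabled branch: enumerating every marker-to-blob assignment is\n            # exponential in the number of markers, which is presumably why the exhaustive\n            # search remains commented out in favour of the cv2.solvePnPRansac path below.\n            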
self.logger.debug(\"Mapping ({} pairs): \".format(len(pairs)) + \", \".join((\"{} @ {}\".format(pair[0].tag, pair[1].center_int) for pair in pairs)))\n \n # * For each mapping, compute a transform\n for pairs in mappings:\n if len(pairs) < 4: continue # cv2.solvePnP() needs at least 4 point pairs\n #worldPositions = np.float32([marker.worldPos for marker in activeMarkers])\n #imagePositions = np.float32([marker.imagePos for marker in activeMarkers])\n worldPositions = np.float32([pair[0].worldPos for pair in pairs])\n imagePositions = np.float32([pair[1].center for pair in pairs])\n #self.logger.debug(\"[solvePnP] Input:-\\nworldPositions:\\n{}\\nimagePositions:\\n{}\".format(worldPositions, imagePositions))\n retval, trackable.rvec, trackable.tvec = cv2.solvePnP(worldPositions, imagePositions, camera_params, dist_coeffs)\n #self.logger.debug(\"[solvePnP] Transform (retval: {}):-\\nrvec:\\n{}\\ntvec:\\n{}\".format(retval, trackable.rvec, trackable.tvec))\n self.rvecRaw = trackable.rvec\n self.tvecRaw = trackable.tvec\n # ** Project a cube overlayed on top of video stream\n if self.gui and doRenderCube:\n cube_points, jacobian = cv2.projectPoints(self.cube_vertices, self.rvecRaw, self.tvecRaw, camera_params, dist_coeffs)\n cube_points = cube_points.reshape(-1, 2) # remove nesting\n #self.logger.debug(\"Projected cube points:\\n{}\".format(cube_points))\n for u, v in self.cube_edges:\n if u < len(cube_points) and v < len(cube_points) and cube_points[u] is not None and cube_points[v] is not None: # sanity check\n cv2.line(self.imageOut, (int(cube_points[u][0]), int(cube_points[u][1])), (int(cube_points[v][0]), int(cube_points[v][1])), (255, 255, 0), 2)\n '''\n \n # * Flatten marker-blob pairings to get two index-matched arrays\n worldPositions = np.float32([marker.worldPos for marker in activeMarkers])\n imagePositions = np.float32([marker.imagePos for marker in activeMarkers])\n '''\n worldPositions = []\n imagePositions = []\n for marker in activeMarkers:\n for blob in marker.blobs:\n worldPositions.append(marker.worldPos)\n imagePositions.append(blob.center)\n \n worldPositions = np.float32(worldPositions)\n imagePositions = np.float32(imagePositions)\n '''\n \n # * Find rotation and translation vectors from 3D-2D point correspondences\n self.logger.debug(\"Input:-\\nworldPositions:\\n{}\\nimagePositions:\\n{}\".format(worldPositions, imagePositions))\n rvec, tvec, inliers = cv2.solvePnPRansac(worldPositions, imagePositions, camera_params, dist_coeffs, trackable.rvec, trackable.tvec, useExtrinsicGuess = trackable.visible)\n #self.logger.debug(\"Transform:-\\nrvec:\\n{}\\ntvec:\\n{}\\ninliers:\\n{}\".format(rvec, tvec, inliers))\n \n # * If a valid transform is found, mark trackable as visible\n if rvec is not None and tvec is not None and inliers is not None:\n trackable.rvec = rvec\n trackable.tvec = tvec\n trackable.visible = True\n return True\n \n return False\n \n def trackCubeWithEdges(self, trackable, activeMarkers):\n # NOTE This method directly uses self.blobs and trackable.vertices instead of trackable.markers or activeMarkers\n \n # * Find edge pixels in the image by applying an appropriate HSV filter\n edgeMask = self.cubeEdgeFilter.apply(self.imageHSV)\n if self.gui: cv2.imshow(\"edge\", edgeMask)\n edgeMask_img = cv.fromarray(edgeMask) # cv.LineIterator() needs an IplImage/CvMat instead of a numpy array\n \n '''\n # * Find line segments in resulting binary edge mask\n lines = cv2.HoughLinesP(edgeMask, 5, np.radians(5.0), 100, minLineLength=100, maxLineGap=10)\n if lines is 
None:\n self.logger.info(\"No lines detected; bailing out\")\n return False\n lines = lines[0] # for some reason cv2.HoughLinesP() returns a numpy ndarray with shape: (1, <#lines>, 4)\n self.logger.info(\"{} line(s) detected\".format(lines.shape[0]))\n if self.gui:\n for i in xrange(lines.shape[0]):\n line = lines[i]\n cv2.line(self.imageOut, (line[0], line[1]), (line[2], line[3]), (128, 0, 0), 1)\n '''\n \n '''\n # * Explore all possible assignments of blobs to markers and see which ones maintain connectivity\n blobs = list(self.blobs)\n if len(self.blobs) < len(trackable.vertices):\n blobs.extend([None] * (len(trackable.vertices) - len(self.blobs))) # pad with None's\n \n blob_seqs = list(permutations(blobs, len(trackable.vertices)))\n blob_seq_scores = np.zeros(len(blob_seqs), dtype=np.float32)\n for blob_seq, blob_seq_score in zip(blob_seqs, blob_seq_scores):\n #self.logger.debug(\"Evaluating blob seq (len = {}): {}\".format(len(blob_seq), blob_seq))\n self.logger.debug(\"Evaluating seq (len = {}): {}\".format(len(blob_seq), \", \".join(blob.tag if blob is not None else \"-\" for blob in blob_seq)))\n \n # ** Check if this sequence is a possible match\n badSeq = False\n for j in xrange(len(blob_seq)):\n if blob_seq[j] is not None and blob_seq[j].tag != cube_vertex_colors[j]:\n badSeq = True\n self.logger.debug(\"Bad sequence: blob.tag = {}, cube_vertex_color = {}\".format(blob_seq[j].tag, cube_vertex_colors[j]))\n break\n if badSeq: continue\n \n # ** Compute a connectivity score for this blob sequence\n for u, v in trackable.edges:\n self.logger.debug(\"Edge: ({}, {})\".format(u, v))\n if blob_seq[u] is None or blob_seq[v] is None: continue # we have no matching blobs for this vertex pair\n \n uPos = blob_seq[u].center\n vPos = blob_seq[v].center\n for line in lines:\n if np.inner(line[0:2] - uPos, line[0:2] - uPos) < 20 and np.inner(line[2:4] - vPos, line[2:4] - vPos) < 20 or \\\n np.inner(line[0:2] - vPos, line[0:2] - vPos) < 20 and np.inner(line[2:4] - uPos, line[2:4] - uPos) < 20:\n blob_seq_score += 1\n if self.gui: cv2.line(self.imageOut, (line[0], line[1]), (line[2], line[3]), (255, 0, 255), 2)\n break # one match found, that's good enough\n \n # ** Use computed blob sequence scores to pick best one\n best_seq_idx = np.argmax(blob_seq_scores)\n best_seq_score = blob_seq_scores[best_seq_idx]\n best_seq = blob_seqs[best_seq_idx]\n self.logger.info(\"Best blob sequence (score = {}): {}\".format(best_seq_score, \", \".join(blob.tag if blob is not None else \"-\" for blob in best_seq)))\n '''\n \n '''\n # * Obtain blob connectivity graph using detected line segments\n blob_edges = []\n blob_adj = np.zeros(shape=(len(self.blobs), len(self.blobs)), dtype=np.uint8) # adj. 
matrix representing blob connectivity\n for i in xrange(blob_adj.shape[0]):\n for j in xrange(i+1, blob_adj.shape[1]):\n for line in lines:\n dist_linePt1_i = np.inner(line[0:2] - self.blobs[i].center, line[0:2] - self.blobs[i].center)\n dist_linePt2_j = np.inner(line[2:4] - self.blobs[j].center, line[2:4] - self.blobs[j].center)\n dist_linePt1_j = np.inner(line[0:2] - self.blobs[j].center, line[0:2] - self.blobs[j].center)\n dist_linePt2_i = np.inner(line[2:4] - self.blobs[i].center, line[2:4] - self.blobs[i].center)\n #self.logger.debug(\"Line dists.: {}, {}, {}, {}\".format(dist_linePt1_i, dist_linePt2_j, dist_linePt1_j, dist_linePt2_i))\n if (dist_linePt1_i < self.max_line_blob_distance_sq and dist_linePt2_j < self.max_line_blob_distance_sq) or \\\n (dist_linePt1_j < self.max_line_blob_distance_sq and dist_linePt2_i < self.max_line_blob_distance_sq):\n blob_edges.append((i, j))\n blob_adj[i, j] = 1\n blob_adj[j, i] = 1\n #if self.gui: cv2.line(self.imageOut, (line[0], line[1]), (line[2], line[3]), (255, 0, 255), 2) # show actual line segments\n if self.gui: cv2.line(self.imageOut, self.blobs[i].center_int, self.blobs[j].center_int, (255, 0, 255), 2) # show blob links\n break # one match found, that's good enough\n '''\n \n # * Obtain blob connectivity graph by tracing a line between pairs of blobs and studying the edge mask\n blob_edges = []\n blob_adj = np.zeros(shape=(len(self.blobs), len(self.blobs)), dtype=np.uint8) # adj. matrix representing blob connectivity\n for i in xrange(blob_adj.shape[0]):\n for j in xrange(i+1, blob_adj.shape[1]):\n # ** For each blob pair, sample edgeMask pixels along the line connecting their centers, and find the sum (count)\n li = cv.InitLineIterator(edgeMask_img, self.blobs[i].center_int, self.blobs[j].center_int)\n count = sum(li) / 255\n dist = hypot(self.blobs[j].center[0] - self.blobs[i].center[0], self.blobs[j].center[1] - self.blobs[i].center[1])\n self.logger.debug(\"Line count ({} - {}) = {}, dist = {}\".format(i, j, count, dist))\n \n # ** If count is at least some fraction of total distance between the centers, then there must be an edge there\n if count / dist >= 0.6:\n blob_edges.append((i, j))\n blob_adj[i, j] = 1\n blob_adj[j, i] = 1\n if self.gui: cv2.line(self.imageOut, self.blobs[i].center_int, self.blobs[j].center_int, (255, 0, 255), 2)\n \n # * Compute subgraph isomorphisms using blob adjacency matrix and cube adjacency matrix (as reference)\n blob_labels = [blob.tag for blob in self.blobs]\n self.logger.debug(\"Blob adjacency matrix:-\\n{}\".format(formatMatrix(blob_adj, blob_labels, blob_labels)))\n self.logger.debug(\"Cube adjacency matrix:-\\n{}\".format(formatMatrix(cube_adj, cube_vertex_colors, cube_vertex_colors)))\n \n algo = SubgraphIsomorphisms(blob_adj, blob_labels, cube_adj, cube_vertex_colors)\n isomorphisms = algo.run()\n if not isomorphisms:\n self.logger.info(\"No isomorphisms found; bailing out\")\n return False\n self.logger.info(\"Found {} isomorphism(s)\".format(len(isomorphisms)))\n if len(isomorphisms) > 1:\n self.logger.info(\"Isomorphisms:-\\n\" + \"\\n\".join(formatMatrix(iso, blob_labels, cube_vertex_colors) for iso in isomorphisms))\n \n # * Select the best isomorphism (TODO find good criteria/use all isomorphisms and perform a fitness test later)\n mapping = isomorphisms[0] # which one to use? 
why, the first one, of course!\n self.logger.info(\"Chosen isomorphism ({}x{}):\\n{}\".format(mapping.shape[0], mapping.shape[1], formatMatrix(mapping, blob_labels, cube_vertex_colors)))\n \n # * Using this blob-vertex mapping, extract index-matched object and image positions\n worldPositions = []\n imagePositions = []\n for i in xrange(len(self.blobs)):\n for j in xrange(len(trackable.vertices)):\n if mapping[i, j] == 1:\n worldPositions.append(trackable.vertices[j])\n imagePositions.append(self.blobs[i].center)\n if self.gui: cv2.putText(self.imageOut, str(j), self.blobs[i].center_int, cv2.FONT_HERSHEY_SIMPLEX, 0.9, (255, 0, 0), 2)\n worldPositions = np.float32(worldPositions)\n imagePositions = np.float32(imagePositions)\n \n if len(worldPositions) < 4:\n self.logger.info(\"Not enough 3D-2D point correspondences; bailing out\")\n return False\n \n # * Find rotation and translation vectors from 3D-2D point correspondences\n self.logger.info(\"Input:-\\nworldPositions:\\n{}\\nimagePositions:\\n{}\".format(worldPositions, imagePositions))\n # ** No RANSAC scheme\n retval, trackable.rvec, trackable.tvec = cv2.solvePnP(worldPositions, imagePositions, camera_params, dist_coeffs, useExtrinsicGuess = trackable.visible)\n self.logger.info(\"Transform (retval = {}):-\\nrvec:\\n{}\\ntvec:\\n{}\".format(retval, trackable.rvec, trackable.tvec))\n trackable.visible = True\n return True\n \n '''\n # ** RANSAC scheme\n rvec, tvec, inliers = cv2.solvePnPRansac(worldPositions, imagePositions, camera_params, dist_coeffs, trackable.rvec, trackable.tvec, useExtrinsicGuess = trackable.visible)\n #self.logger.debug(\"Transform:-\\nrvec:\\n{}\\ntvec:\\n{}\\ninliers:\\n{}\".format(rvec, tvec, inliers))\n \n # * If a valid transform is found, mark trackable as visible\n if rvec is not None and tvec is not None and inliers is not None:\n trackable.rvec = rvec\n trackable.tvec = tvec\n trackable.visible = True\n return True\n \n return False\n '''\n \n def updateTransform(self, rvec, tvec):\n #self.logger.debug(\"Transform:-\\nrvec:\\n{}\\ntvec:\\n{}\".format(rvec, tvec))\n \n if self.active:\n tvec_diff_origin = np.linalg.norm(tvec - self.cube_origin, ord=2)\n tvec_diff_last = np.linalg.norm(tvec - self.tvecRaw, ord=2)\n rvec_diff_last = np.linalg.norm(((rvec - self.rvecRaw) + pi) % two_pi - pi, ord=2) # norm of smallest angle difference along the 3 axes\n self.logger.debug(\"Dist. from origin: {}, from last pos.: {}; angle diff: {}\".format(tvec_diff_origin, tvec_diff_last, rvec_diff_last))\n if tvec_diff_origin > self.tvec_maxdiff_origin or tvec_diff_last > self.tvec_maxdiff_last or rvec_diff_last > self.rvec_maxdiff_last: # TODO check rvec_diff as well? 
(be careful with angle wraparound)\n self.logger.debug(\"Failed origin and continuity check\")\n #self.smoothReset()\n return False\n \n self.rvecRaw = rvec.copy()\n self.tvecRaw = tvec.copy()\n if doSmoothPose:\n #self.rvecDiff = self.rvec - rvec\n self.rvecDiff = ((self.rvec - rvec) + pi) % two_pi - pi # NOTE smaller difference between 2 angles; reverse order\n self.smoothUpdate()\n else:\n self.rvec = self.rvecRaw\n self.tvec = self.tvecRaw\n \n return True\n \n def smoothReset(self):\n \"\"\"Initialize structs for storing transform samples over time for smoothing.\"\"\"\n self.rvecRaws = np.zeros((3, self.num_smooth_samples), dtype=np.float32)\n self.rvecDiffs = np.zeros((3, self.num_smooth_samples), dtype=np.float32)\n self.tvecRaws = np.zeros((3, self.num_smooth_samples), dtype=np.float32)\n self.smoothIdx = 0\n self.rvec = np.zeros((3, 1), dtype=np.float32)\n self.tvec = np.zeros((3, 1), dtype=np.float32)\n \n def smoothUpdate(self):\n \"\"\"Add current raw transform to samples, and compute smoothed (averaged) transform.\"\"\"\n self.rvecRaws[:,self.smoothIdx:self.smoothIdx+1] = self.rvecRaw\n self.tvecRaws[:,self.smoothIdx:self.smoothIdx+1] = self.tvecRaw\n self.smoothIdx = (self.smoothIdx + 1) % self.num_smooth_samples # circular buffer\n self.rvec = self.rvecRaw + np.mean(((self.rvecRaws - self.rvecRaw) + pi) % two_pi - pi, axis=1, keepdims=True) # correct for angle wraparound: compute smallest angle difference with current raw rvec, take mean and add raw rvec back in\n self.tvec = np.mean(self.tvecRaws, axis=1, keepdims=True)\n # TODO Perform weighted averaging / take into account cube motion (Kalman filter)\n\n\nclass SubgraphIsomorphisms:\n \"\"\"Given graphs as adjaceny matrices A and B (and corresponding vertex labels), find graph A in subgraphs of B.\"\"\"\n # Based on: Ullmann, J. R., An Algorithm for Subgraph Isomorphism, JACM, Vol. 23, Iss. 1, pp. 
31--42, 1976.\n # Simple Enumeration algorithm (brute force)\n \n def __init__(self, A, A_labels, B, B_labels):\n # * Copy in matrices and labels\n self.A = A\n self.A_labels = A_labels\n self.B = B\n self.B_labels = B_labels\n self.logger = logging.getLogger(self.__class__.__name__)\n \n def run(self):\n # * Initialize isomorphism matrix by setting all possible mappings to 1\n self.pA = self.A.shape[0] # number of \"points\" (vertices) in A\n self.pB = self.B.shape[0] # number of \"points\" (vertices) in B\n self.M0 = np.zeros(shape=(self.pA, self.pB), dtype=np.uint8)\n for i in xrange(self.pA):\n for j in xrange(self.pB):\n if self.A_labels[i] == self.B_labels[j] and np.sum(self.B[:,j]) >= np.sum(self.A[:,i]): # label and degree check\n self.M0[i, j] = 1\n self.logger.debug(\"Initial isomporphism matrix (M0):-\\n{}\".format(formatMatrix(self.M0, self.A_labels, self.B_labels)))\n \n self.F = np.zeros(self.pB, dtype=np.uint8) # NOTE can be a true bit-vector\n self.H = np.zeros(self.pA, dtype=np.int8)\n self.M_ = [None] * self.pA # M_[d] is matrix at depth d\n \n # * Run the algorithm!\n self.logger.debug(\"Starting algorithm\")\n self.isomorphisms = []\n self.done = False\n self.step1()\n self.logger.debug(\"Algorithm complete (done = {}, #isomorphisms = {})\".format(self.done, len(self.isomorphisms)))\n return self.isomorphisms\n \n def step1(self):\n self.logger.debug(\"Step 1\")\n self.M = self.M0 # copy?\n self.d = 0 # we are 0-based; Ullmann's algorithm is 1-based\n self.H[self.d] = -1\n self.k = self.H[self.d] # initialized here, so that we have self.k available\n self.F.fill(0)\n self.step2()\n \n def step2(self):\n self.logger.debug(\"Step 2: d = {}\".format(self.d))\n if not np.any([self.M[self.d, j] == 1 and self.F[j] == 0 for j in xrange(self.pB)]):\n self.step7()\n else:\n self.M_[self.d] = self.M # copy?\n self.k = self.H[self.d] if self.d == 0 else -1 # 0-based vs. 
1-based\n self.step3()\n \n def step3(self):\n self.logger.debug(\"Step 3: d = {}, k = {}, M[d] = {}, F = {}\".format(self.d, self.k, self.M[self.d], self.F))\n self.k = self.k + 1\n while self.M[self.d, self.k] == 0 or self.F[self.k] == 1:\n self.k = self.k + 1\n for j in xrange(self.pB):\n if j != self.k:\n self.M[self.d, j] = 0\n self.step4()\n \n def step4(self):\n self.logger.debug(\"Step 4\")\n if self.d < (self.pA - 1):\n self.step6()\n else:\n self.isomorphisms.append(self.M)\n self.logger.debug(\"Candidate isomorphism (M):-\") # TODO Check if we have a valid isomorphism; if yes, add to list\n self.logger.debug(formatMatrix(self.M, self.A_labels, self.B_labels))\n self.step5()\n \n def step5(self):\n self.logger.debug(\"Step 5\")\n if not np.any([self.M[self.d, j] == 1 and self.F[j] == 0 for j in xrange(self.k + 1, self.pB)]):\n self.step7()\n else:\n self.M = self.M_[self.d] # copy?\n self.step3()\n \n def step6(self):\n self.logger.debug(\"Step 6\")\n self.H[self.d] = self.k\n self.F[self.k] = 1\n self.d = self.d + 1\n self.step2()\n \n def step7(self):\n self.logger.debug(\"Step 7\")\n if self.d == 0:\n self.done = True # terminate algorithm\n else:\n self.F[self.k] = 0\n self.d = self.d - 1\n self.M = self.M_[self.d]\n self.k = self.H[self.d]\n self.step5()\n\n\ndef formatMatrix(mat, rowLabels, colLabels): # TODO move to util?\n # TODO take into account max label widths?\n out = \"\\t{}\\n\".format(\"\\t\".join(colLabels))\n for i in xrange(mat.shape[0]):\n out += \"{}\\t{}\\n\".format(rowLabels[i], \"\\t\".join(str(val) for val in mat[i]))\n return out\n\n\nif __name__ == \"__main__\":\n options = { 'gui': True, 'debug': ('--debug' in sys.argv) }\n run(CubeTracker(options=options), gui=options['gui'], debug=options['debug'])\n","sub_path":"CAPTIVE/pyTANG/tang/vision/colortracking.py","file_name":"colortracking.py","file_ext":"py","file_size_in_byte":58553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"89039148","text":"import requests, re, json, copy\nfrom bs4 import BeautifulSoup\nimport contentDeal\nfrom requests.packages import urllib3\n\n# 获取页面超链接信息\ndef get_html(url) :\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36'\n }\n html = requests.get(url, verify=False, headers=headers)\n html.encoding='utf-8'\n soup = BeautifulSoup(html.text, 'lxml')\n for i in soup.find_all('a', href=re.compile(r\"https://finland.fi/life-society/.*\")) :\n yield i.get('href')\n for index in range(2, 100) :\n html = requests.get(url + \"page/\" + str(index), verify=False, headers=headers)\n if not (html is None) :\n html.encoding='utf-8'\n soup = BeautifulSoup(html.text, 'lxml')\n if \"Page not found\" in soup.title.string :\n break\n for i in soup.find_all('a', href=re.compile(r\"https://finland.fi/life-society/.*\")) :\n yield i.get('href')\n else :\n break\n# 获取超链接页面详细信息\ndef get_html_link(link_list) :\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36'\n }\n # 创建字典\n data = {'title': '', 'url': '','review': '', 'content': '', 'time': '', 'type': ''}\n dataList = []\n url_list = [] # 存储所有访问过的 url, 避免重复访问\n # 遍历所有子链接\n for i in link_list :\n # 判断是否遍历过 \n flag = 0\n for url in url_list :\n if url == str(i) :\n flag = 1\n break\n if flag == 1 :\n continue\n url_list.append(i)\n # 正常请求\n html_link = requests.get(i, headers=headers, verify=False)\n 
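# NOTE (added comment): this fetch sets no timeout and never checks the HTTP status code; a hedged, more defensive variant (the 10-second timeout and the raise_for_status() call are assumptions, not part of the original crawler) would be:\n        #   html_link = requests.get(i, headers=headers, verify=False, timeout=10)\n        #   html_link.raise_for_status()\n        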
html_link.encoding='utf-8'\n soup = BeautifulSoup(html_link.text, 'lxml')\n title = soup.title.string\n url = i\n review = \"\"\n content_div = soup.select(\".entry-content\")[0]\n content_list = content_div.select(\"p\")\n for k in range(1, 6) :\n appended_string = content_div.select(\"h\" + str(k))\n for ap in appended_string :\n content_list.append(ap)\n content = \"\"\n for j in content_list :\n content = content + str(j)\n # 处理内容,替换字符串中的字符,去掉正文中的图片信息\n content = contentDeal.deal_content(content)\n # 通过标签名查找 时间 \n time = re.search(r\"\", html_link.text).group(1)\n type = \"life\"\n # 给字典赋值\n data['title'] = title\n data['url'] = url\n data['review'] = review\n data['content'] = content\n data['time'] = time\n data['type'] = type\n # 加入 List\n dataList.append(data)\n # 更改字典地址\n data = copy.copy(data)\n return dataList\n# 保存数据\ndef save_data(content_list) :\n with open('../finland.fi/life.json', 'a', encoding='utf-8') as f:\n for json_data in content_list :\n f.write(json.dumps(json_data, ensure_ascii=False)+'\\n')\n f.flush()\n# 函数回调\ndef fun_call(url) :\n link_list = get_html(url) # 返回一个生成器\n content_list = get_html_link(link_list)\n save_data(content_list)\n# 主函数\ndef main() :\n # 去除 warning \n urllib3.disable_warnings()\n url = 'https://finland.fi/category/life-society/'\n fun_call(url)\nif __name__=='__main__':\n main()\n","sub_path":"crawler-finland.fi/crawler_life.py","file_name":"crawler_life.py","file_ext":"py","file_size_in_byte":3641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"152610013","text":"class Solution(object):\n def minCostII(self, costs):\n \"\"\"\n :type costs: List[List[int]]\n :rtype: int\n \"\"\"\n if not costs or not len(costs[0]):\n return 0\n \n n, k = len(costs), len(costs[0])\n # min1 is the index of the 1st-smallest cost till previous house\n # min2 is the index of the 2nd-smallest cost till previous house\n min1, min2 = -1, -1\n for i in range(n):\n last1, last2 = min1, min2\n min1, min2 = -1, -1\n for j in range(k):\n if j != last1:\n costs[i][j] += costs[i-1][last1] if last1 >= 0 else 0\n else:\n costs[i][j] += costs[i-1][last2] if last2 >= 0 else 0\n # update min1, min2 on the fly\n if min1 < 0 or costs[i][j] < costs[i][min1]:\n min2 = min1\n min1 = j\n elif min2 < 0 or costs[i][j] < costs[i][min2]:\n min2 = j\n \n return costs[n-1][min1]\n","sub_path":"python_solutions/265-paint-house-ii.py","file_name":"265-paint-house-ii.py","file_ext":"py","file_size_in_byte":1060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"4118898","text":"# Reverse Strings in a Sentence\n\n# Given a string, implement a program that will reverse the order of the characters\n# in each word within a sentence while still preserving whitespaces and initial word order.\n\n# Example:\n# Input: \"Let's do a coding challenge\"\n# Output: \"s'teL od a gniedoc egnellahc\"\n\nanother_string = \"Let's do a coding challenge\"\ndef reverse_words(str):\n str = str.split(' ')\n for i in range(len(str)):\n str[i] = str[i][::-1]\n return ' '.join(str)\nprint(reverse_words(another_string))\n","sub_path":"reverse_strings.py","file_name":"reverse_strings.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"238558931","text":"#list of tickers for commodities\n\nticker_dictionary = {\n \"Gold\":\"GC=F\",\n \"Crude Oil\":\"CL=F\",\n \"Silver\":\"SI=F\",\n \"Platinum\":\"PL=F\",\n 
\"Palladium\":\"PA=F\",\n \"Copper\":\"HG=F\",\n \"Natural Gas\":\"NG=F\",\n \"Corn\":\"ZC=F\",\n \"Soybean\":\"ZS=F\",\n \"Coffee\":\"KC=F\",\n \"S&P500\":\"ES=F\",\n \"Dow30\":\"YM=F\",\n \"Nasdaq\":\"NQ=F\",\n \"Cattle Live\":\"LE=F\",\n \"Hogs\":\"HE-F\",\n \"Lumber\":\"LBS=F\",\n \"Cotton\":\"CT=F\",\n \"R2000\":\"RTY=F\"\n}\n\n","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"286330358","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\n#Looking for a and b for y = a*x^b\ndef calculate_ab(xi, xf, yi, yf):\n logxi = np.log(xi)\n logxf = np.log(xf)\n logyi = np.log(yi)\n logyf = np.log(yf)\n b = (logyf - logyi)/(logxf - logxi)\n loga = logyi - b*logxi\n a = np.exp(loga)\n return a, b\n\n#Calculate deltaS from deltaS = int from xi to xf a*x^b\n#Calculate deltaS from deltaS = int from xi to xf a*x^b\ndef delta_S(xi, xf, yi, yf):\n [a, b] = calculate_ab(xi, xf, yi, yf)\n print(a, b)\n b = np.nan_to_num(b)\n deltaS = a/(b+1)*(xf**(b+1) - xi**(b+1))\n return deltaS\n\n#Calculate total integral from init to final a*x^b\nx = np.linspace(0.5, 10, 100)\ny = 3*x**2\ndeltaS = 0\n\nintegral = 0\n\nfor i in range (1, len(x)):\n deltaS = delta_S(x[i-1], x[i], y[i-1], y[i])\n integral = integral + deltaS\n\nprint(integral)\nplt.xscale('log')\nplt.yscale('log')\nplt.plot(x, y)\nplt.show()\n","sub_path":"Codes/Vieux/integration.py","file_name":"integration.py","file_ext":"py","file_size_in_byte":913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"385847069","text":"import pytest\nfrom Circle import *\n\n\ndef test_radius():\n c = Circle(4)\n assert c.radius == 4\n\n\ndef test_diameter():\n c = Circle(4)\n assert c.diameter == 8\n c.diameter = 10\n assert c.diameter == 10\n assert c.radius == 5\n\n\ndef test_area():\n c = Circle(4)\n assert c.area == 50.26548245743669\n with pytest.raises(AttributeError):\n c.area = 100\n\n\ndef test_from_diameter():\n c = Circle.from_diameter(20)\n assert c.diameter == 20\n assert c.radius == 10\n\n\ndef test__repr__():\n c = Circle(4)\n assert repr(c) == \"Circle(4)\"\n\n\ndef test__str__():\n c = Circle(4)\n print(c)\n assert str(c) == \"Circle with radius: 4\"\n\n\ndef test__add__():\n c1 = Circle(2)\n c2 = Circle(4)\n # print(repr(c1 + c2))\n assert repr(c1 + c2) == \"Circle(6)\"\n\n\ndef test__mul__():\n c1 = Circle(2)\n c2 = Circle(4)\n # print(repr(c1 * c2))\n assert repr(c1 * c2) == \"Circle(8)\"\n assert repr(c1 * 3) == \"Circle(6)\"\n assert repr(3 * c1) == \"Circle(6)\"\n\n\ndef test__lt__():\n c1 = Circle(2)\n c2 = Circle(4)\n tf1 = c1 < c2\n tf2 = c2 < c1\n assert tf1 is True\n assert tf2 is False\n\n\ndef test__gt__():\n c1 = Circle(2)\n c2 = Circle(4)\n tf1 = c1 > c2\n tf2 = c2 > c1\n assert tf1 is False\n assert tf2 is True\n\n\ndef test__eq__():\n c1 = Circle(2)\n c2 = Circle(4)\n c3 = Circle(4)\n tf1 = c1 == c2\n tf2 = c2 == c3\n assert tf1 is False\n assert tf2 is True\n\n\ndef test_sort():\n c1 = Circle(2)\n c2 = Circle(4)\n c3 = Circle(9)\n circles = [c2, c3, c1]\n circles.sort()\n assert circles == [c1, c2, c3]\n","sub_path":"students/ABartles/Exercise8/Test_Circle.py","file_name":"Test_Circle.py","file_ext":"py","file_size_in_byte":1594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"622545128","text":"# coding=utf-8\nfrom timeit import timeit\nimport cProfile, pstats, 
StringIO\nfrom types import FunctionType\n\ndef _timeit(func, *args, **kwargs):\n def _wrapper(func, *args, **kwargs):\n def _wrapped():\n return func(*args, **kwargs)\n return _wrapped\n return timeit(_wrapper(func, *args, **kwargs))\n\ndef _profile(func, *args, **kwargs):\n pr = cProfile.Profile()\n pr.enable()\n for i in xrange(10000):\n func(*args, **kwargs)\n pr.disable()\n s = StringIO.StringIO()\n ps = pstats.Stats(pr, stream=s).sort_stats('cumulative')\n ps.print_stats()\n return s.getvalue()\n\n\ndef fprofile(*args, **kwargs):\n functions = [x for x in args if type(x) == FunctionType]\n new_args = [x for x in args if x not in functions]\n results = {}.fromkeys([f.func_name for f in functions])\n for func in functions:\n print(\"RESULTS FOR FUNCTION : {} \".format(func.func_name))\n temp = _timeit(func, *new_args, **kwargs)\n results[func.func_name] = temp\n print(\"TIMEIT: {}\".format(temp))\n print(\"PROFILE: {}\".format(_profile(func, *new_args, **kwargs)))\n\n print(\"RESULTS {}\".format(results) )\n print(\"THE BEST RESULT IS : {}\".format(min(results.values())))\n print(\"THE WORST RESULT IS : {}\".format(max(results.values())))\n\ndef a(*args):\n return args\ndef b(*args):\n return args\n\nif __name__ == \"__main__\":\n fprofile(a, b, 3, 2, 3)","sub_path":"misc/profiler.py","file_name":"profiler.py","file_ext":"py","file_size_in_byte":1414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"464733066","text":"import sys\nimport os\nimport re\nimport time\nimport asyncore\nimport MySQLdb\nimport datetime\nimport logging\n\ndef textify(nodes, sep=' '):\n if not isinstance(nodes, (list, tuple)):\n nodes = [nodes]\n\n def _t(x):\n if isinstance(x, (str, unicode)):\n return [x]\n\n if hasattr(x, 'xmlNode'):\n if not x.xmlNode.get_type() == 'element':\n return [x.extract()]\n else:\n if isinstance(x.root, (str, unicode)):\n return [x.root]\n\n return (n.extract() for n in x.select('.//text()'))\n\n nodes = chain(*(_t(node) for node in nodes))\n nodes = (node.strip() for node in nodes if node.strip())\n\n return sep.join(nodes)\n\ndef xcode(text, encoding='utf8', mode='strict'):\n return text.encode(encoding, mode) if isinstance(text, unicode) else text\n\n\ndef compact(text, level=0):\n if text is None: return ''\n\n if level == 0:\n text = text.replace(\"\\n\", \" \")\n text = text.replace(\"\\r\", \" \")\n\n compacted = re.sub(\"\\s\\s(?m)\", \" \", text)\n if compacted != text:\n compacted = compact(compacted, level+1)\n\n return compacted.strip()\n\ndef clean(text):\n if not text: return text\n\n value = text\n value = re.sub(\"&\", \"&\", value)\n value = re.sub(\"<\", \"<\", value)\n value = re.sub(\">\", \">\", value)\n value = re.sub(\""\", '\"', value)\n value = re.sub(\"'\", \"'\", value)\n\n return value\n\ndef normalize(text):\n return clean(compact(xcode(text)))\n\ndef get_compact_traceback(e=''):\n except_list = [asyncore.compact_traceback()]\n return \"Error: %s Traceback: %s\" % (str(e), str(except_list))\n\ndef create_logger_obj(source):\n cur_dt = str(datetime.datetime.now().date())\n LOGS_DIR = os.path.join(os.path.dirname(os.getcwd()), 'logs') \n log_file_name = \"spider_%s_%s.log\" % (source, cur_dt)\n log = initialize_logger(os.path.join(LOGS_DIR, log_file_name))\n return log\n\ndef initialize_logger(file_name, log_level_list=[]):\n logger = logging.getLogger()\n try:\n add_logger_handler(logger, file_name, log_level_list)\n except (KeyboardInterrupt, SystemExit):\n raise\n except Exception as e:\n e = 
sys.exc_info()[2]\n time_str = time.strftime(\"%Y%m%dT%H%M%S\", time.localtime())\n exception_str = \"%s: %s: Exception: %s\" % (time_str, sys.argv, get_compact_traceback(e))\n #print exception_str\n\n return logger\n\n\ndef add_logger_handler(logger, file_name, log_level_list=[]):\n success_cnt, handler = 3, None\n\n for i in xrange(success_cnt):\n try:\n handler = logging.FileHandler(file_name)\n break\n except (KeyboardInterrupt, SystemExit):\n raise\n except: pass\n\n if not handler: return\n\n formatter = logging.Formatter('%(asctime)s.%(msecs)d: %(filename)s: %(lineno)d: %(funcName)s: %(levelname)s: %(message)s', \"%Y%m%d%H%M%S\")\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n\n set_logger_log_level(logger, log_level_list)\n\n if handler.stream:\n set_close_on_exec(handler.stream)\n","sub_path":"flights/seat_finder_tool_scrapers/seat_finder_tool_scrapers/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"243908439","text":"#!/usr/bin/env python3\nimport sys\nimport getopt\n\n\ndef usage(message=None):\n '''\n display a nice usage message along with an optional message\n describing an error\n '''\n if message:\n sys.stderr.write(message + \"\\n\")\n usage_message = \"\"\"Usage: python3 bingo-generator.py --terms [--output ]\n [--header ] [--color <#nnnnnn>] [--free ]\nor: python3 bingo-generator.py --help\n\nReads a list of phrases from a file, fills in an html template,\nand writes the result, possibly with a title, to stdout or\noptionally to a file.\n\nArguments:\n\n --terms (-t): path to file containing list of terms, one per line;\n there should be 24 of them at least\n --output (-o): path to file into which to write the html\n Default: written to stdout\n --heading (-h): what to call the bingo card (goes in the
header)\n Default: Bingo Card\n --free (-f): what to put in the free space\n Default: FREE SPACE\n --bgimage (-b): what image to use for the background image\n Default: bg-image.jpg (if missing, white)\n --color (-c): color to turn a square when it's been clicked\n Default: #ffcc99\n --help (-H): show this help message\n\nExamples:\n\npython3 bingo-generator.py \\\\\n -t trump_terms.txt -o trump_bingo.html -h \"Presidential Border Bingo\" -b trump_bg.jpg\npython3 bingo-generator.py \\\\\n -t nice_terms.txt -o nice.html -h \"A Nice Day\" -f \"Punch a Nazi\" -b nice_bg.jpg\npython3 bingo-generator.py \\\\\n -t impeach_terms.txt -o impeach_bingo.html -h \"Impeachment Bingo\" -b trump_bg.jpg -f 'WITCH HUNT!'\npython3 bingo-generator.py \\\\\n -t corpbuzz_terms.txt -o corpbuzz_bingo.html -h \"Corporate Buzzword Bingo\" -b corpbuzz_bg.jpg -f 'IDEATE!' --color '#EBA5D5'\n\"\"\"\n sys.stderr.write(usage_message)\n sys.exit(1)\n\n\ndef get_default_opts():\n '''\n initialize args with default values and return them\n '''\n args = {'output': None, 'heading': 'Bingo Card', 'color': '#ffcc99',\n 'free': 'Free Space', 'bgimage': 'bg-image.jpg'}\n return args\n\n\ndef process_opts():\n '''\n get command-line args and values, falling back to defaults\n where needed, whining about bad args\n '''\n try:\n (options, remainder) = getopt.gnu_getopt(\n sys.argv[1:], \"t:o:h:c:f:b:H\", [\"terms=\", \"output=\", \"heading=\",\n \"color=\", \"free=\", 'bgimage=',\"help\"])\n\n except getopt.GetoptError as err:\n usage(\"Unknown option specified: \" + str(err))\n\n args = get_default_opts()\n\n for (opt, val) in options:\n if opt in [\"-t\", \"--terms\"]:\n args['terms'] = val\n elif opt in [\"-o\", \"--output\"]:\n args['output'] = val\n elif opt in [\"-h\", \"--heading\"]:\n args['heading'] = val\n elif opt in [\"-c\", \"--color\"]:\n args['color'] = val\n elif opt in [\"-f\", \"--free\"]:\n args['free'] = val\n elif opt in [\"-b\", \"--bgimage\"]:\n args['bgimage'] = val\n elif opt in [\"-H\", \"--help\"]:\n usage('Help for this script\\n')\n else:\n usage(\"Unknown option specified: <%s>\" % opt)\n\n if remainder:\n usage(\"Unknown option(s) specified: {opt}\".format(opt=remainder[0]))\n\n check_opts(args)\n return args\n\n\ndef check_opts(args):\n '''\n whine if mandatory args not supplied\n '''\n for name in ['terms']:\n if name not in args or not args[name]:\n usage(\"Mandatory argument {name} not specified\".format(name=name))\n\n\ndef hex_to_rgb(hexcolor, alpha):\n '''\n pass in desired transparency and the hex string for the color\n get back the rgba() string suitable for css\n '''\n\n # if we got a shorthand color, 'fill in' the missing hex digits\n if len(hexcolor[1:]) == 3:\n hexcolor = '#' + hexcolor[1]*2 + hexcolor[2]*2 + hexcolor[3]*2\n red = int(hexcolor[1:3], 16)\n green = int(hexcolor[3:5], 16)\n blue = int(hexcolor[5:7], 16)\n return 'rgba({r},{g},{b},{a})'.format(r=red, g=green, b=blue, a=alpha)\n\n\ndef do_main():\n '''entry point'''\n\n args = process_opts()\n\n # read in the bingo terms\n in_file = open(args['terms'], 'r')\n terms = [line.strip() for line in in_file.readlines()]\n in_file.close()\n terms = list(filter(lambda x: x != \"\", terms))\n terms = ['\"' + term + '\"' for term in terms]\n terms = \",\".join(terms)\n args['phrases'] = terms\n # make the cells a bit transparent\n args['color'] = hex_to_rgb(args['color'], '0.4')\n\n html_template = open(\"html_template.txt\", \"r\").read()\n for argname in ['phrases', 'color', 'heading', 'free', 'bgimage']:\n html_template = 
html_template.replace('{{' + argname + '}}', args[argname])\n\n if args['output']:\n out_file = open(args['output'], 'w')\n out_file.write(html_template)\n out_file.close()\n else:\n sys.stdout.write(html_template)\n\n\nif __name__ == '__main__':\n do_main()\n","sub_path":"bingo-generator.py","file_name":"bingo-generator.py","file_ext":"py","file_size_in_byte":4996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"540355554","text":"# 10. Regular Expression Matching\n\n# Given an input string (s) and a pattern (p), implement regular expression\n# matching with support for '.' and '*'.\n\n# '.' Matches any single character.\n# '*' Matches zero or more of the preceding element.\n# The matching should cover the entire input string (not partial).\n\ndef match(string, pattern):\n if pattern == '':\n return string == ''\n\n next_single_match = bool(string) and pattern[0] in [string[0], '.']\n\n if len(pattern) >= 2 and pattern[1] == '*':\n return (\n match(string, pattern[2:])\n or (next_single_match and match(string[1:], pattern))\n )\n\n return next_single_match and match(string[1:], pattern[1:])\n\ndef memoized_match(string, pattern):\n MEMO = {}\n\n def match(i, j):\n if (i, j) not in MEMO:\n if j == len(pattern):\n return i == len(string)\n\n next_single_match = i != len(string) and pattern[j] in [string[i], '.']\n\n if j + 1 < len(pattern) and pattern[j + 1] == '*':\n pattern_match = (\n match(i, j + 2)\n or (next_single_match and match(i + 1, j))\n )\n else:\n pattern_match = next_single_match and match(i + 1, j + 1)\n\n MEMO[(i, j)] = pattern_match\n\n return MEMO[(i, j)]\n\n return match(0, 0)\n\n# 44. Wildcard Matching\n\n# Given an input string (s) and a pattern (p), implement wildcard pattern\n# matching with support for '?' 
and '*'.\n\ndef wildcard_matching(string, pattern):\n if pattern == '':\n return string == ''\n\n if string == '':\n if pattern[0] == '*':\n return wildcard_matching(string, pattern[1:])\n else:\n return False\n\n\n next_single_match = bool(string) and pattern[0] in [string[0], '?']\n\n if pattern[0] == '*':\n return (\n wildcard_matching(string, pattern[1:])\n or wildcard_matching(string[1:], pattern)\n )\n\n return next_single_match and wildcard_matching(string[1:], pattern[1:])\n\ndef memoized_wildcard_match(string, pattern):\n MEMO = {}\n\n def match(i, j):\n if (i, j) not in MEMO:\n if j == len(pattern):\n pattern_match = i == len(string)\n\n else:\n if i == len(string):\n if pattern[j] == '*':\n pattern_match = match(i, j + 1)\n else:\n pattern_match = False\n else:\n next_single_match = i < len(string) and pattern[j] in [string[i], '?']\n\n if pattern[j] == '*':\n pattern_match = match(i, j + 1) or match(i + 1, j)\n else:\n pattern_match = next_single_match and match(i + 1, j + 1)\n\n MEMO[(i, j)] = pattern_match\n\n return MEMO[(i, j)]\n\n return match(0, 0)\n","sub_path":"lib/pinterest/regex_matching.py","file_name":"regex_matching.py","file_ext":"py","file_size_in_byte":2921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"292286175","text":"import os\nimport json\nimport pymongo\nimport ssl\nfrom bson import json_util\nfrom main.CONFIG_READER.read import get_details\n\nclass Loader(): \n \"\"\"\n The abstract loader class for loading module/publiction data from serialized JSON files, if they exist, otherwise from MongoDB.\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Initializes connection host and JSON file paths for LDA and SVM prediction results, each containing data for both UCL modules \n and scopus research publications.\n \"\"\"\n self.host = self.database = get_details(\"MONGO_DB\", \"client\")\n self.lda_prediction_path = \"main/NLP/LDA/SDG_RESULTS/training_results.json\"\n self.svm_prediction_path = \"main/NLP/SVM/SDG_RESULTS/training_results.json\"\n\n def load(self, count: int):\n \"\"\"\n Loads data from pickled file.\n Returns Pandas DataFrame.\n \"\"\"\n raise NotImplementedError\n\n def load_lda_prediction_results(self):\n \"\"\"\n Loads SDG predictions for LDA from a serialised json file, if it exists, otherwise from MongoDB.\n \"\"\"\n raise NotImplementedError\n\n def load_svm_prediction_results(self):\n \"\"\"\n Loads SDG predictions for Svm from a serialised json file if it exists, otherwise, loads from MongoDB.\n \"\"\"\n if os.path.exists(self.svm_prediction_path):\n with open(self.svm_prediction_path) as json_file:\n data = json.load(json_file)\n else:\n client = pymongo.MongoClient(self.host, ssl_cert_reqs=ssl.CERT_NONE)\n db = client.Scopus\n col = db.SvmSdgPredictions\n data = col.find()\n data = json.loads(json_util.dumps(data)) # process mongodb response to a workable dictionary format.\n client.close()\n\n return data\n\n def load_string_matches_results(self):\n \"\"\"\n Loads SDG keyword string matching results from a serialised file, if it exists, otherwise from MongoDB.\n \"\"\"\n raise NotImplementedError\n\n def load_pymongo_db(self):\n \"\"\"\n Downloads data from SQL Server and serialises it into <*.pkl>\n \"\"\"\n raise NotImplementedError\n","sub_path":"src/main/LOADERS/loader.py","file_name":"loader.py","file_ext":"py","file_size_in_byte":2227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"339988016","text":"from flask import 
Flask, render_template, url_for, request, redirect, send_from_directory, session, flash\nfrom functools import wraps\nfrom .db import init_db\nfrom .models.object import Object\nfrom .controllers.object import ObjectController\nfrom .controllers.user import UserController\nfrom flask import jsonify\n\n\ndef create_app(test_config=None):\n app = Flask(__name__, instance_relative_config=True)\n\n app.config.from_mapping(\n SECRET_KEY='dev',\n UPLOAD_FOLDER='/var/www/uploads'\n )\n\n from . import database\n database.init_app(app)\n \n def is_logged_in(f):\n @wraps(f)\n def wrap(*args, **kwargs):\n if 'logged_in' in session:\n return f(*args, **kwargs)\n else:\n flash('Nao autorizado, faça o login', 'perigo')\n return redirect(url_for('login'))\n return wrap\n\n\n @app.route('/')\n def hello():\n init_db() \n\n UserController.fill_table()\n ObjectController.fill_table()\n\n return redirect(url_for('view_objects'))\n\n @app.route('/users/view', methods=['GET'])\n @is_logged_in\n def profile():\n objs = ObjectController.get_objects_of_user(session['id'])\n\n return render_template('profile/index.html', objs=objs)\n\n\n @app.route('/objects/create', methods=['GET', 'POST'])\n @is_logged_in\n def create_object():\n if request.method == 'GET':\n return render_template('registerObject/index.html')\n\n else:\n ObjectController.create(request)\n\n return redirect(url_for('view_objects'))\n\n\n @app.route('/objects/view', methods=['GET'])\n def view_objects():\n objs = ObjectController.get_objects()\n \n return render_template('showCase/index.html', objs=objs)\n\n @app.route('/objects/view/', methods=['GET'])\n def view_object(id):\n obj = ObjectController.get_object(id)\n user = UserController.get_user(obj.user_id)\n \n return render_template('objectDetails/index.html', obj=obj, user=user)\n\n\n @app.route('/objects/update/', methods=['GET', 'POST'])\n @is_logged_in\n def update_object(id):\n obj = ObjectController.get_object(id)\n\n if session['id'] == obj.user_id:\n if request.method == 'GET':\n\n return render_template('registerObject/index.html', obj=obj)\n\n else:\n obj = ObjectController.update(id, request)\n\n return redirect(url_for('profile'))\n\n else:\n return redirect(url_for('profile'))\n\n\n @app.route('/objects/delete/', methods=['GET'])\n @is_logged_in\n def delete_object(id):\n obj = ObjectController.get_object(id)\n\n if session['id'] == obj.user_id:\n obj = ObjectController.delete(id)\n\n return redirect(url_for('profile'))\n\n\n @app.route('/login', methods=['GET', 'POST'])\n def login():\n if request.method == 'POST':\n UserController.login(request)\n\n return redirect(url_for('view_objects'))\n \n return render_template('login/index.html')\n\n\n @app.route('/logout')\n @is_logged_in\n def logout():\n UserController.logout()\n\n return redirect(url_for('view_objects'))\n\n\n @app.route('/signup', methods=['GET', 'POST'])\n def signup():\n if request.method == 'POST':\n UserController.create(request)\n\n return redirect(url_for('login'))\n\n return render_template('signUp/index.html')\n \n\n @app.route('/objects', methods=['GET', 'POST'])\n def json_objects_get_or_create():\n if request.method == 'POST':\n return ObjectController.create(request)\n\n objs = ObjectController.get_objects()\n return jsonify(eqtls=[obj.serialize() for obj in objs])\n\n\n @app.route('/objects/', methods=['PUT', 'DELETE'])\n def json_object_get_or_update_or_delete(id):\n if request.method == 'DELETE':\n result = ObjectController.delete(id)\n\n return jsonify(status='success')\n \n else:\n return 
ObjectController.update(id, request)\n \n\n @app.route('/db_test')\n @is_logged_in\n def db_test():\n database.init_db()\n\n return 'Banco de dados inicializado com sucesso!' \n\n @app.route('/uploads/')\n def upload(filename):\n return send_from_directory(app.config['UPLOAD_FOLDER'], filename)\n\n\n return app","sub_path":"app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"509113943","text":"from itertools import permutations\nimport numpy as np\nimport pandas as pd\nimport progressbar\nfrom scipy.sparse import lil_matrix\nK = 10\n\ndef delete_directory_contents(folder):\n print('[Function]: Deleting Directory Contents: ',folder)\n import os, shutil\n for the_file in os.listdir(folder):\n file_path = os.path.join(folder,the_file)\n try:\n if os.path.isfile(file_path):\n os.unlink(file_path)\n except Exception as e:\n print(e)\n\n\ndef test_import():\n print('test')\n\n\ndef string_list_to_numpy(x):\n '''\n '[1, 2, 3, 4, 5]' -> numpy array([1,2,3,4,5])\n '''\n try:\n x = x.strip('[] ').split()\n ar = np.asarray(x).astype(float)\n if ar.shape[0]==1:\n return np.nan\n else:\n return ar\n except Exception as e:\n return np.nan\n\n\ndef emission_probabilities_pyx(y_df, x_df, x_label='over_under_2.5', hidden_states=2):\n un = np.unique(y_df.values)\n pyx = np.zeros((hidden_states, np.unique(y_df.values).shape[0]))\n # print(un,pyx)\n for i, ix in enumerate(y_df.index):\n try:\n yx = np.where(y_df.loc[ix, x_label] == un)[0][0]\n pyx[int(x_df.loc[ix, x_label]), int(yx)] += 1\n except Exception as e:\n # print('exception',e)\n continue\n for i, s in enumerate(pyx.sum(axis=1)):\n pyx[i, :] = pyx[i, :] / s\n\n return pyx\n\n# def string_list_to_numpy(x):\n# \"\"\"\n# This function converts an stringed-array '[1 2 3 4 6]'\n# into a numpy array.\n#\n# The reason is if i where to save a numpy array in a pandas dataframe cell\n# then when reading it from a file it would read as a string.\n# \"\"\"\n# x = x.strip('[] ').split()\n# ar = np.asarray(x).astype(float)\n# return ar\n\n\n\ndef compute_elo_ratting_dataframe_for_champ(data,champs, champ_ix):\n matches_champ = data[data.championship == champs.name[champ_ix]]\n # TODO: dont change the index\n # matches_champ.index = range(matches_champ.shape[0])\n print('champ', matches_champ.shape)\n # get all the home and away teams\n ht = matches_champ.home_team.values\n at = matches_champ.away_team.values\n # create an array with both home and arway teams regardles of duplicates\n all_teams = np.append(ht, at)\n # drop all the duplicates\n print(all_teams.shape)\n all_teams = np.unique(all_teams)\n print(all_teams.shape)\n # create a a Player class for each team. 
this class stores the elo rating for each player\n players = [Player(name=p) for p in all_teams]\n elo = Elo()\n # elo_table_array = compute_elo_by_game(matches_champ, players, all_teams, elo)\n elo_table_array = compute_elo_by_goals(matches_champ, players, all_teams, elo)\n team_elo_timeline_df = pd.DataFrame(elo_table_array, columns=all_teams, index=matches_champ.index)\n return team_elo_timeline_df\n\ndef compute_elo_ratting_dataframe_for_champ_v2(data, champs, teams=None):\n matches_champ = data[data.championship.isin(champs)]\n # TODO: dont change the index\n # matches_champ.index = range(matches_champ.shape[0])\n print('champ', matches_champ.shape)\n # get all the home and away teams\n ht = matches_champ.home_team.values\n at = matches_champ.away_team.values\n # create an array with both home and arway teams regardles of duplicates\n all_teams = np.append(ht, at)\n # drop all the duplicates\n all_teams = np.unique(all_teams)\n print(all_teams.shape)\n # # create a a Player class for each team. this class stores the elo rating for each player\n players = [Player(name=p) for p in all_teams]\n\n elo = Elo()\n # # elo_table_array = compute_elo_by_game(matches_champ, players, all_teams, elo)\n elo_table_array = compute_elo_by_goals(matches_champ, players, all_teams, elo)\n # return elo_table_array, all_teams ,teams, matches_champ\n if teams is not None:\n ix = np.isin(all_teams, teams)\n teams_ix = np.where(ix)[0]\n final = elo_table_array[:,teams_ix]\n team_elo_timeline_df = pd.DataFrame(final.toarray(), columns=teams, index=matches_champ.index)\n return team_elo_timeline_df\n else:\n team_elo_timeline_df = pd.DataFrame(elo_table_array, columns=all_teams, index=matches_champ.index)\n return team_elo_timeline_df\n\n\nclass Elo(object):\n\n def match(self, p1, p2):\n return self.match_algo_strict(p1, p2)\n\n @staticmethod\n def match_algo_strict(winner, looser):\n r1 = max(min(looser.score - winner.score, 400), -400)\n r2 = max(min(winner.score - looser.score, 400), -400)\n e1 = 1.0 / (1+10**(r1 / 400))\n e2 = 1.0 / (1+10**(r2 / 400))\n s1 = 1\n s2 = 0\n winner.score = winner.score + K*(s1-e1)\n looser.score = looser.score + K*(s2-e2)\n\n # increase win counter\n winner.wins += 1\n\n # increase match counter\n winner.matches += 1\n looser.matches += 1\n\n return winner, looser\n\n\nclass Player(object):\n def __init__(self, name, score=100, wins=0, matches=0):\n self.name = name\n self.score = score\n self.wins = wins\n self.matches = matches\n\n\n\ndef transition_matrix_pxx(data, result, n_states=None):\n \"\"\"\n this method computes the transition matrix for single states NOT paired states.\n \"\"\"\n if n_states is None:\n states = data[result].dropna().drop_duplicates().sort_values().astype(int).values\n else:\n states = range(n_states)\n states_same_index = []\n for state in states:\n states_same_index.append((state, state))\n data_df = data[result].dropna()\n # stupid but works\n transitions = [(int(i), int(j)) for i, j in list(permutations(states, 2)) + states_same_index]\n\n transition_matrix = np.zeros((len(states), len(states)))\n for index, transition in enumerate(transitions):\n for i in range(data_df.shape[0]-1):\n if (data_df.iloc[i], data_df.iloc[i + 1]) == transition: # automatically create a tuple for multiple states\n transition_matrix[transition] += 1\n for state in states:\n transition_matrix[state, :] = transition_matrix[state, :]/transition_matrix[state, :].sum()\n return transition_matrix\n\n\n\n\n\n\n\n\n# if __name__ == '__main__':\n# pass\n\n\n\n\n\n\ndef 
compute_elo_by_goals(data_df, players, all_teams, elo, initial_score=100):\n \"\"\"\n This function is used to compute the elo ratings of teams based on goals scored depending on wins and losses.\n\n :param data_df:\n :param players:\n :param elo:\n :return:\n \"\"\"\n\n n_games = data_df.shape[0]\n # does this work for sparce matrix\n elo_table = lil_matrix((n_games, len(players)))\n\n # elo_table = np.zeros((n_games, len(players)))\n # set initial score for all teams\n elo_table[0, :] = initial_score\n bar = progressbar.ProgressBar(widgets=[\n ' [', progressbar.Timer(), '] ',\n progressbar.Bar(),\n ' (', progressbar.ETA(), ') ',\n ])\n for i in bar(range(n_games)):\n\n match = data_df.iloc[i]\n player_home = match.home_team\n player_away = match.away_team\n hid = np.where(all_teams == player_home)[0][0]\n aid = np.where(all_teams == player_away)[0][0]\n pair = [players[hid], players[aid]]\n res = match.result_final\n home_goals = match.home_goals\n away_goals = match.away_goals\n\n if res == 0:\n for goal_difference in range(int(abs(home_goals-away_goals))):\n a, b = elo.match_algo_strict(pair[0], pair[1])\n elo_table[i, hid] = a.score\n elo_table[i, aid] = b.score\n elif res == 2:\n for goal_difference in range(int(abs(home_goals-away_goals))):\n a, b = elo.match(pair[1], pair[0])\n elo_table[i, aid] = a.score\n elo_table[i, hid] = b.score\n else:\n pass\n\n # # make continuous\n # bar = progressbar.ProgressBar(widgets=[\n # ' [', progressbar.Timer(), '] ',\n # progressbar.Bar(),\n # ' (', progressbar.ETA(), ') ',\n # ])\n # print('[C] Making continuous')\n # for p in bar(range(len(players))):\n #\n # for g in range(n_games):\n # if elo_table[g, p] == 0:\n # elo_table[g, p] = elo_table[g-1, p]\n # else:\n # pass\n return elo_table\n","sub_path":"pack/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":8324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"629405072","text":"# -*- coding: utf-8 -*-\nimport scrapy\nimport logging\nimport random\nfrom urllib.parse import urljoin\nfrom urllib.parse import urlparse\nimport time\nclass HuxiuSpider(scrapy.Spider):\n name = 'ifeng'\n allowed_domains = ['news.ifeng.com']\n start_urls = ['https://news.ifeng.com']\n\n def parse(self, response):\n if(response.status==200):\n self.logger.info('scrapy fetch page: %s', response.url)\n hrefs = response.xpath('//a/@href')\n print(\"--\")\n for href in hrefs:\n url = href.get()\n urlp = urlparse(url)\n\n #如果netloc是空则是相对路径\n #ParseResult(scheme='http', netloc='www.aw.com', path='/df/23332.jpg', params='', query='', fragment='')\n if \"news.ifeng.com\" in url:\n yield scrapy.Request(\"http://news.ifeng.com/c/7txN1W6aN6m\", callback=self.repeat)\n # yield scrapy.Request(abs_url,callback=self.parse_article)\n else:\n self.logger.error('FAILED! 
scrapy fetch page: %s', response.url)\n\n def repeat(self,response):\n time.sleep(5)\n print(\"................1\")\n","sub_path":"news_scrapy/newspider/newspider/spiders/ifeng.py","file_name":"ifeng.py","file_ext":"py","file_size_in_byte":1173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"361383150","text":"import numpy as np\nfrom src.domain.pathfinding.surfaces.SurfaceCalculator import SurfaceCalculator\nfrom src.domain.pathfinding.surfaces.SurfaceType import SurfaceType\nfrom src.domain.pathfinding.surfaces.types import RectangleSurface\n\n\nclass RectangleSurfaceCalculator(SurfaceCalculator):\n\n def __init__(self, rectangle_surface: RectangleSurface):\n self.__rectangle_surface = rectangle_surface\n\n def get_points(self, x_values: np.ndarray, y_values: np.ndarray, precision: float, padding: float, surface_type: SurfaceType) -> np.ndarray:\n x, y = self.__rectangle_surface.center_x, self.__rectangle_surface.center_y\n half_width = self.__rectangle_surface.width / 2 + padding\n half_height = self.__rectangle_surface.height / 2 + padding\n\n x_values = x_values[np.where((x_values >= x - half_width - 0.5 * precision) & (x_values <= x + half_width + 0.5 * precision))]\n y_values = y_values[np.where((y_values >= y - half_height - 0.5 * precision) & (y_values <= y + half_height + 0.5 * precision))]\n X, Y = np.meshgrid(x_values, y_values)\n search_grid = np.array([X.flatten(), Y.flatten()]).T\n x_values = search_grid[:, 0]\n y_values = search_grid[:, 1]\n\n if surface_type == SurfaceType.FILL:\n return search_grid\n elif surface_type == SurfaceType.CONTOUR:\n indices = self.__rectangle_contour_indices(x_values, y_values)\n return search_grid[indices]\n\n def __rectangle_contour_indices(self, x_values, y_values):\n return np.where(\n (x_values == x_values[0]) | (x_values == x_values[-1]) |\n (y_values == y_values[0]) | (y_values == y_values[-1])\n )\n","sub_path":"backend/src/domain/pathfinding/surfaces/RectangleSurfaceCalculator.py","file_name":"RectangleSurfaceCalculator.py","file_ext":"py","file_size_in_byte":1700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"599335269","text":"# coding: utf-8\n\nimport numpy as np\nimport pandas as pd\n\nfrom indicator_acquire import query\nfrom indicator_acquire import sql_query\nfrom filtering import filtering\nfrom data_norm import data_norm\nfrom param_config import config\nfrom data_correction import get_most_corr_code\nimport os\nimport xlrd as xl\nfrom sklearn.cluster import DBSCAN\n\n#######################\n## load data ##\n#######################\n\n#拿到当前中证800成分股\n'''\ncorr_df = pd.read_csv(config.corr_file, dtype={'index':str})\ncorr_df = corr_df.set_index('index')\n\ncodes = xl.open_workbook(config.code_file).sheets()[0]\nvalid_codes = set([codes.row_values(ix)[3] for ix in xrange(codes.nrows) if codes.row_values(ix)[6] == '\\\\N' ])\nindex = set(corr_df.index)\nindex.difference_update(valid_codes)\n\ncorr_df = corr_df.drop(list(index), axis=0)\ncorr_df = corr_df.drop(list(index), axis=1)\n\ncorr_df = 1.001 - corr_df\ncorr_df = corr_df.fillna(0.0)\n\ndb = DBSCAN(eps=0.3, min_samples=21, metric='precomputed').fit(corr_df)\n\ncore_samples_mask = np.zeros_like(db.labels_, dtype=bool)\ncore_samples_mask[db.core_sample_indices_] = True\nreal_labels = [x for x in db.labels_ if x != -1]\nn_clusters = len(set(real_labels))\ncluster_data_num = len(real_labels)\n\nprint('Number of core_samples: %d' % len(set(db.core_sample_indices_ 
)))\nprint('Estimated number of clusters: %d' % n_clusters)\nprint('Estimated number of cluster data: %d' % cluster_data_num)\nclusters = [[corr_df.index[ix] for ix in range(len(db.labels_)) if db.labels_[ix] == num] for num in range(n_clusters)]\n\nsql_list = []\nsql_list.append(config.get_sql_by_code(clusters[0]))\n\n'''\ncodes = get_most_corr_code(\"000060\", '20150101', '20160101')\n\nsql_list = []\ncode_interval = 100\nfor ix in range(0, len(codes), code_interval):\n sql_list.append(config.get_sql_by_code(codes[ix:min(len(codes), ix+code_interval)], '20150101', '20160601'))\n\ntrain_data, test_data_list = query(sql_list)\n\n#######################\n## filter noise ##\n#######################\n#train_data = filtering(train_data)\n\n#######################\n## data norm ##\n#######################\ntrain_data = train_data.sort_values(['date'])\ntrain_data.index = range(len(train_data))\ndata_norm(train_data)\nfor test_data in test_data_list:\n data_norm(test_data)\n\n#######################\n## data store ##\n#######################\ncmd = \"rm -f \" + config.code_file_path + \"/*\"\nos.system(cmd)\n# 输入符合preprocess格式的文本形式\nfor name, single_data in train_data.groupby('symbol'):\n tmp_data = single_data.drop(['date', 'symbol', 'value', 'label'], axis=1)\n features = tmp_data.values.tolist()\n values = single_data['value'].tolist()\n symbols = single_data['symbol'].tolist()\n with open('{}{}{}.txt'.format(config.code_file_path, config.file_prefix, name), 'w') as fp:\n for ix in xrange(len(symbols)):\n fp.write('{};0 {};1 {}\\n'.format(symbols[ix], \" \".join([str(x) for x in features[ix]]), values[ix]))\n\ncmd = \"rm -f \" + config.test_file_path + \"/*\"\nos.system(cmd)\n#跟上面不一样的是label位换为了code\nfor index in xrange(len(test_data_list)):\n single_data = test_data_list[index]\n tmp_data = single_data.drop(['date', 'symbol', 'value', 'label'], axis=1)\n features = tmp_data.values.tolist()\n values = single_data['value'].tolist()\n codes = single_data['symbol'].tolist()\n with open('{}{}{}.txt'.format(config.test_file_path, config.file_prefix, str(index)), 'w') as fp:\n for ix in xrange(len(codes)):\n fp.write('{};0 {};1 {}\\n'.format(codes[ix], \" \".join([str(x) for x in features[ix]]), values[ix]))\n\n#shuffle\n#data = train_data.ix[np.random.permutation(len(train_data)), :]\ntrain_data = train_data.sort_values(['date'])\n\ncmd = \"rm -f \" + config.shuffle_file_path + \"/*\"\nos.system(cmd)\n\ntmp_data = train_data.drop(['date', 'symbol', 'value', 'label'], axis=1)\nfeatures = tmp_data.values.tolist()\nvalues = train_data['value'].tolist()\nsymbols = train_data['symbol'].tolist()\nwith open('{}{}{}.txt'.format(config.shuffle_file_path, config.file_prefix, 0), 'w') as fp:\n for ix in xrange(len(symbols)):\n fp.write('{};0 {};1 {}\\n'.format(symbols[ix], \" \".join([str(x) for x in features[ix]]), values[ix]))","sub_path":"code/preprocess/run_all.py","file_name":"run_all.py","file_ext":"py","file_size_in_byte":4184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"48534456","text":"import pygame.font\nfrom pygame.sprite import Group\n\nfrom ship import Ship\n\nclass Scoreboard():\n \"\"\"Класс для вывода игровой информации.\"\"\"\n\n def __init__(self, ai_game):\n \"\"\"Инициализирует атрибуты подсчёта очков.\"\"\"\n self.ai_game = ai_game\n self.screen = ai_game.screen\n self.screen_rect = self.screen.get_rect()\n self.settings = ai_game.settings\n self.stats = ai_game.stats\n\n # Настройки шрифта для вывода счёта.\n self.text_color = 
(0, 0, 255)\n self.text_color_higt_score = (255, 0, 0)\n self.text_color_level = (0, 255, 0)\n self.text_color_ships_left = (255, 255, 0)\n self.font = pygame.font.Font('fonts/unicephalon.ttf', 24)\n # Подготовка изображений данных.\n self.prep_score()\n self.prep_high_score()\n self.prep_level()\n self.prep_ships()\n self.prep_ships_left()\n\n def prep_score(self):\n \"\"\"Преобразует текущий счёт в графическое изображение.\"\"\"\n rounded_score = round(self.stats.score, -1)\n score_str = \"{:,}\".format(rounded_score)\n self.score_image = self.font.render(f\"score:{score_str}\", True, \n self.text_color)\n # Можно задать цвет фона для счёта: ', self.settings.bg_color'.\n\n # Вывод счёта в правой верхней части экрана.\n self.score_rect = self.score_image.get_rect()\n self.score_rect.right = self.screen_rect.right - 20\n self.score_rect.top = 40\n\n def prep_high_score(self):\n \"\"\"Преобразует рекордный счёт в графическое изображение.\"\"\"\n high_score = round(self.stats.high_score, -1)\n high_score_str = \"{:,}\".format(high_score)\n self.high_score_image = self.font.render(f\"hi-score:{high_score_str}\", \n True, self.text_color_higt_score)\n\n # Рекорд выравнивается сверху над текущим счётом.\n self.high_score_rect = self.high_score_image.get_rect()\n self.high_score_rect.right = self.screen_rect.right - 20\n self.high_score_rect.top = 10\n\n ## Рекорд выравнивается по центру верхней стороны.\n #self.high_score_rect = self.high_score_image.get_rect()\n #self.high_score_rect.centerx = self.screen_rect.centerx\n #self.high_score_rect.top = self.score_rect.top\n\n def prep_level(self):\n \"\"\"Преобразует уровень в графическое изображение\"\"\"\n level_str = str(self.stats.level)\n self.level_image = self.font.render(f\"level:{level_str}\", True, \n self.text_color_level)\n\n # Уровень выводиться над рекордным счётом.\n self.level_rect = self.level_image.get_rect()\n self.level_rect.right = self.screen_rect.right -20\n self.level_rect.top = 70\n\n def prep_ships_left(self):\n \"\"\"Преобразует количество оставшихся жизней в графическое изображение\"\"\"\n ships_left_str = str(self.stats.ships_left)\n self.ships_left_image = self.font.render(f\"ships:{ships_left_str}\", \n True, self.text_color_ships_left)\n\n # Количество оставшихся жизней выводиться под счётом.\n self.ships_left_rect = self.ships_left_image.get_rect()\n self.ships_left_rect.right = self.screen_rect.right - 20\n self.ships_left_rect.top = 100\n\n def prep_ships(self):\n \"\"\"Сообщает количество оставшихся кораблей\"\"\"\n self.ships = Group()\n for ship_number in range(self.stats.ships_left):\n ship = Ship(self.ai_game)\n ship.rect.right = self.screen_rect.right - ship_number*40 + 35\n #ship.rect.x = self.settings.screen_width - 120 - ship_number*45\n ship.rect.y = 135\n self.ships.add(ship)\n\n def show_score(self):\n \"\"\"Выводит данные игры на экран.\"\"\"\n self.screen.blit(self.score_image, self.score_rect)\n self.screen.blit(self.high_score_image, self.high_score_rect)\n self.screen.blit(self.level_image, self.level_rect)\n self.screen.blit(self.ships_left_image, self.ships_left_rect)\n self.ships.draw(self.screen)\n\n def check_high_score(self):\n \"\"\"Проверяет, появился ли новый рекорд.\"\"\"\n if self.stats.score > self.stats.high_score:\n self.stats.high_score = self.stats.score\n self.prep_high_score()","sub_path":"scoreboard.py","file_name":"scoreboard.py","file_ext":"py","file_size_in_byte":4882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} 
+{"seq_id":"136947443","text":"\"\"\"종이의 개수\n\n백준 No.1780\nURL: https://www.acmicpc.net/problem/1780\n\nN×N크기의 행렬로 표현되는 종이가 있다. \n종이의 각 칸에는 -1, 0, 1의 세 값 중 하나가 저장되어 있다. \n우리는 이 행렬을 적절한 크기로 자르려고 하는데, \n이때 다음의 규칙에 따라 자르려고 한다.\n\n만약 종이가 모두 같은 수로 되어 있다면 이 종이를 그대로 사용한다.\n(1)이 아닌 경우에는 종이를 같은 크기의 9개의 종이로 자르고, \n각각의 잘린 종이에 대해서 (1)의 과정을 반복한다.\n이와 같이 종이를 잘랐을 때, -1로만 채워진 종이의 개수, \n0으로만 채워진 종이의 개수, \n1로만 채워진 종이의 개수를 구해내는 프로그램을 작성하시오.\n\n\n[입력]\n첫째 줄에 N(1≤N≤3^7, N은 3^k 꼴)이 주어진다. \n다음 N개의 줄에는 N개의 정수로 행렬이 주어진다.\n\n[출력]\n첫째 줄에 -1로만 채워진 종이의 개수를, 둘째 줄에 0으로만 채워진 종이의 개수를, \n셋째 줄에 1로만 채워진 종이의 개수를 출력한다.\n\n[예제 입력]\n9\n0 0 0 1 1 1 -1 -1 -1\n0 0 0 1 1 1 -1 -1 -1\n0 0 0 1 1 1 -1 -1 -1\n1 1 1 0 0 0 0 0 0\n1 1 1 0 0 0 0 0 0\n1 1 1 0 0 0 0 0 0\n0 1 -1 0 1 -1 0 1 -1\n0 -1 1 0 1 -1 0 1 -1\n0 1 -1 1 0 -1 0 1 -1\n\n[예제 출력]\n10\n12\n11\n\"\"\"\n\n\ndef paper_counter(matrix):\n count = {\"-1\": 0, \"0\": 0, \"1\": 0}\n N = len(matrix)\n\n def is_all_same(r, c, size, target):\n for dr in range(size):\n for dc in range(size):\n if matrix[r+dr][c+dc] != target:\n return False\n return True\n\n def get_count(r, c, size):\n target = matrix[r][c]\n if size == 1 or is_all_same(r, c, size, target):\n count[target] += 1\n else:\n size //= 3\n for i in range(3):\n for j in range(3):\n get_count(r + size * i, c + size * j, size)\n\n get_count(0, 0, N)\n keys = sorted(list(count.keys()))\n \n return [count[key] for key in keys]\n\n\nif __name__ == \"__main__\":\n N = int(input())\n matrix = []\n for _ in range(N):\n matrix.append(input().split())\n ret = paper_counter(matrix)\n [print(x) for x in ret]","sub_path":"python/boj/1780.py","file_name":"1780.py","file_ext":"py","file_size_in_byte":2249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"592514402","text":"import codecs\nimport csv\nimport json\n\n\ndef format_android(app):\n csvfile = codecs.open('ratings/android_' + app + '_ratings.csv', 'rU', 'utf-16')\n fieldnames = (\"Date\", \"Package Name\", \"Daily Average Rating\", \"Total Average Rating\")\n reader = csv.DictReader(csvfile, fieldnames)\n data = list(reader)\n\n date = data[-1][\"Date\"]\n\n for row in data:\n if date in row.values():\n rating = row[\"Total Average Rating\"]\n\n json_file = open('ratings/android_' + app + '_ratings.json', 'w')\n\n json_object = {}\n string = \"store rating: \" + str(rating) + \" \" + u'\\u2605'.encode('utf-8')\n json_object[\"rating\"] = string\n json.dump(json_object, json_file)\n\n\ndef format_ios(app):\n json_file = open('ratings/iOS_' + app + '_ratings.json', 'w')\n\n with open('ratings/iOS_' + app + '_lookup.json') as f:\n data = json.load(f)\n\n to_print = {}\n\n rating = data[\"results\"][0][\"averageUserRating\"]\n string = \"store rating: \" + str(rating) + \" \" + u'\\u2605'.encode('utf-8')\n to_print[\"rating\"] = string\n\n json.dump(to_print, json_file)\n\n\n# -----------------------------------\n\nformat_android(\"myAir\")\nformat_android(\"AirMini\")\nformat_ios(\"myAir\")\nformat_ios(\"AirMini\")\n","sub_path":"ratings.py","file_name":"ratings.py","file_ext":"py","file_size_in_byte":1218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"99522833","text":"\"\"\"DQN with Acme.\n\nCheckpoints are saved in ~/acme.\n\"\"\"\n\nimport argparse\nimport functools\n\nimport acme\nimport gym\nimport numpy as np\nimport tensorflow as tf\nfrom acme import wrappers\nfrom acme.agents.tf import dqn\nfrom acme.tf import networks\n\n# Without these lines, the script returns an 
error in Docker\nphysical_devices = tf.config.list_physical_devices(\"GPU\")\ntf.config.experimental.set_memory_growth(physical_devices[0], True)\n\n# Environment configuration\nENV_CONFIG = {\n    \"ENEMY_MISSILES.NUMBER\": 7,\n    \"ENEMY_MISSILES.PROBA_IN\": 0.07,\n    \"REWARD.DESTROYED_CITY\": -16.67,\n    \"REWARD.DESTROYED_ENEMEY_MISSILES\": 28.57,\n    \"REWARD.FRIENDLY_MISSILE_LAUNCHED\": -0.57,\n}\n\n# Maximum episode length (to determine the right value, simulate some random\n# episodes with the custom config and print env.timestep)\nMAX_EPISODE_LEN = 1150\n\n\nclass DummyALE():\n    \"\"\"Dummy ALE class.\n\n    OpenAI Gym Atari environments are built from ALE. Acme uses some of the ALE\n    functions, so we implement it here, in a dummy class, to easily make\n    Missile Command compatible.\n    \"\"\"\n\n    def lives(self):\n        \"\"\"Get lives.\n\n        Returns:\n            lives (int): 0.\n        \"\"\"\n        return 0\n\n\ndef make_environment():\n    \"\"\"Make environment.\n\n    Returns:\n        env (acme.wrappers.single_precision.SinglePrecisionWrapper).\n    \"\"\"\n    # Create the environment\n    environment = gym.make(\"gym_missile_command:missile-command-v0\",\n                           custom_config=ENV_CONFIG)\n\n    # Add the necessary ALE function\n    environment.ale = DummyALE()\n\n    # Acme processing\n    environment = wrappers.wrap_all(environment, [\n        wrappers.GymAtariAdapter,\n        functools.partial(\n            wrappers.AtariWrapper,\n            to_float=True,\n            max_episode_len=MAX_EPISODE_LEN,\n            zero_discount_on_life_loss=True,\n        ),\n        wrappers.SinglePrecisionWrapper,\n    ])\n\n    return environment\n\n\ndef get_env_agent():\n    \"\"\"Create env and agent.\n\n    Returns:\n        env_acme (acme.wrappers.observation_action_reward.\n            ObservationActionRewardWrapper).\n\n        agent (acme.agents.tf.dqn.agent.DQN).\n    \"\"\"\n    # Get environment\n    env_acme = make_environment()\n    env_spec = acme.make_environment_spec(env_acme)\n\n    # Create agent and network\n    network = networks.DQNAtariNetwork(env_spec.actions.num_values)\n    agent = dqn.DQN(env_spec,\n                    network,\n                    checkpoint_subpath=\"./acme\")\n\n    return env_acme, agent\n\n\ndef test(args):\n    \"\"\"Test agent.\n\n    Args:\n        args (argparse.Namespace): argparse arguments.\n    \"\"\"\n    # Create the environment\n    env = gym.make(\"gym_missile_command:missile-command-v0\",\n                   custom_config=ENV_CONFIG)\n\n    # Get env and agent\n    env_acme, agent = get_env_agent()\n\n    # Reset it\n    observation = env.reset()\n\n    # While the episode is not finished\n    done = False\n    while not done:\n\n        # We add alpha to the observation\n        obs_acme = np.full((observation.shape[0], observation.shape[0], 4),\n                           1.0,\n                           dtype=np.float32)\n        obs_acme[:, :, :-1] = observation\n\n        # Agent computes action\n        action = agent.select_action(obs_acme)\n\n        # One step forward\n        observation, reward, done, _ = env.step(action)\n\n        # Render (or not) the environment\n        env.render()\n\n\ndef train(args):\n    \"\"\"Train agent.\n\n    Args:\n        args (argparse.Namespace): argparse arguments.\n    \"\"\"\n    # Get env and agent\n    env_acme, agent = get_env_agent()\n\n    # Launch training\n    loop = acme.EnvironmentLoop(env_acme, agent)\n    loop.run(args.episodes)\n\n\nif __name__ == \"__main__\":\n    # Initialize parser\n    parser = argparse.ArgumentParser(\n        description=\"DQN agent training and testing with Acme.\")\n    subparsers = parser.add_subparsers()\n\n    # Train parser\n    train_parser = subparsers.add_parser(\"train\")\n    train_parser.add_argument(\"--episodes\",\n                              type=int,\n                              default=100000000,\n                              help=\"Number of episodes to train for.\")\n    train_parser.set_defaults(func=train)\n\n    # Test parser\n    test_parser = subparsers.add_parser(\"test\")\n    
test_parser.set_defaults(func=test)\n\n # Launch script\n args = parser.parse_args()\n args.func(args)\n","sub_path":"rl/acme/scripts/dqn.py","file_name":"dqn.py","file_ext":"py","file_size_in_byte":4363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"484909081","text":"n = int(input())\ndp = [[-1] * (n + 1) for i in range(n + 1)]\nroot = [[0] * (n + 1) for j in range(n + 1)]\nres = []\n\n\ndef search(f: int, s: int):\n if f > s:\n return 1\n if dp[f][s] == -1:\n ans = 0\n for k in range(f, s + 1):\n ans = search(f, k - 1) * search(k + 1, s) + dp[k][k]\n if ans > dp[f][s]:\n dp[f][s] = ans\n root[f][s] = k\n\n return dp[f][s]\n\n\ndef preorder(f: int, s: int):\n if f > s:\n return\n else:\n res.append(str(root[f][s]))\n preorder(f, root[f][s] - 1)\n preorder(root[f][s] + 1, s)\n\n\nscores = input().split(\" \")\nfor i in range(1, n + 1):\n dp[i][i] = int(scores[i - 1])\n root[i][i] = i\nprint(search(1, n))\npreorder(1, n)\nprint(\" \".join(res),end=\" \")","sub_path":"Code/CodeRecords/2415/60692/298668.py","file_name":"298668.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"167709398","text":"#!/usr/bin/python\n\nimport sys, math\n\ndef calc3(*ar):\n print(ar)\n\ndef dump(msg):\n print(msg, file=sys.stderr)\n #pass\n\ndef calc(A, ns):\n ns = sorted(ns, key=lambda x: -x)\n\n smi = len(ns)\n ad = 0\n\n if A == 1: return len(ns)\n\n while len(ns) > 0:\n N = ns[-1]\n\n if N < A:\n A = A + N\n ns.pop()\n else:\n A = A + A - 1\n ad = ad + 1\n\n smi = min(smi, ad + len(ns))\n\n\n \n\n\n return smi\n\nwith open(sys.argv[1]) as f:\n for T in range(1,int(f.readline())+1):\n (A, N) = [int(x) for x in f.readline().split()]\n ns = [int(x) for x in f.readline().split()]\n print(\"Case #%s: %s\" % (T, calc(A, ns)))\n\n","sub_path":"solutions_2692487_1/Python/Yury/src.py","file_name":"src.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"333005344","text":"import json\nimport datetime\nimport hashlib\n#import logging\nfrom google.appengine.ext import db\nfrom google.appengine.api import urlfetch\nfrom google.appengine.api import memcache\nfrom app.models import VersionCache\n\n\ndef firefox(project, data):\n url = data['source']\n content = memcache.get('cache:' + url)\n\n if content is None:\n content = urlfetch.fetch(url).content\n memcache.add('cache:' + url, content, 120)\n\n project_data = json.loads(content)\n\n sha = hashlib.sha1()\n sha.update(content)\n sha = sha.hexdigest()\n\n q = db.GqlQuery(\"SELECT * FROM VersionCache WHERE project = :1 AND commit = :2\", project, sha)\n\n if q.count() == 0:\n #logging.info('refreshing version data')\n\n version_version = project_data[data['branch']]\n\n t = VersionCache(project=project,\n version=version_version,\n commit=sha,\n date=datetime.datetime.now())\n t.put()\n #else:\n #logging.info('version data unchanged')\n\n q = db.GqlQuery(\"SELECT version FROM VersionCache WHERE project = :1 ORDER BY date DESC\", project).get()\n return q.version\n","sub_path":"app/tasks/handlers/firefox.py","file_name":"firefox.py","file_ext":"py","file_size_in_byte":1171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"36679605","text":"import wx\nimport serial\n# from thread import *\n# from threading import *\nimport time\n\n\nclass ADI(wx.Frame):\n\n def 
__init__(self, parent):\n super(ADI, self).__init__(parent, size=(640, 470), pos=(20,20))\n\n panel = wx.Panel(self)\n self.SetTitle('ADI Demonstrator')\n self.SetBackgroundColour('#eeeeee')\n self.Show()\n\n self.pitchPort = \"\"\n self.rollPort = \"\"\n self.firstPHCCcommand = True\n self.pitchPortOpened = \"\"\n self.rollPortOpened = \"\"\n self.phccPort = \"\"\n self.defaultPhccCOMport = \"COM1\" # default real serial port\n self.awaitPitchIDresponse = False\n self.awaitRollIDresponse = False\n self.setupGUI(panel)\n\n\n def setupGUI(self, panel):\n wx.StaticLine(panel, -1, (25, 45), (570, 3))\n menuFont = wx.Font(12, wx.DEFAULT, wx.NORMAL, wx.NORMAL)\n DOAheaderFont = wx.Font(8, wx.DEFAULT, wx.NORMAL, wx.NORMAL)\n rangeFont = wx.Font(8, wx.SWISS, wx.NORMAL, wx.NORMAL)\n\n # GS - LOC - AUX flags\n self.textGSflagonoff = wx.StaticText(self, label='GS flag', pos=(70, 70))\n self.textGSflagonoff.SetFont(menuFont)\n self.buttonGSonoff = wx.Button(panel, label=\"visible\", pos=(160, 65), size=(60, 25))\n self.buttonGSonoff.SetToolTip(wx.ToolTip(\"hide GS flag\"))\n self.buttonGSonoff.SetBackgroundColour('#ffcc00')\n self.buttonGSonoff.Bind(wx.EVT_BUTTON, self.GSonoffClick)\n #\n self.textLOCflagonoff = wx.StaticText(self, label='LOC flag', pos=(70, 100))\n self.textLOCflagonoff.SetFont(menuFont)\n self.buttonLOConoff = wx.Button(panel, label=\"visible\", pos=(160, 95), size=(60, 25))\n self.buttonLOConoff.SetToolTip(wx.ToolTip(\"hide LOC flag\"))\n self.buttonLOConoff.SetBackgroundColour('#ffcc00')\n self.buttonLOConoff.Bind(wx.EVT_BUTTON, self.LOConoffClick)\n #\n self.textAUXflagonoff = wx.StaticText(self, label='AUX flag', pos=(70, 130))\n self.textAUXflagonoff.SetFont(menuFont)\n self.buttonAUXonoff = wx.Button(panel, label=\"visible\", pos=(160, 125), size=(60, 25))\n self.buttonAUXonoff.SetToolTip(wx.ToolTip(\"hide AUX flag\"))\n self.buttonAUXonoff.SetBackgroundColour('#ffcc00')\n self.buttonAUXonoff.Bind(wx.EVT_BUTTON, self.AUXonoffClick)\n\n # flags/RoT - GlideSlope - Sphere ENABLE\n self.textFlagsEnable = wx.StaticText(self, label='ENABLE flags and RoT', pos=(320, 70))\n self.textFlagsEnable.SetFont(menuFont)\n self.buttonFlagsEnable = wx.Button(panel, label=\"disabled\", pos=(490, 65), size=(60, 25))\n self.buttonFlagsEnable.SetToolTip(wx.ToolTip(\"enable flags and RoT\"))\n self.buttonFlagsEnable.SetBackgroundColour('#ff0000')\n self.buttonFlagsEnable.SetForegroundColour('#ffffff')\n self.buttonFlagsEnable.Bind(wx.EVT_BUTTON, self.FlagsEnableClick)\n #\n self.textGlideSlopeEnable = wx.StaticText(self, label='ENABLE glide slope', pos=(320, 100))\n self.textGlideSlopeEnable.SetFont(menuFont)\n self.buttonGlideSlopeEnable = wx.Button(panel, label=\"disabled\", pos=(490, 95), size=(60, 25))\n self.buttonGlideSlopeEnable.SetToolTip(wx.ToolTip(\"enable glide slope\"))\n self.buttonGlideSlopeEnable.SetBackgroundColour('#ff0000')\n self.buttonGlideSlopeEnable.SetForegroundColour('#ffffff')\n self.buttonGlideSlopeEnable.Bind(wx.EVT_BUTTON, self.GlideSlopeEnableClick)\n #\n self.textSphereEnable = wx.StaticText(self, label='ENABLE Roll and Pitch', pos=(320, 130))\n self.textSphereEnable.SetFont(menuFont)\n self.buttonSphereEnable = wx.Button(panel, label=\"disabled\", pos=(490, 125), size=(60, 25))\n self.buttonSphereEnable.SetToolTip(wx.ToolTip(\"enable Roll & Pitch\"))\n self.buttonSphereEnable.SetBackgroundColour('#ff0000')\n self.buttonSphereEnable.SetForegroundColour('#ffffff')\n self.buttonSphereEnable.Bind(wx.EVT_BUTTON, self.SphereEnableClick)\n #\n wx.StaticLine(panel, -1, (25, 
170), (570, 3))\n\n # Glide Slope horizontal / vertical and Rate of Turn setpoint entry\n self.textGShorEntry = wx.StaticText(self, label='glide slope indicator horizontal', pos=(70, 192))\n self.textGShorEntry.SetFont(menuFont)\n self.GShorEntry = wx.TextCtrl(panel, pos=(300, 190), size=(70, 25))\n self.buttonGShorEntrySet = wx.Button(panel, label=\"SET\", pos=(400, 189), size=(60, 25))\n self.buttonGShorEntrySet.SetToolTip(wx.ToolTip(\"set GS horizontal position\"))\n self.buttonGShorEntrySet.SetBackgroundColour('#60a0b0')\n self.buttonGShorEntrySet.SetForegroundColour('#000000')\n self.buttonGShorEntrySet.Bind(wx.EVT_BUTTON, self.GShorEntrySet)\n self.textGShorRange = wx.StaticText(self, label='[0 .. 255]', pos=(490, 194))\n self.textGShorRange.SetFont(rangeFont)\n self.textGShorRange.SetForegroundColour(\"#0000ff\")\n #\n self.textGSvertEntry = wx.StaticText(self, label='glide slope indicator vertical', pos=(70, 222))\n self.textGSvertEntry.SetFont(menuFont)\n self.GSvertEntry = wx.TextCtrl(panel, pos=(300, 220), size=(70, 25))\n self.buttonGSvertEntrySet = wx.Button(panel, label=\"SET\", pos=(400, 219), size=(60, 25))\n self.buttonGSvertEntrySet.SetToolTip(wx.ToolTip(\"set GS vertical position\"))\n self.buttonGSvertEntrySet.SetBackgroundColour('#60a0b0')\n self.buttonGSvertEntrySet.SetForegroundColour('#000000')\n self.buttonGSvertEntrySet.Bind(wx.EVT_BUTTON, self.GSvertEntrySet)\n self.textGSvertRange = wx.StaticText(self, label='[0 .. 255]', pos=(490, 224))\n self.textGSvertRange.SetFont(rangeFont)\n self.textGSvertRange.SetForegroundColour(\"#0000ff\")\n #\n self.textRoTEntry = wx.StaticText(self, label='Rate of Turn indicator', pos=(70, 252))\n self.textRoTEntry.SetFont(menuFont)\n self.RoTEntry = wx.TextCtrl(panel, pos=(300, 250), size=(70, 25))\n self.buttonRoTEntrySet = wx.Button(panel, label=\"SET\", pos=(400, 249), size=(60, 25))\n self.buttonRoTEntrySet.SetToolTip(wx.ToolTip(\"set Rate of Turn indication\"))\n self.buttonRoTEntrySet.SetBackgroundColour('#60a0b0')\n self.buttonRoTEntrySet.SetForegroundColour('#000000')\n self.buttonRoTEntrySet.Bind(wx.EVT_BUTTON, self.RoTEntrySet)\n self.textGSrotRange = wx.StaticText(self, label='[0 .. 255]', pos=(490, 254))\n self.textGSrotRange.SetFont(rangeFont)\n self.textGSrotRange.SetForegroundColour(\"#0000ff\")\n #\n wx.StaticLine(panel, -1, (25, 290), (570, 3))\n\n # Pitch / Roll position setpoint entry\n self.textPitchEntry = wx.StaticText(self, label='sphere PITCH indication', pos=(70, 312))\n self.textPitchEntry.SetFont(menuFont)\n self.PitchEntry = wx.TextCtrl(panel, pos=(300, 310), size=(70, 25))\n self.buttonPitchEntrySet = wx.Button(panel, label=\"SET\", pos=(400, 309), size=(60, 25))\n self.buttonPitchEntrySet.SetToolTip(wx.ToolTip(\"set sphere PITCH position\"))\n self.buttonPitchEntrySet.SetBackgroundColour('#60a0b0')\n self.buttonPitchEntrySet.SetForegroundColour('#000000')\n self.buttonPitchEntrySet.Bind(wx.EVT_BUTTON, self.PitchEntrySet)\n self.textGSpitchRange = wx.StaticText(self, label='[140 .. 
700]', pos=(490, 314))\n self.textGSpitchRange.SetFont(rangeFont)\n self.textGSpitchRange.SetForegroundColour(\"#0000ff\")\n #\n self.textRollEntry = wx.StaticText(self, label='sphere ROLL indication', pos=(70, 342))\n self.textRollEntry.SetFont(menuFont)\n self.RollEntry = wx.TextCtrl(panel, pos=(300, 340), size=(70, 25))\n self.buttonRollEntrySet = wx.Button(panel, label=\"SET\", pos=(400, 339), size=(60, 25))\n self.buttonRollEntrySet.SetToolTip(wx.ToolTip(\"set sphere ROLL position\"))\n self.buttonRollEntrySet.SetBackgroundColour('#60a0b0')\n self.buttonRollEntrySet.SetForegroundColour('#000000')\n self.buttonRollEntrySet.Bind(wx.EVT_BUTTON, self.RollEntrySet)\n self.textGSrollRange = wx.StaticText(self, label='[0 .. 1023]', pos=(490, 344))\n self.textGSrollRange.SetFont(rangeFont)\n self.textGSrollRange.SetForegroundColour(\"#0000ff\")\n #\n wx.StaticLine(panel, -1, (25, 380), (570, 3))\n\n # advanced checkbox\n self.advancedMode = wx.CheckBox(panel, label='Advanced', pos=(310, 400), size=(100, 25))\n self.advancedMode.SetValue(False)\n self.Centre()\n self.advancedMode.SetToolTip(wx.ToolTip(\"send raw data\"))\n self.Show(True)\n self.advancedMode.Bind(wx.EVT_CHECKBOX, self.advancedOption)\n #\n # advanced entry fields\n self.textDeviceAddress = wx.StaticText(self, label='DEVICE ADDRESS', pos=(70, 312))\n self.textDeviceAddress.SetToolTip(wx.ToolTip(\"8-bit DOA DEVICE address\"))\n self.textDeviceAddress.SetFont(DOAheaderFont)\n self.textDeviceAddress.Hide()\n self.textDeviceSubAddress = wx.StaticText(self, label='SUB-ADDRESS', pos=(190, 312))\n self.textDeviceSubAddress.SetToolTip(wx.ToolTip(\"8-bit DOA sub-address\"))\n self.textDeviceSubAddress.SetFont(DOAheaderFont)\n self.textDeviceSubAddress.Hide()\n self.textDeviceData = wx.StaticText(self, label='DATA BYTE', pos=(314, 312))\n self.textDeviceData.SetToolTip(wx.ToolTip(\"8-bit DOA data\"))\n self.textDeviceData.SetFont(DOAheaderFont)\n self.textDeviceData.Hide()\n self.textAdvAddr = wx.StaticText(self, label='48:pitch 50:roll', pos=(76, 358))\n self.textAdvAddr.SetFont(rangeFont)\n self.textAdvAddr.SetForegroundColour(\"#0000ff\")\n self.textAdvAddr.Hide()\n #\n self.deviceAddressEntry = wx.TextCtrl(panel, pos=(77, 330), size=(70, 25))\n self.deviceAddressEntry.Hide()\n self.deviceSubAddressEntry = wx.TextCtrl(panel, pos=(192, 330), size=(70, 25))\n self.deviceSubAddressEntry.Hide()\n self.deviceDataEntry = wx.TextCtrl(panel, pos=(307, 330), size=(70, 25))\n self.deviceDataEntry.Hide()\n self.buttonAdvancedSend = wx.Button(panel, label=\"SEND\", pos=(440, 329), size=(60, 25))\n self.buttonAdvancedSend.SetToolTip(wx.ToolTip(\"send DOA device / sub-address / data\"))\n self.buttonAdvancedSend.SetBackgroundColour('#80e0c0')\n self.buttonAdvancedSend.SetForegroundColour('#000000')\n self.buttonAdvancedSend.Bind(wx.EVT_BUTTON, self.advancedSend)\n self.buttonAdvancedSend.Hide()\n\n # Exit button\n self.buttonExit = wx.Button(panel, label=\"Exit\", pos=(500, 400), size=(80, 25))\n self.buttonExit.SetForegroundColour('#ff0000')\n self.buttonExit.Bind(wx.EVT_BUTTON, self.ExitClick)\n \n # promote myself :-)\n henkFont = wx.Font(8, wx.SWISS, wx.NORMAL, wx.NORMAL)\n self.henkie = wx.StaticText(self, label=\"ADI Demonstrator, version 2 for Python 3.6\", pos=(25, 413))\n self.henkie.SetFont(henkFont)\n self.henkie.SetForegroundColour(\"#0000ff\")\n\n # \"error in entry\" indication bitmaps\n errorImage = wx.Bitmap(\"error.gif\")\n self.errorGShorEntry = wx.StaticBitmap(self, -1, errorImage)\n self.errorGSvertEntry = wx.StaticBitmap(self, 
-1, errorImage)\n self.errorRoTEntry = wx.StaticBitmap(self, -1, errorImage)\n self.errorPitchEntry = wx.StaticBitmap(self, -1, errorImage)\n self.errorRollEntry = wx.StaticBitmap(self, -1, errorImage)\n self.errorPitchPort = wx.StaticBitmap(self, -1, errorImage)\n self.errorRollPort = wx.StaticBitmap(self, -1, errorImage)\n self.errorDeviceAddress = wx.StaticBitmap(self, -1, errorImage)\n self.errorDeviceSubAddress = wx.StaticBitmap(self, -1, errorImage)\n self.errorDeviceData = wx.StaticBitmap(self, -1, errorImage)\n self.errorGShorEntry.SetPosition((373, 189))\n self.errorGSvertEntry.SetPosition((373, 219))\n self.errorRoTEntry.SetPosition((373, 249))\n self.errorPitchEntry.SetPosition((373, 309))\n self.errorRollEntry.SetPosition((373, 339))\n self.errorPitchPort.SetPosition((188, 7))\n self.errorRollPort.SetPosition((435, 7))\n self.errorDeviceAddress.SetPosition((149, 329))\n self.errorDeviceSubAddress.SetPosition((264, 329))\n self.errorDeviceData.SetPosition((379, 329))\n self.errorGShorEntry.Hide()\n self.errorGSvertEntry.Hide()\n self.errorRoTEntry.Hide()\n self.errorPitchEntry.Hide()\n self.errorRollEntry.Hide()\n self.errorPitchPort.Show()\n self.errorRollPort.Show()\n self.errorDeviceAddress.Hide()\n self.errorDeviceSubAddress.Hide()\n self.errorDeviceData.Hide()\n\n # find the available COM ports (code only for Windows)\n COMports = ['COM%s' % (i + 1) for i in range(64)]\n availableCOMports = []\n for port in COMports:\n try:\n s = serial.Serial(port)\n s.close()\n availableCOMports.append(port)\n except (OSError, serial.SerialException):\n pass\n print(\"--- discovered COM ports:\", availableCOMports)\n # show combo box with available COM ports\n self.textPITCHport = wx.StaticText(self, label='PITCH SDI port', pos=(70, 10))\n self.textPITCHport.SetFont(menuFont)\n self.textPITCHport.SetForegroundColour(\"#ff0000\")\n cbPitch = wx.ComboBox(panel, pos=(220, 10), choices=availableCOMports, style=wx.CB_READONLY)\n cbPitch.SetToolTip(wx.ToolTip(\"set PITCH SDI COM port\"))\n cbPitch.Bind(wx.EVT_COMBOBOX, self.OnSelectPitchPort)\n self.textROLLport = wx.StaticText(self, label='ROLL SDI port', pos=(320, 10))\n self.textROLLport.SetFont(menuFont)\n self.textROLLport.SetForegroundColour(\"#ff0000\")\n cbRoll = wx.ComboBox(panel, pos=(470, 10), choices=availableCOMports, style=wx.CB_READONLY)\n cbRoll.SetToolTip(wx.ToolTip(\"set ROLL SDI COM port\"))\n cbRoll.Bind(wx.EVT_COMBOBOX, self.OnSelectRollPort)\n\n # define timer\n self.timer = wx.Timer(self, -1)\n self.Bind(wx.EVT_TIMER, self.OnTimer)\n self.timerIsRunning = False\n\n\n# =================================================================================================\n# ACTION routines\n# =================================================================================================\n\n def GSonoffClick(self, event):\n button = event.GetEventObject()\n buttonName = button.GetLabel()\n if buttonName == \"hidden\":\n self.buttonGSonoff.SetLabel(\"visible\")\n self.buttonGSonoff.SetToolTip(wx.ToolTip(\"hide GS flag\"))\n self.buttonGSonoff.SetBackgroundColour('#ffcc00')\n self.sendData(\"ROLL\", 15, 0)\n else:\n self.buttonGSonoff.SetLabel(\"hidden\")\n self.buttonGSonoff.SetToolTip(wx.ToolTip(\"show GS flag\"))\n self.buttonGSonoff.SetBackgroundColour('#cccccc')\n self.sendData(\"ROLL\", 15, 1)\n\n def LOConoffClick(self, event):\n button = event.GetEventObject()\n buttonName = button.GetLabel()\n if buttonName == \"hidden\":\n self.buttonLOConoff.SetLabel(\"visible\")\n self.buttonLOConoff.SetToolTip(wx.ToolTip(\"hide LOC 
flag\"))\n self.buttonLOConoff.SetBackgroundColour('#ffcc00')\n self.sendData(\"ROLL\", 17, 0)\n else:\n self.buttonLOConoff.SetLabel(\"hidden\")\n self.buttonLOConoff.SetToolTip(wx.ToolTip(\"show LOC flag\"))\n self.buttonLOConoff.SetBackgroundColour('#cccccc')\n self.sendData(\"ROLL\", 17, 1)\n\n def AUXonoffClick(self, event):\n button = event.GetEventObject()\n buttonName = button.GetLabel()\n if buttonName == \"hidden\":\n self.buttonAUXonoff.SetLabel(\"visible\")\n self.buttonAUXonoff.SetToolTip(wx.ToolTip(\"hide AUX flag\"))\n self.buttonAUXonoff.SetBackgroundColour('#ffcc00')\n self.sendData(\"ROLL\", 18, 0)\n else:\n self.buttonAUXonoff.SetLabel(\"hidden\")\n self.buttonAUXonoff.SetToolTip(wx.ToolTip(\"show AUX flag\"))\n self.buttonAUXonoff.SetBackgroundColour('#cccccc')\n self.sendData(\"ROLL\", 18, 1)\n\n def FlagsEnableClick(self, event):\n button = event.GetEventObject()\n buttonName = button.GetLabel()\n if buttonName == \"disabled\":\n self.buttonFlagsEnable.SetLabel(\"enabled\")\n self.buttonFlagsEnable.SetToolTip(wx.ToolTip(\"disable flags and RoT\"))\n self.buttonFlagsEnable.SetBackgroundColour('#00ff00')\n self.buttonFlagsEnable.SetForegroundColour('#000000')\n self.sendData(\"ROLL\", 16, 1)\n else:\n self.buttonFlagsEnable.SetLabel(\"disabled\")\n self.buttonFlagsEnable.SetToolTip(wx.ToolTip(\"enable flags and RoT\"))\n self.buttonFlagsEnable.SetBackgroundColour('#ff0000')\n self.buttonFlagsEnable.SetForegroundColour('#ffffff')\n self.sendData(\"ROLL\", 16, 0)\n\n def GlideSlopeEnableClick(self, event):\n button = event.GetEventObject()\n buttonName = button.GetLabel()\n if buttonName == \"disabled\":\n self.buttonGlideSlopeEnable.SetLabel(\"enabled\")\n self.buttonGlideSlopeEnable.SetToolTip(wx.ToolTip(\"disable glide slope\"))\n self.buttonGlideSlopeEnable.SetBackgroundColour('#00ff00')\n self.buttonGlideSlopeEnable.SetForegroundColour('#000000')\n self.sendData(\"PITCH\", 16, 1)\n else:\n self.buttonGlideSlopeEnable.SetLabel(\"disabled\")\n self.buttonGlideSlopeEnable.SetToolTip(wx.ToolTip(\"enable glide slope\"))\n self.buttonGlideSlopeEnable.SetBackgroundColour('#ff0000')\n self.buttonGlideSlopeEnable.SetForegroundColour('#ffffff')\n self.sendData(\"PITCH\", 16, 0)\n\n def SphereEnableClick(self, event):\n button = event.GetEventObject()\n buttonName = button.GetLabel()\n if buttonName == \"disabled\":\n self.buttonSphereEnable.SetLabel(\"enabled\")\n self.buttonSphereEnable.SetToolTip(wx.ToolTip(\"disable Roll & Pitch\"))\n self.buttonSphereEnable.SetBackgroundColour('#00ff00')\n self.buttonSphereEnable.SetForegroundColour('#000000')\n self.sendData(\"PITCH\", 15, 1)\n else:\n self.buttonSphereEnable.SetLabel(\"disabled\")\n self.buttonSphereEnable.SetToolTip(wx.ToolTip(\"enable Roll & Pitch\"))\n self.buttonSphereEnable.SetBackgroundColour('#ff0000')\n self.buttonSphereEnable.SetForegroundColour('#ffffff')\n self.sendData(\"PITCH\", 15, 0)\n\n # convert a string to a numeric, if string is not a number return 9999.\n def str2int(self, s):\n ctr = i = 0\n inError = False\n for c in reversed(s):\n digit = ord(c) - 48\n if digit < 0 or digit > 9:\n inError = True\n i += digit * (10 ** ctr)\n ctr += 1\n if inError == True:\n return 9999\n else:\n return i\n\n def GShorEntrySet(self, event):\n GShorValue = self.GShorEntry.GetValue()\n value = self.str2int(GShorValue)\n if value == 9999:\n self.errorGShorEntry.Show()\n elif value < 0 or value > 255:\n self.errorGShorEntry.Show()\n else:\n self.errorGShorEntry.Hide()\n self.sendData(\"PITCH\", 17, value)\n\n def 
GSvertEntrySet(self, event):\n GSvertValue = self.GSvertEntry.GetValue()\n value = self.str2int(GSvertValue)\n if value == 9999:\n self.errorGSvertEntry.Show()\n elif value < 0 or value > 255:\n self.errorGSvertEntry.Show()\n else:\n self.errorGSvertEntry.Hide()\n self.sendData(\"PITCH\", 18, value)\n\n def RoTEntrySet(self, event):\n RoTValue = self.RoTEntry.GetValue()\n value = self.str2int(RoTValue)\n if value == 9999:\n self.errorRoTEntry.Show()\n elif value < 0 or value > 255:\n self.errorRoTEntry.Show()\n else:\n self.errorRoTEntry.Hide()\n self.sendData(\"ROLL\", 22, value)\n\n def PitchEntrySet(self, event):\n pitchValue = self.PitchEntry.GetValue()\n value = self.str2int(pitchValue)\n if value == 9999:\n self.errorPitchEntry.Show()\n elif value < 140 or value > 700:\n # extra limiting for PITCH\n self.errorPitchEntry.Show()\n else:\n self.errorPitchEntry.Hide()\n if value < 256:\n address = 0\n elif value < 512:\n address = 1\n value = value - 256\n elif value < 768:\n address = 2\n value = value - 512\n else:\n address = 3\n value = value - 768\n self.sendData(\"PITCH\", address, value)\n\n def RollEntrySet(self, event):\n rollValue = self.RollEntry.GetValue()\n value = self.str2int(rollValue)\n if value == 9999:\n self.errorRollEntry.Show()\n elif value < 0 or value > 1023:\n self.errorRollEntry.Show()\n else:\n self.errorRollEntry.Hide()\n if value < 256:\n address = 0\n elif value < 512:\n address = 1\n value = value - 256\n elif value < 768:\n address = 2\n value = value - 512\n else:\n address = 3\n value = value - 768\n self.sendData(\"ROLL\", address, value)\n\n\n\n def advancedOption(self, event):\n \n sender = event.GetEventObject()\n isChecked = sender.GetValue()\n \n if isChecked:\n self.textPitchEntry.Hide()\n self.PitchEntry.Hide()\n self.buttonPitchEntrySet.Hide()\n self.textRollEntry.Hide()\n self.RollEntry.Hide()\n self.buttonRollEntrySet.Hide()\n self.textGSrollRange.Hide()\n self.textGSpitchRange.Hide()\n # start advanced mode\n self.textDeviceAddress.Show()\n self.textDeviceSubAddress.Show()\n self.textDeviceData.Show()\n self.deviceAddressEntry.Show()\n self.deviceSubAddressEntry.Show()\n self.deviceDataEntry.Show()\n self.buttonAdvancedSend.Show()\n self.textAdvAddr.Show()\n self.errorDeviceAddress.Hide()\n self.errorDeviceSubAddress.Hide()\n self.errorDeviceData.Hide()\n\n else:\n self.textPitchEntry.Show()\n self.PitchEntry.Show()\n self.buttonPitchEntrySet.Show()\n self.textRollEntry.Show()\n self.RollEntry.Show()\n self.buttonRollEntrySet.Show()\n self.textGSrollRange.Show()\n self.textGSpitchRange.Show()\n # stop advanced mode\n self.textDeviceAddress.Hide()\n self.textDeviceSubAddress.Hide()\n self.textDeviceData.Hide()\n self.deviceAddressEntry.Hide()\n self.deviceSubAddressEntry.Hide()\n self.deviceDataEntry.Hide()\n self.buttonAdvancedSend.Hide()\n self.textAdvAddr.Hide()\n self.errorDeviceAddress.Hide()\n self.errorDeviceSubAddress.Hide()\n self.errorDeviceData.Hide()\n\n def advancedSend(self, event):\n advDeviceAddress = self.deviceAddressEntry.GetValue()\n advDeviceSubAddress = self.deviceSubAddressEntry.GetValue()\n advData = self.deviceDataEntry.GetValue()\n print(\"advanced data =\", advDeviceAddress, advDeviceSubAddress, advData)\n # process entry fields\n allDataValid = True\n portID = \"\"\n if advDeviceAddress == \"0x30\" or advDeviceAddress == \"0X30\" or advDeviceAddress == \"48\":\n portID = \"PITCH\"\n if advDeviceAddress == \"0x32\" or advDeviceAddress == \"0X32\" or advDeviceAddress == \"50\" :\n portID = \"ROLL\"\n if portID == 
\"\":\n self.errorDeviceAddress.Show()\n allDataValid = False\n else:\n self.errorDeviceAddress.Hide()\n #\n subValue = self.str2int(advDeviceSubAddress)\n if subValue > 63:\n self.errorDeviceSubAddress.Show()\n allDataValid = False\n else:\n self.errorDeviceSubAddress.Hide()\n #\n dataValue = self.str2int(advData)\n if dataValue > 255:\n self.errorDeviceData.Show()\n allDataValid = False\n else:\n self.errorDeviceData.Hide()\n if allDataValid == True:\n self.buttonAdvancedSend.SetBackgroundColour('#80e0c0')\n self.sendData(portID, subValue, dataValue)\n else:\n self.buttonAdvancedSend.SetBackgroundColour('#e05040')\n\n\n def ExitClick(self, event):\n if self.pitchPortOpened != \"\":\n self.closeComPort(self.pitchPortOpened, \"PITCH\")\n if self.rollPortOpened != \"\":\n self.closeComPort(self.rollPortOpened, \"ROLL\")\n if self.firstPHCCcommand == False:\n if self.phccPort.isOpen():\n self.phccPort.close()\n self.Close()\n\n\n# timer handling\n\n def stopTimer(self):\n if self.timerIsRunning == True:\n self.timer.Stop()\n self.timerIsRunning = False\n\n def startTimer(self, rate):\n if self.timerIsRunning == False:\n self.timerIsRunning = True\n self.timer.Start(rate)\n\n def OnTimer(self, event):\n self.stopTimer()\n # time out can only occur for ID check PITCH or ROLL IDENTIFY command\n if self.awaitPitchIDresponse == True:\n self.awaitPitchIDresponse = False\n else:\n self.awaitRollIDresponse = False\n\n\n# =================================================================================================\n# COM port assignments\n# =================================================================================================\n\n def OnSelectPitchPort(self, entry):\n previousPitchPort = self.pitchPort\n self.pitchPort = entry.GetString()\n\n if self.firstPHCCcommand == False:\n # \"switching\" from PHCC mode to USB mode: close defaultPhccCOMport first\n if self.phccPort.isOpen():\n self.phccPort.close()\n\n if previousPitchPort == \"\": # initial state\n # no previously opened port, but the selected port may already be in use!\n # if the selected port is in use: do not open port, \"!\" already shown\n # if the selected port is not in use: hide Exclamation Sign, do open port\n if self.pitchPort == self.rollPort:\n self.pitchPort = \"\"\n else:\n self.errorPitchPort.Hide()\n previousPitchPort = self.pitchPort\n self.openComPort(self.pitchPort, \"PITCH\")\n self.pitchPortOpened = self.pitchPort\n #\n elif self.pitchPort == self.rollPort:\n # selected COM port is in use by ROLL SDI: show \"!\" , do not open port\n self.errorPitchPort.Show()\n # if a COM port was assigned, close it.\n self.closeComPort(previousPitchPort, \"PITCH\")\n self.pitchPortOpened = \"\"\n previousPitchPort = self.pitchPort\n #\n elif previousPitchPort == self.pitchPort:\n # Same COM port selected, possible cases: \n # 1. COM port is not opened (because it is assigned to ROLL SDI)\n # 2. COM port is already opened, just selected again\n # 3. 
COM port is not opened, but available -> open it\n if self.pitchPort == self.rollPort:\n self.errorPitchPort.Show() # 1.\n elif self.pitchPortOpened == self.pitchPort:\n self.errorPitchPort.Hide() # 2.\n else:\n self.errorPitchPort.Hide() # 3.\n self.openComPort(self.pitchPort, \"PITCH\")\n self.pitchPortOpened = self.pitchPort\n #\n else:\n # port not in use: assign it!\n # check if already (another) port was opened: then close that one first!\n if self.pitchPortOpened != self.pitchPort:\n # close previous port first\n self.closeComPort(self.pitchPortOpened, \"PITCH\")\n self.pitchPortOpened = \"\"\n previousPitchPort = self.pitchPort\n self.errorPitchPort.Hide()\n self.openComPort(self.pitchPort, \"PITCH\")\n self.pitchPortOpened = self.pitchPort\n\n def OnSelectRollPort(self, entry):\n previousRollPort = self.rollPort\n self.rollPort = entry.GetString()\n\n if self.firstPHCCcommand == False:\n # \"switching\" from PHCC mode to USB mode: close defaultPhccCOMport first\n if self.phccPort.isOpen():\n self.phccPort.close()\n\n if previousRollPort == \"\": # initial state\n # no previously opened port, but the selected port may already be in use!\n # if the selected port is in use: do not open port, \"!\" already shown\n # if the selected port is not in use: hide Exclamation Sign, do open port\n if self.rollPort == self.pitchPort:\n self.rollPort = \"\"\n else:\n self.errorRollPort.Hide()\n previousRollPort = self.rollPort\n self.openComPort(self.rollPort, \"ROLL\")\n self.rollPortOpened = self.rollPort\n #\n elif self.rollPort == self.pitchPort:\n # selected COM port is in use by PITCH SDI: show \"!\" , do not open port\n self.errorRollPort.Show()\n # if a COM port was assigned, close it.\n self.closeComPort(previousRollPort, \"ROLL\")\n self.rollPortOpened = \"\"\n previousRollPort = self.rollPort\n #\n elif previousRollPort == self.rollPort:\n # Same COM port selected, possible cases: \n # 1. COM port is not opened (because it is assigned to PITCH SDI)\n # 2. COM port is already opened, just selected again\n # 3. COM port is not opened, but available -> open it\n if self.rollPort == self.pitchPort:\n self.errorRollPort.Show() # 1.\n elif self.rollPortOpened == self.rollPort:\n self.errorRollPort.Hide() # 2.\n else:\n self.errorRollPort.Hide() # 3.\n self.openComPort(self.rollPort, \"ROLL\")\n self.rollPortOpened = self.rollPort\n #\n else:\n # port not in use: assign it!\n # check if already (another) port was opened: then close that one first!\n if self.rollPortOpened != self.rollPort:\n # close previous port first\n self.closeComPort(self.rollPortOpened, \"ROLL\")\n self.rollPortOpened = \"\"\n previousRollPort = self.rollPort\n self.errorRollPort.Hide()\n self.openComPort(self.rollPort, \"ROLL\")\n self.rollPortOpened = self.rollPort\n\n\n def openComPort(self, portID, SDIname):\n print(\"> openCOMport\", portID, \"for SDI\", SDIname)\n if SDIname == \"ROLL\":\n self.rollComPort = serial.Serial(portID, baudrate=115200, parity=serial.PARITY_NONE,\n stopbits=serial.STOPBITS_ONE, bytesize=serial.EIGHTBITS)\n elif SDIname == \"PITCH\":\n self.pitchComPort = serial.Serial(portID, baudrate=115200, parity=serial.PARITY_NONE,\n stopbits=serial.STOPBITS_ONE, bytesize=serial.EIGHTBITS)\n else:\n print(\"!!! opening COM port\", portID, \"for SDI\", SDIname, \"failed!\")\n \n\n def closeComPort(self, portID, SDIname):\n print(\"> closeCOMport\", portID, \"for SDI\", SDIname)\n if SDIname == \"ROLL\":\n if self.rollComPort.isOpen():\n self.rollComPort.close()\n else:\n print(\"!!! 
COM port\", portID, \"for SDI\", SDIname, \"is not open!\")\n elif SDIname == \"PITCH\":\n if self.pitchComPort.isOpen():\n self.pitchComPort.close()\n else:\n print(\"!!! COM port\", portID, \"for SDI\", SDIname, \"is not open!\")\n else:\n print(\"!!! closing COM port\", portID, \"for SDI\", SDIname, \"failed!\")\n\n\n# =================================================================================================\n# COMMUNICATION routine: send data\n# =================================================================================================\n\n def sendData(self, portID, subAddress, data):\n #\n # this function is called whenever a button is clicked to send a command.\n # If the COM port for PITCH *and* for ROLL has not been defined, it is assumed that\n # 1. PHCC Motherboard is used\n # 2. serial port defaultPhccCOMport is connected to the PHCC Motherboard\n # Note that the first command must open the defaultPhccCOMport port!\n #\n # If one of the COM ports (for PITCH or ROLL) has been defined, it is assumed that\n # 1. USB interfaces are used to connect to SDI modules\n # 2. that assigned COM port will be used.\n\n# print(\"... sendData to\", portID, \"subAddress\", subAddress, \"data\", data)\n \n# if self.rollPortOpened == \"\" and self.pitchPortOpened == \"\":\n # using PHCC Motherboard\n# if self.firstPHCCcommand == True:\n # open defaultPhccCOMport port first\n# self.phccPort = serial.Serial(self.defaultPhccCOMport, baudrate=115200, parity=serial.PARITY_NONE,\n# stopbits=serial.STOPBITS_ONE, bytesize=serial.EIGHTBITS)\n# self.firstPHCCcommand = False\n# if self.phccPort.isOpen():\n# if portID == \"PITCH\":\n# self.phccPort.write(chr(0x07))\n# self.phccPort.write(chr(0x30))\n# self.phccPort.write(chr(subAddress))\n# self.phccPort.write(chr(data))\n# elif portID == \"ROLL\":\n# self.phccPort.write(chr(0x07))\n# self.phccPort.write(chr(0x32))\n# self.phccPort.write(chr(subAddress))\n# self.phccPort.write(chr(data))\n# else:\n# print(\"!!! Undefined SDI identification for PHCC!\")\n# else:\n# print(\"!!!\", self.defaultPhccCOMport, \"port for PHCC is not open!\")\n\n# else:\n # using USB connections\n if portID == \"ROLL\":\n if self.rollPortOpened != \"\":\n if self.rollComPort.isOpen():\n self.rollComPort.write(subAddress.to_bytes(1, byteorder='big', signed=False))\n self.rollComPort.write(data.to_bytes(1, byteorder='big', signed=False))\n print(\" ROLL command:\", subAddress, data)\n else:\n print(\"!!! COM port\", self.rollPortOpened, \"for SDI\", portID, \"is not open!\")\n else:\n print(\"!!! no COM port defined for SDI\", portID)\n elif portID == \"PITCH\":\n if self.pitchPortOpened != \"\":\n if self.pitchComPort.isOpen():\n self.pitchComPort.write(subAddress.to_bytes(1, byteorder='big', signed=False))\n self.pitchComPort.write(data.to_bytes(1, byteorder='big', signed=False))\n print(\" PITCH command:\", subAddress, data)\n else:\n print(\"!!! COM port\", self.pitchPortOpened, \"for SDI\", portID, \"is not open!\")\n else:\n print(\"!!! no COM port defined for SDI\", portID)\n else:\n print(\"!!! 
sending data to unknown SDI module\", portID)\n\n\n\nif __name__ == '__main__':\n app = wx.App()\n window = ADI(None)\n app.MainLoop()\n","sub_path":"F-16 ADI/testappl/adi-testappl.py","file_name":"adi-testappl.py","file_ext":"py","file_size_in_byte":36290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"449249168","text":"def send_email(user, pwd, recipient, subject, body,message):\n import smtplib\n from email.utils import formatdate\n from email.mime.multipart import MIMEMultipart\n from email.mime.text import MIMEText\n \n #TO = recipient\n #TO = recipient if isinstance(recipient, list) else [recipient]\n MESSAGE = MIMEMultipart('alternative')\n MESSAGE['subject'] = subject\n MESSAGE['message'] = message\n MESSAGE['To'] = \", \".join(recipient)\n MESSAGE['From'] = user\n #MESSAGE.preamble = \"\"\"\n #Your mail reader does not support the report format.\n #Please visit us online!\"\"\"\n #body=\"Hi All, Below is the list of services along with their versions\"\n HTML_BODY = MIMEText(body, 'html')\n MESSAGE.attach(HTML_BODY)\n\n\n # Prepare actual message\n try:\n server = smtplib.SMTP(\"smtp.gmail.com\", 587)\n # Print debugging output when testing\n if __name__ == \"__main__\":\n server.set_debuglevel(1)\n server.ehlo()\n server.starttls()\n server.login(user, pwd)\n server.sendmail(user, recipient, MESSAGE.as_string())\n server.close()\n print('successfully sent the mail')\n except:\n print('failed to send mail')\n if __name__ == \"__main__\":\n \"\"\"Executes if the script is run as main script (for testing purposes)\"\"\"\n#emailnotifyaddress=['devops@oronetworks.com']\n#emailnotifyaddress=['engineering@oronetworks.com']\nemailnotifyaddress=['jayant.rai@oronetworks.com','abhishek.ranjan@oronetworks.com']\n#emailnotifyaddress=['jiten.sharma@oronetworks.com','venkat.swamy@oronetworks.com','jayant.rai@oronetworks.com','abhishek.ranjan@oronetworks.com','ajay.malik@oronetworks.com']\n#emailnotifyaddress=['venkat.swamy@oronetworks.com']\nsubject='ENG-PROD Comparison'\nmessage= 'The following table lists all the services with unique version number'\nf= open('report.html', 'r')\ncontent = f.read()\nf.close()\nf= open('/home/scripts/file-diff/diff.html', 'r')\ncontent += f.read()\nf.close()\n\nsend_email('no-reply@oronetworks.com', '5S2ZDjL74c', emailnotifyaddress, subject,content,message)\nf.close()\n\n","sub_path":"mailsend_html.py","file_name":"mailsend_html.py","file_ext":"py","file_size_in_byte":2140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"613924749","text":"import pyrosim\nimport matplotlib.pyplot as plt\n\nsim = pyrosim.Simulator(eval_time=1000, play_paused=False)\n\n# Create objects\n\nwhiteObject = sim.send_cylinder(x=0, y=0, z=0.6, length=1.0, radius=0.1)\n\nredObject = sim.send_cylinder(x=0, y=0.5, z=1.1, r1=0, r2=1, r3=0, length=1.0, radius=0.1, r=1, g=0, b=0)\n\n# Create joint\n\njoint = sim.send_hinge_joint(first_body_id=whiteObject, second_body_id=redObject, x=0, y=0, z=1.1, n1=-1, n2=0, n3=0,\n lo=-3.14159/2, hi=3.14159/2)\n\n\n# Sensors\n\nT0 = sim.send_touch_sensor(body_id=whiteObject)\nT1 = sim.send_touch_sensor(body_id=redObject)\nP2 = sim.send_proprioceptive_sensor(joint_id=joint)\n\n# Pointing out from tip\nR3 = sim.send_ray_sensor(body_id=redObject, x=0, y=1.1, z=1.1, r1=0, r2=1, r3=0)\n\n# Middle pointing down\n# R3 = sim.send_ray_sensor(body_id=redObject, x=0, y=0.5, z=1.1, r1=0, r2=0, r3=-1)\n\n\n# Neurons\n\nSN0 = 
sim.send_sensor_neuron(sensor_id=T0)\nSN1 = sim.send_sensor_neuron(sensor_id=T1)\n\nMN2 = sim.send_motor_neuron(joint_id=joint)\n\n# Synapses\n\nsim.send_synapse(source_neuron_id=SN0, target_neuron_id=MN2, weight=-1.0)\n\nsim.send_synapse(source_neuron_id=SN1, target_neuron_id=MN2, weight=-1.0)\n\n\n\n\n\n# Start\n\nsim.start()\n\nsim.wait_to_finish()\n\n# Analyze sensor data\n\nsensorData = sim.get_sensor_data(sensor_id=P2)\n\nprint(sensorData)\n\nf = plt.figure()\n\npanel = f.add_subplot(111)\n\nplt.plot(sensorData)\n\npanel.set_ylim(-2, +2)\npanel.set_xlim(0, 1000)\n\n\nplt.show()\n\n","sub_path":"synapses.py","file_name":"synapses.py","file_ext":"py","file_size_in_byte":1446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"287970449","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Mar 3 17:21:18 2021\n\n@author: Krishna Soni, SRL_Rover\n\"\"\"\n\n#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Feb 16 20:25:42 2021\n\n@author: Krishna Soni\n\"\"\"\n\nimport numpy as np\nimport random\nimport matplotlib.pyplot as plt\nfrom datetime import datetime\n\nstart_time = datetime.now()\nrandom.seed(a=None, version=2) #initialise the seed to the system time\n\n\ndef sigmoid(x): #function to calculate the sigmoid\n    return 1/(1+np.exp(-x))\n\ndef relu(x):\n    if x>0:\n        return x\n    else:\n        return 0\n\ntf = 0.2 #training factor\na = np.zeros((100,2), dtype=np.double) #creation of the a matrix\nz = np.zeros((100,2), dtype=np.double) #creation of z matrix\n#print(z)\nw = np.array([random.random(), random.random()]) #initializing the weights with random values\nb = np.array([random.random(), random.random()]) #initializing the biases with random values\nX = np.zeros((100,1), dtype=np.double)\nT = np.zeros_like(X)\nE = np.zeros((100,1), dtype=np.double)\n\nsumb0=0.0\nsumw0=0.0 \nsumb1=0.0 \nsumw1=0.0\n#a = [random.random(), random.random()] #initializing the outputs with zeroes\n#z = [random.random(), random.random()] #initialising z with zeroes\n#w = [random.random(), random.random()] #initializing the weights with random values\n#b = [random.random(), random.random()] #initializing the biases with random values\nfor i in range(100):\n    temp=random.random()\n    X[i][0]=temp\n    T[i][0]=temp**2\n\n#plt.plot(X,T, 'ro')\n#plt.axis([0,1,0,1])\n#plt.show()\n \nfor j in range(1000):\n    \n    for i in range(100):\n        #while E>0.00001*0.5*((T-a[1])**2) and j<10000:\n        \n        z[i][0] = w[0]*X[i][0] + b[0] #finding the value of z1\n        #a[0] = sigmoid(z[0]) #activating z1 with activation function\n        a[i][0] = relu(z[i][0])\n        \n        z[i][1] = a[i][0]*w[1] + b[1] \n        #a[1] = sigmoid(z[1])\n        a[i][1] = relu(z[i][1])\n        \n        E[i] = 0.5*((T[i][0]-a[i][1])**2)\n        \n        print(\"value of gradw1 in {} is {}\".format(i,(-(T[i][0]-a[i][1])*(a[i][1]*(1-a[i][1]))*(a[i][0]))))\n        sumw1 = sumw1 + (-(T[i][0]-a[i][1])*(a[i][1]*(1-a[i][1]))*(a[i][0]))\n        sumb1 = sumb1 + (-(T[i][0]-a[i][1])*(a[i][1]*(1-a[i][1]))*(1))\n        sumw0 = sumw0 + (-(T[i][0]-a[i][1])*(a[i][1]*(1-a[i][1]))*(1))\n        sumb0 = sumb0 + (-(T[i][0]-a[i][1])*(a[i][1]*(1-a[i][1]))*(w[1])*(a[i][0]*(1-a[i][0]))*(1))\n        print(\"value of symw1 in {} is {}\".format(i,sumw1))\n        \n    w[1] = w[1] - tf*sumw1\n    b[1] = b[1] - tf*sumb1\n    w[0] = w[0] - tf*sumw0\n    b[0] = b[0] - tf*sumb0\n    \n    #j= j+1\n    \n    # print(\"the final value of w in {} is {}\".format(i,w))\n    # print(\"the final value of b in {} is {}\".format(i,b))\n    # print(\"the final value of E is in {} {}\".format(i,E))\n
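    # NOTE: sumw0/sumw1/sumb0/sumb1 are never reset to zero between epochs, so the\n    # accumulated gradients keep growing across the outer loop; the (a*(1-a)) factors\n    # are the sigmoid derivative, even though relu() is the activation actually used above.\n    # print(\"the final value of input 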
in {} is {}\".format(i,X1))\n # print(\"the final value of true value in {} is {}\".format(i,T))\n # print(\"the final value of output in {} is {}\".format(i,a[1]))\n \n \n \nprint(\"the final value of w in is {}\".format(w))\nprint(\"the final value of b in is {}\".format(b))\nend_time=datetime.now() \nprint(\"Time in {} : {}\".format(i,end_time-start_time)) \nX2 = 0.5\nT2 = X2**2\nzcheck = [0, 0]\nacheck = [0, 0]\n\nzcheck[0] = w[0]*X2 + b[0]\nacheck[0] = relu(zcheck[0])\nzcheck[1] = acheck[0]*w[1] + b[1] \nacheck[1] = relu(zcheck[1])\n\nprint(\"check value of T2 is {}\".format(T2))\nprint(\"The value predicted by network is {}\".format(acheck[1]))","sub_path":"2neuron_2 (1).py","file_name":"2neuron_2 (1).py","file_ext":"py","file_size_in_byte":3553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"501740269","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 24 05:51:48 2016\n\n@author: nitish.singh19\n\"\"\"\n\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Mar 18 00:48:09 2016\n\n@author: nitish.singh19\n\"\"\"\nimport pandas as pd\nimport numpy as np\nimport time\n\nimport urllib.request\nimport json\n\n\n\nip =pd.read_csv('C:/Users/nitish.singh19/Desktop/IPAddress.csv',\\\n encoding = \"ISO-8859-1\")\n\nip = np.array(ip)\n############## Parsing JSON http ########################\n\ndata = np.empty(shape = (0, 12) , dtype = object)\n\nfor i in range(0,len(ip)):\n \n time.sleep(0.5) \n\n num = ip[i,0]\n\n x = urllib.request.urlopen('http://ip-api.com/json/'+num)\n\n decode_x = x.read().decode('utf-8')\n\n readable_json = json.loads(decode_x)\n\n\n add = num\n\n est = readable_json[\"as\"]\n\n city = readable_json[\"city\"]\n\n country = readable_json[\"country\"]\n\n isp = readable_json[\"isp\"]\n\n lat = readable_json[\"lat\"]\n\n lon = readable_json[\"lon\"]\n\n org = readable_json[\"org\"]\n\n region = readable_json[\"regionName\"]\n\n timezone = readable_json[\"timezone\"]\n\n zip = readable_json[\"zip\"]\n\n status = readable_json[\"status\"]\n \n temp = np.array([add, est, city, country, isp, lat, lon, org, region, timezone, \\\n zip, status])\n \n data = np.vstack((data,temp))\n \n \n################################################################\n \ncolumns = ['ip', 'est', 'city', 'country', 'isp', 'lat', 'lon', 'org', 'region', \\\n 'timezone','zip', 'status'] \n\nIPAdd_211 = pd.DataFrame(data, columns = columns)\n\nIPAdd_211.to_csv('IPAdd_211.csv') \n\n\n\n\n","sub_path":"IPextract.py","file_name":"IPextract.py","file_ext":"py","file_size_in_byte":1586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"231044961","text":"from django.core.exceptions import FieldError\nfrom django.forms.forms import get_declared_fields\nfrom django.forms.models import (ModelForm, ModelFormMetaclass, ModelFormOptions, \n fields_for_model, model_to_dict, save_instance)\nfrom django.forms.util import ErrorList\nfrom django.forms.widgets import media_property\nfrom django.utils.translation import get_language\nfrom nani.models import TranslatableModel\nfrom nani.utils import get_cached_translation, get_translation, combine\n\n\nclass TranslatableModelFormMetaclass(ModelFormMetaclass):\n def __new__(cls, name, bases, attrs):\n \n \"\"\"\n Django 1.3 fix, that removes all Meta.fields and Meta.exclude\n fieldnames that are in the translatable model. 
This ensures\n that the superclass' init method doesnt throw a validation\n error\n \"\"\"\n fields = []\n exclude = []\n if \"Meta\" in attrs:\n meta = attrs[\"Meta\"]\n if getattr(meta, \"fields\", False):\n fields = meta.fields\n meta.fields = []\n if getattr(meta, \"exclude\", False):\n exclude = meta.exclude\n meta.exclude = []\n # End 1.3 fix\n \n super_new = super(TranslatableModelFormMetaclass, cls).__new__\n \n formfield_callback = attrs.pop('formfield_callback', None)\n declared_fields = get_declared_fields(bases, attrs, False)\n new_class = super_new(cls, name, bases, attrs)\n \n # Start 1.3 fix\n if fields:\n new_class.Meta.fields = fields\n if exclude:\n new_class.Meta.exclude = exclude\n # End 1.3 fix\n if 'Media' not in attrs:\n new_class.media = media_property(new_class)\n opts = new_class._meta = ModelFormOptions(getattr(new_class, 'Meta', attrs.get('Meta', None)))\n if opts.model:\n # bail out if a wrong model uses this form class\n if not issubclass(opts.model, TranslatableModel):\n raise TypeError(\n \"Only TranslatableModel subclasses may use TranslatableModelForm\"\n )\n mopts = opts.model._meta\n \n shared_fields = mopts.get_all_field_names()\n \n # split exclude and include fieldnames into shared and translated\n sfieldnames = [field for field in opts.fields or [] if field in shared_fields]\n tfieldnames = [field for field in opts.fields or [] if field not in shared_fields]\n sexclude = [field for field in opts.exclude or [] if field in shared_fields]\n texclude = [field for field in opts.exclude or [] if field not in shared_fields]\n \n # required by fields_for_model\n if not sfieldnames:\n sfieldnames = None\n if not tfieldnames:\n tfieldnames = None \n \n # If a model is defined, extract form fields from it.\n sfields = fields_for_model(opts.model, sfieldnames, sexclude,\n opts.widgets, formfield_callback)\n tfields = fields_for_model(mopts.translations_model, tfieldnames,\n texclude, opts.widgets, formfield_callback)\n \n fields = sfields\n fields.update(tfields)\n \n # make sure opts.fields doesn't specify an invalid field\n none_model_fields = [k for k, v in fields.iteritems() if not v]\n missing_fields = set(none_model_fields) - \\\n set(declared_fields.keys())\n if missing_fields:\n message = 'Unknown field(s) (%s) specified for %s'\n message = message % (', '.join(missing_fields),\n opts.model.__name__)\n raise FieldError(message)\n # Override default model fields with any custom declared ones\n # (plus, include all the other declared fields).\n fields.update(declared_fields)\n \n if new_class._meta.exclude:\n new_class._meta.exclude = list(new_class._meta.exclude)\n else:\n new_class._meta.exclude = []\n \n for field in (mopts.translations_accessor, 'master'):\n if not field in new_class._meta.exclude:\n new_class._meta.exclude.append(field)\n else:\n fields = declared_fields\n new_class.declared_fields = declared_fields\n new_class.base_fields = fields\n # always exclude the FKs\n return new_class\n\n\nclass TranslatableModelForm(ModelForm):\n __metaclass__ = TranslatableModelFormMetaclass\n\n def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None,\n initial=None, error_class=ErrorList, label_suffix=':',\n empty_permitted=False, instance=None):\n opts = self._meta\n model_opts = opts.model._meta\n object_data = {}\n if instance is not None:\n trans = get_cached_translation(instance)\n if not trans:\n try:\n trans = get_translation(instance)\n except model_opts.translations_model.DoesNotExist:\n trans = None\n if trans:\n object_data = 
model_to_dict(trans, opts.fields, opts.exclude)\n if initial is not None:\n object_data.update(initial)\n initial = object_data\n super(TranslatableModelForm, self).__init__(data, files, auto_id,\n prefix, object_data,\n error_class, label_suffix,\n empty_permitted, instance)\n\n def save(self, commit=True):\n if self.instance.pk is None:\n fail_message = 'created'\n new = True\n else:\n fail_message = 'changed'\n new = False\n super(TranslatableModelForm, self).save(True)\n trans_model = self.instance._meta.translations_model\n language_code = self.cleaned_data.get('language_code', get_language())\n if not new:\n trans = get_cached_translation(self.instance)\n if not trans:\n try:\n trans = get_translation(self.instance, language_code)\n except trans_model.DoesNotExist:\n trans = trans_model()\n else:\n trans = trans_model()\n trans = save_instance(self, trans, self._meta.fields, fail_message,\n commit, construct=True)\n trans.language_code = language_code\n trans.master = self.instance\n return combine(trans)\n \n def _post_clean(self):\n if self.instance.pk:\n try:\n trans = trans = get_translation(self.instance, self.instance.language_code)\n trans.master = self.instance\n self.instance = combine(trans)\n except self.instance._meta.translations_model.DoesNotExist:\n language_code = self.cleaned_data.get('language_code', get_language())\n self.instance = self.instance.translate(language_code)\n else:\n language_code = self.cleaned_data.get('language_code', get_language())\n self.instance = self.instance.translate(language_code)\n self.instance.save(False)\n return super(TranslatableModelForm, self)._post_clean()","sub_path":"nani/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":7577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"101351014","text":"class MenuItem:\n def __init__(self):\n self.name=\"\"\n self.price=0\n def Show (self):\n print(\"%s %i\" %(self.name,self.price))\n \nSpag= MenuItem()\nSpag.name=\"Spaghetti\"\nSpag.price=95\n\nprint(Spag.name)\nprint(Spag.price)\n\n\nCoke=MenuItem()\nCoke.name=\"Coke Float\"\nCoke.price=25\n\nprint(Spag.name)\nprint(Spag.price)\nprint(Coke.name)\nprint(Coke.price)\n\nSpag.Show()\nCoke.Show()\n","sub_path":"OOP_Menu.py","file_name":"OOP_Menu.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"398536938","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom keras.models import Model, Input\nfrom keras.layers import Concatenate, Conv2D, MaxPooling2D, Conv2DTranspose, Dropout, UpSampling2D, BatchNormalization, RepeatVector, Reshape, Permute, Flatten\nfrom keras.optimizers import Adam\nfrom keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, EarlyStopping\nfrom keras.activations import relu, sigmoid\nfrom keras.regularizers import l1, l2, l1_l2\nfrom keras.layers import Activation\nfrom keras import backend as K\nfrom segmentation_models.losses import bce_jaccard_loss, jaccard_loss, binary_crossentropy\nfrom segmentation_models.metrics import iou_score, jaccard_score\n\nimport sys\nsys.path.insert(0, 'keras-deeplab-v3-plus')\nfrom model_cfm_dual_wide import Deeplabv3, _xception_block\nfrom clr_callback import CyclicLR\nfrom AdamAccumulate import AdamAccumulate\n\nimport os\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\"\n\nimport cv2, glob\nfrom skimage.io import imsave, imread\nfrom skimage.transform import resize, rotate, rescale\nfrom random import 
shuffle\n\nfrom data_cfm_patched_dual import load_validation_data\nfrom albumentations import *\nfrom aug_generators_dual import aug_daniel, imgaug_generator_patched\n\nfull_size = 256\nimg_size = 224\nstride = 16\ndata_path = 'data/'\npred_path = 'preds/'\ntemp_path = 'temp/'\nK.set_image_data_format('channels_last') # TF dimension ordering in this code\n\nif __name__ == '__main__':\n\tprint('-'*30)\n\tprint('Loading validation data...')\n\tprint('-'*30)\n\tvalidation_data = load_validation_data(full_size, img_size, stride) \n\t\n\tmodel_checkpoint = ModelCheckpoint('cfm_weights_patched_no_sce_' + str(img_size) + '_e{epoch:02d}_iou{val_weighted_iou_score:.4f}.h5', monitor='val_weighted_iou_score', save_best_only=False)\n\t# Cyclic learning-rate schedule; defined here but currently disabled in callbacks_list below.\n\tclr_triangular = CyclicLR(mode='triangular2', step_size=12000, base_lr=6e-5, max_lr=6e-4)\n\tcallbacks_list = [\n\t\t#EarlyStopping(patience=6, verbose=1, restore_best_weights=False),\n\t\t#clr_triangular,\n\t\tmodel_checkpoint\n\t]\n\t\n\tSMOOTH = 1e-12
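\t# Worked example of the IoU/Jaccard definition used below (an illustration\n\t# added for clarity, not part of the original script): for flat binary masks\n\t# a = [1, 1, 0, 0] and b = [1, 0, 1, 0], intersection = 1 and union = 3, so\n\t# IoU = (1 + SMOOTH) / (3 + SMOOTH) ~= 0.333; SMOOTH only guards against\n\t# division by zero. Note that this local iou_score shadows the one imported\n\t# from segmentation_models.metrics, relies on helpers (gather_channels,\n\t# round_if_needed, get_reduce_axes, average) that are never imported in this\n\t# script, and is not referenced by the metrics actually used below.\n\tdef iou_score(gt, pr, class_weights=1., class_indexes=None, smooth=SMOOTH, per_image=False, threshold=None, **kwargs):\n\t\tr\"\"\" The `Jaccard index`_, also known as Intersection over Union and the Jaccard similarity coefficient\n\t\t(originally coined coefficient de communauté by Paul Jaccard), is a statistic used for comparing the\n\t\tsimilarity and diversity of sample sets. The Jaccard coefficient measures similarity between finite sample sets,\n\t\tand is defined as the size of the intersection divided by the size of the union of the sample sets:\n\t\t.. math:: J(A, B) = \\frac{A \\cap B}{A \\cup B}\n\t\tArgs:\n\t\t\tgt: ground truth 4D keras tensor (B, H, W, C) or (B, C, H, W)\n\t\t\tpr: prediction 4D keras tensor (B, H, W, C) or (B, C, H, W)\n\t\t\tclass_weights: 1. or list of class weights, len(weights) = C\n\t\t\tclass_indexes: Optional integer or list of integers, classes to consider, if ``None`` all classes are used.\n\t\t\tsmooth: value to avoid division by zero\n\t\t\tper_image: if ``True``, metric is calculated as mean over images in batch (B),\n\t\t\t\telse over whole batch\n\t\t\tthreshold: value to round predictions (use ``>`` comparison), if ``None`` prediction will not be rounded\n\t\tReturns:\n\t\t\tIoU/Jaccard score in range [0, 1]\n\t\t.. 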
_`Jaccard index`: https://en.wikipedia.org/wiki/Jaccard_index\n\t\t\"\"\"\n\n\t\tbackend = kwargs['backend']\n\n\t\tgt, pr = gather_channels(gt, pr, indexes=class_indexes, **kwargs)\n\t\tpr = round_if_needed(pr, threshold, **kwargs)\n\t\taxes = get_reduce_axes(per_image, **kwargs)\n\n\t\t# score calculation\n\t\tintersection = backend.sum(gt * pr, axis=axes)\n\t\tunion = backend.sum(gt + pr, axis=axes) - intersection\n\n\t\tscore = (intersection + smooth) / (union + smooth)\n\t\tscore = average(score, per_image, class_weights, **kwargs)\n\n\t\treturn score\n\n\tdef bce_ln_jaccard_loss(gt, pr, bce_weight=1.0, smooth=SMOOTH, per_image=True):\n\t\tbce = K.mean(binary_crossentropy(gt[:,:,:,0], pr[:,:,:,0]))*25/26 + K.mean(binary_crossentropy(gt[:,:,:,1], pr[:,:,:,1]))/26\n\t\tloss = bce_weight * bce - K.log(jaccard_score(gt[:,:,:,0], pr[:,:,:,0], smooth=smooth, per_image=per_image))*25/26 - K.log(jaccard_score(gt[:,:,:,1], pr[:,:,:,1], smooth=smooth, per_image=per_image))/26\n\t\treturn loss\n\t\n\tdef weighted_iou_score(gt, pr, smooth=SMOOTH, per_image=True):\n\t\tedge_iou_score = jaccard_score(gt[:,:,:,0], pr[:,:,:,0], smooth=smooth, per_image=per_image)\n\t\tmask_iou_score = jaccard_score(gt[:,:,:,1], pr[:,:,:,1], smooth=smooth, per_image=per_image)\n\t\treturn (edge_iou_score * 25 + mask_iou_score)/26\n\n\tdef edge_iou_score(gt, pr, smooth=SMOOTH, per_image=True):\n\t\tedge_iou_score = jaccard_score(gt[:,:,:,0], pr[:,:,:,0], smooth=smooth, per_image=per_image)\n\t\treturn edge_iou_score\n\n\tdef mask_iou_score(gt, pr, smooth=SMOOTH, per_image=True):\n\t\tmask_iou_score = jaccard_score(gt[:,:,:,1], pr[:,:,:,1], smooth=smooth, per_image=per_image)\n\t\treturn mask_iou_score\n\n\tprint('-'*30)\n\tprint('Creating and compiling model...')\n\tprint('-'*30)\n\timg_shape = (img_size, img_size, 3)\n\tinputs = Input(shape=img_shape)\n\tmodel = Deeplabv3(input_shape=(img_size, img_size,3), classes=16, OS=16, backbone='xception', weights=None)\n\t\n\tmodel.compile(optimizer=AdamAccumulate(lr=1e-4, accum_iters=4), loss=bce_ln_jaccard_loss, metrics=['binary_crossentropy', weighted_iou_score, edge_iou_score, mask_iou_score, 'accuracy'])\n\tmodel.summary()\n\t#model.load_weights('cfm_weights_patched_no_sce_224_e05_iou0.3325.h5')\n\t\n\tprint('-'*30)\n\tprint('Fitting model...')\n\tprint('-'*30)\n\ttrain_generator = imgaug_generator_patched(4, img_size=full_size, patch_size=img_size, patch_stride=stride)\n\thistory = model.fit_generator(train_generator,\n\t\t\t\tsteps_per_epoch=8000,\n\t\t\t\tepochs=80,\n\t\t\t\tvalidation_data=validation_data,\n\t\t\t\tverbose=1,\n#\t\t\t\tmax_queue_size=64,\n#\t\t\t\tuse_multiprocessing=True,\n#\t\t\t\tworkers=2,\n\t\t\t\tcallbacks=callbacks_list)\n\tprint(history.history)\n","sub_path":"training/train_cfm_v9_224_deeplabv3-xception_patched-256-16.py","file_name":"train_cfm_v9_224_deeplabv3-xception_patched-256-16.py","file_ext":"py","file_size_in_byte":5785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"601140100","text":"import sys\nimport pyspark.sql.functions as f\nfrom pyspark.sql import SparkSession\n\nspark = SparkSession.builder.appName(\"task4c-sql\").getOrCreate()\n\ndf = spark.read.format('csv').options(header='false', inferschema='false') \\\n .load(sys.argv[1]).na.fill('')\n\ndata = df.select(df._c20.cast('string').alias('name'),\n df._c5.cast('DECIMAL(10, 2)').alias('fare'))\n\nresult = data.groupBy('name').agg(f.sum('fare')) \\\n .select('name', f.col('sum(fare)').alias('revenue')) \\\n 
.sort(f.col('revenue').desc()).limit(10) \\\n    .write.csv('task4c-sql.out', quoteAll=False, header=False,\n               quote='', ignoreTrailingWhiteSpace=False)\n\n'''\nmodule load python/gnu/3.6.5\nmodule load spark/2.4.0\nrm -rf task4c-sql.out\nhfs -rm -R task4c-sql.out\nspark-submit --conf \\\nspark.pyspark.python=/share/apps/python/3.6.5/bin/python \\\ntask4c-sql.py task1b-sql.out\nhfs -getmerge task4c-sql.out task4c-sql.out\nhfs -rm -R task4c-sql.out\ncat task4c-sql.out\n'''\n","sub_path":"Assignment2/task4c-sql.py","file_name":"task4c-sql.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"140146289","text":"'''\nI create a SARIMAX model for predicting the number of crimes in Philadelphia.\nA good read on SARIMAX models: https://tomaugspurger.github.io/modern-7-timeseries.html\nFor more information about statsmodels:\nhttp://www.statsmodels.org/dev/generated/statsmodels.tsa.statespace.sarimax.SARIMAXResults.html\nIt is important to note that I have installed release candidate version 0.8.0rc1 of statsmodels.\nLet's start by importing the needed libraries.\n'''\n# Import\nimport pandas as pd\nfrom collections import namedtuple\n\nimport statsmodels.api as sm\nfrom statsmodels.tsa.stattools import adfuller\nimport statsmodels.tsa.api as smt\nfrom statsmodels.tools.eval_measures import rmse\n\nfrom scipy.optimize import brute\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport numpy as np\n\n# Group by month and year\n# Read CSV\ndf = pd.read_csv('crime.csv')\nprint(df.columns)\n\n# Convert month to datetime format\ndf['Date'] = pd.to_datetime(df['Month'])\n\n# Derive month and year\ndf['Month'] = df['Date'].dt.month\ndf['Year'] = df['Date'].dt.year\n\n# Create time series of counts\nh_ts = df.groupby(['Date']).size()\nprint(h_ts.head())\nh_ts.to_csv('count.csv')\n\n'''\nLet's start with a seasonal decomposition.\n'''\n\nseas_dec = sm.tsa.seasonal_decompose(h_ts)\nseas_dec.plot()\nplt.show()\n\n'''\nThere is clearly a yearly seasonality.\nDo we have stationary data? 
To answer that, run the augmented Dickey-Fuller test, which tests:\n- H0 (null hypothesis): the time series is non-stationary and needs to be differenced\n- HA (alternative hypothesis): the time series is stationary and does not need to be differenced\n'''\n\nADF = namedtuple('ADF', 'adf pvalue usedlag nobs critical icbest') \nadf_test = ADF(*adfuller(h_ts))\nprint(adf_test.critical)\nprint(adf_test.adf)\n# The series is stationary when the test statistic is below the critical values\n# (equivalently, when the p-value is small enough to reject H0).\n\n'''\nOur time series is not stationary.\n'''\n\ndef tsplot(y, lags=None, figsize=(10, 8)):\n    fig = plt.figure(figsize=figsize)\n    layout = (2, 2)\n    ts_ax = plt.subplot2grid(layout, (0, 0), colspan=2)\n    acf_ax = plt.subplot2grid(layout, (1, 0))\n    pacf_ax = plt.subplot2grid(layout, (1, 1))\n\n    y.plot(ax=ts_ax)\n    smt.graphics.plot_acf(y, lags=lags, ax=acf_ax)\n    smt.graphics.plot_pacf(y, lags=lags, ax=pacf_ax)\n    [ax.set_xlim(-1) for ax in [acf_ax, pacf_ax]]\n    sns.despine()\n    plt.tight_layout()\n    return ts_ax, acf_ax, pacf_ax\n\n# Original\ntsplot(h_ts, lags=24)\nplt.show()\n\n'''\nLet's now compute the first difference.\n'''\nfirst_diff = h_ts.diff().dropna()\ntsplot(first_diff, lags=24)\nplt.show()\n\nadf_test = ADF(*adfuller(first_diff))\nprint(adf_test.critical)\nprint(adf_test.adf)\n\n'''\nIt is stationary.\nLet's now compute the first difference of the seasonal difference.\n'''\n\nperiods = 12\nseasonal_first_diff = (h_ts - h_ts.shift(periods)).diff().dropna()\ntsplot(seasonal_first_diff, lags=24)\nplt.show()\n\nadf_test = ADF(*adfuller(seasonal_first_diff))\nprint(adf_test.critical)\nprint(adf_test.adf)\n\n'''\nIt is stationary.\nLet's fit a model. I want to use only the data up to 2014 for training. Split our data into train and test sets and check it is correctly done.\n'''\n\nindex_start = h_ts.index.get_loc('2015-01-01')\nindex_end = h_ts.index.get_loc('2016-01-01')\ntrain_ts = h_ts.iloc[:index_start]\ntest_ts = h_ts.iloc[index_start:index_end]\nunused_ts = h_ts.iloc[index_end:]\nprint(train_ts.tail())\nprint(test_ts.head())\nprint(unused_ts.head())\n\n\ndef objfunc(x, *params):\n    model = None\n    aic = float('inf')\n    rms = float('inf')\n    x_int = [int(p.item()) for p in x]\n    p, d, q, P, D, Q = x_int\n    s, train_ts, test_ts, max_order = params\n    if sum(x) <= max_order:\n        try:\n            # model = smt.SARIMAX(train_ts, trend = 't', order=(p, d, q), seasonal_order=(P, D, Q, s))\n            model = smt.SARIMAX(train_ts, order=(p, d, q), seasonal_order=(P, D, Q, s))\n            fitted_model = model.fit()\n            aic = fitted_model.aic.item()\n            forecast = fitted_model.get_forecast(test_ts.size)\n            rms = rmse(test_ts, forecast.predicted_mean)\n        except Exception:\n            print('Error for: ' + str(x))\n    return rms\n\nx_grid = (slice(0, 2), slice(0, 2), slice(0, 2), slice(0, 2), slice(0, 2), slice(0, 2))\nmax_order = 5\ns = 12\nparams = (s, train_ts, test_ts, max_order)\nbrute_res = brute(objfunc, x_grid, args=params, finish=None, full_output=True)\n\nparams = [int(p.item()) for p in brute_res[0]]\nrms = brute_res[1]\nprint('(p, d, q, P, D, Q) = ' + str(params) + ', RMSE = ' + str(rms))\n\n# Re-train with best parameters and display summary\nmod_seasonal = smt.SARIMAX(train_ts, order=tuple(params[0:3]), seasonal_order=tuple(params[3:])+(s,))\nres_seasonal = mod_seasonal.fit()\nprint(res_seasonal.summary())\ntsplot(res_seasonal.resid, lags=24)\nplt.show()
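\n# If the model fits well, these residuals should resemble white noise:\n# centered on zero, with no significant spikes left in the ACF/PACF panels.\n\n'''\nPlot observations and forecast.\n'''\n\npred = res_seasonal.get_prediction(start='2007-01-01', end='2017-12-01')\npred_ci = pred.conf_int()\nplt.figure()\nax = train_ts.plot(label='Train', color='green')\nax = test_ts.plot(label='Test', color='red')\nax = 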
unused_ts.plot(label='Unused', color='black')\npred.predicted_mean.plot(ax=ax, label='Forecast', alpha=.7)\nax.fill_between(pred_ci.index,\n                pred_ci.iloc[:, 0],\n                pred_ci.iloc[:, 1], color='k', alpha=.2)\nplt.legend()\nsns.despine()\nplt.show()\n","sub_path":"analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":5182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"546179779","text":"import sys\nimport urllib.request as urllib2\nimport re\nimport queue\nimport threading\nfrom threading import Thread\nimport datetime\n\nclass Manager(threading.Thread):\n    def __init__(self,q,URLDump):\n        Thread.__init__(self)\n        self.q = q\n        self.URLDump=URLDump\n\n    def run(self):\n        while not self.q.empty():\n            self.startScrapping(self.q.get(),self.q,self.URLDump)\n            self.q.task_done()\n    \n    def startScrapping(self,endPoint,q,URLDump):\n        try:\n            print('-- Starting scraping of: ' + endPoint)\n            user_agent = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36'\n            customRequest = urllib2.Request(endPoint, headers={'User-agent':user_agent})\n            rootSiteHandler = urllib2.urlopen(customRequest)\n            rootSiteContent = rootSiteHandler.read()\n\n            # save the fetched page under its last path segment\n            with open(endPoint.split('/')[-1], 'wb') as out_file:\n                out_file.write(rootSiteContent)\n\n            #print(rootSiteContent)\n\n            rootSiteContent = str(rootSiteContent,'utf-8')\n\n            listUrls = re.findall('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+',rootSiteContent)\n            #print(listUrls)\n\n            self.addInQueue(listUrls,q,URLDump)\n\n        except Exception as excp:\n            print(\"Unexpected error:\", excp)\n\n    def addInQueue(self,listUrls,q,URLDump):\n        # only URLs seen for the first time are queued; repeats just bump the count\n        for url in listUrls:\n            if url in URLDump:\n                URLDump[url]=URLDump[url]+1\n            else:\n                URLDump[url]=1\n                q.put(url)\n\ndef invokeScrapping(endPoint):\n    q = queue.Queue()\n    URLDump = {}\n    baseUrl = ''\n    if endPoint:\n        # NOTE: the crawl always starts from the hardcoded baseUrl below,\n        # regardless of which endPoint was passed in\n        baseUrl = 'https://medium.com'\n        q.put(baseUrl)\n        print(\"Starting scraping for the website: \" + baseUrl)\n        for x in range(5):\n            inst = Manager(q,URLDump)\n            inst.daemon = True\n            inst.start()\n\n        q.join()\n","sub_path":"python/src/scrappermaster.py","file_name":"scrappermaster.py","file_ext":"py","file_size_in_byte":1985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"67269369","text":"import os\r\nimport hashlib\r\nfrom sqlalchemy import *\r\nfrom sqlalchemy.pool import NullPool\r\nfrom flask import Flask, request, render_template, g, redirect, Response, session, url_for, flash, Markup\r\nfrom datetime import datetime\r\n\r\ntmpl_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates')\r\napp = Flask(__name__, template_folder=tmpl_dir)\r\n\r\nDATABASEURI = \"[redacted]\"\r\n\r\nengine = create_engine(DATABASEURI)\r\n\r\n@app.before_request\r\ndef before_request():\r\n    try:\r\n        g.conn = engine.connect()\r\n    except:\r\n        print(\"error connecting to database\")\r\n        import traceback; traceback.print_exc()\r\n        g.conn = None\r\n\r\n@app.teardown_request\r\ndef teardown_request(exception):\r\n    try:\r\n        g.conn.close()\r\n    except Exception as e:\r\n        pass\r\n\r\n@app.route('/index')\r\ndef index():\r\n    print(request.args)\r\n\r\n    if session.get('selected_user') != True:\r\n        return redirect('/')\r\n    context = dict(user_name = session['user_name'], client_id = session['client_id'])\r\n    return render_template(\"index.html\", **context)\r\n\r\n\r\n@app.route('/artist')\r\ndef artist():\r\n    
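# Look up an artist either by the name typed into the search box\r\n    # (session['artist']) or, when that is empty, by the artist_id saved when\r\n    # an artist hyperlink was followed.\r\n    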
print(request.args)\r\n # artist_name = session['artist']\r\n # cursor = g.conn.execute(\"SELECT * FROM artist WHERE LOWER(name) = LOWER(%s)\", artist_name)\r\n\r\n if len(session['artist']) == 0:\r\n artist_id = session['artist_id']\r\n cursor = g.conn.execute(\"SELECT * FROM artist WHERE artist_id = %s\", artist_id)\r\n else: \r\n artist_name = session['artist']\r\n cursor = g.conn.execute(\"SELECT * FROM artist WHERE LOWER(name) = LOWER(%s)\", artist_name)\r\n names = []\r\n ids = []\r\n\r\n ##GET ARTIST NAME FOR ARTIST PAGE\r\n for result in cursor:\r\n names.append(result['name'])\r\n ids.append(result['artist_id'])\r\n cursor.close()\r\n\r\n if len(ids) == 0:\r\n print(\"artist not found\")\r\n msg = Markup(\"Could not find artist \\'{}\\'\".format(artist_name))\r\n flash(msg)\r\n return redirect('/index')\r\n \r\n ##LIST OF ALBUMS ON ARTIST PAGE ( HYPERLINKS )\r\n artist_id = ids[0]\r\n cursor = g.conn.execute(\"SELECT * FROM album WHERE artist_id = %s order by release_date desc\", artist_id)\r\n album_names = []\r\n album_ids = []\r\n years = []\r\n for result in cursor:\r\n album_names.append(result['title'])\r\n album_ids.append(result['album_id'])\r\n years.append(result['release_date'].year)\r\n cursor.close()\r\n\r\n context = dict(data_names = names, data_album_names = album_names, data_album_ids = album_ids, years = years, client_id = session['client_id'],user_name = session['user_name'])\r\n return render_template(\"artist.html\", **context)\r\n\r\n\r\n@app.route('/artist_id/', methods=['GET'])\r\ndef artist_name(artist_id):\r\n session['artist'] = \"\"\r\n session['artist_id'] = artist_id\r\n return redirect(url_for('.artist', artist = artist_id))\r\n\r\n@app.route('/user')\r\ndef user():\r\n print(request.args)\r\n if len(session['user']) == 0:\r\n user_id = session['user_id']\r\n cursor = g.conn.execute(\"SELECT * FROM users WHERE user_id = %s\", user_id)\r\n else: \r\n user_name = session['user']\r\n cursor = g.conn.execute(\"SELECT * FROM users WHERE LOWER(username) = LOWER(%s)\", user_name)\r\n \r\n names = []\r\n ids = []\r\n emails = []\r\n\r\n ##GET USER NAME FOR USER PAGE\r\n for result in cursor:\r\n names.append(result['username'])\r\n ids.append(result['user_id'])\r\n emails.append(result['email'])\r\n cursor.close()\r\n\r\n if len(ids) == 0:\r\n print(\"user not found\")\r\n msg = Markup(\"Could not find user \\'{}\\'\".format(user_name))\r\n flash(msg)\r\n return redirect('/index')\r\n \r\n ##COMMENTS COUNT FOR USER PAGE\r\n user_id = ids[0]\r\n cursor = g.conn.execute(\"SELECT text, comment_id, album_id, song_id, time_stamp FROM comment WHERE user_id=%s and comment_id NOT IN (SELECT comment_id FROM moderator_comment) order by comment_id desc\",user_id)\r\n\r\n comment_num=0\r\n comments = []\r\n comments_content_id = []\r\n for result in cursor:\r\n comments.append((result['comment_id'], result['text'], result['time_stamp']))\r\n if result['song_id'] is None:\r\n comments_content_id.append((\"album\", result['album_id']))\r\n else:\r\n comments_content_id.append((\"song\", result['song_id']))\r\n comment_num+=1\r\n cursor.close()\r\n\r\n comments_content_name = []\r\n for c in comments_content_id:\r\n print(c[0])\r\n cursor = g.conn.execute(\"SELECT title FROM {} WHERE {}_id={}\".format(c[0], c[0], c[1]))\r\n for result in cursor:\r\n comments_content_name.append(result['title'])\r\n\r\n cursor.close()\r\n session['user_ref'] = True\r\n context = dict(emails=emails,username=names,comment_num=comment_num,user_ids=ids, client_id = 
session['client_id'],user_name = session['user_name'], comments=comments, comments_content_id=comments_content_id, comments_content_name = comments_content_name, mod_id = session['moderator'])\r\n return render_template(\"user.html\", **context)\r\n\r\n## Executes when an user hyperlink is clicked\r\n@app.route('/user_id/', methods=['GET'])\r\ndef user_name(user_id):\r\n session['user'] = \"\"\r\n session['user_id'] = user_id\r\n return redirect(url_for('.user', user = user_id))\r\n\r\ndef round_half(x):\r\n return round(x * 2) / 2\r\n\r\n@app.route('/album')\r\ndef album():\r\n session['song_id'] = 0\r\n ##if user not logged in set as guest\r\n if 'client_id' not in session:\r\n session['client_id'] = 0\r\n session['moderator'] = 0\r\n\r\n session['user_ref'] = False\r\n print(request.args)\r\n if len(session['album']) == 0:\r\n album_id = session['album_id']\r\n cursor = g.conn.execute(\"SELECT * FROM album WHERE album_id = %s\", album_id)\r\n else: \r\n album_name = session['album']\r\n cursor = g.conn.execute(\"SELECT * FROM album WHERE LOWER(title) = LOWER(%s)\", album_name)\r\n \r\n print(\"HERE: %s\", session['client_id'])\r\n\r\n titles = []\r\n ids = []\r\n dates = []\r\n \r\n ##GET ALBUM INFO FOR ALBUM PAGE\r\n for result in cursor:\r\n titles.append(result['title'])\r\n ids.append(result['album_id'])\r\n dates.append(result['release_date'])\r\n \r\n if len(ids) == 0:\r\n print(\"album not found\")\r\n msg = Markup(\"Could not find album \\'{}\\'\".format(album_name))\r\n flash(msg)\r\n return redirect('/index')\r\n elif len(ids) > 1:\r\n return redirect(url_for('.search_list_album', search_list_album = session['album']))\r\n album_id = ids[0]\r\n\r\n year = dates[0].year\r\n\r\n ##LIST OF SONGS ON ALBUM PAGE ( HYPERLINKS )\r\n cursor = g.conn.execute(\"SELECT * FROM song WHERE album_id = %s order by track_num\", album_id)\r\n artist_ids = []\r\n song_names = []\r\n song_ids = []\r\n song_durations = []\r\n song_durations_formatted = []\r\n song_ratings = []\r\n for result in cursor:\r\n song_names.append(result['title'])\r\n song_ids.append(result['song_id'])\r\n artist_ids.append(result['artist_id'])\r\n rating = []\r\n cursor_two = g.conn.execute(\"SELECT AVG(rating)::numeric(3,2) as rating from user_rates_song WHERE song_id =%s\", result['song_id'])\r\n for result_two in cursor_two:\r\n rating.append(result_two['rating'])\r\n if rating[0] is None:\r\n song_ratings.append(2.5)\r\n else:\r\n song_ratings.append(round_half(float(rating[0])))\r\n ms = int(result['duration_ms'])\r\n seconds = (ms // 1000) % 60\r\n mins = (ms // 60000) % 60\r\n song_durations.append(ms)\r\n song_durations_formatted.append(\"{} min, {} sec\".format(mins, seconds))\r\n \r\n\r\n album_len = len(song_ids)\r\n album_duration_ms = sum(song_durations)\r\n seconds = (album_duration_ms // 1000) % 60\r\n mins = (album_duration_ms // 60000) % 60\r\n hrs = (album_duration_ms // 3600000)\r\n album_duration = []\r\n if hrs > 0:\r\n album_duration.append(\"{} hr, {} min\".format(hrs, mins))\r\n else:\r\n album_duration.append(\"{} min, {} sec\".format(mins, seconds))\r\n\r\n ##GET ARTIST NAME FOR ALBUM PAGE\r\n cursor = g.conn.execute(\"SELECT * FROM artist WHERE artist_id = %s\", artist_ids[0])\r\n artist_names = []\r\n for result in cursor:\r\n artist_names.append(result['name'])\r\n \r\n ##GET COMMENT TEXT FOR ALBUM PAGE\r\n cursor = g.conn.execute(\"SELECT text, comment_id, user_id, time_stamp FROM comment WHERE album_id=%s and comment_id NOT IN (SELECT comment_id FROM moderator_comment) order by comment_id 
desc\",album_id)\r\n comments = []\r\n comment_ids = []\r\n user_ids = []\r\n for result in cursor:\r\n comments.append((result['text'], result['time_stamp']))\r\n comment_ids.append(result['comment_id'])\r\n user_ids.append(result['user_id'])\r\n\r\n ##GET USER NAMES FOR ALBUM PAGE ( EACH USERNAME IS A HYPERLINK ABOVE A COMMENT )\r\n user_names = []\r\n for i in range(len(user_ids)):\r\n cursor = g.conn.execute(\"SELECT * FROM users WHERE user_id = %s\", user_ids[i])\r\n for result in cursor:\r\n user_names.append(result['username'])\r\n cursor.close()\r\n\r\n context = dict(artist_id = artist_ids[0], data_titles = titles, data_song_names = song_names, data_song_ids = song_ids, data_artist_names = artist_names, data_album_len = album_len, data_album_duration = album_duration[0],\r\n data_release_year = year, data_comments = comments, data_user_ids = user_ids, data_user_names = user_names, comment_ids = comment_ids, data_song_durations = song_durations_formatted, data_song_ratings = song_ratings,\r\n client_id = session['client_id'], mod_id = session['moderator'],user_name = session['user_name'])\r\n return render_template(\"album.html\", **context)\r\n\r\n## Executes when an album hyperlink is clicked\r\n@app.route('/album_id/', methods=['GET'])\r\ndef album_name(album_id):\r\n session['album'] = \"\"\r\n session['album_id'] = album_id\r\n return redirect(url_for('.album', album = album_id))\r\n\r\n@app.route('/song')\r\ndef song():\r\n\r\n ##if user not logged in set as guest\r\n if 'client_id' not in session:\r\n session['client_id'] = 0\r\n session['moderator'] = 0\r\n\r\n session['user_ref'] = False\r\n print(request.args)\r\n if len(session['song']) == 0:\r\n song_id = session['song_id']\r\n cursor = g.conn.execute(\"SELECT * FROM song WHERE song_id = %s\", song_id)\r\n else: \r\n song_name = session['song']\r\n cursor = g.conn.execute(\"SELECT * FROM song WHERE LOWER(title) = LOWER(%s)\", song_name)\r\n \r\n ##GET SONG INFO FOR SONG PAGE\r\n titles = []\r\n ids = []\r\n # album_ids = []\r\n # artist_ids = []\r\n durations = []\r\n features = []\r\n for result in cursor:\r\n titles.append(result['title'])\r\n ids.append(result['song_id'])\r\n ids.append(result['album_id'])\r\n ids.append(result['artist_id'])\r\n ms = int(result['duration_ms'])\r\n seconds = (ms // 1000) % 60\r\n mins = (ms // 60000) % 60\r\n durations.append(\"{} min, {} sec\".format(mins, seconds))\r\n features.append(result['artist_features'])\r\n\r\n\r\n if len(ids) == 0:\r\n print(\"song not found\")\r\n msg = Markup(\"Could not find song \\'{}\\'\".format(song_name))\r\n flash(msg)\r\n return redirect('/index')\r\n elif len(durations) > 1:\r\n return redirect(url_for('.search_list_song', search_list_song = session['song']))\r\n song_id = ids[0]\r\n album_id = ids[1]\r\n artist_id = ids[2]\r\n feature_names = []\r\n\r\n ##GET ARTIST FEATURES FOR SONG PAGE\r\n try:\r\n for i in range(len(features[0])):\r\n cursor = g.conn.execute(\"SELECT * FROM artist WHERE artist_id = %s\", features[0][i])\r\n for result in cursor:\r\n feature_names.append(result['name'])\r\n cursor.close()\r\n except:\r\n features[0] = \"\"\r\n\r\n ##GET ALBUM NAME FOR SONG PAGE\r\n cursor = g.conn.execute(\"SELECT * FROM album WHERE album_id = %s\", album_id)\r\n album_names = []\r\n for result in cursor:\r\n album_names.append(result['title'])\r\n\r\n ##GET ARTIST NAME FOR SONG PAGE\r\n cursor = g.conn.execute(\"SELECT * FROM artist WHERE artist_id = %s\", artist_id)\r\n artist_names = []\r\n for result in cursor:\r\n 
artist_names.append(result['name'])\r\n cursor.close()\r\n\r\n ##GET COMMENTS FOR SONG PAGE\r\n cursor = g.conn.execute(\"SELECT text, comment_id, user_id, time_stamp FROM comment WHERE song_id=%s and comment_id NOT IN (SELECT comment_id FROM moderator_comment) order by comment_id desc\",song_id)\r\n comments = []\r\n comment_ids = []\r\n user_ids = []\r\n for result in cursor:\r\n comments.append((result['text'], result['time_stamp']))\r\n comment_ids.append(result['comment_id'])\r\n user_ids.append(result['user_id'])\r\n cursor.close()\r\n ##GET USER NAMES FOR SONG PAGE\r\n user_names = []\r\n for i in range(len(user_ids)):\r\n cursor = g.conn.execute(\"SELECT * FROM users WHERE user_id = %s\", user_ids[i])\r\n for result in cursor:\r\n user_names.append(result['username'])\r\n cursor.close()\r\n \r\n rating = []\r\n cursor = g.conn.execute(\"SELECT AVG(rating)::numeric(3,2) as rating from user_rates_song WHERE song_id =%s\", song_id)\r\n for result in cursor:\r\n if result['rating'] is not None:\r\n rating.append(round_half(float(result['rating'])))\r\n else:\r\n rating.append(round_half(2.5))\r\n \r\n\r\n context = dict(album_id = album_id,artist_id = artist_id,data_titles = titles, data_ids = ids, data_album_names = album_names, data_artist_names = artist_names, \r\n durations=durations,comments=comments,user_ids=user_ids,user_names=user_names, features=features, feature_names=feature_names, comment_ids = comment_ids, \r\n client_id = session['client_id'], mod_id = session['moderator'], rating = rating,user_name = session['user_name'])\r\n return render_template(\"song.html\", **context)\r\n\r\n## Executes when a song hyperlink is clicked\r\n@app.route('/song_id/', methods=['GET'])\r\ndef song_name(song_id):\r\n session['song'] = \"\"\r\n session['song_id'] = song_id\r\n return redirect(url_for('.song', song = song_id))\r\n\r\n## Executes when a user rates a song\r\n@app.route('/user_rate/', methods=['GET'])\r\ndef user_rates(rating):\r\n print(session['client_id'])\r\n if session['client_id'] == 0:\r\n msg = Markup(\"Please login to add ratings\")\r\n flash(msg)\r\n return redirect('/')\r\n try:\r\n cursor = g.conn.execute(\"INSERT INTO user_rates_song(song_id, rating, user_id) VALUES(%s, %s, %s)\", session['song_id'], rating, session['client_id'])\r\n except:\r\n cursor = g.conn.execute(\"UPDATE user_rates_song SET rating = %s WHERE song_id = %s and user_id = %s\", rating, session['song_id'], session['client_id'])\r\n return redirect(url_for('.song', song = session['song_id']))\r\n\r\n## Multiple results song\r\n@app.route('/search_list_song')\r\ndef search_list_song():\r\n print(request.args)\r\n cursor = g.conn.execute(\"SELECT * FROM song WHERE LOWER(title) = LOWER(%s)\", session['song'])\r\n song_names = []\r\n artist_ids = []\r\n album_ids = []\r\n song_ids = []\r\n\r\n ##GET ARTIST NAME FOR MULTIPLE SEARCH PAGE\r\n for result in cursor:\r\n song_names.append(result['title'])\r\n artist_ids.append(result['artist_id'])\r\n album_ids.append(result['album_id'])\r\n song_ids.append(result['song_id'])\r\n cursor.close()\r\n\r\n artist_names = []\r\n for i in range(len(artist_ids)):\r\n cursor = g.conn.execute(\"SELECT * FROM artist WHERE artist_id = %s\", artist_ids[i])\r\n for result in cursor:\r\n artist_names.append(result['name'])\r\n cursor.close()\r\n\r\n ## GET ALBUM NAME FOR EACH RESULT IN PAGE\r\n album_names = []\r\n years = []\r\n for i in range(len(album_ids)):\r\n cursor = g.conn.execute(\"SELECT * FROM album WHERE album_id = %s\", album_ids[i])\r\n for result in 
cursor:\r\n album_names.append(result['title'])\r\n years.append(result['release_date'].year)\r\n cursor.close()\r\n\r\n context = dict(artist_names=artist_names,title=song_names,ids=song_ids,album_names=album_names, years=years, client_id = session['client_id'],user_name = session['user_name'])\r\n return render_template(\"search_list_song.html\", **context)\r\n\r\n## Multiple results album\r\n@app.route('/search_list_album')\r\ndef search_list_album():\r\n print(request.args)\r\n cursor = g.conn.execute(\"SELECT * FROM album WHERE LOWER(title) = LOWER(%s)\", session['album'])\r\n album_names = []\r\n artist_ids = []\r\n album_ids = []\r\n years = []\r\n ##GET ARTIST NAME FOR MULTIPLE SEARCH PAGE\r\n for result in cursor:\r\n album_names.append(result['title'])\r\n artist_ids.append(result['artist_id'])\r\n album_ids.append(result['album_id'])\r\n years.append(result['release_date'].year)\r\n cursor.close()\r\n\r\n artist_names = []\r\n for i in range(len(artist_ids)):\r\n cursor = g.conn.execute(\"SELECT * FROM artist WHERE artist_id = %s\", artist_ids[i])\r\n for result in cursor:\r\n artist_names.append(result['name'])\r\n cursor.close()\r\n context = dict(artist_names=artist_names,title=album_names,ids=album_ids,years=years, client_id = session['client_id'],user_name = session['user_name'])\r\n return render_template(\"search_list_album.html\", **context)\r\n\r\n### Search functionality on index page\r\n@app.route('/search', methods=['POST'])\r\ndef search():\r\n session['album_id'] = 0\r\n session['song_id'] = 0\r\n session['artist_id'] = 0\r\n session['user_id'] = 0\r\n searched_name = request.form['name']\r\n search_type = request.form['type']\r\n if len(searched_name) == 0:\r\n msg = Markup(\"Please fill the search field\")\r\n flash(msg)\r\n return redirect('/index')\r\n if search_type == \"default\":\r\n msg = Markup(\"Please select a search type\")\r\n flash(msg)\r\n return redirect('/index')\r\n elif search_type == \"artist\":\r\n session['artist'] = searched_name\r\n return redirect(url_for('.artist', artist = searched_name))\r\n elif search_type == \"album\":\r\n session['album'] = searched_name\r\n return redirect(url_for('.album', album = searched_name))\r\n elif search_type == \"song\":\r\n session['song'] = searched_name\r\n return redirect(url_for('.song', song = searched_name))\r\n # return redirect(url_for('.search_list', search_list = searched_name))\r\n elif search_type == \"user\":\r\n session['user'] = searched_name\r\n return redirect(url_for('.user', user = searched_name))\r\n\r\n@app.route('/logins', methods=['POST'])\r\ndef logins():\r\n session['moderator'] = 0\r\n session['client_id'] = 0\r\n\r\n uname = request.form['uname']\r\n password = request.form['psw']\r\n pword = []\r\n uid = []\r\n cursor = g.conn.execute(\"SELECT * FROM users WHERE LOWER(username) = LOWER(%s)\",uname)\r\n for result in cursor:\r\n pword.append(result['password'])\r\n uid.append(result['user_id'])\r\n cursor.close()\r\n if len(pword)!=0:\r\n hashed_pw = hashpw(password)\r\n print(hashed_pw)\r\n print(pword[0])\r\n if hashed_pw == pword[0]:\r\n session['client_id']=uid[0]\r\n cursor = g.conn.execute(\"SELECT * FROM moderator WHERE user_id = %s\",session['client_id'])\r\n mod_id = []\r\n for result in cursor:\r\n mod_id.append(result['user_id'])\r\n cursor.close()\r\n if len(mod_id) > 0:\r\n session['moderator'] = 1\r\n else:\r\n session['moderator'] = 0\r\n print(\"Successful login\")\r\n session['user_name'] = uname\r\n session['selected_user'] = True\r\n return 
redirect(url_for('.index', client_id = session['client_id']))\r\n else:\r\n print(\"Wrong password\")\r\n msg = Markup(\"Wrong password. Please try again.\")\r\n flash(msg)\r\n return redirect('/')\r\n else:\r\n print(\"User not found\")\r\n msg = Markup(\"Could not find user \\'{}\\'\".format(uname))\r\n flash(msg)\r\n return redirect('/')\r\n\r\n@app.route('/guest_login')\r\ndef guest_login():\r\n session['selected_user'] = True\r\n session['user_name'] = 'Guest'\r\n return redirect(url_for('.index', client_id = 0))\r\n\r\n@app.route('/registration')\r\ndef registration():\r\n return render_template(\"register.html\")\r\n\r\n@app.route('/register', methods=['GET', 'POST'])\r\ndef register():\r\n uname = request.form['uname']\r\n email = request.form['email']\r\n password = request.form['psw']\r\n password_confirm = request.form['psw_confirm']\r\n uid = []\r\n \r\n\r\n if len(uname) > 30:\r\n msg = Markup(\"Username must not exceed 30 characters.\")\r\n flash(msg)\r\n return redirect('/registration')\r\n\r\n # Check to make sure registration entries are valid\r\n total_num = sum(c.isdigit() for c in password)\r\n if total_num < 2:\r\n msg = Markup(\"Password must contain at least two numbers.\")\r\n flash(msg)\r\n return redirect('/registration')\r\n\r\n if password != password_confirm:\r\n msg = Markup(\"Confirm password must match password.\")\r\n flash(msg)\r\n return redirect('/registration')\r\n\r\n cursor = g.conn.execute(\"SELECT * FROM users WHERE LOWER(username) = LOWER(%s)\",uname)\r\n for result in cursor:\r\n uid.append(result['user_id'])\r\n\r\n if len(uid) > 0:\r\n msg = Markup(\"Username already in use. Please try a different username.\")\r\n flash(msg)\r\n return redirect('/registration')\r\n\r\n cursor = g.conn.execute(\"SELECT * FROM users WHERE LOWER(email) = LOWER(%s)\", email)\r\n for result in cursor:\r\n uid.append(result['user_id'])\r\n\r\n if len(uid) > 0:\r\n msg = Markup(\"Email already in use. 
Please try a different email.\")\r\n flash(msg)\r\n return redirect('/registration')\r\n\r\n #Register account\r\n cursor = g.conn.execute(\"SELECT MAX(user_id) AS max_id FROM users\")\r\n for result in cursor:\r\n uid.append(result['max_id'])\r\n\r\n hashed_pw = hashpw(password)\r\n new_id = int(uid[0]) + 1\r\n\r\n cursor = g.conn.execute(\"INSERT INTO users(user_id, username, email, password) VALUES(%s, %s, %s, %s)\", new_id, uname, email, hashed_pw)\r\n\r\n session['client_id']= new_id\r\n session['user_name'] = uname\r\n session['selected_user'] = True\r\n return redirect(url_for('.index', client_id = session['client_id']))\r\n\r\n#HASH PASSWORD n TIMES\r\ndef hashpw(pw):\r\n # The real algorithm is redacted of course\r\n hashed_password = hashlib.md5(pw.encode()).hexdigest()\r\n return hashed_password\r\n\r\n##EXECUTES WHEN COMMENT IS ADDED TO AN ALBUM PAGE\r\n@app.route('/album_comment', methods=['POST'])\r\ndef album_comment():\r\n\r\n ##Redirect to login page if user not logged in\r\n if session['client_id'] == 0:\r\n return redirect('/')\r\n\r\n text = request.form['text']\r\n album_id = session['album_id']\r\n\r\n if len(text) == 0:\r\n return redirect(url_for('.album', album = session['album_id']))\r\n cursor = g.conn.execute(\"SELECT MAX(comment_id) as comment_id FROM comment\")\r\n for result in cursor:\r\n comment_id = result['comment_id']\r\n g.conn.execute('INSERT INTO comment(text, comment_id, user_id, album_id, song_id, time_stamp) VALUES (%s, %s, %s, %s, null, TIMESTAMP %s)', text, comment_id + 1, session['client_id'], album_id, datetime.now().strftime(\"%m/%d/%Y, %H:%M:%S\"))\r\n return redirect(url_for('.album', album = session['album_id']))\r\n\r\n##EXECUTES WHEN COMMENT IS ADDED TO A SONG PAGE\r\n@app.route('/song_comment', methods=['POST'])\r\ndef song_comment():\r\n\r\n ##Redirect to login page if user not logged in\r\n if session['client_id'] == 0:\r\n return redirect('/')\r\n\r\n text = request.form['text']\r\n song_id = session['song_id']\r\n\r\n if len(text) == 0:\r\n return redirect(url_for('.song', song = session['song_id']))\r\n cursor = g.conn.execute(\"SELECT MAX(comment_id) as comment_id FROM comment\")\r\n for result in cursor:\r\n comment_id = result['comment_id']\r\n g.conn.execute('INSERT INTO comment(text, comment_id, user_id, album_id, song_id, time_stamp) VALUES (%s, %s, %s, null, %s, TIMESTAMP %s)', text, comment_id + 1, session['client_id'], song_id, datetime.now().strftime(\"%m/%d/%Y, %H:%M:%S\"))\r\n return redirect(url_for('.song', song = session['song_id']))\r\n\r\n##DELETE COMMENTS BY ADDING INTO MODERATOR_COMMENT TABLE\r\n@app.route('/delete/', methods=['GET'])\r\ndef delete(comment_id):\r\n mod_id = session['moderator']\r\n if mod_id > 0:\r\n g.conn.execute('INSERT INTO moderator_comment(user_id, comment_id) VALUES (%s, %s)', mod_id, comment_id)\r\n else:\r\n g.conn.execute('INSERT INTO moderator_comment(user_id, comment_id) VALUES (%s, %s)', 1, comment_id) ##when a user deletes their own comment, moderator 1 is used\r\n \r\n if session['user_ref'] is not None and session['user_ref']:\r\n return redirect(url_for('.user'))\r\n\r\n if int(session['song_id']) > 0:\r\n song_id = session['song_id']\r\n return redirect(url_for('.song', song = song_id))\r\n else:\r\n album_id = session['album_id']\r\n print(album_id)\r\n return redirect(url_for('.album', album_id = album_id))\r\n\r\n\r\n@app.route('/')\r\ndef login():\r\n if 'client_id' not in session:\r\n session['client_id'] = 0\r\n elif session['client_id'] != 0:\r\n return 
redirect('/index')\r\n    return render_template(\"login.html\")\r\n\r\n@app.route('/logout')\r\ndef logout():\r\n    session['selected_user'] = False\r\n    session['client_id'] = 0\r\n    return redirect('/')\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    import click\r\n\r\n    @click.command()\r\n    @click.option('--debug', is_flag=True)\r\n    @click.option('--threaded', is_flag=True)\r\n    @click.argument('HOST', default='0.0.0.0')\r\n    @click.argument('PORT', default=8111, type=int)\r\n    def run(debug, threaded, host, port):\r\n        HOST, PORT = host, port\r\n        print(\"running on %s:%d\" % (HOST, PORT))\r\n        app.secret_key = 'secret_key'\r\n        app.config['SESSION_TYPE'] = 'filesystem'\r\n        app.run(host=HOST, port=PORT, debug=debug, threaded=threaded)\r\n\r\n    run()\r\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":25374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"93347851","text":"import sqlite3, sys\n\nbase_datos=sys.argv[1]\n\ndef get_tables():\n    conn = sqlite3.connect(base_datos)\n    c=conn.cursor()\n    cur=c.execute(\"select name from sqlite_master where type = 'table';\")\n    res=cur.fetchall()\n    cur.close()\n    tablas=[]\n    for tabla in res:\n        string = ''.join(tabla)\n        tablas.append(string)\n    return tablas\n\n\nif __name__ == '__main__':\n    conn = sqlite3.connect(base_datos)\n    c=conn.cursor()\n    for tabla in get_tables():\n        c.execute(\"delete from \"+tabla)\n        conn.commit()\n        print(\"Table cleared\")\n    c.close()\n    \n","sub_path":"Comprobar_BD/borrar_BD.py","file_name":"borrar_BD.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"48278406","text":"from torchvision import datasets\r\nfrom torch.utils.data import Dataset\r\nimport numpy as np\r\n\r\nclass cifar10(Dataset):\r\n    def __init__(self, root, train, download, transform):\r\n        self.root = root\r\n        self.train = train\r\n        self.download = download\r\n        self.transform = transform\r\n        self.cifar_dataset = datasets.CIFAR10(root=self.root, train=self.train, download=self.download)\r\n        self.classes = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']\r\n    \r\n    def __len__(self):\r\n        return len(self.cifar_dataset)\r\n    \r\n    def __getitem__(self, index):\r\n        image, label = self.cifar_dataset[index]\r\n        if self.transform is not None:\r\n            # Convert PIL image to numpy array\r\n            image = np.array(image)\r\n            image = self.transform(image=image)[\"image\"]\r\n        return image, label\r\n\r\n","sub_path":"07- Advanced Concepts/CIFAR10.py","file_name":"CIFAR10.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"601489847","text":"import pymongo\nfrom argparse import ArgumentParser\nfrom datetime import datetime\nimport pprint\n\nif __name__ == \"__main__\":\n\n    parser = ArgumentParser()\n\n    parser.add_argument(\"--host\", default=\"mongodb://localhost:27017\",\n                        help=\"mongodb URI for connecting to server [default: %(default)s]\")\n    parser.add_argument(\"--collection\", default=\"test.test\", help=\"Watch [default: %(default)s]\")\n    parser.add_argument(\"--output\", help=\"Output collection to write change stream\")\n    parser.add_argument(\"--drop\", action=\"store_true\", default=False, help=\"drop output collection\")\n    args = parser.parse_args()\n\n    client = pymongo.MongoClient(host=args.host)\n
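\n    # str.partition splits the \"db.collection\" argument on the first dot,\n    # e.g. \"test.test\" -> (\"test\", \".\", \"test\")\n    (database_name, dot, collection_name) = 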
args.collection.partition(\".\")\n\n database = client[database_name]\n collection = database[collection_name]\n\n watch_cursor = collection.watch()\n if args.output:\n print( \"Writing change stream to {}.{}\".format( database_name, args.output))\n output = database[args.output]\n if args.drop:\n output.drop()\n try:\n print(\"Watching: {}\\n\".format(args.collection))\n for d in watch_cursor:\n print(\"time now : {}\".format(datetime.utcnow()))\n print(\"cluster time : {}\".format(d[\"clusterTime\"].as_datetime()))\n if \"ns\" in d:\n print(\"collection : {}.{}\".format(d[\"ns\"][\"db\"], d[\"ns\"][\"coll\"]))\n pprint.pprint(d)\n print(\"\")\n if args.output:\n output.insert_one(d)\n except KeyboardInterrupt:\n print(\"Closing watch cursor\")\n watch_cursor.close()\n print(\"exiting...\")\n","sub_path":"transactions/watch_collection.py","file_name":"watch_collection.py","file_ext":"py","file_size_in_byte":1684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"456209983","text":"# -*- coding:ascii -*-\nfrom mako import runtime, filters, cache\nUNDEFINED = runtime.UNDEFINED\n__M_dict_builtin = dict\n__M_locals_builtin = locals\n_magic_number = 10\n_modified_time = 1425794099.807721\n_enable_loop = True\n_template_filename = 'C:\\\\Python34\\\\Scripts\\\\colonial\\\\product\\\\templates/products.search.html'\n_template_uri = 'products.search.html'\n_source_encoding = 'ascii'\nimport os, os.path, re\n_exports = ['contents']\n\n\ndef _mako_get_namespace(context, name):\n try:\n return context.namespaces[(__name__, name)]\n except KeyError:\n _mako_generate_namespaces(context)\n return context.namespaces[(__name__, name)]\ndef _mako_generate_namespaces(context):\n pass\ndef _mako_inherit(template, context):\n _mako_generate_namespaces(context)\n return runtime._inherit_from(context, '/home/templates/base_ajax.htm', _template_uri)\ndef render_body(context,**pageargs):\n __M_caller = context.caller_stack._push_frame()\n try:\n __M_locals = __M_dict_builtin(pageargs=pageargs)\n def contents():\n return render_contents(context._locals(__M_locals))\n photographs = context.get('photographs', UNDEFINED)\n products = context.get('products', UNDEFINED)\n STATIC_URL = context.get('STATIC_URL', UNDEFINED)\n __M_writer = context.writer()\n __M_writer('\\r\\n\\r\\n')\n if 'parent' not in context._data or not hasattr(context._data['parent'], 'contents'):\n context['self'].contents(**pageargs)\n \n\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\ndef render_contents(context,**pageargs):\n __M_caller = context.caller_stack._push_frame()\n try:\n def contents():\n return render_contents(context)\n photographs = context.get('photographs', UNDEFINED)\n products = context.get('products', UNDEFINED)\n STATIC_URL = context.get('STATIC_URL', UNDEFINED)\n __M_writer = context.writer()\n __M_writer('\\r\\n\\t\\t
Search Results\\r\\n\\r\\n')\n        __M_writer('\\r\\n')\n        for photo in photographs:\n            __M_writer('\\t\\t\\r\\n')\n            for product in products:\n                if product.id == photo.id:\n                    __M_writer('\\t\\t\\t\\t\\r\\n\\t\\t\\t\\t')\n                    __M_writer(str( product.name ))\n                    __M_writer('\\r\\n\\t\\t\\t\\t')\n                    __M_writer(str( product.price ))\n                    __M_writer('\\r\\n')\n            __M_writer('\\t\\t\\r\\n')\n        __M_writer('\\r\\n\\r\\n')\n        return ''\n    finally:\n        context.caller_stack._pop_frame()\n\n\n\"\"\"\n__M_BEGIN_METADATA\n{\"line_map\": {\"64\": 15, \"65\": 15, \"66\": 15, \"27\": 0, \"68\": 16, \"37\": 1, \"70\": 17, \"71\": 20, \"72\": 22, \"78\": 72, \"47\": 3, \"67\": 16, \"69\": 17, \"56\": 3, \"57\": 9, \"58\": 11, \"59\": 12, \"60\": 13, \"61\": 14, \"62\": 15, \"63\": 15}, \"uri\": \"products.search.html\", \"filename\": \"C:\\\\Python34\\\\Scripts\\\\colonial\\\\product\\\\templates/products.search.html\", \"source_encoding\": \"ascii\"}\n__M_END_METADATA\n\"\"\"\n","sub_path":"product/cached_templates/templates/products.search.html.py","file_name":"products.search.html.py","file_ext":"py","file_size_in_byte":3327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"5853567","text":"def apareo():\n    file=open(\"MAESTRO.txt\")\n    file1=open(\"NOVEDAD.txt\")\n    file2=open(\"APAREO.txt\", \"w\")\n    file3=open(\"errores.txt\", \"w\")\n    for linea in file:\n        print(\"Line from the outer loop: \",linea)\n        lista1=linea.split()\n        print(\"List 1: \",lista1)\n        for linea in file1:\n            print(\"Line from the inner loop: \",linea)\n            lista2=linea.split()\n            print(\"List 2: \",lista2)\n            if len(lista2)!= 4:\n                file3.write(','.join(lista2) + '\\n')\n                print(\"Added to the errors file: \",','.join(lista2))\n            else:\n                file2.write(','.join(lista2) + '\\n')\n                print(\"Added to the merge file: \",','.join(lista2))\n    # print(file2)\n    file.close()\n    file1.close()\n    file2.close()\n    file3.close()\napareo()\n","sub_path":"Archivos/Ejercicio 3/ejercicio3.py","file_name":"ejercicio3.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"125600930","text":"# coding: utf-8\nimport re\n\nfrom reports.reports_tree.headers.structure.order import convert_headers\nfrom wikt.commons.iterators.base import InfrequentTimeoutMixin\nfrom wikt.commons.iterators.headers import GetHeadersMixin\nfrom wikt.commons.iterators.lang_old import PageMultiLanguageChanger\nfrom wikt.commons.utils.tools import chunks\n\n\nclass NoMorphology(InfrequentTimeoutMixin, PageMultiLanguageChanger,\n                   GetHeadersMixin):\n    def __init__(self):\n        super(NoMorphology, self).__init__()\n        self.message_timeout = 100000\n\n    def change_description(self):\n        return u\"Добавление недостающего раздела морфологии\"\n\n    def lang_action(self, page, lang, content):\n        title = page.title\n\n        if title.startswith('-') or title.endswith('-'):\n            return content\n        if title.startswith('*'):\n            return content\n\n        parts = re.split(\"^((?:==)(?:[^=].*?[^=])(?:==))$\", content,\n                         flags=re.MULTILINE)\n        sections = [\n            {'header2': '', 'content': parts.pop(0)}\n        ]\n        sections += [\n            {'header2': part[0], 'content': part[1]}\n            for part in chunks(parts, 2)\n        ]\n        for data in sections:\n            # print '-' * 80\n            header2 = data['header2']\n            # print header2\n            if not header2:\n                continue\n            p = re.compile(u'^==\\s*\\{\\{(з|заголовок)[^}]*\\}\\}\\s*==$', re.UNICODE)\n            m = p.match(header2)\n            if not m:\n                # print header2, '==', '#' * 120\n                return content\n\n        new_content = content\n        for data in sections:\n            section_content = data['content']\n\n            headers = convert_headers(self.get_headers(section_content))\n            if not headers:\n                continue\n\n            if ' ' not in title and u'=== Морфологические и синтаксические свойства ===' not in headers:\n                # print u'# [[{}]] (секция \"{}\")'.format(title, lang)\n                # print '=' * 120\n                # print section_content\n                if section_content.strip().startswith(u''):\n                    new_section_content = 
\\\n re.sub(u'^\\s*',\n u'\\n=== Морфологические и синтаксические свойства ===\\n',\n section_content)\n new_content = new_content.replace(section_content,\n new_section_content)\n elif section_content.strip().startswith(u'{{падежи '):\n new_section_content = \\\n re.sub(u'^\\s*\\{\\{падежи ',\n u'\\n=== Морфологические и синтаксические свойства ===\\n{{падежи ',\n section_content)\n new_content = new_content.replace(section_content,\n new_section_content)\n return new_content\n\n\nif __name__ == '__main__':\n NoMorphology().run()\n","sub_path":"wikt/tasks/wikifiers/headers/structure/7_morphology_changer.py","file_name":"7_morphology_changer.py","file_ext":"py","file_size_in_byte":3234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"294318014","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport core\nfrom random import shuffle\nimport copy\nimport numpy\nimport logging\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n# http://code.activestate.com/recipes/521906-k-fold-cross-validation-partition/\n\ndef gridSearch(training_data,\n trainer,\n original_data_model,\n k=3,\n search_space=[.00001, .0001, .001, .01, .1, 1],\n randomize=True):\n\n training_data = training_data[numpy.random.permutation(training_data.size)]\n\n logger.info('using cross validation to find optimum alpha...')\n scores = []\n\n fields = sorted(original_data_model['fields'].keys())\n\n for alpha in search_space:\n all_score = 0\n all_N = 0\n for (training, validation) in kFolds(training_data, k):\n data_model = trainer(training, original_data_model, alpha)\n\n weight = numpy.array([data_model['fields'][field].weight\n for field in fields])\n bias = data_model['bias']\n\n labels = validation['label'] == 'match'\n predictions = numpy.dot(validation['distances'], weight) + bias\n\n true_dupes = numpy.sum(labels == 1)\n\n if true_dupes == 0 :\n logger.warning(\"not real positives, change size of folds\")\n continue\n\n true_predicted_dupes = numpy.sum(predictions[labels == 1] > 0)\n\n recall = true_predicted_dupes/float(true_dupes)\n\n if recall == 0 :\n score = 0\n\n else:\n precision = true_predicted_dupes/float(numpy.sum(predictions > 0))\n score = 2 * recall * precision / (recall + precision)\n\n\n all_score += score\n\n average_score = all_score/k\n logger.debug(\"Average Score: %f\", average_score)\n\n scores.append(average_score)\n\n best_alpha = search_space[::-1][scores[::-1].index(max(scores))]\n\n logger.info('optimum alpha: %f' % best_alpha)\n return best_alpha\n\n\ndef kFolds(training_data, k):\n train_dtype = training_data.dtype\n slices = [training_data[i::k] for i in xrange(k)]\n for i in xrange(k):\n validation = slices[i]\n training = [datum for s in slices if s is not validation for datum in s]\n validation = numpy.array(validation, train_dtype)\n training = numpy.array(training, train_dtype)\n\n yield (training, validation)\n","sub_path":"dedupe/crossvalidation.py","file_name":"crossvalidation.py","file_ext":"py","file_size_in_byte":2477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"404485960","text":"# General:\nimport tweepy # To consume Twitter's API\nimport pandas as pd # To handle data\nimport numpy as np # For number computing\n\n# For plotting and visualization:\nfrom IPython.display import display\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n# We import our access keys:\nfrom credentials import * # This will allow us to use 
the keys as variables\n\nfrom textblob import TextBlob\nimport re\n\nimport csv\n\n# API's setup:\ndef twitter_setup():\n    # Authentication and access using keys:\n    auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\n    auth.set_access_token(ACCESS_TOKEN, ACCESS_SECRET)\n\n    # Return API with authentication:\n    api = tweepy.API(auth)\n    return api\n\n# create an extractor object:\nextractor = twitter_setup()\n\ntweets = tweepy.Cursor(extractor.search, q=\"muslim\", rpp=20, result_type=\"recent\", include_entities=True, lang=\"en\").items(100)\n\nd = []\n\nfor tweet in tweets:\n    # collect one row per tweet; the DataFrame is built after the loop:\n    d.append({'Date': tweet.created_at,'len':len(tweet.text), 'Tweets': tweet.text.encode('utf-8'), 'ID': tweet.id, 'Likes': tweet.favorite_count, 'RTs': tweet.retweet_count, 'Source': tweet.source})\n\ndata = pd.DataFrame(d)\nprint(\"Number of tweets extracted: {}.\\n\".format(len(data)))\n\n# display the first 10 elements of the dataframe:\ndisplay(data.head(10))\n\n# extract the mean of lengths:\nmean = np.mean(data['len'])\n\nprint(\"The average tweet length: {}\".format(mean))\n\n# extract the tweets with the most FAVs and the most RTs:\n\nfav_max = np.max(data['Likes'])\nrt_max = np.max(data['RTs'])\n\nfav = data[data.Likes == fav_max].index[0]\nrt = data[data.RTs == rt_max].index[0]\n\n# Max FAVs:\nprint(\"The tweet with the most likes is: \\n{}\".format(data['Tweets'][fav]))\nprint(\"Number of likes: {}\".format(fav_max))\nprint(\"{} characters.\\n\".format(data['len'][fav]))\n\n# Max RTs:\nprint(\"The tweet with the most retweets is: \\n{}\".format(data['Tweets'][rt]))\nprint(\"Number of retweets: {}\".format(rt_max))\nprint(\"{} characters.\\n\".format(data['len'][rt]))\n\ntfav = pd.Series(data=data['Likes'].values, index=data['Date'])\ntret = pd.Series(data=data['RTs'].values, index=data['Date'])\n\n\n# Likes vs retweets visualization:\ntfav.plot(figsize=(16,4), label=\"Likes\", legend=True)\ntret.plot(figsize=(16,4), label=\"Retweets\", legend=True)\n\nplt.show()\n\ndef clean_tweet(tweet):\n    '''\n    Utility function to clean the text in a tweet by removing \n    links and special characters using regex.\n    '''\n    return ' '.join(re.sub(\"(@[A-Za-z0-9]+)|([^0-9A-Za-z \\t])|(\\w+:\\/\\/\\S+)\", \" \", tweet).split())
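\n# For instance (an illustration added here, not part of the original script):\n# clean_tweet(\"@user Check https://t.co/x #wow!\") returns \"Check wow\" - the\n# mention and the URL are dropped, other non-alphanumeric characters are\n# replaced by spaces, and the result is re-joined on single spaces.\n\ndef analize_sentiment(tweet):\n    '''\n    Utility function to classify the polarity of a tweet\n    using textblob.\n    '''\n    analysis = TextBlob(clean_tweet(tweet))\n    if analysis.sentiment.polarity > 0:\n        return 1\n    elif analysis.sentiment.polarity == 0:\n        return 0\n    else:\n        return -1\n\n# create a column with the result of the analysis:\ndata['SA'] = np.array([ analize_sentiment(tweet) for tweet in data['Tweets'] ])\n\n# create a numpy vector mapped to labels:\npositive = 0\nneutral = 0\nnegative = 0\n\nfor sa in data['SA']:\n    if sa == 1:\n        positive = positive+1\n    elif sa == 0:\n        neutral = neutral+1\n    else:\n        negative = negative +1\n\nsize_sa = [positive, neutral, negative]\nlabels = 'Positive', 'Neutral', 'Negative'\ncolors = ['green', 'gray', 'red']\nexplode = (0, 0.1, 0.1)  # explode the neutral and negative slices\n\nprint (\"Total positive sentiment: {}\".format(positive))\nprint (\"Total neutral sentiment: {}\".format(neutral))\nprint (\"Total negative sentiment: {}\".format(negative))\n\n# plt.pie(pie_data, explode=explode, labels=labels, colors=colors,\n#         autopct='%1.1f%%')\n \n# plt.axis('equal')\n# plt.show()\n\n\n# objects = ('Positive', 'Neutral', 'Negative')\n# y_pos = np.arange(len(objects))\n# performance = size_sa\n \n# plt.bar(y_pos, performance, align='center', alpha=0.5)\n# 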
plt.xticks(y_pos, objects)\n# plt.ylabel('Number')\n# plt.title('Sentiment Analysis')\n\n\nlabels = 'Positive', 'Neutral', 'Negative'\ncolor = ['yellowgreen','gray','lightcoral']\nperformance = size_sa\nexplode = (0.1, 0.1, 0.1)\nplt.pie(performance,autopct='%1.1f%%',colors=color, labels=labels , shadow= True,explode = explode , startangle=140)\n \nplt.show()\n# display the updated dataframe with the new column:\n# display(data.head(10))\n# print(data)\n\n","sub_path":"sentiment.py","file_name":"sentiment.py","file_ext":"py","file_size_in_byte":4305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"362652852","text":"import functools\nimport tensorflow as tf\nfrom core import trainer_seq, input_reader\nfrom core.model_builder import build_man_model\nfrom google.protobuf import text_format\nfrom object_detection.builders import input_reader_builder\nfrom object_detection.protos import input_reader_pb2\nfrom object_detection.protos import model_pb2\nfrom object_detection.protos import pipeline_pb2\nfrom object_detection.protos import train_pb2\nimport os\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=\"1\"\n\ntf.logging.set_verbosity(tf.logging.INFO)\n\nflags = tf.app.flags\n\nflags.DEFINE_string('train_dir', 'model/ssd_mobilenet_video1/',\n 'Directory to save the checkpoints and training summaries.')\nflags.DEFINE_string('pipeline_config_path', 'model/ssd_mobilenet_video.config',\n 'Path to a pipeline_pb2.TrainEvalPipelineConfig config '\n 'file. If provided, other configs are ignored')\nflags.DEFINE_string('train_config_path', '',\n 'Path to a train_pb2.TrainConfig config file.')\nflags.DEFINE_string('input_config_path', '',\n 'Path to an input_reader_pb2.InputReader config file.')\nflags.DEFINE_string('model_config_path', '',\n 'Path to a model_pb2.DetectionModel config file.')\nflags.DEFINE_string('image_root', '/media/2TB/Research/DataSet/ILSVRC2015/Data/VID/train/',\n 'Root path to input images')\n\nFLAGS = flags.FLAGS\n\n\ndef get_configs_from_pipeline_file():\n \"\"\"Reads training configuration from a pipeline_pb2.TrainEvalPipelineConfig.\n\n Reads training config from file specified by pipeline_config_path flag.\n\n Returns:\n model_config: model_pb2.DetectionModel\n train_config: train_pb2.TrainConfig\n input_config: input_reader_pb2.InputReader\n \"\"\"\n pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()\n with tf.gfile.GFile(FLAGS.pipeline_config_path, 'r') as f:\n text_format.Merge(f.read(), pipeline_config)\n\n model_config = pipeline_config.model.ssd\n train_config = pipeline_config.train_config\n input_config = pipeline_config.train_input_reader\n\n return model_config, train_config, input_config\n\ndef main(_):\n model_config, train_config, input_config = get_configs_from_pipeline_file()\n model_fn = functools.partial(\n build_man_model,\n model_config=model_config,\n is_training=True)\n create_input_dict_fn = functools.partial(\n input_reader.read_seq, input_config)\n trainer_seq.train(model_fn, create_input_dict_fn, train_config, FLAGS.train_dir, FLAGS.image_root)\n\n\n\n\n\n\nif __name__ == '__main__':\n tf.app.run()\n","sub_path":"train_seq.py","file_name":"train_seq.py","file_ext":"py","file_size_in_byte":2582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"513679906","text":"from Crypto.Cipher import AES\nfrom Crypto import Random\nfrom abc import *\nimport hashlib\n\n\nclass AESCrypto(object):\n \"\"\" \n AESCrypto Class that supports Encryption and 
Decryption for \n    specific plainText. Key and encryption mode should be given \n    when generating the instance. In modes like CBC, plaintext will\n    be padded with PKCS5 padding to a multiple of 16 bytes in\n    length\n\n    \"\"\"\n\n    def __init__(self, plainText, key, mode):\n        self.plainText = plainText.encode(\n            'utf8')  # encoding plainText with utf8\n        self.key = self.keyGenerate(key)  # generate a 256-bit key via sha256\n        self.mode = mode\n\n    def keyGenerate(self, key):\n        \"\"\"derive a 256-bit key from the given key via SHA-256\"\"\"\n        return hashlib.sha256(key.encode('utf8')).digest()\n\n    def Encrypt(self):\n        \"\"\"Encryption method for given plaintext\"\"\"\n        self.IV = Random.new().read(AES.block_size)  # block_size = 16\n        encryptor = AES.new(self.key, self.mode, self.IV)\n        self.plainText += self.padding()  # append the PKCS5 padding\n        return encryptor.encrypt(self.plainText)\n\n    def Decrypt(self, cipherText):\n        \"\"\"Decryption by given cipherText\"\"\"\n        # NOTE: reuses the IV stored by Encrypt(), so Encrypt() must run first\n        decryptor = AES.new(self.key, self.mode, self.IV)\n        pText = decryptor.decrypt(cipherText).decode('utf8')\n        return self.unpadding(pText)  # strip the padding bytes\n\n    def padding(self):\n        \"\"\"\n        padding with PKCS5:\n        compute how many bytes are missing to reach a multiple of 16 and\n        append that many bytes, each holding the pad length\n        \"\"\"\n        length = 16 - (len(self.plainText) % 16)\n        return length * chr(length).encode('utf8')\n\n    def unpadding(self, pText):\n        \"\"\"\n        remove the padding: the last byte's ord() value is the pad length\n        (ord() is the inverse of chr())\n        \"\"\"\n        # print(pText[-1])\n        return pText[0:-ord(pText[-1])]\n\n\nplainText = 'Yaoxi Liu 250941525'\nkey = 'Sixteen byte key'\nmode = AES.MODE_CBC\n\nc = AESCrypto(plainText, key, mode)\n\ncipherText = c.Encrypt()\n\ndecryptText = c.Decrypt(cipherText)\n\nprint([hex(ord(x)) for x in key])\n\nprint(cipherText)\n\nprint(decryptText)\n\n","sub_path":"AESCrypto.py","file_name":"AESCrypto.py","file_ext":"py","file_size_in_byte":2154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"33703839","text":"# -*- coding: utf-8 -*-\n\"\"\"\n    pyidmatcher.config\n    ~~~~~~~~~~~~~\n\n    Matcher configuration file handling including input, output, config\n\"\"\"\nimport os\nimport json\nimport jsonschema\nimport codecs\nimport logging\nimport collections\n\nlogging.basicConfig(level=logging.INFO, format=' %(asctime)s - %(levelname)s- %(message)s')\n\nCONFIG_SCHEMA = {\n    \"type\": \"object\",\n    \"properties\": {\n        \"inputs\": {\n            \"source\": {\n                \"description\": \"The source data, each entity is to be related to a master entity\",\n                \"type\": \"object\",\n                \"properties\": {\n                    \"path\": {\n                        \"description\": \"the relative or absolute path of the source data\",\n                        \"type\": \"string\"\n                    },\n                    \"encoding\": {\n                        \"description\": \"the file encoding, defaults to 'utf-8'\",\n                        \"type\": \"string\"\n                    }\n                },\n                \"required\": [\"path\"]\n            },\n            \"master\": {\n                \"description\": \"The master data, unique set of entities to relate source data to\",\n                \"type\": \"object\",\n                \"properties\": {\n                    \"path\": {\n                        \"description\": \"the relative or absolute path of the master data\",\n                        \"type\": \"string\"\n                    },\n                    \"encoding\": {\n                        \"description\": \"the file encoding, defaults to 'utf-8'\",\n                        \"type\": \"string\"\n                    }\n                },\n                \"required\": [\"path\"]\n            },\n            \"training\": {\n                \"description\": \"Optional data for training matcher model - source data labelled with master_id or null\",\n                \"type\": \"object\",\n                \"properties\": {\n                    \"path\": {\n                        \"description\": \"the relative or absolute path of the training data\",\n                        
\"type\": \"string\"\n },\n \"encoding\": {\n \"description\": \"the file encoding, defaults to 'utf-8'\",\n \"type\": \"string\"\n }\n },\n \"required\": [\"path\"]\n },\n \"required\": [\"source\", \"master\"]\n },\n \"metadata\": {\n \"type\": \"object\",\n \"properties\": {\n \"master_id\": {\n \"description\": \"the unique id of the master record by which to associate source data entities\",\n \"type\": \"string\"\n },\n \"columns\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"object\",\n \"properties\": {\n \"master\": {\n \"description\": \"name of master column to use in matching\",\n \"type\": \"string\"\n },\n \"source\": {\n \"description\": \"name of source column to use in matching\",\n \"type\": \"string\"\n },\n \"weight\": {\n \"description\": \"if weighted average is to be performed\\\n , relative weight of paired columns\",\n \"type\": \"number\",\n \"minimum\": 0,\n \"exclusiveMinimum\": True\n }\n },\n \"required\": [\"master\", \"source\"]\n }\n }\n },\n \"required\": [\"master_id\", \"columns\"]\n },\n \"output\": {\n \"description\": \"The output data, or final destination of the pyidmatcher program\",\n \"type\": \"object\",\n \"properties\": {\n \"path\": {\n \"description\": \"the relative or absolute path of the destination data\",\n \"type\": \"string\"\n },\n \"encoding\": {\n \"description\": \"the file encoding, defaults to 'utf-8'\",\n \"type\": \"string\"\n },\n \"keep_orphans\": {\n \"description\": \"keep orphans - source entities with no master - in the output, defaults to false\",\n \"type\": \"boolean\"\n },\n \"schema\": {\n \"description\": \"the output schema, with source entities grouped by master entity \"\n \"or tagged with master entity columns\",\n \"oneOf\": [{\n \"description\": \"option 1: array of objects where name is column name, \"\n \"value is input (source or master)\",\n \"type\": \"array\",\n \"items\": {\n \"type\": \"object\",\n \"additionalProperties\": {\"type\": \"string\"}\n }\n },\n {\n \"description\": \"option 2: array of master columns to include, with sub-array \"\n \"of source columns to include for each source grouped with master\",\n \"type\": \"object\",\n \"properties\": {\n \"master\": {\n \"type\": \"array\",\n \"items\": {\"type\": \"string\"},\n },\n \"source\": {\n \"type\": \"object\",\n \"properties\": {\n \"label\": {\n \"description\": \"the label (name) for the matched source sub-array\",\n \"type\": \"string\"\n },\n \"columns\": {\n \"type\": \"array\",\n \"items\": {\"type\": \"string\"}\n }\n }\n }\n }\n }]\n }\n },\n \"required\": [\"path\", \"schema\"]\n }\n },\n \"required\": [\"inputs\", \"metadata\", \"output\"]\n}\n\n# For use in _construct_connection Config class method\nConnection = collections.namedtuple('Connection', ['path', 'abs_path', 'encoding'])\n# For use in _parse_columns Config class staticmethod\nColumn = collections.namedtuple('Column', ['master', 'source', 'weight', 'function'])\n\n\nclass Config:\n \"\"\"\n Configuration file abstraction.\n \"\"\"\n\n def __init__(self, path, encoding=\"utf-8\"):\n \"\"\"\n :param path: location of the configuration file\n :param encoding: encoding, defaults to utf-8\n \"\"\"\n self.path = path\n with open(self.path, encoding=encoding) as fin:\n config_data = json.load(fin)\n\n # validate configuration file JSON schema is acceptable\n jsonschema.validate(config_data, CONFIG_SCHEMA)\n\n self.connections = {\n \"master\": self._construct_connection(config_data[\"inputs\"][\"master\"]),\n \"source\": 
self._construct_connection(config_data[\"inputs\"][\"source\"]),\n            \"output\": self._construct_connection(config_data[\"output\"])\n        }\n\n        if \"training\" in config_data[\"inputs\"]:\n            self.connections[\"training\"] = self._construct_connection(config_data[\"inputs\"][\"training\"])\n\n        self.master_id = config_data[\"metadata\"][\"master_id\"]\n        self.matcher_columns = Config._parse_columns(config_data[\"metadata\"][\"columns\"])\n        self.keep_orphans = config_data[\"output\"].get(\"keep_orphans\", False)\n        self.schema_tabular, self.schema_grouped = Config._format_output_schema(config_data[\"output\"][\"schema\"])\n\n    def _construct_connection(self, input_output):\n\n        encoding = input_output.get(\"encoding\", \"utf-8\")\n        try:\n            codecs.lookup(encoding)\n        except LookupError:\n            raise jsonschema.ValidationError(\"\"\"{} is not a valid encoding, for standard encodings, see:\n            http://docs.python.org/3/library/codecs.html#standard-encodings\"\"\".format(encoding))\n\n        if os.path.isabs(input_output[\"path\"]):\n            file = input_output[\"path\"]\n        else:\n            config_abs_path_dir = os.path.abspath(os.path.dirname(self.path))\n            file = os.path.join(config_abs_path_dir, input_output[\"path\"])\n\n        connection = {'file': file, 'encoding': encoding}\n        return connection\n\n    @staticmethod\n    def _parse_columns(columns_in):\n        \"\"\"\n\n        :param columns_in: list of dictionaries defining columns\n        :return: list of Columns namedtuple\n        \"\"\"\n        columns_out = []\n        for col in columns_in:\n            weight = col.get(\"weight\", 1)\n            function = col.get(\"function\", None)\n            new_column = Column(master=col[\"master\"], source=col[\"source\"], weight=weight, function=function)\n            columns_out.append(new_column)\n        return columns_out\n\n    @staticmethod\n    def _is_tabular(schema):\n        \"\"\"\n        Tests if output schema is tabular or grouped\n        :return: True if tabular\n        \"\"\"\n\n        try:\n            jsonschema.validate(schema, {\n                \"type\": \"array\",\n                \"items\": {\"type\": \"object\"}\n            })\n            return True\n        except jsonschema.ValidationError:\n            return False\n\n    @staticmethod\n    def _format_output_schema(schema):\n        if Config._is_tabular(schema):\n            schema_tabular = [(key, val)\n                              for column in schema\n                              for key, val in column.items()]\n            schema_grouped = None\n        else:\n            schema_tabular = None\n            schema_grouped = {\n                \"master_columns\": schema[\"master\"],\n                \"source_label\": schema[\"source\"][\"label\"],\n                \"source_columns\": schema[\"source\"][\"columns\"]\n            }\n\n        return schema_tabular, schema_grouped\n","sub_path":"pyidmatcher/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":10435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"68078102","text":"import os\nimport unittest\nfrom pyats.topology import loader\nfrom genie.libs.sdk.apis.iosxe.ike.configure import configure_isakmp_policy\n\n\nclass TestConfigureIsakmpPolicy(unittest.TestCase):\n\n    @classmethod\n    def setUpClass(self):\n        testbed = f\"\"\"\n        devices:\n          INT1:\n            connections:\n              defaults:\n                class: unicon.Unicon\n              a:\n                command: mock_device_cli --os iosxe --mock_data_dir {os.path.dirname(__file__)}/mock_data --state connect\n                protocol: unknown\n            os: iosxe\n            platform: iosxe\n            type: iosxe\n        \"\"\"\n        self.testbed = loader.load(testbed)\n        self.device = self.testbed.devices['INT1']\n        self.device.connect(\n            learn_hostname=True,\n            init_config_commands=[],\n            init_exec_commands=[]\n        )\n\n    def test_configure_isakmp_policy(self):\n        result = configure_isakmp_policy(self.device, '666', 'pre-share', 'aes 256', '24', 'sha512', None, '56789')\n        expected_output 
= None\n self.assertEqual(result, expected_output)\n\n def test_configure_isakmp_policy_1(self):\n result = configure_isakmp_policy(self.device, '123', None, None, None, None, None, None)\n expected_output = None\n self.assertEqual(result, expected_output)\n","sub_path":"pkgs/sdk-pkg/src/genie/libs/sdk/apis/tests/iosxe/ike/configure/configure_isakmp_policy/test_api_configure_isakmp_policy.py","file_name":"test_api_configure_isakmp_policy.py","file_ext":"py","file_size_in_byte":1340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"484504280","text":"from unittest import TestCase, main\nfrom tests.helpers.products \\\n import physic_product, digital_product, book_product\nfrom shipping_label import ShippingLabel\n\n\nclass TestShippingLabel(TestCase):\n def test_should_can_create_a_shipping_label(self):\n shipping_label = ShippingLabel(\n product=physic_product\n )\n\n assert shipping_label.product == physic_product, \\\n 'Should set the product as the physic_product'\n\n def test_when_product_should_be_put_on_shipping_box(self):\n shipping_label = ShippingLabel(\n product=physic_product\n )\n\n assert shipping_label.should_be_put_on_shipping_box is True, \\\n 'Physics products should be put on shipping box'\n\n def test_when_product_should_not_be_put_on_shipping_box(self):\n shipping_label = ShippingLabel(\n product=digital_product\n )\n\n assert shipping_label.should_be_put_on_shipping_box is False, \\\n 'Digitals products should not be put on shipping box'\n\n def test_when_product_has_tax_exemption(self):\n shipping_label = ShippingLabel(\n product=book_product\n )\n\n assert shipping_label.has_tax_exemption is True, \\\n 'Book has tax exemption'\n\n def test_when_product_has_not_tax_exemption(self):\n shipping_label = ShippingLabel(\n product=digital_product\n )\n\n assert shipping_label.has_tax_exemption is False, \\\n 'Digital product has not tax exemption'\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"backend-solution/tests/test_shipping_label.py","file_name":"test_shipping_label.py","file_ext":"py","file_size_in_byte":1562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"278045344","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\n#Please select GPU first (from Edit->NotebookSetting)\nimport pandas as pd\nimport numpy as np\nimport io\nimport os\nimport re\nfrom google.colab import drive\ndrive.mount('/content/drive')\nimport torch\nimport torch.optim as optim\nimport random \nget_ipython().system('pip install fastai==2.3.1')\nfrom fastai.text.all import *\n\n\n# In[2]:\n\n\npath = '/content/drive/MyDrive/colab_data'\ndef de_emojify(inputString):\n return inputString.encode('ascii', 'ignore').decode('ascii')\ndef text_proc(df, text_col='text'):\n df['orig_text'] = df[text_col]\n # Remove twitter handles\n df[text_col] = df[text_col].apply(lambda x:re.sub('@[^\\s]+','',x))\n # Remove URLs\n df[text_col] = df[text_col].apply(lambda x:x.replace('
', ' '))\n    return df[df[text_col]!='']\n\n\n# In[3]:\n\n\ndata = pd.read_csv(os.path.join(path, \"covid-19_articles_data.csv\"))\n# data = data[data.sentiment!='empty'].drop_duplicates().sample(1000, random_state = 10).reset_index(drop=True)\ndata = text_proc(data,'text').dropna(subset=['sentiment'])\nprint(len(data))\ndata.head(3)\n\n\n# # AWD-LSTM\n\n# In[4]:\n\n\ndls_lm = TextDataLoaders.from_df(data, text_col='text', is_lm=True, valid_pct=0.1)\n\n\n# In[5]:\n\n\nlearn = language_model_learner(dls_lm, AWD_LSTM, drop_mult = 0.3, metrics=[accuracy, Perplexity()]).to_fp16()\n\n\n# In[6]:\n\n\nlearn.unfreeze()\nlearn.fit_one_cycle(4, 1e-3) # 4 means 4 epochs\n\n\n# In[13]:\n\n\nlearn.predict(\"If you've recently heard from an old friend, you're not alone. \", 100, temperature=0.75)\n\n\n# In[8]:\n\n\ndls_clas = DataBlock(\n    blocks = (TextBlock.from_df('text', seq_len = dls_lm.seq_len, vocab = dls_lm.vocab), CategoryBlock),\n    # blocks = (TextBlock.from_df('text', seq_len = 72, vocab = vocab_list), CategoryBlock),\n    get_x = ColReader('text'),\n    get_y = ColReader('sentiment'),\n    splitter = RandomSplitter()\n).dataloaders(data, bs = 64)\n\n\n# In[15]:\n\n\nclasslearn = text_classifier_learner(dls_clas, AWD_LSTM, drop_mult=0.5, metrics=accuracy).to_fp16()\nclasslearn.unfreeze()\nclasslearn.fit_one_cycle(5, slice(1e-3/(2.6**4),1e-3))\n\n\n# In[17]:\n\n\npred_dl = dls_clas.test_dl(data['text'])\npreds = classlearn.get_preds(dl=pred_dl)\n\n\n# \n","sub_path":"kaggle_article_analysis/Notebook/covid19_article_sentiment/Covid19_article_wordEmbedding_gcolab.py","file_name":"Covid19_article_wordEmbedding_gcolab.py","file_ext":"py","file_size_in_byte":2177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"235234322","text":"\"\"\"\n\nHomework assignment #1\n\nConditional statement: Age\n\n* Ask the user to enter their age using input and store \n  the result in a variable\n* Write a function that decides from the age what the user should be doing: \n  attending kindergarten, school, university, or working\n* Call the function, passing it the user's age, and store the \n  function's result in a variable\n* Print the contents of the variable to the screen\n\n\"\"\"\n\n\ndef main():\n    \"\"\"\n    This function is called automatically when the script is run in a console\n    Replace pass in it with your code\n    \"\"\"\n\n    age_input = input(\"Please enter your age: \")\n\n    try:\n        age = int(age_input)\n        if (age < 0):\n            print(\n                f\"There are still {abs(age)} years until you are born. Talk to you once you're born ;)\")\n        elif (0 <= age < 7):\n            print(\"Your place is in kindergarten :)\")\n        elif (7 <= age < 16):\n            print(\"Your place is at a school desk :)\")\n        elif (16 <= age < 22):\n            print(\"Your place is at a university\")\n        else:\n            print(\"Time to work!\")\n    except ValueError:\n        print(\"Age should be entered in digits ;)\")\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"learn_homework_1/if1.py","file_name":"if1.py","file_ext":"py","file_size_in_byte":1814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"310437306","text":"#!/usr/bin/env python3\n\n\"\"\"\nGoogle Code Jam\nRound 1B 2016\nProblem A\n\"\"\"\n\nletters = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n\nclass TestCase:\n    def __init__(self, n, s):\n        self.n = n\n        self.s = s\n    def solve(self):\n        evac = []\n        while sum(self.s):\n            turn = \"\"\n            for idx, p in enumerate(self.s):\n                if 2*p >= sum(self.s):\n                    turn += letters[idx]\n                    self.s[idx] -= 1\n            if turn == \"\":\n                for idx, p in enumerate(self.s):\n                    if self.s[idx] > 0:\n                        turn += letters[idx]\n                        self.s[idx] -= 1\n                        break\n            evac.append(turn)\n        return \" \".join(evac)\n    \n\ndef read_data(filename):\n    with open(filename) as f:\n        test_cases = []\n        num_test_cases = int(f.readline())\n        for _ in range(num_test_cases):\n            n = int(next(f))\n            s = [int(x) for x in next(f).split()]\n            test_case = TestCase(n, s)\n            test_cases.append(test_case)\n    return num_test_cases, test_cases\n\nif __name__ == \"__main__\":\n    num_test_cases, test_cases = read_data(\"A-small-attempt1.in\")\n    for it in range(num_test_cases):\n        test_case = test_cases[it]\n        print(\"Case #{}:\".format(it + 1), test_case.solve())","sub_path":"solutions_5753053697277952_0/Python/Juampi/a2.py","file_name":"a2.py","file_ext":"py","file_size_in_byte":1334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"498843300","text":"from __future__ import print_function\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nimport os\nimport json\n\nfrom compas.utilities import DataEncoder\n\nimport compas_rhino\nfrom compas_rv2.rhino import get_system\nfrom compas_rv2.rhino import get_scene\nfrom compas_rv2.rhino import select_filepath_save\nfrom compas_rv2.rhino import rv2_error\n\n\n__commandname__ = \"RV2file_save_as\"\n\n\nHERE = compas_rhino.get_document_dirname()\n\n\n@rv2_error()\ndef RunCommand(is_interactive):\n\n    system = get_system()\n    if not system:\n        return\n\n    scene = get_scene()\n    if not scene:\n        return\n\n    dirname = system['session.dirname']\n    filename = system['session.filename']\n    extension = system['session.extension']\n\n    filepath = select_filepath_save(dirname, extension)\n    if not filepath:\n        return\n    dirname, basename = os.path.split(filepath)\n    filename, _ = os.path.splitext(basename)\n\n    filepath = os.path.join(dirname, filename + '.' 
+ extension)\n\n # this should be templated somewhere\n # perhaps there should be a Session class/object/singleton\n\n session = {\n \"data\": {\"pattern\": None, \"form\": None, \"force\": None},\n \"settings\": scene.settings,\n }\n\n pattern = scene.get('pattern')[0]\n if pattern:\n session['data']['pattern'] = pattern.datastructure.to_data()\n\n form = scene.get('form')[0]\n if form:\n session['data']['form'] = form.datastructure.to_data()\n\n force = scene.get('force')[0]\n if force:\n session['data']['force'] = force.datastructure.to_data()\n\n with open(filepath, 'w+') as f:\n json.dump(session, f, cls=DataEncoder)\n\n\n# ==============================================================================sc\n# Main\n# ==============================================================================\n\nif __name__ == \"__main__\":\n\n RunCommand(True)\n","sub_path":"src/compas_rv2/ui/Rhino/RV2/dev/RV2file_save_as_cmd.py","file_name":"RV2file_save_as_cmd.py","file_ext":"py","file_size_in_byte":1873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"633882250","text":"import random\nfrom multiprocessing import Pool\n\ndef calculate_pi(nbr_estimates):\n\tret=0\n\tfor step in range(int(nbr_estimates)):\n\t\tx=random.uniform(0,1)\n\t\ty=random.uniform(0,1)\n\t\tis_in_unit_circle=x*x+y*y<1.0\n\t\tret+=is_in_unit_circle\n\treturn ret\n\nif __name__==\"__main__\":\n\titeration=1e8\n\tblock=100\n\tpool=Pool(processes=block)\n\twork_per_block=iteration/block\n\tworkload_per_block=[work_per_block]*block\n\tin_unit_circle=pool.map(calculate_pi,workload_per_block)\n\tpi=sum(in_unit_circle)*4/iteration\n\tprint(\"Estimated PI value : %f\\n\"%pi)\n","sub_path":"language/python/HIGH_PERFORMANCE/multiprocessing/cal_pi_multi.py","file_name":"cal_pi_multi.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"331481772","text":"from app import db, geolocator\nfrom app import constants as CONSTANTS\nimport app.crm.models as crm_models\nfrom app.common import BaseModel\nfrom app.mixins import StateMixin, AuditMixin\nfrom flask_security import current_user\n#from app.src.util.geopy import geolocator\n#from app.deals import constants as CONSTANTS\n#from app.deals import constants as CONSTANTS\n\n#from app.src.util.string_util import StringUtil\n\nclass Deal(db.Model):\n __tablename__ = 'deal'\n id = db.Column(db.Integer, primary_key=True)\n user_id = db.Column(db.Integer, db.ForeignKey('user.id'),\n nullable=False)\n list_price = db.Column(db.Integer)\n rehab_amount = db.Column(db.Integer)\n after_repair_value = db.Column(db.Integer)\n equity = db.Column(db.Integer)\n return_on_investment = db.Column(db.String(255))\n monthly_rent = db.Column(db.Integer)\n taxes = db.Column(db.Integer)\n insurance = db.Column(db.Integer)\n maintenance_percent = db.Column(db.Integer)\n management_percent = db.Column(db.Integer)\n utility_amount = db.Column(db.Integer)\n utility_description = db.Column(db.String(255))\n capex_reserves = db.Column(db.Integer)\n net_operating_income = db.Column(db.Integer)\n cap_rate = db.Column(db.String(255))\n property_id = db.Column(db.Integer, db.ForeignKey('property.id'))\n property = db.relationship('Property', uselist=False)\n #contacts = db.relationship('DealContact', backref='deal', lazy=True)\n\n def __repr__(self):\n return str(self.property)\n\n def addOwnerToDeal(self, contact):\n role = DealContactRole(name=\"Owner\")\n for dContact in 
self.contacts:\n            if dContact.contact == contact:\n                dContact.roles.append(role)\n                return\n        dealContact = DealContact(contact=contact, roles=[role])\n        self.contacts.append(dealContact)\n\n    def getInterestedContacts(self):\n        # 'self' is this Deal instance (the bare name 'deal' was undefined here)\n        return [contact for contact in current_user.contacts if contact.hasMatchingCriteriaForDeal(self)]\n\nclass Property(db.Model):\n    __tablename__ = 'property'\n    id = db.Column(db.Integer, primary_key=True)\n    address_id = db.Column(db.Integer, db.ForeignKey('address.id'))\n    property_type = db.Column(db.Integer)\n    address = db.relationship('Address', uselist=False)\n    units = db.Column(db.Integer, default=1)\n    sq_feet = db.Column(db.Integer)\n    bedrooms = db.Column(db.Integer)\n    bathrooms = db.Column(db.Integer)\n    basement_type = db.Column(db.String(255))\n    garage_type = db.Column(db.String(255))\n    last_sale_date = db.Column(db.Date)\n    owner_occupied = db.Column(db.Boolean)\n\n    __mapper_args__ = {\n        'polymorphic_identity':CONSTANTS.OTHER,\n        'polymorphic_on':property_type\n    }\n\n    def __repr__(self):\n        return str(self.address)\n\n    def getPropertyType(self):\n        return CONSTANTS.PROPERTY_TYPE[self.property_type]\n\nclass ResidentialProperty(Property):\n\n    __mapper_args__ = {\n        'polymorphic_identity':CONSTANTS.RESIDENTIAL\n    }\n\n    def __init__(self, **kwargs):\n        super(ResidentialProperty, self).__init__(**kwargs)\n\n\nclass SingleFamilyProperty(ResidentialProperty):\n\n    __mapper_args__ = {\n        'polymorphic_identity':CONSTANTS.SFR\n    }\n\n    def __init__(self, **kwargs):\n        super(SingleFamilyProperty, self).__init__(**kwargs)\n\nclass ResidentialMultiFamilyProperty(ResidentialProperty):\n\n    __mapper_args__ = {\n        'polymorphic_identity':CONSTANTS.RESIDENTIAL_MULTI_FAMILY\n    }\n\n    def __init__(self, **kwargs):\n        super(ResidentialMultiFamilyProperty, self).__init__(**kwargs)\n\nclass CommercialProperty(Property):\n\n    __mapper_args__ = {\n        'polymorphic_identity':CONSTANTS.COMMERCIAL\n    }\n\n    def __init__(self, **kwargs):\n        super(CommercialProperty, self).__init__(**kwargs)\n\nclass CommercialMultiFamilyProperty(CommercialProperty):\n\n    __mapper_args__ = {\n        'polymorphic_identity':CONSTANTS.COMMERCIAL_MULTI_FAMILY\n    }\n\n    def __init__(self, **kwargs):\n        super(CommercialMultiFamilyProperty, self).__init__(**kwargs)\n\nclass SelfStorageProperty(CommercialProperty):\n\n    __mapper_args__ = {\n        'polymorphic_identity':CONSTANTS.SELF_STORAGE\n    }\n\n    def __init__(self, **kwargs):\n        super(SelfStorageProperty, self).__init__(**kwargs)\n\nclass RetailProperty(CommercialProperty):\n\n    __mapper_args__ = {\n        'polymorphic_identity':CONSTANTS.RETAIL\n    }\n\n    def __init__(self, **kwargs):\n        super(RetailProperty, self).__init__(**kwargs)\n\nclass Address(db.Model):\n    id = db.Column(db.Integer, primary_key=True)\n    line_1 = db.Column(db.String(255))\n    line_2 = db.Column(db.String(255))\n    line_3 = db.Column(db.String(255))\n    line_4 = db.Column(db.String(255))\n    city = db.Column(db.String(255))\n    state_province = db.Column(db.String(255))\n    postal_code = db.Column(db.String(20))\n    county = db.Column(db.String(255))\n    country = db.Column(db.String(255))\n    latitude = db.Column(db.Numeric(precision=9,scale=6))\n    longitude = db.Column(db.Numeric(precision=9,scale=6))\n\n    def __init__(self, **kwargs):\n        super(Address, self).__init__(**kwargs)\n\n\n    def __repr__(self):\n        return '{}, {}, {} {}'.format(self.line_1, self.city, self.state_province, self.postal_code)\n\n    def geocode(self):\n        location = geolocator.geocode('{} {} {} {}'.format(self.line_1, self.city, self.state_province, self.postal_code))\n        if location is not None:\n            
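# geopy returns None when the address cannot be geocoded, hence the else branch below\n            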
self.latitude = location.latitude\n self.longitude = location.longitude\n else:\n self.latitude = None\n self.longitude = None\n\n#class DealContact(db.Model):\n# __tablename__ = 'dealcontact'\n# id = db.Column(db.Integer, primary_key=True)\n# deal_id = db.Column(db.Integer, db.ForeignKey('deal.id'))\n# address_id = db.Column(db.Integer, db.ForeignKey('contact.id'))\n# contact = db.relationship('Contact', uselist=False)\n# roles = db.relationship('DealContactRole', backref='contact', lazy=True)\n\n#class DealContactRole(db.Model):\n# __tablename__ = 'dealcontactrole'\n# id = db.Column(db.Integer, primary_key=True)\n# deal_contact_id = db.Column(db.Integer, db.ForeignKey('dealcontact.id'))\n# name = db.Column(db.String(255), nullable=False)\n","sub_path":"app/deals/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":6238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"84241482","text":"import requests\nfrom django.db import models\n\nfrom main.models.user import CustomUser\n\n__all__ = ['Discussion']\n\n\nclass Discussion(models.Model):\n TYPE_CHOICES = CustomUser.TYPE_CHOICES\n\n topic = models.ForeignKey(\n verbose_name='Тема', to='questionnaire.Topic', on_delete=models.CASCADE,\n )\n name = models.CharField(\n verbose_name='Название', max_length=255,\n )\n description = models.TextField(\n verbose_name='Описание',\n )\n preview = models.CharField(\n verbose_name='Предпросмотр', max_length=100,\n )\n type = models.PositiveSmallIntegerField(\n verbose_name='Тип', choices=TYPE_CHOICES,\n )\n created_at = models.DateTimeField(\n verbose_name='Дата-время создания', auto_now_add=True,\n )\n closed_at = models.DateTimeField(\n verbose_name='Дата-время закрытия', null=True, blank=True,\n )\n\n class Meta:\n verbose_name = 'Обсуждение'\n verbose_name_plural = 'Обсуждения'\n default_related_name = 'discussions'\n\n def __str__(self):\n return self.name\n\n def save(self, force_insert=False, force_update=False, using=None,\n update_fields=None):\n new = not self.id\n super().save(force_insert, force_update, using, update_fields)\n\n if new:\n data = {\n 'notification': {\n 'body': self.preview,\n 'title': 'Новое обсуждение',\n 'sound': 'default'\n },\n 'priority': 'high',\n 'data': {\n 'click_action': 'FLUTTER_NOTIFICATION_CLICK',\n 'id': '1',\n 'status': 'done'\n },\n 'to': 'cJB0a_cN1Us:APA91bFuZ47cosiH3NafguF7GdHySh_lbR5_B2LCs29Fh7tCVZSeip46LMiYuBiQGZo'\n 'Dbx9fss4SwOwZxjh2tVcDtGZTnhlyBdzaqRRFfC4oEiowhe0_bhZfCyInPw_SRxKf_cX-T2G6'\n }\n response = requests.post('https://fcm.googleapis.com/fcm/send', json=data, headers={\n 'Authorization': 'key=AAAA5AXWsQU:APA91bH9WJLN4IDMMBb631TzHuQCPrmTMZyZ328mIEPWgedff'\n 'YZTjxRL5_WQ6g_vT5lbbHNBZDiCQCgG6ydAkH2fpsZW5zGp_IYKTNu5YUSOAngT'\n 'zoviK66LNGCATR1p3EeOcBac5E2n'\n })\n print(response.content)\n","sub_path":"questionnaire/models/discussion.py","file_name":"discussion.py","file_ext":"py","file_size_in_byte":2439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"448240042","text":"# Network diagnostic/ pentesting/ whatever tool written by Christian Geer - https://github.com/H4l0g3n\r\n# Still in development\r\n# Resources:\r\n# https://www.raspberrypi.org/forums/viewtopic.php?t=188615\r\n# https://stackoverflow.com/questions/13368659/how-can-i-loop-through-an-ip-address-range-in-python\r\n# https://docs.python.org/3/library/socket.html\r\n\r\nimport socket\r\nimport os\r\nimport ipaddress\r\n\r\n\r\ndef greeting():\r\n 
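# Print the ASCII-art banner and the actions menu, then wait for the user to continue\r\n    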
print(\"\"\"\r\n _ _ ___ _____ \r\n | | | | / | | _ | \r\n | |_| |/ /| |_ _| |/' | ___ \r\n | _ / /_| \\ \\ / / /| |/ __|\r\n | | | \\___ |\\ V /\\ |_/ / (__ \r\n \\_| |_/ |_/ \\_/ \\___/ \\___|\r\n \"\"\")\r\n print(\" Wifi penetration testing suite\")\r\n input(\" Press enter to continue \")\r\n print(\"\\n\"\r\n \" 1. LAN Scan - Enumerate and scan devices on the local network\\n\"\r\n \"\\n\"\r\n \"\\n\")\r\n\r\n\r\ndef lanScan():\r\n gw = os.popen(\"ip -4 route show default\").read().split()\r\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\r\n s.connect((gw[2], 0))\r\n ipaddr = s.getsockname()[0]\r\n gateway = gw[2]\r\n host = socket.gethostname()\r\n print(\"--------------------------------------\")\r\n print(\"LOCAL\")\r\n print(\"--------------------------------------\")\r\n print(\"IP:\", ipaddr, \" GW:\", gateway, \" Host:\", host)\r\n print(\"--------------------------------------\")\r\n\r\n rangeStart = int(ipaddress.IPv4Address(input(\"\\nIP range start: \")))\r\n rangeEnd = int(ipaddress.IPv4Address(input(\"IP range end: \")))\r\n startRange = int(input(\"\\nPort range start: \"))\r\n endRange = int(input(\"Port range end: \"))\r\n print(\"Working...\\n\")\r\n\r\n for ip in range(rangeStart, rangeEnd):\r\n print(\"-\" * 50)\r\n response = os.system(\"ping -c 1 \" + str(ip) + \"\\n\")\r\n if response == 0:\r\n print(\"\\n\" + str(ipaddress.IPv4Address(ip)))\r\n for port in range(startRange, endRange):\r\n # print(port) Uncomment this for diagnostic purposes\r\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n sock.settimeout(0.5)\r\n result = sock.connect_ex((str(ip), port))\r\n if result == 0:\r\n print(\"Port: \" + str(port) + \" is open: \")\r\n try:\r\n print(socket.getservbyport(port) + \"\\n\")\r\n except OSError:\r\n print(\"Unknown service\\n\")\r\n continue\r\n continue\r\n else:\r\n pass\r\n\r\n\r\ngreeting()\r\nlanScan()\r\n\r\ninput(\"Press enter to exit. 
:)\")\r\n","sub_path":"H4v0c.py","file_name":"H4v0c.py","file_ext":"py","file_size_in_byte":2604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"170051470","text":"from PyQt5 import uic, QtWidgets\n\ndef listar_dados():\n dado_lido = lista.lineEdit.text()\n lista.listWidget.addItem(dado_lido)\n lista.lineEdit.setText(\"\")\ndef deletar():\n lista.listWidget.clear()\n\napp = QtWidgets.QApplication([])\nlista = uic.loadUi(\"Pratica.ui\") #chama o arquivo pratica\nlista.pushButton.clicked.connect(listar_dados)\nlista.pushButton_2.clicked.connect(deletar)\n\nlista.show()\napp.exec()\n\n","sub_path":"Exemplo 02/Controle.py","file_name":"Controle.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"108113572","text":"from django import forms\nfrom .models import Postingan,Komentar\n\nclass PostinganForm(forms.ModelForm):\n\tclass Meta:\n\t\tmodel = Postingan\n\t\tfields = (\n\t\t\t\t'judul',\n\t\t\t\t'isi',\n\t\t\t\t'kategori_post',\n\t\t\t\t'file',\n\t\t\t)\n\t\twidgets = {\n\t\t\t'judul' : forms.TextInput(\n\t\t\t\t\tattrs = {\n\t\t\t\t\t\t'placeholder' : 'Apa yang mau kamu bagikan?',\n\t\t\t\t\t\t'class' : 'form-group form-control',\n\t\t\t\t\t\t'name' : 'judul'\n\t\t\t\t\t}\n\t\t\t\t),\n\t\t\t'isi' : forms.Textarea(\n\t\t\t\t\tattrs = {\n\t\t\t\t\t\t'placeholder' : 'Apa yang ingin kamu ceritakan?',\n\t\t\t\t\t\t'class' : 'form-group form-control',\n\t\t\t\t\t}\n\t\t\t\t)\n\n\t\t}\n\nclass KomentarForm(forms.ModelForm):\n\tclass Meta:\n\t\tmodel = Komentar\n\t\tfields = ('isi',)\n\t\twidgets={\n\t\t\t'isi' : forms.Textarea(\n\t\t\t\tattrs={\n\t\t\t\t\t'placeholder' : 'Masukkan komentar di sini',\n\t\t\t\t\t'class' : 'form-group form-control',\n\t\t\t\t}\n\t\t\t)\n\t\t}","sub_path":"grappost/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"617551634","text":"import numpy as np\nfrom data_reader import *\nimport csv\n\nDATAPATH = ' p a t h '\n\nclasses, data = read_all(DATAPATH)\ndata = np.array(data)\n\nmeans = [[\"class_id\", \"mean\"]]\n\nfor i in range(data.shape[0]):\n mean = np.mean(data[i], axis=0)\n means.append([classes[i][:-13], np.array2string(mean, separator=\" \", threshold=3000, max_line_width=999999)])\n\nwith open(\"C:\\\\users\\lexcorp\\Desktop\\class_mean.csv\", \"w\") as f:\n writer = csv.writer(f)\n writer.writerows(means)","sub_path":"class_mean.py","file_name":"class_mean.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"441248704","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# Complete the queensAttack function below.\ndef queensAttack(n, k, r_q, c_q, obstacles):\n up_lim = n+1\n down_lim = 0\n right_lim = n+1\n left_lim = 0\n for obs in obstacles:\n #up_lim\n if obs[1] == c_q:\n if obs[0] > r_q and obs[0] < up_lim:\n up_lim = obs[0]\n #down limit\n if obs[0] < r_q and obs[0] > up_lim:\n down_lim = obs[0]\n if obs[0] == r_q:\n if obs[1] > c_q and obs[1] < right_lim:\n right_lim = obs[1]\n #down limit\n if obs[1] < c_q and obs[1] > left_lim:\n left_lim = obs[1]\n # for i in range(0,n):\n # if obs[r_q + i][c_q + i]\n \n if up_lim > n:\n up_lim = n\n if right_lim > n:\n right_lim = n\n if down_lim < 1:\n down_lim = 1\n if left_lim < 1:\n left_lim = 1\n \n 
print(left_lim)\n\n left, right, up, down = c_q-left_lim, right_lim-c_q, up_lim-r_q, r_q-down_lim\n # print(up_lim, down_lim, right_lim, left_lim)\n diag_ur, diag_ul, diag_br, diag_bl = min(up, right), min(up, left), min(down, right), min(down, left)\n moves = [left, right, up, down, diag_ur, diag_ul, diag_br, diag_bl]\n print(moves)\n return sum(moves)\n \n\n# function obstacleFilter(obstacles, n):\n\n\n \n \nif __name__ == '__main__':\n f = open('in1.txt', 'r')\n\n nk = f.readline().split()\n n = int(nk[0])\n k = int(nk[1])\n r_qC_q = f.readline().split()\n r_q = int(r_qC_q[0])\n c_q = int(r_qC_q[1])\n obstacles = []\n\n for _ in range(k):\n obstacles.append(list(map(int, f.readline().rstrip().split())))\n\n result = queensAttack(n, k, r_q, c_q, obstacles)\n print(result)\n f.close()","sub_path":"hckrrnk/queens-attack/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"486373588","text":"# what is variable-variable is a container/box where we can store data\n#Data Types: Number(int,float,Complex), String, Char, Boolean(True/False, List[], Tuple(),Set{}, Dict{}\n#Integer\nx=100\ny=5\nprint(x/y)\n#Float\nx=2.35\ny=5.37\nprint(x+y)\n#String\nfirst_name=\"Anishur\"\nlast_name=\" Rahman\"\nfull_name=first_name+last_name\nprint(full_name)\n#Boolean\nx=True\nY=False\nprint(x)\n# List\nx=[\"Orange\",\"Pianaple\",\"Mango\"]\nprint(x)\n# Tuple\nx=(\"Orange\",\"Pianaple\",\"Mango\")\nprint(x)\n#set\nx={\"Orange\",\"Pineaple\",\"Mango\"}\nprint(x)\n# dictionary\nx={\"Name\":\"Anishur\",\"Age\":23,\"Salary\":1200000}\nprint(x)","sub_path":"VARIABLE.py","file_name":"VARIABLE.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"264465517","text":"#\n# @lc app=leetcode id=14 lang=python3\n#\n# [14] Longest Common Prefix\n#\n# https://leetcode.com/problems/longest-common-prefix/description/\n#\n# algorithms\n# Easy (32.98%)\n# Total Accepted: 409.8K\n# Total Submissions: 1.2M\n# Testcase Example: '[\"flower\",\"flow\",\"flight\"]'\n#\n# Write a function to find the longest common prefix string amongst an array of\n# strings.\n#\n# If there is no common prefix, return an empty string \"\".\n#\n# Example 1:\n#\n#\n# Input: [\"flower\",\"flow\",\"flight\"]\n# Output: \"fl\"\n#\n#\n# Example 2:\n#\n#\n# Input: [\"dog\",\"racecar\",\"car\"]\n# Output: \"\"\n# Explanation: There is no common prefix among the input strings.\n#\n#\n# Note:\n#\n# All given inputs are in lowercase letters a-z.\n#\n#\n\n\nclass Solution:\n def longestCommonPrefix(self, strs) -> str:\n lcp = ''\n if len(strs) <= 2:\n if len(strs) <= 1:\n lcp = ''.join(strs)\n else:\n for i in range(min(len(strs[0]), len(strs[1]))):\n if strs[0][i] == strs[1][i]:\n lcp += strs[0][i]\n else:\n break\n\n return lcp\n\n else:\n lcpLeft = self.longestCommonPrefix(strs[0:len(strs)//2])\n lcpRight = self.longestCommonPrefix(strs[len(strs)//2:])\n lcp = self.longestCommonPrefix([lcpLeft, lcpRight])\n\n return lcp\n\n\ns = Solution()\nprint(s.longestCommonPrefix([]))\n","sub_path":"LeetCode/14.longest-common-prefix.py","file_name":"14.longest-common-prefix.py","file_ext":"py","file_size_in_byte":1417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"553940531","text":"\nimport datetime\nimport random\nstart_time = datetime.datetime.now()\n\ndef insertion_sort(temp):\n\n # Traverse 
through 1 to len(arr)\n    for i in range(1, len(temp)):\n\n        key = temp[i]\n\n        # Move elements of arr[0..i-1], that are\n        # greater than key, to one position ahead\n        # of their current position\n        j = i- 1\n        while j >= 0 and key < temp[j]:\n            temp[j + 1] = temp[j]\n            j -= 1\n        temp[j + 1] = key\n    print(temp)\n#####################\n\ndef selection_sort(L):\n    for i in range(len(L) - 1):\n        min_index = i\n        for j in range(i + 1, len(L)):  # scan through the last element too\n\n            if L[j] < L[min_index]:\n                min_index = j\n\n        L[i], L[min_index] = L[min_index], L[i]\n    print(L)\n\n\n#####################\n\ndef bubble_sort(list1):\n    count=len(list1)\n    for i in range(count):\n        for j in range(count-1):\n            if list1[j]>list1[j+1]:\n                list1[j],list1[j+1]=list1[j+1],list1[j]\n    print (list1)\n\ndef merge_sort(myList):\n    if len(myList) > 1:\n        mid = len(myList) // 2\n        left = myList[:mid]\n        right = myList[mid:]\n\n        merge_sort(left)\n        merge_sort(right)\n\n        i = 0\n        j = 0\n\n        k = 0\n\n        while i < len(left) and j < len(right):\n            if left[i] < right[j]:\n                myList[k] = left[i]\n                i += 1\n            else:\n                myList[k] = right[j]\n                j += 1\n            k += 1\n\n        while i < len(left):\n            myList[k] = left[i]\n            i += 1\n            k += 1\n\n        while j < len(right):\n            myList[k] = right[j]\n            j += 1\n            k += 1\n\ndef partition(arr, low, high):\n    i = (low-1)  # index of smaller element\n    pivot = arr[high]  # pivot\n    for j in range(low, high):\n\n\n        if arr[j] <= pivot:\n\n            i = i+1\n            arr[i], arr[j] = arr[j], arr[i]\n\n    arr[i+1], arr[high] = arr[high], arr[i+1]\n    return (i+1)\n\ndef quick_sort(arr, low, high):\n    if len(arr) == 1:\n        return arr\n    if low < high:\n\n        pi = partition(arr, low, high)\n\n        quick_sort(arr, low, pi-1)\n        quick_sort(arr, pi+1, high)\n\ndef calculateTime(start_time):\n    end_time = datetime.datetime.now()\n    time_diff = (end_time - start_time)\n    execution_time = time_diff.total_seconds()\n    return execution_time\n\ndef experimenta (aux):\n\n\n    arr1 = []\n    arr2 = []\n    arr3 = []\n    arr4 = []\n    arr5 = []\n    arr6 = []\n    arr7 = []\n    arr8 = []\n    arr9 = []\n    arr10 = []\n    print(\"number of array elements:\")\n    x = input()\n    x = int(x)\n    for i in range(x):\n        n = random.randint(1000000, 10000000)\n        # arry.append(n)\n        arr1.append(n)\n        arr2.append(n)\n        arr3.append(n)\n        arr4.append(n)\n        arr5.append(n)\n        arr6.append(n)\n        arr7.append(n)\n        arr8.append(n)\n        arr9.append(n)\n        arr10.append(n)\n\n    if aux == 'Insertion':\n        print(\"Insertion sort algorithm:\\n\")\n        insertion_sort(arr1)\n        insertion_sort(arr2)\n        insertion_sort(arr3)\n        insertion_sort(arr4)\n        insertion_sort(arr5)\n        insertion_sort(arr6)\n        insertion_sort(arr7)\n        insertion_sort(arr8)\n        insertion_sort(arr9)\n        insertion_sort(arr10)\n        execution_time = calculateTime(start_time)\n        print(\"--- %s seconds ---\" % execution_time)\n\n    elif aux == 'Selection':\n        print(\"Selection sort algorithm:\\n\")\n        selection_sort(arr1)\n        selection_sort(arr2)\n        selection_sort(arr3)\n        selection_sort(arr4)\n        selection_sort(arr5)\n        selection_sort(arr6)\n        selection_sort(arr7)\n        selection_sort(arr8)\n        selection_sort(arr9)\n        selection_sort(arr10)\n        execution_time = calculateTime(start_time)\n        print(\"--- %s seconds ---\" % execution_time)\n\n    elif aux == 'Bubble':\n        print(\"Bubble sort algorithm:\\n\")\n        bubble_sort(arr1)\n        bubble_sort(arr2)\n        bubble_sort(arr3)\n        bubble_sort(arr4)\n        bubble_sort(arr5)\n        bubble_sort(arr6)\n        bubble_sort(arr7)\n        bubble_sort(arr8)\n        bubble_sort(arr9)\n        bubble_sort(arr10)\n        execution_time = calculateTime(start_time)\n        print(\"--- %s seconds ---\" % execution_time)\n\n    elif aux == 'Merge':\n        print(\"Merge sort algorithm:\")\n        merge_sort(arr1)\n        merge_sort(arr2)\n        
merge_sort(arr3)\n        merge_sort(arr4)\n        merge_sort(arr5)\n        merge_sort(arr6)\n        merge_sort(arr7)\n        merge_sort(arr8)\n        merge_sort(arr9)\n        merge_sort(arr10)\n        print('\\n', arr1, '\\n', arr2, '\\n', arr3,\n              '\\n', arr4, '\\n', arr5, '\\n', arr6,\n              '\\n', arr7, '\\n', arr8, '\\n', arr9,\n              '\\n', arr10)\n\n        execution_time = calculateTime(start_time)\n        print(\"--- %s seconds ---\" % execution_time)\n\n\n    elif aux == 'Quick':\n        print(\"Quick sort algorithm:\")\n        quick_sort(arr1, 0, len(arr1) - 1)\n        quick_sort(arr2, 0, len(arr2) - 1)\n        quick_sort(arr3, 0, len(arr3) - 1)\n        quick_sort(arr4, 0, len(arr4) - 1)\n        quick_sort(arr5, 0, len(arr5) - 1)\n        quick_sort(arr6, 0, len(arr6) - 1)\n        quick_sort(arr7, 0, len(arr7) - 1)\n        quick_sort(arr8, 0, len(arr8) - 1)\n        quick_sort(arr9, 0, len(arr9) - 1)\n        quick_sort(arr10, 0, len(arr10) - 1)\n\n        print('\\n', arr1, '\\n', arr2, '\\n', arr3,\n              '\\n', arr4, '\\n', arr5, '\\n', arr6,\n              '\\n', arr7, '\\n', arr8, '\\n', arr9,\n              '\\n', arr10)\n\n        execution_time = calculateTime(start_time)\n        print(\"--- %s seconds ---\" % execution_time)\n\n\n\n\nexperimenta('Insertion')\n# experimenta('Selection')\n# experimenta('Bubble')\n# experimenta('Merge')\n# experimenta('Quick')\n\n\n","sub_path":"metode_de_sortare.py","file_name":"metode_de_sortare.py","file_ext":"py","file_size_in_byte":5805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"291487164","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport cv2\n\nimage = np.full((9,9),63,np.uint8)\nimage2 = np.full((9,9),223,np.uint8)\nstart_point = (3,3)\nend_point = (5,5)\nthickness = -1\ncolor = 127\nimage = cv2.rectangle(image, start_point, end_point, color, thickness) \nimage2 = cv2.rectangle(image2, start_point, end_point, color+20, thickness) # 147-157 seems nice\nimage_final = np.hstack((image,image2))\nplt.imshow(image_final,cmap='gray', vmin=0, vmax=255)\nplt.show()\n","sub_path":"TP1 - Vision/4joaco.py","file_name":"4joaco.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"437340945","text":"import mne\nfrom mne.utils import verbose, fill_doc\nimport numpy as np\nfrom bids import BIDSLayout\nfrom scipy.signal import detrend\n\nfrom ieeg.timefreq.utils import to_samples\nfrom ieeg.calc import scaling, stats\nfrom ieeg import Doubles, Signal\n\n\ndef crop_empty_data(raw: mne.io.Raw, start_pad: str = \"10s\",\n                    end_pad: str = \"10s\") -> mne.io.Raw:\n    \"\"\"Crops out long stretches of data with no events.\n\n    Takes raw instance with annotated events and crops the instance so that the\n    raw file starts at start_pad before the first event and stops an amount of\n    time in seconds given by end_pad after the last event.\n\n    Parameters\n    ----------\n    raw : mne.io.Raw\n        The raw file to crop.\n    start_pad : str, optional\n        The amount of time to pad the start of the file, by default \"10s\"\n    end_pad : str, optional\n        The amount of time to pad the end of the file, by default \"10s\"\n\n    Returns\n    -------\n    mne.io.Raw\n        The cropped raw file.\n    \"\"\"\n\n    crop_list = []\n\n    start_pad = to_samples(start_pad, raw.info['sfreq']) / raw.info['sfreq']\n    end_pad = to_samples(end_pad, raw.info['sfreq']) / raw.info['sfreq']\n\n    # split annotations into blocks\n    annot = raw.annotations.copy()\n    block_idx = [idx + 1 for idx, val in\n                 enumerate(annot) if 'BAD boundary' in val['description']]\n    block_annot = [annot[i: j] for i, j in\n                   zip([0] + block_idx, block_idx +\n                   
if block_idx[-1] != len(annot) else []))]\n\n for block_an in block_annot:\n # remove boundary events from annotations\n no_bound = None\n for an in block_an:\n if 'boundary' not in an['description']:\n if no_bound is None:\n no_bound = mne.Annotations(**an)\n else:\n an.pop('orig_time')\n no_bound.append(**an)\n\n # Skip if block is all boundary events\n if no_bound is None:\n continue\n # get start and stop time from raw.annotations onset attribute\n t_min = no_bound.onset[0] - start_pad\n t_max = no_bound.onset[-1] + end_pad\n\n # create new cropped raw file\n crop_list.append(raw.copy().crop(tmin=t_min, tmax=t_max))\n\n return mne.concatenate_raws(crop_list)\n\n\n@fill_doc\n@verbose\ndef channel_outlier_marker(input_raw: Signal, outlier_sd: float = 3,\n max_rounds: int = np.inf, verbose: bool = True\n ) -> list[str]:\n \"\"\"Identify bad channels by variance.\n\n Parameters\n ----------\n input_raw : Signal\n Raw data to be analyzed.\n outlier_sd : int, optional\n Number of standard deviations above the mean to be considered an\n outlier, by default 3\n max_rounds : int, optional\n Maximum number of variance estimations, by default runs until no\n more bad channels are found.\n %(verbose)s\n\n Returns\n -------\n list[str]\n List of bad channel names.\n \"\"\"\n\n data = input_raw.get_data('data') # (trials X) channels X time\n names = input_raw.copy().pick('data').ch_names\n bads = [] # output for bad channel names\n\n # Pop out names to bads output using comprehension list\n for ind, i in stats.outlier_repeat(data, outlier_sd, max_rounds, 0):\n bads.append(names[ind])\n # log channels excluded per round\n if verbose:\n mne.utils.logger.info(f'outlier round {i} channels: {bads}')\n\n return bads\n\n\n@fill_doc\n@verbose\ndef trial_ieeg(raw: mne.io.Raw, event: str, times: Doubles,\n baseline: str = None, basetimes: Doubles = None,\n mode: str = \"mean\", outliers: int = None, verbose=None,\n **kwargs) -> mne.Epochs:\n \"\"\"Epochs data from a mne Raw iEEG instance.\n\n Takes a mne Raw instance and epochs the data around a specified event. If\n baseline is specified, the data is also epoched around the baseline event\n and the baseline is subtracted from the data epochs.\n\n Parameters\n ----------\n raw : mne.io.Raw\n The raw data to epoch.\n event : str\n The event to epoch around.\n times : tuple[float, float]\n The time window to epoch around the event.\n baseline : str\n The event to epoch the baseline.\n basetimes : tuple[float, float]\n The time window to epoch around the baseline event.\n mode : str\n The mode to use for baseline rescaling. 
See `mne.baseline.rescale` for\n        more information.\n    %(picks_all)s\n    %(reject_epochs)s\n    %(flat)s\n    %(decim)s\n    %(epochs_reject_tmin_tmax)s\n    %(detrend_epochs)s\n    %(proj_epochs)s\n    %(on_missing_epochs)s\n    %(verbose)s\n\n    Returns\n    -------\n    mne.Epochs\n        The epoched data.\n    \"\"\"\n\n    # determine the events\n    events, ids = mne.events_from_annotations(raw)\n    dat_ids = [ids[i] for i in mne.event.match_event_names(ids, event)]\n    event_ids = {key.replace(event, \"\").strip(\"/\"): value for key, value in\n                 ids.items() if value in dat_ids}\n    # epoch the data\n\n    if baseline is None:\n        epochs = mne.Epochs(raw, events, event_id=event_ids, tmin=times[0],\n                            tmax=times[1], baseline=None, verbose=verbose,\n                            **kwargs)\n    elif basetimes is None:\n        raise ValueError(\"Baseline event input {} must be paired with times\"\n                         \"\".format(baseline))\n    else:\n        kwargs['preload'] = True\n        epochs = trial_ieeg(raw, event, times, **kwargs)\n        base = trial_ieeg(raw, baseline, basetimes, **kwargs)\n        scaling.rescale(epochs, base, mode=mode, copy=False)\n\n    if outliers is not None:\n        data = detrend(epochs.get_data(), axis=-1, type=\"linear\")\n        max_amp = np.max(np.abs(data), axis=-1)  # avoid shadowing builtin max\n        std = np.std(data, axis=-1)\n        reject = np.any(max_amp > (outliers * std), axis=-1)\n        epochs.drop(reject, reason=\"outlier\")\n\n    return epochs\n\n\nif __name__ == \"__main__\":\n    from os import path\n    from ieeg.io import raw_from_layout\n    # %% Set up logging\n    log_filename = \"output.log\"\n    # op.join(LAB_root, \"Aaron_test\", \"Information.log\")\n    mne.set_log_file(log_filename,\n                     \"%(levelname)s: %(message)s - %(asctime)s\",\n                     overwrite=True)\n    mne.set_log_level(\"INFO\")\n    HOME = path.expanduser(\"~\")\n    LAB_root = path.join(HOME, \"Box\", \"CoganLab\")\n    TASK = \"SentenceRep\"\n    sub_num = 29\n    subj = \"D\" + str(sub_num).zfill(4)\n    # layout, raw, D_dat_raw, D_dat_filt = get_data(sub_num, TASK)\n    bids_root = LAB_root + \"/BIDS-1.0_SentenceRep/BIDS\"\n    layout = BIDSLayout(bids_root, derivatives=True)\n    filt = raw_from_layout(layout.derivatives['filt'], subject=subj,\n                           extension='.edf', desc='filt', preload=True)\n    raw = raw_from_layout(layout, subject=subj, extension='.edf', desc=None,\n                          preload=True)\n    events, event_id = mne.events_from_annotations(filt)\n    auds = mne.Epochs(filt, events, event_id['Audio'], baseline=None, tmin=-2,\n                      tmax=5, preload=True, detrend=1)\n    bads = channel_outlier_marker(auds)\n    auds.info['bads'] = bads\n","sub_path":"ieeg/navigate.py","file_name":"navigate.py","file_ext":"py","file_size_in_byte":7206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"254846930","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon May 17 13:54:28 2021\n@author: sylvain\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom pandas import read_csv\nimport time\n\nimport os\nos.chdir(os.path.dirname(os.path.realpath(__file__)))\n\n\nif __name__ == \"__main__\":\n\n    # data = np.loadtxt(\"log-benchmark-pi.txt\", skiprows=1, delimiter=\"\\t\")\n\n    data = read_csv(\"log-benchmark-pi.txt\", delimiter=\"\\t\")\n\n    for i in range(data.shape[0]):\n        x = data[\"time\"][i].split(':')\n        data[\"time\"][i] = float(x[1])\n\n\n\n\n    for k in range(1,5):\n        p = data.loc[data[\"nb-process\"] == k]\n        plt.scatter(p[\"nb-iter\"], p[\"time\"], label=f\"nb-processes = {k}\")\n\n    plt.grid()\n\n    ax = plt.gca()\n    ax.set_xscale('log')\n    ax.set_yscale('log')\n    plt.xlabel(\"N, number of iterations\")\n    plt.ylabel(\"execution time (s)\")\n\n\n    plt.legend()\n\n    
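# Monte Carlo pi estimation does O(N) work per run, so on this log-log plot each curve should be roughly a straight line of slope 1\n    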
plt.show()\n","sub_path":"MPI_Kahn_C/test/data-benchmark/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"13988342","text":"'''\nScript: cli_main.py\nDate: 11/24/2019\nDev: Kory Shaffer\n'''\n\nimport sys\nimport io\nimport os.path\nfrom donor_models import Charity\nfrom donor_models import Donor\n\ndef main():\n\n input = \"\".join(open(\"mailroominput.txt\", \"r\").readlines())\n sys.stdin = io.StringIO(input)\n\n local_charity = Charity('Local Charity')\n local_charity.AddDonor('William Gates, III')\n local_charity.Donors['William Gates, III'].AddDonation(60000.00)\n local_charity.Donors['William Gates, III'].AddDonation(50000.00)\n local_charity.AddDonor('Mark Zuckerberg')\n local_charity.Donors['Mark Zuckerberg'].AddDonation(10000.00)\n local_charity.Donors['Mark Zuckerberg'].AddDonation(60000.00)\n local_charity.Donors['Mark Zuckerberg'].AddDonation(396.10)\n local_charity.AddDonor('Jeff Bezos')\n local_charity.Donors['Jeff Bezos'].AddDonation(877.33)\n\n user_input(local_charity)\n\ndef user_input(local_charity):\n selection = ''\n while selection != '5':\n selection = input('\\nPlease select type one of following commands: \\n' \n '1 : Add a Donor\\n'\n '2 : Send Donation Thank You\\n' \n '3 : Create a Report\\n'\n '4 : Send letters to all donors\\n'\n '5 : QUIT\\n ')\n if selection == '1':\n name = input('\\n What is the Donors Name\\n')\n local_charity.AddDonor(name)\n print('\\n {:s} has been added to the donor list\\n'.format(name))\n\n elif selection == '2':\n name = input('\\n What is the Donors Name\\n')\n amount = None\n while not amount:\n try:\n amount = float(input('\\n How much did they donate?\\n'))\n except ValueError:\n print('\\nplease enter a valid dollar amount\\n')\n try:\n local_charity.Donors[name].send_thank_you(amount)\n except KeyError:\n print('\\n Please try again with a valid donor name or add your requested donor to the database')\n\n print('\\n A thank you has been made for {:s} in the ammount of {:.2f}\\n'.format(name, amount))\n\n elif selection == '3':\n local_charity.create_report()\n elif selection == '4':\n local_charity.send_all_thank_yous()\n elif selection == '5':\n print('QUITTING')\n else:\n print('INVALID SELECTION')\n\nif __name__ == '__main__':\n main()","sub_path":"students/K_Shaffer/mailroom/cli_main.py","file_name":"cli_main.py","file_ext":"py","file_size_in_byte":2486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"304354544","text":"from flask import Flask, send_from_directory, make_response, request, send_file\nimport jwt\nimport wtforms_json\nfrom .ays import ays_api\nfrom .oauth import oauth_api\nfrom .webhooks import webhooks_api\nfrom .cockpit import cockpit_api\nfrom JumpScale import j\n\n\napp = Flask(__name__)\n\napp.config[\"WTF_CSRF_ENABLED\"] = False\nwtforms_json.init()\n\nlogger = j.logger.get('j.cockpit.api')\n\n\ndef process_jwt_token():\n authorization = request.cookies.get(\n 'jwt',\n request.headers.get(\n 'Authorization',\n None\n ))\n\n if authorization is None:\n response = make_response('Not JWT token')\n response.status_code = 401\n return response\n\n msg = \"\"\n ss = authorization.split(' ', 1)\n if len(ss) != 2:\n msg = \"Unauthorized\"\n else:\n type, token = ss[0], ss[1]\n if type.lower() == 'bearer':\n try:\n headers = jwt.get_unverified_header(token)\n payload = jwt.decode(token, app.config['oauth'].get('jwt_key'), 
algorithms=[headers['alg']], audience=app.config['oauth']['organization'], issuer='itsyouonline')  # decode() takes a list of allowed algorithms\n                # case JWT is for an organization\n                if 'globalid' in payload and payload['globalid'] == app.config['oauth'].get('organization'):\n                    return\n\n                # case JWT is for a user\n                if 'scope' in payload and 'user:memberof:%s' % app.config['oauth'].get('organization') in payload['scope'].split(','):\n                    return\n\n                msg = 'Unauthorized'\n            except jwt.ExpiredSignatureError as e:\n                msg = 'Your JWT has expired'\n\n            except jwt.DecodeError as e:\n                msg = 'Your JWT is invalid'\n            except Exception as e:\n                msg = 'Unexpected error : %s' % str(e)\n\n        else:\n            msg = 'Your JWT is invalid'\n\n    logger.error(msg)\n    response = make_response(msg)\n    response.status_code = 401\n    return response\n\n\nays_api.before_request(process_jwt_token)\ncockpit_api.before_request(process_jwt_token)\n\napp.register_blueprint(ays_api)\napp.register_blueprint(oauth_api)\napp.register_blueprint(webhooks_api)\napp.register_blueprint(cockpit_api)\n\n\n@app.route('/apidocs/')\ndef send_js(path):\n    root = j.sal.fs.joinPaths(j.sal.fs.getParent(__file__), 'apidocs')\n    return send_from_directory(root, path)\n\n@app.route('/', methods=['GET'])\ndef home():\n    path = j.sal.fs.joinPaths(j.sal.fs.getParent(__file__), 'index.html')\n    print(path)\n    return send_file(path)\n\nif __name__ == \"__main__\":\n    app.run(debug=True, host='0.0.0.0')\n","sub_path":"jscockpit/ays_api/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"323498956","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jan 11 13:56:00 2018\n\n@author: bgris\n\"\"\"\n\n#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jan 10 09:33:51 2018\n\n@author: bgris\n\"\"\"\n\n#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Dec 8 12:24:15 2017\n\n@author: bgris\n\"\"\"\n\nimport structured_vector_fields as struct\nimport function_compute_pointsvectors as cmp\nimport group\nimport pytest\nimport numpy as np\nimport odl\nimport estimate_structured_base_pointsvectors as est_coeff\nimport os\nimport matplotlib.pyplot as plt\nimport numpy.random as rd\n#%% functions\ndef Rtheta(theta,points):\n    # theta is the angle, in rad\n    # input = list of points, for ex given by space.points() or\n    # np.array(vect_field).T\n    #output = list of points of same size, rotated of an angle theta\n\n    points_rot=np.empty_like(points).T\n    points_rot[0]=np.cos(theta)*points.T[0].copy() - np.sin(theta)*points.T[1].copy()\n    points_rot[1]=np.sin(theta)*points.T[0].copy() + np.cos(theta)*points.T[1].copy()\n\n    return points_rot.T.copy()\n#\n\n\ndef Rot_inf(points):\n    # theta is the angle, in rad\n    # input = list of points, for ex given by space.points() or\n    # np.array(vect_field).T\n    #output = list of points of same size, rotated of an angle theta\n\n    points_rot=np.empty_like(points).T\n    points_rot[0] = -points.T[1].copy()\n    points_rot[1] = points.T[0].copy()\n\n    return points_rot.T.copy()\n#\n\ndef generate_image_rectangle(space, a, b, width):\n\n    \"\"\"\n    ONLY DIMENSION 2\n\n    generates a black and white image of a 'finger' with 2 articulations\n    at a and b, with ending point at c and constant width width\n    \"\"\"\n\n    dim=2\n    points=space.points().T\n\n    vector_ab_unit, vector_ab_norm_orth, vector_ab_norm = cmp.compute_vect_unit(a, b)\n    #width_list = width * vector_ab_unit\n    limit = 0.2*vector_ab_norm\n    limit_orth = 0.2*width\n    width_list_orth = 
\ndef generate_image_rectangle(space, a, b, width):\n\n    \"\"\"\n    ONLY DIMENSION 2\n\n    generates a black and white image of the rectangle going from a to b\n    with constant width width\n    \"\"\"\n\n    dim=2\n    points=space.points().T\n\n    vector_ab_unit, vector_ab_norm_orth, vector_ab_norm = cmp.compute_vect_unit(a, b)\n    #width_list = width * vector_ab_unit\n    limit = 0.2*vector_ab_norm\n    limit_orth = 0.2*width\n    width_list_orth = width * vector_ab_norm_orth\n\n    points_prod_ab = sum([(points[u] - a[u])*vector_ab_unit[u] for u in range(dim)])\n\n    points_prod_ab_orth = sum([(points[u] - a[u] + 0.5*width_list_orth[u])*vector_ab_norm_orth[u] for u in range(dim)])\n\n    I_arti0 = (0-limit <= points_prod_ab )*(points_prod_ab <= vector_ab_norm + limit)\n    I_arti0 *= (points_prod_ab_orth >= 0 - limit_orth)* (points_prod_ab_orth <= width + limit_orth)\n\n\n    return space.element((I_arti0 == 1))\n#\n\n\ndef generate_vectorfield_rotationrectangle(space, a, b, width):\n\n    \"\"\"\n    ONLY DIMENSION 2\n\n    generates the vector field of an infinitesimal rotation around a,\n    restricted to the rectangle going from a to b with constant width width\n    \"\"\"\n\n    dim = 2\n    points = space.points().T\n    I = generate_image_rectangle(space, a, b, width)\n    points_a = np.array([points[u] - a[u] for u in range(dim)])\n    vect = space.tangent_bundle.element(Rot_inf(points_a.T).T)\n\n    return vect*I\n\n#\n\ndef generate_vectorfield_translationrectangle(space, a, b, width):\n\n    \"\"\"\n    ONLY DIMENSION 2\n\n    generates the constant vector field a - b (a translation),\n    restricted to the rectangle going from a to b with constant width width\n    \"\"\"\n\n    dim = 2\n    I = generate_image_rectangle(space, a, b, width)\n    vect = space.tangent_bundle.element([(a[u] - b[u]) * space.one() for u in range(2)])\n\n    return vect*I\n\n#\n\n\ndef generate_GD_from_athetas(a, theta_b, r_b):\n    b = [a[0] + r_b*np.cos(theta_b), a[1] + r_b*np.sin(theta_b)]\n\n    return np.reshape(np.array([a, b]), (-1, 1)).squeeze()\n#\ndef generate_random_param(n, r_b):\n    param = []\n    for i in range(n):\n        theta_b = rd.uniform(0, 2*np.pi)\n        a0 = rd.uniform(-1, 1)\n        a1 = rd.uniform(-1, 1)\n        u = generate_GD_from_athetas([a0, a1], theta_b, r_b)\n        param.append(u.copy())\n    return np.array(param).T\n\ndef generate_truth_from_param(param, cont0, cont1):\n\n    v_temp0 = generate_vectorfield_rotationrectangle(space, param[0:2], param[2:4], width).copy()\n    v_temp1 = generate_vectorfield_translationrectangle(space, param[0:2], param[2:4], width).copy()\n    truth_temp = np.empty((space.shape[0], space.shape[1], 2))\n    truth_temp[:, :, 0] = cont0 * v_temp0[0] + cont1 * v_temp1[0]\n    truth_temp[:, :, 1] = cont0 * v_temp0[1] + cont1 * v_temp1[1]\n\n    return truth_temp.copy()\n#\n\n#%% generate data\n\n\nspace = odl.uniform_discr(\n    min_pt =[-10, -10], max_pt=[10, 10], shape=[512, 512],\n    dtype='float32', interp='linear')\n\n\nwidth = 1\nvector_fields_list = []\nimage_list = []\n#a_list = [[0,0], [0,0], [0,0]]\n#b_list = [[0, 2], [0, 2], [-2, 0]]\n#c_list = [[0, 5], [-5, 2], [-5, 0]]\n#nbdata = 3\n#\n#for i in range(nbdata):\n#    a = a_list[i]\n#    b = b_list[i]\n#    c = c_list[i]\n#    vector_fields_list.append(generate_vectorfield_2articulations_0(space, a, b, c, width))\n#    image_list.append(generate_image_2articulations(space, a, b, c, width))\n##\n\n\n\n\n\nr_b = 4\nsigma = 0.1\n\nnbdata = 10\nparam = generate_random_param(nbdata, r_b)\nCont = rd.uniform(-2, 2, [nbdata, 2])\npoints_list = []\nvectors_list = []\nvector_ab_unit, vector_ab_norm_orth, ab_norm = cmp.compute_vect_unit(param.T[0][0:2], param.T[0][2:4])\nnb_ab = int((ab_norm + 0.4*ab_norm) / sigma) +1\nnb_ab_orth = int(2 * width / sigma) +1\n\n\nfor i in range(nbdata):\n    a = param.T[i][0:2]\n    b = param.T[i][2:4]\n    truth_temp = generate_truth_from_param(param.T[i], Cont[i][0], Cont[i][1]).copy()\n    image_list.append(generate_image_rectangle(space, a, b, width))\n\n    vector_fields_list.append(truth_temp.copy())\n    points, vectors = cmp.compute_pointsvectors_rectangle_nb(a, b, 1.2 * 
width, sigma, nb_ab, nb_ab_orth)\n points_list.append(points.copy())\n vectors_list.append(vectors.copy())\n#\n\n\n\n#%% Create projected vector fields (structured and unstructured)\n#sigma = 0.5\n#width = 1\n## Set number of points\n#vector_ab_unit, vector_ab_norm_orth, ab_norm = cmp.compute_vect_unit(a_list[0], b_list[0])\n#vector_bc_unit, vector_bc_norm_orth, bc_norm = cmp.compute_vect_unit(b_list[0], c_list[0])\n#nb_ab = int((ab_norm + 0.2*width) / sigma) +1\n#nb_ab_orth = int(2 * width / sigma) +1\n#nb_bc = int((bc_norm + 0.2*width) / sigma) +1\n#nb_bc_orth = int(2*width / sigma) +1\n#\n\n\n#points_temp, vectors_temp = cmp.compute_pointsvectors_2articulations_nb(a_list[0], b_list[0], c_list[0], width, sigma, nb_ab, nb_ab_orth, nb_bc, nb_bc_orth)\nnb_vectors = len(vectors_list[0][0])\nnb_points = len(points_list[0][0])\n\ndim = 2\n\n\n# Create list of structured and unstructured\ndef kernel(x, y):\n return np.exp(- sum([ (xi - yi) ** 2 for xi, yi in zip(x, y)]) / (sigma ** 2))\n\nget_unstructured_op = struct.get_from_structured_to_unstructured(space, kernel)\nstructured_list=[]\nunstructured_list=[]\n\nfor i in range(nbdata):\n points = points_list[i].copy()\n vectors = vectors_list[i].copy()\n #points, vectors = cmp.compute_pointsvectors_2articulations_nb(a_list[i], b_list[i], c_list[i], width, sigma, nb_ab, nb_ab_orth, nb_bc, nb_bc_orth)\n eval_field = np.array([space.element(vector_fields_list[i][:,:,u]).interpolation(\n points) for u in range(dim)]).copy()\n\n vector_syst = np.zeros(dim*nb_points)\n basis = np.identity(dim)\n\n\n\n for k0 in range(nb_points):\n for l0 in range(dim):\n vector_syst[dim*k0 + l0] += np.dot(eval_field.T[k0],\n basis[:, l0])\n\n eval_kernel = struct.make_covariance_matrix(points, kernel)\n\n matrix_syst = np.kron(eval_kernel, basis)\n\n alpha_concatenated = np.linalg.solve(matrix_syst, vector_syst)\n alpha = struct.get_structured_vectors_from_concatenated(alpha_concatenated, nb_points, dim)\n structured = struct.create_structured(points, alpha)\n\n structured_list.append(structured.copy())\n unstructured_list.append(get_unstructured_op(structured).copy())\n#\n#%% See projection\n\n#plt.plot(points[0] , points[1], 'xb')\n\nfor i in range(nbdata):\n space.tangent_bundle.element([vector_fields_list[i][:,:,u] for u in range(2)])[0].show('truth' + str(i), clim=[-5,5])\n plt.plot(param.T[i][0::2], param.T[i][1::2], 'xb')\n unstructured_list[i][0].show('projected' + str(i), clim=[-5,5])\n plt.plot(param.T[i][0::2], param.T[i][1::2], 'xb')\n\n#\n\nfor i in range(nbdata):\n (space.tangent_bundle.element([vector_fields_list[i][:,:,u] for u in range(2)]) - unstructured_list[i]).show('diff' + str(i))\n\n#\n#%% Save data\npath = '/home/bgris/data/RotationTranslationRectangle_dimcont2/'\nname_exp = 'rb_' + str(r_b) + '_width_' + str(width) + '_sigma_' + str(sigma) + 'nb_fixed' + '_nbdata_' + str(nbdata) + '/'\nname = path + name_exp\n\nos.mkdir(name)\nfor i in range(nbdata):\n np.savetxt(name + 'structured' + str(i), structured_list[i])\n np.savetxt(name + 'unstructured' + str(i), unstructured_list[i])\n np.savetxt(name + 'param' + str(i), param.T[i])\n np.savetxt(name + 'points' + str(i), points_list[i])\n np.savetxt(name + 'vectors' + str(i), vectors_list[i])\n#\nnp.savetxt(name + 'nameab', [nb_ab])\nnp.savetxt(name + 'nameaborth', [nb_ab_orth])\n#%% Load data\nstructured_load = []\nunstructured_load = []\npoints_load = []\nvectors_load = []\nparam_load = []\n\nfor i in range(nbdata):\n structured_load.append(np.loadtxt(name + 'structured' + str(i)))\n 
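# round trip: np.loadtxt restores the arrays that np.savetxt wrote in the save cell\n    # above (the grid sizes in the 'nameab' files could be reloaded the same way if needed)\n    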
unstructured_load.append(np.loadtxt(name + 'unstructured' + str(i)))\n    points_load.append(np.loadtxt(name + 'points' + str(i)))\n    vectors_load.append(np.loadtxt(name + 'vectors' + str(i)))\n    param_load.append(np.loadtxt(name + 'param' + str(i)))\n#\n\n\nparam_load = np.array(param_load).T\n\n#%% See decomposition\n\ni = 1\n\nv_temp0 = Cont[i][0] * generate_vectorfield_rotationrectangle(space, param.T[i][0:2], param.T[i][2:4], width).copy()\nv_temp1 = Cont[i][1] * generate_vectorfield_translationrectangle(space, param.T[i][0:2], param.T[i][2:4], width).copy()\nv_temp0.show(str(i) + 'rotation', clim = [-5, 5])\nplt.plot(param.T[i][0],param.T[i][1], 'xr')\nv_temp1.show(str(i) + 'translation', clim = [-5, 5])\nplt.plot(param.T[i][0],param.T[i][1], 'xr')\nplt.plot(points_list[i][0], points_list[i][1], 'xb')","sub_path":"generate_data_rotationtranslationrectangle_dimcon2.py","file_name":"generate_data_rotationtranslationrectangle_dimcon2.py","file_ext":"py","file_size_in_byte":9907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"373004613","text":"import os\r\nfrom threading import Thread\r\nfrom applic import app , db\r\nfrom flask_script import Manager, prompt_bool\r\n\r\nmanager = Manager(app)\r\n\r\n@manager.command\r\ndef initdb():\r\n    db.create_all()\r\n    print ('Initialized the database')\r\n\r\n\r\n@manager.command\r\ndef dropdb():\r\n    if prompt_bool(\r\n        \"Are you sure you want to lose all your data\"):\r\n        db.drop_all()\r\n        print ('Dropped the database')\r\n\r\n\r\n@manager.command\r\ndef run():\r\n\r\n    app.secret_key = os.urandom(12)\r\n    #app.run(debug=True)\r\n    app.run(debug = True, threaded = True)\r\n\r\nif __name__ == '__main__':\r\n    manager.run()\r\n\r\n","sub_path":"manage.py","file_name":"manage.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"9954111","text":"\nimport numpy as np \nimport os\nimport sys\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.svm import SVC\nfrom sklearn.cross_validation import cross_val_score\n\n\nfrom patch_feature import PatchFeatures\nfrom features import *\nfrom local_settings import *\nfrom data_loader import *\nfrom kutil import printf\n\nclass MultiscalePatchFeatures(object):\n    def __init__(self, settings):\n        self._stg = settings\n        self._mdls = [PatchFeatures(**stg) for stg in self._stg]\n\n    def load_images(self, im_gen, fit_every=100):\n        for i, im in enumerate(im_gen):\n            for mdl in self._mdls:\n                mdl._load_image(im)\n            if (i+1) % fit_every == 0:\n                for mdl in self._mdls:\n                    mdl.fit()\n        for mdl in self._mdls:\n            mdl.fit()\n        return self \n\n    def _represent_image(self, im):\n        return np.hstack([mdl._represent_image(im) for mdl in self._mdls])\n\ndef c_param_search(frame):\n    X = StandardScaler().fit_transform(frame.values.astype(float))\n    y = frame.index.values\n    for c in 10.0**np.arange(-1, 3).astype(float):\n        clf = SVC(kernel='rbf', C=c, cache_size=2000)\n        pclf = SVC(probability=True, kernel='rbf', C=c, cache_size=2000)\n        print(\"C=\", c)\n        print(cross_val_score(clf, X, y, cv=2, n_jobs=4))\n        print(cross_val_score(pclf, X, y, cv=2, n_jobs=4, scoring='log_loss'))\n\ndef test_multiscale(name):\n
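    # the three patch scales below are hard-coded; MultiscalePatchFeatures simply\n    # hstacks one feature block per scale for every image\n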
    settings =[ {'patch_size': 8, 'stride_size': 3, 'im_size': 200, 'num_patches_per_image_train': 1000, 'num_centroids': 250},\n                {'patch_size': 10, 'stride_size': 3, 'im_size': 200, 'num_patches_per_image_train': 1000, 'num_centroids': 250},\n                {'patch_size': 20, 'stride_size': 3, 'im_size': 200, 'num_patches_per_image_train': 1000, 'num_centroids': 250}\n              ]\n    print(settings)\n    \n    printf(\"Train...\")\n    mdl = MultiscalePatchFeatures(settings)\n    paths_train = PathLoader(os.path.join(IMAGE_DATA_ROOT, \"train\")).get_path_list()\n    paths_test = PathLoader(os.path.join(IMAGE_DATA_ROOT, \"test\")).get_path_test()\n    mdl.load_images(ImageLoader(paths_test), fit_every=250).load_images(ImageLoader(paths_train), fit_every=250)\n\n    printf(\"Loading labeled data...\")\n    frame_train = build_feature_mat_train(os.path.join(IMAGE_DATA_ROOT, \"train\"), mdl._represent_image)\n    frame_train.to_pickle(os.path.join(FEATURES_ROOT, name + \"_train.pkl\"))\n    \n    print(\"Loading features...\")\n    frame = load_features_train(name)\n    print(\"CV... \")\n    c_param_search(frame)\n\n\nif __name__ == \"__main__\":\n    test_multiscale(name=\"multi_take2\")","sub_path":"ameba/multiscale_patch_feature.py","file_name":"multiscale_patch_feature.py","file_ext":"py","file_size_in_byte":2621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"458283077","text":"# ex:ts=4:sw=4:sts=4:et\n# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-\nfrom __future__ import absolute_import\nimport sys\nimport time\nfrom datetime import timedelta\n\nfrom svtplay_dl.utils.terminal import get_terminal_size\n\nprogress_stream = sys.stderr\n\nclass ETA(object):\n    \"\"\"\n    An ETA class, used to calculate how long it takes to process\n    an arbitrary set of items. By initiating the object with the\n    number of items and continuously updating with current\n    progress, the class can calculate an estimation of how long\n    time remains.\n    \"\"\"\n\n    def __init__(self, end, start=0):\n        \"\"\"\n        Parameters:\n            end: the end (or size, if start is 0)\n            start: the starting position, defaults to 0\n        \"\"\"\n        self.start = start\n        self.end = end\n        self.pos = start\n\n        self.now = time.time()\n        self.start_time = self.now\n\n    def update(self, pos):\n        \"\"\"\n        Set new absolute progress position.\n\n        Parameters:\n            pos: new absolute progress\n        \"\"\"\n        self.pos = pos\n        self.now = time.time()\n\n    def increment(self, skip=1):\n        \"\"\"\n        Like update, but set new pos relative to old pos.\n\n        Parameters:\n            skip: progress since last update (defaults to 1)\n        \"\"\"\n        self.update(self.pos + skip)\n\n    @property\n    def left(self):\n        \"\"\"\n        returns: How many items remain?\n        \"\"\"\n        return self.end - self.pos\n\n    def __str__(self):\n        \"\"\"\n        returns: a time string of the format HH:MM:SS.\n        \"\"\"\n        duration = self.now - self.start_time\n\n        # Calculate how long it takes to process one item\n        try:\n            elm_time = duration / (self.end - self.left)\n        except ZeroDivisionError:\n            return \"(unknown)\"\n\n        return str(timedelta(seconds=int(elm_time * self.left)))\n\n\ndef progress(byte, total, extra = \"\"):\n    \"\"\" Print some info about how much we have downloaded \"\"\"\n    if total == 0:\n        progresstr = \"Downloaded %dkB\" % (byte >> 10)\n        progress_stream.write(progresstr + '\\r')\n        return\n    progressbar(total, byte, extra)\n\ndef progressbar(total, pos, msg=\"\"):\n    \"\"\"\n    Given a total and a progress position, output a progress bar\n    to stderr. It is important to not output anything else while\n    using this, as it relies solely on the behavior of carriage\n    return (\\\\r).\n\n    Can also take an optional message to add after the\n    progressbar. It must not contain newlines.\n\n    The progress bar will look something like this:\n\n    [099/500][=========...............................] 
ETA: 13:36:59\n\n Of course, the ETA part should be supplied be the calling\n function.\n \"\"\"\n width = get_terminal_size()[0] - 25\n rel_pos = int(float(pos)/total*width)\n bar = ''.join([\"=\" * rel_pos, \".\" * (width - rel_pos)])\n\n # Determine how many digits in total (base 10)\n digits_total = len(str(total))\n fmt_width = \"%0\" + str(digits_total) + \"d\"\n fmt = \"\\r[\" + fmt_width + \"/\" + fmt_width + \"][%s] %s\"\n\n progress_stream.write(fmt % (pos, total, bar, msg))\n\n","sub_path":"lib/svtplay_dl/output.py","file_name":"output.py","file_ext":"py","file_size_in_byte":3124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"278861445","text":"import random\n\nimport networkx as nx\nimport numpy as np\nfrom solution import Solution\nimport matplotlib.pyplot as plt\n\n\nclass Action():\n\n def __init__(self, v1, v2, action):\n self.v1 = v1\n self.v2 = v2\n self.action = action\n\n\nclass Edges(Solution):\n\n def __init__(self, problem, style):\n super(Edges, self).__init__(problem)\n self.style = style\n self.v_indexes = list(map(lambda x: x[0], self.p.vertices))\n self.n = len(self.v_indexes)\n\n def set_random(self, problem):\n v = problem.vertices.copy()\n random.shuffle(v)\n self.nodes = v[:int(np.ceil(problem.n / 2))]\n self.unused = v[int(np.ceil(problem.n / 2)):]\n\n self.v_indexes = list(map(lambda x: x[0], self.p.vertices))\n self.nodes = list(map(lambda x: x[0], self.nodes))\n self.unused = list(map(lambda x: x[0], self.unused))\n self.n = len(self.v_indexes)\n\n self.path = self.build_path(self.nodes)\n self.dist = self.path_distance(self.path)\n\n def path_distance(self, path):\n dist = 0\n # print(\"Path\", path)\n for edge in path:\n dist += self.p.distances[edge[0] - 1, edge[1] - 1]\n return dist\n\n def visualise(self, save, alg, style):\n G = nx.Graph()\n plt.figure(figsize=(16, 16))\n for i in range(len(self.p.vertices)):\n G.add_node(i, pos=(self.p.vertices[i][1], self.p.vertices[i][2]))\n\n for edge in self.path:\n G.add_edge(edge[0] - 1, edge[1] - 1, weight=self.p.distances[edge[0] - 1, edge[1] - 1])\n\n pos = nx.get_node_attributes(G, 'pos')\n labels = nx.get_edge_attributes(G, 'weight')\n\n nx.draw_networkx_edge_labels(G, pos, edge_labels=labels)\n nx.draw_networkx(G, pos, node_size=30, edge_labels=nx.get_node_attributes(G, \"weight\"))\n if save:\n plt.savefig(alg + \"_\" + style + \".png\")\n plt.show()\n else:\n plt.show()\n\n def optimize(self):\n if self.style == 'greedy':\n self.optimize_greedy()\n elif self.style == 'steep':\n self.optimize_steepest()\n\n def swap_nodes(self, start, end, nodes):\n if start == end: return nodes\n new_nodes = []\n if start < end:\n for i in range(0, start):\n new_nodes.append(nodes[i])\n for i in range(end, start - 1, -1):\n new_nodes.append(nodes[i])\n for i in range(end + 1, len(nodes)):\n new_nodes.append(nodes[i])\n else:\n for i in range(len(nodes) - 1, start - 1, -1):\n new_nodes.append(nodes[i])\n for i in range(end + 1, start):\n new_nodes.append(nodes[i])\n for i in range(end, -1, -1):\n new_nodes.append(nodes[i])\n return new_nodes\n\n def calc_outer_move(self, v1, v2):\n delta = 0\n if v1 not in self.nodes:\n temp = v1\n v1 = v2\n v2 = temp\n v1_ind = self.nodes.index(v1)\n pre = self.nodes[v1_ind - 1]\n aft = self.nodes[(v1_ind + 1) % (self.n // 2)]\n delta -= self.p.distances[pre - 1, v1 - 1]\n delta -= self.p.distances[v1 - 1, aft - 1]\n delta += self.p.distances[pre - 1, v2 - 1]\n delta += self.p.distances[v2 - 1, aft - 1]\n return delta\n\n def 
calc_swap_move(self, v1, v2):\n delta = 0\n v1_ind = self.nodes.index(v1)\n v2_ind = self.nodes.index(v2)\n if abs(v2_ind - v1_ind) == 0 or abs(v1_ind - v2_ind) == (self.n // 2) - 1:\n return 0\n\n if v2_ind < v1_ind == v2_ind + 1:\n return 0\n\n delta -= self.p.distances[v1 - 1, self.nodes[v1_ind - 1] - 1]\n delta -= self.p.distances[self.nodes[(v2_ind + 1) % (self.n // 2)] - 1, v2 - 1]\n\n delta += self.p.distances[v1 - 1, self.nodes[(v2_ind + 1) % (self.n // 2)] - 1]\n delta += self.p.distances[self.nodes[v1_ind - 1] - 1, v2 - 1]\n return delta\n\n def do_swap_move(self, v1, v2):\n v1_ind = self.nodes.index(v1)\n v2_ind = self.nodes.index(v2)\n self.nodes = self.swap_nodes(v1_ind, v2_ind, self.nodes)\n\n def do_outer_move(self, v1, v2):\n if v1 in self.nodes:\n v1_ind = self.nodes.index(v1)\n self.nodes[v1_ind] = v2\n else:\n v2_ind = self.nodes.index(v2)\n self.nodes[v2_ind] = v1\n\n def optimize_greedy(self):\n self.path = self.build_path(self.nodes)\n print(\"Start distance\", self.path_distance(self.path))\n improved = True\n while improved:\n improved = False\n best_action = None\n for i in self.v_indexes:\n i_in = i in self.nodes\n for j in self.v_indexes:\n if i == j: continue\n j_in = j in self.nodes\n if (i_in and not j_in) or (j_in and not i_in):\n if self.calc_outer_move(i, j) < 0:\n best_action = Action(i, j, \"outer\")\n improved = True\n break\n if i_in and j_in:\n # print(\"Swap delta\", self.calc_swap_move(i, j))\n if self.calc_swap_move(i, j) < 0:\n best_action = Action(i, j, \"swap\")\n improved = True\n break\n if improved:\n break\n if improved:\n if best_action.action == \"swap\":\n self.do_swap_move(best_action.v1, best_action.v2)\n else:\n self.do_outer_move(best_action.v1, best_action.v2)\n self.path = self.build_path(self.nodes)\n\n self.path = self.build_path(self.nodes)\n self.dist = self.path_distance(self.path)\n print(\"End distance:\", self.dist)\n\n def optimize_steepest(self):\n self.path = self.build_path(self.nodes)\n print(\"Start distance\", self.path_distance(self.path))\n while True:\n best_action = None\n best_delta = 0\n for i in self.v_indexes:\n i_in = i in self.nodes\n for j in self.v_indexes:\n if i == j: continue\n j_in = j in self.nodes\n if (i_in and not j_in) or (j_in and not i_in):\n delta = self.calc_outer_move(i, j)\n if delta < best_delta:\n best_action = Action(i, j, \"outer\")\n best_delta = delta\n if i_in and j_in:\n # print(\"Swap delta\", self.calc_swap_move(i, j))\n delta = self.calc_swap_move(i, j)\n if delta < best_delta:\n best_action = Action(i, j, \"swap\")\n best_delta = delta\n if best_delta < 0:\n # print(best_action.v1, \" v2\", best_action.v2)\n # if best_action.action == \"swap\":\n # print(\"v1 ind\", self.nodes.index(best_action.v1), \"v2 ind\", self.nodes.index(best_action.v2))\n if best_action.action == \"swap\":\n self.do_swap_move(best_action.v1, best_action.v2)\n else:\n self.do_outer_move(best_action.v1, best_action.v2)\n self.path = self.build_path(self.nodes)\n #self.visualise(False, \"\", \"\")\n else:\n break\n self.path = self.build_path(self.nodes)\n self.dist = self.path_distance(self.path)\n print(\"End distance:\", self.dist)\n print(\"Path len:\", len(self.nodes))\n","sub_path":"zad3/edges.py","file_name":"edges.py","file_ext":"py","file_size_in_byte":7756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"331115904","text":"import csv\nimport math\nimport helper\n\ndef read_data(file_name):\n with open(file_name, encoding='utf-8') as f:\n reader 
= csv.reader(f)\n        list = [row for row in reader] # convert to a list\n    start_time = list[1][3]\n    start_time_second = start_time[-2:]\n    start_time_minute = start_time[3:5]\n    start_time_hour = start_time[:2]\n    if int(start_time_second) == 0:\n        load_start_second = 0\n    else:\n        load_start_second = 60-int(start_time_second)\n    read_data = {}\n    for i in range(3,150): # for finding the start position -- needs checking\n        time_value = list[i][1]\n        time_value_second = int(time_value[-2:])\n        if load_start_second == time_value_second :\n            if list[i][2] == None or list[i][2] == '': # ask later whether this should be None or zero\n                load_start_index_value = i+60\n            else:\n                load_start_index_value = i\n            break\n\n    load_start_index_value = int(load_start_index_value)\n    count = 0 # for determining the number of iterations\n    for row in list:\n        count += 1\n    count = count-3-load_start_index_value\n    count = math.floor(count / 60)\n\n    value_sum = 0\n    null_count = 0\n    for i in range(0,count):\n        for a in range(0,60):\n            value = list[load_start_index_value][2]\n            try:\n                value_sum += int(value)\n            except:\n                null_count+=1\n\n            load_start_index_value += 1\n        average = round((value_sum/60),1)\n        time = list[load_start_index_value][1]\n\n        try:\n            hour = time[:2]\n            hour = int(hour)+int(start_time_hour)\n        except:\n            hour = time[:1]\n            hour = int(hour)+int(start_time_hour)\n\n        if len(time) <= 7:\n            minute = time[2:4]\n        else:\n            minute = time[3:5]\n\n        minute = int(minute)+int(start_time_minute)\n\n        if minute > 59: # carry the minutes over into the hour\n            minute = minute - 60\n            hour = int(hour) + 1\n            if hour < 10:\n                hour = '0' + str(hour)\n            else:\n                hour = str(hour)\n\n        if minute < 10: # a single-digit minute would not match, so convert it to the form '05'\n            minute = '0'+ str(minute)\n        else:\n            minute = str(minute)\n\n        read_data_index = helper.create_write_data(hour,minute) # convert to the format used as a key of read_data (a dict)\n        if null_count>0:\n            read_data[read_data_index] = None\n\n        else:\n            read_data[read_data_index] = average\n\n        null_count = 0\n        value_sum = 0\n    return read_data\n","sub_path":"read_data.py","file_name":"read_data.py","file_ext":"py","file_size_in_byte":2667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"352908808","text":"# -*-coding: utf-8 -*-\nimport cv2\nimport numpy as np\nimport matplotlib.pyplot as plt \n\nkernel = np.ones((6,6),np.float32)/36\nimg = cv2.imread('../images/business_small_2x.jpg', 1)\nimg = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n# cv2.imshow('image', img)\n# cv2.waitKey(0)\n# cv2.destroyAllWindows()\n\noutput = cv2.filter2D(img, -1, kernel)\nplt.subplot(121),plt.imshow(img),plt.title('Original')\nplt.xticks([]), plt.yticks([])\nplt.subplot(122),plt.imshow(output),plt.title('Averaging')\nplt.xticks([]), plt.yticks([])\nplt.show()","sub_path":"code-demo/opencv_demo/cv2_filter0.py","file_name":"cv2_filter0.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"15095730","text":"import os\nfrom nose import tools as ntools\nfrom draftfast.csv_parse import salary_download\nfrom draftfast.rules import DRAFT_KINGS\n\nCURRENT_DIR = os.path.dirname(os.path.abspath(__file__))\nsalaries = '{}/data/nba-test-salaries.csv'.format(CURRENT_DIR)\nprojections = '{}/data/nba-test-projections.csv'.format(CURRENT_DIR)\n\n\ndef test_dk_nba_parse():\n    players = salary_download.generate_players_from_csvs(\n        salary_file_location=salaries,\n        game=DRAFT_KINGS,\n    )\n    ntools.assert_equals(len(players), 221)\n\n\ndef test_dk_nba_use_avg():\n    players = salary_download.generate_players_from_csvs(\n        salary_file_location=salaries,\n        game=DRAFT_KINGS,\n    )\n    ntools.assert_equals(players[0].proj, 
60.462)\n\n\ndef test_dk_nba_use_proj():\n players = salary_download.generate_players_from_csvs(\n salary_file_location=salaries,\n projection_file_location=projections,\n game=DRAFT_KINGS,\n )\n ntools.assert_equals(players[0].proj, 62.29)\n","sub_path":"draftfast/test/test_csv_parse.py","file_name":"test_csv_parse.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"211193978","text":"\"\"\" Functions for reading the USGS DEM format. \"\"\"\n\nimport math\nimport itertools\nimport re\n\ndef coerce_float(s):\n return float(s.replace(\"D\",\"e\",1)\n .replace(\"E\",\"e\",1)\n .replace(\"F\",\"e\",1)\n .replace(\"G\",\"e\",1))\n\nFMT_KEY = {\"A\":str,\n \"I\":int,\n \"D\":coerce_float,\n \"E\":coerce_float,\n \"F\":coerce_float,\n \"G\":coerce_float}\n\n\nBLOCKA = [(\"fnm\",0,40,\"A40\"),\n (\"free\",40,109,\"A69\"),\n (\"se_corner\",109,135,\"2(I4,I2,F7.4)\"),\n (\"proc_code\",135,136,\"A1\",),\n (\"sectional_indicator\",137,140,\"A3\"),\n (\"origin_code\",140,144,\"A4\"),\n (\"dem_level_code\",144,150,\"I6\"),\n (\"elev_pattern\",150,156,\"I6\"),\n (\"crs\",156,162,\"I6\"),\n (\"crs_zone\",162,168,\"I6\"),\n (\"proj\",168,528,\"15D24.15\"),\n (\"crs_unit\",528,534,\"I6\"),\n (\"z_unit\",534,540,\"I6\"),\n (\"nsides\",540,546,\"I6\"),\n (\"quad_coords\",546,738,\"4(2D24.15)\"),\n (\"minmax\",738,786,\"2D24.15\"),\n (\"angle_ccw\",786,810,\"D24.15\"),\n (\"z_acc\",810,816,\"I6\"),\n (\"resolution\",816,852,\"3E12.6\"),\n (\"size\",852,864,\"2I6\"), # Old format end\n (\"max_contour_int\",864,869,\"I5\"),\n (\"max_contour_units\",869,870,\"I1\"),\n (\"min_contour_int\",870,875,\"I5\"),\n (\"min_contour_units\",875,876,\"I1\"),\n (\"src_year\",876,880,\"I4\"),\n (\"rev_year\",880,884,\"I4\"),\n (\"inspection_flag\",884,885,\"A1\"),\n (\"validation_flag\",885,886,\"A1\"),\n (\"void_flag\",886,888,\"I1\"),\n (\"z_datum\",888,890,\"I2\"),\n (\"xy_datum\",890,892,\"I2\"),\n (\"edition\",892,896,\"I4\"),\n (\"void_perc\",896,900,\"I4\"),\n (\"edge_flag\",900,908,\"4I2\"),\n (\"z_shift\",908,915,\"F7.2\")]\n\nBLOCKB = [(\"rc_id\",0,12,\"2I6\"),\n (\"mn\",12,24,\"2I6\"),\n (\"xy\",24,72,\"2D24.15\"),\n (\"zdatum\",72,96,\"D24.15\"),\n (\"zminmax\",96,144,\"2D24.15\")]\n\nBLOCKC = [(\"filestatcode\",0,6,\"I6\"),\n (\"file_rmse\",6,24,\"3I6\"),\n (\"file_nsmp\",24,30,\"I6\"),\n (\"demstatcode\",30,36,\"I6\"),\n (\"dem_rmse\",36,54,\"3I6\"),\n (\"dem_nsmp\",54,60,\"I6\")]\n\ndef nreps(fmt):\n \"\"\" Return the number of characters in a single record of\n a potentially multiple-record string. \"\"\"\n try:\n reps = int(fmt[0])\n return reps * nreps(fmt[1:].lstrip(\"(\").rstrip(\")\"))\n\n except ValueError:\n return 1\n #return fmt.count(\",\") + 1\n\ndef nreps_re(fmt):\n M = re.match(r\"\\A\\d+\", fmt)\n if M:\n return int(fmt[:M.end()]) * nreps_re(fmt[M.end():].lstrip(\"(\").rstrip(\")\"))\n else:\n return 1\n\ndef reclen(fmt):\n \"\"\" Return the number of characters in a single record of\n a potentially multiple-record string. 
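A leading repeat count such as 2D24.15 is stripped and handled recursively. 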
\"\"\"\n try:\n reps = int(fmt[0])\n return reclen(fmt[1:].lstrip(\"(\").rstrip(\")\"))\n\n except ValueError:\n if \",\" in fmt:\n return list(map(lambda a: reclen(a)[0], fmt.split(\",\")))\n\n if fmt[0] in (\"G\", \"F\", \"E\", \"D\"):\n nch = int(fmt[1:].split(\".\")[0])\n elif fmt[0] in (\"A\", \"I\"):\n nch = int(fmt[1:])\n else:\n raise ValueError(\"Unrecognized format code {0}\"\n .format(fmt))\n return [nch]\n\ndef dtype(fmt):\n \"\"\" Return a constructor for the datatype encoded in *fmt*. \"\"\"\n try:\n reps = int(fmt[0])\n return dtype(fmt[1:].lstrip(\"(\").rstrip(\")\"))\n\n except ValueError:\n if \",\" in fmt:\n return list(map(lambda a: dtype(a)[0], fmt.split(\",\")))\n else:\n return [FMT_KEY[fmt[0]]]\n\ndef cumsum(u):\n if len(u) == 1:\n return u\n elif len(u) == 2:\n return [u[0], u[0]+u[1]]\n else:\n return [u[0]] + cumsum([u[0]+u[1]] + u[2:])\n\ndef cumsum_loop(u):\n v = [u[0]]\n for i in u[1:]:\n v.append(v[-1] + i)\n return v\n\ndef parse(fmt, s):\n \"\"\" Based on an ASCII format string, parse the information in *s*. \"\"\"\n nch = reclen(fmt)\n reps = nreps_re(fmt)\n t = dtype(fmt)\n pos = [0] + cumsum(nch*reps)\n vals = [t_(s[a:b]) for a,b,t_ in zip(pos[:-1], pos[1:],\n itertools.cycle(t))]\n return vals if len(vals) > 1 else vals[0]\n\ndef read_block(blk, KEY):\n \"\"\" Read a block from a USGS .dem file given *KEY*, which is a list of\n tuples in (name::str, pos::int, end::int, format::str) form. \"\"\"\n if len(blk) > 1024:\n blk = blk[:1024]\n blkdict = {}\n for field,pos0,pos1,fmt in KEY[:20]:\n blkdict[field] = parse(fmt, blk[pos0:pos1])\n return blkdict\n\ndef demread(fnm):\n \"\"\" Read a USGS (or CDED) .dem file and return a dictionary for block A, a list of raw data values, and a dictionary for block B. \"\"\"\n with open(fnm, \"r\") as f:\n data = f.read()\n\n blocka = read_block(data[:1024], BLOCKA)\n\n dem = []\n profnz = parse(\"2I6\", data[1036:1048])[0]\n profnch = profnz * 6\n for profnum in range(blocka[\"size\"][1]):\n i = (int(math.ceil((profnch-146) / 1024.)) + 1) * profnum + 1\n bhdr = read_block(data[i*1024:(i + 1) * 1024], BLOCKB)\n #profdem = read_block(data[1168:2048], [(\"data\",0,876,\"146I6\")])[\"data\"]\n profdem = read_block(data[1024*i:1024*(i+1)],\n [(\"data\",144,1020,\"146I6\")])[\"data\"]\n blknz = [170 for _ in range((profnz-146)//170)] + [(profnz-146) % 170]\n for j, nz in enumerate(blknz):\n fmt = str(nz) + \"I6\"\n profdem.extend(read_block(data[1024*(j+i+1) : 1024*(j+i+2)],\n [(\"data\",0,1020,fmt)])[\"data\"])\n dem.extend(profdem)\n\n blockc = read_block(data[-1024:], BLOCKC)\n return blocka, dem, blockc\n\n","sub_path":"karta/raster/_dem.py","file_name":"_dem.py","file_ext":"py","file_size_in_byte":5766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"28938206","text":"\"\"\"\nThe gray code is a binary numeral system where two successive values differ in only one bit.\n\nGiven a non-negative integer n representing the total number of bits in the code, print the sequence of gray code. A gray code sequence must begin with 0.\n\nFor example, given n = 2, return [0,1,3,2]. Its gray code sequence is:\n\nNote:\nFor a given n, a gray code sequence is not uniquely defined.\n\nFor example, [0,2,3,1] is also a valid gray code sequence according to the above definition.\n格雷码转换\n\"\"\"\n\n\n\n\"\"\"\nS1 . 
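(worked check: for n = 2, ans[i] = i//2 ^ i maps i = 0,1,2,3 to 0,1,3,2, i.e. [0,1,3,2])\n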
Use the formula G(N) = (B(n)/2) XOR B(n)\n\"\"\"\nclass Solution1(object):\n    def grayCode(self, n):\n        \"\"\"\n        :type n: int\n        :rtype: List[int]\n        \"\"\"\n        ans = [0]*(2**n)\n\n        for i in range(2**n):\n            ans[i] = i//2 ^ i\n\n        return ans\n\n\n\"\"\"\nS2\nUse the mirror symmetry; the initial list is [0,1]\nn = 2 : first generate 00 01, then mirror them to 11 10 (decimal values are returned, so the prefix value is accumulated along the way)\n\"\"\"\nclass Solution(object):\n    def grayCode(self, n):\n\n        \"\"\"\n        :type n: int\n        :rtype: List[int]\n        \"\"\"\n        if n == 0 :\n            return [0]\n        ans = [0,1]\n        i =1\n        while i < n: \n            tmp_len = 2**(i+1) # length of the list at this step\n            tmp = [0]*tmp_len\n            for j in range(len(ans)): # mirror the current list to build the next one, prefixing a 0 or a 1\n                tmp[j] = ans[j]\n                tmp[tmp_len-j-1] = ans[j]+ 2**i\n            ans = tmp # replace ans\n            i +=1\n\n        return ans\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nif __name__ == '__main__':\n    S =Solution1()\n    ss =S.grayCode(0)\n    print(ss)\n\n    \n\n\"\"\"\n A : the problem asks us to generate a Gray code sequence\n\n 1. Use the formula from Wikipedia\n    (taking the value whose binary form is 0 as the Gray code 0)\n    G: Gray code  B: binary code\n    G(N) = (B(n)/2) XOR B(n)\n    binary -> Gray (encode): starting from the rightmost bit, XOR each bit with the bit on its left; the result is that Gray code bit, and the leftmost bit stays unchanged (as if a 0 sat on its left);\n    Gray -> binary (decode): starting from the second bit from the left, XOR each bit with the decoded value of the bit on its left; the result is that decoded bit (the leftmost bit again stays unchanged)\n 2. Use the mirror symmetry of the generated Gray codes\n    n = 1  0 1\n    n = 2  00 01 | 11 10\n    n = 3  000 001 011 010 | 110 111 101 100\n    the two halves mirror each other, so we can build the list recursively, doubling it at every step\n \"\"\" \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\"\"\"\nQ: \n Gray code: in an encoding of a set of numbers, if any two adjacent codes differ in exactly one binary digit, the encoding is called a Gray code (Gray Code)\n\"\"\"","sub_path":"89_Gray Code.py","file_name":"89_Gray Code.py","file_ext":"py","file_size_in_byte":2623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"557098707","text":"# Instantiate var of type int and assign value 10\ntypes_of_people = 10\n# Instantiate var x of type string and assign to it f-string\nx = f\"There are {types_of_people} types of people.\"\n\nbinary = \"binary\"\ndo_not = \"don't\"\ny = f\"Those who know {binary} and those who {do_not}.\"\n\nprint(x)\nprint(y)\n\nprint(f\"I said: {x}\")\nprint(f\"I also said: '{y}'\")\n\nhilarious = False\njoke_evaluation = \"Isn't that joke so funny?! {}\"\n\nprint(joke_evaluation.format(hilarious))\n\nw = \"This is the left side of...\"\ne = \"a string with a right side.\"\n\nprint(w + e)\n\n# Study Drills\n#1. Busy busy\n#2. Lies, maybe 6, one where string concat with string\n#3. Hah\n#4. 
concats two strings together with \"+\"","sub_path":"assignment2/ex6.py","file_name":"ex6.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"497205835","text":"import numpy as np\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.models import load_model\nfrom keras.applications.resnet50 import preprocess_input # for resnet\n\nfrom utils import load_array\n\nTEST_DIR = 'data/test'\nMODEL_FILE = 'weights.h5'\nIMG_SHAPE = (360, 640)\n\nmodel = load_model(MODEL_FILE)\n\n\ndef preprocess_gen_test(gen_flow):\n for imgs in gen_flow:\n yield preprocess_input(imgs)\n\n\ntest_datagen = ImageDataGenerator().flow_from_directory(\n TEST_DIR,\n target_size=IMG_SHAPE,\n shuffle=False,\n batch_size=10,\n class_mode=None)\ntest_generator = preprocess_gen_test(test_datagen)\n\npreds = model.predict_generator(test_generator, 1000)\npreds = np.clip(preds, 0.02, 0.98)\n\nraw_test_filenames = [f.split('/')[-1] for f in test_datagen.filenames]\nwith open('submit.csv', 'w') as f:\n f.write('image,ALB,BET,DOL,LAG,NoF,OTHER,SHARK,YFT\\n')\n for i, pred in enumerate(preds):\n pred_string = ','.join(['%f' % p for p in pred])\n f.write(\"%s,%s\\n\" % (raw_test_filenames[i], pred_string))\n\nprint(\"Predicted and submission file written!\")\n","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":1094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"206810289","text":"import random\nfrom typing import Literal, Optional\n\nimport discord\nfrom redbot.core import checks, commands\nfrom redbot.core.bot import Red\nfrom redbot.core.utils.chat_formatting import humanize_list\n\nfrom .asynccleverbot import cleverbot as ac\n\n\nclass Core(commands.Cog):\n\n __author__ = [\"Predeactor\"]\n __version__ = \"v1.0.5\"\n\n async def red_delete_data_for_user(\n self,\n *,\n requester: Literal[\"discord_deleted_user\", \"owner\", \"user\", \"user_strict\"],\n user_id: int,\n ):\n \"\"\"\n Nothing to delete...\n \"\"\"\n pass\n\n # Nothing to delete, I assume that if the user was previously in self.conversation,\n # then it will automatically removed after cog reload/bot restart.\n\n def __init__(self, bot: Red):\n self.bot = bot\n self.conversation = {}\n super().__init__()\n\n def format_help_for_context(self, ctx: commands.Context) -> str:\n \"\"\"Thanks Sinbad!\"\"\"\n pre_processed = super().format_help_for_context(ctx)\n return \"{pre_processed}\\n\\nAuthor: {authors}\\nCog Version: {version}\".format(\n pre_processed=pre_processed,\n authors=humanize_list(self.__author__),\n version=self.__version__,\n )\n\n async def _get_api_key(self):\n travitia = await self.bot.get_shared_api_tokens(\"travitia\")\n # No need to check if the API key is not registered, the\n # @apicheck() do it automatically.\n return travitia.get(\"api_key\")\n\n async def _make_cleverbot_session(self):\n cleverbot_session = ac.Cleverbot(await self._get_api_key(), context=ac.DictContext())\n return cleverbot_session\n\n @staticmethod\n async def ask_question(session, question: str, user_id: Optional[int] = None):\n try:\n answer = await session.ask(question, user_id if user_id is not None else \"00\")\n answered = True\n except Exception as e:\n answer = \"An error happened: {error}. Please try again later. 
Session closed.\".format(\n error=str(e)\n )\n answered = False\n return answer, answered\n\n @staticmethod\n def _message_by_timeout():\n messages = [\n \"5 minutes without messages ? Sorry but I have to close your conversation.\",\n \"Sorry but after 5 minutes, I close your conversation.\",\n \"Conversation stopped.\",\n \"Since I'm lonely, I close our conversation.\",\n \"It's so lonely on the outside... Closing our conversation.\",\n \"I feel... alone. Closing the conversation.\",\n ]\n return random.choice(messages)\n\n # Commands for settings\n\n @checks.is_owner()\n @commands.command()\n async def travaitiaapikey(self, ctx: commands.Context, *, api_key: str):\n \"\"\"Set the API key for Travitia API.\n\n To set the API key:\n 1. Go to [this server](https://discord.gg/s4fNByu).\n 2. Go to #playground and use `> api`.\n 3. Say yes and follow instructions.\n 4. When you receive your key, use this command again with your API key.\n \"\"\"\n await ctx.bot.set_shared_api_tokens(\"travitia\", api_key=api_key)\n try:\n await ctx.message.delete()\n except (discord.Forbidden, discord.HTTPException):\n await ctx.send(\n \"Please delete your message, token is sensitive and should be kept secret.\"\n )\n await ctx.send(\"API key for `travitia` registered.\")\n\n\ndef apicheck():\n \"\"\"\n Check for hidding commands if the API key is not registered.\n Taken from https://github.com/PredaaA/predacogs/blob/master/nsfw/core.py#L200\n \"\"\"\n\n async def predicate(ctx: commands.Context):\n travitia_keys = await ctx.bot.get_shared_api_tokens(\"travitia\")\n key = travitia_keys.get(\"api_key\") is None\n if ctx.invoked_with == \"help\" and key:\n return False\n if key:\n await ctx.send(\"The API key is not registered, the command is unavailable.\")\n return False\n return True\n\n return commands.check(predicate)\n","sub_path":"cleverbot/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":4100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"373323143","text":"# A Gaussian wave packet impinges on a rectangular barrier. Part of the wave\n# is reflected, part of the wave is transmitted.\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport matplotlib.style\nmatplotlib.style.use(\"seaborn\")\n\n# Grid points\nNx = 1600\n\n# Evolution step\ndt = 0.0001\n\n# Propagation end\ntmax = 6\n\n# x- and y-window size\nxmax = 50\nymax = xmax\n\n# number of .png images\nimages = 100\n\n# 0 = periodic boundary\nabsorb_coeff = 20\n\n# If 1, it plots on the screen but does not save the images\n# If 2, it saves the images but does not plot on the screen\n# If 3, it saves the images and plots on the screen\noutput_choice = 3\n\n# Fixes a maximum scale of |psi|**2 for the plots. 
If 0, it does not fix it.\nfixmaximum = 1.05\n\n# Initial wavefunction\n# A Gaussian wave packet moving rightwards\ndef psi_0(x):\n vx = 10 # value of the initial velocity\n f = 0.j + np.exp(-((x+15)/4)**2) # Gaussian profile\n f = f*np.exp(1.j*vx*x) # Multiply by an x-dependent phase to introduce velocity\n return f\n\n# A barrier modeled by V=0 for |x|>5 and V=40 for |x|<5\ndef eval_V(x, t, psi):\n V = np.piecewise(x, [abs(x-5)<2.5, abs(x-5)>=2.5],[40,0])\n return V;\n\ndef init_grid(Nx, xmax):\n x = np.linspace(-xmax, xmax-2*xmax/Nx, Nx)\n return x\n\n# Builds the Laplacian in Fourier space\ndef build_Laplacian_fourier(Nx, xmax):\n kx = np.linspace(-Nx/4/xmax, Nx/4/xmax-1/2/xmax, Nx) # x variable\n return (2*np.pi*1.j*kx)**2 \n\n# Introduces an absorbing shell at the border of the computational window\ndef absorb_potential(x, xmax, dt, absorb_coeff):\n wx = xmax/20\n return np.exp(-absorb_coeff*(2-np.tanh((x+xmax)/wx)+np.tanh((x-xmax)/wx))*dt);\n\n# Saves the data of abs(psi)**2 at different values of t\ndef save_psi2(psi):\n return abs(psi)**2\n\n\n\n# builds spatial grid\nx = init_grid(Nx, xmax)\n\n# loads initial condition\npsi = psi_0(x)\n\n# Laplacian in Fourier space\nL = build_Laplacian_fourier( Nx, xmax )\n\n# linear phase in Fourier space (including point swap)\nlinear_phase = np.fft.fftshift( np.exp(1.j*L*dt/2) )\n\n# Absorbing shell at the border of the computational window\nborder = absorb_potential(x, xmax, dt, absorb_coeff)\n\nd_psi_real = np.gradient(np.real(psi), x)\nd_psi_imag = np.gradient(np.imag(psi), x)\n\n# Number of computational steps between consecutive graphic outputs\nsteps_image = int(tmax/dt/images)\nNsteps = steps_image*images + 1\nidx_image = 1\n\n# potential operator\nV = eval_V(x, 0, psi)\n\nV_bohmian = np.imag( np.gradient(psi,x)/psi )\n\n# Main computational loop\nfor j in range(Nsteps):\n \n print(\"step = \", j)\n \n # Generates image output\n if j % steps_image == 0:\n plt.clf()\n plt.plot(x, np.abs(psi)**2, label=\"Psi0^2\")\n plt.plot(x, border, label=\"border\")\n plt.plot(x, V, label=\"V\")\n plt.ylim([0.0, 2.0])\n plt.grid()\n plt.legend()\n plt.savefig(\"IMG_amp_psi0_{:08d}.png\".format(idx_image), dpi=150)\n #\n #plt.clf()\n #plt.plot(x, d_psi_real, label=\"Re\")\n #plt.plot(x, d_psi_imag, label=\"Im\")\n #plt.plot(x, V, label=\"V\") \n #plt.ylim([-10.0, 10.0])\n #plt.grid()\n #plt.legend()\n #plt.savefig(\"IMG_d_psi_{:08d}.png\".format(idx_image), dpi=150)\n #\n plt.clf()\n plt.plot(x, V_bohmian, label=\"V bohmian\")\n plt.ylim([-25.0, 250.0])\n plt.grid()\n plt.legend()\n plt.savefig(\"IMG_V_bohmian_{:08d}.png\".format(idx_image), dpi=150) \n #\n idx_image = idx_image + 1\n\n\n V[:] = eval_V(x, j*dt, psi) # potential operator\n psi *= np.exp(-1.j*dt*V) # potential phase\n psi = np.fft.fft(psi) # 1D Fourier transform\n psi *=linear_phase # linear phase from the Laplacian term\n psi = border*np.fft.ifft(psi) # inverse Fourier transform and damping by the absorbing shell\n \n d_psi_real[:] = np.gradient(np.real(psi), x)\n d_psi_imag[:] = np.gradient(np.imag(psi), x)\n \n V_bohmian = np.imag( np.gradient(psi,x)/psi )\n","sub_path":"ComputationalQM/TDSE/rect_barrier_1d.py","file_name":"rect_barrier_1d.py","file_ext":"py","file_size_in_byte":4019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"309021291","text":"from pandas import DataFrame\n\nimport numpy as np\n\nfrom pandas.core.reshape import melt, convert_dummies\nimport pandas.util.testing as tm\n\ndef test_melt():\n df = 
tm.makeTimeDataFrame()[:10]\n df['id1'] = (df['A'] > 0).astype(int)\n df['id2'] = (df['B'] > 0).astype(int)\n\n molten1 = melt(df)\n molten2 = melt(df, id_vars=['id1'])\n molten3 = melt(df, id_vars=['id1', 'id2'])\n\ndef test_convert_dummies():\n df = DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',\n 'foo', 'bar', 'foo', 'foo'],\n 'B' : ['one', 'one', 'two', 'three',\n 'two', 'two', 'one', 'three'],\n 'C' : np.random.randn(8),\n 'D' : np.random.randn(8)})\n\n result = convert_dummies(df, ['A', 'B'])\n result2 = convert_dummies(df, ['A', 'B'], prefix_sep='.')\n\n expected = DataFrame({'A_foo' : [1, 0, 1, 0, 1, 0, 1, 1],\n 'A_bar' : [0, 1, 0, 1, 0, 1, 0, 0],\n 'B_one' : [1, 1, 0, 0, 0, 0, 1, 0],\n 'B_two' : [0, 0, 1, 0, 1, 1, 0, 0],\n 'B_three' : [0, 0, 0, 1, 0, 0, 0, 1],\n 'C' : df['C'].values,\n 'D' : df['D'].values},\n columns=result.columns, dtype=float)\n expected2 = expected.rename(columns=lambda x: x.replace('_', '.'))\n\n tm.assert_frame_equal(result, expected)\n tm.assert_frame_equal(result2, expected2)\n\nif __name__ == '__main__':\n import nose\n nose.runmodule(argv=[__file__,'-vvs','-x','--pdb', '--pdb-failure'],\n exit=False)\n\n","sub_path":"pandas/tests/test_reshape.py","file_name":"test_reshape.py","file_ext":"py","file_size_in_byte":1626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"28620594","text":"#Program to print all numbers in a given range with an even digits only\n\ndef evenDigitsNum1(num):\n #checks if num is made only by even digits - Naive\n while num:\n if (num%10)%2==1:\n return 0\n num//=10\n return 1\n\ndef evenDigitsNum2(num):\n #checks if num is made only by even digits - List comprehension\n lst = [int(i) for i in str(num)]\n return all((i%2)==0 for i in lst)\n\n#checking program\nls=list(range(17,1000))\nn=[num for num in ls if evenDigitsNum2(num)==1]\nprint(n)\n","sub_path":"Shimon_Labs/evenDigitsNum.py","file_name":"evenDigitsNum.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"482065103","text":"from numpy import asscalar, isscalar, asfarray, ndarray, prod\nimport numpy as np\nfrom baseClasses import MultiArray\n\nscipyInstalled = True\ntry:\n import scipy\n import scipy.sparse as SP\nexcept:\n scipyInstalled = False\n \nfrom baseClasses import Stochastic\n\nclass FuncDesignerException(BaseException):\n def __init__(self, msg):\n self.msg = msg\n def __str__(self):\n return self.msg\n\ndef checkSizes(a, b):\n if a.size != 1 and b.size != 1 and a.size != b.size:\n raise FuncDesignerException('operation of oovar/oofun ' + a.name + \\\n ' and object with inappropriate size:' + str(a.size) + ' vs ' + b.size)\n\nscipyAbsentMsg = 'Probably scipy installation could speed up running the code involved'\n\npwSet = set()\ndef pWarn(msg):\n if msg in pwSet: return\n pwSet.add(msg)\n print('FuncDesigner warning: ' + msg)\n\n\nclass diagonal:\n isOnes = False\n __array_priority__ = 150000# set it greater than 1 to prevent invoking numpy array __mul__ etc\n \n def __init__(self, arr, scalarMultiplier=1.0, size=0):\n #assert arr is None or arr.ndim <= 1\n self.diag = arr.copy() if arr is not None else None # may be None, then n has to be provided\n self.scalarMultiplier = scalarMultiplier if isscalar(scalarMultiplier) \\\n else asscalar(scalarMultiplier) if type(scalarMultiplier) == ndarray\\\n else scalarMultiplier[0, 0] if scipyInstalled and SP.isspmatrix(scalarMultiplier)\\\n else raise_except()\n self.size = 
arr.size if size == 0 else size\n if arr is None:\n self.isOnes = True\n \n copy = lambda self: diagonal(self.diag, scalarMultiplier = self.scalarMultiplier, size = self.size)\n \n def toarray(self):\n if self.isOnes:\n tmp = np.empty(self.size)\n \n # for PyPy compatibility\n scalarMultiplier = asscalar(self.scalarMultiplier) if type(self.scalarMultiplier) == ndarray else self.scalarMultiplier\n \n tmp.fill(scalarMultiplier)\n return np.diag(tmp)\n else:\n return np.diag(self.diag * self.scalarMultiplier)\n \n def resolve(self, useSparse):\n if useSparse in (True, 'auto') and scipyInstalled and self.size > 50:\n if self.isOnes:\n tmp = np.empty(self.size)\n tmp.fill(self.scalarMultiplier)\n else:\n tmp = self.diag*self.scalarMultiplier\n return SP.dia_matrix((tmp,0), shape=(self.size,self.size)) \n else:\n return self.toarray()\n\n def __add__(self, item):\n if type(item) == DiagonalType:\n # TODO: mb use other.diag.copy(), self.diag.copy() for more safety, especially for parallel computations?\n if self.isOnes and item.isOnes:\n return diagonal(None, self.scalarMultiplier + item.scalarMultiplier, size=self.size)\n else:\n if self.isOnes:\n d1 = np.empty(self.size) \n d1.fill(self.scalarMultiplier )\n else:\n d1 = self.diag\n if item.isOnes:\n d2 = np.empty(item.size) \n d2.fill(item.scalarMultiplier )\n else:\n d2 = item.diag\n return diagonal(d1 * self.scalarMultiplier + d2 * item.scalarMultiplier)\n elif np.isscalar(item) or type(item) == np.ndarray:\n return self.resolve(False)+item\n else: # sparse matrix\n assert SP.isspmatrix(item)\n return self.resolve(True)+item\n \n def __radd__(self, item):\n return self.__add__(item)\n \n def __neg__(self):\n return diagonal(self.diag, -self.scalarMultiplier, size=self.size)\n \n def __mul__(self, item): \n #!!! PERFORMS MATRIX MULTIPLICATION!!!\n if np.isscalar(item):\n return diagonal(self.diag, item*self.scalarMultiplier, size=self.size)\n if type(item) == DiagonalType:#diagonal:\n scalarMultiplier = item.scalarMultiplier * self.scalarMultiplier\n if self.isOnes:\n diag = item.diag\n elif item.isOnes:\n diag = self.diag\n else:\n diag = self.diag * item.diag\n return diagonal(diag, scalarMultiplier, size=self.size) \n elif isinstance(item, np.ndarray):\n if item.size == 1:\n return diagonal(self.diag, scalarMultiplier = np.asscalar(item)*self.scalarMultiplier, size=self.size)\n elif min(item.shape) == 1:\n #TODO: assert item.ndim <= 2 \n r = self.scalarMultiplier*item.flatten()\n if self.diag is not None: r *= self.diag\n return r.reshape(item.shape)\n else:\n # new; TODO: improve it\n if self.isOnes:\n D = np.empty(self.size)\n D.fill(self.scalarMultiplier)\n else:\n D = self.scalarMultiplier * self.diag if self.scalarMultiplier != 1.0 else self.diag\n return D.reshape(-1, 1) * item # ! different shapes !\n \n \n# T = np.dot(self.resolve(False), item)\n# from numpy import array_equal, all\n# assert array_equal(T.shape, T2.shape) and all(T==T2)\n# print '!'\n #prev\n # !!!!!!!!!! 
TODO: rework it!!!!!!!!!!!\n# if self.size < 100 or not scipyInstalled:\n# return np.dot(self.resolve(False), item)\n# else:\n# return self.resolve(True)._mul_sparse_matrix(item)\n else:\n #assert SP.isspmatrix(item)\n if prod(item.shape) == 1:\n return diagonal(self.diag, scalarMultiplier = self.scalarMultiplier*item[0, 0], size=self.size)\n else:\n tmp = self.resolve(True)\n if not SP.isspmatrix(tmp): # currently lil_matrix and K^ works very slow on sparse matrices\n tmp = SP.lil_matrix(tmp) # r.resolve(True) can yield dense ndarray\n return tmp._mul_sparse_matrix(item)\n #return r\n \n def __getattr__(self, attr):\n if attr == 'T': return self # TODO: mb using copy will be more safe\n elif attr == 'shape': return self.size, self.size\n elif attr == 'ndim': return 2\n raise AttributeError('you are trying to obtain incorrect attribute \"%s\" for FuncDesigner diagonal' %attr)\n \n def __rmul__(self, item):\n return self.__mul__(item) if isscalar(item) else self.__mul__(item.T).T\n \n def __div__(self, other):\n #TODO: check it\n if isinstance(other, np.ndarray) and other.size == 1: other = np.asscalar(other)\n if np.isscalar(other) or prod(other.shape)==1: \n return diagonal(self.diag, self.scalarMultiplier/other, size=self.size) \n else: \n # TODO: check it\n return diagonal(self.diag/other if self.diag is not None else 1.0/other, self.scalarMultiplier, size=self.size) \n\nDiagonalType = type(diagonal(np.array([0, 0])))\n\nEye = lambda n: 1.0 if n == 1 else diagonal(None, size=n)\n\ndef Diag(x, *args, **kw):\n if isscalar(x) or (type(x)==ndarray and x.size == 1) or isinstance(x, (Stochastic, MultiArray)): \n return x\n else: \n return diagonal(asfarray(x) if x is not None else x, *args, **kw)\n\nclass fixedVarsScheduleID:\n fixedVarsScheduleID = 0\n def _getDiffVarsID(*args):\n fixedVarsScheduleID.fixedVarsScheduleID += 1\n return fixedVarsScheduleID.fixedVarsScheduleID\nDiffVarsID = fixedVarsScheduleID()\n_getDiffVarsID = lambda *args: DiffVarsID._getDiffVarsID(*args)\n\ntry:\n import numpypy\n isPyPy = True\nexcept ImportError:\n isPyPy = False\n\ndef raise_except(*args, **kwargs):\n raise FuncDesignerException('bug in FuncDesigner engine, inform developers')\n \nclass Extras:\n pass\n\n","sub_path":"vendor/oosuite/FuncDesigner/FuncDesigner/FDmisc.py","file_name":"FDmisc.py","file_ext":"py","file_size_in_byte":7993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"513056051","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\nimport time\n\nfrom a2c_ppo_acktr.distributions import Categorical, DiagGaussian, Bernoulli\nfrom a2c_ppo_acktr.utils import init\n\n\nclass Flatten(nn.Module):\n def forward(self, x):\n return x.view(x.size(0), -1)\n\n\nclass Policy(nn.Module):\n def __init__(self, occ_obs_shape, sign_obs_shape, state_rep, action_space, recurrent_policy):\n super(Policy, self).__init__()\n\n if state_rep in ['sign','original']:\n self.base = MLPBase(sign_obs_shape, recurrent_policy)\n elif state_rep == 'full':\n self.base = CNNBase(occ_obs_shape, sign_obs_shape, recurrent_policy)\n else:\n raise NotImplemented('Only implemented sign, origianal, and full state representation')\n\n num_outputs = action_space.n # 2\n self.dist = Categorical(self.base.output_size, num_outputs)\n\n @property\n def is_recurrent(self):\n return self.base.is_recurrent\n\n @property\n def recurrent_hidden_state_size(self):\n \"\"\"Size of rnn_hx.\"\"\"\n return self.base.recurrent_hidden_state_size\n\n def 
act(self, occ_inputs, sign_inputs, rnn_hxs, masks, deterministic=False): # Not deterministic, chooses actions wrt output probabilities\n\n value, actor_features, rnn_hxs = self.base(occ_inputs, sign_inputs, rnn_hxs, masks)\n\n dist = self.dist(actor_features)\n\n if deterministic:\n action = dist.mode()\n else:\n action = dist.sample()\n\n action_log_probs = dist.log_probs(action)\n dist_entropy = dist.entropy().mean()\n\n #print(action.shape)\n return value, action, action_log_probs, rnn_hxs\n\n def get_value(self, occ_inputs, sign_inputs, rnn_hxs, masks):\n value, _, _ = self.base(occ_inputs, sign_inputs, rnn_hxs, masks)\n return value\n\n def evaluate_actions(self, occ_inputs, sign_inputs, rnn_hxs, masks, action):\n value, actor_features, rnn_hxs = self.base(occ_inputs, sign_inputs, rnn_hxs, masks)\n dist = self.dist(actor_features)\n\n action_log_probs = dist.log_probs(action)\n dist_entropy = dist.entropy().mean()\n\n return value, action_log_probs, dist_entropy, rnn_hxs\n\n# TODO : adapt reccurent to new observations\nclass NNBase(nn.Module):\n\n def __init__(self, recurrent, recurrent_input_size, hidden_size):\n super(NNBase, self).__init__()\n\n self._hidden_size = hidden_size\n self._recurrent = recurrent\n\n if recurrent:\n self.gru = nn.GRU(recurrent_input_size, hidden_size)\n for name, param in self.gru.named_parameters():\n if 'bias' in name:\n nn.init.constant_(param, 0)\n elif 'weight' in name:\n nn.init.orthogonal_(param)\n\n @property\n def is_recurrent(self):\n return self._recurrent\n\n @property\n def recurrent_hidden_state_size(self):\n if self._recurrent:\n return self._hidden_size\n return 1\n\n @property\n def output_size(self):\n return self._hidden_size\n\n def _forward_gru(self, x, hxs, masks):\n if x.size(0) == hxs.size(0):\n x, hxs = self.gru(x.unsqueeze(0), (hxs * masks).unsqueeze(0))\n x = x.squeeze(0)\n hxs = hxs.squeeze(0)\n else:\n # x is a (T, N, -1) tensor that has been flatten to (T * N, -1)\n N = hxs.size(0)\n T = int(x.size(0) / N)\n\n # unflatten\n x = x.view(T, N, x.size(1))\n\n # Same deal with masks\n masks = masks.view(T, N)\n\n # Let's figure out which steps in the sequence have a zero for any agent\n # We will always assume t=0 has a zero in it as that makes the logic cleaner\n has_zeros = ((masks[1:] == 0.0) \\\n .any(dim=-1)\n .nonzero()\n .squeeze()\n .cpu())\n\n\n # +1 to correct the masks[1:]\n if has_zeros.dim() == 0:\n # Deal with scalar\n has_zeros = [has_zeros.item() + 1]\n else:\n has_zeros = (has_zeros + 1).numpy().tolist()\n\n # add t=0 and t=T to the list\n has_zeros = [0] + has_zeros + [T]\n\n\n hxs = hxs.unsqueeze(0)\n outputs = []\n for i in range(len(has_zeros) - 1):\n # We can now process steps that don't have any zeros in masks together!\n # This is much faster\n start_idx = has_zeros[i]\n end_idx = has_zeros[i + 1]\n\n rnn_scores, hxs = self.gru(\n x[start_idx:end_idx],\n hxs * masks[start_idx].view(1, -1, 1)\n )\n\n outputs.append(rnn_scores)\n\n # assert len(outputs) == T\n # x is a (T, N, -1) tensor\n x = torch.cat(outputs, dim=0)\n # flatten\n x = x.view(T * N, -1)\n hxs = hxs.squeeze(0)\n\n return x, hxs\n\nclass Flatten(nn.Module):\n def forward(self, x):\n x = x.view(x.size(0), -1)\n return x\n\nclass CNNBase(NNBase):\n def __init__(self, occ_num_inputs, sign_num_inputs, recurrent):\n\n combined_size = occ_num_inputs[0]*16*5 + sign_num_inputs\n\n hidden_size = int(np.power(2,np.floor(np.log2(combined_size))))\n\n\n super(CNNBase, self).__init__(recurrent, hidden_size, hidden_size)\n\n init_ = lambda m: init(m,\n 
nn.init.orthogonal_,\n lambda x: nn.init.constant_(x, 0),\n nn.init.calculate_gain('relu'))\n\n self.lane = nn.Sequential(\n init_(nn.Conv1d(1,8,6,stride=1)),\n nn.ReLU(),nn.MaxPool1d(4),\n init_(nn.Conv1d(8,16,6,stride=1)),\n nn.ReLU(),nn.MaxPool1d(5)\n )\n\n self.actor = nn.Sequential(\n init_(nn.Linear(combined_size,hidden_size)),\n nn.ReLU(),\n init_(nn.Linear(hidden_size,hidden_size)),\n nn.ReLU()\n )\n\n self.critic = nn.Sequential(\n init_(nn.Linear(combined_size, hidden_size)),\n nn.ReLU(),\n init_(nn.Linear(hidden_size, hidden_size)),\n nn.ReLU()\n )\n\n init_ = lambda m: init(m,\n nn.init.orthogonal_,\n lambda x: nn.init.constant_(x, 0))\n\n self.critic_linear = init_(nn.Linear(hidden_size, 1))\n\n self.train()\n\n def forward(self, occ_inputs, sign_inputs, rnn_hxs, masks):\n\n hidden_lanes = self.lane(occ_inputs.view(occ_inputs.size(0)*occ_inputs.size(1),1,-1))\n hidden_lanes = hidden_lanes.view(-1,occ_inputs.size(1)*16*5)\n hidden_input = torch.cat((hidden_lanes, sign_inputs),1)\n\n #if self.is_recurrent:\n # x, rnn_hxs = self._forward_gru(x, rnn_hxs, masks)\n\n hidden_critic = self.critic(hidden_input)\n hidden_actor = self.actor(hidden_input)\n\n return self.critic_linear(hidden_critic), hidden_actor, rnn_hxs\n\n\nclass MLPBase(NNBase):\n def __init__(self, num_inputs, recurrent):\n\n hidden_size = 256\n\n super(MLPBase, self).__init__(recurrent, num_inputs, hidden_size)\n\n init_ = lambda m: init(m,\n nn.init.orthogonal_,\n lambda x: nn.init.constant_(x, 0),\n nn.init.calculate_gain('relu'))\n\n self.actor = nn.Sequential(\n init_(nn.Linear(num_inputs, hidden_size)),\n nn.ReLU(),\n init_(nn.Linear(hidden_size, hidden_size)),\n nn.ReLU()\n )\n\n self.critic = nn.Sequential(\n init_(nn.Linear(num_inputs, hidden_size)),\n nn.ReLU(),\n init_(nn.Linear(hidden_size, hidden_size)),\n nn.ReLU()\n )\n\n self.critic_linear = init_(nn.Linear(hidden_size, 1))\n\n self.train()\n\n def forward(self, occ_inputs, sign_inputs, rnn_hxs, masks):\n x = sign_inputs\n\n if self.is_recurrent:\n x, rnn_hxs = self._forward_gru(x, rnn_hxs, masks)\n hidden_critic = self.critic(x)\n hidden_actor = self.actor(x)\n\n return self.critic_linear(hidden_critic), hidden_actor, rnn_hxs\n","sub_path":"a2c_ppo_acktr/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":8203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"308210293","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\nimport csv\nimport posixpath\nfrom pathlib import Path\nfrom re import sub\nfrom re import compile\n\n\n# fishshortcuts = \"\"\nqute_shortcuts = \"\"\nranger_shortcuts = \"\"\nbash_shortcuts = \"\"\n# zsh_shortcuts has the same syntax as bash_shortcuts\nnautilus_shortcuts = \"\"\n\nhome = str(Path.home()) + \"/\"\n\nranger_location = home + \".config/ranger/rc.conf\"\nbash_location = home + \".bashrc\"\nzsh_location = home + \".zshrc\"\nqute_location = home + \".config/qutebrowser/config.py\"\nnautilus_location = home + \".config/gtk-3.0/bookmarks\"\n\n\n# These are the labels that demarcate where the shortcuts\n# go in the config files.\nbeg = \"# DO NOT DELETE LMAO\\n\"\nend = \"# DO NOT DELETE LMAO\"\n\n\n# First we open the list of folder shortcuts and go down each line adding each\n# in the required syntax to each of the three configs:\nwith open(home+\".scripts/folders\") as fold:\n for line in csv.reader(fold, dialect=\"excel-tab\"):\n # Adds the ranger go, tab, move and yank commands:\n ranger_shortcuts += (\"map g\" + line[0]\n + \" cd \" + 
line[1]\n                             + \"\n\")\n\n        ranger_shortcuts += (\"map t\" + line[0]\n                             + \" tab_new \" + line[1]\n                             + \"\n\")\n\n        ranger_shortcuts += (\"map m\" + line[0]\n                             + \" shell mv %s \" + line[1]\n                             + \"\n\")\n\n        ranger_shortcuts += (\"map Y\" + line[0]\n                             + \" shell cp -r %s \" + line[1]\n                             + \"\n\")\n\n        # Adds the bash_shortcuts shortcuts:\n        bash_shortcuts += (\"alias \" + line[0]\n                           + \"=\\\"cd \" + line[1]\n                           + \" && ls -a\\\"\"\n                           + \"\n\")\n\n        # qutebrowser shortcuts:\n        qute_shortcuts += (\"config.bind(';\" + line[0]\n                           + \"', 'set downloads.location.directory \" + line[1]\n                           + \" ;; hint links download')\"\n                           + \"\n\")\n\n        # nautilus bookmarks\n        nautilus_shortcuts += (\"file://\" + posixpath.expanduser(line[1])\n\t\t\t + \"\n\")\n\n\n# Goes through the config file and adds the shortcuts to both\n# bash_shortcuts and ranger.\nwith open(home + \".scripts/configs\") as conf:\n    for line in csv.reader(conf, dialect=\"excel-tab\"):\n        # fishshortcuts+=(\"alias \"+line[0]+\"=\\\"vim \"+line[1]+\"\\\"\\n\")\n        # fishshortcuts+=(\"abbr --add \"+line[0]+\" \\\"vim \"+line[1]+\"\\\"\\n\")\n        bash_shortcuts += (\"alias \" + line[0]\n                           + \"=\\\"vim \" + line[1]\n                           + \"\\\"\"\n                           + \"\n\")\n\n        ranger_shortcuts += (\"map \" + line[0] + \" shell vim \" + line[1] + \"\n\")\n\n\ndef replaceInMarkers(text, shortcuts):\n    markers = compile(beg+\"(.|\\s)*\"+end)\n    replacement = beg+shortcuts+end\n    return sub(markers, replacement, text)\n\n\ndef writeShortcuts(location, shortcuts):\n    # use a name that does not shadow the built-in input()\n    with open(location, \"r+\") as f:\n        final = \"\"\n        final += f.read()\n        final = replaceInMarkers(final, shortcuts)\n        f.seek(0)\n        f.write(final)\n        f.truncate()\n\n\ndef main():\n    writeShortcuts(ranger_location, ranger_shortcuts)\n    writeShortcuts(bash_location, bash_shortcuts)\n    writeShortcuts(zsh_location, bash_shortcuts)\n    writeShortcuts(qute_location, qute_shortcuts)\n    writeShortcuts(nautilus_location, nautilus_shortcuts)\n\nif __name__ == '__main__':\n    main()\n","sub_path":"oldpython/shortcuts.py","file_name":"shortcuts.py","file_ext":"py","file_size_in_byte":3473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"609285076","text":"\n\nfrom PyQt5.QtCore import QDate, Qt\nfrom PyQt5.QtGui import QTextCharFormat\nfrom PyQt5.QtWidgets import *\nfrom worktimeCheckApp.Config.Config import *\nfrom worktimeCheckApp.DataBase.dbUtils import *\nfrom worktimeCheckApp.MainView.Dialog.timeSelectDlg import *\nimport datetime\n\n\n\n\nremainTime_one_week = 144000.0\nlunchTime = 3600.0\nt = ['월', '화', '수', '목', '금', '토', '일']\n\nclass MainView(QMainWindow):\n    def __init__(self):\n        super().__init__()\n        global db\n        props = PropertiesReader()\n        db = dbUtils(props)\n\n        self.mainUI = createMainUi()\n        self.setCentralWidget(self.mainUI)\n        self.setWindowTitle(\"Henry's WorkTime\")\n\n\n        self.resize(400, 300)\n        # self.statusBar().showMessage(self.mainUI.resultTime)\n\n\nclass calendar(QCalendarWidget):\n    def __init__(self):\n        super().__init__()\n        self.selectDay = str(datetime.datetime.now().date())\n        print(self.selectDay)\n        self.cal = QCalendarWidget()\n        self.cal.setVerticalHeaderFormat(0) # hide the vertical header\n        self.cal.installEventFilter(self)\n        self.cal.clicked[QDate].connect(self.selectDate)\n\n\n    def selectDate(self,date):\n        self.selectDay = str(date.toPyDate())\n        print(self.selectDay)\n\n\nclass createMainUi(QWidget):\n\n    def __init__(self):\n        super().__init__()\n        self.resultTime = self.convertTimeFormat(remainTime_one_week, \"%d:%02d:%02d\")\n        menubar = QMenuBar()\n        
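# keep the menu bar inside the window (macOS would otherwise use the native system menu bar)\n        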
menubar.setNativeMenuBar(False)\n self.selectDay = None\n self.cal = calendar()\n\n\n fm = QTextCharFormat()\n fm.setForeground(Qt.red)\n fm.setBackground(Qt.yellow)\n\n self.today = str(datetime.datetime.now().date())\n self.cal.setDateTextFormat(QDate.fromString(self.today, \"yyyy-MM-dd\"), fm)\n\n sublayout1 = QVBoxLayout()\n sublayout1.addWidget(self.cal.cal)\n\n label2 = QLabel('Worked Time!!')\n self.workTimeLabel = QLabel('Time')\n sublayout2 = QHBoxLayout()\n sublayout2.addWidget(label2)\n sublayout2.addWidget(self.workTimeLabel)\n\n label3 = QLabel('Remain Time!!')\n self.remainTimeLabel = QLabel('Time')\n sublayout3 = QHBoxLayout()\n sublayout3.addWidget(label3)\n sublayout3.addWidget(self.remainTimeLabel)\n\n button1 = QPushButton(\"Show work time\")\n button1.clicked.connect(self.getWorkTime)\n button2 = QPushButton(\"Remained work time (at week)\")\n button2.clicked.connect(self.calcremainTime)\n button3 = QPushButton(\"resetByDate\")\n button3.clicked.connect(self.resultByDate)\n button4 = QPushButton(\"Insert Work Time\")\n button4.clicked.connect(self.showDayInfo)\n grid_layout = QGridLayout(self)\n grid_layout.addLayout(sublayout1, 0, 0, 1, 4)\n grid_layout.addLayout(sublayout2, 1, 0, 1, 4)\n grid_layout.addLayout(sublayout3, 2, 0, 1, 4)\n grid_layout.addWidget(button1, 4, 0, 1, 1)\n grid_layout.addWidget(button2, 4, 1, 1, 1)\n grid_layout.addWidget(button3, 4, 2, 1, 1)\n grid_layout.addWidget(button4, 4, 3, 1, 1)\n\n\n def showDayInfo(self):\n dlg = timeSelectDialog()\n dlg.exec_()\n dlg.close()\n db.insertData(self.cal.selectDay, dlg.startWorkTime,dlg.endWorkTime,lunchTime)\n\n\n\n def getWorkTime(self):\n workTime = db.selectDay(self.cal.selectDay)\n workTime = self.convertTimeFormat(workTime, \"%d:%02d:%02d\")\n self.workTimeLabel.setText(workTime)\n\n\n def resultByDate(self):\n db.resetByDate(self.cal.selectDay)\n global startWorkTime\n startWorkTime = None\n print(\"reset Data is date = \" + self.cal.selectDay)\n\n def calcremainTime(self):\n print(str(datetime.datetime.now().date()).split('-')[2])\n r = datetime.datetime.today().weekday()\n today = str(datetime.datetime.now().date()).split('-')\n theDay = int(today[2])-r\n print(theDay)\n startDay = str(today[0]+\"-\"+today[1]+\"-\"+str(int(today[2])-r))\n reformat = QDate.fromString(startDay, \"yyyy-MM-dd\")\n print(reformat.toString(\"yyyy-MM-dd\"))\n\n time = db.calcRemainTime(reformat.toString(\"yyyy-MM-dd\"))\n self.resultTime = self.convertTimeFormat(remainTime_one_week-time, \"%d:%02d:%02d\")\n self.remainTimeLabel.setText(self.resultTime)\n\n def convertTimeFormat(self, time, format):\n resultTime = \"0\"\n if(time != None):\n m, s = divmod(time, 60)\n h, m = divmod(m, 60)\n resultTime = format % (h, m, s)\n print(resultTime)\n return resultTime\n\n\n","sub_path":"worktimeCheckApp/MainView/MainView.py","file_name":"MainView.py","file_ext":"py","file_size_in_byte":4609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"231092062","text":"from __future__ import (absolute_import, division, print_function, unicode_literals)\n\nimport re\nimport numpy as np\n\nMISSING = float('inf')\n\n\ndef load_arff(filename):\n\t\"\"\"Load matrix from an ARFF file\"\"\"\n\tdata = []\n\tattr_names = []\n\tstr_to_enum = []\n\tenum_to_str = []\n\treading_data = False\n\n\trows = [] # we read data into array of rows, then convert into array of columns\n\n\tf = open(filename)\n\tfor line in f.readlines():\n\t\tline = line.rstrip()\n\t\tif len(line) > 0 and line[0] != 
'%':\n\t\t\tif not reading_data:\n\t\t\t\tif line.lower().startswith(\"@relation\"):\n\t\t\t\t\tdataset_name = line[9:].strip()\n\t\t\t\telif line.lower().startswith(\"@attribute\"):\n\t\t\t\t\tattr_def = line[10:].strip()\n\t\t\t\t\tif attr_def[0] == \"'\":\n\t\t\t\t\t\tattr_def = attr_def[1:]\n\t\t\t\t\t\tattr_name = attr_def[:attr_def.index(\"'\")]\n\t\t\t\t\t\tattr_def = attr_def[attr_def.index(\"'\") + 1:].strip()\n\t\t\t\t\telse:\n\t\t\t\t\t\tsearch = re.search(r'(\\w*)\\s*(.*)', attr_def)\n\t\t\t\t\t\tattr_name = search.group(1)\n\t\t\t\t\t\tattr_def = search.group(2)\n\t\t\t\t\t\t# Remove white space from attribute values\n\t\t\t\t\t\tattr_def = \"\".join(attr_def.split())\n\n\t\t\t\t\tattr_names += [attr_name]\n\n\t\t\t\t\t# lookup tables for this attribute's discrete values; named so they\n\t\t\t\t\t# do not shadow the per-attribute accumulator lists defined above\n\t\t\t\t\tattr_str_to_enum = {}\n\t\t\t\t\tattr_enum_to_str = {}\n\t\t\t\t\tif not (\n\t\t\t\t\t\t\tattr_def.lower() == \"real\" or attr_def.lower() == \"continuous\" or attr_def.lower() == \"integer\"):\n\t\t\t\t\t\t# attribute is discrete\n\t\t\t\t\t\tassert attr_def[0] == '{' and attr_def[-1] == '}'\n\t\t\t\t\t\tattr_def = attr_def[1:-1]\n\t\t\t\t\t\tattr_vals = attr_def.split(\",\")\n\t\t\t\t\t\tval_idx = 0\n\t\t\t\t\t\tfor val in attr_vals:\n\t\t\t\t\t\t\tval = val.strip()\n\t\t\t\t\t\t\tattr_enum_to_str[val_idx] = val\n\t\t\t\t\t\t\tattr_str_to_enum[val] = val_idx\n\t\t\t\t\t\t\tval_idx += 1\n\n\t\t\t\t\tenum_to_str.append(attr_enum_to_str)\n\t\t\t\t\tstr_to_enum.append(attr_str_to_enum)\n\n\t\t\t\telif line.lower().startswith(\"@data\"):\n\t\t\t\t\treading_data = True\n\n\t\t\telse:\n\t\t\t\t# reading data\n\t\t\t\trow = []\n\t\t\t\tval_idx = 0\n\t\t\t\t# print(\"{}\".format(line))\n\t\t\t\tvals = line.split(\",\")\n\t\t\t\tfor val in vals:\n\t\t\t\t\tval = val.strip()\n\t\t\t\t\tif not val:\n\t\t\t\t\t\traise Exception(\"Missing data element in row with data '{}'\".format(line))\n\t\t\t\t\telse:\n\t\t\t\t\t\trow += [float(MISSING if val == \"?\" else str_to_enum[val_idx].get(val, val))]\n\n\t\t\t\t\tval_idx += 1\n\n\t\t\t\trows += [row]\n\n\tf.close()\n\treturn rows","sub_path":"data_utils.py","file_name":"data_utils.py","file_ext":"py","file_size_in_byte":2129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"139383070","text":"from PyQt4.QtGui import QImage\nimport numpy as np\n\n\ndef qimage_to_numpy(qimage):\n    \"\"\" Returns an RGB array given any image. \"\"\"\n\n    # Each row in a bitmap is stored in a size that is a multiple of 4 bytes\n    # If a row has fewer bytes, it is padded with 0s\n    # a 32 bit format ensures that we can get away with dealing with padding\n    qimage = qimage.convertToFormat(QImage.Format_ARGB32)\n    w, h = qimage.width(), qimage.height()\n    string = qimage.bits().asstring(qimage.numBytes())\n\n    arr = np.fromstring(string, dtype=np.uint8)\n    arr = arr.reshape(h, w, 4)\n    arr = arr[..., 0:3]\n\n    c0 = arr[..., 0].copy()\n    c2 = arr[..., 2].copy()\n\n    arr[..., 0] = c2\n    arr[..., 2] = c0\n    return arr\n\n\ndef numpy_to_qimage(array):\n    \"\"\" Returns QImage from an RGB array. \"\"\"\n\n    rows, cols, channels = array.shape\n    array4 = np.zeros((rows, cols, 4), dtype=np.uint8)\n    array4[..., 0:3] = array\n    array4[..., 3] = 255\n\n    c0 = array[..., 0].copy()\n    c2 = array[..., 2].copy()\n\n    array4[..., 0] = c2\n    array4[..., 2] = c0\n\n    string = array4.tostring()\n\n    img = QImage(string, cols, rows, QImage.Format_ARGB32)\n    return img\n\n\ndef rgb_to_gray(arr):\n    \"\"\" Converts an RGB numpy array to grayscale. \"\"\"\n    
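# ITU-R BT.601 luma weights: 0.299 R + 0.587 G + 0.114 B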
\"\"\"\n\n r = arr[:, :, 0].astype(np.float)\n g = arr[:, :, 1].astype(np.float)\n b = arr[:, :, 2].astype(np.float)\n\n garr = 0.299*r + 0.587*g + 0.114*b\n return garr.astype(np.uint8)\n\n\ndef gray_to_rgb(arr):\n \"\"\" Converts intensity array to RGB. \"\"\"\n\n return np.dstack([arr, arr, arr])\n\n\ndef fft_to_qimage(arr):\n \"Converts frquency spectrum image to displayable qimage\"\n\n arr = np.fft.fftshift(arr)\n magnitude = np.absolute(arr)**2\n magnitutde_log = np.log(magnitude)\n mn = magnitutde_log.min()\n mx = magnitutde_log.max()\n norm_img = 255*(magnitutde_log - mn)/(mx - mn)\n norm_img = norm_img.astype(np.uint8)\n rgb_image = gray_to_rgb(norm_img)\n return numpy_to_qimage(rgb_image)\n","sub_path":"fredo/util/numpy_util.py","file_name":"numpy_util.py","file_ext":"py","file_size_in_byte":1959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"498539149","text":"from gensim.models import KeyedVectors\nimport nltk\nimport numpy as np\nimport pandas as pd\n\n\nclass TextFeature:\n\tdef __init__(self, word2vec_model='', lexicon_nrc0=''):\n\t\t# Decide to use word2vec as feature or not\n\t\tif word2vec_model:\n\t\t\tself.word2vec = KeyedVectors.load_word2vec_format(fname=word2vec_model, binary=True)\n\n\t\tself.lexicon = {}\n\t\t# Decide to use NRCv0.92 lexicon as feature or not\n\t\tif lexicon_nrc0:\n\t\t\tself.lexicon['nrc0'] = pd.read_csv(lexicon_nrc0, engine='c', encoding='utf-8').set_index('English (en)').T.to_dict('list')\n\n\tdef text_w2v(self, word_tokens):\n\t\t\"\"\"\n\t\tTurns text to vector\n\t\t:param word_tokens: (Array of String) words of text\n\t\t:return: average vector of all words\n\t\t\"\"\"\n\n\t\t# Return false if word2vec model is not loaded\n\t\ttry:\n\t\t\tvocab = self.word2vec.vocab\n\t\texcept Exception as e:\n\t\t\tprint('This instance of class doesn\\'t use word2vec')\n\t\t\treturn False\n\t\t# calculate average of vectors for each word that exists in both text and word2vec model. Other words are ignored\n\t\tvector_sum = np.array([0] * self.word2vec.vector_size)\n\t\tnumber_of_words = 0\n\t\tfor word in word_tokens:\n\t\t\tif word in vocab:\n\t\t\t\tvector_sum = vector_sum + self.word2vec[word]\n\t\t\t\tnumber_of_words = number_of_words + 1\n\t\t# If there is at least 1 word in text return the result\n\t\tif number_of_words == 0:\n\t\t\treturn False\n\t\treturn vector_sum/number_of_words\n\n\tdef text_l2v(self, word_tokens, lexicon_name, vector_size):\n\t\t\"\"\"\n\t\tTurns text to lexicon vector\n\t\t:param word_tokens: (Array of String) words of text\n\t\t:param lexicon_name: (String)\n\t\t:param vector_size: (Int) usable feature size of lexicon for each word\n\t\t:return: average lexicon vector of all words\n\t\t\"\"\"\n\n\t\t# Return false if lexicon is not loaded\n\t\ttry:\n\t\t\tlexicon = self.lexicon[lexicon_name]\n\t\texcept Exception as e:\n\t\t\tprint('This instance of class doesn\\'t use ' + lexicon_name)\n\t\t\treturn False\n\t\t# Stem words\n\t\tstemmer = nltk.stem.PorterStemmer()\n\t\tstemmed_words = []\n\t\tfor word in word_tokens:\n\t\t\tstemmed_words.append(stemmer.stem(word))\n\t\tstemmed_words = np.array(stemmed_words)\n\t\t# List of words in lexicon\n\t\tlexicon_words = list(lexicon.keys())\n\t\t# calculate average of vectors for each word that exists in both text and lexicon. 
Other words are ignored\n\t\tvector_sum = np.array([0] * vector_size)\n\t\tnumber_of_words = 0\n\t\tfor word in stemmed_words:\n\t\t\tif word in lexicon_words:\n\t\t\t\tvector_sum = vector_sum + np.array(lexicon[word][0:vector_size])\n\t\t\t\tnumber_of_words = number_of_words + 1\n\t\t# If there is at least 1 word in text return the result\n\t\tif number_of_words == 0:\n\t\t\treturn False\n\t\treturn vector_sum / number_of_words\n","sub_path":"build/lib/textfeature/TextFeature.py","file_name":"TextFeature.py","file_ext":"py","file_size_in_byte":2605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"119178700","text":"#!/usr/bin/env python3\n\nfrom ast import literal_eval as make_tuple\nimport psycopg2\nfrom datetime import datetime, timedelta\nimport os\nimport time\n\nminimum_time=timedelta(seconds=10) # time between noise and air in DB, so should be bigger than 10sec\n\ndef main():\n\tglobal connect, cursor\n\n\tid_query=[]\n\n\tSQL = '''SELECT * FROM omsk.noise_to_air ORDER BY id ASC;'''\n\tcursor.execute(SQL)\n\tlast_ID_list=cursor.fetchall()\n\n\tSQL = '''SELECT (time_track) FROM omsk.tracks ORDER BY time_track desc LIMIT 1;'''\n\tcursor.execute(SQL)\n\tlast_time_air= cursor.fetchall()\n\n\t# print('LAST TIME TRACK', last_time_air)\n\n\tif len(last_time_air)>0: # the table can be empty..\n\t\tlast_time_air=last_time_air[0][0]\n\n\t\twe_have_data=False\n\t\tfor ID in last_ID_list:\n\n\t\t\t## separate query to not have a headache with formatiing.. since string+datetime..\n\t\t\tSQL='''SELECT (base_name) FROM omsk.noise WHERE id = (%s)'''\n\t\t\tdata = ID\n\t\t\tcursor.execute(SQL, data)\n\t\t\tbase_name = cursor.fetchall()[0][0]\n\n\t\t\tSQL='''SELECT (time_noise) FROM omsk.noise WHERE id = (%s)'''\n\t\t\tdata = ID\n\t\t\tcursor.execute(SQL, data)\n\t\t\ttime_noise = cursor.fetchall()[0][0]\n\t\t\t\n\t\t\t# print('BASE NAME', base_name)\n\t\t\t# print('TIME NOISE', time_noise)\n\n\t\t\tif last_time_air-time_noise>=minimum_time:#minimum_time: # if noise_time is greater than noise datetime by 10 sec -> fine\n\t\t\t\t# if base_name == VNK001 -> use distance_1, VNK002 -> distance_2\n\t\t\t\t# print('10 SEC PASSED!')\n\n\t\t\t\tSQL={\n\t\t\t\t\t'OMSK001': '''SELECT (track, distance_1, time_track) FROM omsk.tracks WHERE time_track >= (%s)::timestamp - INTERVAL '10 seconds' and time_track <= (%s)::timestamp + INTERVAL '10 seconds' and distance_1 IS NOT NULL ORDER BY distance_1 asc LIMIT 1;''',\n\t\t\t\t\t'OMSK002': '''SELECT (track, distance_2, time_track) FROM omsk.tracks WHERE time_track >= (%s)::timestamp - INTERVAL '10 seconds' and time_track <= (%s)::timestamp + INTERVAL '10 seconds' and distance_2 IS NOT NULL ORDER BY distance_2 asc LIMIT 1;''',\n\t\t\t\t\t'OMSK003': '''SELECT (track, distance_3, time_track) FROM omsk.tracks WHERE time_track >= (%s)::timestamp - INTERVAL '10 seconds' and time_track <= (%s)::timestamp + INTERVAL '10 seconds' and distance_3 IS NOT NULL ORDER BY distance_3 asc LIMIT 1;''',\n\t\t\t\t\t'OMSK004': '''SELECT (track, distance_4, time_track) FROM omsk.tracks WHERE time_track >= (%s)::timestamp - INTERVAL '10 seconds' and time_track <= (%s)::timestamp + INTERVAL '10 seconds' and distance_4 IS NOT NULL ORDER BY distance_4 asc LIMIT 1;'''\t\t\t\t\t\t\n\t\t\t\t}\n\n\t\t\t\tSQL=SQL[base_name]\n\t\t\t\tdata = (time_noise, time_noise)\n\t\t\t\tcursor.execute(SQL, data)\n\t\t\t\ttrack_distance= cursor.fetchall()\n\n\t\t\t\t# print('TRACK DISTANCE', track_distance)\n\n\t\t\t\twe_have_data=True\n\t\t\t\tif 
len(track_distance)==0: ## no aircraft within 10 sec..\n\t\t\t\t\tSQL='''\n\t\t\t\t\t\tDELETE FROM omsk.noise_to_air WHERE id = (%s);\n\t\t\t\t\t'''\n\t\t\t\t\tdata=ID\n\t\t\t\t\t# print('NO aircraft within 10s')\n\t\t\t\telse:\n\t\t\t\t\tSQL='''\n\t\t\t\t\t\tUPDATE omsk.noise SET (track, distance, aircraft_time) = (%s, %s, %s) WHERE id=(%s);\n\t\t\t\t\t\tDELETE FROM omsk.noise_to_air WHERE id = (%s);\n\t\t\t\t\t'''\n\t\t\t\t\tdata=make_tuple(track_distance[0][0])+ID+ID\n\t\t\t\t\t# print('update noise ID: ', ID)\n\n\t\t\t\tid_query.append(cursor.mogrify(SQL, data).decode('utf-8'))\n\n\t\tif we_have_data:\n\t\t\tfull_query_from_ID_list=''.join([x for x in id_query])\n\t\t\tcursor.execute(full_query_from_ID_list)\n\t\t\tconnect.commit()\n\n\n\n\t# \t# CHECK, THAT WE HAVE time in air > than this noise time (means potential aircraft already in DB)\n\t\t\n\t# \tSQL = '''\n\t# \tSELECT (time_track) FROM eco.tracks ORDER BY time_track desc LIMIT 1;\n\t# \t'''\n\n\t# \tcursor.execute(SQL)\n\t# \tlast_time_air= cursor.fetchall()\n\t# \t# print(last_time_air1)\n\t# \tif len(last_time_air)>0: # the table can be empty..\n\t# \t\tlast_time_air=last_time_air[0][0]\n\n\t# \telse:\n\t# \t\tshutil.move(file_temp, path+\"/no_aircraft/\"+file_temp)\n\t# \t\tcontinue # go to next file\n\t# \t# print('LAST AIR:', last_time_air)\n\t# \t# print('CURRENT NOISE:', sql_query[0])\n\n\t# \tif last_time_air-sql_query[0]>minimum_time:#minimum_time: # if noise_time is greater than noise datetime by 10 sec -> fine\n\n\t# \t\t# dont forget make index on time_track by CREATE INDEX idx_time_track ON eco.tracks ( time_track ); check index, via \\d eco.tracks;\n\t# \t\t# CREATE INDEX time_dist_track ON eco.tracks (time_track, distance_1, track);\n\n\t# \t\tSQL='''\n\t# \t\tSELECT (track, distance_1, time_track)\n\t# \t\t FROM eco.tracks \n\t# \t\t WHERE time_track >= (%s)::timestamp - INTERVAL '10 seconds'\n\t# \t\t and time_track <= (%s)::timestamp + INTERVAL '10 seconds'\n\t# \t\t and distance_1 IS NOT NULL\n\t# \t\tORDER BY distance_1 asc LIMIT 1;\n\t# \t\t'''\n\t# \t\t# print('SQL QUERY', sql_query)\n\t# \t\tdata = (sql_query[0],sql_query[0])\n\t# \t\tcursor.execute(SQL, data)\n\t# \t\tanswer= cursor.fetchall()\n\t\t\n\t# \t\tif len(answer)==0: ## \n\t# \t\t\tshutil.move(file_temp, path+\"/no_aircraft/\"+file_temp)\n\t# \t\t\ttrack_distance=(None,None,None)\n\t# \t\telse:\n\t# \t\t\twe_have_data=True\n\t# \t\t\ttrack_distance=make_tuple(answer[0][0])\n\t# \t\t\tos.remove(file_temp)\n\n\n\t# \t\tSQL = '''\n\t# \t\tINSERT INTO eco.noise (\n\t# \t\ttime_noise, base_name, stat_1, stat_2, stat_3, leq, slow, spectrum, meteo_stat, temperature,\n\t# \t\thumadity, presure, wind, dir, gps_coordinate, gps_stat, temperature_core, temperature_mb, temperature_hdd, free_hdd,\n\t# \t\tups_stat, ups_mode, ups_time, track, distance, aircraft_time) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s, %s,%s,%s,%s,%s,%s,%s,%s,%s,%s, %s,%s,%s,%s,%s,%s) ON CONFLICT DO NOTHING;\n\t# \t\t'''\n\t# \t\tdata = sql_query + track_distance\n\t# \t\tnoise_query.append(cursor.mogrify(SQL, data).decode('utf-8'))\n\n\n\n\n\t# # do it after processing of the all txt list \n\t# if we_have_data:\n\t# \tfull_noise_query=''.join([x for x in noise_query])\n\t\t\n\t# \t# print(full_noise_query)\n\t# \tcursor.execute(full_noise_query)\n\t# \tconnect.commit()\n\n\n\n\t# except psycopg2.ProgrammingError as e:\n\t# \tconnect.rollback()\n\t# \tprint('something wrong with file at time' +str(e) + str(datetime.now().strftime('%Y-%m-%d-%H-%M-%S')))\n\t# \tshutil.move(txt_temp, 
path+\"/wrong_files/\")\n\t# # except psycopg2.InterfaceError as e:\n\t# \t# print('AZAZA')\n\n\t# except Exception as e:\n\t# \tprint(str(e))\n\t# \ttry:\n\t# \t\tconnect = psycopg2.connect(database='eco_db', user='postgres', host='localhost', password='z5UHwrg8', port=5432)\n\t# \t\tcursor = connect.cursor()\n\t# \texcept Exception as e:\n\t# \t\tprint(str(e))\n\n\n\n\n\n\n\n\n\ntry:\n\tconnect = psycopg2.connect(database='eco_db', user='postgres', host='localhost', password='z5UHwrg8', port=5432)\n\tcursor = connect.cursor()\nexcept Exception as err:\n\tprint('SQL connect problem')\n\n\n\nwhile True:\n\ttime.sleep(10)\n\tmain()\n\n# print('FTP_PARSE start working'+str(datetime.now().strftime('%Y-%m-%d %H:%M:%S'))) #str(datetime.now().strftime('%Y-%m-%d %H:%M:%S')))\n\n\n# class ExampleHandler(FileSystemEventHandler):\n# def on_created(self, event): # when file is created\n# main()\n\n# observer = Observer()\n# event_handler = ExampleHandler() # create event handler\n# # set observer to use created handler in directory\n# observer.schedule(event_handler, path=path)\n# observer.start()\n\n# # sleep until keyboard interrupt, then stop + rejoin the observer\n# try:\n# while True:\n# time.sleep(1)\n# except KeyboardInterrupt:\n# observer.stop()\n\n# observer.join()\n\n\n","sub_path":"SERVER/OMSK_signer.py","file_name":"OMSK_signer.py","file_ext":"py","file_size_in_byte":7169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"566102963","text":"import pygame\nfrom settings import Settings\nimport game_functions as gf\nfrom pygame.sprite import Group\nfrom ship import Ship\nfrom target import Target\nfrom button import Button\n\ndef run_game():\n\t# Utworzenie ekranu gry.\n\tag_settings = Settings()\n\tscreen = pygame.display.set_mode((ag_settings.screen_width, \n\tag_settings.screen_height))\n\tpygame.display.set_caption('Another Game')\n\t\t\n\t# Utworzenie statku, grupy pocisku i celu.\n\tship = Ship(ag_settings, screen)\n\ttarget = Target(ag_settings, screen)\n\tbullets = Group()\n\t# Utworzenie przycisku startującego grę.\n\tplay_button = Button(screen)\n\t\n\twhile True:\n\t\tgf.check_events(ship, bullets, screen, ag_settings, play_button,\n\t\t target)\n\t\tgf.update_screen(screen, ag_settings, ship, bullets, target,\n\t\t play_button)\n\t\tif ag_settings.game_active:\n\t\t\tgf.move_items(ship, bullets, target, ag_settings)\n\t\t\n\t\t\nrun_game()\n","sub_path":"another_game/another_game.py","file_name":"another_game.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"156573138","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Sep 14 08:30:59 2018\n\n@author: student\n\"\"\"\n\ndef minion_game(string):\n vowels,kevsc,stusc = 'AEIOU',0,0\n for i in range(len(string)):\n if string[i] in vowels:\n kevsc += (len(string)-i)\n else:\n stusc += (len(string)-i)\n if kevsc > stusc:\n print (\"Kevin\", kevsc)\n elif kevsc < stusc:\n print (\"Stuart\", stusc)\n else:\n print (\"Draw\")\n\nminion_game('BANANA')","sub_path":"Free Prac/minion_game.py","file_name":"minion_game.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"554050693","text":"class Solution:\n def assignBikes(self, workers: List[List[int]], bikes: List[List[int]]) -> List[int]:\n distances = []\n for i in range(len(workers)):\n for j in range(len(bikes)):\n 
dis = abs(workers[i][0] - bikes[j][0]) + abs(workers[i][1] - bikes[j][1])\n                distances.append([dis, i, j])\n        \n        \n        distances = sorted(distances)\n        \n        ans = [0 for _ in range(len(workers))]\n        w_used = [0 for _ in range(len(workers))]\n        b_used = [0 for _ in range(len(bikes))]\n        cnt = 0\n        for dis in distances:\n            w_i = dis[1]\n            b_i = dis[2]\n            \n            if w_used[w_i] == 0 and b_used[b_i] == 0:\n                w_used[w_i] = 1\n                b_used[b_i] = 1\n                ans[w_i] = b_i\n                cnt += 1\n            \n            if cnt == len(workers): # every worker has a bike; stop early\n                break\n        return ans\n","sub_path":"submission/python/python/1057.py","file_name":"1057.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"22404784","text":"# -*- coding: utf-8 -*-\r\n\"\"\"Flask app for catalog server.\r\n\r\nThis submodule creates a logger named like itself (catalog.app) that logs\r\ninformation on the progress of the sign in procedure to a NullHandler.\r\nLog output can be redirected as desired by the application importing the\r\nsubmodule.\r\n\r\nWritten by Nikolaus Ruf\r\n\"\"\"\r\n\r\nfrom flask import Flask, redirect, url_for, flash, request, session, \\\r\n    make_response\r\nimport json\r\nimport logging\r\nfrom oauth2client import client\r\nimport os\r\nimport requests\r\n\r\n\r\nlogger = logging.getLogger(__name__)\r\nlogger.addHandler(logging.NullHandler())\r\n# log output is configured for the root logger in the startup script\r\n\r\napp = Flask(__name__)\r\n\r\n\r\ndef get_token():\r\n    \"\"\"Generate a random 16 byte token encoded as a 32 character hex string.\r\n\r\n    :return: string; URL-safe token\r\n    \"\"\"\r\n    return os.urandom(16).encode(\"hex\")\r\n\r\n\r\ndef get_client_id(file_name):\r\n    \"\"\"Retrieve the client ID from the client secret file.\r\n\r\n    The ID is inserted into each web page template as part of the Google OAuth2\r\n    sign in configuration.\r\n\r\n    :param file_name: string; file name with full or relative path\r\n    :return: string; client ID\r\n    \"\"\"\r\n    with open(file_name, \"r\") as file_:\r\n        client_id = json.load(file_)[\"web\"][\"client_id\"]\r\n    return client_id\r\n\r\n\r\n@app.route(\"/\")\r\ndef serve_main_page():\r\n    \"\"\"Serve main page.\r\n\r\n    :return: HTML code for page\r\n    \"\"\"\r\n    session[\"state\"] = get_token()\r\n    # each page has a sign-in button, so each page gets a state token to\r\n    # protect against CSRF; on pages with HTML forms, the same state token is\r\n    # used to protect the form submission\r\n    return app.config[\"content\"].render_main_page(\r\n        client_id=get_client_id(app.config[\"google_client_secret_file\"]),\r\n        state=session[\"state\"],\r\n        user_id=session.get(\"user_id\", None),\r\n        user_name=session.get(\"user_name\", None)\r\n    )\r\n\r\n\r\n@app.route(\"/sign_in\", methods=[\"POST\"])\r\ndef handle_sign_in():\r\n    \"\"\"Handle sign in request.\r\n\r\n    This function logs every step since it helps with debugging the workflow.\r\n\r\n    :return: sign in response\r\n    \"\"\"\r\n    logger.info(\"Attempt sign in.\")\r\n    logger.info(\"Check header.\")\r\n    if not request.headers.get('X-Requested-With'):\r\n        # this check is recommended by Google as an extra safeguard\r\n        logger.warn(\"Invalid request header, abort sign in.\")\r\n        response = make_response(json.dumps(\"Invalid request header.\"), 401)\r\n        response.headers[\"Content-Type\"] = \"application/json\"\r\n        return response\r\n\r\n    logger.info(\"Check anti-CSRF token.\")\r\n    if request.args.get(\"state\") != session[\"state\"]:\r\n        logger.warn(\"Token mismatch, abort sign in.\")\r\n        
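# the state token must match the one issued with the page; a mismatch suggests CSRF or a stale page\r\n        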
response = make_response(json.dumps(\"Token mismatch.\"), 401)\r\n        response.headers[\"Content-Type\"] = \"application/json\"\r\n        return response\r\n\r\n    logger.info(\"Request credentials.\")\r\n    try:\r\n        credentials = client.credentials_from_clientsecrets_and_code(\r\n            filename=app.config[\"google_client_secret_file\"],\r\n            scope=\"openid\",\r\n            code=request.data\r\n        )\r\n    except client.FlowExchangeError:\r\n        logger.warn(\"Failed to obtain credentials, abort sign in.\")\r\n        response = make_response(\r\n            json.dumps(\"Failed to obtain credentials.\"), 401\r\n        )\r\n        response.headers[\"Content-Type\"] = \"application/json\"\r\n        return response\r\n\r\n    logger.info(\"Revoke access token.\") # no need to keep it\r\n    response = requests.post(\r\n        'https://accounts.google.com/o/oauth2/revoke',\r\n        params={'token': credentials.access_token},\r\n        headers={'content-type': 'application/x-www-form-urlencoded'}\r\n    )\r\n    if response.status_code != 200:\r\n        logger.info(\"Failed to revoke token.\")\r\n        # this does not affect sign in since we know the user\r\n\r\n    logger.info(\"Check user ID against database.\")\r\n    auth_id = credentials.id_token[\"sub\"]\r\n    user_name = credentials.id_token[\"name\"]\r\n    user_id = app.config[\"content\"].get_user_id(auth_id)\r\n    if user_id is None:\r\n        # the content manager creates a new record for a non-existent user as\r\n        # long as the user ID is not the empty string after running it through\r\n        # bleach.clean() - so this should really not happen\r\n        logger.warn(\"Invalid user ID, abort sign in.\")\r\n        response = make_response(\r\n            json.dumps(\"Invalid user ID.\"), 401\r\n        )\r\n        response.headers[\"Content-Type\"] = \"application/json\"\r\n        return response\r\n\r\n    logger.info(\"Sign-in complete.\")\r\n    session[\"user_id\"] = user_id\r\n    session[\"user_name\"] = user_name\r\n    response = make_response(json.dumps('Sign in complete'), 200)\r\n    response.headers['Content-Type'] = 'application/json'\r\n    return response\r\n\r\n\r\n@app.route(\"/sign_out/\")\r\ndef handle_sign_out():\r\n    \"\"\"Handle sign out request.\r\n\r\n    :return: redirect to main page\r\n    \"\"\"\r\n    session.pop(\"user_id\", None)\r\n    session.pop(\"user_name\", None)\r\n    # revoking the OAuth2 token already happened during sign in since the app\r\n    # does not need the token for anything else\r\n    return redirect(url_for(\"serve_main_page\"))\r\n\r\n\r\n@app.route(\"/category/view/<int:category_id>/\")\r\ndef serve_category_page(category_id):\r\n    \"\"\"Serve page for one category.\r\n\r\n    :return: HTML code for page or redirect URL\r\n    \"\"\"\r\n    session[\"state\"] = get_token()\r\n    page = app.config[\"content\"].render_category_page(\r\n        client_id=get_client_id(app.config[\"google_client_secret_file\"]),\r\n        state=session[\"state\"],\r\n        user_id=session.get(\"user_id\", None),\r\n        user_name=session.get(\"user_name\", None),\r\n        category_id=category_id\r\n    )\r\n    if page is not None:\r\n        return page\r\n    # page can be None if the ID is invalid or something is wrong with the\r\n    # database; the content manager generates an appropriate flash message in\r\n    # this case\r\n    return redirect(url_for(\"serve_main_page\"))\r\n\r\n\r\n@app.route(\"/category/add/\", methods=[\"GET\", \"POST\"])\r\ndef serve_add_category_page():\r\n    \"\"\"Serve page for adding a category or handle POST request.\r\n\r\n    :return: HTML code for page or redirect URL\r\n    \"\"\"\r\n    user_id = session.get(\"user_id\", None)\r\n    # both GET and POST require sign in so we check it first\r\n    if user_id is None:\r\n        flash(\"You need to be logged in to edit content.\")\r\n        
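# anonymous visitors are sent back to the main page\r\n        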
return redirect(url_for(\"serve_main_page\"))\r\n if request.method == \"GET\":\r\n session[\"state\"] = get_token()\r\n return app.config[\"content\"].render_add_category_page(\r\n client_id=get_client_id(app.config[\"google_client_secret_file\"]),\r\n state=session[\"state\"],\r\n user_name=session.get(\"user_name\", None)\r\n )\r\n # POST\r\n if request.form.get(\"state\", default=\"\") != session[\"state\"]:\r\n # this can happen if somebody uses the 'back' button of the browser\r\n # since the state token is generated for every page\r\n flash(\"Sorry, the form data was stale. Please try again.\")\r\n return redirect(url_for(\"serve_add_category_page\"))\r\n category_id = app.config[\"content\"].add_category(\r\n name=request.form.get(\"name\", type=str), user_id=user_id\r\n )\r\n if category_id is None:\r\n return redirect(url_for(\"serve_main_page\"))\r\n return redirect(url_for(\"serve_category_page\", category_id=category_id))\r\n\r\n\r\n@app.route(\"/category/edit//\", methods=[\"GET\", \"POST\"])\r\ndef serve_edit_category_page(category_id):\r\n \"\"\"Serve page for editing a category or handle POST request.\r\n\r\n :param category_id: integer; category ID\r\n :return: HTML code for page or redirect URL\r\n \"\"\"\r\n user_id = session.get(\"user_id\", None)\r\n if user_id is None:\r\n flash(\"You need to be logged in to edit content.\")\r\n return redirect(url_for(\"serve_main_page\"))\r\n if request.method == \"GET\":\r\n session[\"state\"] = get_token()\r\n page = app.config[\"content\"].render_edit_category_page(\r\n client_id=get_client_id(app.config[\"google_client_secret_file\"]),\r\n state=session[\"state\"],\r\n user_id=user_id,\r\n user_name=session.get(\"user_name\", None),\r\n category_id=category_id\r\n )\r\n if page is not None:\r\n return page\r\n return redirect(url_for(\"serve_main_page\"))\r\n # POST\r\n if request.form.get(\"state\", default=\"\") != session[\"state\"]:\r\n flash(\"Sorry, the form data was stale. Please try again.\")\r\n return redirect(url_for(\"serve_edit_category_page\", id_=category_id))\r\n app.config[\"content\"].edit_category(\r\n category_id=category_id,\r\n name=request.form.get(\"name\", type=str),\r\n user_id=user_id\r\n )\r\n return redirect(url_for(\"serve_category_page\", category_id=category_id))\r\n\r\n\r\n@app.route(\"/category/delete//\", methods=[\"GET\", \"POST\"])\r\ndef serve_delete_category_page(category_id):\r\n \"\"\"Serve page for deleting a category or handle POST request.\r\n\r\n :param category_id: integer; category ID\r\n :return: HTML code for page or redirect URL\r\n \"\"\"\r\n user_id = session.get(\"user_id\", None)\r\n if user_id is None:\r\n flash(\"You need to be logged in to edit content.\")\r\n return redirect(url_for(\"serve_main_page\"))\r\n if request.method == \"GET\":\r\n session[\"state\"] = get_token()\r\n page = app.config[\"content\"].render_delete_category_page(\r\n client_id=get_client_id(app.config[\"google_client_secret_file\"]),\r\n state=session[\"state\"],\r\n user_id=user_id,\r\n user_name=session.get(\"user_name\", None),\r\n category_id=category_id\r\n )\r\n if page is not None:\r\n return page\r\n return redirect(url_for(\"serve_main_page\"))\r\n # POST\r\n if request.form.get(\"state\", default=\"\") != session[\"state\"]:\r\n flash(\"Sorry, the form data was stale. 
Please try again.\")\r\n return redirect(\r\n url_for(\"serve_delete_category_page\", category_id=category_id)\r\n )\r\n app.config[\"content\"].delete_category(category_id, user_id)\r\n return redirect(url_for(\"serve_main_page\"))\r\n\r\n\r\n@app.route(\"/item/view//\")\r\ndef serve_item_page(item_id):\r\n \"\"\"Serve page for one item.\r\n\r\n :param item_id: integer; item ID\r\n :return: HTML code for page\r\n \"\"\"\r\n session[\"state\"] = get_token()\r\n page = app.config[\"content\"].render_item_page(\r\n client_id=get_client_id(app.config[\"google_client_secret_file\"]),\r\n state=session[\"state\"],\r\n user_id=session.get(\"user_id\", None),\r\n user_name=session.get(\"user_name\", None),\r\n item_id=item_id\r\n )\r\n if page is not None:\r\n return page\r\n return redirect(url_for(\"serve_main_page\"))\r\n\r\n\r\n@app.route(\"/item/add/\", methods=[\"GET\", \"POST\"])\r\n@app.route(\"/item/add//\")\r\n# category ID as path parameter is only used with GET requests to pre-select\r\n# the category in the form; a POST request must provide it as a form parameter\r\ndef serve_add_item_page(category_id=-1):\r\n \"\"\"Serve page for adding an item or handle POST request.\r\n\r\n :param category_id: integer; category ID proposed for item or -1 to\r\n indicate no proposal\r\n :return: HTML code for page or redirect URL\r\n \"\"\"\r\n user_id = session.get(\"user_id\", None)\r\n if user_id is None:\r\n flash(\"You need to be logged in to edit content.\")\r\n return redirect(url_for(\"serve_main_page\"))\r\n if request.method == \"GET\":\r\n session[\"state\"] = get_token()\r\n page = app.config[\"content\"].render_add_item_page(\r\n client_id=get_client_id(app.config[\"google_client_secret_file\"]),\r\n state=session[\"state\"],\r\n user_id=user_id,\r\n user_name=session.get(\"user_name\", None),\r\n category_id=category_id\r\n )\r\n if page is not None:\r\n return page\r\n return redirect(url_for(\"serve_main_page\"))\r\n # POST\r\n if request.form.get(\"state\", default=\"\") != session[\"state\"]:\r\n flash(\"Sorry, the form data was stale. 
Please try again.\")\r\n return redirect(\r\n url_for(\"serve_add_item_page\", category_id=category_id)\r\n )\r\n item_id = app.config[\"content\"].add_item(\r\n name=request.form.get(\"name\", type=str),\r\n description=request.form.get(\"description\", type=str),\r\n category_id=int(request.form.get(\"category\", type=str)),\r\n user_id=user_id\r\n )\r\n if item_id is None:\r\n return redirect(url_for(\"serve_main_page\"))\r\n return redirect(url_for(\"serve_item_page\", item_id=item_id))\r\n\r\n\r\n@app.route(\"/item/edit//\", methods=[\"GET\", \"POST\"])\r\ndef serve_edit_items_page(item_id):\r\n \"\"\"Serve page for editing an item or handle POST request.\r\n\r\n :param item_id: integer; item ID\r\n :return: HTML code for page or redirect URL\r\n \"\"\"\r\n user_id = session.get(\"user_id\", None)\r\n if user_id is None:\r\n flash(\"You need to be logged in to edit content.\")\r\n return redirect(url_for(\"serve_main_page\"))\r\n if request.method == \"GET\":\r\n session[\"state\"] = get_token()\r\n page = app.config[\"content\"].render_edit_item_page(\r\n client_id=get_client_id(app.config[\"google_client_secret_file\"]),\r\n state=session[\"state\"],\r\n user_id=user_id,\r\n user_name=session.get(\"user_name\", None),\r\n item_id=item_id\r\n )\r\n if page is not None:\r\n return page\r\n return redirect(url_for(\"serve_main_page\"))\r\n # POST\r\n if request.form.get(\"state\", default=\"\") != session[\"state\"]:\r\n flash(\"Sorry, the form data was stale. Please try again.\")\r\n return redirect(url_for(\"serve_edit_items_page\", item_id=item_id))\r\n app.config[\"content\"].edit_item(\r\n item_id=item_id,\r\n name=request.form.get(\"name\", type=str),\r\n description=request.form.get(\"description\", type=str),\r\n category_id=int(request.form.get(\"category\", type=str)),\r\n user_id=user_id\r\n )\r\n return redirect(url_for(\"serve_item_page\", item_id=item_id))\r\n\r\n\r\n@app.route(\"/item/delete//\", methods=[\"GET\", \"POST\"])\r\ndef serve_delete_items_page(item_id):\r\n \"\"\"Serve page for deleting an item or handle POST request.\r\n\r\n :param item_id: integer; item ID\r\n :return: HTML code for page or redirect URL\r\n \"\"\"\r\n user_id = session.get(\"user_id\", None)\r\n if user_id is None:\r\n flash(\"You need to be logged in to edit content.\")\r\n return redirect(url_for(\"serve_main_page\"))\r\n if request.method == \"GET\":\r\n session[\"state\"] = get_token()\r\n page = app.config[\"content\"].render_delete_item_page(\r\n client_id=get_client_id(app.config[\"google_client_secret_file\"]),\r\n state=session[\"state\"],\r\n user_id=user_id,\r\n user_name=session.get(\"user_name\", None),\r\n item_id=item_id\r\n )\r\n if page is not None:\r\n return page\r\n return redirect(url_for(\"serve_main_page\"))\r\n # POST\r\n if request.form.get(\"state\", default=\"\") != session[\"state\"]:\r\n flash(\"Sorry, the form data was stale. 
Please try again.\")\r\n return redirect(url_for(\"serve_delete_items_page\", item_id=item_id))\r\n app.config[\"content\"].delete_item(item_id, user_id)\r\n return redirect(url_for(\"serve_main_page\"))\r\n\r\n\r\n@app.route(\"/json/categories/\")\r\n@app.route(\"/json/latest_items//\")\r\n@app.route(\"/json/category//\")\r\n@app.route(\"/json/item//\")\r\ndef serve_json(num=None, id_=None):\r\n \"\"\"Provide selected database content as JSON object.\r\n\r\n :param num: positive integer or None; request for latest items needs to\r\n provide a number of items to return\r\n :param id_: integer or None; requests for a specific category or item need\r\n to provide an ID\r\n :return: JSON response\r\n \"\"\"\r\n resource = request.path.split(\"/\")[2]\r\n content = app.config[\"content\"].get_content(resource, num, id_)\r\n if content is None:\r\n response = make_response(\r\n json.dumps(\"Requested content not found.\"), 404\r\n )\r\n response.headers[\"Content-Type\"] = \"application/json\"\r\n return response\r\n response = make_response(json.dumps(content), 200)\r\n # build response by hand and do not use flask.jsonify() as the latter\r\n # destroys the order in an ordered dict\r\n response.headers[\"Content-Type\"] = \"application/json\"\r\n return response\r\n","sub_path":"vagrant/catalog/catalog/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":16539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"437856248","text":"#!/hpcf/apps/python/install/2.7.13/bin/python\nimport sys\nimport os\np_dir = os.path.dirname(os.path.realpath(__file__)) + \"/\"\nsys.path.append(os.path.abspath(p_dir+\"../utils/\"))\nfrom utils import *\n\n\n\"\"\"\n\nmyBaseName=$(basename -- ${COL1})\n\nsort -k1,1 -k2,2n ${COL1} > {{jid}}/${myBaseName}.sorted\ncd {{jid}}\nmodule load ucsc/041619\nbedGraphToBigWig ${myBaseName}.sorted {{chrom_size}} ${myBaseName%.sorted}.bw\n\n\"\"\"\n\ndef bdg_to_bw(output,chrom_size):\n\tos.system(\"sort -k1,1 -k2,2n %s.bdg > %s.sorted\"%(output,output))\n\tos.system(\"bedGraphToBigWig %s.sorted %s %s.bw;rm %s.sorted\"%(output,chrom_size,output,output))\n\ndef my_args():\n\tmainParser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\tmainParser.add_argument('-o',\"--output\", help=\"output file name, output_FDR.bw, output_LFC.bw, output_FDR.bdg, output_LFC.bdg\", default=\"output\")\n\n\tmainParser.add_argument('-f',\"--mageck_RRA\", help=\"mageck_RRA sgRNA summary file\",required=True)\n\tmainParser.add_argument('-b','--gRNA_bed', help=\"gRNA bed file, need strand info\",required=True)\n\n\tgroup = mainParser.add_mutually_exclusive_group(required=True)\n\tgroup.add_argument(\"--cas9\", help=\"use cas9 cut site (-3) as gRNA score\", action='store_true')\n\tgroup.add_argument('--ABE', help=\"ABE mode\", action='store_true')\n\tgroup.add_argument('--CBE', help=\"CBE mode\", action='store_true')\n\n\tgenome=mainParser.add_argument_group(title='Genome Info')\n\tgenome.add_argument('-g','--genome', help=\"genome version: hg19, hg38, mm9, mm10. 
By default, specifying a genome version will automatically update index file, black list, chrom size and effectiveGenomeSize, unless a user explicitly sets those options.\", default='hg19',type=str)\n\tgenome.add_argument('-s','--chrom_size', help=\"chrome size\", default=myData['hg19_chrom_size'])\n\n\n\t##------- add parameters above ---------------------\n\targs = mainParser.parse_args()\t\n\treturn args\n\ndef to_bed(df,output):\n\tdf['name'] = df[0]+df[1].astype(str)+df[2].astype(str)\n\tdf = df.sort_values(4)\n\tshape0 = df.shape[0]\n\tdf1 = df.drop_duplicates('name')\n\tmean_value = pd.DataFrame(df.groupby('name')[4].mean())\n\tdf1[4] = df['name'].map(mean_value[4].to_dict())\n\tshape1 = df1.shape[0]\n\tif shape1 (end - start):\r\n return\r\n \r\n ## If the current optimum value comes from the entry (start + 1, end),\r\n ## then the nucleotide at the starting index does not form a base\r\n ## pair with another nucleotide\r\n if matrix[start][end] == matrix[start + 1][end]:\r\n traceBack(start + 1, end, sequence, matrix, result)\r\n \r\n ## If the current optimum value comes from the entry (start, end - 1),\r\n ## then the nucleotide at the end index does not form a base\r\n ## pair with another nucleotide\r\n elif matrix[start][end] == matrix[start][end - 1]:\r\n traceBack(start, end - 1, sequence, matrix, result)\r\n \r\n ## If the current optimum value comes from the entry (start + 1, end - 1),\r\n ## then the nucleotides at the start and end indices form a valid base\r\n ## pair. In this case, add the base pair (start, end) to the list\r\n elif matrix[start][end] == (getEnergyForPair(\r\n sequence[start], sequence[end]) + \r\n matrix[start + 1][end - 1]):\r\n result.append([start, end])\r\n traceBack(start + 1, end - 1, sequence, matrix, result)\r\n \r\n ## If the current optimum value is the result of partitioning the\r\n ## sequence into two subsequences, program considers the two\r\n ## subsequences independently\r\n else:\r\n for index in range(start + 1, end):\r\n if matrix[start][end] == (matrix[start][index] + \r\n matrix[index + 1][end]):\r\n traceBack(start, index, sequence, matrix, result)\r\n traceBack(index + 1, end, sequence, matrix, result)\r\n break\r\n\r\n## This program uses the function 'getGlobalOptimum' to get the minimum free\r\n## energy. 
Then, the program uses the function 'traceBack' to reconstruct \r\n## the optimum secondary structure from the matrix in the forward pass \r\ndef getOptimalStructure(sequence):\r\n    \r\n    ## 'startTime' is the time when the function starts running\r\n    ## 'result' is the optimum secondary structure\r\n    startTime = time.time()\r\n    result = []\r\n\r\n    ## Uses the matrix obtained in the forward pass to reconstruct the\r\n    ## secondary structure\r\n    matrix = getGlobalOptimum(sequence)\r\n    traceBack(0, len(sequence) - 1, sequence, matrix, result)\r\n    \r\n    ## 'endTime' is the time when the function terminates\r\n    endTime = time.time()\r\n    \r\n    ## Returns the total computation time, the minimum free energy of the\r\n    ## secondary structure, and the secondary structure \r\n    return endTime - startTime, matrix[0][len(sequence) - 1], result\r\n    \r\n    \r\n    \r\n","sub_path":"final_project_bpdependent.py","file_name":"final_project_bpdependent.py","file_ext":"py","file_size_in_byte":7227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"57235487","text":"\"\"\"\nA collection of utilities for generally munging imaging data.\n\"\"\"\nimport os\nimport sys\nimport re\nimport io\nimport glob\nimport zipfile\nimport tarfile\nimport logging\nimport tempfile\nimport shutil\nimport shlex\nimport pipes\nimport contextlib\nimport subprocess as proc\n\nimport dicom as dcm\nimport numpy as np\nimport nibabel as nib\nimport pyxnat\n\nimport datman.config\nimport datman.scanid as scanid\n\nlogger = logging.getLogger(__name__)\n\ndef check_checklist(session_name, study=None):\n    \"\"\"Reads the checklist identified from the session_name.\n    If there is an entry, returns the comment; otherwise\n    returns None.\n    \"\"\"\n\n    try:\n        ident = scanid.parse(session_name)\n    except scanid.ParseException:\n        logger.warning('Invalid session id:{}'.format(session_name))\n        return\n\n    if study:\n        cfg = datman.config.config(study=study)\n    else:\n        cfg = datman.config.config(study=session_name)\n\n    try:\n        checklist_path = os.path.join(cfg.get_path('meta'),\n                                      'checklist.csv')\n    except KeyError:\n        logger.warning('Unable to identify meta path for study:{}'\n                       .format(cfg.study_name))\n        return\n\n    try:\n        with open(checklist_path, 'r') as f:\n            lines = f.readlines()\n    except IOError:\n        logger.warning('Unable to open checklist file:{} for reading'\n                       .format(checklist_path))\n        return\n\n    for line in lines:\n        parts = line.split(None, 1)\n        if parts: # fix for empty lines\n            if os.path.splitext(parts[0])[0] == 'qc_{}'.format(session_name):\n                try:\n                    return parts[1].strip()\n                except IndexError:\n                    return ''\n\n    return None\n\ndef check_blacklist(scan_name, study=None):\n    \"\"\"Reads the blacklist identified from the scan_name.\n    If there is an entry, returns the comment; otherwise\n    returns None.\n    \"\"\"\n\n    try:\n        ident, tag, series_num, _ = scanid.parse_filename(scan_name)\n        blacklist_id = \"_\".join([str(ident), tag, series_num])\n    except scanid.ParseException:\n        logger.warning('Invalid session id:{}'.format(scan_name))\n        return\n\n    if study:\n        cfg = datman.config.config(study=study)\n    else:\n        cfg = datman.config.config(study=ident.get_full_subjectid_with_timepoint())\n\n    try:\n        checklist_path = os.path.join(cfg.get_path('meta'),\n                                      'blacklist.csv')\n    except KeyError:\n        logger.warning('Unable to identify meta path for study:{}'\n                       .format(study))\n        return\n\n    try:\n        with open(checklist_path, 'r') as f:\n            lines = f.readlines()\n    except IOError:\n        logger.warning('Unable to open blacklist file:{} for 
reading'\n                       .format(checklist_path))\n        return\n    for line in lines:\n        parts = line.split(None, 1)\n        if parts: # fix for empty lines\n            if blacklist_id in parts[0]:\n                try:\n                    return parts[1].strip()\n                except IndexError:\n                    return\n\n\ndef get_subject_from_filename(filename):\n    filename = os.path.basename(filename)\n    filename = filename.split('_')[0:5]\n    filename = '_'.join(filename)\n\n    return filename\n\n\ndef script_path():\n    \"\"\"\n    Returns the full path to the executing script.\n    \"\"\"\n    return os.path.abspath(os.path.dirname(sys.argv[0]))\n\ndef guess_tag(description, tagmap):\n    \"\"\"\n    Given a series description return a list of series tags this might be.\n\n    By \"series tag\" we mean a short code like T1, DTI, etc.. that indicates\n    more generally what the data is (usually the DICOM header\n    SeriesDescription).\n\n    tagmap is a dictionary that maps a regex to a series tag, where the regex\n    matches the series description dicom header. If not specified this module's\n    SERIES_TAGS_MAP is used.\n    \"\"\"\n    matches = list(set(\n        [tag for p,tag in tagmap.iteritems() if re.search(p,description)]\n    ))\n    if len(matches) == 0: return None\n    if len(matches) == 1: return matches[0]\n    return matches\n\ndef mangle_basename(base_path):\n    \"\"\"\n    strip off final slash to get the appropriate basename if necessary.\n    \"\"\"\n    base_path = os.path.normpath(base_path)\n    base = os.path.basename(base_path).lower()\n\n    return base\n\ndef mangle(string):\n    \"\"\"Mangles a string to conform with the naming scheme.\n\n    Mangling is roughly: convert runs of non-alphanumeric characters to a dash.\n\n    Does not convert '.' to avoid accidentally mangling extensions and does\n    not convert '+'\n    \"\"\"\n    if not string:\n        string = \"\"\n    return re.sub(r\"[^a-zA-Z0-9.+]+\",\"-\",string)\n\ndef get_extension(path):\n    \"\"\"\n    Get the filename extension on this path.\n\n    This is a slightly more sophisticated version of os.path.splitext in that\n    this will correctly return the extension for '.tar.gz' files, for example.\n    :D\n    \"\"\"\n    if path.endswith('.tar.gz'):\n        return '.tar.gz'\n    if path.endswith('.nii.gz'):\n        return '.nii.gz'\n    else:\n        return os.path.splitext(path)[1]\n\ndef get_archive_headers(path, stop_after_first = False):\n    \"\"\"\n    Get dicom headers from a scan archive.\n\n    Path can be a path to a tarball or zip of dicom folders, or a folder. 
It is\n assumed that this archive contains the dicoms from a single exam, organized\n into folders for each series.\n\n The entire archive is scanned and dicom headers from a single file in each\n folder are returned as a dictionary that maps path->headers.\n\n If stop_after_first == True only a single set of dicom headers are\n returned for the entire archive, which is useful if you only care about the\n exam details.\n \"\"\"\n if os.path.isdir(path):\n return get_folder_headers(path, stop_after_first)\n elif zipfile.is_zipfile(path):\n return get_zipfile_headers(path, stop_after_first)\n elif os.path.isfile(path) and path.endswith('.tar.gz'):\n return get_tarfile_headers(path, stop_after_first)\n else:\n raise Exception(\"{} must be a file (zip/tar) or folder.\".format(path))\n\ndef get_tarfile_headers(path, stop_after_first = False):\n \"\"\"\n Get headers for dicom files within a tarball\n \"\"\"\n tar = tarfile.open(path)\n members = tar.getmembers()\n\n manifest = {}\n # for each dir, we want to inspect files inside of it until we find a dicom\n # file that has header information\n for f in filter(lambda x: x.isfile(), members):\n dirname = os.path.dirname(f.name)\n if dirname in manifest: continue\n try:\n manifest[dirname] = dcm.read_file(tar.extractfile(f))\n if stop_after_first: break\n except dcm.filereader.InvalidDicomError as e:\n continue\n return manifest\n\ndef get_zipfile_headers(path, stop_after_first = False):\n \"\"\"\n Get headers for a dicom file within a zipfile\n \"\"\"\n zf = zipfile.ZipFile(path)\n\n manifest = {}\n for f in zf.namelist():\n dirname = os.path.dirname(f)\n if dirname in manifest: continue\n try:\n manifest[dirname] = dcm.read_file(io.BytesIO(zf.read(f)))\n if stop_after_first: break\n except dcm.filereader.InvalidDicomError as e:\n continue\n except zipfile.BadZipfile:\n logger.warning('Error in zipfile:{}'\n .format(path))\n break\n return manifest\n\ndef get_folder_headers(path, stop_after_first = False):\n \"\"\"\n Generate a dictionary of subfolders and dicom headers.\n \"\"\"\n\n manifest = {}\n\n # for each dir, we want to inspect files inside of it until we find a dicom\n # file that has header information\n subdirs = []\n for filename in os.listdir(path):\n filepath = os.path.join(path,filename)\n try:\n if os.path.isdir(filepath):\n subdirs.append(filepath)\n continue\n manifest[path] = dcm.read_file(filepath)\n break\n except dcm.filereader.InvalidDicomError as e:\n pass\n\n if stop_after_first: return manifest\n\n # recurse\n for subdir in subdirs:\n manifest.update(get_folder_headers(subdir, stop_after_first))\n return manifest\n\ndef get_all_headers_in_folder(path, recurse = False):\n \"\"\"\n Get DICOM headers for all files in the given path.\n\n Returns a dictionary mapping path->headers for *all* files (headers == None\n for files that are not dicoms).\n \"\"\"\n\n manifest = {}\n for dirname, dirnames, filenames in os.walk(path):\n for filename in filenames:\n filepath = os.path.join(dirname,filename)\n headers = None\n try:\n headers = dcm.read_file(filepath)\n except dcm.filereader.InvalidDicomError as e:\n continue\n manifest[filepath] = headers\n if not recurse: break\n return manifest\n\ndef col(arr, colname):\n \"\"\"\n Return the named column of an ndarray.\n\n Column names are given by the first row in the ndarray\n \"\"\"\n idx = np.where(arr[0,] == colname)[0]\n return arr[1:,idx][:,0]\n\ndef subject_type(subject):\n \"\"\"\n Uses subject naming to determine what kind of files we are looking at. 
If\n    we find a strangely-named subject, we return None.\n\n    TO DEPRECATE.\n    \"\"\"\n    try:\n        subject = subject.split('_')\n\n        if subject[2] == 'PHA':\n            return 'phantom'\n        elif subject[2] != 'PHA' and subject[2][0] == 'P':\n            return 'humanphantom'\n        elif str.isdigit(subject[2]) == True and len(subject[2]) == 4:\n            return 'subject'\n        else:\n            return None\n\n    except:\n        return None\n\ndef get_subjects(path):\n    \"\"\"\n    Finds all of the subject folders in the supplied directory, and returns\n    their basenames.\n    \"\"\"\n    subjects = filter(os.path.isdir, glob.glob(os.path.join(path, '*')))\n    for i, subj in enumerate(subjects):\n        subjects[i] = os.path.basename(subj)\n    subjects.sort()\n\n    return subjects\n\ndef get_phantoms(path):\n    \"\"\"\n    Finds all of the phantom folders in the supplied directory, and returns\n    their basenames.\n    \"\"\"\n    phantoms = []\n    subjects = get_subjects(path)\n    for subject in subjects:\n        subjtype = subject_type(subject)\n        if subjtype == 'phantom':\n            phantoms.append(subject)\n\n    return phantoms\n\ndef get_xnat_catalog(data_path, subject):\n    \"\"\"\n    For a given subject, finds and returns all of the xml files as full\n    paths. In almost all cases, this will be a single catalog.\n\n\n    THIS IS BROKEN.\n    \"\"\"\n    dicoms = os.listdir(os.path.join(data_path, 'dicom'))\n    subjects = filter(lambda x: subject in x, dicoms)\n\n    catalogs = []\n\n    for subject in subjects:\n        folders = os.listdir(os.path.join(data_path, 'dicom', subject))\n        folders.sort()\n        files = os.listdir(os.path.join(data_path, 'dicom', subject, folders[0]))\n        files = filter(lambda x: '.xml' in x, files)\n        catalogs.append(os.path.join(data_path, 'dicom', subject, folders[0], files[0]))\n\n    catalogs.sort()\n\n    return catalogs\n\ndef define_folder(path):\n    \"\"\"\n    Sets a variable to be the path to a folder. Also, if the folder does not\n    exist, this makes it so, unless we lack the permissions to do so, which\n    leads to a graceful exit.\n    \"\"\"\n    if not os.path.isdir(path):\n        try:\n            os.makedirs(path)\n        except OSError as e:\n            logger.error('failed to make directory {}'.format(path))\n            raise(e)\n\n    if not has_permissions(path):\n        raise OSError(\"User does not have permission to access {}\".format(path))\n\n    return path\n\ndef has_permissions(path):\n    \"\"\"\n    Checks for write access to submitted path.\n    \"\"\"\n    if os.access(path, 7) == True:\n        flag = True\n    else:\n        logger.error('You do not have write access to path {}'.format(path))\n        flag = False\n\n    return flag\n\ndef make_epitome_folders(path, n_runs):\n    \"\"\"\n    Makes an epitome-compatible folder structure with functional data FUNC of n\n    runs, and a single T1.\n\n    This works assuming we've run everything through freesurfer.\n\n    If we need multisession, it might make sense to run this multiple times\n    (once per session).\n    \"\"\"\n    run('mkdir -p ' + path + '/TEMP/SUBJ/T1/SESS01/RUN01')\n    for r in np.arange(n_runs)+1:\n        num = \"{:0>2}\".format(str(r))\n        run('mkdir -p ' + path + '/TEMP/SUBJ/FUNC/SESS01/RUN' + num)\n\ndef run_dummy_q(list_of_names):\n    \"\"\"\n    This holds the script until all of the queued items are done.\n    \"\"\"\n    logger.info('Holding for remaining processes.')\n    opts = 'h_vmem=3G,mem_free=3G,virtual_free=3G'\n    holds = \",\".join(list_of_names)\n    cmd = 'qsub -sync y -hold_jid {} -l {} -b y echo'.format(holds, opts)\n    run(cmd)\n    logger.info('... 
Done.')\n\ndef run(cmd, dryrun=False, specialquote=True, verbose=True):\n \"\"\"\n Runs the command in default shell, returning STDOUT and a return code.\n The return code uses the python convention of 0 for success, non-zero for\n failure\n \"\"\"\n # Popen needs a string command.\n if isinstance(cmd, list):\n cmd = \" \".join(cmd)\n\n # perform shell quoting for special characters in filenames\n if specialquote:\n cmd = _escape_shell_chars(cmd)\n\n if dryrun:\n logger.info(\"Performing dry-run. Skipped command: {}\".format(cmd))\n return 0, ''\n\n logger.debug(\"Executing command: {}\".format(cmd))\n\n p = proc.Popen(cmd, shell=True, stdout=proc.PIPE, stderr=proc.PIPE)\n out, err = p.communicate()\n\n if p.returncode and verbose:\n logger.error('run({}) failed with returncode {}. STDERR: {}'\n .format(cmd, p.returncode, err))\n\n return p.returncode, out\n\n\ndef _escape_shell_chars(arg):\n \"\"\"\n An attempt to sanitize shell arguments without disabling\n shell expansion.\n\n >>> _escape_shell_chars('This (; file has funky chars')\n 'This \\\\(\\\\; file has funky chars'\n \"\"\"\n arg = arg.replace('(', '\\\\(')\n arg = arg.replace(';', '\\\\;')\n arg = arg.replace(')', '\\\\)')\n\n return(arg)\n\n\ndef get_files_with_tag(parentdir, tag, fuzzy = False):\n \"\"\"\n Returns a list of files that have the specified tag.\n\n Filenames must conform to the datman naming convention (see\n scanid.parse_filename) in order to be considered.\n\n If fuzzy == True, then filenames are matched if the given tag is found\n within the filename's tag.\n \"\"\"\n\n files = []\n for f in os.listdir(parentdir):\n try:\n _, filetag, _, _ = scanid.parse_filename(f)\n if tag == filetag or (fuzzy and tag in filetag):\n files.append(os.path.join(parentdir,f))\n except scanid.ParseException:\n continue\n\n return files\n\ndef makedirs(path):\n \"\"\"\n Make the directory (including parent directories) if they don't exist\n \"\"\"\n if not os.path.exists(path):\n os.makedirs(path)\n\ndef loadnii(filename):\n \"\"\"\n Usage:\n nifti, affine, header, dims = loadnii(filename)\n\n Loads a Nifti file (3 or 4 dimensions).\n\n Returns:\n a 2D matrix of voxels x timepoints,\n the input file affine transform,\n the input file header,\n and input file dimensions.\n \"\"\"\n\n # load everything in\n nifti = nib.load(filename)\n affine = nifti.get_affine()\n header = nifti.get_header()\n dims = nifti.shape\n\n # if smaller than 3D\n if len(dims) < 3:\n raise Exception('Your data has less than 3 dimensions!')\n\n # if smaller than 4D\n if len(dims) > 4:\n raise Exception('Your data is at least a penteract (> 4 dimensions!)')\n\n # load in nifti and reshape to 2D\n nifti = nifti.get_data()\n if len(dims) == 3:\n dims = tuple(list(dims) + [1])\n nifti = nifti.reshape(dims[0]*dims[1]*dims[2], dims[3])\n\n return nifti, affine, header, dims\n\ndef check_returncode(returncode):\n if returncode != 0:\n raise ValueError\n\ndef get_loaded_modules():\n \"\"\"Returns a space separated list of loaded modules\n\n These are modules loaded by the environment-modules system. 
This function\n    just looks in the LOADEDMODULES environment variable for the list.\n    \"\"\"\n    return \" \".join(os.environ.get(\"LOADEDMODULES\",\"\").split(\":\"))\n\ndef splitext(path):\n    \"\"\"\n    Function that will remove extension, including specially-defined extensions\n    that fool os.path.splitext\n    \"\"\"\n    for ext in ['.nii.gz', '.mnc.gz']:\n        if path.endswith(ext):\n            return path[:-len(ext)], path[-len(ext):]\n    return os.path.splitext(path)\n\n@contextlib.contextmanager\ndef make_temp_directory(suffix='', prefix='tmp', path=None):\n    temp_dir = tempfile.mkdtemp(suffix=suffix, prefix=prefix, dir=path)\n    try:\n        yield temp_dir\n    finally:\n        shutil.rmtree(temp_dir)\n\ndef remove_empty_files(path):\n    for root, dirs, files in os.walk(path):\n        for f in files:\n            filename = os.path.join(root, f)\n            if os.path.getsize(filename) == 0:\n                os.remove(filename)\n\ndef nifti_basename(fpath):\n    \"\"\"\n    return basename without extension (either .nii.gz or .nii)\n    \"\"\"\n    basefpath = os.path.basename(fpath)\n    stem = basefpath.replace('.nii','').replace('.gz', '')\n\n    return(stem)\n\ndef filter_niftis(candidates):\n    \"\"\"\n    Takes a list and returns all items that contain the extensions '.nii' or '.nii.gz'.\n    \"\"\"\n    candidates = filter(lambda x: 'nii.gz' == '.'.join(x.split('.')[1:]) or\n                        'nii' == '.'.join(x.split('.')[1:]), candidates)\n\n    return candidates\n\ndef split_path(path):\n    \"\"\"\n    Splits a path into all the component parts, returns a list\n\n    >>> split_path('a/b/c/d.txt')\n    ['a', 'b', 'c', 'd.txt']\n    \"\"\"\n    dirname = path\n    path_split = []\n    while True:\n        dirname, leaf = os.path.split(dirname)\n        if (leaf):\n            path_split = [leaf] + path_split\n        else:\n            break\n    return(path_split)\n\nclass cd(object):\n    \"\"\"\n    A context manager for changing directory. Since best practices dictate\n    returning to the original directory, saves the original directory and\n    returns to it after the block has exited.\n\n    May raise OSError if the given path doesn't exist (or the current directory\n    is deleted before switching back)\n    \"\"\"\n\n    def __init__(self, path):\n        user_path = os.path.expanduser(path)\n        self.new_path = os.path.expandvars(user_path)\n\n    def __enter__(self):\n        self.old_path = os.getcwd()\n        os.chdir(self.new_path)\n\n    def __exit__(self, e, value, traceback):\n        os.chdir(self.old_path)\n\nclass XNATConnection(object):\n    def __init__(self, xnat_url, user_name, password):\n        self.server = xnat_url\n        self.user = user_name\n        self.password = password\n\n    def __enter__(self):\n        self.connection = pyxnat.Interface(server=self.server, user=self.user,\n                password=self.password)\n        return self.connection\n\n    def __exit__(self, type, value, traceback):\n        self.connection.disconnect()\n\ndef get_xnat_credentials(config, xnat_cred):\n    if not xnat_cred:\n        xnat_cred = os.path.join(config.get_path('meta'), 'xnat-credentials')\n\n    logger.debug(\"Retrieving xnat credentials from {}\".format(xnat_cred))\n    try:\n        credentials = read_credentials(xnat_cred)\n        user_name = credentials[0]\n        password = credentials[1]\n    except IndexError:\n        logger.error(\"XNAT credential file {} is missing the user name or \" \\\n                \"password.\".format(xnat_cred))\n        sys.exit(1)\n    return user_name, password\n\ndef read_credentials(cred_file):\n    credentials = []\n    try:\n        with open(cred_file, 'r') as creds:\n            for line in creds:\n                credentials.append(line.strip('\\n'))\n    except:\n        logger.error(\"Cannot read credential file or file does not exist: \" \\\n                \"{}.\".format(cred_file))\n        sys.exit(1)\n    return credentials\n\ndef get_relative_source(source, target):\n    if os.path.isfile(source):\n        source_file = os.path.basename(source)\n        source = os.path.dirname(source)\n    else:\n        source_file = ''\n\n    rel_source_dir = os.path.relpath(source, os.path.dirname(target))\n    rel_source = os.path.join(rel_source_dir, source_file)\n    return rel_source\n\ndef check_dependency_configured(program_name, shell_cmd=None, env_vars=None):\n    \"\"\"\n    Name to add to the exception message if the program is\n    not correctly configured.\n    A command line command that will be put into 'which', to\n    check whether the shell can find it.\n    A list of shell variables that are expected to be set.\n    Doesnt verify the value of these vars, only that they are\n    all set.\n\n    Raises EnvironmentError if the command is not findable or if any environment\n    variable isnt configured.\n    \"\"\"\n    message = (\"{} required but not found. Please check that \"\n            \"it is installed and correctly configured.\".format(program_name))\n\n    if shell_cmd is not None:\n        return_val, found = run('which {}'.format(shell_cmd))\n        if return_val or not found:\n            raise EnvironmentError(message)\n\n    if env_vars is None:\n        return\n\n    if not isinstance(env_vars, list):\n        env_vars = [env_vars]\n\n    try:\n        for variable in env_vars:\n            os.environ[variable]\n    except KeyError:\n        raise EnvironmentError(message)\n\ndef validate_subject_id(subject_id, config):\n    \"\"\"\n    Checks that a given subject id\n    a) Matches the datman convention\n    b) Matches a study tag that is defined in the configuration file for\n    the current study\n    c) Matches a site that is defined for the given study tag\n\n    If all validation checks pass, will return a datman scanid instance. 
This\n can be ignored if the validation is all that's wanted.\n \"\"\"\n try:\n scanid = datman.scanid.parse(subject_id)\n except datman.scanid.ParseException:\n raise RuntimeError(\"Subject id {} does not match datman\"\n \" convention\".format(subject_id))\n\n valid_tags = config.get_study_tags()\n\n try:\n sites = valid_tags[scanid.study]\n except KeyError:\n raise RuntimeError(\"Subject id {} has undefined study code {}\".format(\n subject_id, scanid.study))\n\n if scanid.site not in sites:\n raise RuntimeError(\"Subject id {} has undefined site {} for study {}\".format(\n subject_id, scanid.site, scanid.study))\n\n return scanid\n\n# vim: ts=4 sw=4 sts=4:\n","sub_path":"datman/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":22757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"390607714","text":"from window_state import WindowState, MainMenuState, GameState, EndGameState\n\n\nclass WindowManager:\n def __init__(self, window):\n self.window = window\n\n # Register the window states\n WindowState.main_menu = WindowState('main_menu', MainMenuState(self.window))\n WindowState.game = WindowState('game', GameState(self.window))\n WindowState.end_game = WindowState('end_game', EndGameState(self.window))\n\n # Start on the main_menu state\n self.current_state = WindowState.main_menu\n\n def run(self, events):\n # Run the current state then move to the next state\n self.current_state.state.run()\n self.current_state = self.current_state.state.next(events)\n","sub_path":"src/window_manager.py","file_name":"window_manager.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"559128825","text":"from random import randint\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QLabel, QVBoxLayout\napp = QApplication([])\nmy_win = QWidget()\nmy_win.show()\ntext = QLabel('Победитель:')\nwinner = QLabel('?')\ndef show_winner():\n number = randint(0, 999)\n winner.setText(str(number))\n text.setText\n my_win.setWindowTitle ('Определитель победителя')\n\nbutton = QPushButton('Сгенерировать')\nv_line = QVBoxLayout()\nv_line.addWidget(text,alignment = Qt.AlignCenter)\nv_line.addWidget(winner,alignment = Qt.AlignCenter)\nv_line.addWidget(button,alignment = Qt.AlignCenter)\nmy_win.setLayout(v_line)\nbutton.clicked.connect(show_winner)\nmy_win.show\n\n\n\n\n\n\napp.exec_()","sub_path":"winner_generator.py","file_name":"winner_generator.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"337782326","text":"class ExtendibleArray:\n def __init__(self):\n self.old = None\n self.new = [None]\n self.shadow = -1\n self.end = 1\n\n def getlist(self):\n return list(filter(None.__ne__, self.new + self.old))\n\n def getItems(self):\n return str(list(filter(None.__ne__, self.new + self.old))).strip(\"[]\")\n\n def add(self, item):\n if self.end == len(self.new):\n self.old = self.new\n self.new = [None] * 2 * len(self.new)\n self.shadow = self.end - 1\n self.new[self.end] = item\n self.new[self.shadow] = self.old[self.shadow]\n self.old[self.shadow] = None\n self.end = self.end + 1\n self.shadow = self.shadow - 1\n return True\n\n def get(self, index):\n if index < 0 and index >= self.size():\n print(f\"Index {index}, size {self.size()}\")\n if index + 1 <= self.shadow:\n return self.old[index + 1]\n else:\n return 
self.new[index + 1]\n\n    def size(self):\n        return self.end - 1\n\n    def set(self, index, value):\n        original = self.get(index)\n        if index + 1 <= self.shadow:\n            self.old[index + 1] = value\n        else:\n            self.new[index + 1] = value\n        return original\n\n    def remove(self, index):\n        # Grab the value of the element we're removing; this also verifies the index.\n        result = self.get(index)\n        # 1 > -1\n        if index + 1 > self.shadow:\n            # shift elements past this element down on top of the element\n            self.new[index + 1 :] = self.new[index + 2 :]\n            # Move the element before the shadow down.\n            self.old[self.shadow + 1] = self.new[self.shadow + 1]\n            # To be nice to the garbage collector, clear the endpoints.\n            self.new[self.shadow + 1] = None\n\n            # Pull the shadow and endpoint closer together.\n            self.shadow = self.shadow + 1\n            self.end = self.end - 1\n        else:\n            self.old[index + 1 :] = self.old[index + 2 :]\n            for i in range(2):\n                self.old[self.shadow + 1 + i] = self.new[self.shadow + 1 + i]\n            self.shadow = self.shadow + 1\n\n        # # Shuffle the elements of the new array down one position.\n        # # The beginning position is one past the shadow, as it always is.\n        self.old[index + 1 :] = self.old[index + 2 :]\n\n        # Clear the element that just got moved. */\n        self.new[self.end - 1] = None\n\n        # Back up the end position, since we just lost an element. */\n        self.end = self.end - 1\n\n        # Finally, see if we just emptied the new array. This happens if\n        # the end pointer is one step past the shadow pointer.\n        if self.end == self.shadow + 1:\n            # Drop the new array and promote the old array to new.\n            self.new = self.old\n            # Make a blank array for new elements.\n            self.old = [None] * (int(len(self.new) / 2))\n            # Move the end and shadow pointers off the ends of the array.\n            self.shadow = -1\n            self.end = len(self.new)\n\n        return result\n","sub_path":"data-structures/ExtendibleArray.py","file_name":"ExtendibleArray.py","file_ext":"py","file_size_in_byte":3177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"424910714","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 31 23:14:18 2019\nini file parser package depends on configparser\n@author: Micha\n\"\"\"\nimport configparser\n\nclass ini:\n    def __init__(self,filename):\n        self.name = filename\n        self.config = configparser.ConfigParser()\n    def readfile(self):\n        fileList=self.config.read(self.name)\n        if len(fileList) !=1 :\n            raise Exception('Error: no file or several files')\n        \n    def ini2sysconfig(self,source='sysconfig'):\n        \"\"\"\n        converts a sysconfig section in configuration to a sys config dictionary\n        returns a sysconfig dictionary\n        \"\"\"\n        sysconfig_section = self.config[source]\n        dictionary = {}\n        for key in sysconfig_section.keys():\n            if (key == 'ldcoeffs1') or (key == 'ldcoeffs2'):\n                dictionary[key]=eval(sysconfig_section[key])\n            else:\n                dictionary[key]=sysconfig_section.getfloat(key) \n        return dictionary\n    \n    def ini2fitpar(self):\n        \"\"\"\n        converts a fitpar section in configuration to a fitpar dictionary\n        The fitpar dictionary is composed of lists only!\n        returns a runconfig dictionary\n        \"\"\"\n        fitpar_section = self.config['fitpar']\n        dictionary = {}\n        for key in fitpar_section.keys():\n            dictionary[key]=eval(fitpar_section[key])\n        \n        return dictionary\n\n    def ini2runconfig(self):\n        \"\"\"\n        converts a runconfig section in configuration to a runconfig dictionary\n        returns a runconfig dictionary\n        \"\"\"\n        runconfig_section = self.config['runconfig']\n        rc_dict = {}\n        if 'atm1' in 
runconfig_section:\n rc_dict['atm1']= runconfig_section['atm1']\n if 'atm2' in runconfig_section:\n rc_dict['atm2']= runconfig_section['atm2']\n if 'irm' in runconfig_section:\n rc_dict['irm']= runconfig_section['irm']\n if 'boost' in runconfig_section:\n rc_dict['boost']= runconfig_section['boost']\n if 'ldfunc1' in runconfig_section:\n rc_dict['ldfunc1']= runconfig_section['ldfunc1']\n if 'ldfunc2' in runconfig_section:\n rc_dict['ldfunc2']= runconfig_section['ldfunc2']\n if 'ntr1' in runconfig_section: \n rc_dict['ntr1']= runconfig_section.getint('ntr1')\n else:\n rc_dict['ntr1']= 10000\n if 'ntr2' in runconfig_section: \n rc_dict['ntr2']= runconfig_section.getint('ntr2')\n else:\n rc_dict['ntr2']= 10000\n if 'rvsigmascale' in runconfig_section:\n rc_dict['rvsigmascale']= runconfig_section.getfloat('rvsigmascale')\n if 'lcsigmascale' in runconfig_section:\n rc_dict['lcsigmascale']= runconfig_section.getfloat('lcsigmascale')\n if 'chosenparams' in runconfig_section:\n rc_dict['chosenparams']= eval(runconfig_section['chosenparams'])\n if 'constraint_type' in runconfig_section:\n rc_dict['constraint_type']= runconfig_section.getint('constraint_type')\n if 'threads' in runconfig_section:\n rc_dict['threads'] = runconfig_section.getint('threads')\n if 'ncpus' in runconfig_section: \n rc_dict['ncpus'] = runconfig_section.getint('ncpus')\n if 'claret' in runconfig_section:\n rc_dict['claret'] = runconfig_section.getboolean('claret')\n if 'tablecoeffs' in runconfig_section:\n rc_dict['tablecoeffs'] = runconfig_section.getboolean('tablecoeffs')\n if 'phoebelogger' in runconfig_section:\n rc_dict['phoebelogger'] = runconfig_section.getboolean('phoebelogger')\n #rc_dict['phoebeflux'] = runconfig_section.getboolean('phoebeflux')\n return rc_dict\n\n def parseGeneral(self):\n general_section = self.config['general']\n gen_dict = {}\n \n gen_dict['rootpath']= general_section['rootpath']\n gen_dict['claretpath']= general_section['claretpath']\n gen_dict['ldtable']= general_section['ldtable']\n gen_dict['gbtable']= general_section['gbtable']\n gen_dict['gb_bol_table']= general_section['gb_bol_table']\n if 'location' in general_section:\n gen_dict['location']= general_section['location']\n if 'machine' in general_section:\n gen_dict['machine']= general_section['machine']\n gen_dict['sysname']= general_section['sysname']\n if 'runlabel' in general_section:\n gen_dict['runlabel']= general_section['runlabel']\n if 'newlabel' in general_section:\n gen_dict['newlabel']= general_section['newlabel']\n gen_dict['fulllcfilename']= general_section['fulllcfilename']\n if 'rvdatafn' in general_section:\n gen_dict['rvdatafn']= general_section['rvdatafn']\n if 'source' in general_section:\n gen_dict['source'] = general_section['source']\n if 'eccmodel_filename' in general_section:\n gen_dict['eccmodel_filename'] = general_section['eccmodelfilename']\n gen_dict['plot']= general_section.getboolean('plot')\n gen_dict['savedata']= general_section.getboolean('savedata')\n gen_dict['dataexists']= general_section.getboolean('dataexists')\n if 'kepleroffset' in general_section:\n gen_dict['kepleroffset']= general_section.getfloat('kepleroffset')\n\n \n gen_dict['write_log']= general_section.getboolean('write_log')\n \n if 'timelimits' in general_section:\n gen_dict['timelimits'] = eval(general_section['timelimits'])\n if 'fmax' in general_section:\n gen_dict['fmax'] = general_section.getfloat('fmax')\n if 'resylim' in general_section:\n gen_dict['resylim'] = eval(general_section['resylim'])\n if 'plot_components' in 
general_section:\n gen_dict['plot_components'] = general_section.getboolean('plot_components')\n if 'runphoebe' in general_section:\n gen_dict['runphoebe'] = general_section.getboolean('runpheobe')\n return gen_dict \n \n def parse_eccbeer(self):\n eccbeer_section = self.config['eccbeer']\n# print(eccbeer_section.items())\n# eccbeer_dict['alpha_refl1']= eccbeer_section.getfloat('alpha_refl1')\n# eccbeer_dict['alpha_refl2']= eccbeer_section.getfloat('alpha_refl2')\n# eccbeer_dict['eccModelShift']= eccbeer_section.getfloat('eccModelShift')\n# eccbeer_dict['deltaPhase']= eccbeer_section.getfloat('deltaPhase')\n# eccbeer_dict['deltaPhaseOrig']= eccbeer_section.getfloat('deltaPhaseOrig')\n dictionary = {}\n ignore_list =['id','run_phoebe_params','lcpath','prim_results','scnd_results','lum_prim','Xinit','lbf','ubf']\n for key in eccbeer_section.keys():\n if key in ignore_list:\n continue\n else:\n if (key == 'ldcoeffs1') or (key == 'ldcoeffs2'):\n dictionary[key]=eval(eccbeer_section[key])\n else:\n dictionary[key]=eccbeer_section.getfloat(key) \n return dictionary\n \n def parseCuts(self):\n cuts_section = self.config['cuts']\n cut_dict = {}\n cut_dict['runpar'] = eval(cuts_section['runpar'])\n cut_dict['verbose'] = cuts_section.getboolean('verbose')\n cut_dict['errorrun'] = cuts_section.getboolean('errorrun')\n for cutpar in cut_dict['runpar']:\n cut_dict[cutpar] = eval(cuts_section[cutpar])\n return cut_dict\n","sub_path":"packages/iniParser.py","file_name":"iniParser.py","file_ext":"py","file_size_in_byte":7580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"122591538","text":"import datetime\nimport pytz\nfrom fastapi import APIRouter\nfrom cash_server.orm_db import session, Cash, engine, Incas\nfrom datetime import datetime, timedelta\nfrom sqlalchemy import desc\nimport sqlalchemy\n\n# now = datetime.datetime.now(tz=pytz.timezone(\"Asia/Yekaterinburg\"))\n#\n# print(now.strftime(\"%Y-%m-%d %H-%M-%S +05\"))\n\nterminal_id = 'barbusa_1'\n# first()[0]\nans = session.query(Cash.terminal).distinct()\n# term = {}\nterminals_list = []\nfor i in ans:\n print(i[\"terminal\"])\n amount_in_terminals = 0\n try:\n incas_time_t = session.query(Incas.time).order_by(desc(Incas.time)).filter(Incas.terminal == i[\"terminal\"]).first()[0]\n except TypeError: # Нет записи в базе данных\n print(\"запись об инкассации в терминале отсутствует\")\n cash_amount_t = session.query(Cash.amount).filter(Cash.time > incas_time_t & Cash.terminal == i[\"terminal\"]).all()\n # print(\"Сумма в терминале:\", cash_amount_t)\n for j in cash_amount_t:\n amount_in_terminals += j[\"amount\"]\n\n cash_amount_t = session.query(Cash.amount).filter(Cash.terminal == i[\"terminal\"]).all()\n for j in cash_amount_t:\n amount_in_terminals += j[\"amount\"]\n\nprint(amount_in_terminals)\n\n\n\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"529485740","text":"from music21 import *\nimport settings\n\n#tomidi is the function that transfers the piece into a list of midi numbers\ndef tomidi():\n for i in range(len(settings.piece)):\n if str(type(settings.piece[i])) == \"\": #to test if this is music, not title or author or something else\n measurefinder(settings.piece[i]) #piece[i] = rightHand\n break\n return settings.midilist\n\ndef measurefinder(righthand): #used in tomidi(), righthand = everything for the right hand\n for i in 
range(len(righthand)):\n if str(type(righthand[i])) == \"\": #to test if this is a measure\n insidemeasure(righthand[i]) #righthand[j] = measure\n\ndef insidemeasure(measure): #used in measurefinder, measure = the measure we are working on now.\n for i in range(len(measure)): #count the elements in a measure, could be a note or a chord.\n if str(type(measure[i])) == \"\": #if this element is a note\n settings.midilist.append(measure[i].midi)\n elif str(type(measure[i])) == \"\": #if this element is a chord\n chordlist = []\n for p in measure[i]: #p = part\n chordlist.append(p.midi) #chordlist is the sub-list reprents a chord\n settings.midilist.append(chordlist) #put the sublist into the midilist\n\n#this function divides the midilist into monotonic sequences\ndef dividing():\n l = settings.midilist #l is the local name for settings.midilist\n while(len(l) > 1):\n if len(l) == 1:\n return\n l = increasing(l) #找递增序列\n if len(l) == 1:\n return\n l = uniform(l) #找不变序列\n if len(l) == 1:\n return\n l = decreasing(l) #找递减序列\n return\n\ndef increasing(l): #used in dividing\n i = 0\n j = 1\n while (validTest(i,j,l) == 0): #True:递增,不变; False:递减\n i = i + 1\n j = j + 1\n if j == len(l): #已碰到最后一个音或和弦\n break\n if j == 1:\n return l\n settings.aList.append(l[0:j]) #找到一个单调序列\n return l[j-1:] #保留上一个序列的最后一个音,清楚其他上一个序列的音\n\ndef uniform(l):\n i = 0\n j = 1\n while (validTest(i,j,l) == 1):\n i = i + 1\n j = j + 1\n if j == len(l):\n break\n if j == 1:\n return l\n settings.aList.append(l[0:j])\n return l[j-1:]\n\ndef decreasing(l): #used in dividing\n i = 0\n j = 1\n while (validTest(i,j,l) == 2): #判定结果 True递减, False递增或不变\n i = i + 1\n j = j + 1\n if j == len(l): #已碰到最后一个音或和弦\n break\n if j == 1:\n return l\n settings.aList.append(l[0:j]) #找到一个单调序列\n return l[j-1:] #保留上一个序列的最后一个音,清楚其他上一个序列的音\n\ndef validTest(i,j,l): #used in increasing() and decreasing()\n #规则:音高不变算作递增序列\n\n #Comparison between two notes\n if (type(l[i]) is int) and (type(l[j]) is int):\n if l[i] < l[j]:\n return 0\n elif l[i] == l[j]:\n return 1\n else:\n return 2\n\n #Comparison between a chord and a note\n if (type(l[i]) is list) and (type(l[j]) is int):\n if l[i][0] < l[j]:\n return 0\n elif l[i][0] == l[j]:\n return 1\n else:\n return 2\n\n #Comparison between a note and a chord\n if (type(l[i]) is int) and (type(l[j]) is list):\n if l[i] < l[j][0]:\n return 0\n elif l[i] == l[j][0]:\n return 1\n else:\n return 2\n\n #Comparison between two chords\n if (type(l[i]) is list) and (type(l[j]) is list):\n if l[i][0] < l[j][0]:\n return 0\n elif l[i][0] == l[j][0]:\n return 1\n else:\n return 2\n\n\n","sub_path":"HMM Training/Divider.py","file_name":"Divider.py","file_ext":"py","file_size_in_byte":3974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"276071770","text":"# -*- coding: utf8 -*-\nfrom lxml import etree\nimport requests, threading\nimport datetime\nimport math\nimport xml.etree.ElementTree as ET\nimport gzip\nfrom io import BytesIO\nfrom decimal import getcontext, Decimal\nimport numpy as np\nimport time\nimport socket\nfrom dbconnect_for_new import connectDB\n\n#要抓的時間區段\nEndDate = '20180101'\nStartDate = '20180101'\n# end_date = ''\n# minute_time = ''\nend_min = '0002'\nstr_min = '0000'\nvd = []\n\n\ndef parseXML(tree, temp):\n\n while True:\n try:\n for infos in tree:\n for info in infos:\n undivided_total_speed = 0\n undivide_total_laneoccupy = 0\n undivided_total_volume = 0\n data = []\n cnt = 0\n\n data.append(info.attrib[\"vdid\"])\n date = 
datetime.datetime.strptime(info.attrib[\"datacollecttime\"], '%Y/%m/%d %H:%M:%S')\n data.append(str(date.date()))\n data.append(str(date.time()))\n # data.append(info.attrib[\"datacollecttime\"])\n for lane in info:\n total_volume = 0\n now_speed = int(lane.attrib[\"speed\"])\n now_laneoccupy = int(lane.attrib[\"laneoccupy\"])\n if now_speed < 0:\n now_speed = 0\n now_laneoccupy = 0\n data.append(now_speed)\n data.append(now_laneoccupy)\n for cars in lane:\n now_volume = int(cars.attrib[\"volume\"])\n if now_volume >= 0:\n undivided_total_speed += now_speed * int(cars.attrib[\"volume\"])\n undivide_total_laneoccupy += now_laneoccupy * int(cars.attrib[\"volume\"])\n undivided_total_volume += now_volume\n total_volume += now_volume\n data.append(total_volume)\n cnt += 1\n while cnt != 6:\n data.append(0)\n data.append(0)\n data.append(0)\n cnt += 1\n if undivided_total_volume > 0:\n undivided_total_speed = undivided_total_speed / undivided_total_volume\n undivide_total_laneoccupy = undivide_total_laneoccupy / undivided_total_volume\n data.append(undivided_total_speed)\n data.append(undivide_total_laneoccupy)\n data.append(undivided_total_volume)\n temp.setdefault(info.attrib[\"vdid\"], []).append(tuple(data))\n except Exception as e:\n print(e)\n continue\n break\n return temp\n\n\ndef insertzero(end_date, minutemen, temp, vd):\n day = datetime.datetime.strptime(end_date, '%Y%m%d')\n time = datetime.datetime.strptime(minutemen, '%H%M')\n for i in vd:\n data = []\n cnt = 0\n data.append(i)\n data.append(str(day.date()))\n data.append(str(time.time()))\n while cnt != 6:\n data.append(0)\n data.append(0)\n data.append(0)\n cnt += 1\n data.append(0)\n data.append(0)\n data.append(0)\n temp.setdefault(i, []).append(tuple(data))\n cnt = 0\n # print(temp)\n return temp\n\n\ndef UpAndInsert(x, createmonth, temp):\n # print(temp)\n for i in temp:\n result = x.query_table_for_show()\n # print(result)\n t = str(createmonth) + \"-\" + str(i)\n if t not in result:\n # print(t)\n x.create(t)\n data = temp.get(str(i))\n data = str(data)[1:-1]\n x.insert_undivide(data)\n else:\n x.getcode(t)\n data = temp.get(str(i))\n data = str(data)[1:-1]\n # print(i)\n x.insert_undivide(data)\n print('--------success insert--------')\n\n\ndef TimeToSearch():\n\n global EndDate, StartDate, str_min, end_min\n # , end_date, minute_time\n\n StartDate = datetime.datetime.strptime(StartDate, \"%Y%m%d\")\n EndDate = datetime.datetime.strptime(EndDate, \"%Y%m%d\")\n substract_time_day = EndDate - StartDate + datetime.timedelta(1)\n total_days = math.floor((substract_time_day.total_seconds() / 86400))\n\n # 處理小時\n EndTime = datetime.datetime.strptime(end_min, \"%H%M\")\n StartTime = datetime.datetime.strptime(str_min, \"%H%M\")\n substract_time = EndTime - StartTime + datetime.timedelta(minutes=1)\n total_minutes = math.floor((substract_time.total_seconds() / 60))\n\n return total_days, total_minutes, StartTime\n\n\ndef main():\n\n x = connectDB()\n time_data = TimeToSearch()\n\n for day in range(0, time_data[0]):\n temp = {}\n count = 0\n end_date = StartDate + datetime.timedelta(day)\n createmonth = end_date.strftime(\"%Y-%m\")\n end_date = end_date.strftime(\"%Y%m%d\")\n print(end_date)\n for minute in range(0, time_data[1]):\n # print(minute)\n minutemen = time_data[2] + datetime.timedelta(minutes=minute)\n minutemen = minutemen.strftime(\"%H%M\")\n while True:\n try:\n headers = {'user-agent': '\"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.121 Safari/537.36'}\n 
result = requests.get(\"http://tisvcloud.freeway.gov.tw/history/vd/\" + str(end_date) + \"/vd_value_\" + str(minutemen) + \".xml.gz\", headers=headers)\n result.encoding = 'utf8'\n sitemap = gzip.GzipFile(fileobj=BytesIO(result.content))\n root = ET.parse(sitemap)\n tree = root.getroot()\n temp = parseXML(tree, temp)\n vd = list(temp.keys())\n result.close()\n except Exception as e:\n if count < 5:\n count += 1\n print(e)\n print('---------------------------------------' + end_date + \" \" + minutemen)\n continue\n else:\n temp = insertzero(end_date, minutemen, temp, vd)\n count = 0\n break\n break\n print(temp)\n UpAndInsert(x, createmonth, temp)\n time.sleep(1)\n\n x.exit()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"get_data_for_new.py","file_name":"get_data_for_new.py","file_ext":"py","file_size_in_byte":6621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"163829117","text":"import os\nimport logging\nimport sys\nimport socket\nimport numpy as np\nimport rospy\nimport torch\nimport torch.nn as nn\nfrom mpi4py import MPI\n\nfrom torch.optim import Adam\nfrom collections import deque\n\nfrom model.net import Actor,Critic, CNNPolicy, GaussianExploration\nfrom td3_stage_world import StageWorld\nfrom model.td3 import td3_update_stage\nfrom model.td3 import generate_action, select_action\n\nfrom model.replay_memory import ReplayMemory\n\n\nMAX_EPISODES = 30000\nLASER_BEAM = 512\nLASER_HIST = 3\nHORIZON = 3072\nGAMMA = 0.99\nLAMDA = 0.95\nBATCH_SIZE = 1024\nEPOCH = 2\nCOEFF_ENTROPY = 5e-4\nCLIP_VALUE = 0.1\nNUM_ENV = 1\nOBS_SIZE = 512\nACT_SIZE = 2\nLEARNING_RATE = 5e-5\n\nREPLAY_SIZE = 100000\nSEED = 123456\n\nexploration_noise = 0.1 \nTAU = 0.005 # target policy update parameter\npolicy_noise = 0.2 # target policy smoothing noise\nnoise_clip = 0.5\npolicy_delay = 2 # delayed policy updates parameter\n\nMAX_ACTION = 1.0\n\ndef run(comm, env, policy, policy_path, action_bound, optimizer):\n\n actor, actor_target, critic_1, critic_1_target, critic_2, critic_2_target = policy\n actor_opt, critic_1_opt, critic_2_opt = optimizer\n\n noise = GaussianExploration(action_bound)\n\n # rate = rospy.Rate(5)\n buff = []\n global_update = 0\n global_step = 0\n\n replay_memory = ReplayMemory(REPLAY_SIZE, SEED)\n\n #world reset\n if env.index == 0:\n env.reset_world()\n\n\n for id in range(MAX_EPISODES):\n \n #reset\n env.reset_pose()\n\n terminal = False\n ep_reward = 0\n step = 1\n \n # generate_goal\n env.generate_goal_point()\n \n # get_state\n obs = env.get_laser_observation()\n obs_stack = deque([obs, obs, obs])\n goal = np.asarray(env.get_local_goal())\n speed = np.asarray(env.get_self_speed())\n state = [obs_stack, goal, speed]\n\n \n while not terminal and not rospy.is_shutdown():\n \n state_list = comm.gather(state, root=0)\n\n ## get_action\n #-------------------------------------------------------------------------\n # generate actions at rank==0\n mean, action = select_action(env=env, state_list=state_list,\n actor=actor, action_bound=action_bound)\n\n # exploration\n a = noise.get_action(mean, step)\n \n '''\n a = a + np.random.normal(0, exploration_noise, size=(1,2)) #action size check\n a = a.clip(action_bound[0], action_bound[1])\n '''\n\n # execute actions\n real_action = comm.scatter(a, root=0)\n #------------------------------------------------------------------------- \n \n ### step ############################################################\n ## run action\n env.control_vel(real_action)\n 
#-------------------------------------------------------------------------\n\n # rate.sleep()\n rospy.sleep(0.001)\n\n ## get reward\n #-------------------------------------------------------------------------\n # get informtion\n r, terminal, result = env.get_reward_and_terminate(step)\n ep_reward += r\n global_step += 1\n\n #-------------------------------------------------------------------------\n # get next state\n #-------------------------------------------------------------------------\n\n s_next = env.get_laser_observation()\n left = obs_stack.popleft()\n obs_stack.append(s_next)\n goal_next = np.asarray(env.get_local_goal())\n speed_next = np.asarray(env.get_self_speed())\n state_next = [obs_stack, goal_next, speed_next]\n\n\n # add transitons in buff and update policy\n r_list = comm.gather(r, root=0)\n terminal_list = comm.gather(terminal, root=0)\n #-------------------------------------------------------------------------\n\n ########################################################################\n\n state_next_list = comm.gather(state_next, root=0)\n \n\n \n #-------------------------------------------------------------------------\n ## save memory (replay_memory)\n if env.index == 0:\n replay_memory.push(state[0], state[1],state[2], a, r_list, state_next[0], state_next[1], state_next[2], terminal_list)\n \n step += 1\n state = state_next\n\n ## training \n #------------------------------------------------------------------------------ \n if env.index == 0:\n policy_list = [actor, actor_target, critic_1, critic_1_target, critic_2, critic_2_target]\n optimizer_list = [actor_opt, critic_1_opt, critic_2_opt]\n\n if len(replay_memory) > BATCH_SIZE:\n # update policy\n td3_update_stage(policy=policy_list, optimizer=optimizer_list, batch_size=BATCH_SIZE, memory=replay_memory, epoch = step, \n replay_size=REPLAY_SIZE, gamma=GAMMA, num_step=BATCH_SIZE, num_env=NUM_ENV, frames=LASER_HIST, \n obs_size=OBS_SIZE, act_size=ACT_SIZE, tau=TAU, policy_noise=policy_noise, noise_clip=noise_clip, policy_delay=policy_delay)\n global_update += 1\n\n # save policy\n if env.index == 0:\n if global_update != 0 and global_update % 20 == 0:\n torch.save(actor.state_dict(), policy_path + '/actor_{}'.format(global_update))\n torch.save(critic_1.state_dict(), policy_path + '/critic_1_{}'.format(global_update))\n torch.save(critic_2.state_dict(), policy_path + '/critic_2_{}'.format(global_update))\n\n logger.info('########################## model saved when update {} times#########'\n '################'.format(global_update))\n distance = np.sqrt((env.goal_point[0] - env.init_pose[0])**2 + (env.goal_point[1]-env.init_pose[1])**2)\n\n logger.info('Env %02d, Goal (%05.1f, %05.1f), Episode %05d, setp %03d, Reward %-5.1f, Distance %05.1f, %s' % \\\n (env.index, env.goal_point[0], env.goal_point[1], id + 1, step, ep_reward, distance, result))\n logger_cal.info(ep_reward)\n\n\nif __name__ == '__main__':\n\n # config log\n hostname = socket.gethostname()\n if not os.path.exists('./log/' + hostname + 'td3'):\n os.makedirs('./log/' + hostname + 'td3')\n output_file = './log/' + hostname + 'td3' + '/output.log'\n cal_file = './log/' + hostname + 'td3' + '/cal.log'\n\n # config log\n logger = logging.getLogger('mylogger')\n logger.setLevel(logging.INFO)\n\n file_handler = logging.FileHandler(output_file, mode='a')\n file_handler.setLevel(logging.INFO)\n file_handler.setFormatter(logging.Formatter(\"%(asctime)s - %(levelname)s - %(message)s\"))\n stdout_handler = logging.StreamHandler(sys.stdout)\n 
stdout_handler.setLevel(logging.INFO)\n logger.addHandler(file_handler)\n logger.addHandler(stdout_handler)\n\n logger_cal = logging.getLogger('loggercal')\n logger_cal.setLevel(logging.INFO)\n cal_f_handler = logging.FileHandler(cal_file, mode='a')\n file_handler.setLevel(logging.INFO)\n logger_cal.addHandler(cal_f_handler)\n\n comm = MPI.COMM_WORLD\n\n rank = comm.Get_rank()\n size = comm.Get_size()\n \n env = StageWorld(512, index=rank, num_env=NUM_ENV)\n \n print(\"ENV\")\n \n reward = None\n action_bound = [[0, -1], [1, 1]] \n \n # torch.manual_seed(1)\n # np.random.seed(1)\n if rank == 0:\n policy_path = 'policy_0819'\n \n #actor\n actor = Actor(frames=LASER_HIST, action_space=2, max_action = MAX_ACTION)\n actor.cuda()\n \n actor_opt = Adam(actor.parameters(), lr=LEARNING_RATE)\n \n actor_target = Actor(frames=LASER_HIST, action_space=2, max_action = MAX_ACTION)\n actor_target.cuda()\n\n actor_target.load_state_dict(actor.state_dict())\n\n\n #critic1\n critic_1 = Critic(frames=LASER_HIST, action_space=2)\n critic_1.cuda()\n \n critic_1_opt = Adam(critic_1.parameters(), lr=LEARNING_RATE)\n \n critic_1_target = Critic(frames=LASER_HIST, action_space=2)\n critic_1_target.cuda()\n\n critic_1_target.load_state_dict(critic_1.state_dict())\n \n\n #critic2\n critic_2 = Critic(frames=LASER_HIST, action_space=2)\n critic_2.cuda()\n \n critic_2_opt = Adam(critic_2.parameters(), lr=LEARNING_RATE)\n \n critic_2_target = Critic(frames=LASER_HIST, action_space=2)\n critic_2_target.cuda()\n\n critic_2_target.load_state_dict(critic_2.state_dict())\n\n\n mse = nn.MSELoss()\n\n if not os.path.exists(policy_path):\n os.makedirs(policy_path)\n \n file = policy_path + '/stage1_2.pth'\n if os.path.exists(file):\n logger.info('####################################')\n logger.info('#######Actor Loading Model##########')\n logger.info('####################################')\n state_dict = torch.load(file)\n actor.load_state_dict(state_dict)\n else:\n logger.info('#####################################')\n logger.info('#######Actor Start Training##########')\n logger.info('#####################################')\n \n file = policy_path + '/stage1_2.pth'\n if os.path.exists(file):\n logger.info('####################################')\n logger.info('######Critic_1 Loading Model########')\n logger.info('####################################')\n state_dict = torch.load(file)\n critic_1.load_state_dict(state_dict)\n else:\n logger.info('#####################################')\n logger.info('######Critic_1 Start Training########')\n logger.info('#####################################')\n\n file = policy_path + '/stage1_2.pth'\n if os.path.exists(file):\n logger.info('####################################')\n logger.info('######Critic_2 Loading Model########')\n logger.info('####################################')\n state_dict = torch.load(file)\n critic_2.load_state_dict(state_dict)\n else:\n logger.info('#####################################')\n logger.info('######Critic_2 Training##############')\n logger.info('#####################################')\n\n\n policy_list = [actor, actor_target, critic_1, critic_1_target, critic_2, critic_2_target]\n optimizer_list = [actor_opt, critic_1_opt, critic_2_opt]\n\n\n else:\n actor = None\n actor_target = None\n\n critic_1 = None\n critic_1_target = None\n critic_2 = None\n critic_2_target = None\n\n policy_path = None\n actor_opt = None\n critic_1_opt = None\n critic_2_opt = None\n\n policy_list = [actor, actor_target, critic_1, critic_1_target, critic_2, critic_2_target]\n 
optimizer_list = [actor_opt, critic_1_opt, critic_2_opt]\n\n\n    try:\n        run(comm=comm, env=env, policy=policy_list, policy_path=policy_path, action_bound=action_bound, optimizer=optimizer_list)\n    except KeyboardInterrupt:\n        pass\n","sub_path":"rl_collision_avoidance/TD3/td3_stage.py","file_name":"td3_stage.py","file_ext":"py","file_size_in_byte":11525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"610581364","text":"# -*- coding: utf-8 -*-\nimport re\n\nseq = set([]) # input sequence\noverlap = set([])\nn = input()\nm = input().split(' ')\nfor i in range(int(n)):\n    num = int(m[i])\n    seq.add(num) # build the list that stores the input sequence\n    while num != 1:\n        if num % 2 == 0:\n            num = num / 2\n        else:\n            num = (3 * num + 1) / 2\n        overlap.add(num)\n\nans = seq.difference(overlap)\nans_list = list(ans)\nans_list.sort(reverse=True)\nm = len(ans_list)\n\nfor i in range(m):\n    if i != m-1:\n        print(ans_list[i], end=' ')\n    else:\n        print(ans_list[i], end='')\n\n","sub_path":"PAT_1005.py","file_name":"PAT_1005.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"489033043","text":"import random\nrandom.seed(1378)\ncharDict = {\n    0:'0',\n    1:'1',\n    2:'2',\n    3:'3',\n    4:'4',\n    5:'5',\n    6:'6',\n    7:'7',\n    8:'8',\n    9:'9',\n    10:'a',\n    11:'b',\n    12:'c',\n    13:'d',\n    14:'e',\n    15:'f',\n    }\nnumberOfTests = 200\nprint(numberOfTests)\nfor i in range(numberOfTests):\n    testLen = 80\n    for j in range(testLen * 2):\n        print(charDict[random.randint(0, 15)], end=\"\")\n    print()\n\n","sub_path":"testGenerator/testGenerator.py","file_name":"testGenerator.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"230387524","text":"# -*- coding: utf-8 -*-\nfrom decimal import Decimal\n\nfrom django.test import TestCase, SimpleTestCase\n\nfrom . import factories\nfrom ..utils import (calculate_aqhi, append_aqhi_field)\n\n\nclass TestCalcAqhi(TestCase):\n    def test_create_records(self):\n        pass\n        \"\"\"\n        for i in range(3):\n            print(factories.CityRecordFactory().aqhi)\n            print(factories.StationRecordFactory().aqhi)\n        \"\"\"\n\n\nclass TestAppendAqhiField(SimpleTestCase):\n    def test_append(self):\n        processed_dict = {\n            'city': {'aqi': Decimal('48'),\n                     'area_cn': '北京',\n                     'co': Decimal('0.35'),\n                     'no2': Decimal('36'),\n                     'o3': Decimal('43'),\n                     'o3_8h': Decimal('44'),\n                     'pm10': Decimal('47'),\n                     'pm2_5': Decimal('24'),\n                     'primary_pollutant': '',\n                     'quality': 'E',\n                     'so2': Decimal('3')},\n            'stations': {'万寿西宫': {'aqi': Decimal('26'),\n                                 'co': Decimal('0.4'),\n                                 'no2': Decimal('39'),\n                                 'o3': Decimal('45'),\n                                 'o3_8h': Decimal('44'),\n                                 'pm10': None,\n                                 'pm2_5': Decimal('18'),\n                                 'primary_pollutant': '',\n                                 'quality': 'E',\n                                 'so2': Decimal('2')},\n                         }\n        }\n\n        result = append_aqhi_field(processed_dict)\n        self.assertEqual(\n            result['city']['aqhi'],\n            Decimal('1.9008')\n        )\n        self.assertEqual(\n            result['stations']['万寿西宫']['aqhi'],\n            None\n        )\n","sub_path":"aqhi/airquality/tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":1858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"57076832","text":"\nfrom FlaskApp import db, json_response\nfrom flask.ext.cors import *\nfrom flask import Response\nfrom Model import *\n\n# Anything not listed in the second flask import can just be called from the\n# imported model\nimport flask\nfrom flask import Response, render_template, request, send_from_directory\nfrom werkzeug.security import generate_password_hash, check_password_hash\n\nfrom urllib.request import urlopen\nimport json\n\n# This is the application context which does the routing.\napp = flask.Flask(__name__)\n\n#################################\n# Routing for Webpages#\n\n@app.route('/')\ndef main_page():\n    \"\"\"\n    Serve the main page from ./templates/main.html\n    \"\"\"\n    return render_template(\"main.html\")\n\n@app.route('/characters')\ndef characters_page():\n    \"\"\"\n    Serve the characters list page from ./templates/characters.html\n    \"\"\"\n    characters = list(Character.query.all())\n    return render_template(\"characters.html\", characters = characters)\n    \n@app.route('/characters/')\ndef character_template_page(char_id):\n    \"\"\"\n    Serve the character page from ./templates/character_template.html\n    \"\"\"\n    char = Character.query.get(char_id)\n    if char is None:\n        flask.abort(404)\n    return render_template(\"character_template.html\", c=char)\n\n@app.route('/events')\ndef events_page():\n    \"\"\"\n    Serve the events list page from ./templates/events.html\n    \"\"\"\n    events = list(Event.query.all())\n    return render_template(\"events.html\", events = events)\n    \n@app.route('/events/')\ndef event_template_page(event_id):\n    \"\"\"\n    Serve the event page from ./templates/event_template.html\n    \"\"\"\n    event = Event.query.get(event_id)\n    if event is None:\n        flask.abort(404)\n    return render_template(\"event_template.html\", event=event)\n\n@app.route('/locations')\ndef locations_page():\n    \"\"\"\n    Serve the locations list page from ./templates/locations.html\n    \"\"\"\n    locations = list(Location.query.all())\n    return render_template(\"locations.html\", locations = locations)\n    \n@app.route('/locations/')\ndef location_template_page(loc_id):\n    \"\"\"\n    Serve the location page from ./templates/location_template.html\n    \"\"\"\n    location = Location.query.get(loc_id)\n    if location is 
None:\n flask.abort(404)\n return render_template(\"location_template.html\", loc=location)\n\n@app.route('/organizations')\ndef organizations_page():\n \"\"\"\n Serve the organizations list page from ./templates/organizations.html\n \"\"\"\n organizations = list(Organization.query.all())\n return render_template(\"organizations.html\", organizations = organizations)\n \n@app.route('/organizations/')\ndef organization_template_page(org_id):\n \"\"\"\n Serve the organization page from ./templates/organization_template.html\n \"\"\"\n org = Organization.query.get(org_id)\n if org is None:\n flask.abort(404)\n return render_template(\"organization_template.html\", org=org)\n \n@app.route('/episodes')\ndef episodes_page():\n \"\"\"\n Serve the episodes list page from ./templates/episodes.html\n \"\"\"\n eps = list(Episode.query.order_by(Episode.ep_id).all())\n seasons = {}\n for e in eps:\n s = seasons.get(e.season)\n if s is None:\n s = []\n seasons[e.season] = s\n s.append(e)\n skeys = list(seasons.keys())\n skeys.sort()\n \n return render_template(\"episodes.html\", episodes=eps, epsbyseason=seasons,\n seasons=skeys)\n\n@app.route('/episodes/')\ndef episode_template_page(ep_id):\n \"\"\"\n Serve the episode page from ./templates/episode_template.html\n \"\"\"\n ep = Episode.query.get(ep_id)\n if ep is None:\n flask.abort(404)\n return render_template(\"episode_template.html\", ep=ep)\n\n@app.route('/about')\ndef about_page():\n \"\"\"\n Serve the about page from ./templates/about.html\n \"\"\"\n return render_template(\"about.html\")\n \n@app.route('/pythians')\ndef pythians_page():\n \"\"\"\n Serve the Pythian's API page from ./templates/pythians_template.html\n \"\"\"\n host = \"http://104.239.139.162/scrape/\"\n \n athletes = json.loads(urlopen(host + \"athletes\").read().decode(\"utf-8\"))\n countries = json.loads(urlopen(host + \"countries\").read().decode(\"utf-8\"))\n events = json.loads(urlopen(host + \"events\").read().decode(\"utf-8\"))\n medals = json.loads(urlopen(host + \"medals\").read().decode(\"utf-8\"))\n \n result = {}\n \n for country in countries:\n result[countries[country][\"name\"]] = {\"male_name\": \"\", \"male_total\": 0, \"female_name\": \"\", \"female_total\": 0}\n \n for athlete in athletes:\n medal_list = athletes[athlete][\"medals\"] #list of medals\n for medal in medal_list:\n country = medal[\"repr\"]\n if athletes[athlete][\"gender\"] == \"Men\":\n if len(medal_list) > result[country][\"male_total\"]:\n result[country][\"male_total\"] = len(medal_list)\n result[country][\"male_name\"] = athletes[athlete][\"first\"] + \" \" + athletes[athlete][\"last\"]\n else:\n if len(medal_list) > result[country][\"female_total\"]:\n result[country][\"female_total\"] = len(medal_list)\n result[country][\"female_name\"] = athletes[athlete][\"first\"] + \" \" + athletes[athlete][\"last\"]\n \n return render_template(\"pythians_template.html\", result = result)\n\n@app.route('/results')\ndef search_results_page():\n \"\"\"\n Serve the results page from the search bar on the layout.hmtl\n \"\"\"\n query = request.args.get('query')\n return render_template(\"search_results.html\", query = query)\n\n@app.route('/doc/')\ndef doc_page(path):\n \"\"\"\n Send documents from the auto-generated html directory.\n \n This implements the documentation routing endpoint.\n \"\"\"\n return send_from_directory('html',path)\n\n@app.route('/img/')\ndef get_image(path):\n \"\"\"\n Send files from the ./static/images/ directory, allowing arbitrary linking\n to it from any page.\n \n Use this 
endpoint instead of embedding url_for('static', ...) in the\n template code.\n \"\"\"\n return send_from_directory('static/images', path)\n\n@app.route('/tests')\ndef display_unit_tests():\n import tests\n import subprocess\n output = subprocess.check_output([\"./tests.py\"], stderr=subprocess.STDOUT,\n shell=True)\n return render_template('tests.html', results=output.decode(\"ascii\").split('\\n'))\n\n############################################\n############################################\n#Routing for API#\n\n@app.route('/api/tests')\n@cross_origin()\ndef run_unit_tests():\n import tests\n import subprocess\n minimize = False\n process = \"./tests.py --json\"\n if(request.args.get(\"min\") is not None):\n print(\"/api/tests?min={}\".format(request.args.get(\"min\")))\n try:\n minimize = request.args.get(\"min\").lower() == \"true\"\n print(\"Value of min: {}\".format(minimize))\n except:\n minimize = False\n if minimize:\n process += \" -c\"\n \n try:\n output = subprocess.check_output([process],\n stderr=subprocess.STDOUT,\n shell=True)\n except subprocess.CalledProcessError as cpe:\n output = cpe.output\n output = output.decode('utf-8')\n return Response(output, mimetype=\"application/json\")\n\n@app.route('/api/characters')\n@cross_origin()\ndef get_all_characters():\n return json_response([c.serialize(True) for c in Character.query.all()])\n\n@app.route('/api/characters/')\n@cross_origin()\ndef get_character(cid):\n ch = Character.query.get(cid)\n if not ch is None:\n return flask.jsonify(ch.serialize())\n return flask.abort(404)\n\n@app.route('/api/events')\n@cross_origin()\ndef get_all_events():\n return json_response([e.serialize(True) for e in Event.query.all()])\n\n@app.route('/api/events/')\n@cross_origin()\ndef get_event(eid):\n event = Event.query.get(eid)\n if event is None:\n flask.abort(404)\n return flask.jsonify(event.serialize())\n\n@app.route('/api/organizations')\n@cross_origin()\ndef get_all_orgs():\n return json_response([o.serialize(True) for o in Organization.query.all()])\n\n@app.route('/api/organizations/')\n@cross_origin()\ndef get_org(oid):\n org = Organization.query.get(oid)\n if org is None:\n flask.abort(404)\n return flask.jsonify(org.serialize())\n\n@app.route('/api/locations')\n@cross_origin()\ndef get_all_locations():\n return json_response([l.serialize(True) for l in Location.query.all()])\n\n@app.route('/api/locations/')\n@cross_origin()\ndef get_location(lid):\n loc = Location.query.get(lid)\n if loc is None:\n flask.abort(404)\n return flask.jsonify(loc.serialize())\n\n@app.route('/api/episodes')\n@cross_origin()\ndef get_all_episodes():\n return json_response([e.serialize(True) for e in Episode.query.all()])\n\n@app.route('/api/episodes/')\n@cross_origin()\ndef get_episode(eid):\n ep = Episode.query.get(eid)\n if ep is None:\n flask.abort(404)\n return flask.jsonify(ep.serialize())\n\n@app.route('/api/search')\n@cross_origin()\ndef search():\n searchterm = request.args.get('query')\n if searchterm is None:\n return json_response({})\n return json_response(searchdb(searchterm))\n","sub_path":"Routing.py","file_name":"Routing.py","file_ext":"py","file_size_in_byte":9311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"318018754","text":"from imgurpython import ImgurClient\r\nfrom imgurpython.helpers.error import ImgurClientError\r\nimport string\r\n\r\n\r\nclient_id = ''\r\nclient_secret = ''\r\n\r\nclient = ImgurClient(client_id, client_secret)\r\n\r\ntry:\r\n #try to search through a ton of 
galleries and images\r\n #and look at title and desc\r\n\r\n terms = ['dog','dogs','pup','pups','puppies','puppers',\\\r\n 'puppers','bark','doggo','doggos','doge',\\\r\n 'woof','woofer','cute','snuggle','sleepy',\\\r\n 'nap','baby','babies','husky','retriever',\\\r\n 'lab','labs','sleep','asleep','friends',\\\r\n 'spaniel']\r\n\r\n\r\n #get first 10 pages of content\r\n pages = [] #list of GalleryTag objects\r\n for i in range(0,10):\r\n\r\n #returns a GalleryTag object, who's .items is a list of\r\n #GalleryImage/GalleryAlbum objects. The tag is actually a \"topic\" page\r\n #on imgur's website.\r\n page = client.gallery_tag(tag='aww',window='day',page=i)\r\n pages.append(page)\r\n\r\n\r\n final_links = [] #link, number of relevant terms\r\n for page in pages:\r\n #list of GalleryImage and GalleryAlbum objects\r\n links = page.items\r\n\r\n #find any relevant terms in title or description\r\n for item in links:\r\n title = item.title.strip().lower()\r\n desc = item.description\r\n if desc == None:\r\n desc = ' '\r\n else:\r\n desc = desc.strip().lower()\r\n #greater chance of matching keywords but likely less\r\n #relevant, put in a check\r\n if len(desc) >= 500:\r\n desc = ' '\r\n\r\n #get rid of spaces, punctuation, and numbers.\r\n #combine both into one string for comparison\r\n exclude = string.digits + string.punctuation\r\n str1 = ''.join((title+desc).split(' '))\r\n str2 = ''.join(char for char in str1 if char not in exclude)\r\n\r\n #amount of relevant terms in title and description\r\n rel_list = [term for term in terms if term in str2]\r\n\r\n #get the links\r\n if len(rel_list) > 0:\r\n\r\n if item.is_album == False: #single image\r\n if item.animated == True:\r\n #print(item.gifv + str(rel_list))\r\n final_links.append((item.gifv,len(rel_list)))\r\n else:\r\n #print(item.link + str(rel_list))\r\n final_links.append((item.link,len(rel_list)))\r\n else: #album\r\n pass\r\n\r\n final_links.sort(key=lambda x: x[1],reverse=True)\r\n print(final_links[0:5])\r\n\r\n\r\n\r\nexcept ImgurClientError as e:\r\n print(e.error_message)\r\n print(e.status_code)\r\n","sub_path":"ImgurScrape1.py","file_name":"ImgurScrape1.py","file_ext":"py","file_size_in_byte":2763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"474852637","text":"import numpy as np\r\nimport pandas as pd\r\nimport glob\r\nimport tensorflow as tf\r\nfrom tensorflow.python.ops import control_flow_ops\r\nfrom tqdm import tqdm\r\n\r\n\r\nimport midi_manipulation\r\n\r\ndef get_songs(path):\r\n files = glob.glob('{}/*.mid*'.format(path))\r\n songs = []\r\n for f in tqdm(files):\r\n try:\r\n song = np.array(midi_manipulation.midiToNoteStateMatrix(f))\r\n if np.array(song).shape[0] > 50:\r\n songs.append(song)\r\n except Exception as e:\r\n raise e \r\n return songs\r\n\r\nsongs = get_songs('Jazz') \r\nprint (\"{} songs processed\".format(len(songs)))\r\n\r\nlowest_note = midi_manipulation.lowerBound\r\nhighest_note = midi_manipulation.upperBound\r\nnote_range = highest_note-lowest_note \r\n\r\nnum_timesteps = 15 \r\nn_visible = 2*note_range*num_timesteps \r\nn_hidden = 50 \r\n\r\nnum_epochs = 200\r\nbatch_size = 100\r\nlr = tf.constant(0.005, tf.float32)\r\n\r\nx = tf.placeholder(tf.float32, [None, n_visible], name=\"x\")\r\nW = tf.Variable(tf.random_normal([n_visible, n_hidden], 0.01), name=\"W\") \r\nbh = tf.Variable(tf.zeros([1, n_hidden], tf.float32, name=\"bh\"))\r\nbv = tf.Variable(tf.zeros([1, n_visible], tf.float32, name=\"bv\"))\r\n\r\ndef sample(probs):\r\n 
return tf.floor(probs + tf.random_uniform(tf.shape(probs), 0, 1))\r\n\r\ndef gibbs_sample(k):\r\n def gibbs_step(count, k, xk):\r\n hk = sample(tf.sigmoid(tf.matmul(xk, W) + bh))\r\n xk = sample(tf.sigmoid(tf.matmul(hk, tf.transpose(W)) + bv))\r\n return count+1, k, xk\r\n \r\n ct = tf.constant(0)\r\n [_, _, x_sample] = control_flow_ops.while_loop(lambda count, num_iter, *args: count < num_iter,\r\n gibbs_step, [ct, tf.constant(k), x])\r\n x_sample = tf.stop_gradient(x_sample) \r\n return x_sample\r\nx_sample = gibbs_sample(1) \r\nh = sample(tf.sigmoid(tf.matmul(x, W) + bh)) #sigmoid(wx + b)\r\nh_sample = sample(tf.sigmoid(tf.matmul(x_sample, W) + bh)) #sample from sigmoid(x_sample*W + b)\r\nsize_bt = tf.cast(tf.shape(x)[0], tf.float32)\r\nW_adder = tf.multiply(lr/size_bt, tf.subtract(tf.matmul(tf.transpose(x), h), tf.matmul(tf.transpose(x_sample), h_sample))) #weight updation is necassary based on decoder model\r\nbv_adder = tf.multiply(lr/size_bt, tf.reduce_sum(tf.subtract(x, x_sample), 0, True))\r\nbh_adder = tf.multiply(lr/size_bt, tf.reduce_sum(tf.subtract(h, h_sample), 0, True))\r\nupdt = [W.assign_add(W_adder), bv.assign_add(bv_adder), bh.assign_add(bh_adder)]\r\n\r\nwith tf.Session() as sess:\r\n\r\n init = tf.global_variables_initializer()\r\n sess.run(init)\r\n for epoch in tqdm(range(num_epochs)):\r\n for song in songs:\r\n song = np.array(song)\r\n song = song[:int(np.floor(song.shape[0]/num_timesteps)*num_timesteps)]\r\n song = np.reshape(song, [song.shape[0]//num_timesteps, song.shape[1]*num_timesteps])\r\n for i in range(1, len(song), batch_size): \r\n tr_x = song[i:i+batch_size]\r\n sess.run(updt, feed_dict={x: tr_x})\r\n sample = gibbs_sample(1).eval(session=sess, feed_dict={x: np.zeros((50, n_visible))})\r\n for i in range(sample.shape[0]):\r\n if not any(sample[i,:]):\r\n continue\r\n S = np.reshape(sample[i,:], (num_timesteps, 2*note_range))\r\n midi_manipulation.noteStateMatrixToMidi(S, \"generated_chord_{}\".format(i))\r\n","sub_path":"PES1201701295_source/Model_1.py","file_name":"Model_1.py","file_ext":"py","file_size_in_byte":3345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"80437691","text":"from compdevkit import FunctionsTest\n\nfrom compconfig import functions, helpers\n\n\ndef test_functions():\n\tta = FunctionsTest(\n\t\tget_inputs=functions.get_inputs,\n\t\tvalidate_inputs=functions.validate_inputs,\n\t\trun_model=functions.run_model,\n\t\tok_adjustment={\"Tax Information\": {\"mstat\": [{\"value\": \"Joint\"}]}, \"Policy\":{}},\n\t\tbad_adjustment={\"Tax Information\": {\"mstat\": [{\"value\": 2}]}, \"Policy\":{\"STD\": -1}}\n\t)\n\tta.test()\n\ndef test_convert_adj():\n adj = {\n \"STD\": [\n {\"MARS\": \"single\", \"year\": \"2019\", \"value\": 0},\n {\"MARS\": \"mjoint\", \"year\": 2019, \"value\": 1}\n ],\n \"EITC_c\": [{\"EIC\": \"0kids\", \"year\": \"2019\", \"value\": 1000.0}],\n \"BEN_ssi_repeal\": [\n {\"year\": 2019, \"value\": True}\n ]\n }\n\n res = helpers.convert_adj(adj, 2019)\n\n assert res == {\n \"STD\": {\n 2019: [0, 1, 12268.8, 18403.2, 24537.6]\n },\n \"EITC_c\": {\n 2019: [1000.0, 3538.53, 5844.04, 6575.05]\n },\n \"BEN_ssi_repeal\": {\n 2019: True\n }\n }","sub_path":"compconfig/compconfig/tests/test_functions.py","file_name":"test_functions.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"632349104","text":"from typing import Dict\r\n\r\n\r\nclass Solution:\r\n \"\"\"\r\n 
Python Easy-to-understand Recursive solution\r\n \"\"\"\r\n def evaluate(self, expression):\r\n \"\"\"\r\n :type expression: str\r\n :rtype: int\r\n \"\"\"\r\n return self.eval_with_env(expression, {})\r\n\r\n def match_parenthesis(self, expression: str) -> Dict[int, int]:\r\n parenthesis = {}\r\n stack = []\r\n for i in range(len(expression)):\r\n if expression[i] == '(':\r\n stack.append(i)\r\n elif expression[i] == ')':\r\n j = stack.pop()\r\n parenthesis[j] = i\r\n return parenthesis\r\n\r\n def eval_with_env(self, expr: str, env: Dict[str, int]) -> int:\r\n if expr.startswith('('):\r\n parenthesis = self.match_parenthesis(expr)\r\n if expr.startswith(\"add\", 1): # (add operand1 operand2)\r\n if expr[5].isalpha() or expr[5].isnumeric() or expr[5] == '-':\r\n sub_expr = expr[5:-1] # type: str\r\n operand_1, operand_2 = sub_expr.split(\" \", 1)\r\n return self.eval_with_env(operand_1, env.copy()) + self.eval_with_env(operand_2, env.copy())\r\n elif expr[5] == '(':\r\n operand_1 = expr[5:parenthesis[5] + 1]\r\n operand_2 = expr[parenthesis[5] + 2:-1]\r\n return self.eval_with_env(operand_1, env.copy()) + self.eval_with_env(operand_2, env.copy())\r\n elif expr.startswith(\"mult\", 1): # (mult operand1 operand2)\r\n if expr[6].isalpha() or expr[6].isnumeric() or expr[6] == '-':\r\n sub_expr = expr[6:-1] # type: str\r\n operand_1, operand_2 = sub_expr.split(\" \", 1)\r\n return self.eval_with_env(operand_1, env.copy()) * self.eval_with_env(operand_2, env.copy())\r\n elif expr[6] == '(':\r\n operand_1 = expr[6:parenthesis[6] + 1]\r\n operand_2 = expr[parenthesis[6] + 2:-1]\r\n return self.eval_with_env(operand_1, env.copy()) * self.eval_with_env(operand_2, env.copy())\r\n elif expr.startswith(\"let\", 1): # (let identifier expr ... expr)\r\n rest = expr[5:-1]\r\n while True:\r\n\r\n identifier, rest = rest.split(\" \", 1) # type: str, str\r\n if rest[0].isnumeric() or rest[0] == '-':\r\n value, rest = rest.split(\" \", 1)\r\n env[identifier] = int(value)\r\n elif rest[0].isalpha():\r\n sub_expr, rest = rest.split(\" \", 1)\r\n env[identifier] = self.eval_with_env(sub_expr, env.copy())\r\n elif rest[0] == '(':\r\n close_parenthesis_pos = self.match_parenthesis(rest)[0]\r\n sub_expr = rest[:close_parenthesis_pos + 1]\r\n rest = rest[close_parenthesis_pos + 2:]\r\n env[identifier] = self.eval_with_env(sub_expr, env.copy())\r\n\r\n if rest[0] == '(' or rest[0].isnumeric() or rest[0] == '-' or \\\r\n (rest[0].isalpha() and rest.find(\" \") == -1):\r\n return self.eval_with_env(rest, env.copy())\r\n\r\n elif expr[0].isnumeric() or expr[0] == '-':\r\n return int(expr)\r\n elif expr[0].isalpha():\r\n expr = expr.strip()\r\n return env[expr]\r\n\r\n\r\nif __name__ == '__main__':\r\n expression = [\r\n '(add 1 2)',\r\n '(mult 3 (add 2 3))',\r\n '(let x 2 (mult x 5))',\r\n '(let x 2 (mult x (let x 3 y 4 (add x y))))',\r\n '(let x 3 x 2 x)',\r\n '(let x 1 y 2 x (add x y) (add x y))',\r\n '(let x 2 (add (let x 3 (let x 4 x)) x))',\r\n '(let a1 3 b2 (add a1 1) b2)',\r\n ]\r\n output = [\r\n 3,\r\n 15,\r\n 10,\r\n 14,\r\n 2,\r\n 5,\r\n 6,\r\n 4,\r\n ]\r\n for i in range(len(expression)):\r\n sol = Solution().evaluate(expression[i])\r\n print(sol)\r\n print(\"solution is right\" if sol == output[i] else \"solution is wrong\")\r\n","sub_path":"contests/weekly_contest_60/parse_lisp_expression_1.py","file_name":"parse_lisp_expression_1.py","file_ext":"py","file_size_in_byte":4151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"267441302","text":"# 
TRAINING SETTINGS\nBATCH_SIZE = 4\nVISIBLE_GPUS = None\nN_EPOCHS = 3\nSEED = 42\n\n# MODEL DEFINITION + INITIALIZATION\nWEIGHT_STDDEV = 0.02\nMAX_LENGTH = 512\nN_HEADS = 12\nN_LAYER = 12\nACT_FN = \"gelu\"\nN_EMBED = 768\n\n# REGULARIZATION\nEMBED_P_DROP = 0.1\nATTN_P_DROP = 0.1\nRESID_P_DROP = 0.1\nCLF_P_DROP = 0.1\nL2_REG = 0.01\nVECTOR_L2 = True\n\n# LOSS + OPTIMIZATION\nB1 = 0.9\nB2 = 0.999\nEPSILON = 1e-8\nLR_SCHEDULE = 'warmup_linear'\nLR = 6.25e-5\nLR_WARMUP = 0.002\nMAX_GRAD_NORM = 1\nLM_LOSS_COEF = 0.5\nROLLING_AVG_DECAY = 0.99\n\n# Logging\nSUMMARIZE_GRADS = False\n","sub_path":"finetune/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"233793367","text":"from django.contrib.auth import authenticate, login, logout\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom django.contrib.sites.shortcuts import get_current_site\nfrom django.views import View\nfrom django.utils.encoding import force_text\nfrom django.contrib.auth.models import User, Group\nfrom django.utils.http import urlsafe_base64_decode\nfrom django.utils.encoding import force_bytes\nfrom django.utils.http import urlsafe_base64_encode\nfrom django.template.loader import render_to_string\nfrom django.urls import reverse, reverse_lazy\nfrom .tokens import account_activation_token\nfrom django.views.generic import CreateView, DeleteView, UpdateView, ListView, DetailView\nfrom django.contrib import messages\nfrom .forms import *\nfrom django.contrib.auth.mixins import LoginRequiredMixin, PermissionRequiredMixin, UserPassesTestMixin\nfrom dal import autocomplete\nfrom django.core.mail import EmailMessage\nimport openpyxl\nfrom django.core.exceptions import ValidationError\nfrom .models import *\nfrom .forms import SignUpForm\nfrom django.db.models import Q\nimport random\nfrom django_filters.views import FilterView\nfrom hana.filter import TaskFilter\n\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\n\n# Explicit imports: os, datetime and timezone are used below and should not\n# rely on the wildcard imports above.\nimport datetime\nimport logging\nimport os\n\nfrom django.utils import timezone\n\nlogger = logging.getLogger(__name__)\n\ndef validate_file_extension(value):\n    ext = os.path.splitext(value.name)[1]  # [0] returns path+filename\n    valid_extensions = ['.xlsx', '.xls']\n    if not ext.lower() in valid_extensions:\n        raise ValidationError('Unsupported file extension.')\n\n\ndef toggle_task_completed(task_id: int) -> bool:\n    \"\"\"Toggle the `completed` bool on Task from True to False or vice versa.\"\"\"\n    try:\n        task = Task.objects.get(id=task_id)\n        task.completed = not task.completed\n        task.save()\n        return True\n\n    except Task.DoesNotExist:\n        # messages.info() requires a request object, which is not available\n        # here, so log instead.\n        logger.info(f\"Task {task_id} not found.\")\n        return False\n\nclass HomeView(View):\n    def get(self, request):\n        return render(request, 'base1.html')\n\n\nclass UserView(LoginRequiredMixin, View):\n    def get(self, request):\n        u_form = UserUpdateForm(instance=request.user)\n        p_form = ProfileUpdateForm(instance=request.user.profile)\n        ctx = {\n            \"u_form\": u_form,\n            \"p_form\": p_form\n        }\n        return render(request, \"hana/profile.html\", ctx)\n\n    def post(self, request):\n        u_form = UserUpdateForm(request.POST, instance=request.user)\n        p_form = ProfileUpdateForm(request.POST, request.FILES, instance=request.user.profile)\n        if u_form.is_valid() and p_form.is_valid():\n            u_form.save()\n            p_form.save()\n            messages.success(request, \"{}, your account has been updated!\".format(request.user))\n            return redirect('profile')\n        else:\n            u_form = UserUpdateForm(request.POST, instance=request.user)\n            p_form = ProfileUpdateForm(request.POST, request.FILES, 
instance=request.user.profile)\n ctx = {\n \"u_form\": u_form,\n \"p_form\": p_form\n }\n return render(request, \"hana/profile.html\", ctx)\n\n\nclass ActivateView(View):\n def get(self, request, uidb64, token, *args, **kwargs):\n try:\n uid = force_text(urlsafe_base64_decode(uidb64))\n user = User.objects.get(pk=uid)\n except (TypeError, ValueError, OverflowError, User.DoesNotExist):\n user = None\n # checking if the user exists, if the token is valid.\n if user is not None and account_activation_token.check_token(user, token):\n # if valid set active true\n user.is_active = True\n # set signup_confirmation true\n user.profile.signup_confirmation = True\n user.save()\n login(request, user)\n messages.success(request, ('Your account have been confirmed.'))\n return redirect('home')\n else:\n messages.warning(request, ('The confirmation link was invalid, possibly because it has already been used.'))\n return redirect('home')\n\n\nclass SignupView(View):\n form_class = SignUpForm\n template_name = 'hana/signup.html'\n\n def get(self, request, *args, **kwargs):\n form = self.form_class()\n return render(request, self.template_name, {'form': form})\n\n def post(self, request, *args, **kwargs):\n form = self.form_class(request.POST)\n if form.is_valid():\n group = Group.objects.get(name=\"Employee\")\n user = form.save(commit=False)\n user.is_active = False # Deactivate account till it is confirmed\n user.save()\n group.user_set.add(user)\n\n current_site = get_current_site(request)\n subject = 'Activate Your hana-Account'\n message = render_to_string('hana/activation_request.html', {\n 'user': user,\n 'domain': current_site.domain,\n 'uid': urlsafe_base64_encode(force_bytes(user.pk)),\n 'token': account_activation_token.make_token(user),\n })\n to_email = form.cleaned_data.get('email')\n email = EmailMessage(subject, message, to=[to_email])\n email.send()\n messages.success(request, ('Authentication email has been sent to employee to complete registration.'))\n return redirect('home')\n\n return render(request, self.template_name, {'form': form})\n\n\nclass UserLoginView(View):\n def get(self, request):\n form = UserLoginForm()\n return render(request, \"hana/login.html\", {'form': form})\n\n def post(self, request):\n form = UserLoginForm(request.POST or None)\n if form.is_valid(): # uruchomienie walidacji\n user = form.authenticate_user()\n if user is not None:\n if user.is_active:\n login(request, user)\n if request.GET.get('next'):\n return redirect(request.GET.get('next'))\n\n return redirect(reverse('home'))\n else:\n form.add_error(None, \"Your account is not active\")\n else:\n # user is None\n form.add_error(None, \"Wrong email or password\")\n return render(request, \"hana/login.html\", {'form': form})\n\n\nclass UserLogoutView(View):\n def get(self, request):\n logout(request)\n messages.info(request, \"You are now logged out\")\n return redirect(reverse('home'))\n\nclass ExcelUploadView(LoginRequiredMixin, View):\n\n def get(self, request):\n return render(request, \"hana/excel_upload.html\")\n\n def post(self, request):\n excel_file = request.FILES['excel_file']\n # validations here to check extension or file size\n validate_file_extension(excel_file)\n wb = openpyxl.load_workbook(excel_file)\n # getting a particular sheet by name out of many sheets\n active_sheet = wb.active\n print(active_sheet)\n\n excel_data = list()\n print(excel_data)\n\n # iterating over the rows and\n # getting value from each cell in row\n for row in active_sheet.iter_rows(min_row=2, max_col=2):\n 
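# Added note: iter_rows(min_row=2) skips the header row, and max_col=2\n            # limits reading to the first two columns, used below as (name, due_date).\n            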
row_data = list()\n for cell in row:\n row_data.append(str(cell.value))\n excel_data.append(row_data)\n Task.objects.create(name=row_data[0], due_date=row_data[1], created_by=self.request.user)\n print(excel_data)\n return redirect(\"excel-table\")\n # return render(request, 'hana/excel_upload.html', {\"excel_data\": excel_data})\n\n\nclass UsersListView(View):\n def get(self, request):\n users = User.objects.all()\n return render(request, \"hana/users_list.html\", {'users': users})\n\n\nclass UserDeleteView(PermissionRequiredMixin, DeleteView):\n permission_required = 'auth.delete_user'\n raise_exception = True\n permission_denied_message = \"You are not authorized for this action!\"\n\n model = User\n success_url = reverse_lazy('user-list')\n template_name = \"hana/employee_confirm_delete.html\"\n\nclass UserUpdateView(LoginRequiredMixin, UpdateView):\n\n model = User\n success_url = reverse_lazy('user-list')\n form_class = SignUpForm\n template_name = 'hana/user_update_form.html'\n\n\nclass PostListView(ListView):\n model = Post\n template_name = 'hana/home.html'\n context_object_name = 'posts'\n ordering = ['-date_posted']\n paginate_by = 5\n\n\nclass UserPostsListView(ListView):\n model = Post\n template_name = 'hana/user_posts.html'\n context_object_name = 'posts'\n ordering = ['-date_posted']\n paginate_by = 5\n\n def get_queryset(self):\n user = get_object_or_404(User, username=self.kwargs.get(\"username\"))\n return Post.objects.filter(author=user).order_by('-date_posted')\n\n\nclass PostDetailView(DetailView):\n model = Post\n template_name = 'hana/post_detail.html'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['now'] = timezone.now()\n return context\n\n\nclass PostCreateView(LoginRequiredMixin, CreateView):\n model = Post\n fields = ['title', 'content']\n\n def form_valid(self, form):\n form.instance.author = self.request.user\n return super(PostCreateView, self).form_valid(form)\n\n\nclass PostUpdateView(LoginRequiredMixin, UserPassesTestMixin, UpdateView):\n model = Post\n fields = ['title', 'content']\n\n def form_valid(self, form):\n form.instance.author = self.request.user\n return super().form_valid(form)\n\n def test_func(self):\n post = self.get_object()\n if self.request.user == post.author:\n return True\n return False\n\n\nclass PostDeleteView(LoginRequiredMixin, UserPassesTestMixin, DeleteView):\n model = Post\n success_url = reverse_lazy('home')\n\n def test_func(self):\n post = self.get_object()\n if self.request.user == post.author:\n return True\n return False\n\n\nclass PostCommentCreateView(LoginRequiredMixin, CreateView):\n model = Comment\n fields = ['title', 'content']\n\n def form_valid(self, form):\n post = get_object_or_404(Post, pk=self.kwargs['pk'])\n form.instance.author = self.request.user\n form.instance.post = post\n return super().form_valid(form)\n\n\nclass EmployeePostkView(LoginRequiredMixin, View):\n def get(self, request):\n if request.user.is_authenticated:\n posts = Post.objects.filter(author=request.user)\n return render(request, \"hana/user_posts.html\", {'posts': posts})\n return redirect(reverse('user-login'))\n\n\nclass ExcelTableView(FilterView):\n model = Task\n template_name = 'hana/excel_view.html'\n context_object_name = 'tasks'\n paginate_by = 10\n filterset_class = TaskFilter\n ordering = ['name']\n\nclass TaskAutocompleteView(LoginRequiredMixin, autocomplete.Select2QuerySetView):\n\n def get_queryset(self):\n # Don't forget to filter out results depending on the visitor !\n 
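# Added note: dal's Select2QuerySetView calls get_queryset() for each\n        # autocomplete request; self.q holds the user's current search string.\n        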
if not self.request.user.is_authenticated:\n            return Task.objects.none()\n\n        qs = Task.objects.all()\n\n        if self.q:\n            qs = qs.filter(Q(name__istartswith=self.q) |\n                           Q(note__icontains=self.q))\n\n        return qs\n\n\nclass TaskAddView(LoginRequiredMixin, CreateView):\n    form_class = AddEditTaskForm\n    template_name = \"hana/task_create.html\"\n    success_url = reverse_lazy('excel-table')\n\n    def form_valid(self, form):\n        # form save\n        form.instance.created_by = self.request.user\n        self.object = form.save(commit=False)\n        self.object.save()\n\n        return super().form_valid(form)\n\n\nclass TaskSearchResultView(ListView):\n    model = Task\n    template_name = 'hana/task_list.html'\n    context_object_name = \"found_tasks\"\n\n    def get_queryset(self):\n        query_string = self.request.GET.get(\"q\").strip()\n        if query_string:\n            found_tasks = Task.objects.filter(\n                Q(name__icontains=query_string) |\n                Q(note__icontains=query_string)\n            )\n        else:\n            found_tasks = Task.objects.none()\n        return found_tasks\n\n    def get_context_data(self, **kwargs):\n        context = super().get_context_data(**kwargs)\n        context['query_string'] = self.request.GET.get(\"q\")\n        return context\n\n\nclass TaskEditView(LoginRequiredMixin, UpdateView):\n    model = Task\n    form_class = AddEditTaskForm2\n\n\n    def get_success_url(self):\n        if self.request.user.groups.filter(name=\"Team Leader\").exists():\n            return reverse('excel-table')\n        else:\n            return reverse('my-tasks')\n\n\nclass TaskDetailView(LoginRequiredMixin, View):\n    def get(self, request, task_id):\n        if request.user.is_authenticated:\n            user = request.user\n            task = get_object_or_404(Task, pk=task_id)\n            comment_list = Info.objects.filter(task=task_id).order_by('-date')\n            form = AddEditTaskForm(instance=task)\n\n            context = {\n                'form': form,\n                'task': task,\n                'comment_list': comment_list,\n                'user': user,\n            }\n            return render(request, \"hana/task_detail.html\", context)\n\n    def post(self, request, task_id):\n        # Save task edits\n        task = get_object_or_404(Task, pk=task_id)\n        if request.POST.get(\"add_comment\"):\n            Info.objects.create(\n                author=request.user, task=task,\n                body=(request.POST[\"comment-body\"].strip())\n            )\n            messages.success(request, \"Comment posted\")\n            return redirect(reverse_lazy(\"task-detail\", args=[task_id,]))\n        request.POST.get(\"add_edit_task\")\n        form = AddEditTaskForm(request.POST, instance=task)\n        if form.is_valid():\n            if request.POST.get(\"notify\"):\n                current_site = get_current_site(request)\n                subject = render_to_string('email/assigned_subject.txt', {\"task\": task})\n                body = render_to_string('email/assigned_body.txt', {\n                    'task': task,\n                    'site': current_site,\n                })\n                to_email = \"\"\n                if task.assigned_to:\n                    task.status = 1\n                    to_email = task.assigned_to.email\n                    email = EmailMessage(subject, body, to=[to_email])\n                    email.send()\n                    messages.success(request, 'Notification email has been sent to employee!')\n                    form.save()\n                    messages.success(request, \"Task successfully submitted!\")\n                else:\n                    messages.warning(request, \"No email defined to send the info! 
Task can't be submitted!\")\n            else:\n                form.save()\n                messages.success(request, \"Task successfully submitted without notification email!\")\n            return redirect(reverse('excel-table'))\n        else:\n            print(form.errors)\n            return render(request, 'hana/task_detail.html', locals())\n\n\nclass TaskAllocateView(View):\n    def post(self, request):\n        if request.POST.get(\"task_allocate\") is not None:\n            tasks = Task.objects.filter(assigned_to=None)\n            for task in tasks:\n                task.assigned_to = random.choice(User.objects.all())\n                task.status = 1\n                task.save()\n                current_site = get_current_site(request)\n                subject = render_to_string('email/assigned_subject.txt', {\"task\": task})\n                body = render_to_string('email/assigned_body.txt', {\n                    'task': task,\n                    'site': current_site,\n                })\n                to_email = task.assigned_to.email\n                email = EmailMessage(subject, body, to=[to_email])\n                email.send()\n            if tasks:\n                messages.success(request, \"Tasks successfully allocated to your employees. Check status!\")\n                messages.success(request, ('Notification email has been sent to assignees!'))\n            else:\n                messages.warning(request, \"All tasks already allocated!\")\n                messages.warning(request, ('Notification email already sent!'))\n\n            return redirect(reverse(\"excel-table\"))\n        return redirect(reverse(\"excel-table\"))\n\nclass ExportToExcelView(View):\n    def post(self, request):\n        if request.POST.get(\"export_to_excel\") is not None:\n            # NOTE: export is unfinished; the workbook is created but never\n            # filled in or returned in a response.\n            wb = openpyxl.Workbook()\n            sheet = wb.active\n\nclass TaskDeleteView(LoginRequiredMixin, DeleteView):\n    model = Task\n    success_url = reverse_lazy('excel-table')\n\nclass EmployeeTaskView(LoginRequiredMixin, View):\n    def get(self, request):\n        if request.user.is_authenticated:\n            tasks = Task.objects.filter(assigned_to=request.user)\n            return render(request, \"hana/employee_task.html\", {'tasks': tasks})\n        return redirect(reverse('user-login'))\n\n\nclass ToggleDoneUndoneView(LoginRequiredMixin, View):\n    def post(self, request, task_id):\n        task = get_object_or_404(Task, pk=task_id)\n        results_changed = toggle_task_completed(task.id)\n        if results_changed:\n            if not task.completed:\n                task.completed_date = datetime.datetime.now()\n            messages.success(request, f\"Changed completion status for task: {task.name}\")\n        return redirect(\"task-detail\", task_id=task_id)\n\nclass AddAttachementView(View):\n    def get(self, request):\n        form = ModelFormWithFileField()\n        return render(request, \"hana/task_detail.html\", {\"form\": form})\n\n    def post(self, request):\n        form = ModelFormWithFileField(request.POST, request.FILES)\n        if form.is_valid():\n            form.instance.added_by = self.request.user\n            # NOTE: HttpRequest has no .task attribute; the related Task should\n            # be resolved from the URL or the posted data before saving.\n            form.instance.task = self.request.task\n            self.object = form.save(commit=False)\n            self.object.save()\n            messages.info(request, \"Attachment {} successfully uploaded\".format(self.object.filename))\n            return redirect(reverse('task-update'))\n        else:\n            return render(request, \"hana/task_detail.html\", {\"form\": form})\n\n\nclass RemoveAttachementView(DeleteView):\n    model = Attachment\n    success_url = reverse_lazy('task-update')\n\n\nclass TaskCommentAddView(LoginRequiredMixin, CreateView):\n    form_class = AddInfoForm\n    template_name = \"hana/task_detail.html\"\n    success_url = reverse_lazy('excel-table')\n\n    def get(self, request, *args, **kwargs):\n        if request.POST.get(\"add_comment\"):\n            self.object = None\n        return super().get(request, *args, **kwargs)\n\n    def form_valid(self, form):\n        # form save\n        form.instance.created_by = self.request.user\n        self.object = form.save(commit=False)\n        self.object.save()\n        return super().form_valid(form)\n'''\nclass 
InfoUpdateView(LoginRequiredMixin, UserPassesTestMixin, UpdateView):\n model = Info\n fields = ['body', ]\n context_object_name = 'task'\n success_url = reverse_lazy('task-detail', kwargs =[task,])\n\n def form_valid(self, form):\n form.instance.author = self.request.user\n return super().form_valid(form)\n\n def test_func(self):\n info = self.get_object()\n if self.request.user == info.author:\n return True\n return False\n\n'''\nclass InfoDeleteView(LoginRequiredMixin, UserPassesTestMixin, DeleteView):\n model = Info\n template_name = \"hana/info_confirm_delete.html\"\n\n def test_func(self):\n info = self.get_object()\n if self.request.user == info.author:\n return True\n return False\n\n def get_success_url(self):\n return reverse('task-detail', args=(self.object.task.id,))\n'''\nclass TaskStatusFilterView(View):\n def get(self, request):\n form = TaskStatusFilterForm()\n return render(request, \"hana/excel_view.html\", {\"form\" : form})\n\n def post(self, request):\n form = TaskStatusFilterForm(request.POST)\n if form.is_valid():\n tasks = Task.objects.filter(status=form.cleaned_data['status'])\n paginator = Paginator(tasks, 10)\n\n page = int(request.GET.get(\"page\", 1))\n try:\n tasks = paginator.page(page)\n except PageNotAnInteger:\n tasks = paginator.page(1)\n except EmptyPage:\n tasks = paginator.page(paginator.num_pages)\n\n tasks = paginator.get_page(page)\n\n ctx={\n 'form': form,\n 'tasks':tasks,\n 'page':page\n }\n return render(request, \"hana/excel_view.html\", ctx)\n'''\n","sub_path":"hana/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":20721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"379224774","text":"import socket\nimport random\nimport time\nsettings = []\nreadbuffer = \"\"\n\nHOST = \"irc.twitch.tv\"\nPORT = 6667\nAUTH = \"oauth:lgotdqoii6bqqw6mo6uraw6jgvi647\"\nNICK = \"nsheng1568\"\nCHAT_CHANNEL = \"kjiao\"\n\nmarkets = ['F_AD', 'F_BO', 'F_BP', 'F_C', 'F_CC', 'F_CD',\n 'F_CL', 'F_CT', 'F_DX', 'F_EC', 'F_ED', 'F_ES', 'F_FC', 'F_FV', 'F_GC',\n 'F_HG', 'F_HO', 'F_JY', 'F_KC', 'F_LB', 'F_LC', 'F_LN', 'F_MD', 'F_MP',\n 'F_NG', 'F_NQ', 'F_NR', 'F_O', 'F_OJ', 'F_PA', 'F_PL', 'F_RB', 'F_RU',\n 'F_S', 'F_SB', 'F_SF', 'F_SI', 'F_SM', 'F_TU', 'F_TY', 'F_US', 'F_W', 'F_XX',\n 'F_YM']\n\nmarkets += ['CASH', 'AAPL', 'ABBV', 'ABT', 'ACN', 'AEP', 'AIG', 'ALL',\n 'AMGN', 'AMZN', 'APA', 'APC', 'AXP', 'BA', 'BAC', 'BAX', 'BK', 'BMY', 'BRKB', 'C',\n 'CAT', 'CL', 'CMCSA', 'COF', 'COP', 'COST', 'CSCO', 'CVS', 'CVX', 'DD', 'DIS', 'DOW',\n 'DVN', 'EBAY', 'EMC', 'EMR', 'EXC', 'F', 'FB', 'FCX', 'FDX', 'FOXA', 'GD', 'GE',\n 'GILD', 'GM', 'GOOGL', 'GS', 'HAL', 'HD', 'HON', 'HPQ', 'IBM', 'INTC', 'JNJ', 'JPM',\n 'KO', 'LLY', 'LMT', 'LOW', 'MA', 'MCD', 'MDLZ', 'MDT', 'MET', 'MMM', 'MO', 'MON',\n 'MRK', 'MS', 'MSFT', 'NKE', 'NOV', 'NSC', 'ORCL', 'OXY', 'PEP', 'PFE', 'PG', 'PM',\n 'QCOM', 'RTN', 'SBUX', 'SLB', 'SO', 'SPG', 'T', 'TGT', 'TWX', 'TXN', 'UNH', 'UNP',\n 'UPS', 'USB', 'UTX', 'V', 'VZ', 'WAG', 'WFC', 'WMT', 'XOM']\n\n\ndef gen_rand_order():\n order = random.choice(['buy', 'buy', 'buy', 'buy', 'sell'])\n sym = random.choice(markets)\n val = random.randint(1, 100)\n\n return order + ' ' + sym + ' ' + str(val)\n\nwhile True:\n s = socket.socket()\n s.connect((HOST, PORT))\n\n s.send(bytes(\"PASS %s\\r\\n\" % AUTH, \"UTF-8\"))\n s.send(bytes(\"NICK %s\\r\\n\" % NICK, \"UTF-8\"))\n s.send(bytes(\"USER %s %s bla :%s\\r\\n\" % (NICK, HOST, NICK), \"UTF-8\"))\n s.send(bytes(\"JOIN #%s\\r\\n\" % CHAT_CHANNEL, 
\"UTF-8\"))\n s.send(bytes(\"PRIVMSG #%s :%s\\r\\n\" % (CHAT_CHANNEL, gen_rand_order()), \"UTF-8\"))\n print (gen_rand_order())\n time.sleep(3)\n","sub_path":"bot3.py","file_name":"bot3.py","file_ext":"py","file_size_in_byte":2051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"167790437","text":"class fDaftarTransaksi:\r\n\r\n def __init__(self, formObj, parentForm):\r\n self.app = formObj.ClientApplication\r\n self.form = formObj\r\n\r\n def DisplayFilteredQuery(self,branch_code):\r\n oqlTextFiltered = \\\r\n \"select from RECON_JournalBaru \"\\\r\n \"[ \"\\\r\n \" branch_code = :branch_code \"\\\r\n \"] \"\\\r\n \"( \"\\\r\n \"journal_date \"\\\r\n \", journal_no \"\\\r\n \", branch_code \"\\\r\n \", description \"\\\r\n \", user_id \"\\\r\n \", self \"\\\r\n \") then order by journal_date;\"\r\n \r\n qDaftarTransaksi = self.qDaftarTransaksi\r\n qDaftarTransaksi.OQLText = oqlTextFiltered\r\n qDaftarTransaksi.SetParameter('branch_code',branch_code)\r\n qDaftarTransaksi.DisplayData()\r\n \r\n def DisplayNonFilteredQuery(self):\r\n oqlTextNonFiltered = \\\r\n \"select from RECON_JournalBaru \"\\\r\n \"( \"\\\r\n \"journal_date \"\\\r\n \", journal_no \"\\\r\n \", branch_code \"\\\r\n \", description \"\\\r\n \", user_id \"\\\r\n \", self \"\\\r\n \") then order by journal_date;\"\r\n \r\n qDaftarTransaksi = self.qDaftarTransaksi\r\n qDaftarTransaksi.OQLText = oqlTextNonFiltered\r\n qDaftarTransaksi.DisplayData()\r\n\r\n def FormShow(self,formObj,parameter):\r\n uipTransaksi = self.uipTransaksi\r\n self.pDaftarTransaksi_bFilter.Visible = False #uipTransaksi.AllowedUser\r\n self.pDaftarTransaksi_bAll.Visible = False #uipTransaksi.AllowedUser\r\n self.pDaftarTransaksi_Kode_Cabang.Enabled = False #uipTransaksi.AllowedUser\r\n \r\n #self.DisplayFilteredQuery(uipTransaksi.Kode_Cabang)\r\n self.DisplayNonFilteredQuery()\r\n\r\n def btnNewClick(self,sender):\r\n app = self.app\r\n NewStatus = 1\r\n ph = app.CreateValues(['NewStatus', NewStatus])\r\n fJournalNew = app.CreateForm(\"reconcilerak/Transaksi/fJournalBaru\", \"fJournalBaru\", 0, ph, None)\r\n fJournalNew.Show(NewStatus)\r\n self.qDaftarTransaksi.Refresh()\r\n \r\n\r\n def bFilterClick(self,sender):\r\n qDaftarTransaksi = self.qDaftarTransaksi\r\n self.DisplayFilteredQuery(self.uipTransaksi.Kode_Cabang)\r\n \r\n def bAllClick(self,sender):\r\n self.DisplayNonFilteredQuery()\r\n\r\n def bUbahClick(self,sender):\r\n app = self.app\r\n NewStatus = 0\r\n Journal_No = self.qDaftarTransaksi.GetFieldValue(\"RECON_JournalBaru.Journal_No\")\r\n ph = app.CreateValues(['NewStatus', NewStatus],['Journal_No',Journal_No])\r\n fJournalEdit = app.CreateForm(\"reconcilerak/Transaksi/fJournalBaru\", \"fJournalBaru\", 0, ph, None)\r\n\r\n fJournalEdit.Show(NewStatus,Journal_No)\r\n\r\n self.qDaftarTransaksi.Refresh()\r\n \r\n def bHapusClick(self,sender):\r\n Journal_No = self.qDaftarTransaksi.GetFieldValue(\"RECON_JournalBaru.Journal_No\")\r\n if self.app.ConfirmDialog('Yakin hapus data journal no %s ?'% Journal_No):\r\n param = self.app.CreateValues([\"Journal_No\",Journal_No])\r\n ph = self.app.ExecuteScript(\"reconcilerak/Transaksi/Journal.DeleteJournal\", param )\r\n self.qDaftarTransaksi.Refresh()\r\n\r\n def bTutupClick(self,sender):\r\n sender.ExitAction = 
2\r\n","sub_path":"dialogs/reconcilerak/Transaksi/fDaftarTransaksi_intr.py","file_name":"fDaftarTransaksi_intr.py","file_ext":"py","file_size_in_byte":3121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"263701034","text":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n# -*- coding: utf-8 -*-\n\"\"\"\nDefines a class used to produce and provide an interface for data\nrequired to create :ref:`execution-config`.\n\nThe drpcomplete file/data is primarily required for the survey-level\n:ref:`execution` of the DAP. However, it does provide useful information\nregarding the completed DRP files.\n\n----\n\n.. include license and copyright\n.. include:: ../include/copy.rst\n\n----\n\n.. include common links, assuming primary doc root is up one directory\n.. include:: ../include/links.rst\n\"\"\"\n\nimport os\nimport time\nimport warnings\nimport glob\n\n\nfrom IPython import embed\n\nimport numpy\n\nfrom astropy.io import fits\nimport astropy.constants\n\nfrom pydl.pydlutils.yanny import yanny\n\nfrom ..datacube import MaNGADataCube\nfrom ..spectra import MaNGARSS\nfrom ..config import defaults\nfrom ..util.parser import arginp_to_list, list_to_csl_string, parse_drp_file_name\nfrom ..util.exception_tools import print_frame\n\nclass DRPComplete:\n r\"\"\"\n Database with information needed by the DAP to analyze the completed\n DRP files.\n\n This class searches the defined paths for files resulting from the\n 3D phase of the MaNGA DRP, and then collates the information\n necessary to run those files through the MaNGA DAP. The necessary\n parameters are pulled from the provided platetargets files; see\n :func:`update`.\n\n Args:\n\n platelist (:obj:`str`, :obj:`list`, optional):\n List of plates to search for. Default is to search the full\n DRP path.\n ifudesignlist (:obj:`str`, :obj:`list`, optional):\n List of ifudesign to search for. Default is to search the\n full DRP path.\n drpall (:obj:`str`, optional):\n The full path to the DRPall fits file. Default is set by\n :func:`mangadap.config.defaults.drpall_file`.\n platetargets (:obj:`str`, :obj:`list`, optional):\n List of platetargets files to search through to find any\n given plate ifudesign combination. Default is returned as\n the first element in\n :func:`mangadap.config.defaults.plate_target_files`.\n catid (:obj:`str`, :obj:`list`, optional):\n List of target catalog ID numbers. Default is returned as\n the second element in\n :func:`mangadap.config.defaults.plate_target_files`.\n drpver (:obj:`str`, optional):\n DRP version, which is:\n\n - used to define the default DRP redux path\n - used when declaring a drpfits instance\n - used in the name of the drpcomplete fits file\n - included as a header keyword in the output file\n\n Default is defined by\n :func:`mangadap.config.defaults.drp_version`.\n redux_path (:obj:`str`, optional): \n The path to the top level directory containing the DRP\n output files; this is the same as the ``redux_path`` in\n the :class:`mangadap.util.drpfits.DRPFits` class. 
Default\n is defined by\n :func:`mangadap.config.defaults.drp_redux_path`.\n dapver (:obj:`str`, optional):\n DAP version, which is:\n\n - used to define the default DAP analysis path\n - included as a header keyword in the output drpcomplete\n fits file\n\n Default is defined by\n :func:`mangadap.config.defaults.dap_version`.\n analysis_path (:obj:`str`, optional):\n The path to the top level directory for the DAP output\n files; this is **different** from the directory_path in the\n :class:`mangadap.dapfile` class. Default is defined by\n :func:`mangadap.config.defaults.dap_analysis_path`\n directory_path (:obj:`str`, optional):\n Direct path to the output file produced using\n :func:`mangadap.config.defaults.dap_common_path`\n readonly (:obj:`bool`, optional):\n Flag that the drpcomplete fits file is only opened for\n reading, not for updating. If True, many of the attributes\n will be set to None.\n\n Attributes:\n platelist (:obj:`list`):\n List of plates to search for, see above.\n ifudesignlist (:obj:`list`):\n List of IFU designs to search for, see above.\n drpall (:obj:`str`):\n The DRPall file, see above.\n platetargets (`numpy.ndarray`):\n List of platetargets files to search through, see above.\n catid (`numpy.ndarray`):\n List of target catalog IDs, see above\n drpver (:obj:`str`):\n DRP version, see above.\n redux_path (:obj:`str`):\n Path to the top level of directory for the DRP output, see\n above.\n dapver (:obj:`str`):\n DAP version, see above.\n analysis_path (:obj:`str`):\n Path to the top level directory for the DAP output files,\n see above.\n directory_path (:obj:`str`):\n Direct path to the output file produced using\n :func:`mangadap.config.defaults.dap_common_path`\n readonly (:obj:`bool`):\n Flag that the drpcomplete fits file is only opened for\n reading, not for updating.\n hdu (`astropy.io.fits.HDUList`_):\n Fits data with binary table data.\n nobs (:obj:`int`):\n Number of observations in the file\n\n \"\"\"\n def __init__(self, platelist=None, ifudesignlist=None, drpall=None, platetargets=None,\n catid=None, drpver=None, redux_path=None, dapver=None, analysis_path=None,\n directory_path=None, readonly=False):\n\n # Input properties\n self.drpver = defaults.drp_version() if drpver is None else str(drpver)\n self.redux_path = defaults.drp_redux_path(self.drpver) \\\n if redux_path is None else str(redux_path)\n\n self.dapver = defaults.dap_version() if dapver is None else str(dapver)\n self.analysis_path = defaults.dap_analysis_path(self.drpver, self.dapver) \\\n if analysis_path is None else str(analysis_path)\n self.directory_path = defaults.dap_common_path(drpver=self.drpver, dapver=self.dapver,\n analysis_path=self.analysis_path) \\\n if directory_path is None else str(directory_path)\n\n self.hdu = None\n self.nobs = None\n if os.path.exists(self.file_path()):\n print('READING')\n self._read()\n\n if readonly:\n self.readonly=True\n self.platelist = None\n self.ifudesignlist = None\n self.platetargets = None\n self.catid = None\n return\n\n self.readonly = False\n\n self.platelist = arginp_to_list(platelist, evaluate=True)\n self.ifudesignlist = arginp_to_list(ifudesignlist, evaluate=True)\n\n self.drpall = defaults.drpall_file(drpver=self.drpver,redux_path=self.redux_path) \\\n if drpall is None else drpall\n \n if (platetargets is not None and catid is None) or \\\n (platetargets is None and catid is not None):\n raise ValueError('To use user-provided platetargets files, must provide both '\n 'platetargets and catid.')\n\n self.platetargets = None\n if 
platetargets is not None:\n self.platetargets = numpy.array( arginp_to_list(platetargets) )\n self.catid = numpy.array( arginp_to_list(catid) ).astype(numpy.int)\n else:\n try:\n self.platetargets, self.catid = defaults.plate_target_files()\n except:\n warnings.warn('Could not define platetargets files. '\n 'Updates must use DRPall file.')\n\n def __getitem__(self, key):\n return self.hdu['DRPC'].data[key]\n\n def __len__(self):\n return self.nobs\n\n # ******************************************************************\n # Utility functions\n # ******************************************************************\n\n def _read_platetargets(self):\n \"\"\"\n Read all the platetargets files using `pydl.pydlutils.yanny`_\n and return a list of yanny structures.\n\n Returns:\n list: A list of yanny structures, one per platetargets file.\n\n Raises:\n FileNotFoundError: Raised if cannot open one or more\n platetargets files.\n\n .. todo::\n This should be made more efficient by collating the required\n plateTargets data into a single record array!\n \"\"\"\n plttrg_data = []\n\n root = '/'+os.path.join(*(self.platetargets[0].split('/')[:-1]))\n print('plateTargets root directory: {0}'.format(root))\n\n for p in self.platetargets:\n pfile = p.split('/')[-1]\n print('Reading plateTargets file: {0}'.format(pfile), end='\\r')\n # Check that the file exists\n if not os.path.exists(p):\n raise FileNotFoundError('Cannot open {0}!'.format(p))\n # Read and append the data\n plttrg_data.append( yanny(filename=p) )\n print('\\nReading plateTargets file: DONE')\n\n return plttrg_data\n\n def _read_fix_data(self, fix_file):\n \"\"\"\n Read fix data from the provided file.\n\n Args:\n fix_file (:obj:`str`):\n SDSS parameter file with the fix data.\n\n Returns:\n yanny: A yanny structure with the data fixes. Returns\n None and raises a warning if the file does not exist.\n \"\"\"\n if not os.path.isfile(fix_file):\n warnings.warn('No redshift fix file available.')\n return None\n return yanny(filename=fix_file)\n\n def _read_redshift_fix(self):\n \"\"\"\n Wrapper for :func:`_read_fix_data` and\n :func:`~mangadap.config.defaults.redshift_fix_file` that\n returns the redshift fix data.\n \"\"\"\n return self._read_fix_data(defaults.redshift_fix_file())\n\n def _read_photometry_fix(self):\n \"\"\"\n Wrapper for :func:`_read_fix_data` and\n :func:`~mangadap.config.defaults.photometry_fix_file` that\n returns the photometry fix data.\n \"\"\"\n return self._read_fix_data(defaults.photometry_fix_file())\n\n def _match_platetargets(self, quiet=True):\n \"\"\"\n Read the platetargets files and match the data therein to the\n completed DRP files based on the plate and ifudesign.\n\n If a plate-ifudesign combination is not found in the\n plateTargets file, the other parameter values are set to -9999\n and the MaNGA ID is set to NULL.\n\n If the plate-ifudesign combination is found, the columns in the\n plateTargets files that the code expects to find are 'plateid',\n 'ifudesign', 'mangaid', 'object_ra', and 'object_dec'; the\n values that will be replaced with 'NULL' (str) or -9999 (int or\n float) if they do not exist are 'nsa_version', 'nsa_nsaid',\n 'manga_target1', 'manga_target3', 'z' or 'nsa_z',\n 'nsa_elpetro_ba', 'nsa_elpetro_phi', 'nsa_elpetro_th50_R',\n 'nsa_vdisp'.\n \n .. todo::\n\n - Instead of searching through all files for the correct\n plate-ifudesign, use the MaNGA ID to get the correct\n catalog? 
Requires getting the MaNGA ID from somewhere,\n but it takes a long time to read from the fits files...\n - Is there some more concise method of performing the same\n thing as what is done below with many try/except blocks?\n\n Args:\n quiet (:obj:`bool`, optional):\n Suppress terminal output\n\n Returns:\n numpy.array: 14 arrays with: MaNGA ID, object right\n ascension, object declination, catalog ID, index of the\n entry in the catalog, catalog version, ID of object in the\n catalog, main MaNGA survey target bitmask, ancillary MaNGA\n survey target bitmask, velocity, velocity dispersion,\n ellipticity, position angle, and effective radius.\n \"\"\"\n # Read the platetargets file\n plttrg_data = self._read_platetargets()\n ntrg = len(plttrg_data)\n print('Read {0} plateTargets file(s)'.format(ntrg))\n\n # Initialize the output arrays (needed in case some DRP targets not found)\n n_drp = len(self.platelist)\n mangaid = []\n # TODO: try mangaid = numpy.empty(n_drp, dtype=object) ?\n objra = numpy.full(n_drp, -9999.0, dtype=numpy.float64)\n objdec = numpy.full(n_drp, -9999.0, dtype=numpy.float64)\n catid = numpy.full(n_drp, -9999, dtype=numpy.int32)\n catindx = numpy.full(n_drp, -9999, dtype=numpy.int32)\n trg_version = []\n # TODO: try trg_version = numpy.empty(n_drp, dtype=object) ?\n trg_id = numpy.full(n_drp, -9999, dtype=numpy.int32)\n manga_trg1 = numpy.full(n_drp, -9999, dtype=numpy.int64)\n manga_trg3 = numpy.full(n_drp, -9999, dtype=numpy.int64)\n vel = numpy.full(n_drp, -9999.0, dtype=numpy.float64)\n veldisp = numpy.full(n_drp, -9999.0, dtype=numpy.float64)\n ell = numpy.full(n_drp, -9999.0, dtype=numpy.float64)\n pa = numpy.full(n_drp, -9999.0, dtype=numpy.float64)\n Reff = numpy.full(n_drp, -9999.0, dtype=numpy.float64)\n\n print('Searching platetargets file for observed galaxies...', end='\\r')\n for i in range(n_drp):\n plttrg_j = 0\n mangaid_i = 'NULL'\n for j in range(ntrg):\n indx = numpy.where((plttrg_data[j]['PLTTRGT']['plateid'] == self.platelist[i]) &\n (plttrg_data[j]['PLTTRGT']['ifudesign'] == self.ifudesignlist[i]))\n if len(indx[0]) > 1:\n raise ValueError('Multiple instances of {0}-{1} (MaNGA ID={2}) in {3}'.format(\n self.platelist[i], self.ifudesignlist[i], mangaid_i,\n self.platetargets[j]))\n if len(indx[0]) == 1:\n plttrg_j = j\n mangaid_i = plttrg_data[j]['PLTTRGT']['mangaid'][indx][0].decode(\"ascii\")\n if not quiet:\n print('Found {0}-{1} (MaNGA ID={2}) in {3} (CAT={4})'.format(\n self.platelist[i], self.ifudesignlist[i], mangaid_i,\n self.platetargets[j], self.catid[j]))\n break\n\n if len(indx[0]) == 0:\n warnings.warn('Could not find {0}-{1} in any plateTargets file!'.format(\n self.platelist[i], self.ifudesignlist[i]))\n mangaid = mangaid + ['NULL']\n continue\n mangaid.append(plttrg_data[plttrg_j]['PLTTRGT']['mangaid'][indx][0].decode(\"ascii\"))\n objra[i] = plttrg_data[plttrg_j]['PLTTRGT']['object_ra'][indx][0]\n objdec[i] = plttrg_data[plttrg_j]['PLTTRGT']['object_dec'][indx][0]\n\n # From David Wake:\n #\n # \"MANGAID consists of CATID-CATIND, where CATID identifies\n # a parent catalog, in the case of the main manga samples\n # CATID = 1 which refers to nsa_v1_0_0.fits, and CATIND is\n # the position within that catalog (zero indexed). 
So if you\n # strip out CATIND from MANGAID you have the position within\n # nsa_v1_0_0.fits without any matching required.\"\n catid[i], catindx[i] = map(lambda x: int(x), mangaid[i].split('-'))\n if catid[i] != self.catid[plttrg_j]:\n warnings.warn('{0}-{1} (MaNGA ID={2}) found in {3} (CAT={4})!'.format(\n self.platelist[i], self.ifudesignlist[i], mangaid_i,\n self.platetargets[j], self.catid[j]))\n\n try:\n trg_version.append(plttrg_data[plttrg_j]['PLTTRGT']['nsa_version'][indx][0].decode(\n 'ascii'))\n except:\n trg_version.append('NULL')\n\n try:\n trg_id[i] = plttrg_data[plttrg_j]['PLTTRGT']['nsa_nsaid'][indx][0]\n except:\n trg_id[i] = -9999\n \n try:\n manga_trg1[i] = plttrg_data[plttrg_j]['PLTTRGT']['manga_target1'][indx][0]\n except:\n manga_trg1[i] = -9999\n\n try:\n manga_trg3[i] = plttrg_data[plttrg_j]['PLTTRGT']['manga_target3'][indx][0]\n except:\n manga_trg3[i] = -9999\n\n # As of MPL-6, platetargets files now include the redshift\n # from the targeting catalog which is the combination of the\n # NSA data and the ancillary targets; the NSA only redshift\n # column is 'nsa_z'. To be compatible with previous\n # versions, first try to grab the redshift using the keyword\n # 'z', then try with 'nsa_z', then just set it to -9999.\n try:\n vel[i] = plttrg_data[plttrg_j]['PLTTRGT']['z'][indx][0] \\\n * astropy.constants.c.to('km/s').value\n except:\n try:\n vel[i] = plttrg_data[plttrg_j]['PLTTRGT']['nsa_z'][indx][0] \\\n * astropy.constants.c.to('km/s').value\n except:\n vel[i] = -9999.0\n\n try:\n if plttrg_data[plttrg_j]['PLTTRGT']['nsa_elpetro_ba'][indx][0] < 0:\n raise\n ell[i] = 1.0-plttrg_data[plttrg_j]['PLTTRGT']['nsa_elpetro_ba'][indx][0]\n except:\n ell[i] = -9999.0\n warnings.warn('Ellipticity for {0}-{1} is bogus!'.format(self.platelist[i],\n self.ifudesignlist[i]))\n\n try:\n # THERE'S A BUG IN THE MANGACORE/V1_2_0 PLATETARGETS FILES\n pa[i] = plttrg_data[plttrg_j]['PLTTRGT']['nsa_elpetro_phi'][indx][0]\n if pa[i] < 0:\n raise\n except:\n pa[i] = -9999.0\n warnings.warn('PA for {0}-{1} is bogus!'.format(self.platelist[i],\n self.ifudesignlist[i]))\n\n try:\n Reff[i] = plttrg_data[plttrg_j]['PLTTRGT']['nsa_elpetro_th50_r'][indx][0]\n except:\n Reff[i] = -9999.0\n\n try:\n veldisp[i] = plttrg_data[plttrg_j]['PLTTRGT']['nsa_vdisp'][indx][0]\n except:\n veldisp[i] = -9999.0\n\n # Correct for known nan issue\n if numpy.isnan(ell[i]):\n raise ValueError('nan encountered!')\n if numpy.isnan(pa[i]):\n raise ValueError('nan encountered!')\n\n print('Searching platetargets file for observed galaxies...DONE')\n\n return numpy.array(mangaid), objra, objdec, catid, catindx, numpy.array(trg_version), \\\n trg_id, manga_trg1, manga_trg3, vel, veldisp, ell, pa, Reff\n\n def _match_drpall(self):\n \"\"\"\n Find the data needed for the DRP complete databas in the DRPall\n file.\n\n If a plate-ifudesign combination is not found in the DRPall\n file, the other parameter values are set to -9999 and the MaNGA\n ID is set to NULL.\n\n If the plate-ifudesign combination is found, the columns in the\n plateTargets files that the code expects to find are 'plate',\n 'ifudsgn', 'mangaid', 'objra', 'objdec'; the values that will be\n replaced with 'NULL' (str) or -9999 (int or float) if they do\n not exist are 'nsa_version', 'nsa_nsaid', 'mngtarg1',\n 'mngtarg3', 'z', 'nsa_elpetro_ba', 'nsa_elpetro_phi',\n 'nsa_elpetro_th50_r', 'nsa_vdisp'.\n \n Returns:\n numpy.array: 14 arrays with: MaNGA ID, object right\n ascension, object declination, catalog ID, index of the\n entry in the catalog, catalog 
version, ID of object in the\n            catalog, main MaNGA survey target bitmask, ancillary MaNGA\n            survey target bitmask, velocity, velocity dispersion,\n            ellipticity, position angle, and effective radius.\n        \"\"\"\n        # Open the drpall file\n        hdu = fits.open(self.drpall)\n        # Check the plateifus are unique\n        pltifu = hdu[1].data['PLATEIFU']\n        if len(numpy.unique(pltifu)) != len(pltifu):\n            raise ValueError('The PLATEIFU column in the DRPall file is not unique!')\n\n        # Find the rows in the DRPall file with the correct plate-ifu;\n        # assumes the PLATEIFU column is unique (checked above)\n        this_pltifu = numpy.array(['{0}-{1}'.format(p,i)\n                                   for p,i in zip(self.platelist,self.ifudesignlist)])\n        rows = numpy.array([numpy.where(pltifu == pi)[0][0] if pi in pltifu else -1 \\\n                                for pi in this_pltifu])\n        if numpy.any(rows < 0):\n            raise ValueError('Could not find the following in the DRPall file: '\n                             '{0}'.format(this_pltifu[rows < 0]))\n        indx = rows > -1\n        rows = rows[indx]\n\n        # Initialize the data\n        n_drp = len(self.platelist)\n        mangaid = numpy.full(n_drp, 'NULL', dtype=object)\n        objra = numpy.full(n_drp, -9999.0, dtype=float)\n        objdec = numpy.full(n_drp, -9999.0, dtype=float)\n        catid = numpy.full(n_drp, -9999, dtype=int)\n        catindx = numpy.full(n_drp, -9999, dtype=int)\n        trg_version = numpy.full(n_drp, 'NULL', dtype=object)\n        trg_id = numpy.full(n_drp, -9999, dtype=int)\n        manga_trg1 = numpy.full(n_drp, -9999, dtype=int)\n        manga_trg3 = numpy.full(n_drp, -9999, dtype=int)\n        vel = numpy.full(n_drp, -9999.0, dtype=float)\n        veldisp = numpy.full(n_drp, -9999.0, dtype=float)\n        ell = numpy.full(n_drp, -9999.0, dtype=float)\n        pa = numpy.full(n_drp, -9999.0, dtype=float)\n        Reff = numpy.full(n_drp, -9999.0, dtype=float)\n\n        # Get the data\n        mangaid[indx] = hdu[1].data['mangaid'][rows]\n        objra[indx] = hdu[1].data['objra'][rows]\n        objdec[indx] = hdu[1].data['objdec'][rows]\n        for i in numpy.where(indx)[0]:\n            catid[i], catindx[i] = map(lambda x: x.astype(int).tolist(),\n                                       numpy.array(mangaid[i].split('-')))\n        trg_version[indx] = hdu[1].data['nsa_version'][rows]\n        trg_id[indx] = hdu[1].data['nsa_nsaid'][rows]\n        manga_trg1[indx] = hdu[1].data['mngtarg1'][rows]\n        manga_trg3[indx] = hdu[1].data['mngtarg3'][rows]\n        vel[indx] = hdu[1].data['z'][rows] * astropy.constants.c.to('km/s').value\n        # There is no vdisp\n        ell[indx] = 1 - hdu[1].data['nsa_elpetro_ba'][rows]\n        pa[indx] = hdu[1].data['nsa_elpetro_phi'][rows]\n        Reff[indx] = hdu[1].data['nsa_elpetro_th50_r'][rows]\n\n        # Done\n        hdu.close()\n        return mangaid.astype(str), objra, objdec, catid, catindx, trg_version.astype(str), \\\n                    trg_id, manga_trg1, manga_trg3, vel, veldisp, ell, pa, Reff\n\n    def _all_data_exists(self):\n        \"\"\"\n        Determines if the data for all the plates/ifudesigns selected in\n        the current compilation is already present in the current\n        drpcomplete fits file.\n\n        Returns:\n            :obj:`bool`: Flag that all data has already been collated\n            for the requested plate/ifudesign list.\n        \"\"\"\n        if not self._confirm_access():\n            return False\n        for p,i in zip(self.platelist, self.ifudesignlist):\n            if numpy.sum((self['PLATE'] == p) & (self['IFUDESIGN'] == i)) == 0:\n                return False\n        return True\n\n    def _read(self):\n        \"\"\"Read the data in the existing file at :func:`file_path`.\"\"\"\n        if self.hdu is not None:\n            self.hdu.close()\n            self.hdu = None\n        self.hdu = fits.open(self.file_path())\n        self.nobs = self.hdu['DRPC'].header['NAXIS2']\n        print('Read data: {0} rows'.format(self.nobs))\n\n    def _confirm_access(self, reread=False):\n        \"\"\"\n        Check that the drpcomplete fits file at :func:`file_path` is\n        accessible, and read the data if 
not yet read.\n\n Args:\n reread (:obj:`bool`, optional):\n Force the file to be re-read\n \n Returns:\n :obj:`bool`: Flag that :attr:`hdu` is read and valid.\n \"\"\"\n if not os.path.exists(self.file_path()):\n return False\n if self.hdu is None or reread:\n self._read()\n return True\n\n def _find_completed_reductions(self, mindesign=19, combinatorics=False, on_disk=False):\n \"\"\"\n Search the DRP path for reduced CUBE files.\n\n Function allows for one or both of :attr:`platelist` and\n :attr:`ifudesignlist` to be None. The behavior is:\n \n - If both are None, all available CUBE files are used to\n create :attr:`platelist` and :attr:`ifudesignlist`.\n\n - If one is None, all available CUBE files within the\n provided list of the one that is not None are used to\n create :attr:`platelist` and :attr:`ifudesignlist`. E.g.,\n if :attr:`platelist` =[7443] and :attr:`ifudesignlist` is\n None, all CUBE files with plate=7443 are chosen.\n\n - If both are not None and they have different lengths, or\n the same length and combinatorics is True, all available\n CUBE files within the constraints of both lists are\n selected. E.g., if :attr:`platelist` =[7443,7495] and\n :attr:`ifudesignlist` =[12704], the CUBE files with\n (plate,ifudesign)=[(7443,12704),(7459,12704)] are chosen.\n\n - If both are not None and they have the same length and\n combinatorics is False, all available CUBE files in the\n matched lists are chosen. E.g. if :attr:`platelist`\n =[7443,7495] and :attr:`ifudesignlist` =[12704,12703], the\n CUBE files with\n (plate,ifudesign)=[(7443,12704),(7459,12703) are chosen\n\n Args:\n mindesign (:obj:`int`, optional):\n Minimum bundle design to consider. For example, to\n ignore all the 7-fiber bundles, set `mindesign=19`\n (default) to only select the bundles with 19 or more\n fibers.\n combinatorics (:obj:`bool`, optional):\n Create :attr:`platelist` and :attr:`ifudesignlist` by\n determining all possible combinations of the input\n values. 
See above.\n            on_disk (:obj:`bool`, optional):\n                When searching for available files to analyze, search\n                the DRP directory path instead of using the data in the\n                DRPall file.\n\n        Returns:\n            list: Two lists with the available plates and ifudesigns for\n            which to collect data.\n        \"\"\"\n        # Are the plate and IFU lists matched?\n        matchedlist = (self.platelist is not None and self.ifudesignlist is not None \\\n                        and (len(self.platelist) == len(self.ifudesignlist) and not combinatorics))\n\n        # Get the list of files\n        if matchedlist:\n            # Lists already matched, just construct the file names\n            files = [os.path.join(*MaNGADataCube.default_paths(p, i, drpver=self.drpver,\n                                                               redux_path=self.redux_path)) \\\n                        for p,i in zip(self.platelist, self.ifudesignlist)]\n        elif on_disk:\n            # Find the DRP LOGCUBE files on disk\n            print('Searching for completed DRP CUBE files...', end='\\r')\n            files = glob.glob(os.path.join(self.redux_path, '*', 'stack', '*-LOGCUBE.fits.gz'))\n            print('Searching for completed DRP CUBE files...DONE.')\n        else:\n            # Use the DRPall file\n            drpall_hdu = fits.open(self.drpall)\n            pltifu = drpall_hdu[1].data['PLATEIFU']\n            if len(numpy.unique(pltifu)) != len(pltifu):\n                raise ValueError('The PLATEIFU column in the DRPall file is not unique!')\n            files = [os.path.join(*MaNGADataCube.default_paths(int(p), int(i), drpver=self.drpver,\n                                                               redux_path=self.redux_path)) \\\n                        for p,i in zip(drpall_hdu[1].data['plate'], drpall_hdu[1].data['ifudsgn'])]\n\n        # Only use those files that exist; filter the list rather than\n        # deleting from it while iterating, which would skip entries.\n        for f in [f for f in files if not os.path.isfile(f)]:\n            warnings.warn('No such file: {0}'.format(f))\n        files = [f for f in files if os.path.isfile(f)]\n\n        # Get the list of plate and ifus\n        pltifu = numpy.array(list(map(lambda x: os.path.split(x)[-1].split('-')[1:3],\n                                      files))).astype(int)\n\n        # Ignore any below the minimum ifusize\n        pltifu = pltifu[numpy.invert(pltifu[:,1]//100 < mindesign),:]\n        if matchedlist:\n            return pltifu[:,0], pltifu[:,1]\n\n        # Select those that match to the provided list\n        indx = numpy.ones(len(pltifu), dtype=bool) if self.platelist is None \\\n                    else numpy.array([p in self.platelist for p in pltifu[:,0]])\n        indx &= numpy.ones(len(pltifu), dtype=bool) if self.ifudesignlist is None \\\n                    else numpy.array([i in self.ifudesignlist for i in pltifu[:,1]])\n        return pltifu[indx,0], pltifu[indx,1]\n\n    def _find_modes(self):\n        \"\"\"\n        Using the provided list of CUBE DRP files, find if the RSS mode\n        is also available.\n\n        .. 
warning::\n            - This assumes everything in :attr:`platelist` and\n              :attr:`ifudesignlist` has a 'CUBE' file.\n\n        Currently only two modes are possible:\n\n            - (1) only the 'CUBE' file is available\n\n            - (2) both 'CUBE' and 'RSS' files are available\n\n        Returns:\n            `numpy.ndarray`: Array of modes for each input DRP file.\n        \"\"\"\n        print('Checking for RSS counterparts...', end='\\r')\n        has_rss = [os.path.isfile(os.path.join(*MaNGARSS.default_paths(p, i, drpver=self.drpver,\n                                                                       redux_path=self.redux_path)))\n                        for p,i in zip(self.platelist, self.ifudesignlist)]\n        print('Checking for RSS counterparts...DONE.')\n        modes = numpy.ones(len(has_rss), dtype=int)\n        modes[has_rss] = 2\n        return modes\n\n    def _write_parameter_struct(self, ostream):\n        \"\"\"\n        Write the structure of the DAP SDSS-style parameter set to a\n        file.\n\n        Args:\n            ostream (_io.TextIOWrapper) : File stream for output\n\n        \"\"\"\n        ostream.write('typedef struct {\\n')\n        ostream.write('    long plate;\\n')\n        ostream.write('    long ifudesign;\\n')\n        ostream.write('    char mode[4];\\n')\n        ostream.write('    double vel;\\n')\n        ostream.write('    double vdisp;\\n')\n        ostream.write('    double ell;\\n')\n        ostream.write('    double pa;\\n')\n        ostream.write('    double reff;\\n')\n        ostream.write('} DAPPAR;\\n')\n\n    def _write_parameter_list(self, ostream, index, mode):\n        \"\"\"\n        Write a DAPPAR entry to the SDSS-style parameter file.\n\n        Args:\n            ostream (_io.TextIOWrapper) : File stream for output\n            index (int) : Index of the entry in :attr:`data` to write\n            mode (int) : Mode of the entry; see :func:`_find_modes`\n\n        \"\"\"\n        ostream.write('DAPPAR {0:4d} {1:5d} {2:4s} {3:14.7e} {4:14.7e} {5:14.7e} {6:14.7e}'\n                      ' {7:14.7e}\\n'.format(self.hdu['DRPC'].data['PLATE'][index],\n                                            self.hdu['DRPC'].data['IFUDESIGN'][index], mode,\n                                            self.hdu['DRPC'].data['VEL'][index],\n                                            self.hdu['DRPC'].data['VDISP'][index],\n                                            self.hdu['DRPC'].data['ELL'][index],\n                                            self.hdu['DRPC'].data['PA'][index],\n                                            self.hdu['DRPC'].data['REFF'][index]))\n\n    # ******************************************************************\n    #  User functions\n    # ******************************************************************\n    def file_name(self):\n        \"\"\"Return the name of the DRP complete database.\"\"\"\n        return ('drpcomplete_{0}.fits'.format(self.drpver))\n\n    def file_path(self):\n        \"\"\"Return the full path to the DRP complete database.\"\"\"\n        return os.path.join(self.directory_path, self.file_name())\n\n    def update(self, platelist=None, ifudesignlist=None, combinatorics=False, force=False,\n               alldrp=False, use_platetargets=False, on_disk=False, quiet=False):\n        \"\"\"\n        Update the DRP complete file.\n\n        If `platelist` and/or `ifudesignlist` are provided, the existing\n        :attr:`platelist` and :attr:`ifudesignlist` attributes are\n        replaced.\n\n        If `platelist` and `ifudesignlist` do not have the same length\n        or `combinatorics` is True, the lists are expanded to include\n        all combinations of their elements.\n\n        If `platelist` and `ifudesignlist` are None or `alldrp` is True,\n        all available plates/ifudesigns are collected from within the\n        DRP directory structure.\n\n        If the result of :func:`file_path` does not exist, it is\n        created.\n\n        If the result of :func:`file_path` does exist, the available\n        plates and ifudesigns in the file are compared against the list\n        (provided or collected) to update.  If the lists differ, the\n        drpcomplete fits file is re-created from scratch.  
If all the\n        plates and ifudesigns are available, nothing is done, unless\n        `force=True`.\n\n        Args:\n            platelist (:obj:`str`, :obj:`list`, optional):\n                List of plates to include in the drpcomplete fits file.\n            ifudesignlist (:obj:`str`, :obj:`list`, optional):\n                List of ifudesigns to include in the drpcomplete fits\n                file.\n            combinatorics (:obj:`bool`, optional):\n                Determine all combinations of the entered plates and\n                ifudesigns.\n            force (:obj:`bool`, optional):\n                Overwrite any existing drpcomplete fits file with a new\n                one built from scratch.\n            alldrp (:obj:`bool`, optional):\n                Find the full list of available DRP files.\n            use_platetargets (:obj:`bool`, optional):\n                Generate the data using the platetargets files instead\n                of the DRPall file.\n            on_disk (:obj:`bool`, optional):\n                When searching for available files to analyze, search\n                the DRP directory path instead of using the data in the\n                DRPall file.\n            quiet (:obj:`bool`, optional):\n                Suppress terminal output\n\n        Raises:\n            ValueError:\n                Raised if drpcomplete fits file was opened in read-only\n                mode.\n\n        \"\"\"\n        if self.readonly:\n            raise ValueError('drpcomplete fits file was opened as read-only!')\n\n        if platelist is not None:\n            self.platelist = arginp_to_list(platelist, evaluate=True)\n        if ifudesignlist is not None:\n            self.ifudesignlist = arginp_to_list(ifudesignlist, evaluate=True)\n\n        if alldrp:\n            self.platelist = None\n            self.ifudesignlist = None\n\n        # This *only* searches for CUBE files\n        self.platelist, self.ifudesignlist \\\n                = self._find_completed_reductions(combinatorics=combinatorics, on_disk=on_disk)\n\n        # Check if these plate-ifus are already in the table.\n        if not force and self._all_data_exists():\n            print('{0} up to date.'.format(self.file_path()))\n            return\n        else:\n            print('Updating {0}.'.format(self.file_path()))\n\n        # Past this point, the drpcomplete fits file will be overwritten\n        # if it already exists.  
Only DRP files defined by \n # self.platelist and self.ifudesignlist will be included; i.e.,\n # this does *not* append to the file but completely overwrites\n # it.\n\n # Check for the RSS files\n modes = self._find_modes()\n\n # Notify\n print('Number of DRP files for DRPComplete file: {0}'.format(len(self.platelist)))\n\n # Get the data\n mangaid, objra, objdec, catid, catindx, trg_version, trg_id, manga_trg1, manga_trg3, vel, \\\n veldisp, ell, pa, Reff = (self._match_platetargets(quiet=quiet)\n if use_platetargets else self._match_drpall())\n\n # Apply any corrections to the redshifts using the redshift fix file\n redshift_fix_data = self._read_redshift_fix()\n if redshift_fix_data is not None:\n fix_pltifu = numpy.array(['{0}-{1}'.format(p,i) \n for p,i in zip(redshift_fix_data['DAPZCORR']['plate'],\n redshift_fix_data['DAPZCORR']['ifudesign'])])\n this_pltifu = numpy.array(['{0}-{1}'.format(p,i) \n for p,i in zip(self.platelist,self.ifudesignlist)])\n rows = numpy.array([numpy.where(fix_pltifu == pi)[0][0] if pi in fix_pltifu else -1 \\\n for pi in this_pltifu])\n indx = rows > -1\n rows = rows[indx]\n vel[indx] = redshift_fix_data['DAPZCORR']['z'][rows] \\\n * astropy.constants.c.to('km/s').value\n\n photometry_fix_data = self._read_photometry_fix()\n if photometry_fix_data is not None:\n fix_pltifu = numpy.array(['{0}-{1}'.format(p,i) \n for p,i in zip(photometry_fix_data['DAPPHOTCORR']['plate'],\n photometry_fix_data['DAPPHOTCORR']['ifudesign'])])\n this_pltifu = numpy.array(['{0}-{1}'.format(p,i) \n for p,i in zip(self.platelist,self.ifudesignlist)])\n rows = numpy.array([numpy.where(fix_pltifu == pi)[0][0] if pi in fix_pltifu else -1 \\\n for pi in this_pltifu])\n indx = rows > -1\n rows = rows[indx]\n ell[indx] = photometry_fix_data['DAPPHOTCORR']['ell'][rows]\n pa[indx] = photometry_fix_data['DAPPHOTCORR']['pa'][rows]\n Reff[indx] = photometry_fix_data['DAPPHOTCORR']['reff'][rows]\n\n # Write the data to disk\n self.write(self.platelist, self.ifudesignlist, modes, mangaid, objra, objdec, catid,\n catindx, trg_version, trg_id, manga_trg1, manga_trg3, vel, veldisp, ell, pa,\n Reff)\n\n def write(self, platelist, ifudesignlist, modes, mangaid, objra, objdec, catid, catindx,\n trg_version, trg_id, manga_trg1, manga_trg3, vel, veldisp, ell, pa, Reff, drpver=None,\n redux_path=None, dapver=None, analysis_path=None, clobber=True):\n r\"\"\"\n Write the drpcomplete fits binary table.\n\n Header keywords are:\n - VERSDRP: DRP version\n - RDXPTH: DRP reduction path\n - VERSDAP: DAP version\n - SISPTH: DAP analysis path\n - PLTTRG *N*: plateTargets file *N*\n - CATID *N*: ID Number of target catalog *N*\n - AUTHOR: Set to 'K.B. Westfall '\n\n Binary table columns:\n 1. PLATE (1J): Plate number\n 2. IFUDESIGN (1J): IFU design\n 3. MODES (1B): Modes available \n - MODES=1: only 'CUBE' file is available\n - MODES=2: both 'CUBE' and 'RSS' files are available\n\n 4. MANGAID (*n* A): MaNGA ID (same as 'CATID-CATINDX')\n 5. OBJRA (1D): Object right ascension\n 6. OBJDEC (1D): Object declination\n 7. CATID (1J): Catalog ID used for target selection\n 8. CATINDX (1J): Index in catalog with target data\n 9. TRG_VERSION (*n* A): Version of the catalog (e.g., NSA)\n used in targetting (def='NULL' if not available)\n 10. TRG_ID (1J): Target ID in catalog (def=-9999 if not\n available)\n 11. MANGA_TARGET1: Main MaNGA survey target bitmask\n 12. MANGA_TARGET3: Ancillary MaNGA survey target bitmask\n 13. VEL (1D): Systemic velocity from catalog (def=-9999.0 if\n not available)\n 14. 
VDISP (1D): Velocity dispersion from catalog\n (def=-9999.0 if not available)\n 15. ELL (1D): Ellipticity (1-b/a) from catalog (def=-9999.0\n if not available); Elliptical Petrosian value from NSA\n 16. PA (1D): Position angle from catalog (def=-9999.0 if not\n available); Elliptical Petrosian value from NSA\n 17. REFF (1D): Effective radius from catalog (def=-9999.0 if\n not available); Elliptical Petrosian value from NSA\n\n Args:\n platelist (list) : List of plates\n ifudesignlist (list) : List of IFU designs\n modes (numpy.array): Mode values, see above\n mangaid (numpy.array): MaNGA IDs\n objra (numpy.array): Object right ascensions\n objdec (numpy.array): Object declinations\n catid (numpy.array): Catalog ID used for target selection\n catindx (numpy.array): Index in catalog with target data\n trg_version (numpy.array): Version of the catalog (e.g.,\n NSA) used in targetting\n trg_id (numpy.array): Target ID in catalog\n manga_trg1 (numpy.array): Main MaNGA survey target bitmask\n manga_trg3 (numpy.array): Ancillary MaNGA survey target\n bitmask\n vel (numpy.array): Velocity from catalog, if available\n veldisp (numpy.array): Velocity dispersion from catalog,\n if available\n ell (numpy.array): Ellipticity (1-b/a) from catalog, if\n available\n pa (numpy.array): Position angle from catalog, if\n available\n Reff (numpy.array): Effective radius from catalog, if\n available\n drpver (str) : (**Optional**) DRP version, see above.\n redux_path (str) : (**Optional**) Path to the top level of\n directory for the DRP output, see above.\n dapver (str) : (**Optional**) DAP version, see above.\n analysis_path (str) : (**Optional**) Path to the top level\n directory for the DAP output files, see above.\n clobber (bool): (**Optional**) Overwrite any existing file.\n\n Raises:\n AttributeError: Raised if drpcomplete fits file was opened\n in read-only mode.\n FileExistsError: Raised if the drpcomplete file exists and\n clobber=False.\n \"\"\"\n if self.readonly:\n raise AttributeError('drpcomplete fits file was opened as read-only!')\n\n out=self.file_path()\n if os.path.isfile(out) and not clobber:\n raise FileExistsError('DRP complete file already exists: {0}'.format(out))\n\n if not os.path.isdir(self.directory_path):\n os.makedirs(self.directory_path)\n\n # Create the primary header\n nplttrg = 0 if self.platetargets is None else len(self.platetargets)\n\n hdr = fits.Header()\n hdr['VERSDRP'] = self.drpver if drpver is None else drpver\n hdr['RDXPTH'] = self.redux_path if redux_path is None else redux_path\n hdr['VERSDAP'] = self.dapver if dapver is None else dapver\n hdr['SISPTH'] = self.analysis_path if analysis_path is None else analysis_path\n for i in range(nplttrg):\n hdr['PLTTRG{0}'.format(i+1)] = self.platetargets[i]\n hdr['CATID{0}'.format(i+1)] = self.catid[i]\n hdr['AUTHOR'] = 'K.B. 
Westfall '\n\n # Create the Binary Table\n cols = []\n cols.append(fits.Column(name='PLATE', format='1J', array=numpy.array(platelist)))\n cols.append(fits.Column(name='IFUDESIGN', format='1J', array=numpy.array(ifudesignlist)))\n cols.append(fits.Column(name='MODES', format='1B', array=modes))\n cols.append(fits.Column(name='MANGAID', format='{0}A'.format(mangaid.dtype.itemsize),\n array=mangaid))\n cols.append(fits.Column(name='OBJRA', format='1D', array=objra))\n cols.append(fits.Column(name='OBJDEC', format='1D', array=objdec))\n cols.append(fits.Column(name='CATID', format='1J', array=catid))\n cols.append(fits.Column(name='CATINDX', format='1J', array=catindx))\n cols.append(fits.Column(name='TRG_VERSION',\n format='{0}A'.format(trg_version.dtype.itemsize),\n array=trg_version))\n cols.append(fits.Column(name='TRG_ID', format='1J', array=trg_id))\n cols.append(fits.Column(name='MANGA_TARGET1', format='1J', array=manga_trg1))\n cols.append(fits.Column(name='MANGA_TARGET3', format='1J', array=manga_trg3))\n cols.append(fits.Column(name='VEL', format='1D', array=vel))\n cols.append(fits.Column(name='VDISP', format='1D', array=veldisp))\n cols.append(fits.Column(name='ELL', format='1D', array=ell))\n cols.append(fits.Column(name='PA', format='1D', array=pa))\n cols.append(fits.Column(name='REFF', format='1D', array=Reff))\n\n # Create the HDUList and write it to the fits file\n self.hdu = fits.HDUList([ fits.PrimaryHDU(header=hdr),\n fits.BinTableHDU.from_columns(cols, name='DRPC') ])\n print('Writing to disk: {0}'.format(out))\n self.hdu.writeto(out, overwrite=clobber) #clobber=clobber)\n self.nobs = self.hdu['DRPC'].header['NAXIS2']\n\n# def grab_data(self, plate=None, ifudesign=None, index=None, reread=False):\n# \"\"\"\n# Return the data for a given plate-ifudesign, or index in the\n# :attr:`data`. Even though *plate*, *ifudesign*, and *index* are\n# all optional, the arguments must contain either *plate* and\n# *ifudesign* or *index*.\n#\n# Args:\n# plate (int): (**Optional**) Plate number\n# ifudesign (int): (**Optional**) IFU design\n# index (int): (**Optional**) Index of the row in :attr:`data`\n# with the data to return\n# reread (bool): (**Optional**) Force the database to be re-read\n#\n# Returns:\n# list: List of the 14 elements of :attr:`data`; see\n# :func:`write`.\n#\n# Raises:\n# ValueError: Raised if the row with the data is unknown\n# because either *index* is not defined or one or both of\n# *plate* and *ifudesign* is not defined. 
Also raised if\n# *index* does not exist in the data array.\n# \"\"\"\n# if (plate is None or ifudesign is None) and index is None:\n# raise ValueError('Must provide plate and ifudesign or row index!')\n#\n# if index is None:\n# index = self.entry_index(plate, ifudesign, reread=reread)\n# else:\n# self._confirm_access(reread=reread)\n# if index >= self.nobs:\n# raise ValueError('Selected row index does not exist')\n#\n# # TODO: Can't I just select index, i.e.,\n# # self.hdu[1].data[index]?\n#\n# return [ self.hdu['DRPC'].data['PLATE'][index], self.hdu['DRPC'].data['IFUDESIGN'][index],\n# self.hdu['DRPC'].data['MODES'][index], self.hdu['DRPC'].data['MANGAID'][index],\n# self.hdu['DRPC'].data['OBJRA'][index], self.hdu['DRPC'].data['OBJDEC'][index],\n# self.hdu['DRPC'].data['CATID'][index], self.hdu['DRPC'].data['CATINDX'][index],\n# self.hdu['DRPC'].data['TRG_VERSION'][index], self.hdu['DRPC'].data['TRG_ID'][indx],\n# self.hdu['DRPC'].data['NSA_V100_ID'][index], self.hdu['DRPC'].data['VEL'][index],\n# self.hdu['DRPC'].data['VDISP'][index], self.hdu['DRPC'].data['ELL'][index],\n# self.hdu['DRPC'].data['PA'][index], self.hdu['DRPC'].data['REFF'][index] ]\n\n def write_par(self, ofile, mode, plate=None, ifudesign=None, index=None, reread=False,\n clobber=True):\n \"\"\"\n Write the SDSS-style parameter (Yanny) file for use with the\n MaNGA DAP.\n\n Args: \n ofile (str): Output file name\n mode (str): Mode of the DRP file to analyze; must be either\n 'RSS' or 'CUBE'\n plate (int): (**Optional**) Plate number\n ifudesign (int): (**Optional**) IFU design\n index (int): (**Optional**) Index of the row in :attr:`data`\n with the data to return\n reread (bool): (**Optional**) Force the database to be re-read\n clobber (bool): (**Optional**) Overwrite any existing parameter\n file\n\n Raises:\n IOError: Raised if the parameter file already exists and\n clobber is False.\n ValueError: Raised if\n\n - the row with the data is unknown because either\n *index* is not defined or one or both of *plate* and\n *ifudesign* is not defined.\n - *index* does not exist in the data array.\n - the 'RSS' mode is selected but unavailable\n\n \"\"\"\n if not self._confirm_access(reread=reread):\n raise IOError('Could not access database!')\n\n if os.path.exists(ofile) and not clobber:\n raise IOError('Parameter file already exists. 
Set clobber=True to overwrite.')\n\n if (plate is None or ifudesign is None) and index is None:\n raise ValueError('Must provide plate and ifudesign or row index!')\n\n if mode != 'CUBE' and mode != 'RSS':\n raise ValueError('Mode must be either CUBE or RSS.')\n\n if index is None:\n index = self.entry_index(plate, ifudesign, reread=reread)\n else:\n if index >= self.nobs:\n raise ValueError('Selected row index does not exist')\n\n if mode == 'RSS' and self.hdu['DRPC'].data['MODES'][index] != 2:\n raise ValueError('RSS mode not available for plate={0},ifudesign={1} !'.format(\n self.hdu['DRPC'].data['PLATE'][index],\n self.hdu['DRPC'].data['IFUDESIGN'][index]))\n\n # Write the SDSS parameter file\n ostream = open(ofile, 'w')\n ostream.write('\\n')\n self._write_parameter_struct(ostream)\n ostream.write('\\n')\n ostream.write('\\n')\n self._write_parameter_list(ostream, index, mode)\n ostream.write('\\n')\n ostream.close()\n\n def write_config(self, ofile, plate=None, ifudesign=None, index=None, sres_ext=None,\n sres_fill=None, covar_ext=None, reread=False, overwrite=True):\n \"\"\"\n Write a config file with the data used to instantiate a\n :class:`mangadap.datacube.manga.MaNGADataCube` datacube for\n analysis.\n\n Args: \n ofile (:obj:`str`):\n Output file name.\n plate (:obj:`int`, optional):\n Plate number.\n ifudesign (:obj:`int`, optional):\n IFU design.\n index (:obj:`int`, optional):\n Index of the row in :attr:`data` with the data to\n return.\n sres_ext (:obj:`str`, optional):\n The extension to use when constructing the spectral\n resolution vectors for the MaNGA datacubes. See\n :func:`mangadap.datacube.manga.MaNGADataCube.spectral_resolution`.\n sres_fill (:obj:`bool`, optional):\n Fill masked spectral-resolution data by simple linear\n interpolation.\n covar_ext (:obj:`str`, optional):\n Extension in the MaNGA DRP CUBE file to use as the\n single spatial correlation matrix for all wavelength\n channels.\n reread (:obj:`bool`, optional):\n Force the database to be re-read\n overwrite (:obj:`bool`, optional):\n Overwrite any existing parameter file\n\n Raises:\n IOError:\n Raised if the parameter file already exists and\n clobber is False.\n ValueError:\n Raised if\n - the row with the data is unknown because either\n ``index`` is not defined or one or both of\n ``plate`` and ``ifudesign`` is not defined.\n - ``index`` does not exist in the data array.\n\n \"\"\"\n if not self._confirm_access(reread=reread):\n raise IOError('Could not access database!')\n\n if (plate is None or ifudesign is None) and index is None:\n raise ValueError('Must provide plate and ifudesign or row index!')\n\n if index is None:\n index = self.entry_index(plate, ifudesign, reread=reread)\n elif index >= self.nobs:\n raise ValueError('Selected row index does not exist')\n\n MaNGADataCube.write_config(ofile, self['PLATE'][index], self['IFUDESIGN'][index], log=True,\n z=self['VEL'][index]/astropy.constants.c.to('km/s').value,\n vdisp=self['VDISP'][index], ell=self['ELL'][index],\n pa=self['PA'][index], reff=self['REFF'][index],\n sres_ext=sres_ext, sres_fill=sres_fill, covar_ext=covar_ext,\n drpver=self.drpver, redux_path=self.redux_path,\n overwrite=overwrite)\n\n def entry_index(self, plate, ifudesign, reread=False):\n \"\"\"\n Find the index of the row with the parameter data for the\n specified plate and ifudesign.\n\n .. 
warning::\n - This is very inefficient if you're looking for multiple\n entries...\n\n Args:\n plate (:obj:`int`):\n Plate number\n ifudesign (:obj:`int`):\n IFU design\n reread (:obj:`bool`, optional):\n Force the database to be re-read\n\n Returns:\n int: Index of the row in :attr:`data` with the data for the\n given *plate* and *ifudesign*\n\n Raises:\n ValueError:\n Raised if the given `plate` and `ifudesign` were not\n found.\n \"\"\"\n if not self._confirm_access(reread=reread):\n raise IOError('Could not access database!')\n indx = (self['PLATE'] == plate) & (self['IFUDESIGN'] == ifudesign)\n if numpy.sum(indx) == 0:\n raise ValueError('Could not find plate={0},ifudesign={1} in drpcomplete file!'.format(\n plate, ifudesign))\n return numpy.where(indx)[0][0]\n\n def can_analyze(self, row=None):\n \"\"\"\n Determine if the DAP can analyze a plate-ifu entry in the\n database.\n\n The selection is currently:\n\n - MaNGAID != 'NULL'\n - MANGA_TARGET1 > 0 or MANGA_TARGET3 > 0\n - VEL > -500\n\n Args:\n row (:obj:`int`, optional):\n The specific row to test. By default, return a boolean\n vector for all the database rows.\n\n Returns:\n Either a single boolean or boolean `numpy.ndarray`_\n flagging that DAP can (True) or cannot (False) analyze\n the data associated with the database entry (or entries).\n \"\"\"\n if row is None:\n return (self['MANGAID'] != 'NULL') \\\n & ((self['MANGA_TARGET1'] > 0) | (self['MANGA_TARGET3'] > 0)) \\\n & (self['VEL'] > -500.0)\n return self['MANGAID'][row] != 'NULL' \\\n and (self['MANGA_TARGET1'][row] > 0 or self['MANGA_TARGET3'][row] > 0) \\\n and self['VEL'][row] > -500.0\n\n","sub_path":"mangadap/survey/drpcomplete.py","file_name":"drpcomplete.py","file_ext":"py","file_size_in_byte":56886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"93185620","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# Author: Xiaobai Lei\nimport re\n\nfrom django.utils.deprecation import MiddlewareMixin\nfrom django.shortcuts import (\n redirect, reverse, HttpResponse\n)\n\nfrom rbac.models import Permission\n# 白名单列表\nWHITE_URL_LIST = [\n r'^/login/$',\n r'^/logout/$',\n r'^/register/$',\n r'^/favicon.ico$',\n r'^/admin/.*',\n r'^/get_vaildcode_img/$',\n]\n\n\nclass PermissionMiddleware(MiddlewareMixin):\n \"\"\"权限验证中间件\"\"\"\n\n def process_request(self, request):\n # 1.当前访问的url\n current_path = request.path_info\n\n # 2.白名单判断,如果在白名单的就直接放过去\n for path in WHITE_URL_LIST:\n if re.search(path, current_path):\n return None\n\n # 3.检验当前用户是否登录\n # user_id = request.session.get('user_id')\n if not request.user.is_authenticated:\n return redirect(reverse('login'))\n\n # 面包屑导航栏层级记录,默认首页为第一位,主要存储title(展示在页面用)和url(用户点击后可直接跳转到相应页面)\n request.breadcrumb_list = [\n {\n 'title': '首页',\n 'url': '/index/',\n }\n ]\n # 4.获取用户权限信息并进行校验\n permission_list = request.session.get('permission_list')\n for item in permission_list:\n # 由于url的是以正则形式存储,因此采用正则与当前访问的url进行完全匹配,如果符合则证明有权限\n if re.search('^{}$'.format(item['url']), current_path):\n # 将当前访问路径的所属菜单pk记录到show_id中,用户访问子权限时依旧会显示父权限(二级菜单)\n request.show_id = item['parent_id'] or item['id']\n # 将当前访问的父子信息记录到breadcrumb_list中(面包屑导航栏)\n # 如果是子权限的话,就根据父权限id查出父权限信息,将父权限和子权限都记录下来\n parent_obj = Permission.objects.filter(pk=item['parent_id']).first()\n if item['parent_id']:\n request.breadcrumb_list.extend([\n {\n 'title': parent_obj.title,\n 'url': parent_obj.url,\n },\n {\n 'title': item['title'],\n 'url': item['url'],\n }])\n else:\n # 排除首页,因为首页初始化就存在了\n if item['title'] != '首页':\n 
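# Top-level menu entry other than the home page ('首页'): record\n                        # only its own title/url as the breadcrumb tail.\n                        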
request.breadcrumb_list.append({\n 'title': item['title'],\n 'url': item['url'],\n })\n return None\n else:\n return HttpResponse(\"无此权限\")\n","sub_path":"LkCRM/rbac/middlewares/rbac.py","file_name":"rbac.py","file_ext":"py","file_size_in_byte":2956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"147531327","text":"from xml.dom import minidom, Node\n\ndoc = minidom.Document()\ndoc.appendChild(doc.createComment(\"Sample XML Document - Chapter 8\"))\n\n#\nbook=doc.createElement(\"book\")\ndoc.appendChild(book)\n#\ntitle=doc.createElement(\"title\")\ntitle.appendChild(doc.createTextNode(\"Sample XML Thing\"))\nbook.appendChild(title)\n\n\n","sub_path":"python标准库/xml/建立xml文件.py","file_name":"建立xml文件.py","file_ext":"py","file_size_in_byte":305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"41288732","text":"import data\nfrom math import ceil, floor\n\nclass Chapter:\n \"\"\"[summary]\n \"\"\"\n def __init__(self, name, chapterNum, numberOfAyahs, numberOfPages):\n self.name = name\n self.chapterNum = chapterNum\n self.numberOfAyahs = numberOfAyahs\n self.numberOfPages = numberOfPages\n\n def __eq__(self, other):\n return self.chapterNum == other.chapterNum\n\n def __repr__(self):\n return self.name\n\n\ndef getHeader():\n head = open(\"./templates/head\", \"r\")\n shead = \"\"\n for line in head:\n # print(line)\n shead = shead+line\n return (shead)\n\ndef createButton():\n head = open(\"./templates/createButton.html\", \"r\")\n shead = \"\"\n for line in head:\n # print(line)\n shead = shead+line\n return (shead)\n\n\ndef diff_dates(date1, date2):\n \"\"\"[summary]\n\n Args:\n date1 (datetime.date): [description]\n date2 (datetime.date): [description]\n\n Returns:\n [type]: [description]\n \"\"\"\n\n return abs(date2-date1).days\n\n\ndef getDaysAyah(numberOfDays, startSurah, endSurah=0):\n \"\"\"[summary]\n\n Args:\n numberOfDays (int): [description]\n startSurah (chapter): [description]\n endSurah (chapter, optional): [description]. Defaults to 0.\n \"\"\"\n endSurah -= 1\n ayahGoal = 0\n dailyAyahGoal = 0\n\n for i in (data.allChapters[endSurah:startSurah]):\n ayahGoal = ayahGoal + i.numberOfAyahs\n\n dailyAyahGoal = ceil(ayahGoal/numberOfDays)\n\n return (dailyAyahGoal)\n\n\ndef getDaysPages(numberOfDays, startSurah, endSurah=1, precision=0):\n \"\"\"[summary]\n\n Args:\n numberOfDays (int): [description]\n startSurah (chapter): [description]\n endSurah (chapter, optional): [description]. 
Defaults to 1.\n \"\"\"\n chaptersToLearn = data.allChapters[endSurah:startSurah]\n pageToDay = 0\n learnSoFar = 0\n # endSurah -= 1\n pageGoal = 0\n dailyPageGoal = 0\n day = 1\n numPagesTillSurah = {}\n for i in reversed(chaptersToLearn):\n pageGoal = pageGoal + i.numberOfPages\n numPagesTillSurah[i.name] = pageGoal\n\n dailyPageGoal = round(pageGoal/numberOfDays,precision)\n surahFinishDay = {}\n for i in reversed(chaptersToLearn):\n if(numPagesTillSurah[i.name] % dailyPageGoal > .2):\n surahFinishDay[i.name] = ceil(numPagesTillSurah[i.name]/dailyPageGoal)\n else:\n surahFinishDay[i.name] = floor(numPagesTillSurah[i.name]/dailyPageGoal)\n\n surahFinishDayS = sorted(surahFinishDay)\n returnValue = [dailyPageGoal, surahFinishDay, chaptersToLearn]\n return returnValue\n\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"579401017","text":"# 85824\nimport numpy\nimport math\nimport time\nimport random\nimport sys\n#from PIL import Image\n\nfrom keras.models import Sequential\nfrom keras.layers.core import Dense, Dropout, Activation\nfrom keras.layers import Convolution2D, MaxPooling2D, Flatten, AveragePooling2D, Cropping2D, ZeroPadding2D\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.optimizers import SGD, Adam\nfrom keras.utils import np_utils\nfrom keras.datasets import mnist\nfrom keras.regularizers import l2\nfrom keras import regularizers\nfrom keras.models import load_model\n\ntrain_csv = sys.argv[1]\n\ndef histeqq(im,nbr_bins=256):\n\n #get image histogram\n imhist,bins = numpy.histogram(im.flatten(),nbr_bins,normed=True)\n cdf = imhist.cumsum() #cumulative distribution function\n cdf = 255 * cdf / cdf[-1] #normalize\n\n #use linear interpolation of cdf to find new pixel values\n im2 = numpy.interp(im.flatten(),bins[:-1],cdf)\n\n return im2.reshape(im.shape), cdf\n\ntrain_in = []\ntrain_out = []\ntest_in = []\ntest_out = []\n\nvi = []\nvo = []\n\ntrain_N = 28709\n\nwith open(train_csv, 'r') as fp:\n fp.readline()\n for i in range(train_N):\n a = fp.readline().replace('\\n','').split(',')\n label = int(a[0])\n feature = a[1].split(' ')\n feature = [int(x) for x in feature]\n ans = [0 for i in range(7)]\n ans[label] = 1\n train_in.append(feature)\n train_out.append(ans)\n tmp = []\n for i in range(48):\n for j in range(48):\n tmp.append(feature[i*48+47-j])\n train_in.append(tmp)\n train_out.append(ans)\n\nN = len(train_in)\nfor T in range(N):\n out = numpy.array(train_in[T])\n out = out.reshape(48,48)\n out, h = histeqq(out)\n train_in[T] = out.reshape(48*48)\n for i in range(48*48):\n train_in[T][i] /= 255\n\ntotal = len(train_in)\n\nvi = train_in[: int(total * 0.1)]\ntrain_in = train_in[int(total * 0.1):]\nvo = train_out[: int(total * 0.1)]\ntrain_out = train_out[int(total * 0.1):]\n\ntrain_in = numpy.array(train_in)\ntrain_out = numpy.array(train_out)\n\nvi = numpy.array(vi)\nvo = numpy.array(vo)\n\n\nmodel2 = Sequential()\n\nmodel2.add(Dense(input_dim=48*48,output_dim=689))\nmodel2.add(Dense(500))\nmodel2.add(Activation('relu'))\nmodel2.add(Dense(7))\nmodel2.add(Activation('softmax'))\nmodel2.summary()\nmodel2.compile(loss='categorical_crossentropy',optimizer=\"adam\",metrics=['accuracy'])\n\nwith open(\"progress.txt\", \"w\") as fp:\n for i in range(100):\n model2.fit(train_in, train_out, epochs=1, batch_size=128, validation_data=(vi, vo))\n score = model2.evaluate(train_in, train_out)\n a = score[1]\n score = 
model2.evaluate(vi, vo)\n b = score[1]\n fp.write(\"%f %f\\n\" % (a, b))\nmodel2.save('my_model.h5')","sub_path":"hw3/dnn_train.py","file_name":"dnn_train.py","file_ext":"py","file_size_in_byte":2739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"407754429","text":"# -*- coding: utf-8 -*-\nfrom rtcenter.helpers.classes.APIViews import CoreView\nfrom rtcenter.helpers.classes.PhotoStorage import PhotoStorage\nfrom rtcenter.helpers.classes.APIViews import APIError\nimport models as pdb\n\nclass ProductAPI(CoreView):\n\n def submit_new_product_action(self):\n bar_code = self.get_parameters('bar_code')\n title = self.get_parameters('title')\n price = self.get_parameters('price')\n\n if not title:\n raise APIError(-101, u'商品名称不能为空')\n if not bar_code or (len(bar_code)!=8 and len(bar_code)!=13):\n raise APIError(-101, u'条形码不规范')\n if not price:\n raise APIError(-101, u'价格不能为空')\n\n try:\n pd = pdb.NewProduct(bar_code=bar_code,\n title = title,\n price = price)\n\n\n if 'pic' in self.request.FILES:\n f = self.request.FILES['pic']\n path, w, h = PhotoStorage.save_uploaded_file(f, sizes=[(512,512)])\n pd.add_photo(path[0], path[1])\n \n pd.save()\n\n success = True\n except:\n success = True\n \n return {'success': success}","sub_path":"rtcenter/rtcenter/product/mapi.py","file_name":"mapi.py","file_ext":"py","file_size_in_byte":1213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"412115424","text":"class MyHashSet(object):\n\n def __init__(self):\n self.hasSet = []\n for i in range(1000):\n self.hasSet.append([])\n\n def add(self, key):\n \"\"\"\n :type key: int\n :rtype: None\n \"\"\"\n remain = key % 1000\n list = self.hasSet[remain]\n if list.count(key) == 0:\n list.append(key)\n\n def remove(self, key):\n \"\"\"\n :type key: int\n :rtype: None\n \"\"\"\n remain = key % 1000\n list = self.hasSet[remain]\n if list.count(key) == 1:\n index = list.index(key)\n self.hasSet[remain] = list[0: index] + list[index + 1:]\n\n def contains(self, key):\n \"\"\"\n :type key: int\n :rtype: bool\n \"\"\"\n remain = key % 1000\n list = self.hasSet[remain]\n return list.count(key) == 1\n\n# Your MyHashSet object will be instantiated and called as such:\n# obj = MyHashSet()\n# obj.add(key)\n# obj.remove(key)\n# param_3 = obj.contains(key)","sub_path":"dataStructure/HashMap/MyHashSet.py","file_name":"MyHashSet.py","file_ext":"py","file_size_in_byte":875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"319694149","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /Users/ali/ownCloud/Project/python/django-aparnik-framework-project/testandbuildprojectframework/aparnik/packages/shops/products/migrations/0013_product_currency.py\n# Compiled at: 2020-01-05 09:49:45\n# Size of source mod 2**32: 500 bytes\nfrom django.db import migrations, models\n\nclass Migration(migrations.Migration):\n dependencies = [\n ('products', '0012_product_delivery_type')]\n operations = [\n migrations.AddField(model_name='product',\n name='currency',\n field=models.CharField(choices=[('IRR', 'IRR'), ('D', 'Dollar')], default='IRR', max_length=10, 
verbose_name='Currency'))]","sub_path":"pycfiles/django-apar-1.1.6.43.tar/0013_product_currency.cpython-37.py","file_name":"0013_product_currency.cpython-37.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"106977969","text":"\"\"\"\nCopyright (c) 2018-present, Facebook, Inc.\nAll rights reserved.\n\nThis source code is licensed under the BSD-style license found in the\nLICENSE file in the root directory of this source tree. An additional grant\nof patent rights can be found in the PATENTS file in the same directory.\n\"\"\"\n\nimport unittest\nimport time\nimport warnings\n\nfrom magma.pipelined.openflow.registers import DIRECTION_REG\nfrom magma.pipelined.tests.app.flow_query import RyuRestFlowQuery\nfrom magma.pipelined.tests.app.table_isolation import RyuRestTableIsolator,\\\n RyuForwardFlowArgsBuilder\nfrom magma.pipelined.tests.app.packet_injector import ScapyPacketInjector\nfrom magma.pipelined.tests.app.packet_builder import IPPacketBuilder,\\\n ARPPacketBuilder\n\n\ndef _pkt_total(stats):\n return sum(n.packets for n in stats)\n\n\n@unittest.skip(\"Rest tests currently disabled and are left as an api example\")\nclass ARPTableTest(unittest.TestCase):\n TID = 2\n IFACE = \"gtp_br0\"\n MAC_DEST = \"0e:9f:0f:0d:98:4e\"\n IP_DEST = \"192.168.128.0\"\n\n def setUp(self):\n warnings.simplefilter(\"ignore\")\n\n def test_rest_arp_flow(self):\n \"\"\"\n Sends an arp request to the ARP table\n\n Assert:\n The arp rule is matched 2 times for each arp packet\n No other rule is matched\n \"\"\"\n isolator = RyuRestTableIsolator(\n RyuForwardFlowArgsBuilder(self.TID).set_reg_value(DIRECTION_REG,\n 0x10)\n .build_requests()\n )\n flow_query = RyuRestFlowQuery(\n self.TID,\n match={\n 'eth_type': 2054,\n DIRECTION_REG: 16,\n 'arp_tpa': self.IP_DEST + '/255.255.255.0'\n }\n )\n pkt_sender = ScapyPacketInjector(self.IFACE)\n packets = ARPPacketBuilder().set_arp_layer(self.IP_DEST + \"/28\")\\\n .build()\n\n # 16 as the bitmask was /28\n num_pkts = 16\n arp_start = flow_query.lookup()[0].packets\n total_start = _pkt_total(RyuRestFlowQuery(self.TID).lookup())\n\n with isolator:\n pkt_sender.get_response(packets)\n time.sleep(2.5)\n\n arp_final = flow_query.lookup()[0].packets\n total_final = _pkt_total(RyuRestFlowQuery(self.TID).lookup())\n\n self.assertEqual(arp_final - arp_start, num_pkts * 2)\n self.assertEqual(total_final - total_start, num_pkts * 2)\n\n def test_rest_ip_flow(self):\n \"\"\"\n Sends an ip packet\n\n Assert:\n The correct ip rule is matched\n No other rule is matched\n \"\"\"\n isolator = RyuRestTableIsolator(\n RyuForwardFlowArgsBuilder(self.TID).set_reg_value(DIRECTION_REG,\n 0x1)\n .build_requests()\n )\n flow_query = RyuRestFlowQuery(\n self.TID, match={\n 'eth_type': 2048,\n DIRECTION_REG: 1\n }\n )\n pkt_sender = ScapyPacketInjector(self.IFACE)\n packet = IPPacketBuilder()\\\n .set_ether_layer(self.MAC_DEST, \"00:00:00:00:00:04\")\\\n .set_ip_layer(self.IP_DEST, \"10.0.0.0\")\\\n .build()\n\n num_pkts = 42\n ip_start = flow_query.lookup()[0].packets\n total_start = _pkt_total(RyuRestFlowQuery(self.TID).lookup())\n\n with isolator:\n pkt_sender.send(packet, num_pkts)\n time.sleep(2.5)\n\n total_final = _pkt_total(RyuRestFlowQuery(self.TID).lookup())\n ip_final = flow_query.lookup()[0].packets\n\n self.assertEqual(ip_final - ip_start, num_pkts)\n self.assertEqual(total_final - total_start, num_pkts)\n\n\nif __name__ == \"__main__\":\n 
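# NOTE: the whole suite above is decorated with @unittest.skip, so these\n    # REST tests are collected but skipped; remove that decorator and point\n    # IFACE at a live bridge (default gtp_br0) to actually run them.\n    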
unittest.main()\n","sub_path":"lte/gateway/python/magma/pipelined/tests/test_arp_table.py","file_name":"test_arp_table.py","file_ext":"py","file_size_in_byte":3863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"323986517","text":"'''\n测试登录功能\n'''\nimport pytest\n\nfrom ZongHe.baw import DbOp, Member\nfrom ZongHe.caw import DataRead\n\n# 测试前置:获取测试数据 数据是列表 通过readyaml读取来的\n@pytest.fixture(params=DataRead.readyaml(\"ZongHe/data_case/login_data.yaml\"))\ndef login_data(request): # 固定写法\n return request.param\n\n# 测试前置:获取测试数据 数据是列表 通过readyaml读取来的\n@pytest.fixture(params=DataRead.readyaml(\"ZongHe/data_case/login_setup.yaml\"))\ndef setup_data(request): # 固定写法\n return request.param\n\n\n# 测试前置和后置\n@pytest.fixture()\ndef register(setup_data,url,baserequests,db):\n # 注册\n phone = setup_data['casedata']['mobilephone']\n DbOp.deleteUser(db,phone)\n Member.register(url,baserequests,setup_data['casedata'])\n yield\n # 删除注册用户\n DbOp.deleteUser(db,phone)\n\n\ndef test_login(register,login_data,url,baserequests):\n # 登录\n # 检查登录结果\n r = Member.login(url,baserequests,login_data['casedata'])\n assert str(r.json()['msg']) == str(login_data['expect']['msg'])\n assert str(r.json()['status']) == str(login_data['expect']['status'])\n assert str(r.json()['code']) == str(login_data['expect']['code'])\n","sub_path":"ZongHe/test_script/test_script/test_login2.py","file_name":"test_login2.py","file_ext":"py","file_size_in_byte":1234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"628473245","text":"def list_ec2_images(ec2_client, module):\n image_ids = module.params.get('image_ids')\n owners = module.params.get('owners')\n executable_users = module.params.get('executable_users')\n filters = module.params.get('filters')\n owner_param = []\n for owner in owners:\n if owner.isdigit():\n if ('owner-id' not in filters):\n filters['owner-id'] = list()\n filters['owner-id'].append(owner)\n elif (owner == 'self'):\n owner_param.append(owner)\n else:\n if ('owner-alias' not in filters):\n filters['owner-alias'] = list()\n filters['owner-alias'].append(owner)\n filters = ansible_dict_to_boto3_filter_list(filters)\n try:\n images = ec2_client.describe_images(ImageIds=image_ids, Filters=filters, Owners=owner_param, ExecutableUsers=executable_users)\n images = [camel_dict_to_snake_dict(image) for image in images['Images']]\n except (ClientError, BotoCoreError) as err:\n module.fail_json_aws(err, msg='error describing images')\n for image in images:\n try:\n image['tags'] = boto3_tag_list_to_ansible_dict(image.get('tags', []))\n launch_permissions = ec2_client.describe_image_attribute(Attribute='launchPermission', ImageId=image['image_id'])['LaunchPermissions']\n image['launch_permissions'] = [camel_dict_to_snake_dict(perm) for perm in launch_permissions]\n except (ClientError, BotoCoreError) as err:\n pass\n module.exit_json(images=images)","sub_path":"Data Set/bug-fixing-5/5a9562a1012debab80768b17aefd6cd99f4f694f--bug.py","file_name":"5a9562a1012debab80768b17aefd6cd99f4f694f--bug.py","file_ext":"py","file_size_in_byte":1542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"466249036","text":"from openpyxl import load_workbook\nfrom string import ascii_lowercase\nfrom tkinter import Tk, Label, Radiobutton,Button, W, NORMAL,DISABLED, StringVar\nimport sys\nimport os\n\nwb = load_workbook('orgchart.xlsx', read_only=True)\nsh = wb.worksheets[0]\nws = 
wb.active\ncolumn_header = {}\n#Used Columns\nAdd = \"Add/Delete/Change/Transfer/Rehire\"\nAdd2 = \"Add/Delete/Change\"\nAdd3 = \"Add/Delete/Change/Transfer\"\nwindows = \"Windows for Adds\"\nemail = \"Email Address\"\nName = \"AgentName\"\ncic_id = \"CIC_ID\"\n\n\n#loop through and find column name\nfor c in ascii_lowercase:\n\tx = sh[c.upper() + \"1\"].value\n\tcolumn_header[c.upper()] = sh[c.upper() + \"1\"].value\n\ndef orgchart_data(add, windows, agent_email,agent_name, agent_tsr):\n n = 2\n while n < sh.max_row :\n if sh[add + str(n)].value == \"Add\":\n username = sh[windows + str(n)].value\n agentName = sh[agent_name + str(n)].value\n email = sh[agent_email + str(n)].value\n tsr = sh[agent_tsr + str(n)].value\n \n print(username, email,agentName, tsr)\n n += 1\n\ndef sed(a):\n for k,v in column_header.items():\n if v == a:\n a = k\n return str(k)\n\ndef column_headers():\n add = sed(Add) or sed(Add2) or sed(Add3)\n agent_username = sed(windows)\n agentemail = sed(email) or \"AZ\"\n \n agent_name = sed(Name)\n agent_tsr = sed(cic_id)\n \n orgchart_data(add, agent_username, agentemail,agent_name, agent_tsr)\n\nclass IAAutoGUI:\n def __init__(self, master, server = 0):\n self.master = master\n self.server = server\n \n master.title(\"IA Auto\")\n self.master.minsize(width=300,height=200)\n \n self.serv_label = Label(master, text=\"Select Server\")\n self.serv_label.grid(columnspan = 2, sticky = W)\n \n self.cms_button = Button(master, text=\"CMS\", command=lambda:self.get_server(1))\n self.cms_button.grid(row = 1)\n\n self.sf_button = Button(master, text=\"Salesforce\", command=lambda:self.get_server(2))\n self.sf_button.grid(row = 1, column = 1, sticky = W)\n\n self.loc_label = Label(master, text=\"Select Location\")\n self.loc_label.grid(row = 2, columnspan = 2, sticky = W, pady = 10)\n\n self.orl_button = Button(master, text=\"orl\", command=lambda:self.get_location(\"orl\"),\n state=DISABLED)\n self.orl_button.grid(row = 3)\n \n self.lvn_button = Button(master, text=\"lvn\", command=lambda:self.get_location(\"lvn\"),\n state=DISABLED)\n self.lvn_button.grid(row = 3, column = 1, sticky = W )\n \n self.spg_button = Button(master,text=\"spg\", command=lambda:self.get_location(\"spg\"),\n state=DISABLED)\n self.spg_button.grid(row = 3, column = 2, sticky = W) \n\n self.dept_label = Label(master, text=\"Choose Department\")\n self.dept_label.grid(row = 4, sticky = W, pady = 10)\n\n self.department = StringVar()\n self.outbnd = Radiobutton(master,indicatoron = 0, width = 20, padx = 20,\n text='Outbound', variable=self.department, value = \"outbnd\")\n self.outbnd.grid(row = 5)\n\n self.ct = Radiobutton(master, indicatoron = 0, width = 20, padx = 20,\n text='Call Transfer', variable=self.department, value = \"ct\")\n self.ct.grid(row = 5, column = 2)\n \n self.act = Radiobutton(master, indicatoron = 0, width = 20, padx = 20,\n text='Activations', variable=self.department, value = \"act\")\n self.act.grid(row = 6)\n \n self.cc = Radiobutton(master, indicatoron = 0, width = 20, padx = 20,\n text='Customer Care', variable=self.department, value = \"cc\")\n self.cc.grid(row = 6, column = 2)\n\n #self.outbnd.select()\n\n self.run_button = Button(master, width = 10, height = 2, padx = 10,\n text = \"Run\", command = lambda:self.get_department())\n self.run_button.grid(row = 8, pady = 10)\n \n self.reset_button = Button(master, width = 10, height = 2, padx = 10,\n text = \"Reset\", command = lambda:self.restart_program())\n self.reset_button.grid(row = 8, column = 2, pady = 10)\n\n col_count, 
row_count = root.grid_size()\n\n\n \n def get_server(self, button_id):\n if button_id == 1:\n self.server = 1\n print(\"CMS\")\n elif button_id == 2:\n self.server = 2\n print(\"Salesforce\")\n \n self.cms_button.configure(state=DISABLED)\n self.sf_button.configure(state=DISABLED)\n self.orl_button.configure(state=NORMAL)\n self.lvn_button.configure(state=NORMAL)\n self.spg_button.configure(state=NORMAL)\n \n def get_location(self, location):\n self.location = location\n \n print(location)\n self.orl_button.configure(state=DISABLED)\n self.lvn_button.configure(state=DISABLED)\n self.spg_button.configure(state=DISABLED)\n\n if self.server == 1:\n if location == \"spg\":\n self.outbnd.configure(state = DISABLED)\n self.act.configure(state = DISABLED)\n self.cc.configure(state = DISABLED)\n elif location == \"lvn\":\n self.act.configure(state = DISABLED)\n self.cc.configure(state = DISABLED)\n else:\n pass\n else:\n self.ct.configure(state = DISABLED)\n self.act.configure(state = DISABLED)\n self.cc.configure(state = DISABLED)\n self.act.configure(state = DISABLED)\n self.cc.configure(state = DISABLED)\n \n def get_department(self):\n self.department = self.department.get()\n print(self.department)\n \n self.outbnd.configure(state = DISABLED)\n self.ct.configure(state = DISABLED)\n self.act.configure(state = DISABLED)\n self.cc.configure(state = DISABLED)\n self.act.configure(state = DISABLED)\n self.cc.configure(state = DISABLED)\n column_headers()\n \n def restart_program(self):\n python = sys.executable\n os.execl(python, python, * sys.argv)\n \n \n \n\nif __name__ == '__main__':\n root = Tk()\n my_gui = IAAutoGUI(root)\n root.mainloop()\n\nprint(my_gui.server)\nprint(my_gui.location)\nprint(my_gui.department)\n","sub_path":"testScripts/tktest.py","file_name":"tktest.py","file_ext":"py","file_size_in_byte":6570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"280836517","text":"import os\nimport sys\nimport numpy as np\n\nfrom sprocket.model import GV, F0statistics\nfrom sprocket.util import HDF5, estimate_twf, melcd\nfrom sprocket.util import static_delta, align_data, extfrm\n\ndef extsddata(data, npow, power_threshold=-20):\n \"\"\"Get power extract static and delta feature vector\n\n Paramters\n ---------\n data : array, shape (`T`, `dim`)\n Acoustic feature vector\n npow : array, shape (`T`)\n Normalized power vector\n power_threshold : float, optional,\n Power threshold\n Default set to -20\n\n Returns\n -------\n extsddata : array, shape (`T_new` `dim * 2`)\n Silence remove static and delta feature vector\n\n \"\"\"\n\n extsddata = extfrm(static_delta(data), npow,\n power_threshold=power_threshold)\n return extsddata\n\ndef transform_jnt(array_list):\n num_files = len(array_list)\n for i in range(num_files):\n if i == 0:\n jnt = array_list[i]\n else:\n jnt = np.r_[jnt, array_list[i]]\n return jnt\n\n\ndef get_alignment(odata, onpow, tdata, tnpow, opow=-20, tpow=-20,\n sd=0, cvdata=None, given_twf=None, otflag=None,\n distance='melcd'):\n \"\"\"Get alignment between original and target\n\n Paramters\n ---------\n odata : array, shape (`T`, `dim`)\n Acoustic feature vector of original\n onpow : array, shape (`T`)\n Normalized power vector of original\n tdata : array, shape (`T`, `dim`)\n Acoustic feature vector of target\n tnpow : array, shape (`T`)\n Normalized power vector of target\n opow : float, optional,\n Power threshold of original\n Default set to -20\n tpow : float, optional,\n Power threshold of target\n Default set to 
-20\n sd : int , optional,\n Start dimension to be used for alignment\n Default set to 0\n cvdata : array, shape (`T`, `dim`), optional,\n Converted original data\n Default set to None\n given_twf : array, shape (`T_new`, `dim * 2`), optional,\n Alignment given twf\n Default set to None\n otflag : str, optional\n Alignment into the length of specification\n 'org' : alignment into original length\n 'tar' : alignment into target length\n Default set to None\n distance : str,\n Distance function to be used\n Default set to 'melcd'\n\n Returns\n -------\n jdata : array, shape (`T_new` `dim * 2`)\n Joint static and delta feature vector\n twf : array, shape (`T_new` `dim * 2`)\n Time warping function\n mcd : float,\n Mel-cepstrum distortion between arrays\n\n \"\"\"\n\n oexdata = extsddata(odata[:, sd:], onpow,\n power_threshold=opow)\n texdata = extsddata(tdata[:, sd:], tnpow,\n power_threshold=tpow)\n\n if cvdata is None:\n align_odata = oexdata\n else:\n cvexdata = extsddata(cvdata, onpow,\n power_threshold=opow)\n align_odata = cvexdata\n\n if given_twf is None:\n twf = estimate_twf(align_odata, texdata,\n distance=distance, otflag=otflag)\n else:\n twf = given_twf\n\n jdata = align_data(oexdata, texdata, twf)\n mcd = melcd(align_odata[twf[0]], texdata[twf[1]])\n\n return jdata, twf, mcd\n\ndef read_feats(filename, ext):\n\n datalist = []\n with open(\"./%s.txt\"%filename, 'r') as fp:\n for line in fp:\n f = line.rstrip()\n data = np.load(\"./feat/%s/%s/%s.npy\"%(ext,filename,f))\n if(ext==\"mcep\"):\n data = np.delete(data,[0,25,50,75],axis=1)\n datalist.append(data)\n\n return datalist\n\ndef align_feature_vectors(odata, onpows, tdata, tnpows,\n opow=-100, tpow=-100, itnum=3, sd=0,\n given_twfs=None, otflag=None):\n \"\"\"Get alignment to create joint feature vector\n\n Paramters\n ---------\n odata : list, (`num_files`)\n List of original feature vectors\n onpows : list , (`num_files`)\n List of original npows\n tdata : list, (`num_files`)\n List of target feature vectors\n tnpows : list , (`num_files`)\n List of target npows\n opow : float, optional,\n Power threshold of original\n Default set to -100\n tpow : float, optional,\n Power threshold of target\n Default set to -100\n itnum : int , optional,\n The number of iteration\n Default set to 3\n sd : int , optional,\n Start dimension of feature vector to be used for alignment\n Default set to 0\n given_twf : array, shape (`T_new` `dim * 2`)\n Use given alignment while 1st iteration\n Default set to None\n otflag : str, optional\n Alignment into the length of specification\n 'org' : alignment into original length\n 'tar' : alignment into target length\n Default set to None\n\n Returns\n -------\n jfvs : list,\n List of joint feature vectors\n twfs : list,\n List of time warping functions\n \"\"\"\n num_files = len(odata)\n for it in range(1, itnum + 1):\n twfs, jfvs = [], []\n for i in range(num_files):\n if it == 1 and given_twfs is not None:\n gtwf = given_twfs[i]\n else:\n gtwf = None\n jdata, twf, _ = get_alignment(odata[i],\n onpows[i],\n tdata[i],\n tnpows[i],\n opow=opow,\n tpow=tpow,\n sd=sd,\n given_twf=gtwf,\n otflag=otflag)\n twfs.append(twf)\n jfvs.append(jdata)\n\n it += 1\n return jfvs, twfs\n\nif __name__ == '__main__':\n argvs = sys.argv\n org_mceps = read_feats(argvs[1],\"mcep\")\n tar_mceps = read_feats(argvs[2],\"mcep\")\n org_npows = read_feats(argvs[1],\"npow\")\n tar_npows = read_feats(argvs[2],\"npow\")\n org_aps = read_feats(argvs[1],\"ap\")\n tar_aps = read_feats(argvs[2],\"ap\")\n org_f0s = 
read_feats(argvs[1],\"f0\")\n tar_f0s = read_feats(argvs[2],\"f0\")\n\n jmceps, twfs = align_feature_vectors(org_mceps,\n org_npows,\n tar_mceps,\n tar_npows,\n )\n jnt_mcep = transform_jnt(jmceps)\n\n japs = []\n for i in range(len(org_aps)):\n # extract codeap joint feature vector\n jap, _, _ = get_alignment(org_aps[i],\n org_npows[i],\n tar_aps[i],\n tar_npows[i]\n )\n japs.append(jap)\n jnt_ap = transform_jnt(japs)\n np.save(\"./jnt_feat/jnt_mcep/%s-%s_mcep.npy\"%(argvs[1],argvs[2]),jnt_mcep)\n np.save(\"./jnt_feat/jnt_ap/%s-%s_ap.npy\"%(argvs[1],argvs[2]),jnt_ap)\n\n f0stats1 = F0statistics()\n f0stats2 = F0statistics()\n org_f0stats = f0stats1.estimate(org_f0s)\n tar_f0stats = f0stats2.estimate(tar_f0s)\n np.save(\"./feat/f0stats/%s.npy\"%(argvs[1]),org_f0stats)\n np.save(\"./feat/f0stats/%s.npy\"%(argvs[2]),tar_f0stats)","sub_path":"myVC/data/gen_jointfeat.py","file_name":"gen_jointfeat.py","file_ext":"py","file_size_in_byte":7401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"644460683","text":"# Time Complexity : O(n)\n# Space Complexity : O(1)\n# Did this code successfully run on Leetcode : Yes\n# Any problem you faced while coding this : \n# Your code here along with comments explaining your approach\nclass Solution:\n def maxArea(self, height: List[int]) -> int:\n maxArea, l, r = 0, 0, len(height)-1\n while l < r:\n maxArea = max( maxArea, min(height[l], height[r])* (r-l))\n if height[l] < height[r]:\n l+=1\n else: \n r-=1\n return maxArea\n \n","sub_path":"Problem3.py","file_name":"Problem3.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"131247200","text":"from django.conf.urls import url\nfrom django.urls import include, path\n\nfrom Job import views\n\n# app_name = 'Event'\nurlpatterns = [\n\n url(r'^job/$', views.JobDefault.as_view(), name='job'),\n url(r'^job&search/$', views.JobSearch.as_view(), name='jobsearch'),\n path('job_add_to_favorite', views.add_to_favorite, name='job_add_to_favorite'),\n path('job_get_favorite_status', views.job_get_favorite_status, name='job_get_favorite_status'),\n path('job_fetch_data', views.fetch_data ,name='job_fetch_data'),\n path('add_referral', views.add_referral, name='add_referral'),\n path('referral_list', views.Referrals.as_view(), name='referral_list'),\n path('toggle_referral_activate', views.toggle_activate_referral, name='toggle_referral_activate'),\n]","sub_path":"Django_UCSD/Job/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"91599351","text":"# This module contains entities that are closely related to creating, maintaining and\n# accessing a local network of agents.\n# It defines a local webserver handing POST requests that are sent between multiple agents\n# and a connector that ensures that a connection between the vehicles is created.\n# it also implements utility funcions that provide information about the network.\n#\n# version: 1.0 (1.1.2020)\n#\n# TODO simplify code in `AutoConnector`\n\nimport os\nimport time\nimport logging\nimport vehicle\nimport requests\nimport traceback\nimport netifaces as ni\nimport socket as socketlib\nimport constants as const\nfrom wifi import Cell\nfrom wireless import Wireless\nfrom contextlib import closing\nfrom threading import Thread, Event\nfrom PyAccessPoint import pyaccesspoint\nfrom http.server import 
BaseHTTPRequestHandler\n\nlogging.basicConfig(format=\"%(asctime)s ::%(levelname)s:: %(message)s\", level=logging.DEBUG)\n\n\ndef get_local_ip():\n \"\"\" determines the local IP of the current device\n\n Returns:\n str: local IP of the current device\n \"\"\"\n\n f = os.popen('hostname -I')\n ip = f.read()\n return ip\n\n\ndef check_if_up(ip_address):\n \"\"\" determines if there is a webserver is running on a given ip adress\n\n Args:\n ip_address (str): ip4 adress from the local network\n\n Returns:\n bool: Boolean whether the server is running (True) or not (False)\n \"\"\"\n\n socket = socketlib.socket(socketlib.AF_INET, socketlib.SOCK_STREAM)\n socket.settimeout(0.004)\n\n try:\n with closing(socket):\n socket.connect((str(ip_address), 80))\n return True\n except socketlib.error:\n return False\n\n\nclass Server(BaseHTTPRequestHandler):\n \"\"\" custom http server handling POST or GET requests \"\"\"\n\n communication = None\n\n def do_GET(self):\n \"\"\" handles GET request \"\"\"\n\n if self.path == \"/favicon.ico\":\n self.send_response(404)\n self.end_headers()\n else:\n try:\n file_to_open = \"
<html><body><h1>Agent ID: \"+ self.communication.agent.id + \"</h1></body></html>
\"\n except AttributeError:\n raise AttributeError(\n \"The class `Server` was not provided with a communication instance before a POST request was sent.\")\n\n self.send_response(200)\n self.end_headers()\n self.wfile.write(bytes(file_to_open, \"utf-8\"))\n\n def do_POST(self):\n \"\"\" handles POST requests by triggering a communication event \"\"\"\n\n content_length = int(self.headers[\"Content-Length\"])\n body = self.rfile.read(content_length)\n self.send_response(200)\n self.end_headers()\n self.wfile.write(bytes(\"
<html><body><h1>POST</h1></body></html>
\", \"utf-8\"))\n\n response = bytes(body).decode(\"utf-8\")\n response_data = response.split(\"=\", 1)\n\n try:\n # trigger event callbacks\n for data in response_data:\n self.communication.trigger(data)\n except AttributeError:\n raise AttributeError(\n \"The class `Server` was not provided with a communication instance before a POST request was sent.\")\n\n\nclass SSIDBlock:\n def __init__(self, ssid, blocktime):\n self.blocktime = blocktime\n self.ssid = ssid\n\n def __repr__(self):\n return f\"SSIDBlock [#{self.ssid} {self.blocktime}]\"\n\n\nclass AutoConnector(Thread):\n def __init__(self, event):\n super().__init__()\n\n self.stopped = event\n self.count = 0\n self.wlan_count_found = 0\n self.wireless_module = Wireless()\n\n self.wlan_name_found_to_connect = \"\"\n self.wlan_name_found_to_connect_time = 999999999999\n\n self.access_point = pyaccesspoint.AccessPoint(ssid=\"Test Wlan\", password=const.Connection.WLAN_PASSWORD)\n self.access_point.stop()\n\n self.hotspot_status = False\n self.own_wlan_name_from_hotspot = \"parknet\"\n self.own_wlan_time_from_hotspot = 9999999999999\n\n self.last_wlan_connected = \"unset\"\n self.block_list = []\n\n def run(self):\n while not self.stopped.wait(5):\n self.wlan_count_found = 0\n self.count += 1\n print(f\"Connected with: {self.wireless_module.current()}\")\n\n if self.wireless_module.current() is None:\n if self.last_wlan_connected != \"unset\":\n time_for_block = int(time.time()) + 60\n self.add_to_block_list(self.last_wlan_connected, time_for_block)\n self.last_wlan_connected = \"unset\"\n else:\n self.last_wlan_connected = self.wireless_module.current()\n\n self.wlan_scan()\n print(f\"Loop Count: {self.count} Wlans: {self.wlan_count_found}\")\n self.connect_to_network_or_create_hotspot()\n\n def wlan_scan(self):\n self.wlan_name_found_to_connect = \"\"\n self.wlan_name_found_to_connect_time = 999999999999\n\n try:\n wlan_scans = Cell.all(\"wlan0\")\n for wlan_scan in wlan_scans:\n if wlan_scan.ssid.startswith(\"parknet\"):\n if wlan_scan.ssid != self.own_wlan_name_from_hotspot:\n try:\n time_wlan_create = int(wlan_scan.ssid.replace(\"parknet\", \"\"))\n if time_wlan_create < self.wlan_name_found_to_connect_time:\n if not self.is_blocked(str(wlan_scan.ssid)):\n print(\n f\"{wlan_scan.ssid} Quality: {wlan_scan.quality} Protected: {wlan_scan.encrypted}\")\n\n self.wlan_count_found += 1\n self.wlan_name_found_to_connect = wlan_scan.ssid\n self.wlan_name_found_to_connect_time = time_wlan_create\n else:\n print(f\"Blocked Wlan Hotspot Found: {wlan_scan.ssid}\")\n except ValueError:\n print(f\"Wrong Wlan Hotspot Found: {wlan_scan.ssid.replace('parknet', '')}\")\n else:\n print(\n f\"Found own hotspot {wlan_scan.ssid} Quality: {wlan_scan.quality} Protected: {wlan_scan.encrypted}\")\n\n if wlan_scan.ssid == \"Lukas123\":\n print(\"Found Lukas Wlan\")\n\n self.wlan_name_found_to_connect = wlan_scan.ssid\n self.wlan_name_found_to_connect_time = 1\n self.wlan_count_found += 1\n break\n except:\n print(f\"Error while scanning for wifi {traceback.format_exc()}\")\n\n def start_hotspot(self):\n if not self.hotspot_status:\n print(\"Starting hotspot\")\n\n self.own_wlan_name_from_hotspot = \"parknet\" + str(int(time.time()))\n self.own_wlan_time_from_hotspot = int(time.time())\n self.access_point.ssid = self.own_wlan_name_from_hotspot\n\n self.access_point.start()\n self.hotspot_status = True\n vehicle.start_interface()\n\n def stop_hotspot(self):\n if self.hotspot_status:\n print(\"Disabling hotspot\")\n\n self.access_point.stop()\n 
self.hotspot_status = False\n self.own_wlan_time_from_hotspot = 9999999999999\n\n def connect_to_network_or_create_hotspot(self):\n if self.wlan_count_found <= 0:\n print(\"Hotspot mode\")\n self.start_hotspot()\n elif self.own_wlan_time_from_hotspot > self.wlan_name_found_to_connect_time:\n if self.hotspot_status:\n print(\"Hotspot mode off\")\n self.stop_hotspot()\n elif self.wireless_module.current() != self.wlan_name_found_to_connect:\n print(f\"Connecting to network {self.wlan_name_found_to_connect}\")\n print(\n f\"Status: {self.wireless_module.connect(self.wlan_name_found_to_connect, const.Connection.WLAN_PASSWORD)}\")\n time.sleep(2)\n print(f\"Wlan network: {self.wireless_module.current()}\")\n vehicle.start_interface()\n\n if self.wireless_module.current() is not None:\n self.last_wlan_connected = self.wireless_module.current()\n\n def is_blocked(self, ssid):\n for block in self.block_list:\n if block.ssid == ssid and block.blocktime > int(time.time()):\n return True\n else:\n return False\n\n return False\n\n def add_to_block_list(self, ssid, blocktime):\n self.block_list.append(SSIDBlock(ssid, blocktime))\n print(f\"Blocking {ssid} for {blocktime}\")\n\n def print_list(self):\n for block in self.block_list:\n print(f\"{block} Blocked: {self.is_blocked(block.ssid)}\")\n\n @staticmethod\n def start_connector():\n stopFlag = Event()\n thread = AutoConnector(stopFlag)\n thread.start()\n","sub_path":"connection.py","file_name":"connection.py","file_ext":"py","file_size_in_byte":9032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"2840339","text":"import sys\nimport logging\nfrom functools import partial\nfrom pathlib import Path\n\n__logger__ = logging.getLogger(__name__)\nlogging.basicConfig(level=logging.INFO)\n\n\nclass Tokenizer:\n KEYWORD = \"keyword\"\n SYMBOL = \"symbol\"\n IDENTIFIER = \"identifier\"\n INT_CONST = \"integerConstant\"\n STR_CONST = \"stringConstant\"\n\n KEYWORDS = (\n \"class\", \"constructor\", \"function\", \"method\", \"field\", \"static\",\n \"var\", \"int\", \"char\", \"boolean\", \"void\", \"true\", \"false\", \"null\",\n \"this\", \"let\", \"do\", \"if\", \"else\", \"while\", \"return\"\n )\n SYMBOLS = (\n \"{\", \"}\", \"(\", \")\", \"[\", \"]\", \".\", \",\", \";\", \"+\", \"-\", \"*\", \"/\", \"&\", \"|\", \"<\", \">\", \"=\", \"~\"\n )\n\n def __init__(self, infile):\n self._source = open(infile, \"r\").readlines()\n self._code = iter(self._source)\n self.current_line = None\n self.current_token = True\n self.tokens = None\n self.has_more_token = True\n\n def _token_builder(self, token=None, tokens=None, str_const=False):\n tokens = tokens if tokens else list()\n token = token if token else \"\"\n try:\n char = next(self.current_line)\n except StopIteration:\n if token:\n tokens.append(token)\n return tokens\n if char == \"\\\"\":\n str_const = False if token else True\n if token:\n tokens.append(f\"\\\"{token}\\\"\")\n return self._token_builder(tokens=tokens, str_const=str_const)\n elif char in self.SYMBOLS and not str_const:\n if token:\n tokens.append(token)\n tokens.append(char)\n return self._token_builder(tokens=tokens)\n elif char != \" \" or str_const:\n token += char\n return self._token_builder(token, tokens, str_const)\n elif char == \" \":\n if token:\n tokens.append(token)\n return self._token_builder(tokens=tokens)\n return tokens\n\n def get_next_code_snippet(self):\n item = next(self._code).strip()\n if item.startswith(\"//\") or not item:\n return 
self.get_next_code_snippet()\n elif item.startswith(\"/**\"):\n while not item.endswith(\"*/\"):\n item = self.get_next_code_snippet()\n return self.get_next_code_snippet()\n code_snippet = item.strip().split(\"//\")[0].strip()\n return code_snippet\n\n def next_token(self):\n try:\n self.current_token = next(self.tokens)\n except (TypeError, StopIteration):\n try:\n code_snippet = self.get_next_code_snippet()\n # __logger__.debug(f\"Current line to process: {code_snippet}\")\n self.current_line = iter(code_snippet)\n tokens = self._token_builder()\n self.tokens = iter(tokens)\n return self.next_token()\n except StopIteration:\n self.has_more_token = False\n self.current_token = False\n return self.current_token\n\n def get_token_type(self):\n if self.current_token.startswith(\"\\\"\") and self.current_token.endswith(\"\\\"\"):\n return self.STR_CONST\n if self.current_token in self.KEYWORDS:\n return self.KEYWORD\n elif self.current_token in self.SYMBOLS:\n return self.SYMBOL\n try:\n int(self.current_token)\n except ValueError:\n return self.IDENTIFIER\n return self.INT_CONST\n\n def keyword(self):\n return self.current_token\n\n def symbol(self):\n special_chars_map = {\n \"<\": \"&lt;\",\n \">\": \"&gt;\",\n \"&\": \"&amp;\"\n }\n self.current_token = special_chars_map.get(\n self.current_token, self.current_token\n )\n return self.current_token\n\n def identifier(self):\n return self.current_token\n\n def integerConstant(self):\n return self.current_token\n\n def stringConstant(self):\n return self.current_token[1:-1]\n\n\nclass Parser:\n STATEMENT_KEYWORDS = (\"let\", \"if\", \"while\", \"do\", \"return\")\n OP = (\"+\", \"-\", \"*\", \"/\", \"&\", \"|\", \"<\", \">\", \"=\")\n UNARY_OP = (\"-\", \"~\")\n KEYWORD_CONSTANTS = (\"true\", \"false\", \"null\", \"this\")\n\n def __init__(self, source_path, tokenizer):\n self.source_p = source_path\n self.outfile = self._construct_outfile_path()\n self.tokenizer = tokenizer\n self.file = open(self.outfile, 'w')\n\n def _construct_outfile_path(self):\n return Path(self.source_p.parent, f\"{self.source_p.stem}\").with_suffix(\".xml\")\n\n def compile_class(self):\n class_grammar = self._class_grammar()\n # __logger__.info(f\"Writing: {self.outfile}\")\n for elem in class_grammar[\"fixpattern\"]:\n elem_grammar = class_grammar[elem]\n self.tokenizer.next_token()\n # __logger__.info(f\"Matching grammar for: {self.tokenizer.current_token}\")\n validator_kwargs = elem_grammar[\"validator\"]\n if self._match_grammar(**validator_kwargs):\n if \"non-terminal\" in elem_grammar[\"write\"]:\n self.file.write(elem_grammar[\"write\"][\"non-terminal\"])\n elem_grammar[\"write\"][\"terminal\"]()\n\n self.tokenizer.next_token()\n while self._if_optional_grammar_applies(class_grammar, optional_grammar_key=\"optional_1\"):\n grammar = self._get_optional_grammar(class_grammar, optional_key=\"optional_1\")\n validator_kwargs = grammar[\"validator\"]\n if self._match_grammar(**validator_kwargs):\n grammar[\"write\"][\"terminal\"]()\n\n while self._if_optional_grammar_applies(class_grammar, optional_grammar_key=\"optional_2\"):\n grammar = self._get_optional_grammar(class_grammar, optional_key=\"optional_2\")\n validator_kwargs = grammar[\"validator\"]\n if self._match_grammar(**validator_kwargs):\n self.file.write(grammar[\"write\"][\"non-terminal\"])\n grammar[\"write\"][\"terminal\"]()\n\n close_grammar = self._get_close_pattern_grammar(class_grammar)\n # __logger__.info(f\"Matching grammar for: {self.tokenizer.current_token}\")\n validator_kwargs = 
close_grammar[\"validator\"]\n if self._match_grammar(**validator_kwargs):\n close_grammar[\"write\"][\"terminal\"]()\n\n self._write_non_terminal_tag(\"class\", open_tag=False)\n return self.file.close()\n\n def compile_class_var_dec(self):\n class_var_dec_grammar = self._class_var_dec_grammar()\n self._write_non_terminal_tag(\"classVarDec\")\n for elem in class_var_dec_grammar[\"fixPattern\"]:\n # __logger__.info(f\"Matching grammar for: {self.tokenizer.current_token}\")\n elem_grammar = class_var_dec_grammar[elem]\n validator_kwargs = elem_grammar[\"validator\"]\n if self._match_grammar(**validator_kwargs):\n if \"non-terminal\" in elem_grammar[\"write\"]:\n self.file.write(elem_grammar[\"write\"][\"non-terminal\"])\n elem_grammar[\"write\"][\"terminal\"]()\n\n while self._if_optional_group_applies(class_var_dec_grammar):\n for elem in class_var_dec_grammar[\"optionalGroup\"]:\n # __logger__.info(f\"Matching grammar for: {self.tokenizer.current_token}\")\n elem_grammar = class_var_dec_grammar[elem]\n validator_kwargs = elem_grammar[\"validator\"]\n if self._match_grammar(**validator_kwargs):\n elem_grammar[\"write\"][\"terminal\"]()\n\n close_grammar = self._get_close_pattern_grammar(class_var_dec_grammar)\n # __logger__.info(f\"Matching grammar for: {self.tokenizer.current_token}\")\n validator_kwargs = close_grammar[\"validator\"]\n if self._match_grammar(**validator_kwargs):\n close_grammar[\"write\"][\"terminal\"]()\n self._write_non_terminal_tag(\"classVarDec\", open_tag=False)\n\n if self.block_is_not_done(class_var_dec_grammar):\n return self.compile_class_var_dec()\n return\n\n def compile_subroutine_dec(self):\n subroutine_dec_grammar = self._subroutine_dec_grammar()\n for elem in subroutine_dec_grammar[\"fixPattern\"]:\n # __logger__.info(f\"Matching grammar for: {self.tokenizer.current_token}\")\n elem_grammar = subroutine_dec_grammar[elem]\n validator_kwargs = elem_grammar[\"validator\"]\n if self._match_grammar(**validator_kwargs):\n if \"non-terminal\" in elem_grammar[\"write\"]:\n self.file.write(elem_grammar[\"write\"][\"non-terminal\"])\n elem_grammar[\"write\"][\"terminal\"]()\n self._write_non_terminal_tag(\"subroutineDec\", open_tag=False)\n\n def compile_parameter_list(self):\n parameter_list_grammar = self._parameter_list_grammar()\n if self._if_optional_group_applies(parameter_list_grammar):\n for elem in parameter_list_grammar[\"optionalGroup\"]:\n # __logger__.info(f\"Matching grammar for: {self.tokenizer.current_token}\")\n elem_grammar = parameter_list_grammar[elem]\n validator_kwargs = elem_grammar[\"validator\"]\n if self._match_grammar(**validator_kwargs):\n elem_grammar[\"write\"][\"terminal\"]()\n\n while self._if_optional_group_applies(parameter_list_grammar, optional_group_key=\"optionalGroup_2\"):\n for elem in parameter_list_grammar[\"optionalGroup_2\"]:\n # __logger__.info(f\"Matching grammar for: {self.tokenizer.current_token}\")\n elem_grammar = parameter_list_grammar[elem]\n validator_kwargs = elem_grammar[\"validator\"]\n if self._match_grammar(**validator_kwargs):\n elem_grammar[\"write\"][\"terminal\"]()\n\n def compile_subroutine_body(self):\n subroutine_body_grammar = self._subroutine_body_grammar()\n for elem in subroutine_body_grammar[\"fixPattern\"]:\n # __logger__.info(f\"Matching grammar for: {self.tokenizer.current_token}\")\n elem_grammar = subroutine_body_grammar[elem]\n validator_kwargs = elem_grammar[\"validator\"]\n if self._match_grammar(**validator_kwargs):\n if \"non-terminal\" in elem_grammar[\"write\"]:\n 
self.file.write(elem_grammar[\"write\"][\"non-terminal\"])\n elem_grammar[\"write\"][\"terminal\"]()\n\n if self._if_optional_grammar_applies(subroutine_body_grammar):\n optional_grammar = subroutine_body_grammar[\"optional\"]\n elem_grammar = subroutine_body_grammar[optional_grammar]\n # __logger__.info(f\"token is: {self.tokenizer.current_token}, matched grammar: {optional_grammar}\")\n elem_grammar[\"write\"][\"terminal\"]()\n\n for elem in subroutine_body_grammar[\"fixPattern_2\"]:\n # __logger__.info(f\"Matching grammar for: {self.tokenizer.current_token}\")\n elem_grammar = subroutine_body_grammar[elem]\n validator_kwargs = elem_grammar[\"validator\"]\n if self._match_grammar(**validator_kwargs):\n elem_grammar[\"write\"][\"terminal\"]()\n self._write_non_terminal_tag(elem=\"subroutineBody\", open_tag=False)\n\n def compile_var_dec(self):\n var_dec_grammar = self._var_dec_grammar()\n self._write_non_terminal_tag(\"varDec\")\n for elem in var_dec_grammar[\"fixPattern\"]:\n # __logger__.info(f\"Matching grammar for: {self.tokenizer.current_token}\")\n elem_grammar = var_dec_grammar[elem]\n validator_kwargs = elem_grammar[\"validator\"]\n if self._match_grammar(**validator_kwargs):\n if \"non-terminal\" in elem_grammar[\"write\"]:\n self.file.write(elem_grammar[\"write\"][\"non-terminal\"])\n elem_grammar[\"write\"][\"terminal\"]()\n\n while self._if_optional_group_applies(var_dec_grammar):\n for elem in var_dec_grammar[\"optionalGroup\"]:\n # __logger__.info(f\"Matching grammar for: {self.tokenizer.current_token}\")\n elem_grammar = var_dec_grammar[elem]\n validator_kwargs = elem_grammar[\"validator\"]\n if self._match_grammar(**validator_kwargs):\n elem_grammar[\"write\"][\"terminal\"]()\n\n close_grammar = self._get_close_pattern_grammar(var_dec_grammar)\n # __logger__.info(f\"Matching grammar for: {self.tokenizer.current_token}\")\n\n validator_kwargs = close_grammar[\"validator\"]\n if self._match_grammar(**validator_kwargs):\n close_grammar[\"write\"][\"terminal\"]()\n self._write_non_terminal_tag(\"varDec\", open_tag=False)\n\n if self.block_is_not_done(var_dec_grammar):\n return self.compile_var_dec()\n return\n\n def compile_statements(self):\n self._write_non_terminal_tag(elem=\"statements\")\n while self.tokenizer.current_token in self.STATEMENT_KEYWORDS:\n func = getattr(self, f\"compile_{self.tokenizer.current_token}_statement\")\n func()\n self._write_non_terminal_tag(elem=\"statements\", open_tag=False)\n\n def compile_let_statement(self):\n let_grammar = self._let_statement_grammar()\n self._write_non_terminal_tag(elem=\"letStatement\")\n for elem in let_grammar[\"fixPattern\"]:\n # __logger__.info(f\"Matching grammar for: {self.tokenizer.current_token}\")\n elem_grammar = let_grammar[elem]\n validator_kwargs = elem_grammar[\"validator\"]\n if self._match_grammar(**validator_kwargs):\n elem_grammar[\"write\"][\"terminal\"]()\n\n if self._if_optional_group_applies(let_grammar):\n for elem in let_grammar[\"optionalGroup\"]:\n # __logger__.info(f\"Matching grammar for: {self.tokenizer.current_token}\")\n elem_grammar = let_grammar[elem]\n validator_kwargs = elem_grammar[\"validator\"]\n if self._match_grammar(**validator_kwargs):\n elem_grammar[\"write\"][\"terminal\"]()\n\n for elem in let_grammar[\"fixPattern_2\"]:\n # __logger__.info(f\"Matching grammar for: {self.tokenizer.current_token}\")\n elem_grammar = let_grammar[elem]\n validator_kwargs = elem_grammar[\"validator\"]\n if self._match_grammar(**validator_kwargs):\n elem_grammar[\"write\"][\"terminal\"]()\n 
self._write_non_terminal_tag(elem=\"letStatement\", open_tag=False)\n\n def compile_if_statement(self):\n if_grammar = self._if_statement_grammar()\n self._write_non_terminal_tag(elem=\"ifStatement\")\n for elem in if_grammar[\"fixPattern\"]:\n # __logger__.info(f\"Matching grammar for: {self.tokenizer.current_token}\")\n elem_grammar = if_grammar[elem]\n validator_obj = elem_grammar[\"validator\"]\n if isinstance(validator_obj, dict):\n if self._match_grammar(**validator_obj):\n elem_grammar[\"write\"][\"terminal\"]()\n else:\n if validator_obj():\n elem_grammar[\"write\"][\"terminal\"]()\n\n if self._if_optional_group_applies(block_grammar=if_grammar):\n for elem in if_grammar[\"optionalGroup\"]:\n # __logger__.info(f\"Matching grammar for: {self.tokenizer.current_token}\")\n elem_grammar = if_grammar[elem]\n validator_obj = elem_grammar[\"validator\"]\n if isinstance(validator_obj, dict):\n if self._match_grammar(**validator_obj):\n elem_grammar[\"write\"][\"terminal\"]()\n else:\n if validator_obj():\n elem_grammar[\"write\"][\"terminal\"]()\n\n self._write_non_terminal_tag(elem=\"ifStatement\", open_tag=False)\n\n def compile_while_statement(self):\n while_grammar = self._while_statement_grammar()\n self._write_non_terminal_tag(elem=\"whileStatement\")\n for elem in while_grammar[\"fixPattern\"]:\n # __logger__.info(f\"Matching grammar for: {self.tokenizer.current_token}\")\n elem_grammar = while_grammar[elem]\n validator_obj = elem_grammar[\"validator\"]\n if isinstance(validator_obj, dict):\n if self._match_grammar(**validator_obj):\n elem_grammar[\"write\"][\"terminal\"]()\n else:\n if validator_obj():\n elem_grammar[\"write\"][\"terminal\"]()\n self._write_non_terminal_tag(elem=\"whileStatement\", open_tag=False)\n\n def compile_do_statement(self):\n do_grammar = self._do_statement_grammar()\n self._write_non_terminal_tag(elem=\"doStatement\")\n for elem in do_grammar[\"fixPattern\"]:\n # __logger__.info(f\"Matching grammar for: {self.tokenizer.current_token}\")\n elem_grammar = do_grammar[elem]\n validator_kwargs = elem_grammar[\"validator\"]\n if self._match_grammar(**validator_kwargs):\n elem_grammar[\"write\"][\"terminal\"]()\n self._write_non_terminal_tag(elem=\"doStatement\", open_tag=False)\n\n def compile_return_statement(self):\n return_grammar = self._return_grammar()\n self._write_non_terminal_tag(elem=\"returnStatement\")\n fix_grammar = self._get_fixpattern_grammar(return_grammar)\n validator_kwargs = fix_grammar[\"validator\"]\n if self._match_grammar(**validator_kwargs):\n fix_grammar[\"write\"][\"terminal\"]()\n\n optional_grammar = self._get_optional_grammar(return_grammar)\n if optional_grammar[\"validator\"]():\n optional_grammar[\"write\"][\"terminal\"]()\n\n close_grammar = self._get_close_pattern_grammar(return_grammar)\n # __logger__.info(f\"Matching grammar for: {self.tokenizer.current_token}\")\n validator_kwargs = close_grammar[\"validator\"]\n if self._match_grammar(**validator_kwargs):\n close_grammar[\"write\"][\"terminal\"]()\n self._write_non_terminal_tag(elem=\"returnStatement\", open_tag=False)\n\n def compile_expression(self):\n expression_grammar = self._expression_grammar()\n self._write_non_terminal_tag(elem=\"expression\")\n term_grammar = self._get_fixpattern_grammar(expression_grammar)\n # __logger__.info(f\"Matching grammar for: {self.tokenizer.current_token}\")\n\n def _compile_term():\n if self.is_simple_term():\n self._handle_simple_term(term_grammar)\n elif self._if_optional_group_applies(expression_grammar, 
optional_group_key=\"optionalSubGroup_1\"):\n self._write_non_terminal_tag(\"term\")\n for elem in expression_grammar[\"optionalSubGroup_1\"]:\n elem_grammar = expression_grammar[elem]\n validator_kwargs = elem_grammar[\"validator\"]\n if self._match_grammar(**validator_kwargs):\n elem_grammar[\"write\"][\"terminal\"]()\n self._write_non_terminal_tag(\"term\", open_tag=False)\n\n elif self._if_optional_group_applies(expression_grammar, optional_group_key=\"optionalSubGroup_2\"):\n self._write_non_terminal_tag(\"term\")\n for elem in expression_grammar[\"optionalSubGroup_2\"]:\n elem_grammar = expression_grammar[elem]\n validator_kwargs = elem_grammar[\"validator\"]\n if self._match_grammar(**validator_kwargs):\n is_recursive = elem_grammar.get(\"recursive\", False)\n if not is_recursive:\n elem_grammar[\"write\"][\"terminal\"]()\n else:\n _compile_term()\n self._write_non_terminal_tag(\"term\", open_tag=False)\n\n elif self._is_identifier():\n self._write_non_terminal_tag(\"term\")\n cached_token_dict = self._lookahead_for_next_token()\n if self._if_optional_group_applies(\n expression_grammar, optional_group_key=\"optionalSubGroup_3\", index=1\n ):\n self._write_terminal_element_from_cache(cached_token_dict)\n for elem in expression_grammar[\"optionalSubGroup_3\"][1:]:\n elem_grammar = expression_grammar[elem]\n validator_kwargs = elem_grammar[\"validator\"]\n if self._match_grammar(**validator_kwargs):\n elem_grammar[\"write\"][\"terminal\"]()\n\n elif self._if_optional_group_applies(\n expression_grammar, optional_group_key=\"optionalSubGroup_4\", index=1\n ):\n self._write_subroutine_call(cached_token_dict)\n else:\n self._write_terminal_element_from_cache(cached_token_dict)\n self._write_non_terminal_tag(\"term\", open_tag=False)\n\n _compile_term()\n while self._if_optional_group_applies(expression_grammar):\n for optional_elem in expression_grammar[\"optionalGroup\"]:\n optional_grammar = expression_grammar[optional_elem]\n optional_validator_kwargs = optional_grammar[\"validator\"]\n if self._match_grammar(**optional_validator_kwargs):\n recursive = optional_grammar.get(\"recursive\", False)\n if not recursive:\n optional_grammar[\"write\"][\"terminal\"]()\n else:\n _compile_term()\n\n self._write_non_terminal_tag(elem=\"expression\", open_tag=False)\n\n def _handle_simple_term(self, grammar):\n self.file.write(grammar[\"write\"]['non-terminal'])\n grammar[\"write\"][\"terminal\"]()\n self.file.write(grammar[\"write\"]['non-terminal-close'])\n\n def _write_subroutine_call(self, cached_token_dict=None):\n subroutine_grammar = self._subroutine_call_grammar()\n if cached_token_dict:\n applicable = True if self.tokenizer.current_token == \".\" else False\n else:\n applicable, cached_token_dict = self.look_ahead_for_optional_grammar(subroutine_grammar)\n self._write_terminal_element_from_cache(cached_token_dict)\n if applicable:\n elem = subroutine_grammar[\"optionalGroup\"][1]\n elem_grammar = subroutine_grammar[elem]\n validator_kwargs = elem_grammar[\"validator\"]\n if self._match_grammar(**validator_kwargs):\n elem_grammar[\"write\"][\"terminal\"]()\n\n fix_pattern_index = 0 if applicable else 1\n for elem in subroutine_grammar[\"fixPattern\"][fix_pattern_index:]:\n # __logger__.info(f\"Matching grammar for: {self.tokenizer.current_token}\")\n elem_grammar = subroutine_grammar[elem]\n validator_kwargs = elem_grammar[\"validator\"]\n if self._match_grammar(**validator_kwargs):\n elem_grammar[\"write\"][\"terminal\"]()\n\n def compile_expression_list(self):\n expression_list_grammar = 
self._expression_list_grammar()\n self._write_non_terminal_tag(elem=\"expressionList\")\n optional_grammar = self._get_optional_grammar(expression_list_grammar)\n if optional_grammar[\"validator\"]():\n optional_grammar[\"write\"][\"terminal\"]()\n while self._if_optional_group_applies(expression_list_grammar, optional_group_key=\"optionalGroup_2\"):\n for elem in expression_list_grammar[\"optionalGroup_2\"]:\n # __logger__.info(f\"Matching grammar for: {self.tokenizer.current_token}\")\n elem_grammar = expression_list_grammar[elem]\n validator_obj = elem_grammar[\"validator\"]\n if isinstance(validator_obj, dict):\n if self._match_grammar(**validator_obj):\n elem_grammar[\"write\"][\"terminal\"]()\n else:\n if validator_obj():\n elem_grammar[\"write\"][\"terminal\"]()\n self._write_non_terminal_tag(elem=\"expressionList\", open_tag=False)\n\n def _write_terminal_element(self, advance_token=True):\n token_type = self.tokenizer.get_token_type()\n get_value = getattr(self.tokenizer, token_type)\n self.file.write(f\"<{token_type}>{get_value()}</{token_type}>\\n\")\n if advance_token:\n self.tokenizer.next_token()\n\n def _write_terminal_element_from_cache(self, cached_token_dict):\n token_type = cached_token_dict.get(\"cached_token_type\")\n token_value = cached_token_dict.get(\"cached_token\")\n self.file.write(f\"<{token_type}>{token_value}</{token_type}>\\n\")\n\n def write_non_terminal_close_tag(self, block_grammar):\n closing_elem = block_grammar[\"closePattern\"]\n close_tag = block_grammar[closing_elem][\"write\"][\"non-terminal\"]\n self.file.write(close_tag)\n\n def _write_non_terminal_tag(self, elem, open_tag=True):\n tag_to_write = f\"<{elem}>\\n\" if open_tag else f\"</{elem}>\\n\"\n self.file.write(tag_to_write)\n\n @staticmethod\n def _get_fixpattern_grammar(grammar):\n key = grammar[\"fixPattern\"]\n return grammar[key]\n\n @staticmethod\n def _get_optional_grammar(grammar, optional_key=\"optional\"):\n key = grammar[optional_key]\n return grammar[key]\n\n @staticmethod\n def _get_close_pattern_grammar(grammar):\n key = grammar[\"closePattern\"]\n return grammar[key]\n\n def _match_grammar(self, expected_token=None, expected_token_type=None, optional=None, force_pass=False):\n if force_pass:\n return True\n token = self.tokenizer.current_token\n token_type = self.tokenizer.get_token_type()\n if isinstance(expected_token, tuple):\n if token in expected_token:\n return True\n elif (self.tokenizer.IDENTIFIER in expected_token and\n token_type == self.tokenizer.IDENTIFIER):\n return True\n elif optional:\n return False\n elif expected_token and token == expected_token:\n return True\n elif expected_token_type and not expected_token:\n if expected_token_type == token_type:\n return True\n elif optional:\n return False\n # __logger__.error(f\"Expected: type: '{expected_token_type}', token: '{expected_token}'.\"\n # f\"Got: type: '{token_type}', token: '{token}'\")\n self.file.close()\n sys.exit(3)\n\n def _match_expression_grammar(self):\n token = self.tokenizer.current_token\n token_type = self.tokenizer.get_token_type()\n if token in self.KEYWORD_CONSTANTS:\n return True\n elif token_type in (\n self.tokenizer.INT_CONST, self.tokenizer.STR_CONST, self.tokenizer.IDENTIFIER\n ):\n return True\n elif token in (*self.UNARY_OP, \"(\"):\n return True\n return False\n\n def is_simple_term(self):\n token = self.tokenizer.current_token\n token_type = self.tokenizer.get_token_type()\n if token in self.KEYWORD_CONSTANTS:\n return True\n elif token_type in (self.tokenizer.INT_CONST, self.tokenizer.STR_CONST):\n return True\n return 
False\n\n def _is_identifier(self):\n token_type = self.tokenizer.get_token_type()\n if token_type == self.tokenizer.IDENTIFIER:\n return True\n return False\n\n def _if_optional_group_applies(self, block_grammar, optional_group_key=None, index=0):\n optional_group_key = optional_group_key if optional_group_key else \"optionalGroup\"\n elem_index_to_match = index if index else 0\n elem = block_grammar[optional_group_key][elem_index_to_match]\n grammar = block_grammar[elem]\n validator_kwargs = grammar[\"validator\"]\n if self._match_grammar(**validator_kwargs):\n return True\n return False\n\n def _if_optional_grammar_applies(self, block_grammar, optional_grammar_key=None):\n optional_grammar_key = optional_grammar_key if optional_grammar_key else \"optional\"\n elem_key = block_grammar[optional_grammar_key]\n grammar = block_grammar[elem_key]\n validator_kwargs = grammar[\"validator\"]\n if self._match_grammar(**validator_kwargs):\n return True\n return False\n\n def _lookahead_for_next_token(self):\n cached_token_dict = dict(\n cached_token=self.tokenizer.current_token,\n cached_token_type=self.tokenizer.get_token_type(),\n )\n self.tokenizer.next_token()\n return cached_token_dict\n\n def look_ahead_for_optional_grammar(self, block_grammar):\n first_elem, second_elem = block_grammar[\"optionalGroup\"][0], block_grammar[\"optionalGroup\"][1]\n grammar = block_grammar[first_elem]\n if self._check_if_grammar_is_valid(grammar=grammar):\n cached_token_dict = self._lookahead_for_next_token()\n grammar = block_grammar[second_elem]\n if self._check_if_grammar_is_valid(grammar=grammar):\n return True, cached_token_dict\n return False, cached_token_dict\n\n def _check_if_grammar_is_valid(self, grammar):\n validator_kwargs = grammar[\"validator\"]\n if self._match_grammar(**validator_kwargs):\n return True\n return False\n\n def block_is_not_done(self, block_grammar):\n first_elem = block_grammar[\"fixPattern\"][0]\n grammar = block_grammar[first_elem]\n validator_kwargs = grammar[\"validator\"]\n if self._match_grammar(**validator_kwargs, optional=True):\n return True\n return False\n\n def _class_grammar(self):\n grammar_map = {\n \"fixpattern\": (\"class\", \"ClassName\", \"{\",),\n \"optional_1\": \"classVarDec\",\n \"optional_2\": \"subRoutineDec\",\n \"closePattern\": \"}\",\n \"class\": {\n \"validator\": {\n \"expected_token\": \"class\"\n },\n \"write\": {\n \"non-terminal\": \"<class>\\n\",\n \"terminal\": partial(self._write_terminal_element, False)\n }\n },\n \"ClassName\": {\n \"validator\": {\n \"expected_token\": self.source_p.stem,\n \"expected_token_type\": self.tokenizer.IDENTIFIER\n },\n \"write\": {\n \"terminal\": partial(self._write_terminal_element, False)\n }\n },\n \"{\": {\n \"validator\": {\n \"expected_token\": \"{\",\n \"expected_token_type\": self.tokenizer.SYMBOL\n },\n \"write\": {\n \"terminal\": partial(self._write_terminal_element, False)\n }\n },\n \"classVarDec\": {\n \"validator\": {\n \"expected_token\": (\"static\", \"field\"),\n \"optional\": True\n },\n \"write\": {\n \"terminal\": self.compile_class_var_dec\n }\n },\n \"subRoutineDec\": {\n \"validator\": {\n \"expected_token\": (\"constructor\", \"function\", \"method\"),\n \"optional\": True\n },\n \"write\": {\n \"non-terminal\": \"<subroutineDec>\\n\",\n \"terminal\": self.compile_subroutine_dec\n }\n },\n \"}\": {\n \"validator\": {\n \"expected_token\": \"}\",\n },\n \"write\": {\n \"terminal\": self._write_terminal_element,\n }\n }\n }\n return grammar_map\n\n def _class_var_dec_grammar(self):\n grammar_map = {\n 
\"fixPattern\": (\"static|field\", \"type\", \"varName\"),\n \"optionalGroup\": (\",\", \"varName\"),\n \"closePattern\": \";\",\n \"static|field\": {\n \"validator\": {\n \"expected_token\": (\"static\", \"field\")\n },\n \"write\": {\n \"terminal\": self._write_terminal_element\n }\n },\n \"type\": {\n \"validator\": {\n \"expected_token\": ('int', 'char', 'boolean', self.tokenizer.IDENTIFIER),\n },\n \"write\": {\n \"terminal\": self._write_terminal_element\n }\n },\n \"varName\": {\n \"validator\": {\n \"expected_token_type\": self.tokenizer.IDENTIFIER\n },\n \"write\": {\n \"terminal\": self._write_terminal_element\n }\n },\n \",\": {\n \"validator\": {\n \"expected_token\": \",\",\n \"expected_token_type\": self.tokenizer.SYMBOL,\n \"optional\": True\n },\n \"write\": {\n \"terminal\": self._write_terminal_element\n }\n },\n \";\": {\n \"validator\": {\n \"expected_token\": \";\",\n },\n \"write\": {\n \"terminal\": self._write_terminal_element,\n }\n }\n }\n return grammar_map\n\n def _subroutine_dec_grammar(self):\n grammar_map = {\n \"fixPattern\": (\n \"constructor|function|method\", \"void|type\", \"subroutineName\",\n \"(\", \"parameterList\", \")\", \"subroutineBody\"\n ),\n \"constructor|function|method\": {\n \"validator\": {\n \"expected_token\": (\"constructor\", \"function\", \"method\")\n },\n \"write\": {\n \"terminal\": self._write_terminal_element\n },\n },\n \"void|type\": {\n \"validator\": {\n \"expected_token\": ('int', 'char', 'boolean', 'void', self.tokenizer.IDENTIFIER),\n },\n \"write\": {\n \"terminal\": self._write_terminal_element\n }\n },\n \"subroutineName\": {\n \"validator\": {\n \"expected_token_type\": self.tokenizer.IDENTIFIER\n },\n \"write\": {\n \"terminal\": self._write_terminal_element\n }\n },\n \"(\": {\n \"validator\": {\n \"expected_token\": \"(\",\n \"expected_token_type\": self.tokenizer.SYMBOL\n },\n \"write\": {\n \"terminal\": self._write_terminal_element\n }\n },\n \"parameterList\": {\n \"validator\": {\n \"force_pass\": True\n },\n \"write\": {\n \"non-terminal\": \"\\n\",\n \"terminal\": self.compile_parameter_list\n }\n },\n \")\": {\n \"validator\": {\n \"expected_token\": \")\",\n \"expected_token_type\": self.tokenizer.SYMBOL\n },\n \"write\": {\n \"terminal\": self._write_terminal_element,\n \"non-terminal\": \"\\n\",\n }\n },\n \"subroutineBody\": {\n \"validator\": {\n \"expected_token\": \"{\",\n \"expected_token_type\": self.tokenizer.SYMBOL\n },\n \"write\": {\n \"non-terminal\": \"\\n\",\n \"terminal\": self.compile_subroutine_body\n }\n }\n\n }\n return grammar_map\n\n def _parameter_list_grammar(self):\n grammar_map = {\n \"optionalGroup\": (\"type\", \"varName\"),\n \"optionalGroup_2\": (\",\", \"type\", \"varName\"),\n \"type\": {\n \"validator\": {\n \"expected_token\": ('int', 'char', 'boolean', self.tokenizer.IDENTIFIER),\n \"optional\": True\n },\n \"write\": {\n \"terminal\": self._write_terminal_element\n }\n },\n \"varName\": {\n \"validator\": {\n \"expected_token_type\": self.tokenizer.IDENTIFIER\n },\n \"write\": {\n \"terminal\": self._write_terminal_element\n }\n },\n \",\": {\n \"validator\": {\n \"expected_token\": \",\",\n \"expected_token_type\": self.tokenizer.SYMBOL,\n \"optional\": True\n },\n \"write\": {\n \"terminal\": self._write_terminal_element\n }\n },\n }\n return grammar_map\n\n def _subroutine_body_grammar(self):\n grammar_map = {\n \"fixPattern\": \"{\",\n \"optional\": \"varDec\",\n \"fixPattern_2\": (\"statements\", \"}\"),\n \"{\": {\n \"validator\": {\n \"expected_token\": \"{\",\n 
\"expected_token_type\": self.tokenizer.SYMBOL\n },\n \"write\": {\n \"terminal\": self._write_terminal_element\n }\n },\n \"varDec\": {\n \"validator\": {\n \"expected_token\": \"var\",\n \"expected_token_type\": self.tokenizer.KEYWORD,\n \"optional\": True\n },\n \"write\": {\n \"terminal\": self.compile_var_dec\n }\n },\n \"statements\": {\n \"validator\": {\n \"expected_token\": (\"let\", \"if\", \"while\", \"do\", \"return\"),\n },\n \"write\": {\n \"terminal\": self.compile_statements\n }\n },\n \"}\": {\n \"validator\": {\n \"expected_token\": \"}\",\n },\n \"write\": {\n \"terminal\": self._write_terminal_element,\n }\n }\n }\n return grammar_map\n\n def _var_dec_grammar(self):\n grammar_map = {\n \"fixPattern\": (\"var\", \"type\", \"varName\"),\n \"optionalGroup\": (\",\", \"varName\"),\n \"closePattern\": \";\",\n \"var\": {\n \"validator\": {\n \"expected_token\": \"var\"\n },\n \"write\": {\n \"terminal\": self._write_terminal_element\n }\n },\n \"type\": {\n \"validator\": {\n \"expected_token\": ('int', 'char', 'boolean', self.tokenizer.IDENTIFIER),\n },\n \"write\": {\n \"terminal\": self._write_terminal_element\n }\n },\n \"varName\": {\n \"validator\": {\n \"expected_token_type\": self.tokenizer.IDENTIFIER\n },\n \"write\": {\n \"terminal\": self._write_terminal_element\n }\n },\n \",\": {\n \"validator\": {\n \"expected_token\": \",\",\n \"expected_token_type\": self.tokenizer.SYMBOL,\n \"optional\": True\n },\n \"write\": {\n \"terminal\": self._write_terminal_element\n }\n },\n \";\": {\n \"validator\": {\n \"expected_token\": \";\",\n \"expected_token_type\": self.tokenizer.SYMBOL\n },\n \"write\": {\n \"terminal\": self._write_terminal_element,\n }\n }\n }\n return grammar_map\n\n def _let_statement_grammar(self):\n grammar_map = {\n \"fixPattern\": (\"let\", \"varName\"),\n \"optionalGroup\": (\"[\", \"expression\", \"]\"),\n \"fixPattern_2\": (\"=\", \"expression\", \";\"),\n \"let\": {\n \"validator\": {\n \"expected_token\": \"let\",\n },\n \"write\": {\n \"terminal\": self._write_terminal_element\n }\n },\n \"varName\": {\n \"validator\": {\n \"expected_token_type\": self.tokenizer.IDENTIFIER\n },\n \"write\": {\n \"terminal\": self._write_terminal_element\n }\n },\n \"[\": {\n \"validator\": {\n \"expected_token\": \"[\",\n \"optional\": True\n },\n \"write\": {\n \"terminal\": self._write_terminal_element\n }\n },\n \"expression\": {\n \"validator\": {\n \"force_pass\": True\n },\n \"write\": {\n \"terminal\": self.compile_expression\n }\n },\n \"]\": {\n \"validator\": {\n \"expected_token\": \"]\"\n },\n \"write\": {\n \"terminal\": self._write_terminal_element\n }\n },\n \"=\": {\n \"validator\": {\n \"expected_token\": \"=\",\n },\n \"write\": {\n \"terminal\": self._write_terminal_element\n }\n },\n \";\": {\n \"validator\": {\n \"expected_token\": \";\",\n \"expected_token_type\": self.tokenizer.SYMBOL\n },\n \"write\": {\n \"terminal\": self._write_terminal_element,\n }\n }\n }\n return grammar_map\n\n def _do_statement_grammar(self):\n grammar_map = {\n \"fixPattern\": (\"do\", \"subroutineCall\", \";\"),\n \"do\": {\n \"validator\": {\n \"expected_token\": \"do\",\n },\n \"write\": {\n \"terminal\": self._write_terminal_element\n }\n },\n \"subroutineCall\": {\n \"validator\": {\n \"expected_token_type\": self.tokenizer.IDENTIFIER,\n },\n \"write\": {\n \"terminal\": self._write_subroutine_call\n }\n },\n \";\": {\n \"validator\": {\n \"expected_token\": \";\",\n \"expected_token_type\": self.tokenizer.SYMBOL\n },\n \"write\": {\n \"terminal\": 
self._write_terminal_element,\n }\n }\n }\n return grammar_map\n\n def _expression_grammar(self):\n grammar_map = {\n \"fixPattern\": \"term\",\n \"optionalSubGroup_1\": (\"(\", \"expression\", \")\"),\n \"optionalSubGroup_2\": (\"unary_op\", \"term\"),\n \"optionalSubGroup_3\": (\"varName\", \"[\", \"expression\", \"]\"),\n \"optionalSubGroup_4\": (\"subroutineName|className|varName\", \".|(\"),\n \"optionalGroup\": (\"op\", \"term\"),\n \"term\": {\n \"validator\": {\n \"force_pass\": True\n },\n \"write\": {\n \"non-terminal\": \"<term>\\n\",\n \"terminal\": self._write_terminal_element,\n \"non-terminal-close\": \"</term>\\n\",\n \"recursive\": True\n },\n \"recursive\": True,\n },\n \"(\": {\n \"validator\": {\n \"expected_token\": \"(\",\n \"optional\": True\n },\n \"write\": {\n \"terminal\": self._write_terminal_element\n }\n },\n \"expression\": {\n \"validator\": {\n \"force_pass\": True\n },\n \"write\": {\n \"terminal\": self.compile_expression\n }\n },\n \")\": {\n \"validator\": {\n \"expected_token\": \")\",\n },\n \"write\": {\n \"terminal\": self._write_terminal_element\n }\n },\n \"unary_op\": {\n \"validator\": {\n \"expected_token\": (\"-\", \"~\"),\n \"optional\": True\n },\n \"write\": {\n \"non-terminal\": \"<term>\\n\",\n \"terminal\": self._write_terminal_element,\n \"non-terminal-close\": \"</term>\\n\"\n }\n },\n \"varName\": {\n \"validator\": {\n \"expected_token_type\": self.tokenizer.IDENTIFIER,\n },\n \"write\": {\n \"terminal\": self._write_terminal_element,\n }\n },\n \"[\": {\n \"validator\": {\n \"expected_token\": \"[\",\n \"optional\": True\n },\n \"write\": {\n \"terminal\": self._write_terminal_element\n }\n },\n \"]\": {\n \"validator\": {\n \"expected_token\": \"]\",\n },\n \"write\": {\n \"terminal\": self._write_terminal_element\n }\n },\n \"subroutineName|className|varName\": {\n \"validator\": {\n \"expected_token_type\": self.tokenizer.IDENTIFIER,\n \"optional\": True\n },\n \"write\": {\n \"terminal\": self._write_terminal_element,\n }\n },\n \".|(\": {\n \"validator\": {\n \"expected_token\": (\".\", \"(\"),\n \"optional\": True\n },\n \"write\": self._write_subroutine_call\n },\n \"op\": {\n \"validator\": {\n \"expected_token\": self.OP,\n \"optional\": True\n },\n \"write\": {\n \"terminal\": self._write_terminal_element\n }\n }\n }\n return grammar_map\n\n def _subroutine_call_grammar(self):\n grammar_map = {\n \"optionalGroup\": (\"className|varName\", \".\"),\n \"fixPattern\": (\"subroutineName\", \"(\", \"expressionList\", \")\"),\n \"className|varName\": {\n \"validator\": {\n \"expected_token_type\": self.tokenizer.IDENTIFIER,\n \"optional\": True\n },\n \"write\": {\n \"terminal\": self._write_terminal_element,\n }\n },\n \".\": {\n \"validator\": {\n \"expected_token\": \".\",\n \"optional\": True\n },\n \"write\": {\n \"terminal\": self._write_terminal_element,\n }\n },\n \"subroutineName\": {\n \"validator\": {\n \"expected_token_type\": self.tokenizer.IDENTIFIER,\n },\n \"write\": {\n \"terminal\": self._write_terminal_element,\n }\n },\n \"(\": {\n \"validator\": {\n \"expected_token\": \"(\",\n },\n \"write\": {\n \"terminal\": self._write_terminal_element,\n }\n },\n \"expressionList\": {\n \"validator\": {\n \"force_pass\": True\n },\n \"write\": {\n \"terminal\": self.compile_expression_list\n }\n },\n \")\": {\n \"validator\": {\n \"expected_token\": \")\",\n },\n \"write\": {\n \"terminal\": self._write_terminal_element,\n }\n },\n }\n return grammar_map\n\n def _expression_list_grammar(self):\n grammar_map = {\n \"optional\": \"expression\",\n 
\"optionalGroup_2\": (\",\", \"expression\"),\n \"expression\": {\n \"validator\": self._match_expression_grammar,\n \"write\": {\n \"terminal\": self.compile_expression\n }\n },\n \",\": {\n \"validator\": {\n \"expected_token\": \",\",\n \"optional\": True\n },\n \"write\": {\n \"terminal\": self._write_terminal_element\n }\n },\n }\n return grammar_map\n\n def _return_grammar(self):\n grammar_map = {\n \"fixPattern\": \"return\",\n \"optional\": \"expression\",\n \"closePattern\": \";\",\n \"return\": {\n \"validator\": {\n \"expected_token\": \"return\",\n },\n \"write\": {\n \"terminal\": self._write_terminal_element\n }\n },\n \"expression\": {\n \"validator\": self._match_expression_grammar,\n \"write\": {\n \"terminal\": self.compile_expression\n }\n },\n \";\": {\n \"validator\": {\n \"expected_token\": \";\",\n },\n \"write\": {\n \"terminal\": self._write_terminal_element,\n }\n }\n }\n return grammar_map\n\n def _if_statement_grammar(self):\n grammar_map = {\n \"fixPattern\": (\"if\", \"(\", \"expression\", \")\", \"{\", \"statements\", \"}\"),\n \"optionalGroup\": (\"else\", \"{\", \"statements\", \"}\"),\n \"if\": {\n \"validator\": {\n \"expected_token\": \"if\",\n },\n \"write\": {\n \"terminal\": self._write_terminal_element\n }\n },\n \"(\": {\n \"validator\": {\n \"expected_token\": \"(\",\n },\n \"write\": {\n \"terminal\": self._write_terminal_element,\n }\n },\n \"expression\": {\n \"validator\": self._match_expression_grammar,\n \"write\": {\n \"terminal\": self.compile_expression\n }\n },\n \")\": {\n \"validator\": {\n \"expected_token\": \")\",\n },\n \"write\": {\n \"terminal\": self._write_terminal_element,\n }\n },\n \"{\": {\n \"validator\": {\n \"expected_token\": \"{\",\n },\n \"write\": {\n \"terminal\": self._write_terminal_element,\n }\n },\n \"statements\": {\n \"validator\": {\n \"expected_token\": self.STATEMENT_KEYWORDS,\n },\n \"write\": {\n \"terminal\": self.compile_statements,\n }\n },\n \"}\": {\n \"validator\": {\n \"expected_token\": \"}\",\n },\n \"write\": {\n \"terminal\": self._write_terminal_element,\n }\n },\n \"else\": {\n \"validator\": {\n \"expected_token\": \"else\",\n \"optional\": True\n },\n \"write\": {\n \"terminal\": self._write_terminal_element\n }\n }\n }\n return grammar_map\n\n def _while_statement_grammar(self):\n grammar_map = {\n \"fixPattern\": (\"while\", \"(\", \"expression\", \")\", \"{\", \"statements\", \"}\"),\n \"while\": {\n \"validator\": {\n \"expected_token\": \"while\",\n },\n \"write\": {\n \"terminal\": self._write_terminal_element\n }\n },\n \"(\": {\n \"validator\": {\n \"expected_token\": \"(\",\n },\n \"write\": {\n \"terminal\": self._write_terminal_element,\n }\n },\n \"expression\": {\n \"validator\": self._match_expression_grammar,\n \"write\": {\n \"terminal\": self.compile_expression\n }\n },\n \")\": {\n \"validator\": {\n \"expected_token\": \")\",\n },\n \"write\": {\n \"terminal\": self._write_terminal_element,\n }\n },\n \"{\": {\n \"validator\": {\n \"expected_token\": \"{\",\n },\n \"write\": {\n \"terminal\": self._write_terminal_element,\n }\n },\n \"statements\": {\n \"validator\": {\n \"expected_token\": self.STATEMENT_KEYWORDS,\n },\n \"write\": {\n \"terminal\": self.compile_statements,\n }\n },\n \"}\": {\n \"validator\": {\n \"expected_token\": \"}\",\n },\n \"write\": {\n \"terminal\": self._write_terminal_element,\n }\n }\n }\n return grammar_map\n\n\ndef _file_translation(source_path):\n tokenizer = Tokenizer(source_path)\n token_writer = Parser(source_path, tokenizer)\n return 
token_writer.compile_class()\n\n\ndef _dir_translation(source_path):\n for file in source_path.iterdir():\n if file.suffix == \".jack\":\n tokenizer = Tokenizer(infile=file)\n token_writer = Parser(source_path=file, tokenizer=tokenizer)\n token_writer.compile_class()\n return\n\n\ndef main(source):\n source_path = Path(source)\n translate_method = _file_translation if source_path.is_file() else _dir_translation\n return partial(translate_method, source_path)()\n\n\nif __name__ == '__main__':\n input_path = sys.argv[1]\n main(source=input_path)","sub_path":"projects/10/submission_folder/JackAnalyzer.py","file_name":"JackAnalyzer.py","file_ext":"py","file_size_in_byte":54652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"362101595","text":"__author__ = 'Huangcius-HL'\n\n# coding:utf-8\n\n# import the requests module\nimport requests\n\n# import the json module\nimport json\n\n# define a custom payload\npayload = {\"yoyo\": \"hello world\",\n \"pythonQQ群\": \"123456789\"}\n\n# convert the custom payload to a JSON string\ndata_json = json.dumps(payload)\n\n# call the API (passing the dict via json= sends a JSON-formatted request; passing the pre-dumped string would double-encode it)\nr = requests.post('http://httpbin.org/post', json=payload)\n\n# print the text of the API response\nprint(r.text)","sub_path":"带参数Json格式Post请求.py","file_name":"带参数Json格式Post请求.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"561753717","text":"# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\nclass Solution:\n def increasingBST(self, root: TreeNode) -> TreeNode:\n def dfs(node):\n if not node.left and not node.right: return node\n head = None\n if node.right:\n node.right = dfs(node.right)\n if node.left:\n head = dfs(node.left)\n node.left = None\n cur = head\n while cur.right:\n cur = cur.right\n cur.right = node\n return head\n else:\n return node\n return dfs(root)\nt = TreeNode(2)\nt.left = TreeNode(1)\ntt = Solution().increasingBST(t)\nprint(tt.val, tt.right.val)\n","sub_path":"leetcode/python/897_increasing-order-search-tree2.py","file_name":"897_increasing-order-search-tree2.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"178581090","text":"# Wrapper function for initiating a new verification with Telnyx API\nimport requests\nimport config\nimport os\n\ndef CreateVerification(phone_number):\n url = \"https://api.telnyx.com/v2/verifications\"\n auth = \"Bearer \" + os.getenv(\"API_KEY\")\n headers = {\n \"Authorization\": auth,\n \"Content-Type\": \"application/json\",\n \"Accept\": \"application/json\"\n }\n payload = {\n \"phone_number\": phone_number,\n \"verify_profile_id\": os.getenv(\"VERIFY_KEY\"),\n \"type\": \"sms\",\n \"timeout\": 300\n }\n r = requests.post(url, headers=headers, json=payload)\n return r\n\n# Wrapper function for submitting a new verification code with Telnyx API\ndef SubmitVerificationCode(code, phone_number):\n url = \"https://api.telnyx.com/v2/verifications/by_phone_number/\" + phone_number + \"/actions/verify\"\n auth = \"Bearer \" + os.getenv(\"API_KEY\")\n headers = {\n \"Authorization\": auth,\n \"Content-Type\": \"application/json\",\n \"Accept\": \"application/json\"\n }\n payload = {\n \"code\": code\n }\n r = requests.post(url, headers=headers, json=payload)\n return 
r","sub_path":"flask-2fa/telnyx_wrappers.py","file_name":"telnyx_wrappers.py","file_ext":"py","file_size_in_byte":1129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"605836744","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Mar 28 19:39:32 2018\n\n@author: admin\n\"\"\"\n\nfrom mxnet.gluon import nn\nfrom mxnet import nd\nimport seaborn as sns\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import rc\nimport sys\nsys.path.append('..')\nimport utils\nfrom mxnet import autograd\nfrom mxnet import gluon\nimport sys\nsys.path.append('..')\n\nimport utils\n\n\n#net = nn.Sequential()\n#batch_size = 256\n#train_data, test_data = utils.load_data_fashion_mnist(batch_size)\n#\n#'''多层感知机已经在之前章节里介绍。与之前章节不同,这里我们定义一个包含两个隐含层的模型,\n#两个隐含层都输出256个节点。我们定义激活函数Relu并直接使用Gluon提供的交叉熵损失函数。'''\n#\n##含两个隐藏层的多层感知机\n#num_inputs=28*28\n#num_outputs = 10\n#\n#num_hidden1 = 256\n#num_hidden2 = 256\n#num_hidden3 = 256\n#num_hidden4 = 256\n#weight_scale = .01\n#\n#W1 = nd.random_normal(shape=(num_inputs, num_hidden1), scale=weight_scale)\n#b1 = nd.zeros(num_hidden1)\n#\n#W2 = nd.random_normal(shape=(num_hidden1, num_hidden2), scale=weight_scale)\n#b2 = nd.zeros(num_hidden2)\n#\n#W3 = nd.random_normal(shape=(num_hidden2, num_hidden3), scale=weight_scale)\n#b3 = nd.zeros(num_hidden3)\n#\n#W4 = nd.random_normal(shape=(num_hidden3, num_hidden4), scale=weight_scale)\n#b4 = nd.zeros(num_hidden4)\n#\n#W5 = nd.random_normal(shape=(num_hidden4, num_outputs), scale=weight_scale)\n#b5 = nd.zeros(num_outputs)\n#\n#\n#\n#params = [W1, b1, W2, b2, W3, b3, W4, b4, W5, b5]\n#\n#for param in params:\n# param.attach_grad()\n# \n#drop_prob1 =0.5\n#drop_prob2 =0.5\n#drop_prob3 =0.5\n#drop_prob4 =0.2\n#\"\"\"\n#Acc1:0.2->0.2->0.2->0.2\n#Acc2:0.2->0.2->0.2->0.5\n#Acc3:0.2->0.2->0.5->0.5\n#Acc4:0.2->0.5->0.5->0.5\n#Acc5:0.5->0.5->0.5->0.5\n#Acc6:0.5->0.2->0.2->0.2\n#Acc7:0.5->0.5->0.2->0.2\n#Acc8:0.5->0.5->0.5->0.2\n#Acc9:0->0->0.2->0.5\n#Acc10:0->0->0.5->0.2\n#\"\"\"\n#with net.name_scope():\n# net.add(nn.Flatten())\n# # 第一层全连接。\n# net.add(nn.Dense(256, activation=\"relu\"))\n# # 在第一层全连接后添加丢弃层。\n# net.add(nn.Dropout(drop_prob1))\n## 第二层全连接。\n# net.add(nn.Dense(256, activation=\"relu\"))\n# # 在第二层全连接后添加丢弃层。\n# net.add(nn.Dropout(drop_prob2))\n## 第三层全连接\n# net.add(nn.Dense(256,activation='relu'))\n## 在第三层全连接后添加丢弃层\n# net.add(nn.Dropout(drop_prob3))\n## 第四层全连接\n# net.add(nn.Dense(256,activation='relu'))\n## 在第四层全连接后添加丢弃层\n# net.add(nn.Dropout(drop_prob4))\n# net.add(nn.Dense(10))\n#net.initialize()\n#\n#\n#\n#batch_size = 256\n#train_data, test_data = utils.load_data_fashion_mnist(batch_size)\n#\n#softmax_cross_entropy = gluon.loss.SoftmaxCrossEntropyLoss()\n#trainer = gluon.Trainer(net.collect_params(),\n# 'sgd', {'learning_rate': 0.5})\n#Test_acc_8=[]\n#Test_acc_8=[]\n#Cost_8=[]\n#learning_rate = .5\n#for epoch in range(30):\n# train_loss = 0.\n# Test_acc = 0.\n# for data, label in train_data:\n# with autograd.record():\n# output = net(data)\n# loss = softmax_cross_entropy(output, label)\n# loss.backward()\n# trainer.step(batch_size)\n# train_loss += nd.mean(loss).asscalar()\n# Test_acc += utils.accuracy(output, label)\n#\n# Test_acc_0 = utils.evaluate_accuracy(test_data, net)\n# Test_acc_8.append(Test_acc_0)\n# Test_acc_8.append(Test_acc/len(train_data))\n# Cost_8.append(train_loss/len(train_data))\n# print(\"Epoch %d. 
Loss: %f, Train acc %f, Test acc %f\" % (\n# epoch, train_loss/len(train_data),\n# Test_acc/len(train_data), Test_acc_0))\n\n\n# ============================================================================\n#sns.set_style(\"darkgrid\")\n# sns.boxplot(data=data, palette=\"deep\")\n# sns.despine(left=True) # remove the left spine\n# st = sns.axes_style(\"darkgrid\")\n# \n# ============================================================================\nsns.set_style(\"white\")\nsns.set_style(\"whitegrid\")\nsns.set_style(\"ticks\") \nsns.set_context(\"paper\")\n\"\"\"\nThere are four preset contexts, ordered by relative size: paper, notebook, talk, and poster.\nThe default scale is notebook, which all of the charts above use.\n\"\"\"\n#sns.despine(offset=10, trim=True)\n#sns.despine(left=True)#remove the left spine\n#sns.despine()\n\nrc('font',**{'family':'sans-serif','sans-serif':['Times New Roman'],'size':14})\nparams={'text.usetex':False,'mathtext.fontset':'stixsans'}\nplt.rcParams.update(params)\nfig,ax=plt.subplots(num=1,figsize=(4.5,3))\nplt.subplots_adjust(right=0.99,left=0.125,bottom=0.15,top=0.975)\n\n#ax.spines['right'].set_visible(False)\n#ax.spines['top'].set_visible(False)\n\n#ax.grid(True, linestyle = \"--\", color = \"k\", linewidth = \"0.6\")\nax.xaxis.grid(True,which='major',lw=0.5,linestyle='--',color='0.3',zorder=1)\nax.yaxis.grid(True,which='major',lw=0.5,linestyle='--',color='0.3',zorder=1)\n\n#plt.plot(data.loc[:,'Step'],data.loc[:,'Value'],'c>')\nepoch_=np.linspace(0,30,30)\n\n\nplt.plot(epoch_,Test_acc_1,'--',lw=1.5,label=\"1\",color='c')\nplt.plot(epoch_,Test_acc_2,'--',lw=1.5,label=\"2\",color='m')\nplt.plot(epoch_,Test_acc_3,'--',lw=1.5,label=\"3\",color='g')\nplt.plot(epoch_,Test_acc_4,'--',lw=1.5,label=\"4\")\nplt.plot(epoch_,Test_acc_5,'--',lw=1.5,label=\"5\",)\nplt.plot(epoch_,Test_acc_6,'--',lw=1.5,label=\"6\")\nplt.plot(epoch_,Test_acc_7,'--',lw=1.5,label=\"7\",color='b')\nplt.plot(epoch_,Test_acc_8,'--',lw=1.5,label=\"8\",color='y')\n\nplt.legend(loc='lower left',ncol=2)\nplt.xlabel(\"Epoch\",)\nplt.ylabel(\"Accuracy\")\n\n\nleft, bottom, width, height = 0.55, 0.35, 0.4, 0.4\nax2 = fig.add_axes([left, bottom, width, height]) \nax2.plot(epoch_[20:30],Test_acc_1[20:30],'--',lw=1,color='c')\nax2.plot(epoch_[20:30],Test_acc_2[20:30],'--',lw=1,color='m')\nax2.plot(epoch_[20:30],Test_acc_3[20:30],'--',lw=1,color='g')\n#ax2.plot(epoch_[20:30],Test_acc_4[20:30],'--',lw=1,)\n#ax2.plot(epoch_[20:30],Test_acc_5[20:30],'--',lw=1,)\n#ax2.plot(epoch_[20:30],Test_acc_6[20:30],'--',lw=1,)\nax2.plot(epoch_[20:30],Test_acc_7[20:30],'--',lw=1,color='b')\nax2.plot(epoch_[20:30],Test_acc_8[20:30],'--',lw=1,color='y')\n\nax2.set_yticks(np.arange(0.87,0.90,0.005))\nax2.set_xticks(np.arange(20,31,1))\n \n\n\n\n\n\n#plt.plot(epoch_,Test_acc_7,'--',color='lightseagreen',lw=1.5,label=\"drop_prob(0.2->0.2->0.2->0.2) Test\")\n#plt.plot(epoch_,Test_acc_2,'--',lw=1,label=\"drop_prob(0.2,0.2,0.5,0.5)\")\n#plt.plot(epoch_,Test_acc_3,'--',lw=1,label=\"drop_prob(0.2->0.2->0.2->0.5)\")\n#plt.annotate(\"[ 21 ,%.2f ]\"%Test_acc_8[21],xy=(21,Test_acc_8[21]),xytext=(25,0.85),arrowprops=dict(arrowstyle=\"->\",connectionstyle=\"arc3\"))\n\n\n\n#plt.legend(loc='lower right')\n#plt.xlabel(\"Epoch\",)\n#plt.ylabel(\"Accuracy\")\n#plt.yticks(np.arange(0.5,0.95,0.02))\n\n#plt.grid(True, linestyle = \"--\", color = \"k\", linewidth = \"0.6\")\n#plt.grid(True)\n#sns.despine()\n#plt.suptitle('Train accuracy')\nplt.show()\n#plt.grid(True, linestyle = \"--\", color = \"g\", linewidth = \"3\") 
\n\nfig.savefig('drop_prob_test_1.png',dpi=1000)","sub_path":"chapter-3/Dropout/dropout_3.py","file_name":"dropout_3.py","file_ext":"py","file_size_in_byte":6965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"360456054","text":"# import the necessary packages\nfrom scipy.spatial import distance as dist\nfrom imutils import face_utils\nfrom threading import Thread\nfrom util.activity_tracker import ActivityTracker\nimport numpy as np\nimport playsound\nimport argparse\nimport imutils\nimport time\nimport dlib\nimport cv2\nimport requests, json\n\n# Check if a point is inside a rectangle\ndef rect_contains(rect, point) :\n if point[0] < rect[0] :\n return False\n elif point[1] < rect[1] :\n return False\n elif point[0] > rect[2] :\n return False\n elif point[1] > rect[3] :\n return False\n return True\n\n# Draw delaunay triangles\ndef draw_delaunay(img, subdiv, delaunay_color ) :\n triangleList = subdiv.getTriangleList()\n size = img.shape\n r = (0, 0, size[1], size[0])\n \n for t in triangleList :\n pt1 = (t[0], t[1])\n pt2 = (t[2], t[3])\n pt3 = (t[4], t[5])\n\n if rect_contains(r, pt1) and rect_contains(r, pt2) and rect_contains(r, pt3) : \n cv2.line(img, pt1, pt2, delaunay_color, 1)\n cv2.line(img, pt2, pt3, delaunay_color, 1)\n cv2.line(img, pt3, pt1, delaunay_color, 1)\n\ndef eye_aspect_ratio(eye):\n\t# compute the euclidean distances between the two sets of\n\t# vertical eye landmarks (x, y)-coordinates\n\tA = dist.euclidean(eye[1], eye[5])\n\tB = dist.euclidean(eye[2], eye[4])\n\n\t# compute the euclidean distance between the horizontal\n\t# eye landmark (x, y)-coordinates\n\tC = dist.euclidean(eye[0], eye[3])\n\n\t# compute the eye aspect ratio\n\tear = (A + B) / (2.0 * C)\n\n\t# return the eye aspect ratio\n\treturn ear\n \n# define two constants, one for the eye aspect ratio to indicate\n# blink and then a second constant for the number of consecutive\n# frames the eye must be below the threshold for to set off the\n# alarm\nEYE_AR_THRESH = 0.27\nEYE_AR_CONSEC_FRAMES = 48\n\n# initialize the frame counter as well as a boolean used to\n# indicate if the alarm is going off\nCOUNTER = 0\nALARM_ON = False\n\nshape_predictor_path = r\"C:\\Users\\Stefa\\Desktop\\nwHacks2020\\nwHacks-2020-Back-End\\computer-vision\\shape_predictor_68_face_landmarks.dat\"\n\n# initialize dlib's face detector (HOG-based) and then create\n# the facial landmark predictor\nprint(\"[INFO] loading facial landmark predictor...\")\ndetector = dlib.get_frontal_face_detector()\npredictor = dlib.shape_predictor(shape_predictor_path)\n\n# grab the indexes of the facial landmarks for the left and\n# right eye, respectively\n# (nStart, nEnd) = face_utils.FACIAL_LANDMARKS_IDXS[\"nose_tip\"]\n# (cStart, cEnd) = face_utils.FACIAL_LANDMARKS_IDXS[\"chin\"]\n(lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS[\"left_eye\"]\n(rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS[\"right_eye\"]\n# (lmStart, lmEnd) = face_utils.FACIAL_LANDMARKS_IDXS[\"left_mouth_corner\"]\n# (rmStart, rmEnd) = face_utils.FACIAL_LANDMARKS_IDXS[\"right_mouth_corner\"]\n\n# start the video stream thread\nprint(\"[INFO] starting video stream thread...\")\n# vs = VideoStream(src=args[\"webcam\"]).start()\ncap = cv2.VideoCapture(0)\ntime.sleep(1.0)\nlast_iteration = time.time()\n\ntracker = ActivityTracker()\n\n# loop over frames from the video stream\nwhile True:\n\t# grab the frame from the threaded video file stream, resize\n\t# it, and convert it to grayscale\n\t# channels)\n\tret, frame = 
cap.read(0)\n\t# frame = imutils.resize(frame, width=450)\n\tgray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\t\n\tsize = frame.shape\n\t\n\t# detect faces in the grayscale frame\n\trects = detector(gray, 0)\n\n\t# loop over the face detections\n\tfor rect in rects:\n\t\t# determine the facial landmarks for the face region, then\n\t\t# convert the facial landmark (x, y)-coordinates to a NumPy\n\t\t# array\n\t\tshape = predictor(gray, rect)\n\t\tshape = face_utils.shape_to_np(shape)\n\t\t\n\n\t\t# extract the left and right eye coordinates, then use the\n\t\t# coordinates to compute the eye aspect ratio for both eyes\n\t\timage_points = np.array([\n\t\t\t\t\t\t\t\t\t(shape[33, :]), # Nose tip\n\t\t\t\t\t\t\t\t\t(shape[8, :]), # Chin\n\t\t\t\t\t\t\t\t\t(shape[36, :]), # Left eye left corner\n\t\t\t\t\t\t\t\t\t(shape[45, :]), # Right eye right corne\n\t\t\t\t\t\t\t\t\t(shape[48, :]), # Left Mouth corner\n\t\t\t\t\t\t\t\t\t(shape[54, :]) # Right mouth corner\n\t\t\t\t\t\t\t\t], dtype=\"double\")\n\t\t\n\t\tmany_points = np.array([(shape[i,:]) for i in range(68)], dtype=\"double\")\n\t\t\t\t\t\t\t\t# \t(shape[0, :]), \n\t\t\t\t\t\t\t\t# \t(shape[3, :]), \n\t\t\t\t\t\t\t\t# \t(shape[13, :]), \n\t\t\t\t\t\t\t\t# \t(shape[16, :]), \n\t\t\t\t\t\t\t\t# \t(shape[29, :]), \n\t\t\t\t\t\t\t\t# \t(shape[30, :]), \n\t\t\t\t\t\t\t\t# \t(shape[17, :]), \n\t\t\t\t\t\t\t\t# \t(shape[21, :]), \n\t\t\t\t\t\t\t\t# \t(shape[26, :]), \n\t\t\t\t\t\t\t\t# \t(shape[22, :]), \n\t\t\t\t\t\t\t\t# \t(shape[33, :]), # Nose tip\n\t\t\t\t\t\t\t\t# \t(shape[8, :]), # Chin\n\t\t\t\t\t\t\t\t# \t(shape[36, :]), # Left eye left corner\n\t\t\t\t\t\t\t\t# \t(shape[45, :]), # Right eye right corne\n\t\t\t\t\t\t\t\t# \t(shape[48, :]), # Left Mouth corner\n\t\t\t\t\t\t\t\t# \t(shape[54, :]) # Right mouth corner\n\t\t\t\t\t\t\t\t# ], dtype=\"double\")\n\t\t# 3D model points.\n\t\tmodel_points = np.array([\n\t\t\t\t\t\t\t\t\t(0.0, 0.0, 0.0), # Nose tip\n\t\t\t\t\t\t\t\t\t(0.0, -330.0, -65.0), # Chin\n\t\t\t\t\t\t\t\t\t(-225.0, 170.0, -135.0), # Left eye left corner\n\t\t\t\t\t\t\t\t\t(225.0, 170.0, -135.0), # Right eye right corne\n\t\t\t\t\t\t\t\t\t(-150.0, -150.0, -125.0), # Left Mouth corner\n\t\t\t\t\t\t\t\t\t(150.0, -150.0, -125.0) # Right mouth corner \n\t\t\t\t\t\t\t\t])\n\n\t\t# Camera internals\n\t\tfocal_length = size[1]\n\t\tcenter = (size[1]/2, size[0]/2)\n\t\tcamera_matrix = np.array(\n\t\t\t\t\t\t\t\t[[focal_length, 0, center[0]],\n\t\t\t\t\t\t\t\t[0, focal_length, center[1]],\n\t\t\t\t\t\t\t\t[0, 0, 1]], dtype = \"double\"\n\t\t\t\t\t\t\t\t)\n\t\t# print (\"Camera Matrix :\\n {0}\".format(camera_matrix))\n\n\t\tdist_coeffs = np.zeros((4,1)) # Assuming no lens distortion\n\t\t(success, rotation_vector, translation_vector) = cv2.solvePnP(model_points, image_points, camera_matrix, dist_coeffs, flags=cv2.SOLVEPNP_ITERATIVE)\n\t\t# print (\"Rotation Vector:\\n {0}\".format(rotation_vector))\n\t\t# print (\"Translation Vector:\\n {0}\".format(translation_vector))\n\t\t\n\t\t# Project a 3D point (0, 0, 1000.0) onto the image plane.\n\t\t# We use this to draw a line sticking out of the nose\n\t\t(nose_end_point2D, jacobian) = cv2.projectPoints(np.array([(0.0, 0.0, 500.0)]), rotation_vector, translation_vector, camera_matrix, dist_coeffs)\n\t\t\n\t\tfor p in many_points:\n\t\t\tcv2.circle(frame, (int(p[0]), int(p[1])), 2, (0,255,0), -1)\n\n\t\tp1 = ( int(image_points[0][0]), int(image_points[0][1]))\n\t\tp2 = ( int(nose_end_point2D[0][0][0]), int(nose_end_point2D[0][0][1]))\n\t\t\n\t\tdiff = (p1[0] - p2[0], p1[1] - 
p2[1])\n\t\tsq_dist = diff[0] * diff[0] + diff[1] * diff[1]\n\t\t\n\t\tif sq_dist > 6000:\n\t\t\ttracker.start_activity(\"Distracted\")\n\t\t\tcv2.putText(frame, \"DISTRACTION ALERT!\", (10, 60),\n\t\t\t\t\tcv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)\n\t\telse:\n\t\t\ttracker.start_activity(\"Focused\")\n\t\tcv2.arrowedLine(frame, p1, p2, (0,255,0), 2) \n\t\t\n\t\tleftEye = shape[lStart:lEnd]\n\t\trightEye = shape[rStart:rEnd]\n\n\t\tleftEAR = eye_aspect_ratio(leftEye)\n\t\trightEAR = eye_aspect_ratio(rightEye)\n\n\t\t# average the eye aspect ratio together for both eyes\n\t\tear = (leftEAR + rightEAR) / 2.0\n\n\t\t# check to see if the eye aspect ratio is below the blink\n\t\t# threshold, and if so, increment the blink frame counter\n\t\tif ear < EYE_AR_THRESH:\n\t\t\tCOUNTER += 1\n\t\t\tif COUNTER >= EYE_AR_CONSEC_FRAMES:\n\t\t\t\tif not ALARM_ON:\n\t\t\t\t\tALARM_ON = True\n\t\t\t\ttracker.start_activity(\"Drowsy\")\n\t\t\t\tcv2.putText(frame, \"DROWSINESS ALERT!\", (10, 30),\n\t\t\t\t\tcv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)\n\t\telse:\n\t\t\ttracker.start_activity(\"Focused\")\n\t\t\tCOUNTER = 0\n\t\t\tALARM_ON = False\n\n\t\t# draw the computed eye aspect ratio on the frame to help\n\t\t# with debugging and setting the correct eye aspect ratio\n\t\t# thresholds and frame counters\n\t\tcv2.putText(frame, \"Eye Aspect Ratio: {:.2f}\".format(ear), (330, 420),\n\t\t\tcv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)\n\n\t\tcv2.putText(frame, \"Head Vector: {0}\".format(rotation_vector), (80, 450),\n\t\t\tcv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)\n\n\t\ttry:\n\t\t\tdelauney_rect = (0, 0, size[1], size[0])\n\t\t\tsubdiv = cv2.Subdiv2D(delauney_rect)\n\t\t\t\n\t\t\tfor p in many_points:\n\t\t\t\tsubdiv.insert((int(p[0]), int(p[1])))\n\t\t\t\n\t\t\tdraw_delaunay(frame, subdiv, (0, 100, 0))\n\t\texcept:\n\t\t\tprint(\"error drawing delaunay\")\n\t\t\n\t\t# compute the convex hull for the left and right eye, then\n\t\t# visualize each of the eyes\n\t\tleftEyeHull = cv2.convexHull(leftEye)\n\t\trightEyeHull = cv2.convexHull(rightEye)\n\t\tcv2.drawContours(frame, [leftEyeHull], -1, (0, 0, 128), 1)\n\t\tcv2.drawContours(frame, [rightEyeHull], -1, (0, 0, 128), 1)\n\n\tif time.time() - last_iteration > 10:\n\t\ttry:\n\t\t\turl = r\"http://localhost:5000/data\"\n\t\t\theaders = {'Content-type': 'application/json'}\n\t\t\tr = requests.post(url, data=json.dumps(tracker.get_activities_dict()), headers=headers)\n\t\t\tprint(\"Posted\")\n\t\texcept:\n\t\t\tprint(\"Failed to post\")\n\n\t\ttracker.clear()\n\t\tlast_iteration = time.time()\n\n\t# show the frame\n\tcv2.imshow(\"Frame\", frame)\n\tkey = cv2.waitKey(1) & 0xFF\n \n\t# if the `q` key was pressed, break from the loop\n\tif key == ord(\"q\"):\n\t\tbreak\n\n# do a bit of cleanup\ncv2.destroyAllWindows()\ncap.release()","sub_path":"computer-vision/har_with_posting.py","file_name":"har_with_posting.py","file_ext":"py","file_size_in_byte":8981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"384397122","text":"#!/usr/bin/env python\n\nimport random\nimport glob\n\nclass Word:\n \"\"\"Represents the word that a user is trying to guess\"\"\"\n def __init__(self, content):\n self.__content = content\n self.user_facing = self.hide_content()\n\n def hide_content(self):\n content = list(self.__content)\n\n for index, character in enumerate(content):\n if content[index] != ' ':\n content[index] = '-'\n \n hidden = \"\".join(content)\n return hidden\n\n def find(self, guess_letter):\n return 
[i for i, ltr in enumerate(self.__content) if ltr == guess_letter]\n\n    def revealLettersForCharacters(self, arrayOfIndices):\n        for i in arrayOfIndices:\n            self.user_facing = self.replaceCharAtIndexWith(i, self.__content[i])\n\n    def replaceCharAtIndexWith(self, index, replacement):\n        # splice the revealed letter into the masked string, keeping the tail intact\n        return self.user_facing[:index] + replacement + self.user_facing[index + 1:]\n\n    def testGuess(self, guess):\n        hits = self.find(guess)\n        if len(hits) > 0:\n            self.revealLettersForCharacters(hits)\n            return True\n        else:\n            return False\n\nclass Hangman:\n    \"\"\"A class to represent the game and set up the board and word to guess\"\"\"\n    def __init__(self):\n        self.board = Board()\n        self.answer = Word('giblets')\n        self.userHasWon = False\n        self.guesses = []\n\n    def printState(self):\n        print(self.board.draw())\n        print(self.guesses)\n        print(self.answer.user_facing)\n\n    def invalidUserGuess(self, guess):\n        return self.duplicateGuess(guess) or self.notSingleLetter(guess)\n\n    def duplicateGuess(self, guess):\n        if guess in self.guesses:\n            print('You already guessed that')\n            return True\n\n    def notSingleLetter(self, guess):\n        if len(guess) != 1:\n            print('Guess should be a single character')\n            return True\n\n    def start(self):\n        while not self.userHasWon:\n            guess = input('Guess a Letter: ')\n\n            if self.invalidUserGuess(guess):\n                continue\n            self.guesses.append(guess)\n\n            if self.answer.testGuess(guess):\n                print(self.answer.user_facing)\n            else:\n                self.board.increment()\n                print(self.answer.user_facing)\n\n            self.printState()\n\n            # the word is fully revealed once no placeholder dashes remain\n            if '-' not in self.answer.user_facing:\n                self.userHasWon = True\n                print('You won!')\n\n            if self.board.hasLost():\n                print('You lost!')\n                break\n\nclass Board:\n    \"\"\"Represents a hanging man\"\"\"\n    BOARD_PIC_FILES = glob.glob('files/*.txt')\n    BOARD_PICS = []\n\n    def __init__(self):\n        self.progress = 0\n        for index in range(len(self.BOARD_PIC_FILES)):\n            board_pic_file_name = self.BOARD_PIC_FILES[index]\n            board_pic_file = open(board_pic_file_name, 'r')\n            board_pic_file_contents = ''.join(board_pic_file.read())\n            self.BOARD_PICS.append(board_pic_file_contents)\n            board_pic_file.close()\n\n    def draw(self):\n        return self.BOARD_PICS[self.progress]\n\n    def hasLost(self):\n        return self.progress == len(self.BOARD_PIC_FILES) - 1\n\n    def increment(self):\n        if (self.progress < len(self.BOARD_PIC_FILES) - 1):\n            self.progress += 1\n\nHangman().start()\n","sub_path":"hangman/hangman.py","file_name":"hangman.py","file_ext":"py","file_size_in_byte":3134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"465645892","text":"from tastypie import authorization\nfrom tastypie.authentication import MultiAuthentication\nfrom tastypie.exceptions import BadRequest\n\nfrom crits.events.event import Event, EventType\nfrom crits.events.handlers import add_new_event\nfrom crits.core.api import CRITsApiKeyAuthentication, CRITsSessionAuthentication\nfrom crits.core.api import CRITsSerializer, CRITsAPIResource\n\n\nclass EventResource(CRITsAPIResource):\n    \"\"\"\n    Class to handle everything related to the Event API.\n\n    Currently supports GET and POST.\n    \"\"\"\n\n    class Meta:\n        object_class = Event\n        allowed_methods = ('get', 'post')\n        resource_name = \"events\"\n        authentication = MultiAuthentication(CRITsApiKeyAuthentication(),\n                                             CRITsSessionAuthentication())\n        authorization = authorization.Authorization()\n        serializer = CRITsSerializer()\n\n    def 
get_object_list(self, request):\n        \"\"\"\n        Use the CRITsAPIResource to get our objects but provide the class to get\n        the objects from.\n\n        :param request: The incoming request.\n        :type request: :class:`django.http.HttpRequest`\n        :returns: Resulting objects in the specified format (JSON by default).\n        \"\"\"\n\n        return super(EventResource, self).get_object_list(request, Event)\n\n    def obj_create(self, bundle, **kwargs):\n        \"\"\"\n        Handles creating Events through the API.\n\n        :param bundle: Bundle containing the information to create the Event.\n        :type bundle: Tastypie Bundle object.\n        :returns: Bundle object.\n        :raises BadRequest: If a required field is missing, the event type is\n                            invalid, or creation fails.\n        \"\"\"\n\n        analyst = bundle.request.user.username\n        title = bundle.data.get('title', None)\n        description = bundle.data.get('description', None)\n        event_type = bundle.data.get('event_type', None)\n        source = bundle.data.get('source', None)\n        method = bundle.data.get('method', None)\n        reference = bundle.data.get('reference', None)\n        date = bundle.data.get('date', None)\n        bucket_list = bundle.data.get('bucket_list', None)\n        ticket = bundle.data.get('ticket', None)\n\n        if not title or not event_type or not source:\n            raise BadRequest('Must provide a title, event_type, and source.')\n        et = EventType.objects(name=event_type).first()\n        if not et:\n            raise BadRequest('Not a valid Event Type.')\n\n        result = add_new_event(title,\n                               description,\n                               event_type,\n                               source,\n                               method,\n                               reference,\n                               date,\n                               analyst,\n                               bucket_list,\n                               ticket)\n        if result['success']:\n            return bundle\n        else:\n            raise BadRequest(str(result['message']))\n","sub_path":"crits/events/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":2987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"106732653","text":"from colorama import init, Fore, Back, Style\nfrom os import system\nfrom tkinter import *\nimport tkinter.messagebox as messagebox\n# ініціалізація модуля colorama\ninit(autoreset=True)\n\n\n# IViewable -- інтерфейс для збору інформації від об'єктів, що\n# відображатимуться на екрані\nclass IViewable():\n\n    def getRenderInfo(self):\n        \"\"\"Збирає інформацію для виведення на екран.\"\"\"\n        pass\n\n\n# IRender -- інтерфейс для відображення об'єктів на екран\nclass IRender():\n    # TO DELETE\n    def renderWhileArrangingShips(self, field, playerName):\n        \"\"\"Рендерить екран розстановки кораблів.\"\"\"\n        pass\n\n    def render(self, field, fieldEnemy, showall=False):\n        \"\"\"Рендерить ігрові поля гравців.\"\"\"\n        pass\n\n    def renderScreenEnterName(self, numOfPlayers):\n        \"\"\"Рендерить екран вводу імені гравців.\"\"\"\n        pass\n\n    def renderScreenPrepareForTurn(self, playerName):\n        \"\"\"Рендерить екран передачі ходу іншому гравцю.\"\"\"\n        pass\n\n    def printText(self, message):\n        \"\"\"Виводить текст на екран.\"\"\"\n        pass\n\n    def inputCoords(self):\n        \"\"\"Чекає, коли гравець введе координати клітинки, повертає їх.\"\"\"\n        pass\n\n    def inputEnter(self):\n        \"\"\"Чекає, коли гравець натисне Enter.\"\"\"\n        pass\n\n    def printError(self, e):\n        \"\"\"Виводить повідомлення про помилку на екран.\"\"\"\n        pass\n\n    def mainMenu(self):\n        \"\"\"Рендерить головне меню.\"\"\"\n        pass\n\n\n# Graphic -- клас для відображення об'єктів графічним інтерфейсом\nclass Graphic(IRender):\n\n    def __init__(self):\n        \"\"\"Створює графічне вікно, в якому буде вимальовуватись гра.\"\"\"\n        self.root = Tk()\n        self.root.title(\"Морський бій\")\n        self.root.geometry(\"800x470+100+100\")\n        self.backgroundImg = 
PhotoImage(file='../other_files/rsz_background.gif')\n\n # прапорець, що показує, чи натиснули на якусь кнопку\n self.clicked = False\n # атрибут для даних, отриманих від гравця\n self.enteredData = None\n\n #ширина рабочего поля\n self.width = 800\n #высота рабочего поля\n self.height = 400\n #self.messageFrameHeight = 50\n #self.buttonFrameHeight = 50\n #цвет фона холста\n self.bg = \"white\"\n #отступ между ячейками\n self.indent = 2\n #размер одной из сторон квадратной ячейки\n self.gauge = 32\n #смещение по y (отступ сверху)\n self.offset_y = 40\n #смещение по x пользовательского поля\n self.offset_x_user = 30\n #смещение по x поля компьютера\n self.offset_x_comp = 430\n\n #ініціалізація контейнерів для віджетів\n self.frame = Frame(self.root)\n self.frame.pack()\n self.messageFrame = Frame(self.frame)\n #self.messageFrame['height'] = self.messageFrameHeight\n self.messageFrame.pack()\n self.buttonsFrame = Frame(self.frame)\n self.buttonsFrame.pack()\n #self.menuFrame = Frame(self.root)\n \n # ініціалізація поля для малювання\n self.canv = Canvas(self.root)\n self.canv[\"height\"] = self.height\n self.canv[\"width\"] = self.width\n self.canv[\"bg\"] = self.bg\n self.canv.pack()\n\n\n def printError(self, e):\n \"\"\"Виводить повідомлення про помилку на екран.\"\"\"\n messagebox.showerror(\"Ой!\", e)\n\n\n def printText(self, text):\n \"\"\"Виводить текст на екран.\"\"\"\n # видаляємо контейнер зі старим текстом\n self.messageFrame.destroy()\n # і створюємо контейнер для нового\n self.messageFrame = Frame(self.frame)\n self.messageFrame.pack(side = BOTTOM)\n message = Label(self.messageFrame, text=text)\n message.pack(side = LEFT)\n # рендеринг\n self.root.update_idletasks()\n self.root.update()\n \n \n def clickOnCellHandler(self, event):\n \"\"\"Повертає координати натиснутої клітинки.\n Спрацьовує, коли натиснули на клітинку.\"\"\"\n letters = 'abcdefghij'\n \n for i in range(10):\n for j in range(10):\n # обчислюємо координату верхнього лівого кута кожної клітинки \n xn = j*self.gauge + (j+1)*self.indent + self.offset_x_comp\n yn = i*self.gauge + (i+1)*self.indent + self.offset_y\n xk = xn + self.gauge\n yk = yn + self.gauge\n # з'ясовуємо, на яку саме клітинку клікнув користувач\n if event.x >= xn and event.x <= xk and event.y >= yn and event.y <= yk:\n letter = letters[j]\n self.clicked = True\n self.enteredData = letter + str(i)\n\n # якщо умова вище не виконалась, це означає, що гра в режимі розстановки\n # тоді координата xn буде іншою\n xn = j*self.gauge + (j+1)*self.indent + self.offset_x_user\n xk = xn + self.gauge\n # з'ясовуємо, на яку саме клітинку клікнув користувач\n if event.x >= xn and event.x <= xk and event.y >= yn and event.y <= yk:\n letter = letters[j]\n self.clicked = True\n self.enteredData = letter + str(i)\n\n \n def randomButtonHandler(self):\n \"\"\"Обробник події натискання кнопки випадкової розстановки кораблів.\"\"\"\n self.clicked = True\n self.enteredData = 'r'\n self.buttonsFrame.destroy()\n\n\n def enterButtonHandler(self):\n \"\"\"Обробник події натискання кнопки закінчення розстановки кораблів.\"\"\"\n self.clicked = True\n self.enteredData = ''\n self.buttonsFrame.destroy()\n\n\n def inputCoords(self):\n \"\"\"У циклі чекає, коли користувач натисне на клітинку, повертає її координати.\"\"\"\n self.clicked = False\n self.enteredData = None\n # коли користувач натисне на клітинку, спрацює clickOnCellHandler,\n # який змінить змінну self.clicked на False -- це забезпечить вихід із циклу\n while not self.clicked:\n self.root.update_idletasks()\n 
self.root.update()\n return self.enteredData\n\n\n def inputEnterHandler(self):\n \"\"\"Обробник події натискання кнопки Enter.\"\"\"\n self.clicked = True\n\n\n def inputEnter(self):\n \"\"\"Чекає, поки гравець не натисне кнопку Enter на екрані.\"\"\"\n enterButton = Button(self.messageFrame, text='Enter', command=self.inputEnterHandler, bd=1)\n enterButton.pack(side = RIGHT)\n \n self.clicked = False\n while not self.clicked:\n self.root.update_idletasks()\n self.root.update()\n enterButton.destroy()\n\n\n def renderScreenEnterName(self, numOfPlayers):\n \"\"\"Відображає екран для вводу імен гравців.\n Повертає імена цих гравців (або гравця, якщо він один).\"\"\"\n self.canv.delete('all')\n self.setBackground()\n\n # контейнер для полів вводу і кнопок\n entryFrame = Frame(self.frame)\n entryFrame.pack()\n entries = Frame(entryFrame)\n entries.pack(side = LEFT)\n \n if numOfPlayers == 2:\n playerName1, playerName2 = '', ''\n row1 = Frame(entries)\n row1.pack()\n label1 = Label(row1, text=\"Введіть ім'я першого гравця: \")\n entry1 = Entry(row1, width = 30)\n label1.pack(side = LEFT)\n entry1.pack(side = RIGHT)\n\n row2 = Frame(entries)\n row2.pack()\n label2 = Label(row2, text=\"Введіть ім'я другого гравця: \")\n entry2 = Entry(row2, width = 30)\n label2.pack(side = LEFT)\n entry2.pack(side = RIGHT)\n\n def getNames():\n \"\"\"Записує у змінні playerName значення з полів вводу.\"\"\"\n nonlocal playerName1, playerName2, notEntered\n playerName1 = entry1.get()\n playerName2 = entry2.get()\n notEntered = False\n #print(playerName1, playerName2, notEntered)\n\n # кнопка для вводу\n enterButton = Button(entryFrame, text=\"Ввести\", command= getNames )\n enterButton.pack(side = RIGHT)\n\n notEntered = True\n while notEntered:\n self.root.update_idletasks()\n self.root.update()\n entryFrame.destroy()\n return playerName1, playerName2\n\n elif numOfPlayers == 1:\n playerName = ''\n row = Frame(entries)\n row.pack()\n label = Label(row, text=\"Введіть ваше ім'я: \")\n entry = Entry(row, width = 30)\n label.pack(side = LEFT)\n entry.pack(side = RIGHT)\n\n def getName():\n nonlocal playerName, notEntered\n playerName = entry.get()\n notEntered = False\n\n enterButton = Button(entryFrame, text=\"Ввести\", command= getName )\n enterButton.pack(side = RIGHT)\n\n notEntered = True\n while notEntered:\n self.root.update_idletasks()\n self.root.update()\n entryFrame.destroy()\n return playerName\n\n\n def renderScreenPrepareForTurn(self, playerName):\n \"\"\"Рендерить екран передачі ходу іншому гравцю.\"\"\"\n self.canv.delete('all')\n self.setBackground()\n self.printText(f\"Гравець {playerName}, приготуйтесь! 
Натисніть Enter, щоб продовжити.\")\n self.inputEnter()\n\n \n def renderWhileArrangingShips(self, field, playerName):\n \"\"\"Відображає поле поточного гравця під час розстановки кораблів.\n field -- інформація про поле поточного гравця\n field: list[10][10]: True / False\n \"\"\"\n self.canv.delete('all')\n #создание поля для пользователя\n #перебор строк\n for i in range(10):\n #перебор столбцов\n for j in range(10):\n xn = j*self.gauge + (j+1)*self.indent + self.offset_x_user\n xk = xn + self.gauge\n yn = i*self.gauge + (i+1)*self.indent + self.offset_y\n yk = yn + self.gauge\n #добавление прямоугольника на холст с тегом в формате:\n #префикс_строка_столбец\n cell = self.canv.create_rectangle(xn,yn,xk,yk,tag = \"my_\"+str(i)+\"_\"+str(j), fill='blue')\n self.canv.tag_bind(cell, \"\", self.clickOnCellHandler)\n # якщо там корабель -- фарбуємо в жовтий\n if field[i][j]:\n self.canv.itemconfig(\"my_\"+str(i)+\"_\"+str(j), fill=\"yellow\")\n\n #добавление букв и цифр\n for i in reversed(range(10)):\n #цифры пользователя\n xc = self.offset_x_user - 15\n yc = i*self.gauge + (i+1)*self.indent + self.offset_y + round(self.gauge/2)\n self.canv.create_text(xc,yc,text=str(i))\n #буквы\n symbols = \"abcdefghij\"\n for i in range(10):\n #буквы пользователя\n xc = i*self.gauge + (i+1)*self.indent + self.offset_x_user + round(self.gauge/2)\n yc = self.offset_y - 15\n self.canv.create_text(xc,yc,text=symbols[i])\n \n # кнопки вводу і рандому\n self.buttonsFrame.destroy()\n self.buttonsFrame = Frame(self.frame)\n self.buttonsFrame.pack()\n randomButton = Button(self.buttonsFrame, text='Random', command = self.randomButtonHandler)\n randomButton.pack(side = LEFT)\n enterButton = Button(self.buttonsFrame, text='Завершити розстановку', command = self.enterButtonHandler)\n enterButton.pack(side = RIGHT)\n\n # відображаємо все це на екран\n self.printText(f\"{playerName}, розставте кораблі!\")\n\n\n def paintMiss(self, xn, yn):\n \"\"\"Малює крапку на клітинці.\"\"\"\n self.canv.create_oval(xn+(self.gauge/2-3), yn+(self.gauge/2-3), \n xn+(self.gauge/2+3), yn+(self.gauge/2+3), fill=\"white\")\n\n\n def paintCross(self,xn,yn):\n \"\"\"Малює хрестик на клітинці.\"\"\"\n xk = xn + self.gauge\n yk = yn + self.gauge\n self.canv.create_line(xn+2,yn+2,xk-2,yk-2,width=\"3\", fill='black')\n self.canv.create_line(xk-2,yn+2,xn+2,yk-2,width=\"3\", fill='black')\n\n\n def paintCell(self, i, j, player, cellInfo, showall=False):\n \"\"\"Розфарбовує вказану клітинку.\"\"\"\n isHitStatus = cellInfo['isHitStatus']\n shipStatus = cellInfo['shipStatus']\n\n if player == \"currPlayer\":\n tag = f\"my_{i}_{j}\"\n # координати клітинки\n xn = j*self.gauge + (j+1)*self.indent + self.offset_x_user\n yn = i*self.gauge + (i+1)*self.indent + self.offset_y\n\n if isHitStatus == True and shipStatus == 'not belongs':\n #print(Style.BRIGHT + Fore.WHITE + Back.BLUE + '•', end='')\n self.canv.itemconfig(tag, fill=\"blue\")\n self.paintMiss(xn, yn)\n elif isHitStatus == False and shipStatus == 'not belongs':\n #print(Back.BLUE + ' ',end='')\n self.canv.itemconfig(tag, fill=\"blue\")\n elif isHitStatus == True and shipStatus == 'sunk':\n #print(Fore.BLACK + Back.WHITE + 'X', end='')\n self.canv.itemconfig(tag, fill=\"white\")\n self.paintCross(xn, yn)\n elif isHitStatus == True and shipStatus == 'not sunk':\n #print(Fore.BLACK + Back.RED + 'X', end='')\n self.canv.itemconfig(tag, fill=\"red\")\n self.paintCross(xn, yn)\n elif isHitStatus == False and shipStatus == 'not sunk':\n #print(Back.YELLOW + ' ', end='')\n self.canv.itemconfig(tag, 
fill=\"yellow\")\n\n elif player == \"currEnemy\":\n tag = f\"nmy_{i}_{j}\"\n # координати клітинки\n xn = j*self.gauge + (j+1)*self.indent + self.offset_x_comp\n yn = i*self.gauge + (i+1)*self.indent + self.offset_y\n\n if not showall:\n if not isHitStatus:\n self.canv.itemconfig(tag, fill='blue')\n elif shipStatus == 'not belongs':\n self.canv.itemconfig(tag, fill='blue')\n self.paintMiss(xn, yn)\n elif shipStatus == 'sunk':\n self.canv.itemconfig(tag, fill='white')\n self.paintCross(xn, yn)\n elif shipStatus == 'not sunk':\n self.canv.itemconfig(tag, fill='red')\n self.paintCross(xn, yn)\n \n else:\n if isHitStatus == True and shipStatus == 'not belongs':\n self.canv.itemconfig(tag, fill=\"blue\")\n self.paintMiss(xn, yn)\n elif isHitStatus == False and shipStatus == 'not belongs':\n self.canv.itemconfig(tag, fill=\"blue\")\n elif isHitStatus == True and shipStatus == 'sunk':\n self.canv.itemconfig(tag, fill=\"white\")\n self.paintCross(xn, yn)\n elif isHitStatus == True and shipStatus == 'not sunk':\n self.canv.itemconfig(tag, fill=\"red\")\n self.paintCross(xn, yn)\n elif isHitStatus == False and shipStatus == 'not sunk':\n self.canv.itemconfig(tag, fill=\"yellow\")\n\n\n def render(self, field, fieldEnemy, showall=False):\n \"\"\"Рендерить ігрові поля гравця і суперника.\"\"\"\n self.canv.delete('all')\n \n #создание поля для пользователя\n #перебор строк\n for i in range(10):\n #перебор столбцов\n for j in range(10):\n xn = j*self.gauge + (j+1)*self.indent + self.offset_x_user\n xk = xn + self.gauge\n yn = i*self.gauge + (i+1)*self.indent + self.offset_y\n yk = yn + self.gauge\n #добавление прямоугольника на холст с тегом в формате:\n #префикс_строка_столбец\n self.canv.create_rectangle(xn,yn,xk,yk,tag = \"my_\"+str(i)+\"_\"+str(j))\n # розфарбовування залежно від того, чи є корабель, чи влучали в клітинку...\n self.paintCell(i, j, \"currPlayer\", field[\"cellsInfo\"][i][j])\n\n #создание поля для компьютера\n #перебор строк\n for i in range(10):\n #перебор столбцов\n for j in range(10):\n xn = j*self.gauge + (j+1)*self.indent + self.offset_x_comp\n xk = xn + self.gauge\n yn = i*self.gauge + (i+1)*self.indent + self.offset_y\n yk = yn + self.gauge\n #добавление прямоугольника на холст с тегом в формате:\n #префикс_строка_столбец\n tag = \"nmy_\"+str(i)+\"_\"+str(j)\n cell = self.canv.create_rectangle(xn,yn,xk,yk,tag = tag)\n # розфарбовування залежно від того, чи є корабель, чи влучали в клітинку...\n self.paintCell(i, j, \"currEnemy\", fieldEnemy[\"cellsInfo\"][i][j], showall)\n #клік по клітинці викликає функцію inputCoords\n self.canv.tag_bind(cell, \"\", self.clickOnCellHandler) \n\n #добавление букв и цифр\n for i in reversed(range(10)):\n #цифры пользователя\n xc = self.offset_x_user - 15\n yc = i*self.gauge + (i+1)*self.indent + self.offset_y + round(self.gauge/2)\n self.canv.create_text(xc,yc,text=str(i))\n #цифры компьютера\n xc = self.offset_x_comp - 15\n yc = i*self.gauge + (i+1)*self.indent + self.offset_y + round(self.gauge/2)\n self.canv.create_text(xc,yc,text=str(i))\n #буквы\n symbols = \"abcdefghij\"\n for i in range(10):\n #буквы пользователя\n xc = i*self.gauge + (i+1)*self.indent + self.offset_x_user + round(self.gauge/2)\n yc = self.offset_y - 15\n self.canv.create_text(xc,yc,text=symbols[i])\n\n #буквы компьютера\n xc = i*self.gauge + (i+1)*self.indent + self.offset_x_comp + round(self.gauge/2)\n yc = self.offset_y - 15\n self.canv.create_text(xc,yc,text=symbols[i])\n\n \n # відображаємо\n self.root.update_idletasks()\n self.root.update()\n\n\n def 
setBackground(self):\n \"\"\"Встановлює зображення на задньому плані.\"\"\"\n self.canv.create_image(0, 0, image=self.backgroundImg, anchor=NW)\n\n\n def singlePlayButtonHandler(self):\n \"\"\"Обробник події натискання кнопки 'Почати гру з роботом'.\"\"\"\n self.clicked = True\n self.enteredData = '1'\n self.buttonsFrame.destroy()\n\n\n def multiPlayButtonHandler(self):\n \"\"\"Обробник події натискання кнопки 'Почати гру з гравцем'.\"\"\"\n self.clicked = True\n self.enteredData = '2'\n self.buttonsFrame.destroy()\n\n\n def mainMenu(self):\n \"\"\"Виводить на екран головне меню, повертає вибір користувача.\n 1 -- почати гру з роботом\n 2 -- почати гру з гравцем\n 0 -- вийти з гри\"\"\"\n self.canv.delete('all')\n self.setBackground()\n self.printText('Вітаємо в Battleship!')\n self.buttonsFrame = Frame(self.frame)\n self.buttonsFrame.pack(side=BOTTOM)\n newSinglePlayButton = Button(self.buttonsFrame, text='Нова гра з роботом', command = self.singlePlayButtonHandler)\n newSinglePlayButton.pack(side=LEFT)\n newMultiPlayButton = Button(self.buttonsFrame, text='Нова гра з гравцем', command = self.multiPlayButtonHandler)\n newMultiPlayButton.pack(side=RIGHT)\n\n # чекаємо, коли гравець натисне на якусь кнопку\n self.clicked = False\n while not self.clicked:\n self.root.update_idletasks()\n self.root.update()\n self.messageFrame.destroy()\n return self.enteredData\n\n\n# Console -- клас для відображення об'єктів консольним інтерфейсом\nclass Console(IRender):\n\n def __init__(self):\n self.clear()\n\n\n def printError(self, e):\n print('\\n' + str(e))\n input()\n\n\n def printText(self, message):\n print(message)\n\n\n def inputCoords(self):\n print(\"a3 -- обрати клітинку а3\")\n return(input(\"Ваш вибір: \"))\n \n\n def inputEnter(self):\n input()\n\n\n def renderScreenEnterName(self, numOfPlayers):\n \"\"\"Рендерить екран вводу імені.\"\"\"\n self.clear()\n self.printShipsDecoration()\n if numOfPlayers == 2:\n playerName1 = input(\"Введіть ім'я першого гравця: \")\n playerName2 = input(\"Введіть ім'я другого гравця: \")\n return playerName1, playerName2\n elif numOfPlayers == 1:\n playerName = input(\"Введіть ваше ім'я: \")\n return playerName\n\n\n def renderScreenPrepareForTurn(self, playerName):\n \"\"\"Рендерить екран передачі ходу іншому гравцеві.\"\"\"\n self.clear()\n self.printShipsDecoration()\n print(f\"Гравець {playerName}, приготуйтеся!\")\n print(\"Натисніть Enter, щоб продовжити: \", end = '')\n input()\n\n\n def renderWhileArrangingShips(self, field, playerName):\n \"\"\"Відображає поле поточного гравця під час розстановки кораблів.\n field -- інформація про поле поточного гравця\n field: list[10][10]: True / False\n \"\"\"\n\n self.clear()\n print(f\"{playerName}, розставте кораблі!\\n\")\n print(' a b c d e f g h i j ')\n print(' ┌───┬───┬───┬───┬───┬───┬───┬───┬───┬───┐')\n for i in range(10): # для всіх рядків\n # відображаємо поле поточного гравця\n print('{} │'.format(i), end='')\n \n for j in range(10):# для всіх клітинок в одному рядку\n print(' ', end='')\n\n belongsTo = field[i][j]\n\n if not belongsTo:\n print(Back.BLUE + ' ',end='')\n else:\n print(Back.YELLOW + ' ', end='')\n \n print(' │', end='')\n if j == 9: print()\n\n if i != 9:\n print(' ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤')\n else:\n print(' └───┴───┴───┴───┴───┴───┴───┴───┴───┴───┘')\n\n print('''\nr -- випадкова розстановка\nПорожній рядок -- закінчити розстановку\\n''')\n\n\n def render(self, field, fieldEnemy, showall=False):\n \"\"\"Відображає гру на екран.\n field -- інформація про поле 
поточного гравця\n field: {\n playerName: str,\n cellsInfo: list[10][10] {\n isHitStatus = True / False,\n shipStatus = 'not belongs' / 'sunk' / 'not sunk'\n }\n }\n fieldEnemy -- аналогічна інформація про поле гравця, який зараз не ходить\n \"\"\"\n # ┌└├┤┐─│┘┬ ┴ ┼\n # ┌───┐\n # │ Х │\n # ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤\n\n # очистимо консоль, налаштуємо її розмір\n self.clear()\n \n if field and fieldEnemy:\n # якщо передали ці аргументи -- відображаємо ігрові поля\n print('{:41s}{:20s}{:41s}'.format(field['playerName'], '',\n fieldEnemy['playerName']))\n print(' a b c d e f g h i j ' + ' ' * 20 +\n ' a b c d e f g h i j ')\n print(' ┌───┬───┬───┬───┬───┬───┬───┬───┬───┬───┐' + ' ' * 20 +\n ' ┌───┬───┬───┬───┬───┬───┬───┬───┬───┬───┐')\n\n for i in range(10): # для всіх рядків\n # спочатку відображаємо поле поточного гравця\n print('{} │'.format(i), end='')\n \n for j in range(10):# для всіх клітинок в одному рядку\n print(' ', end='')\n\n cellInfo = field['cellsInfo'][i][j]\n isHitStatus = cellInfo['isHitStatus']\n shipStatus = cellInfo['shipStatus']\n\n if isHitStatus == True and shipStatus == 'not belongs':\n print(Style.BRIGHT + Fore.WHITE + Back.BLUE + '•', end='')\n elif isHitStatus == False and shipStatus == 'not belongs':\n print(Back.BLUE + ' ',end='')\n elif isHitStatus == True and shipStatus == 'sunk':\n print(Fore.BLACK + Back.WHITE + 'X', end='')\n elif isHitStatus == True and shipStatus == 'not sunk':\n print(Fore.BLACK + Back.RED + 'X', end='')\n elif isHitStatus == False and shipStatus == 'not sunk':\n print(Back.YELLOW + ' ', end='')\n \n print(' │', end='')\n \n\n # тепер переходимо до поля суперника\n # гравець бачить тільки ті клітинки суперника, в які він влучив\n # (але тільки якщо showall == False)\n print(' ' * 20 + '{} │'.format(i), end='')\n\n for j in range(10):# для всіх клітинок в одному рядку\n print(' ', end='')\n\n cellInfo = fieldEnemy['cellsInfo'][i][j]\n isHitStatus = cellInfo['isHitStatus']\n shipStatus = cellInfo['shipStatus']\n \n if not showall:\n if not isHitStatus:\n print(Back.BLUE + ' ', end='')\n elif shipStatus == 'not belongs':\n print(Style.BRIGHT + Fore.WHITE + Back.BLUE + '•', end='')\n elif shipStatus == 'sunk':\n print(Fore.BLACK + Back.WHITE + 'X', end='')\n elif shipStatus == 'not sunk':\n print(Fore.BLACK + Back.RED + 'X', end='')\n \n # якщо показуємо все поле ворога\n else:\n if isHitStatus == True and shipStatus == 'not belongs':\n print(Style.BRIGHT + Fore.WHITE + Back.BLUE + '•', end='')\n elif isHitStatus == False and shipStatus == 'not belongs':\n print(Back.BLUE + ' ',end='')\n elif isHitStatus == True and shipStatus == 'sunk':\n print(Fore.BLACK + Back.WHITE + 'X', end='')\n elif isHitStatus == True and shipStatus == 'not sunk':\n print(Fore.BLACK + Back.RED + 'X', end='')\n elif isHitStatus == False and shipStatus == 'not sunk':\n print(Back.YELLOW + ' ', end='')\n \n print(' │', end='')\n\n if j == 9: print()\n \n\n if i != 9:\n print(' ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤' + ' ' * 20 +\n ' ├───┼───┼───┼───┼───┼───┼───┼───┼───┼───┤')\n else:\n print(' └───┴───┴───┴───┴───┴───┴───┴───┴───┴───┘' + ' ' * 20 +\n ' └───┴───┴───┴───┴───┴───┴───┴───┴───┴───┘')\n\n\n def printShipsDecoration(self):\n print(r\"\"\"\n | | | \n )_) )_) )_) \n )___))___))___)\\ \n )____)____)_____)\\ |>\n _____|____|____|____\\__ | | | \n---------\\ /--------- )_) )_) )_) \n ^^^^^ ^^^^^^^^^^^^^^^^^^^^^ )___))___))___)\\\\\n ^^^^ ^^^^ ^^^ ^^ )____)____)_____)\\\\\n ^^^^ ^^^ _____|____|____|____\\\\__ \n ---------\\ /---------\n ^^^^^ 
^^^^^^^^^^^^^^^^^^^^^\n | | | ^^^^ ^^^^ ^^^ ^^ \n )_) )_) )_) ^^^^ ^^^\n )___))___))___)\\ \n )____)____)_____)\\\\ \n _____|____|____|____\\\\\\__\n---------\\ /---------\n ^^^^^ ^^^^^^^^^^^^^^^^^^^^^\n ^^^^ ^^^^ ^^^ ^^ \n ^^^^ ^^^\n\n\"\"\")\n\n def mainMenu(self):\n \"\"\"Виводить головне меню на екран.\n Повертає 0 для виходу з гри, 1 для гри з роботом, 2 для гри з гравцем.\"\"\"\n\n correctOption = False\n # поки не ввели коректне значення\n while not correctOption:\n\n self.clear()\n self.printShipsDecoration()\n # ┌└├┤┐─│┘┬ ┴ ┼\n print('''\n┌──────────────┐\n│ Морський бій │\n└──────────────┘\n\n1 -- почати гру з роботом\n2 -- почати гру з іншим гравцем\n0 -- вийти з гри\n\nВаш вибір: ''', end='')\n option = input()\n if option in ('0', '1', '2'):\n return option\n \n\n def clear(self):\n \"\"\"Очищує консоль, налаштовує її розмір.\"\"\"\n system('cls')\n system('mode con: cols=113 lines=34')","sub_path":"python_files/display.py","file_name":"display.py","file_ext":"py","file_size_in_byte":34330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"431176509","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom LianJia.items import LianjiaItem\nimport re\n\nclass LianjiaSpider(scrapy.Spider):\n name = \"lianjia\"\n allowed_domains = [\"bj.lianjia.com\"]\n start_urls = ['https://bj.lianjia.com/ershoufang/']\n\n headers = {\n 'User-Agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36'\n }\n\n def start_requests(self):\n for j in self.start_urls:\n for i in range(100):\n yield scrapy.Request(j + 'pg' + str(i), headers=self.headers)\n\n def parse(self, response):\n item = LianjiaItem()\n print (len(response.xpath('/html/body/div[4]/div[1]/ul/li')))\n for i in response.xpath('/html/body/div[4]/div[1]/ul/li'):\n item['block_name'] = i.xpath('.//div[@class=\"title\"]/a/text()')[0].extract()\n item['address'] = i.xpath('.//div[@class=\"positionInfo\"]/a/text()')[0].extract()\n item['scale'] = i.xpath('.//div[@class=\"address\"]/div[1]')[0].extract().split('|')[1]\n item['area'] = i.xpath('.//div[@class=\"address\"]/div[1]')[0].extract().split('|')[2][1:3]\n item['direction'] = i.xpath('.//div[@class=\"address\"]/div[1]')[0].extract().split('|')[3]\n item['height'] = i.xpath('.//div[@class=\"flood\"]/div[1]/text()')[0].extract()\n item['follow'] = i.xpath('.//div[@class=\"followInfo\"]/text()')[0].extract().split('/')[0]\n item['see_times'] = i.xpath('.//div[@class=\"followInfo\"]/text()')[0].extract().split('/')[1]\n item['price'] = i.xpath('.//div[@class=\"priceInfo\"]/div[1]/span/text()')[0].extract()\n item['per_price'] = i.xpath('.//div[@class=\"priceInfo\"]/div[2]/span/text()')[0].extract()\n\n # 取数字\n item['follow'] = re.split(r'\\D+', item['follow'])[0]\n item['see_times'] = re.split(r'\\D+', item['see_times'])[1]\n item['per_price'] = re.split(r'\\D+', item['per_price'])[1]\n\n\n yield item","sub_path":"LianJia/spiders/lianjia.py","file_name":"lianjia.py","file_ext":"py","file_size_in_byte":2022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"333801195","text":"\"\"\"Unit tests for colors module.\"\"\"\n\n# -----------------------------------------------------------------------------\n# Imports\n# -----------------------------------------------------------------------------\nimport sys\n\nimport numpy as np\n\nfrom kwiklib.utils.colors import (COLORS_COUNT, COLORMAP, COLORMAP_TEXTURE,\n next_color, 
SHIFTLEN)\n# hsv_to_rgb is assumed to come from matplotlib; its vectorized implementation\n# accepts the (n, 3) color arrays used in hsv_rect below\nfrom matplotlib.colors import hsv_to_rgb\n\n\n# -----------------------------------------------------------------------------\n# Utility functions\n# -----------------------------------------------------------------------------\ndef hsv_rect(hsv, coords):\n    col = hsv_to_rgb(hsv)\n    col = np.clip(col, 0, 1)\n    rgb_rect(col, coords)\n\ndef rgb_rect(rgb, coords):\n    x0, y0, x1, y1 = coords\n    a = 2./len(rgb)\n    c = np.zeros((len(rgb), 4))\n    c[:,0] = np.linspace(x0, x1-a, len(rgb))\n    c[:,1] = y0\n    c[:,2] = np.linspace(x0+a, x1, len(rgb))\n    c[:,3] = y1\n    from galry import rectangles\n    rectangles(coordinates=c, color=rgb)\n\n\n# -----------------------------------------------------------------------------\n# Tests\n# -----------------------------------------------------------------------------\ndef test_colors_1():\n    for c in range(1, COLORS_COUNT):\n        assert next_color(c) == c + 1\n    assert next_color(COLORS_COUNT) == 1\n    \ndef test_color_galry():\n    from galry import figure, imshow, show, ylim, rectangles\n    autodestruct = True\n    if autodestruct:\n        autodestruct = 100\n\n    figure(constrain_navigation=False, toolbar=False, \n           autodestruct=autodestruct,\n           )\n    for i in range(SHIFTLEN):\n        y0 = 1 - 2 * i / float(SHIFTLEN)\n        y1 = 1 - 2 * (i + 1) / float(SHIFTLEN)\n        rgb_rect(COLORMAP_TEXTURE[i, ...], (-1, y0, 1, y1))\n    ylim(-1,1)\n    show()\n    ","sub_path":"kwiklib/utils/tests/test_colors.py","file_name":"test_colors.py","file_ext":"py","file_size_in_byte":1719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"554298532","text":"# Copyright 2018 REMME\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ------------------------------------------------------------------------\n\nimport hashlib\nfrom processor.protos.certificate_pb2 import CertificateTransaction, CertificateStorage\nfrom processor.shared.basic_client import BasicClient, _sha512\nfrom processor.certificate.certificate_handler import CertificateHandler\n\n\nclass CertificateClient(BasicClient):\n    def __init__(self):\n        super().__init__(CertificateHandler)\n\n    def _send_transaction(self, method, data, extra_addresses_input_output):\n        addresses_input_output = []\n        if extra_addresses_input_output:\n            addresses_input_output += extra_addresses_input_output\n        return super()._send_transaction(method, data, addresses_input_output)\n\n    def register_certificate(self, certificate_raw, signature_rem, signature_crt):\n        transaction = CertificateTransaction()\n        transaction.type = CertificateTransaction.CREATE\n        transaction.certificate_raw = certificate_raw\n        transaction.signature_rem = signature_rem\n        transaction.signature_crt = signature_crt\n        crt_address = self._family_handler._prefix + hashlib.sha512(transaction.certificate_raw.encode('utf-8')).hexdigest()[0:64]\n        print('Certificate address', crt_address)\n\n        self._send_transaction(CertificateTransaction.CREATE, transaction.SerializeToString(), [crt_address])\n\n    def revoke_certificate(self, address):\n        transaction = CertificateTransaction()\n        transaction.type = 
CertificateTransaction.REVOKE\n transaction.address = address\n self._send_transaction(CertificateTransaction.REVOKE, transaction.SerializeToString(), [address])\n\n def get_signer_address(self):\n return self.make_address(self._signer.get_public_key().as_hex())\n\n def sign_text(self, data):\n return self._signer.sign(data.encode('utf-8'))\n\n def get_status(self, address):\n data = self.get_value(address)\n storage = CertificateStorage()\n storage.ParseFromString(data)\n return storage.revoked\n","sub_path":"processor/processor/certificate/certificate_client.py","file_name":"certificate_client.py","file_ext":"py","file_size_in_byte":2598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"463299406","text":"from django.conf.urls import patterns, include, url\n\nfrom paper_life.apps.document import views\n\nurlpatterns = patterns('',\n url('^upload/$', views.UploadView.as_view()),\n url('^search/$', views.SearchView.as_view()),\n url('^connect/$', views.ConnectView.as_view()),\n\n url('^record/(?P\\d+)/$', views.DocumentRecordView.as_view()),\n url('^docdetail/(?P.*)/$', views.DocumentView.as_view()),\n\n url('^add-file/$', views.UploadFileView.as_view()),\n url('^add-record/$', views.UploadRecordView.as_view()),\n url('^update-record/$', views.UpdateRecordView.as_view()),\n)\n","sub_path":"paper_life/apps/document/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"83887876","text":"f = open('dictionary_nba1.csv','r')\nw = open('dictionary_nba2.csv','w')\n\nf=f.readlines()\ni= 0\nfor x in f:\n if i == 16:\n i =0 \n if i == 0:\n w.write(x)\n i = i+1\n \n","sub_path":"kalo_style_classification-master/nba.py","file_name":"nba.py","file_ext":"py","file_size_in_byte":188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"103272512","text":"import torch\nimport numpy as np\n\nfrom .attack import Attack\nfrom textattack.attack_results import FailedAttackResult, SuccessfulAttackResult\n\nclass GreedyWordSwapWIR(Attack):\n \"\"\"\n An attack that greedily chooses from a list of possible perturbations in \n order of index, after ranking indices by importance.\n \n Reimplementation of paper:\n Is BERT Really Robust? A Strong Baseline for Natural Language Attack on \n Text Classification and Entailment by Jin et. al, 2019\n \n https://github.com/jind11/TextFooler \n \n Args:\n goal_function: A function for determining how well a perturbation is doing at achieving the attack's goal.\n transformation: The type of transformation.\n max_depth (:obj:`int`, optional): The maximum number of words to change. Defaults to 32. 
\n \"\"\"\n WIR_TO_REPLACEMENT_STR = {\n 'unk': '[UNK]',\n 'delete': '[DELETE]',\n }\n\n def __init__(self, goal_function, transformation, constraints=[], wir_method='unk', max_depth=32):\n super().__init__(goal_function, transformation, constraints=constraints)\n self.max_depth = max_depth\n try: \n self.replacement_str = self.WIR_TO_REPLACEMENT_STR[wir_method]\n except KeyError:\n raise KeyError(f'Word Importance Ranking method {wir_method} not recognized.') \n \n def attack_one(self, tokenized_text, correct_output):\n original_tokenized_text = tokenized_text\n num_words_changed = 0\n \n # Sort words by order of importance\n original_result = self.goal_function.get_results([tokenized_text], correct_output)[0]\n cur_score = original_result.score\n len_text = len(tokenized_text.words)\n \n leave_one_texts = \\\n [tokenized_text.replace_word_at_index(i,self.replacement_str) for i in range(len_text)]\n leave_one_scores = np.array([result.score for result in \\\n self.goal_function.get_results(leave_one_texts, correct_output)])\n index_order = (-leave_one_scores).argsort()\n\n new_tokenized_text = None\n new_text_label = None\n i = 0\n while ((self.max_depth is None) or num_words_changed <= self.max_depth) and i < len(index_order):\n transformed_text_candidates = self.get_transformations(\n tokenized_text,\n original_tokenized_text,\n indices_to_replace=[index_order[i]])\n i += 1\n if len(transformed_text_candidates) == 0:\n continue\n num_words_changed += 1\n results = sorted(self.goal_function.get_results(transformed_text_candidates, correct_output), \n key=lambda x: -x.score)\n # Skip swaps which don't improve the score\n if results[0].score > cur_score:\n cur_score = results[0].score\n else:\n continue\n # If we succeeded, return the index with best similarity.\n if results[0].succeeded:\n best_result = results[0]\n # @TODO: Use vectorwise operations\n max_similarity = -float('inf')\n for result in results:\n if not result.succeeded:\n break\n candidate = result.tokenized_text\n try:\n similarity_score = candidate.attack_attrs['similarity_score']\n except KeyError:\n # If the attack was run without any similarity metrics, \n # candidates won't have a similarity score. In this\n # case, break and return the candidate that changed\n # the original score the most.\n break\n if similarity_score > max_similarity:\n max_similarity = similarity_score\n best_result = result\n return SuccessfulAttackResult( \n original_result,\n best_result\n )\n else:\n tokenized_text = results[0].tokenized_text\n \n if len(results):\n return FailedAttackResult(original_result, results[0])\n else:\n return FailedAttackResult(original_result)\n","sub_path":"textattack/search_methods/greedy_word_swap_wir.py","file_name":"greedy_word_swap_wir.py","file_ext":"py","file_size_in_byte":4348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"92909487","text":"# Copyright (c) Alibaba, Inc. 
and its affiliates.\n# The AIRDet implementation is also open-sourced by the authors, and available at https://github.com/tinyvision/AIRDet.\n\nimport os.path as osp\nimport pickle\n\nimport cv2\nimport torch\nimport torch.nn as nn\nimport torchvision\n\nfrom modelscope.metainfo import Models\nfrom modelscope.models.base.base_torch_model import TorchModel\nfrom modelscope.models.builder import MODELS\nfrom modelscope.utils.config import Config\nfrom modelscope.utils.constant import ModelFile, Tasks\nfrom .backbone import build_backbone\nfrom .head import build_head\nfrom .neck import build_neck\nfrom .utils import parse_config\n\n\nclass SingleStageDetector(TorchModel):\n \"\"\"\n The base class of single stage detector.\n \"\"\"\n\n def __init__(self, model_dir: str, *args, **kwargs):\n \"\"\"\n init model by cfg\n \"\"\"\n super().__init__(model_dir, *args, **kwargs)\n\n config_path = osp.join(model_dir, self.config_name)\n config = parse_config(config_path)\n self.cfg = config\n model_path = osp.join(model_dir, config.model.name)\n label_map = osp.join(model_dir, config.model.class_map)\n self.label_map = pickle.load(open(label_map, 'rb'))\n self.size_divisible = config.dataset.size_divisibility\n self.num_classes = config.model.head.num_classes\n self.conf_thre = config.model.head.nms_conf_thre\n self.nms_thre = config.model.head.nms_iou_thre\n\n if self.cfg.model.backbone.name == 'TinyNAS':\n self.cfg.model.backbone.structure_file = osp.join(\n model_dir, self.cfg.model.backbone.structure_file)\n self.backbone = build_backbone(self.cfg.model.backbone)\n self.neck = build_neck(self.cfg.model.neck)\n self.head = build_head(self.cfg.model.head)\n self.apply(self.init_bn)\n\n self.load_pretrain_model(model_path)\n\n def load_pretrain_model(self, pretrain_model):\n\n state_dict = torch.load(pretrain_model, map_location='cpu')['model']\n new_state_dict = {}\n for k, v in state_dict.items():\n k = k.replace('module.', '')\n new_state_dict[k] = v\n self.load_state_dict(new_state_dict, strict=True)\n\n def init_bn(self, M):\n for m in M.modules():\n if isinstance(m, nn.BatchNorm2d):\n m.eps = 1e-3\n m.momentum = 0.03\n\n def inference(self, x):\n\n if self.training:\n return self.forward_train(x)\n else:\n return self.forward_eval(x)\n\n def forward_train(self, x):\n\n pass\n\n def forward_eval(self, x):\n\n x = self.backbone(x)\n x = self.neck(x)\n prediction = self.head(x)\n\n return prediction\n\n def preprocess(self, image):\n image = torch.from_numpy(image).type(torch.float32)\n image = image.permute(2, 0, 1)\n shape = image.shape # c, h, w\n if self.size_divisible > 0:\n import math\n stride = self.size_divisible\n shape = list(shape)\n shape[1] = int(math.ceil(shape[1] / stride) * stride)\n shape[2] = int(math.ceil(shape[2] / stride) * stride)\n shape = tuple(shape)\n pad_img = image.new(*shape).zero_()\n pad_img[:, :image.shape[1], :image.shape[2]].copy_(image)\n pad_img = pad_img.unsqueeze(0)\n\n return pad_img\n\n def postprocess(self, preds):\n bboxes, scores, labels_idx = postprocess_gfocal(\n preds, self.num_classes, self.conf_thre, self.nms_thre)\n bboxes = bboxes.cpu().numpy()\n scores = scores.cpu().numpy()\n labels_idx = labels_idx.cpu().numpy()\n labels = [self.label_map[idx + 1][0]['name'] for idx in labels_idx]\n\n return (bboxes, scores, labels)\n\n\ndef multiclass_nms(multi_bboxes,\n multi_scores,\n score_thr,\n iou_thr,\n max_num=100,\n score_factors=None):\n \"\"\"NMS for multi-class bboxes.\n\n Args:\n multi_bboxes (Tensor): shape (n, #class*4) or (n, 4)\n multi_scores 
(Tensor): shape (n, #class), where the last column\n            contains scores of the background class, but this will be ignored.\n        score_thr (float): bbox threshold, bboxes with scores lower than it\n            will not be considered.\n        iou_thr (float): NMS IoU threshold\n        max_num (int): if there are more than max_num bboxes after NMS,\n            only top max_num will be kept.\n        score_factors (Tensor): The factors multiplied to scores before\n            applying NMS\n\n    Returns:\n        tuple: (bboxes, scores, labels), tensors of shape (k, 4), (k,) and \\\n            (k,). Labels are 0-based.\n    \"\"\"\n    num_classes = multi_scores.size(1)\n    # exclude background category\n    if multi_bboxes.shape[1] > 4:\n        bboxes = multi_bboxes.view(multi_scores.size(0), -1, 4)\n    else:\n        bboxes = multi_bboxes[:, None].expand(\n            multi_scores.size(0), num_classes, 4)\n    scores = multi_scores\n    # filter out boxes with low scores\n    valid_mask = scores > score_thr # 1000 * 80 bool\n\n    # We use masked_select for ONNX exporting purpose,\n    # which is equivalent to bboxes = bboxes[valid_mask]\n    # (TODO): as ONNX does not support repeat now,\n    # we have to use this ugly code\n    # bboxes -> 1000, 4\n    bboxes = torch.masked_select(\n        bboxes,\n        torch.stack((valid_mask, valid_mask, valid_mask, valid_mask),\n                    -1)).view(-1, 4) # mask-> 1000*80*4, 80000*4\n    if score_factors is not None:\n        scores = scores * score_factors[:, None]\n    scores = torch.masked_select(scores, valid_mask)\n    labels = valid_mask.nonzero(as_tuple=False)[:, 1]\n\n    if bboxes.numel() == 0:\n        # keep the empty-result shapes consistent with the non-empty path\n        bboxes = multi_bboxes.new_zeros((0, 4))\n        labels = multi_bboxes.new_zeros((0, ), dtype=torch.long)\n        scores = multi_bboxes.new_zeros((0, ))\n\n        return bboxes, scores, labels\n\n    keep = torchvision.ops.batched_nms(bboxes, scores, labels, iou_thr)\n\n    if max_num > 0:\n        keep = keep[:max_num]\n\n    return bboxes[keep], scores[keep], labels[keep]\n\n\ndef postprocess_gfocal(prediction, num_classes, conf_thre=0.05, nms_thre=0.7):\n    assert prediction.shape[0] == 1\n    for i, image_pred in enumerate(prediction):\n        # If none are remaining => process next image\n        if not image_pred.size(0):\n            continue\n        multi_bboxes = image_pred[:, :4]\n        multi_scores = image_pred[:, 4:]\n        detections, scores, labels = multiclass_nms(multi_bboxes, multi_scores,\n                                                    conf_thre, nms_thre, 500)\n\n    return detections, scores, labels\n","sub_path":"ai/modelscope/modelscope/models/cv/tinynas_detection/detector.py","file_name":"detector.py","file_ext":"py","file_size_in_byte":6677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"365992194","text":"import itertools\n\nimport operator\n\n\nletters = ['a','b','c','d','e','f','g','h']\nnumbers = [23,45,67,87,98,12,33,78]\nbooleans = [1,1,1,1,0,0,0,0]\n# itertools.chain\nchain = itertools.chain(letters,numbers)\n\nprint(list(chain))\n\n# itertools.count <--- will count infinitely\nfor i in itertools.count(10,0.25):\n    if i < 20:\n        print(i)\n    else:\n        break\n\n# itertools.compress\n# given two lists a and b, return the elements of a\n# for which the corresponding elements of b are True.\ncompress = itertools.compress(letters,booleans)\nprint(list(compress))\n\n\nmymap = map(operator.mul,numbers,numbers)\nprint(list(mymap))","sub_path":"python/concepts/data_structures/itertools_basics.py","file_name":"itertools_basics.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"21804493","text":"import sys\n\nsys.stdin = open('input_2667.txt', 'r')\n# sys.stdout = open('output_2667.txt', 'w')\n\nN = 
int(input())\n\ndiff = [(-1, 0), (1, 0), (0, -1), (0, 1)]\ndangi = 1\ndef paper(a, b):\n global dangi\n for (x, y) in diff:\n dx, dy = a + x, b + y\n if dx < 0 or dx == N or dy < 0 or dy == N:\n continue\n else:\n if APT[dx][dy] != 0:\n APT[dx][dy] = 0\n dangi += 1\n paper(dx, dy)\n\nAPT = []\nfor _ in range(N):\n APT += [list(map(int, input()))]\n\ncnt = 0\nresult = []\nfor a in range(N):\n for b in range(N):\n dangi = 1\n if APT[a][b] != 0:\n APT[a][b] = 0\n paper(a,b)\n cnt += 1\n result += [dangi]\nresult = sorted(result)\n\nprint(cnt)\nfor x in result:\n print(x)","sub_path":"백준/2667_단지번호붙이기.py","file_name":"2667_단지번호붙이기.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"143601323","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\n# MIT License\n#\n# Copyright (c) 2018 Justin Shenk\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n# IMPORTANT:\n#\n# This code is derived from Iván de Paz Centeno's implementation of MTCNN\n# (https://github.com/ipazc/mtcnn/) and Octavia Arriaga's facial expression recognition repo\n# (https://github.com/oarriaga/face_classification).\n#\nimport logging\nimport os\nimport pkg_resources\nimport requests\nimport sys\nfrom typing import Sequence, Tuple, Union\n\nimport cv2\nimport numpy as np\n\nfrom tensorflow.keras.models import load_model\n\n\nfrom .utils import load_image\n\nlogging.basicConfig(level=logging.INFO)\nlog = logging.getLogger(\"fer\")\n\nNumpyRects = Union[np.ndarray, Sequence[Tuple[int, int, int, int]]]\n\n__author__ = \"Justin Shenk\"\n\nPADDING = 40\nSERVER_URL = \"http://localhost:8501/v1/models/emotion_model:predict\"\n\n\nclass FER(object):\n \"\"\"\n Allows performing Facial Expression Recognition ->\n a) Detection of faces\n b) Detection of emotions\n \"\"\"\n\n def __init__(\n self,\n cascade_file: str = None,\n mtcnn=False,\n tfserving: bool = False,\n scale_factor: float = 1.1,\n min_face_size: int = 50,\n min_neighbors: int = 5,\n offsets: tuple = (10, 10),\n ):\n \"\"\"\n Initializes the face detector and Keras model for facial expression recognition.\n :param cascade_file: file URI with the Haar cascade for face classification\n :param mtcnn: use MTCNN network for face detection (not yet implemented)\n :param scale_factor: parameter specifying how much the image size is reduced at each image scale\n :param min_face_size: minimum size of the face to detect\n :param offsets: padding around face before 
classification\n \"\"\"\n self.__scale_factor = scale_factor\n self.__min_neighbors = min_neighbors\n self.__min_face_size = min_face_size\n self.__offsets = offsets\n self.tfserving = tfserving\n\n if cascade_file is None:\n cascade_file = cv2.data.haarcascades + \"haarcascade_frontalface_default.xml\"\n\n if mtcnn:\n try:\n from facenet_pytorch import MTCNN\n except ImportError:\n raise Exception(\n \"MTCNN not installed, install it with pip install facenet-pytorch and from facenet_pytorch import MTCNN\"\n )\n self.__face_detector = \"mtcnn\"\n self._mtcnn = MTCNN(keep_all=True)\n else:\n self.__face_detector = cv2.CascadeClassifier(cascade_file)\n\n self._initialize_model()\n\n def _initialize_model(self):\n if self.tfserving:\n self.__emotion_target_size = (64, 64) # hardcoded for now\n else:\n # Local Keras model\n emotion_model = pkg_resources.resource_filename(\n \"fer\", \"data/emotion_model.hdf5\"\n )\n log.debug(\"Emotion model: {}\".format(emotion_model))\n self.__emotion_classifier = load_model(emotion_model, compile=False)\n self.__emotion_classifier.make_predict_function()\n self.__emotion_target_size = self.__emotion_classifier.input_shape[1:3]\n return\n\n def _classify_emotions(self, gray_faces: np.ndarray) -> np.ndarray: # b x w x h\n \"\"\"Run faces through online or offline classifier.\"\"\"\n if self.tfserving:\n gray_faces = np.expand_dims(gray_faces, -1) # to 4-dimensions\n instances = gray_faces.tolist()\n response = requests.post(SERVER_URL, json={\"instances\": instances})\n response.raise_for_status()\n\n emotion_predictions = response.json()[\"predictions\"]\n return emotion_predictions\n else:\n return self.__emotion_classifier(gray_faces)\n\n @staticmethod\n def pad(image):\n \"\"\"Pad image.\"\"\"\n row, col = image.shape[:2]\n bottom = image[row - 2 : row, 0:col]\n mean = cv2.mean(bottom)[0]\n\n padded_image = cv2.copyMakeBorder(\n image,\n top=PADDING,\n bottom=PADDING,\n left=PADDING,\n right=PADDING,\n borderType=cv2.BORDER_CONSTANT,\n value=[mean, mean, mean],\n )\n return padded_image\n\n @staticmethod\n def depad(image):\n row, col = image.shape[:2]\n return image[PADDING : row - PADDING, PADDING : col - PADDING]\n\n @staticmethod\n def tosquare(bbox):\n \"\"\"Convert bounding box to square by elongating shorter side.\"\"\"\n x, y, w, h = bbox\n if h > w:\n diff = h - w\n x -= diff // 2\n w += diff\n elif w > h:\n diff = w - h\n y -= diff // 2\n h += diff\n if w != h:\n log.debug(f\"{w} is not {h}\")\n\n return (x, y, w, h)\n\n def find_faces(self, img: np.ndarray, bgr=True) -> list:\n \"\"\"Image to list of faces bounding boxes(x,y,w,h)\"\"\"\n if isinstance(self.__face_detector, cv2.CascadeClassifier):\n if bgr:\n gray_image_array = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n else: # assume gray\n gray_image_array = img\n\n faces = self.__face_detector.detectMultiScale(\n gray_image_array,\n scaleFactor=self.__scale_factor,\n minNeighbors=self.__min_neighbors,\n flags=cv2.CASCADE_SCALE_IMAGE,\n minSize=(self.__min_face_size, self.__min_face_size),\n )\n elif self.__face_detector == \"mtcnn\":\n boxes, probs = self._mtcnn.detect(img)\n faces = []\n if type(boxes) == np.ndarray:\n for face in boxes:\n faces.append(\n [\n int(face[0]),\n int(face[1]),\n int(face[2]) - int(face[0]),\n int(face[3]) - int(face[1]),\n ]\n )\n\n return faces\n\n @staticmethod\n def __preprocess_input(x, v2=False):\n x = x.astype(\"float32\")\n x = x / 255.0\n if v2:\n x = x - 0.5\n x = x * 2.0\n return x\n\n def __apply_offsets(self, face_coordinates):\n \"\"\"Offset face 
coordinates with padding before classification.\n x1, x2, y1, y2 = 0, 100, 0, 100 becomes -10, 110, -10, 110\n \"\"\"\n x, y, width, height = face_coordinates\n x_off, y_off = self.__offsets\n x1 = x - x_off\n x2 = x + width + x_off\n y1 = y - y_off\n y2 = y + height + y_off\n return x1, x2, y1, y2\n\n @staticmethod\n def _get_labels():\n return {\n 0: \"angry\",\n 1: \"disgust\",\n 2: \"fear\",\n 3: \"happy\",\n 4: \"sad\",\n 5: \"surprise\",\n 6: \"neutral\",\n }\n\n def detect_emotions(\n self, img: np.ndarray, face_rectangles: NumpyRects = None\n ) -> list:\n \"\"\"\n Detects bounding boxes from the specified image with ranking of emotions.\n :param img: exact image path, numpy array (BGR or gray) or based64 encoded images\n could be passed.\n :return: list containing all the bounding boxes detected with their emotions.\n \"\"\"\n img = load_image(img)\n\n emotion_labels = self._get_labels()\n\n if face_rectangles is None:\n face_rectangles = self.find_faces(img, bgr=True)\n\n gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n gray_img = self.pad(gray_img)\n\n emotions = []\n gray_faces = []\n if face_rectangles is not None:\n for face_coordinates in face_rectangles:\n face_coordinates = self.tosquare(face_coordinates)\n\n # offset to expand bounding box\n # Note: x1 and y1 can be negative\n x1, x2, y1, y2 = self.__apply_offsets(face_coordinates)\n\n # account for padding in bounding box coordinates\n x1 += PADDING\n y1 += PADDING\n x2 += PADDING\n y2 += PADDING\n x1 = np.clip(x1, a_min=0, a_max=None)\n y1 = np.clip(y1, a_min=0, a_max=None)\n\n gray_face = gray_img[max(0, y1) : y2, max(0, x1) : x2]\n\n try:\n gray_face = cv2.resize(gray_face, self.__emotion_target_size)\n except Exception as e:\n log.warn(\"{} resize failed: {}\".format(gray_face.shape, e))\n continue\n\n # Local Keras model\n gray_face = self.__preprocess_input(gray_face, True)\n gray_faces.append(gray_face)\n\n # predict all faces\n if not len(gray_faces):\n return emotions # no valid faces\n\n # classify emotions\n emotion_predictions = self._classify_emotions(np.array(gray_faces))\n\n # label scores\n for face_idx, face in enumerate(emotion_predictions):\n labelled_emotions = {\n emotion_labels[idx]: round(float(score), 2)\n for idx, score in enumerate(face)\n }\n\n emotions.append(\n dict(box=face_rectangles[face_idx], emotions=labelled_emotions)\n )\n\n self.emotions = emotions\n\n return emotions\n\n def top_emotion(\n self, img: np.ndarray\n ) -> Tuple[Union[str, None], Union[float, None]]:\n \"\"\"Convenience wrapper for `detect_emotions` returning only top emotion for first face in frame.\n :param img: image to process\n :return: top emotion and score (for first face in frame) or (None, None)\n\n \"\"\"\n emotions = self.detect_emotions(img=img)\n top_emotions = [\n max(e[\"emotions\"], key=lambda key: e[\"emotions\"][key]) for e in emotions\n ]\n\n # Take first face\n if len(top_emotions):\n top_emotion = top_emotions[0]\n else:\n return (None, None)\n score = emotions[0][\"emotions\"][top_emotion]\n\n return top_emotion, score\n\n\ndef parse_arguments(args):\n import argparse\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--image\", type=str, help=\"Image filepath\")\n return parser.parse_args()\n\n\ndef top_emotion():\n args = parse_arguments(sys.argv)\n fer = FER()\n top_emotion, score = fer.top_emotion(args.image)\n print(top_emotion, score)\n\n\ndef main():\n top_emotion()\n\n\nif __name__ == \"__main__\":\n 
main()\n","sub_path":"src/fer/fer.py","file_name":"fer.py","file_ext":"py","file_size_in_byte":11555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"428492057","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\n\nfrom common.models import Goods,Types\nfrom datetime import datetime\nfrom PIL import Image\nimport time,os\nfrom django.db.models import Q\nfrom django.core.paginator import Paginator\n# Create your views here.\n\n# 浏览商品信息\ndef index(request,pIndex):\n '''浏览信息'''\n #获取商品类别信息\n tlist = Types.objects.extra(select={'_has':'concat(path,id)'}).order_by('_has')\n for ob in tlist:\n ob.pname = '. . .'*(ob.path.count(',')-1)\n\n #获取商品信息查询对象\n mod = Goods.objects\n mywhere=[] #定义一个用于存放搜索条件列表\n\n # 获取、判断并封装关keyword键搜索\n kw = request.GET.get(\"keyword\",None)\n if kw:\n # 查询商品名中只要含有关键字的都可以\n list = mod.filter(goods__contains=kw)\n mywhere.append(\"keyword=\"+kw)\n else:\n list = mod.filter()\n # 获取、判断并封装商品类别typeid搜索条件\n typeid = request.GET.get('typeid','0')\n if typeid != '0':\n tids = Types.objects.filter(Q(id=typeid) | Q(pid=typeid)).values_list('id',flat=True)\n list = list.filter(typeid__in=tids)\n mywhere.append(\"typeid=\"+typeid)\n # 获取、判断并封装商品状态state搜索条件\n state = request.GET.get('state','')\n if state != '':\n list = list.filter(state=state)\n mywhere.append(\"state=\"+state)\n\n #执行分页处理\n pIndex = int(pIndex)\n page = Paginator(list,5) #以5条每页创建分页对象\n maxpages = page.num_pages #最大页数\n #判断页数是否越界\n if pIndex > maxpages:\n pIndex = maxpages\n if pIndex < 1:\n pIndex = 1\n list2 = page.page(pIndex) #当前页数据\n plist = page.page_range #页码数列表\n\n #遍历商品信息,并获取对应的商品类别名称,以typename名封装\n for vo in list2:\n ty = Types.objects.get(id=vo.typeid)\n vo.typename = ty.name\n #封装信息加载模板输出\n context = {'typelist':tlist,\"goodslist\":list2,'plist':plist,'pIndex':pIndex,'maxpages':maxpages,'mywhere':mywhere,'typeid':int(typeid)}\n return render(request,\"myadmin/goods/index.html\",context)\n \n\ndef add(request):\n '''加载添加页面'''\n #获取商品类息\n tlist = Types.objects.extra(select={'_has':'concat(path,id)'}).order_by('_has')\n for ob in tlist:\n ob.pname = '. . 
.'*(ob.path.count(',')-1)\n context={'typelist':tlist}\n return render(request,\"myadmin/goods/add.html\",context)\n\ndef insert(request):\n '''执行添加'''\n try:\n # 图片的上传处理\n myfile = request.FILES.get(\"pic\",None)\n if not myfile:\n return HttpResponse(\"没有上传文件信息\")\n filename = str(time.time())+\".\"+myfile.name.split('.').pop()\n destination = open(\"./static/goods/\"+filename,\"wb+\")\n for chunk in myfile.chunks(): # 分块写入文件 \n destination.write(chunk) \n destination.close()\n\n # 图片的缩放\n im = Image.open(\"./static/goods/\"+filename)\n # 缩放到375*375(缩放后的宽高比例不变):\n im.thumbnail((375, 375)) \n im.save(\"./static/goods/\"+filename,None)\n \n im = Image.open(\"./static/goods/\"+filename)\n # 缩放到220*220(缩放后的宽高比例不变):\n im.thumbnail((220,220)) \n im.save(\"./static/goods/m_\"+filename,None)\n\n im = Image.open(\"./static/goods/\"+filename)\n # 缩放到75*75(缩放后的宽高比例不变):\n im.thumbnail((75, 75)) \n im.save(\"./static/goods/s_\"+filename,None)\n\n #保存商品信息\n ob = Goods()\n ob.goods = request.POST['goods']\n ob.typeid = request.POST['typeid']\n ob.company = request.POST['company']\n ob.price = request.POST['price']\n ob.store = request.POST['store']\n ob.content = request.POST['content']\n ob.picname = filename\n ob.state = 1\n ob.addtime = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n ob.save()\n context={\"info\":\"添加成功!\"}\n except Exception as err:\n print(err)\n context={\"info\":\"添加失败\"}\n return render(request,\"myadmin/info.html\",context)\n\ndef delete(request,gid):\n '''删除信息'''\n try:\n ob = Goods.objects.get(id=gid)\n #执行图片删除\n os.remove(\"./static/goods/\"+ob.picname)\n os.remove(\"./static/goods/s_\"+ob.picname)\n os.remove(\"./static/goods/m_\"+ob.picname)\n ob.delete()\n context={\"info\":\"删除成功!\"}\n except Exception as err:\n print(err)\n context={\"info\":\"删除失败\"}\n return render(request,\"myadmin/info.html\",context)\n\n\ndef edit(request,gid):\n '''加载编辑信息页面'''\n try:\n tlist = Types.objects.extra(select={'_has':'concat(path,id)'}).order_by('_has')\n for ob in tlist:\n ob.pname = '. . 
.'*(ob.path.count(',')-1)\n ob = Goods.objects.get(id=gid)\n context={\"goods\":ob,'typelist':tlist} #同时传两个参数时,放在一个字典中传过去\n return render(request,\"myadmin/goods/edit.html\",context)\n except Exception as err:\n context={\"info\":\"没有找到要修改的信息!\"}\n return render(request,\"myadmin/info.html\",context)\n\ndef update(request,gid):\n '''执行编辑信息'''\n try:\n # 图片的上传处理\n myfile = request.FILES.get(\"pic\",None)\n if not myfile:\n return HttpResponse(\"没有上传文件信息\")\n filename = str(time.time())+\".\"+myfile.name.split('.').pop()\n destination = open(\"./static/goods/\"+filename,\"wb+\")\n for chunk in myfile.chunks(): # 分块写入文件 \n destination.write(chunk) \n destination.close()\n\n # 图片的缩放\n im = Image.open(\"./static/goods/\"+filename)\n # 缩放到375*375(缩放后的宽高比例不变):\n im.thumbnail((375, 375)) \n im.save(\"./static/goods/\"+filename,None)\n \n im = Image.open(\"./static/goods/\"+filename)\n # 缩放到220*220(缩放后的宽高比例不变):\n im.thumbnail((220,220)) \n im.save(\"./static/goods/m_\"+filename,None)\n\n im = Image.open(\"./static/goods/\"+filename)\n # 缩放到75*75(缩放后的宽高比例不变):\n im.thumbnail((75, 75)) \n im.save(\"./static/goods/s_\"+filename,None)\n\n \n ob = Goods.objects.get(id=gid)\n old_picname = request.POST['old_picname']\n #删除原来的旧的图片\n os.remove(\"./static/goods/\"+old_picname)\n os.remove(\"./static/goods/s_\"+old_picname)\n os.remove(\"./static/goods/m_\"+old_picname)\n ob.goods = request.POST['goods']\n ob.typeid = request.POST['typeid']\n ob.company = request.POST['company']\n ob.price = request.POST['price']\n ob.store = request.POST['store']\n ob.content = request.POST['content']\n ob.picname = filename\n ob.state = request.POST['state']\n ob.save()\n context={\"info\":\"修改成功!\"}\n except Exception as err:\n print(err)\n context={\"info\":\"修改失败\"}\n return render(request,\"myadmin/info.html\",context)\n","sub_path":"myadmin/views/goods.py","file_name":"goods.py","file_ext":"py","file_size_in_byte":7292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"366102953","text":"import json \nimport time\nimport requests\nimport urllib, urllib2\nfrom lxml import html\nfrom lxml import etree\nimport locale\nimport random\n\ndef getFBtoInstagramUrls(fbPageName):\n #Get FB place id\n url = 'https://www.facebook.com/' + fbPageName\n r = requests.get(url)\n facebookID = r.text.split('\"pageID\":\"')[1].split('\"')[0]\n\n instagramAPIUrl = 'https://api.instagram.com/v1/locations/search?facebook_places_id={0}&client_id=1b724eef0ecc4bd58e63cab65576bec5'\n\n url = instagramAPIUrl.format(facebookID)\n r = requests.get(url)\n data = json.loads(r.text)\n instagramID = data['data'][0]['id']\n instagramMediaUrl = 'https://api.instagram.com/v1/locations/{0}/media/recent?client_id=1b724eef0ecc4bd58e63cab65576bec5'\n \n url = instagramMediaUrl.format(instagramID)\n\n r = requests.get(url)\n data = json.loads(r.text)\n \n\n allImageUrls = []\n\n #Grabbing image urls for std resolution pics \n for item in data['data']:\n imageUrl = item['images']['standard_resolution']['url']\n\n allImageUrls.append(imageUrl)\n \n\n loopThroughPages = True\n\n while loopThroughPages:\n\n if data['pagination']:\n nextPageUrl = data['pagination']['next_url']\n r = requests.get(nextPageUrl)\n data = json.loads(r.text)\n\n for item in data['data']:\n imageUrl = item['images']['standard_resolution']['url']\n allImageUrls.append(imageUrl)\n with open('./data/imageUrlList_FB2instagram_' + fbPageName + '_' + facebookID + '.txt','w') as filename:\n filename.write(str(allImageUrls))\n \n else:\n 
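            # the 'pagination' field is empty, i.e. there is no next_url:
            # the last page of media for this location has been reached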
loopThroughPages = False\n\n return allImageUrls\n \n\n#print facebookID\n","sub_path":"tmpFBtoInstagram.py","file_name":"tmpFBtoInstagram.py","file_ext":"py","file_size_in_byte":1749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"66404137","text":"#Importing the modules we need\nimport numpy as np\nimport tensorflow as tf\nprint(tf.config.list_physical_devices('GPU'))\nfrom tensorflow import keras\nfrom tensorflow import Tensor\nfrom tensorflow.keras import layers\nfrom tensorflow.keras.models import Sequential, Model\nfrom tensorflow.keras.layers import Dense, Activation, Flatten, Input, Conv2D, MaxPooling2D\nfrom tensorflow.keras.losses import categorical_crossentropy\nfrom tensorflow.keras.initializers import HeNormal\nfrom matplotlib import pyplot as plt\nfrom tensorflow.keras.layers import PReLU, ReLU\nfrom sklearn.model_selection import train_test_split\nimport tensorflow_addons as tfa\n\n#Functions we use to split 5 particle data into EM shower vs non-EM shower/Track particles\ndef oneHotEncode(inputArray):\n y = inputArray\n b = np.zeros((y.size, y.max()+1))\n b[np.arange(y.size),y] = 1\n return b\n \ndef particleToEMTrack(inputArray):\n for i in range(inputArray.shape[0]):\n v=inputArray[i]\n if v==0 or v==1:\n inputArray[i] = 0\n else:\n inputArray[i] = 1 \n return inputArray\n\n#Define a data generator\ndatagen = ImageDataGenerator(width_shift_range=1.0, height_shift_range=1.0)\n#Getting and preprocessing the data\nX = np.load('X_64_train.napy')\nX = X/225.0\ny = np.load('y_train_split.npy')\n \n#Only used to split data into EM shower vs non EM shower\ny = np.argmax(y, axis=1)\ny = particleToEMTrack(y)\ny = oneHotEncode(y)\n \n#Splitting data into train and validation/testing data\nX_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.1)\n\n#Load data into data generator\nit = datagen.flow(X_train, y_train)\n\n#Define model\ninput_shape = (64, 64, 1)\ninitializer = tf.keras.initializers.HeNormal()\nmodel = keras.Sequential(\n [\n keras.Input(shape=input_shape),\n \n layers.Conv2D(64, kernel_size=(3, 3), activation=\"relu\", kernel_initializer=initializer, kernel_regularizer=l2(0.0005), bias_regularizer=l2(0.0005)),\n layers.Conv2D(64, kernel_size=(3, 3), activation=\"relu\", kernel_initializer=initializer, kernel_regularizer=l2(0.0005), bias_regularizer=l2(0.0005)),\n layers.BatchNormalization(),\n layers.MaxPooling2D(pool_size=(2, 2)),\n# layers.Dropout(0.1),\n \n layers.BatchNormalization(),\n layers.Conv2D(128, kernel_size=(3, 3), activation=\"relu\", kernel_initializer=initializer, kernel_regularizer=l2(0.0005), bias_regularizer=l2(0.0005)),\n layers.Conv2D(128, kernel_size=(3, 3), activation=\"relu\", kernel_initializer=initializer, kernel_regularizer=l2(0.0005), bias_regularizer=l2(0.0005)),\n layers.BatchNormalization(),\n layers.MaxPooling2D(pool_size=(2, 2)),\n# layers.Dropout(0.1),\n \n layers.BatchNormalization(),\n layers.Conv2D(256, kernel_size=(3, 3), activation=\"relu\", kernel_initializer=initializer, kernel_regularizer=l2(0.0005), bias_regularizer=l2(0.0005)),\n layers.Conv2D(256, kernel_size=(3, 3), activation=\"relu\", kernel_initializer=initializer, kernel_regularizer=l2(0.0005), bias_regularizer=l2(0.0005)),\n layers.BatchNormalization(),\n layers.MaxPooling2D(pool_size=(2, 2)),\n# layers.Dropout(0.1),\n \n layers.Flatten(),\n# layers.Dropout(0.5),\n layers.Dense(128, activation='relu'),\n layers.BatchNormalization(),\n layers.Dense(5, activation=\"softmax\"),\n ]\n)\n 
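
# Added sketch (not part of the original script; the helper name below is
# illustrative). A side-effect-free self-check for the label helpers defined
# above: particleToEMTrack() folds the five particle classes into EM shower
# (0, from classes 0-1) vs track (1, from classes 2-4), and oneHotEncode()
# then produces an (n, 2) one-hot matrix. Call it manually before training
# if the class mapping is in doubt.
def _check_label_helpers():
    demo = np.array([0, 1, 2, 3, 4])
    encoded = oneHotEncode(particleToEMTrack(demo.copy()))
    assert encoded.shape == (5, 2)
    assert (encoded.argmax(axis=1) == np.array([0, 0, 1, 1, 1])).all()
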
\nmodel.summary()\n\n#Compile model\nmodel.compile(loss=\"categorical_crossentropy\", optimizer=\"adam\", metrics=[\"accuracy\", tfa.metrics.F1Score(num_classes=2)])\n\n#Set directory to store log files for graphing\nlog_dir = \"logs/fit/\"\ntensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)\n\n#Train model\nmodel.fit(it, batch_size=128, epochs=50, validation_data=(X_val, y_val), verbose=2, callbacks=[tensorboard_callback])\n","sub_path":"Model-2.py","file_name":"Model-2.py","file_ext":"py","file_size_in_byte":3794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"401307194","text":"\n# 46. Majority Element\n# Given an array of integers, the majority number is the number that occurs more than half of the size of the array. Find it.\n#\n# Example\n# Given [1, 1, 1, 1, 2, 2, 2], return 1\n#\n# Challenge\n# O(n) time and O(1) extra space\n\nclass Solution:\n \"\"\"\n @param: nums: a list of integers\n @return: find a majority number\n \"\"\"\n def majorityNumber(self, nums):\n # write your code here\n \"if we use hash table to save the count, then it is not O(1) extra space solution\"\n \"use majority number to replace following number, and anyhow in the rest of the numbers, majority element is still the majority number\"\n\n\n count = 1\n major = nums[0]\n\n current = 0\n\n for i in range(1,len(nums)-1):\n \"compare\"\n\n if nums[i] == major:\n count += 1\n else:\n \"distroy one major element and current element\"\n count -= 1\n\n if count <= 0:\n \"we already used up current major element, lets set major element as next element\"\n count = 1\n major = nums[i+1]\n i += 1\n\n return major\n\n\n","sub_path":"Algorithm/Python/HighFrequency/MajorityElement.py","file_name":"MajorityElement.py","file_ext":"py","file_size_in_byte":1209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"230843316","text":"from flask import Blueprint, abort\nfrom flask_restx import Resource, fields, marshal\nfrom project import db\nfrom project.api import api\nfrom project.models.availability import Availability, create_availability\nfrom project.models.user import User, add_user, Role, promote_to_member\nfrom project.models.event import Event, add_event, update_event\nfrom typing import Tuple\nfrom project.decorators import token_required\nfrom project.error_handlers import *\nimport datetime as dt\nfrom typing import Dict\n\n\ndef verify_at_least_1_day_available(availability) -> bool:\n \"\"\"Counts the number of days selected as available and returns the number\n as an integer.\"\"\"\n return any(value not in {False, None} for value in\n availability['days'].values()) or\\\n all(value is None for value in availability['days'].values())\n\n\ndef starttime_after_endtime(availability) -> bool:\n \"\"\"Checks whether the start time is before the end time and returns True\n if it is and False if it is not.\"\"\"\n return availability['start'] > availability['end']\n\n\nevents_blueprint = Blueprint('events', __name__)\n\n\nclass Time(fields.Raw):\n __schema_type__ = 'string'\n __schema_format__ = 'time'\n\n def format(self, value):\n return value.isoformat()\n\n\nweekday = fields.Boolean(\n default=True,\n description='Whether an event should be scheduled on this day')\nweekend = fields.Boolean(\n default=False,\n description='Whether an event should be scheduled on this day')\ndays_input_output = {\n 'sunday': weekend,\n 'monday': weekday,\n 'tuesday': weekday,\n 
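    # weekdays default to available (True); weekend days default to False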
'wednesday': weekday,\n 'thursday': weekday,\n 'friday': weekday,\n 'saturday': weekend}\navailability_input_output = api.model(\n 'Availability', {\n 'start': Time(\n description='Your earliest availability for the event',),\n 'end': Time(\n description='Your latest availability for the event',),\n 'days': days_input_output})\nevent_input_output = api.model(\n 'Event', {\n 'name': fields.String(\n description='The name of the event', required=True,\n example='My event', min_length=1, max_length=32),\n 'location': fields.String(\n default=None,\n description='The location where the event will take place',\n example='My office', max_length=256),\n 'description': fields.String(\n default=None, description='A description for the event',\n example='This is an awesome description.', max_length=1024),\n 'duration': fields.Integer(\n description='The duration of the event in minutes', default=60),\n 'url': fields.String(\n description='The unique url for this event', required=True,\n example='myevent', min_length=1, max_length=32),\n 'color': fields.String(\n description=\"A hex representation of a colour with or without the \"\n \"leading '#'\", required=True, example='#000000',\n min_length=7, max_length=7),\n 'availability': fields.Nested(availability_input_output)})\n\nday = fields.Boolean(\n description='Whether an event should be scheduled on this day')\ndays_put_input = api.model(\n 'Days', {\n 'sunday': day,\n 'monday': day,\n 'tuesday': day,\n 'wednesday': day,\n 'thursday': day,\n 'friday': day,\n 'saturday': day})\navailability_put_input = api.model(\n 'Availability', {\n 'start': fields.String(\n description='Your earliest availability for the event'),\n 'end': fields.String(\n description='Your latest availability for the event'),\n 'days': fields.Nested(days_put_input)})\nevent_put_input = api.model(\n 'Event', {\n 'name': fields.String(\n description='The name of the event', example='My event',\n min_length=1, max_length=32),\n 'location': fields.String(\n description='The location where the event will take place',\n example='My office', max_length=256),\n 'description': fields.String(\n description='A description for the event',\n example='This is an awesome description.', max_length=1024),\n 'duration': fields.Integer(\n description='The duration of the event in minutes', example=60),\n 'url': fields.String(\n description='The unique url for this event', example='myevent',\n min_length=1, max_length=32),\n 'color': fields.String(\n description=\"A hex representation of a colour with or without the \"\n \"leading '#'\", example='#000000', min_length=7, max_length=7),\n 'availability': fields.Nested(availability_put_input)})\n\n\n@api.route('/users//events')\nclass Events(Resource):\n @token_required\n @api.marshal_with(event_input_output)\n def get(self, public_id, current_user=None):\n \"\"\"Returns all the events that the user has created.\"\"\"\n if current_user.public_id != public_id:\n raise PermissionError\n\n events = Event.query.\\\n filter(Event.user_id == current_user.id).\\\n all()\n\n for event in events:\n event.color = '#' + event.color\n\n return events, 200\n\n @token_required\n @api.expect(event_input_output, validate=True)\n def post(self, public_id, current_user=None):\n \"\"\"Creates an Event for the specified user.\"\"\"\n if current_user.public_id != public_id:\n raise PermissionError\n\n payload = api.payload\n\n if not verify_at_least_1_day_available(payload['availability']):\n raise NoDayAvailable\n\n if starttime_after_endtime(payload['availability']):\n 
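            # a window whose start time falls after its end time cannot be
            # scheduled, so reject the request before creating anything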
raise StartAfterEnd\n\n if ' ' in payload['url']:\n raise UrlContainsSpace\n\n user_id = current_user.id\n availability = create_availability(\n sunday=payload['availability']['days']['sunday'],\n monday=payload['availability']['days']['monday'],\n tuesday=payload['availability']['days']['tuesday'],\n wednesday=payload['availability']['days']['wednesday'],\n thursday=payload['availability']['days']['thursday'],\n friday=payload['availability']['days']['friday'],\n saturday=payload['availability']['days']['saturday'],\n start=dt.time.fromisoformat(payload['availability']['start']),\n end=dt.time.fromisoformat(payload['availability']['end']))\n add_event(\n user_id=user_id,\n availability=availability,\n name=payload['name'],\n location=payload['location'],\n description=payload['description'],\n duration=payload['duration'],\n url=payload['url'],\n color=payload['color'].lstrip('#'))\n db.session.commit()\n if current_user.role == Role.ON_BOARDING:\n promote_to_member(current_user)\n return {'message': 'success'}, 201\n\n\n@api.route('/users//events/')\nclass EventDetail(Resource):\n\n @token_required\n def put(self, public_id, event_url, current_user=None):\n\n if current_user.public_id != public_id:\n raise PermissionError\n\n event = Event.query.filter_by(url=event_url).first()\n data = marshal(api.payload, event_put_input, skip_none=True)\n\n if not verify_at_least_1_day_available(data['availability']):\n raise NoDayAvailable\n\n if data['availability']['start'] and data['availability']['end']\\\n and starttime_after_endtime(data['availability']):\n raise StartAfterEnd\n\n if 'url' in data and data['url'] == ' ':\n raise UrlContainsSpace\n\n if event is not None:\n user = event.user\n if user.public_id != current_user.public_id:\n raise PermissionError\n event = update_event(event, data)\n return {\"message\": \"Success\"}, 200\n return {\"error\": \"Event not found\"}, 404\n\n @api.marshal_with(event_input_output)\n def get(self, public_id, event_url):\n\n response = {}\n event = Event.query.filter_by(url=event_url).first()\n if event is not None:\n event.color = '#' + event.color\n\n return event, 200\n else:\n response['error'], code = \"Event not found\", 404\n return response, code\n\n @token_required\n def delete(self, public_id, event_url, current_user=None):\n if current_user.public_id != public_id:\n raise PermissionError\n event = Event.query.filter_by(url=event_url).first()\n\n response = {}\n if event is not None:\n user = event.user\n if user.public_id != current_user.public_id:\n raise PermissionError\n db.session.delete(event)\n db.session.commit()\n response['message'], code = 'Success', 200\n else:\n response, code = \"Event not found\", 404\n return response, code\n","sub_path":"server/project/api/events_handler.py","file_name":"events_handler.py","file_ext":"py","file_size_in_byte":9149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"68884299","text":"import datetime\r\nfrom decimal import Decimal, getcontext\r\nfrom os import getcwd\r\n\r\nfrom sqlalchemy import Column, String, Integer, DateTime, DECIMAL\r\nfrom sqlalchemy.orm import Session\r\n\r\nfrom declarative import Base\r\n\r\n\r\nclass Order(Base):\r\n __tablename__ = 'orders'\r\n\r\n orderid = Column(Integer, primary_key=True, autoincrement=True)\r\n customerid = Column(String(5))\r\n employeeid = Column(Integer)\r\n contact\r\n orderdate = Column(DateTime)\r\n requireddate = Column(DateTime)\r\n shippeddate = Column(DateTime)\r\n shipvia = 
Column(Integer)\r\n freight = Column(DECIMAL)\r\n shipname = Column(String(40))\r\n shipaddress = Column(String(60))\r\n shipcity = Column(String(15))\r\n shipregion = Column(String(15))\r\n shippostalcode = Column(String(10))\r\n shipcountry = Column(String(15))\r\n\r\n\r\n @staticmethod\r\n def parse_string_from_date(date_value):\r\n return datetime.datetime.strftime(date_value, \"%Y.%m.%d\")\r\n\r\n @staticmethod\r\n def parse_date_from_string(date_string):\r\n return datetime.datetime.strptime(date_string, \"%Y-%m-%d\")\r\n\r\n @staticmethod\r\n def read_row_from_csv(row_number):\r\n if row_number > 0:\r\n try:\r\n file = open(getcwd() + \"\\\\orders.csv\", \"r\", encoding='utf-8')\r\n except IOError:\r\n print(\"File not found or not accessible.\")\r\n return []\r\n else:\r\n lines = file.readlines()\r\n file.close()\r\n try:\r\n return lines[row_number]\r\n except IndexError:\r\n print(\"Row number out of range\")\r\n return []\r\n return []\r\n\r\n def to_csv(self):\r\n csv_row = \"\\n{0!s:};{1!s:};{2!s:};{3!s:};{4!s:;};{5!s:};{6!s:};\" \\\r\n \"{7!s:}:{8!s:};{9!s:};{10!s:};{11!s:};{12!s:};\" \\\r\n \"{13!s:}\".format(self.orderid, self.customerid, self.employeeid,\r\n Order.parse_string_from_date(self.orderdate), Order.parse_string_from_date(self.requireddate),\r\n Order.parse_string_from_date(self.shippeddate), self.shipvia, self.freight,\r\n self.shipname, self.shipaddress, self.shipcity, self.shipregion, self.shippostalcode,\r\n self.shipcountry)\r\n try:\r\n file = open(getcwd() + \"\\\\orders.csv\", \"r\")\r\n except IOError:\r\n print(\"File not found or not accessible.\")\r\n else:\r\n file.write(csv_row)\r\n file.close()\r\n\r\n @staticmethod\r\n def parse(row):\r\n split_row = row.split(\";\")\r\n new_order = Order()\r\n getcontext().prec = 4\r\n new_order.orderid = split_row[0]\r\n new_order.customerid = split_row[1]\r\n new_order.employeeid = int(split_row[2])\r\n new_order.orderdate = Order.parse_date_from_string(split_row[3])\r\n new_order.requireddate = Order.parse_date_from_string(split_row[4])\r\n new_order.shippeddatedate = Order.parse_date_from_string(split_row[5])\r\n new_order.shipvia = int(split_row[6])\r\n new_order.freight = Decimal(split_row[7])\r\n new_order.shipname = split_row[8]\r\n new_order.shipaddress = split_row[9]\r\n new_order.shipcity = split_row[10]\r\n new_order.shipregion = split_row[11]\r\n new_order.shippostalcode = split_row[12]\r\n new_order.shipcountry = split_row[13]\r\n\r\n return new_order\r\n\r\n def persist(self, connection):\r\n session = Session(connection)\r\n result = session.query(Order).filter(Order.orderid == self.orderid).first()\r\n if result:\r\n print(\"The data is already present in the table.\")\r\n else:\r\n session.add(self)\r\n session.commit()\r\n session.close()\r\n\r\n def __repr__(self):\r\n return \"{0!s: >11s} | {1!s: >5s} | {2!s: >11s} | {3!s: >31s} | {4!s: >11s} | {5!s: >11s} | {6!s: >11s} |\" \\\r\n \" {7!s: >7s} | {8!s: >40s} | {9!s: >60s} | {10!s: >15s} | {11!s: >15s} | {12!s: >10s} |\" \\\r\n \" {13!s: >15s}\".format(self.orderid, self.customerid, self.employeeid,\r\n Order.parse_string_from_date(self.orderdate), Order.parse_string_from_date(self.requireddate),\r\n Order.parse_string_from_date(self.shippeddate), self.shipvia, self.freight,\r\n self.shipname, self.shipaddress, self.shipcity, self.shipregion, self.shippostalcode,\r\n 
self.shipcountry)\r\n","sub_path":"Order.py","file_name":"Order.py","file_ext":"py","file_size_in_byte":4506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"333089831","text":"import numpy as np\nimport pandas as pd\nimport csv\nimport pickle\nimport random\nfrom sklearn.svm import SVC\nfrom sklearn.preprocessing import normalize\nfrom sklearn.model_selection import ParameterGrid, StratifiedKFold\nfrom sklearn.metrics import accuracy_score\nfrom joblib import Parallel, delayed\n\n\n# Configuration section\niter = 5\ncvCount = 10\nseed = 42\nwdiff = 0.0\nwtest = 1.0\nnumSamples = 1000\n\nRandomSearchDict = dict()\n\n# Define a custom scoring mechanism\ndef myScoring(y_pred_train, y_true_train, y_pred_test, y_true_test):\n train_acc = accuracy_score(y_true_train, y_pred_train)\n test_acc = accuracy_score(y_true_test, y_pred_test)\n acc_diff = (train_acc - test_acc)*(-1)\n return wdiff*acc_diff + wtest*test_acc, train_acc, test_acc\n\n\ndef Stratified_kfold(X_train, Y_train, combination):\n combination_list =[]\n score_list = []\n train_acc_list = []\n test_acc_list =[]\n skf = StratifiedKFold(n_splits=cvCount, random_state=seed)\n s = 0\n tr_acc = 0\n te_acc = 0\n for train_idx, test_idx in skf.split(X_train, Y_train):\n split_x_train, split_x_test = X_train[train_idx], X_train[test_idx]\n y_true_train, y_true_test = Y_train[train_idx], Y_train[test_idx]\n svm = SVC(**combination)\n clf = svm.fit(split_x_train, y_true_train.ravel())\n y_pred_train = clf.predict(split_x_train)\n y_pred_test = clf.predict(split_x_test)\n score, fold_train_acc, fold_test_acc = myScoring(y_pred_train, y_true_train, y_pred_test, y_true_test)\n s += score\n tr_acc += fold_train_acc\n te_acc += fold_test_acc\n combination_list.append(combination)\n score_list.append(s / cvCount)\n train_acc_list.append(tr_acc / cvCount)\n test_acc_list.append(te_acc / cvCount)\n return combination_list[0], score_list[0], train_acc_list[0], test_acc_list[0]\n\n\nbestparamdict = dict()\n\nc = [x for x in np.linspace(0.1, 15, num=200)]\ngamma = [x for x in np.linspace(0.001, 1, num=100)]\ngrid = {'C': c, 'gamma': gamma}\ntrain_acc_list = []\ntest_acc_list = []\nfor i in range(iter):\n X_train = np.load('final_train_binarydata_' + str(i) + '.npy')\n Y_train = np.load('final_train_labels_' + str(i) + '.npy')\n\n X_train = X_train.astype('float')\n X_train = normalize(X_train)\n Y_train = Y_train.astype('float')\n Y_train = Y_train.astype(int)\n\n randomCombinations = random.sample(list(ParameterGrid(grid)), numSamples)\n\n print(\"parallel loop started\")\n\n r = Parallel(n_jobs=-2,verbose=10)(delayed(Stratified_kfold)(X_train,Y_train,combination) for combination in randomCombinations)\n combination, score, train_acc, test_acc= zip(*r)\n\n combination_list = list(combination)\n\n score_list = list(score)\n trainacclist = list(train_acc)\n testacclist = list(test_acc)\n\n req_idx = score_list.index(max(score_list))\n train_acc_list.append(trainacclist[req_idx])\n test_acc_list.append(testacclist[req_idx])\n bestparamdict[str(i)] = combination_list[req_idx]\n\nprint('Train acc = ' + str(sum(train_acc_list)/iter))\nprint('Test acc = ' + str(sum(test_acc_list) / iter))\n\nwith open('new_dict_of_randomsearch_bestparams_rbf.pkl', 'wb') as f:\n pickle.dump(bestparamdict, f)\nprint('Done')\n","sub_path":"Thesis ch 
2/fixedBucketsize(0.432)/9.SVM_rbfRandomSearch.py","file_name":"9.SVM_rbfRandomSearch.py","file_ext":"py","file_size_in_byte":3202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"590841963","text":"# File: proj2.py\r\n# Author: Ujjwal Rehani\r\n# Date: 4/13/2017 \r\n# Section: 21\r\n# E-mail: urehani1@umbc.edu \r\n# Description:\r\n# This project simulates a vending machine\r\n\r\nSNACK_NAME = 0\t#zero index is name\r\nSNACK_PRICE = 1 #first index is price\r\nSNACK_QUANTITY = 2\t#second index is quantity\r\nSNACK_CODE = 3\t#third index is snack code\r\n\r\n# printGreeting() explains the program to the user\r\n# Input: none\r\n# Output: none (prints greeting)\r\ndef printGreeting():\r\n\tprint(\"This program simulates a vending machine. You\")\r\n\tprint(\"may choose which vending machine you 'load' in,\")\r\n\tprint(\"and may also specify how much money you have\")\r\n\tprint(\"available for purchasing vending machine items.\\n\")\r\n\t\r\n# loadFile() reads in file and creates 3d list\r\n# Input: none\r\n# Output: machineList a 3d list\r\ndef loadFile():\r\n\tmachineList = []\r\n\titemList = []\r\n\tfileName = input(\"Please enter file to load machine from: \")\r\n\tmyFile = open(fileName,\"r\")\r\n\tcount = 0\r\n\t\r\n\t#Creates lists for each snack\r\n\tfor line in myFile:\r\n\t\trow = []\r\n\t\tline = line.strip()\r\n\t\tname, price, quantity, code = line.split()\r\n\t\t\r\n\t\trow.append(name)\r\n\t\trow.append(price)\r\n\t\trow.append(quantity)\r\n\t\trow.append(code)\r\n\t\t\r\n\t\titemList.append(row)\r\n\t\t\t\r\n\tmachineList.append(itemList)\r\n\tmyFile.close()\r\n\r\n\treturn machineList\r\n\t\r\n# displayMachine() prints out current vending machine\r\n# Input: itemList the 3d list that contains all the snacks\r\n# Output: none (prints items and their info)\r\ndef displayMachine(itemList):\r\n\tcount = 0;\r\n\tcount2 = 0;\r\n\tcount3 = 0;\r\n\twhile(count < len(itemList[0]) ):\r\n\t\tif(itemList[0][count][SNACK_QUANTITY] == 0):\r\n\t\t\tprint(end=\"\")\r\n\t\telse:\r\n\t\t\tprint(\"\\t\",itemList[0][count][SNACK_NAME],end=\"\")\r\n\t\tcount+=1\r\n\tprint()\r\n\twhile(count2 < len(itemList[0]) ):\r\n\t\tif(itemList[0][count2][SNACK_QUANTITY] == 0):\r\n\t\t\tprint(end=\"\")\r\n\t\telse:\r\n\t\t\tprint(\"\\t\",itemList[0][count2][SNACK_PRICE],end=\"\")\r\n\t\tcount2+=1\r\n\tprint()\r\n\twhile(count3 < len(itemList[0]) ):\r\n\t\tif(itemList[0][count3][SNACK_QUANTITY] == 0):\r\n\t\t\tprint(end=\"\")\r\n\t\telse:\r\n\t\t\tprint(\"\\t\",itemList[0][count3][SNACK_CODE],end=\"\")\r\n\t\tcount3+=1\r\n\tprint()\r\n\t\r\n# displayBalance() prints out balance on card\r\n# Input: initialBalance the balance that is first loaded on card\r\n# Output: balance; amount of money on card\r\ndef displayBalance(initialBalance):\r\n\tprint(\"You have $\",initialBalance,\"left on your card.\")\r\n\t\r\n# addMoney() adds money to card\r\n# Input: balance; \t\tamount of money currently on card\r\n# Output: newBalance; \tbalance after adding money\r\ndef addMoney(balance):\r\n\tprint(\"Please enter the amount of money you want to add to your card.\")\r\n\tnewMoney = float(input(\"Enter a decimal number (greater than or equal to zero): \"))\r\n\t\r\n\t#Checks for positive amount\r\n\twhile(newMoney < 0):\r\n\t\tnewMoney = float(input(\"Enter a decimal number (greater than or equal to zero): \"))\r\n\t\r\n\tnewBalance = balance + newMoney\t\r\n\treturn newBalance\r\n\t\r\n\r\n# menuChoice() displays menu and asks user to select an option\r\n# 
Input:\t\tNone\r\n# Output: \t\tNone (prints choices)\r\ndef menuChoice(snackList,initialBalance):\r\n\tokChoices = [\"1\",\"2\",\"3\",\"4\",\"5\"]\r\n\tuserChoice = \"\"\r\n\tprint(\"1 - Display Vending Machine\")\r\n\tprint(\"2 - Make Selection\")\r\n\tprint(\"3 - Display Card Balance\")\r\n\tprint(\"4 - Add Money to Card\")\r\n\tprint(\"5 - Quit\")\r\n\tprint()\r\n\t\r\n\t#Programs runs unless quit is chosen\r\n\twhile(userChoice != \"5\"):\r\n\t\tuserChoice = input(\"Enter a number between 1 and 5 (inclusive): \")\r\n\t\t#Checks for valid input\r\n\t\twhile(userChoice not in okChoices):\r\n\t\t\tuserChoice = input(\"Enter a number between 1 and 5 (inclusive): \")\r\n\t\t\t\r\n\t\t#Calls respective function based on choice\t\r\n\t\tif (userChoice == \"1\"):\r\n\t\t\tdisplayMachine(snackList)\r\n\t\t\t\r\n\t\t\tprint()\r\n\t\t\tprint(\"1 - Display Vending Machine\")\r\n\t\t\tprint(\"2 - Make Selection\")\r\n\t\t\tprint(\"3 - Display Card Balance\")\r\n\t\t\tprint(\"4 - Add Money to Card\")\r\n\t\t\tprint(\"5 - Quit\")\r\n\t\t\tprint()\r\n\t\t\t\r\n\t\telif (userChoice == \"2\"):\r\n\t\t\tnewList,newMoney = makeSelection(snackList,initialBalance)\r\n\t\t\tsnackList = newList\r\n\t\t\tinitialBalance = newMoney\r\n\t\t\t\r\n\t\t\tprint()\r\n\t\t\tprint(\"1 - Display Vending Machine\")\r\n\t\t\tprint(\"2 - Make Selection\")\r\n\t\t\tprint(\"3 - Display Card Balance\")\r\n\t\t\tprint(\"4 - Add Money to Card\")\r\n\t\t\tprint(\"5 - Quit\")\r\n\t\t\tprint()\r\n\t\t\t\r\n\t\telif (userChoice == \"3\"):\r\n\t\t\tdisplayBalance(initialBalance)\r\n\t\t\t\r\n\t\t\tprint()\r\n\t\t\tprint(\"1 - Display Vending Machine\")\r\n\t\t\tprint(\"2 - Make Selection\")\r\n\t\t\tprint(\"3 - Display Card Balance\")\r\n\t\t\tprint(\"4 - Add Money to Card\")\r\n\t\t\tprint(\"5 - Quit\")\r\n\t\t\tprint()\r\n\t\t\t\r\n\t\telif (userChoice == \"4\"):\r\n\t\t\tinitialBalance = addMoney(initialBalance)\r\n\t\t\tprint(\"1 - Display Vending Machine\")\r\n\t\t\tprint(\"2 - Make Selection\")\r\n\t\t\tprint(\"3 - Display Card Balance\")\r\n\t\t\tprint(\"4 - Add Money to Card\")\r\n\t\t\tprint(\"5 - Quit\")\r\n\t\t\tprint()\r\n\t\t\t\r\n\t\telif (userChoice == \"5\"):\r\n\t\t\tsaveFile(snackList)\r\n\t\t\r\n\r\n# firstBalance() asks user for the initial amount to be put on card\r\n# Input: \tNone\r\n# Output: first_balance \tthe initial amount of money on card\r\ndef firstBalance():\r\n\tprint(\"Please enter the amount of money you have on your card.\")\r\n\tfirst_balance = float(input(\"Enter a decimal number (greater than or equal to zero): \"))\r\n\t\r\n\t#Checks for positive balance\r\n\twhile (first_balance < 0):\r\n\t\tprint(\"The decimal number must be positive. 
Please try again!\")\r\n\t\tfirst_balance = float(input(\"Enter a decimal number (greater than or equal to zero): \"))\r\n\t\r\n\treturn first_balance\r\n\t\r\n# makeSelection() makes the selection for user and updates list and money\r\n# Input: itemList, money list with all the snacks and amount of money the user has\r\n# Output: first_balance \tthe initial amount of money on card\r\ndef makeSelection(itemList, money):\r\n\tcodeList = []\r\n\tcount = 0\r\n\titemChoice = input(\"Please enter one of the choices from the vending machine: \")\r\n\t\r\n\t#Creates a list of all codes\r\n\twhile(count < len(itemList[0])):\r\n\t\tcodeList.append(itemList[0][count][SNACK_CODE])\r\n\t\tcount+=1\r\n\t\r\n\t#Checks for valid code\r\n\twhile(itemChoice not in codeList):\r\n\t\tprint(\"That is not a valid choice, please try again.\")\r\n\t\titemChoice = input(\"Please enter one of the choices from the vending machine: \")\r\n\t\r\n\t#Gets the index of the snack which the code is associated with\r\n\tindex = 0\r\n\twhile(index < len(codeList)):\r\n\t\tif codeList[index] == itemChoice:\r\n\t\t\tsnackIndex = index\r\n\t\t\tindex+=1\r\n\t\telse:\r\n\t\t\tindex +=1\r\n\t\r\n\t#Gets the cost and quantity of choosen snack\r\n\tsnackCost = float(itemList[0][snackIndex][SNACK_PRICE])\t\t\r\n\tsnackQuantity = int(itemList[0][snackIndex][SNACK_QUANTITY])\r\n\t\r\n\t#Checks if selected snack has not ran out or if user is short on money\r\n\tif(snackQuantity == 0):\r\n\t\tprint(\"That is not a valid choice, please try again.\")\r\n\telif(money < snackCost):\r\n\t\tprint(\"Sorry, you don't have enough money for that.\")\r\n\telse:\r\n\t\t#if snack exists, money and quantity are subtracted\r\n\t\tmoney = money - snackCost\r\n\t\tsnackQuantity -= 1\r\n\t\r\n\t\tprint(\"Congrats, you bought a\",itemList[0][snackIndex][SNACK_NAME])\r\n\t\tprint(\"You now have $\",money,\"left on your card.\")\r\n\t\r\n\t#updates quantity in list\r\n\titemList[0][snackIndex][SNACK_QUANTITY] = snackQuantity\r\n\t\r\n\treturn itemList, money \r\n\t\t\t\r\n# saveFile() saves current vending machine to text file \r\n# Input: finalList the 3d list that contains all the items\r\n# Output: none (new text file gets written to)\r\ndef saveFile(finalList):\r\n\tmyFile = open(\"test.txt\",\"w\")\r\n\tcount = 0\r\n\tcount2 = 0\r\n\tcount3 = 0\r\n\tcount4 = 0\r\n\twhile(count\nimport numpy as np\nimport scipy\nfrom scipy import sparse\nfrom .setFunction import SetFunction\nimport submodlib_cpp as subcp\nfrom submodlib_cpp import DisparitySum \nfrom submodlib.helper import create_kernel, create_cluster\n\nclass DisparitySumFunction(SetFunction):\n\t\"\"\"Implementation of the Disparity-Sum function.\n\n\tDisparity-Sum models diversity by computing the sum of pairwise distances of all the elements in a subset. It is defined as\n\n\t.. math::\n\t\t\tf(X) = \\\\sum_{i, j \\\\in X} (1 - s_{ij})\n\n\tParameters\n\t----------\n\n\tn : int\n\t\tNumber of elements in the ground set\n\t\n\tsijs : list, optional\n\t\tSimilarity matrix to be used for getting :math:`s_{ij}` entries as defined above. 
When not provided, it is computed based on the following additional parameters\n\n\tdata : list, optional\n\t\tData matrix which will be used for computing the similarity matrix\n\n\tmetric : str, optional\n\t\tSimilarity metric to be used for computing the similarity matrix\n\t\n\tn_neighbors : int, optional\n\t\tWhile constructing similarity matrix, number of nearest neighbors whose similarity values will be kept resulting in a sparse similarity matrix for computation speed up (at the cost of accuracy)\n\t\n\t\"\"\"\n\n\tdef __init__(self, n, sijs=None, data=None, mode=None, metric=\"cosine\", num_neigh=-1, partial=False, ground_sub=None):\n\t\tself.n = n\n\t\tself.mode = mode\n\t\tself.metric = metric\n\t\tself.sijs = sijs\n\t\tself.data = data\n\t\tself.num_neigh = num_neigh\n\t\tself.partial = partial\n\t\tself.ground_sub = ground_sub\n\t\tself.cpp_obj = None\n\t\tself.cpp_sijs = None\n\t\tself.cpp_ground_sub = ground_sub\n\t\tself.cpp_content = None\n\n\t\tif self.n==0:\n\t\t\traise Exception(\"ERROR: Number of elements in ground set can't be 0\")\n\n\t\tif self.partial==True and self.ground_sub==None:\n\t\t\traise Exception(\"ERROR: Ground subset not specified\")\n\t\t\n\t\tif mode!=None and mode not in ['dense', 'sparse']:\n\t\t\traise Exception(\"ERROR: Incorrect mode\")\n\t\t\n\t\tif metric not in ['euclidean', 'cosine']:\n\t\t\traise Exception(\"ERROR: Unsupported metric\")\n\n\t\tif type(self.sijs)!=type(None): # User has provided sim matrix directly: simply consume it\n\t\t\tif np.shape(self.sijs)[0]!=self.n:\n\t\t\t\traise Exception(\"ERROR: Inconsistentcy between n and no of examples in the given similarity matrix\")\n\t\t\t\n\t\t\tif type(self.sijs) == scipy.sparse.csr.csr_matrix and num_neigh==-1:\n\t\t\t\traise Exception(\"ERROR: num_neigh for given sparse matrix not provided\")\n\t\t\tif self.mode!=None: # Ensure that there is no inconsistency in similarity matrix and provided mode\n\t\t\t\tif type(self.sijs) == np.ndarray and self.mode!=\"dense\":\n\t\t\t\t\tprint(\"WARNING: Incorrect mode provided for given similarity matrix, changing it to dense\")\n\t\t\t\t\tself.mode=\"dense\"\n\t\t\t\tif type(self.sijs) == scipy.sparse.csr.csr_matrix and self.mode!=\"sparse\":\n\t\t\t\t\tprint(\"WARNING: Incorrect mode provided for given similarity matrix, changing it to sparse\")\n\t\t\t\t\tself.mode=\"sparse\"\n\t\t\telse: # Infer mode from similarity matrix\n\t\t\t\tif type(self.sijs) == np.ndarray:\n\t\t\t\t\tself.mode=\"dense\"\n\t\t\t\tif type(self.sijs) == scipy.sparse.csr.csr_matrix:\n\t\t\t\t\tself.mode=\"sparse\"\n\t\telse:\n\t\t\tif type(self.data)!=type(None): # User has only provided data: build similarity matrix/cluster-info and consume it\n\t\t\t\t\n\t\t\t\tif np.shape(self.data)[0]!=self.n:\n\t\t\t\t\traise Exception(\"ERROR: Inconsistentcy between n and no of examples in the given data matrix\")\n\n\t\t\t\tif self.mode==None:\n\t\t\t\t\tself.mode=\"sparse\"\n\n\t\t\t\tif self.num_neigh==-1:\n\t\t\t\t\tself.num_neigh=np.shape(self.data)[0] #default is total no of datapoints\n\n\t\t\t\t\n\t\t\t\tself.cpp_content = np.array(subcp.create_kernel(self.data.tolist(), self.metric, self.num_neigh))\n\t\t\t\tval = self.cpp_content[0]\n\t\t\t\trow = list(map(lambda arg: int(arg), self.cpp_content[1]))\n\t\t\t\tcol = list(map(lambda arg: int(arg), self.cpp_content[2]))\n\t\t\t\tif self.mode==\"dense\":\n\t\t\t\t\tself.sijs = np.zeros((n,n))\n\t\t\t\t\tself.sijs[row,col] = val\n\t\t\t\tif self.mode==\"sparse\":\n\t\t\t\t\tself.sijs = sparse.csr_matrix((val, (row, col)), 
[n,n])\n\n\t\t\telse:\n\t\t\t\traise Exception(\"ERROR: Neither data nor similarity matrix provided\")\n\t\t\n\t\tif self.partial==False: \n\t\t\tself.cpp_ground_sub = {-1} #Provide a dummy set for pybind11 binding to be successful\n\t\t\n\t\t#Breaking similarity matrix to simpler native data sturctures for implicit pybind11 binding\n\t\tif self.mode==\"dense\":\n\t\t\tself.cpp_sijs = self.sijs.tolist() #break numpy ndarray to native list of list datastructure\n\t\t\t\n\t\t\tif type(self.cpp_sijs[0])==int or type(self.cpp_sijs[0])==float: #Its critical that we pass a list of list to pybind11\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t #This condition ensures the same in case of a 1D numpy array (for 1x1 sim matrix)\n\t\t\t\tl=[]\n\t\t\t\tl.append(self.cpp_sijs)\n\t\t\t\tself.cpp_sijs=l\n\t\t\tif np.shape(self.cpp_sijs)[0]!=np.shape(self.cpp_sijs)[1]: #TODO: relocate this check to some earlier part of code\n\t\t\t\traise Exception(\"ERROR: Dense similarity matrix should be a square matrix\")\n\n\t\t\tself.cpp_obj = DisparitySum(self.n, self.mode, self.cpp_sijs, self.num_neigh, self.partial, self.cpp_ground_sub)\n\t\t\n\t\tif self.mode==\"sparse\": #break scipy sparse matrix to native component lists (for csr implementation)\n\t\t\tself.cpp_sijs = {}\n\t\t\tself.cpp_sijs['arr_val'] = self.sijs.data.tolist() #contains non-zero values in matrix (row major traversal)\n\t\t\tself.cpp_sijs['arr_count'] = self.sijs.indptr.tolist() #cumulitive count of non-zero elements upto but not including current row\n\t\t\tself.cpp_sijs['arr_col'] = self.sijs.indices.tolist() #contains col index corrosponding to non-zero values in arr_val\n\t\t\tself.cpp_obj = DisparitySum(self.n, self.mode, self.cpp_sijs['arr_val'], self.cpp_sijs['arr_count'], self.cpp_sijs['arr_col'], self.num_neigh, self.partial, self.cpp_ground_sub)\n\t\t\n\t\tself.cpp_ground_sub=self.cpp_obj.getEffectiveGroundSet()\n\t\tself.ground_sub=self.cpp_ground_sub\n\n\tdef evaluate(self, X):\n\t\t\"\"\"Computes the score of a set\n\n\t\tParameters\n\t\t----------\n\t\tX : set\n\t\t\tThe set whose score needs to be computed\n\t\t\n\t\tReturns\n\t\t-------\n\t\tfloat\n\t\t\tThe function evaluation on the given set\n\n\t\t\"\"\"\n\n\t\tif type(X)!=set:\n\t\t\traise Exception(\"ERROR: X should be a set\")\n\n\t\tif X.issubset(self.cpp_ground_sub)==False:\n\t\t\traise Exception(\"ERROR: X is not a subset of ground set\")\n\t\t\n\t\treturn self.cpp_obj.evaluate(X)\n\n\tdef maximize(self, budget, optimizer='NaiveGreedy', stopIfZeroGain=False, stopIfNegativeGain=False, verbosity=False):\n\t\t\"\"\"Find the optimal subset with maximum score\n\n\t\tParameters\n\t\t----------\n\t\tbudget : int\n\t\t\tDesired size of the optimal set\n\t\toptimizer : optimizers.Optimizer\n\t\t\tThe optimizer that should be used to compute the optimal set\n\n\t\tReturns\n\t\t-------\n\t\tset\n\t\t\tThe optimal set of size budget\n\n\t\t\"\"\"\n\n\t\treturn self.cpp_obj.maximize(optimizer, budget, stopIfZeroGain, stopIfNegativeGain, verbosity)\n\t\n\tdef marginalGain(self, X, element):\n\t\t\"\"\"Find the marginal gain of adding an item to a set\n\n\t\tParameters\n\t\t----------\n\t\tX : set\n\t\t\tSet on which the marginal gain of adding an element has to be calculated\n\t\telement : int\n\t\t\tElement for which the marginal gain is to be calculated\n\n\t\tReturns\n\t\t-------\n\t\tfloat\n\t\t\tMarginal gain of adding element to X\n\n\t\t\"\"\"\n\n\t\tif type(X)!=set:\n\t\t\traise Exception(\"ERROR: X should be a set\")\n\n\t\tif type(element)!=int:\n\t\t\traise Exception(\"ERROR: 
element should be an int\")\n\n\t\tif X.issubset(self.cpp_ground_sub)==False:\n\t\t\traise Exception(\"ERROR: X is not a subset of ground set\")\n\n\t\treturn self.cpp_obj.marginalGain(X, element)","sub_path":"submodlib/functions/disparitySum.py","file_name":"disparitySum.py","file_ext":"py","file_size_in_byte":7370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"127950311","text":"from django.shortcuts import render, redirect, HttpResponse\nfrom book.models import User, Book, Author, Category\nfrom django.views.generic import View\nfrom django.views.generic import ListView, DetailView\nfrom django.shortcuts import get_object_or_404\nfrom django.db.models import Q\nfrom .userforms import UserForm\nimport hashlib\nimport json\n# Create your views here.\n\n\ndef Islogin(func):\n def inner(request, *args, **kwargs):\n user = request.COOKIES.get('user', None)\n if user is None:\n return redirect('login.html')\n response = func(request, *args, **kwargs)\n # response.write(user)\n return response\n return inner\n\n\nclass Index(ListView):\n queryset = Category.objects.all()\n context_object_name = \"category\"\n template_name = 'index.html'\n\n\nclass Search(Index):\n def get_queryset(self):\n queryset = super().get_queryset()\n keyword = self.request.GET.get('keyword')\n if not keyword:\n return queryset\n return queryset.filter(Q(name__icontains=keyword))\n\n\ndef BookDetail(request, book_id):\n if request.method == \"GET\":\n book = get_object_or_404(Book, pk=book_id)\n return render(request, \"Book.html\", {\"book\": book})\n if request.method == \"POST\":\n user = request.COOKIES.get('user', None)\n if user is None:\n return HttpResponse(json.dumps(\"0\"))\n userobj = User.objects.get(Account=user)\n bookobj = get_object_or_404(Book, pk=book_id)\n users = get_object_or_404(User, book=bookobj)\n if users:\n return HttpResponse(json.dumps(\"2\"))\n bookobj.user.add(userobj)\n return HttpResponse(json.dumps(\"1\"))\n\n\n# class BookDetail(DetailView):\n# queryset = Book.objects.all()\n# template_name = 'Book.html'\n# context_object_name = 'book'\n# pk_url_kwarg = 'book_id'\n#\n# @Islogin\n# def post(self, request, *args, **kwargs):\n# user = request.COOKIES.get('user', None)\n# book_id = self.kwargs.get('book_id')\n# userobj = User.objects.filter(Account=user)\n# bookobj = get_object_or_404(Book, pk=book_id)\n# bookobj.user.add(userobj)\n# return HttpResponse(\"添加成功\")\n\n\ndef login(request):\n if request.method == \"GET\":\n return render(request, \"login.html\")\n elif request.method == \"POST\":\n user = request.POST.get('user', None)\n pwd = request.POST.get('password', None)\n pwd = hashlib.md5((pwd + \"Password\").encode('utf-8')).hexdigest()\n obj = User.objects.filter(Account=user)\n if obj.exists():\n if pwd == obj[0].Password:\n response = HttpResponse(json.dumps('账号密码正确'))\n response.set_cookie('user', user, expires=60*60*24)\n return response\n else:\n message = \"请输入正确的密码\"\n return HttpResponse(json.dumps(message))\n else:\n message = \"账号或密码错误\"\n return HttpResponse(json.dumps(message))\n\n\n@Islogin\ndef myself(request):\n user = request.COOKIES.get('user', None)\n books = Book.objects.filter(user__Account=user)\n context = {\n 'books': books\n }\n return render(request, \"MyShelf.html\", context=context)\n\n\ndef register(request):\n message = {'status': False, 'errors': None}\n if request.method == \"GET\":\n return render(request, \"register.html\")\n if request.method == \"POST\":\n acccount = 
request.POST.get('user', None)\n psw = request.POST.get('psw', None)\n phone = request.POST.get('phone', None)\n if acccount is None or psw is None or phone is None:\n message['errors'] = \"请填写完整\"\n return HttpResponse(json.dumps(message))\n else:\n if len(phone) != 11:\n message['errors'] = \"请输入正确的电话号码\"\n return HttpResponse(json.dumps(message))\n obj = User.objects.filter(Account=acccount)\n if obj.exists():\n message['errors'] = \"该账号已存在,请更换\"\n return HttpResponse(json.dumps(message))\n else:\n User.objects.create(Account=acccount, Password=psw, PhoneNum=phone)\n message['status'] = True\n return HttpResponse(json.dumps(message))\n\n\ndef logout(request):\n response = HttpResponse(json.dumps('退出成功'))\n response.delete_cookie('user')\n return response\n\n","sub_path":"book/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"66956352","text":"def from10ToP(ch, p):\n res, k = 0, 1\n while ch > 0:\n res += k * (ch % p)\n k *= 10\n ch //= p\n print(res)\n\ndef fromPTo10(ch, p):\n ch = str(ch)\n print(int(ch, p))\n\ndef from8To2(ch):\n ch = int(ch, 8)\n ch = bin(ch)\n print(ch[2:])\n \nch = int(input())\np = int(input())\nfrom10ToP(ch, p)\nch = int(input())\np = int(input())\nfromPTo10(ch, p)\n\nch = input()\nfrom8To2(ch)\n","sub_path":"6/7.py","file_name":"7.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"178728238","text":"# -*- coding: utf-8 -*-\n# Copyright 2015 Hewlett-Packard Development Company, L.P.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom oslo_config import cfg\nfrom oslo_service import loopingcall\nfrom oslo_service import service\nfrom tooz import coordination\n\nfrom cue import objects\nimport cue.taskflow.client as taskflow_client\nfrom cue.taskflow.flow import check_cluster_status\n\n\nclass MonitorService(service.Service):\n\n def __init__(self):\n super(MonitorService, self).__init__()\n\n coord_url = (\"%s://%s:%s\"\n % (\n cfg.CONF.taskflow.coord_url,\n cfg.CONF.taskflow.zk_hosts,\n cfg.CONF.taskflow.zk_port\n ))\n\n self.coordinator = coordination.get_coordinator(\n coord_url, b'cue-monitor')\n self.coordinator.start()\n # Create a lock\n self.lock = self.coordinator.get_lock(b\"status_check\")\n\n def start(self):\n loop_interval_seconds = int(cfg.CONF.cue_monitor.loop_interval_seconds)\n\n pulse = loopingcall.FixedIntervalLoopingCall(\n self.check\n )\n pulse.start(interval=loop_interval_seconds)\n pulse.wait()\n\n # On stop, try to release the znode\n def stop(self):\n self.lock.release()\n self.coordinator.stop()\n\n def wait(self):\n pass\n\n def reset(self):\n self.lock.release()\n self.coordinator.stop()\n\n def check(self):\n if not self.lock.acquired:\n self.lock.acquire(blocking=False)\n\n if self.lock.acquired:\n\n clusters = get_cluster_id_node_ids()\n\n taskflow_client_instance = taskflow_client.get_client_instance()\n job_list = taskflow_client_instance.joblist()\n\n cluster_ids = []\n for job in job_list:\n if 'cluster_status_check' in job.details['store']:\n cluster_ids.append(job.details['store']['cluster_id'])\n\n filtered_clusters = []\n for cluster in clusters:\n if cluster[0] not in cluster_ids:\n filtered_clusters.append(cluster)\n\n for cluster in filtered_clusters:\n job_args = {\n 'cluster_status_check': '',\n 'cluster_id': cluster[0],\n 'context': {},\n 'default_rabbit_user': 'cue_monitor',\n 'default_rabbit_pass': cluster[0],\n }\n flow_kwargs = {\n 'cluster_id': cluster[0],\n 'node_ids': cluster[1]\n }\n taskflow_client_instance.post(check_cluster_status, job_args,\n flow_kwargs=flow_kwargs)\n\n\n# Returns a list of tuples where [0] is cluster_id\n# and [1] is a list of that clusters node ids\ndef get_cluster_id_node_ids():\n\n clusters = objects.Cluster.get_clusters(None, project_only=False)\n\n cluster_ids = []\n for cluster in clusters:\n if cluster.status not in ['ACTIVE', 'DOWN']:\n continue\n node_ids = []\n for node in objects.Node.get_nodes_by_cluster_id(None, cluster.id):\n node_ids.append(node.id)\n cluster_ids.append((cluster.id, node_ids))\n\n return cluster_ids\n","sub_path":"cue/monitor/monitor_service.py","file_name":"monitor_service.py","file_ext":"py","file_size_in_byte":3816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"481347995","text":"'''\nCreated on Sep 7, 2012\n\n@author: Jessica\n'''\nimport csv\nimport matplotlib.pyplot as plt\n\n\n\nf = csv.reader(open('WAIS_rho.csv','rU'))\nrho, dep = zip(*f)\n\nplt.plot(rho,dep, 'bs')\nplt.xlabel('Density (g/cm^3)')\nplt.ylabel('Depth (m)')\nplt.savefig('rho_dep.png')","sub_path":"openFile.py","file_name":"openFile.py","file_ext":"py","file_size_in_byte":262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"411984331","text":"import json\nfrom functools import wraps\nfrom traceback import format_exc\n\n\nclass BaseChaosException(Exception):\n def __init__(self, message, status, stacktrace=None):\n 
\"\"\"\n :param message: Message for human-read.\n :param status: Whether the exception should be sent to cs.\n :param stacktrace: Traceback information.\n \"\"\"\n if isinstance(message, str):\n self.message = json.dumps([message], ensure_ascii=False)\n else:\n self.message = json.dumps(list(message), ensure_ascii=False)\n self.status = status\n self.stacktrace = stacktrace\n Exception.__init__(self, message, status, stacktrace)\n\n def __str__(self):\n exception_msg = 'Message: %s\\n' % '\\n'.join(json.loads(self.message))\n if self.stacktrace:\n exception_msg += \"Stacktrace:\\n%s\" % self.stacktrace\n return exception_msg\n\n # def format_response(self):\n # response = dict(message=self.message, status=self.status)\n # response['error'] = str(self)\n # return response\n\n\nclass ServerException(BaseChaosException):\n def __init__(self, message, stacktrace=None):\n \"\"\"\n :param message: Message should be read by project maintainer and CS group.\n :param stacktrace: possible Traceback information.\n \"\"\"\n super().__init__(message=message, status=2, stacktrace=stacktrace)\n\n\nclass ValueException(BaseChaosException):\n\n def __init__(self,message,status=1):\n super().__init__(message=message, status=2, stacktrace=format_exc())\n\ndef exception_wrapper(message=None):\n def wrapper(func):\n\n @wraps(func)\n def function(*args, **kwargs):\n try:\n return func(*args, **kwargs)\n except BaseChaosException:\n raise\n except Exception as e:\n raise ServerException(message=message or str(e), stacktrace=format_exc()) from None\n\n return function\n\n return wrapper\n\n\n@exception_wrapper('test')\ndef test():\n try:\n a = 1 / 0\n except Exception as e:\n raise ValueException('{}'.format(e))\n\n\nif __name__ == '__main__':\n try:\n test()\n except Exception as e:\n print('{}'.format(e))\n raise e","sub_path":"test_any/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"322824633","text":"print('--- start of my_math ---')\n\nPI = 3.14159\nE = 2.71828\nmessage = 'My math module'\n\ndef sum(start, *numbers):\n '''Calculate the sum of unlimited number\n Params:\n start:int/float, the start sum\n *numbers:int/float, the numbers to sum up\n Return: int/float\n '''\n for x in numbers:\n start += x\n return start\n\ndef sum_range(start, stop, step=1):\n ''' Calculate the sum of intergers\n Params:\n start:int, start range number\n stop:int, stop range number\n step:int, the step between value\n Returns: int\n '''\n sum = 0\n for i in range(start, stop, step):\n sum += i\n return sum\n\ndef fact(n):\n '''Calculate the factorial of n\n Params:\n n:int\n Return: int\n '''\n p = 1\n for i in range(1, n + 1):\n p *= i\n return p\n\nprint('--- end of my_math ---')","sub_path":"basic-python/module-package/my_math.py","file_name":"my_math.py","file_ext":"py","file_size_in_byte":862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"222007166","text":"import random\nfrom decimal import *\nfrom operator import attrgetter\n\nfrom model import calculations\nfrom model.base import AbstractExchangeActor, AbstractExchange, AbstractModel\n\n\nclass RandomRateExchangeActor(AbstractExchangeActor):\n\t\"\"\"\n\tRandom Rate solution of the model.\n\tWhere the expected utility in de Equal Gain solution are equal, the utility here is calculated on a random exchange ratio.\n\t\"\"\"\n\n\tdef __init__(self, model, actor_name: str, demand: 
str, supply: str, group: str):\n\t\t\"\"\"\n\t\tConstructor, needs to call super()\n\n\t\t:param model:\n\t\t:param actor_name:\n\t\t:param demand:\n\t\t:param supply:\n\t\t:param group:\n\t\t\"\"\"\n\t\tsuper().__init__(model, actor_name, demand, supply, group)\n\n\t\tself.eu = 0\n\t\tself.is_highest_gain = False\n\n\nclass RandomRateExchange(AbstractExchange):\n\t\"\"\"\n\tAn exchange for the random rate model\n\t\"\"\"\n\n\tactor_class = RandomRateExchangeActor\n\t\"\"\" For the factory, so the Abstract base knows which type it has to create \"\"\"\n\n\tdef __init__(self, i: str, j: str, p: str, q: str, m, groups):\n\t\tsuper().__init__(i, j, p, q, m, groups)\n\t\tself.highest_gain = 0\n\t\tself.lowest_gain = 0\n\t\tself.total_gain = 0\n\t\tself.is_highest_highest = False\n\t\tself.is_lowest_highest = False\n\n\tdef calculate(self):\n\t\t# TODO REWRITE\n\t\t# smaller functions\n\t\t# less repeating\n\n\t\t# first we try to move j to the position of i on issue p\n\t\t# we start with the calculation for j\n\n\t\ta = float(self.j.s_demand / self.i.s)\n\t\tb = float(self.i.s_demand / self.j.s)\n\n\t\tif b > a:\n\t\t\ta, b = b, a\n\n\t\tself.dp = Decimal(random.uniform(a, b))\n\t\tself.dq = Decimal(random.uniform(a, b))\n\n\t\tself.i.move = calculations.reverse_move(self.model.ActorIssues[self.i.supply], self.i, self.dq)\n\t\tself.j.move = abs(self.i.x_demand - self.j.x)\n\n\t\tif abs(self.i.move) > abs(self.j.x_demand - self.i.x):\n\t\t\tself.dq = calculations.by_absolute_move(self.model.ActorIssues[self.i.supply], self.i)\n\t\t\tself.dp = calculations.by_exchange_ratio(self.i, self.dq)\n\n\t\t\tself.i.move = abs(self.j.x_demand - self.i.x)\n\t\t\tself.j.move = calculations.reverse_move(self.model.ActorIssues[self.j.supply], self.j, self.dp)\n\n\t\t# TODO add check of NBS.\n\t\t# this check is only necessary for the smallest exchange,\n\t\t# because if the smallest exchange exceeds the limit the larger one will definitely do so\n\n\t\tif self.i.x > self.j.x_demand:\n\t\t\tself.i.move *= -1\n\n\t\tif self.j.x > self.i.x_demand:\n\t\t\tself.j.move *= -1\n\n\t\tself.i.moves.append(self.i.move)\n\t\tself.j.moves.append(self.j.move)\n\n\t\tself.i.y = self.i.x + self.i.move\n\t\tself.j.y = self.j.x + self.j.move\n\n\t\tself.i.eu = abs(calculations.expected_utility(self.i, self.dq, self.dp))\n\t\tself.j.eu = abs(calculations.expected_utility(self.j, self.dp, self.dq))\n\n\t\tb1 = self.i.is_move_valid(self.i.move)\n\t\tb2 = self.j.is_move_valid(self.j.move)\n\n\t\tself.is_valid = b1 and b2\n\n\t\tif self.is_valid:  # TODO and self.re_calc:\n\n\t\t\tself.check_nbs_j()\n\t\t\tself.check_nbs_i()\n\n\t\tif self.i.eu > self.j.eu:\n\t\t\tself.highest_gain = self.i.eu\n\t\t\tself.lowest_gain = self.j.eu\n\t\telse:\n\t\t\tself.highest_gain = self.j.eu\n\t\t\tself.lowest_gain = self.i.eu\n\n\t\tself.total_gain = self.i.eu + self.j.eu\n\n\nclass RandomRateModel(AbstractModel):\n\t\"\"\"\n\tThe Random Rate implementation\n\t\"\"\"\n\n\t@staticmethod\n\tdef new_exchange_factory(i, j, p, q, model, groups):\n\t\t\"\"\"\n\t\tCreates a new instance of the RandomRateExchange\n\t\t\"\"\"\n\t\treturn RandomRateExchange(i, j, p, q, model, groups)\n\n\tdef sort_exchanges(self):\n\n\t\t\"\"\"\n\t\tIn the Random Rate solution the list is sorted on booleans.\n\t\tEach actor can have a bool that is true for its highest gain.\n\t\tEach exchange can have a bool that is true when both actors realize their highest gain in it.\n\t\tWe are especially interested in those exchanges where both actors expect to get their highest gain.\n\t\t\"\"\"\n\t\thighest_gains = 
dict()\n\t\thighest_gain_exchange = dict()\n\n\t\tfor actor in self.Actors:\n\t\t\thighest_gains[actor] = 0\n\n\t\tfor exchange in self.Exchanges:\n\n\t\t\tif exchange.i.eu > highest_gains[exchange.i.actor_name]:\n\t\t\t\thighest_gains[exchange.i.actor_name] = exchange.i.eu\n\t\t\t\thighest_gain_exchange[exchange.i.actor_name] = exchange.i\n\n\t\t\tif exchange.j.eu > highest_gains[exchange.j.actor_name]:\n\t\t\t\thighest_gains[exchange.j.actor_name] = exchange.j.eu\n\t\t\t\thighest_gain_exchange[exchange.j.actor_name] = exchange.j\n\n\t\tfor exchange in highest_gain_exchange.values():\n\t\t\texchange.is_highest_gain = True\n\n\t\tfor exchange in self.Exchanges:\n\n\t\t\tif exchange.i.is_highest_gain:\n\t\t\t\tif exchange.highest_gain == exchange.i.eu:\n\t\t\t\t\texchange.is_highest_highest = True\n\t\t\t\telse:\n\t\t\t\t\texchange.is_lowest_highest = True\n\n\t\t\tif exchange.j.is_highest_gain:\n\t\t\t\tif exchange.highest_gain == exchange.j.eu:\n\t\t\t\t\texchange.is_highest_highest = True\n\t\t\t\telse:\n\t\t\t\t\texchange.is_lowest_highest = True\n\n\t\tself.Exchanges.sort(\n\t\t\tkey=attrgetter(\"is_highest_highest\", \"is_lowest_highest\", \"highest_gain\", \"lowest_gain\"),\n\t\t\treverse=True)\n\n\tdef highest_gain(self) -> RandomRateExchange:\n\n\t\t\"\"\"\n\t\tReturns the exchange where **both** actors expect the highest gain.\n\t\tIn most of the cases there is only one actor of an exchange expecting the highest gain.\n\t\tThese need to be re-calculated in _recalculate_highest_\n\t\t\"\"\"\n\t\tself.sort_exchanges()\n\n\t\thighest = self.Exchanges.pop(0)\n\n\t\t# proceed or recalc\n\t\tif highest.i.is_highest_gain and highest.j.is_highest_gain:\n\t\t\treturn highest\n\t\telse:\n\t\t\treturn self._recalculate_highest()\n\n\tdef _recalculate_highest(self):\n\t\t\"\"\"\n\t\tThis method calculates the combinations of each highest gain and its second highest gain.\n\t\tThe gain of the highest exchange gets altered to the gain of the second.\n\t\tTherefore the highest exchange results in a lower gain for the actor,\n\t\twhich is the result of a higher shift towards the other actor or a lower demand.\n\n\t\t:return:\n\t\t\"\"\"\n\t\tself.sort_exchanges()\n\n\t\thighest = []\n\t\tleft_over = []\n\n\t\tfor exchange in self.Exchanges:\n\t\t\tif exchange.i.is_highest_gain or exchange.j.is_highest_gain:\n\t\t\t\thighest.append(exchange)\n\t\t\telse:\n\t\t\t\tleft_over.append(exchange)\n\n\t\tsecond_highest_gains = dict()\n\t\tsecond_highest_gain_exchange = dict()\n\n\t\tfor actor in self.Actors:\n\t\t\tsecond_highest_gains[actor] = 0\n\n\t\tfor exchange in left_over:\n\n\t\t\tif exchange.i.eu > second_highest_gains[exchange.i.actor_name]:\n\t\t\t\tsecond_highest_gains[exchange.i.actor_name] = exchange.i.eu\n\t\t\t\tsecond_highest_gain_exchange[exchange.i.actor_name] = exchange.i\n\n\t\t\tif exchange.j.eu > second_highest_gains[exchange.j.actor_name]:\n\t\t\t\tsecond_highest_gains[exchange.j.actor_name] = exchange.j.eu\n\t\t\t\tsecond_highest_gain_exchange[exchange.j.actor_name] = exchange.j\n\n\t\tfor exchange_pair in highest:\n\t\t\tif exchange_pair.i.is_highest_gain:\n\t\t\t\teu = second_highest_gain_exchange[exchange_pair.i.actor_name].eu\n\n\t\t\t\tdelta_eu = exchange_pair.i.eu - eu\n\n\t\t\t\tif exchange_pair.i.y == exchange_pair.j.x_demand:  # i moves to j completely on q\n\n\t\t\t\t\tdelta_nbs = delta_eu / exchange_pair.j.s\n\t\t\t\t\tnbs_adjusted = exchange_pair.j.nbs_1 - delta_nbs\n\t\t\t\t\tactor = exchange_pair.j\n\n\t\t\t\t\tposition = 
calculations.position_by_nbs(self.ActorIssues[actor.supply],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\texchange_actor=actor,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tnbs=nbs_adjusted,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tdenominator=self.nbs_denominators[actor.supply])\n\n\t\t\t\t\texchange_pair.j.x = position\n\n\t\t\t\telif exchange_pair.j.y == exchange_pair.i.x_demand:  # j moves to i completely on p\n\n\t\t\t\t\tdelta_nbs = (delta_eu / exchange_pair.i.s)\n\t\t\t\t\tnbs_adjusted = exchange_pair.i.nbs_1 - delta_nbs\n\t\t\t\t\tactor = exchange_pair.i\n\n\t\t\t\t\tposition = calculations.position_by_nbs(self.ActorIssues[actor.supply],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\texchange_actor=actor,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tnbs=nbs_adjusted,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tdenominator=self.nbs_denominators[actor.supply])\n\n\t\t\t\t\texchange_pair.i.x = position\n\n\t\t\t\telse:\n\t\t\t\t\t# TODO is there a third option that both exchanges are possible?\n\t\t\t\t\tpass\n\n\t\t\t\texchange_pair.i.eu -= delta_eu\n\t\t\t\texchange_pair.j.eu += delta_eu\n\n\t\t\t\t# sanity check: after the adjustment i.eu has to equal the second highest gain\n\t\t\t\tif abs(exchange_pair.i.eu - eu) > 1e-10:\n\t\t\t\t\traise Exception(\"Should be equal\")\n\n\n\n\t\t\t# # should be absolute?\n\t\t\t#\n\t\t\t#\n\t\t\t# delta_nbs = delta_eu * exchange_pair.i.s\n\t\t\t#\n\t\t\t# gain_j = (delta_nbs * exchange_pair.j.s)\n\t\t\t#\n\t\t\t# # exchange_pair.i.eu = exchange_pair.i.eu - delta_eu\n\t\t\t# # exchange_pair.j.eu = exchange_pair.j.eu + gain_j\n\t\t\t#\n\t\t\t# dominator =\n\t\t\t# increase = exchange_pair.i.x < exchange_pair.i.y\n\t\t\t# nbs_adjusted = 0\n\t\t\t#\n\t\t\t# if increase:\n\t\t\t# \tnbs_adjusted = exchange_pair.i.nbs_1 + delta_nbs\n\t\t\t# else:\n\t\t\t# \tnbs_adjusted = exchange_pair.i.nbs_1 - delta_nbs\n\t\t\t#\n\t\t\t# sum_c_s_x = 0\n\t\t\t#\n\t\t\t# x = 0\n\t\t\t#\n\t\t\t# for actor_name, actor_issue in self.ActorIssues[exchange_pair.i.supply].items():\n\t\t\t#\n\t\t\t# \tif actor_name == exchange_pair.i.actor_name:\n\t\t\t# \t\tx = actor_issue.position\n\t\t\t# \telse:\n\t\t\t# \t\tsum_c_s_x += actor_issue.salience * actor_issue.power * actor_issue.position\n\t\t\t#\n\t\t\t# yiq = (nbs_adjusted * dominator - sum_c_s_x) / (exchange_pair.i.c * exchange_pair.i.s)\n\t\t\t#\n\t\t\t# dq = calculations.exchange_ratio(abs(x - yiq), exchange_pair.i.s, exchange_pair.i.c, dominator)\n\t\t\t# dp = calculations.by_exchange_ratio(exchange_pair.i, dq)\n\t\t\t#\n\t\t\t# dq_old = exchange_pair.dq\n\t\t\t# dp_old = exchange_pair.dp\n\t\t\t#\n\t\t\t# eui_old = exchange_pair.i.eu\n\t\t\t# euj_old = exchange_pair.j.eu\n\t\t\t#\n\t\t\t# eui_new = eu\n\t\t\t# euj_new = exchange_pair.j.eu + gain_j\n\t\t\t#\n\t\t\t# if eui_old > eui_new and euj_old < euj_new:\n\t\t\t# \tprint(True)\n\t\t\t#\n\t\t\t# print(dp)\n","sub_path":"model/randomrate.py","file_name":"randomrate.py","file_ext":"py","file_size_in_byte":9171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"288754532","text":"import requests\nfrom bs4 import BeautifulSoup\nimport mysql.connector\n\n\n# including the database connection\n\nmydb = mysql.connector.connect(\n    host=\"localhost\",\n    user=\"root\",\n    passwd=\"adminek107\",\n    database=\"wynajem_warszawa\"\n)\n\nmycursor = mydb.cursor()\n\n\ndef ranged(range1, range2):\n    # scrape the listing pages from range1 to range2 and store the flat links\n    for i in range(range1, range2):\n\n        # the page number follows the trailing 'p' of the listing url\n        url = 'https://www.gumtree.pl/s-mieszkania-i-domy-do-wynajecia/warszawa/v1c9008l3200008p' + str(i)\n\n        gumtree = requests.get(url)\n\n        gumtree_content = gumtree.content\n\n        soup = BeautifulSoup(gumtree_content, 'html.parser')\n\n        print(url)\n\n        link_list = []\n\n        for link in soup.find_all('a'):\n            
link_list.append(link.get('href'))\n\n        flat_link = []\n        if None in link_list:\n            link_list.remove(None)\n\n        for link in link_list:\n            if \"a-mieszkania\" in link:\n                flat_link.append(\"https://www.gumtree.pl\" + link)\n\n        for flat in flat_link:\n            sql = \"INSERT INTO gumtree (link, opis) VALUES (%s, %s)\"\n            val = (flat, \"test\")\n            mycursor.execute(sql, val)\n            mydb.commit()\n\n            print(mycursor.rowcount, \"record inserted.\")\n\n\nif __name__ == '__main__':\n    ranged(1, 2)  # example page range; adjust as needed\n","sub_path":"src/gumtree_rest.py","file_name":"gumtree_rest.py","file_ext":"py","file_size_in_byte":1026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"506611714","text":"'''\nCreated on 2012-5-7\n\n@author: leonardo\n'''\nimport unittest\n\n\nclass Test(unittest.TestCase):\n\n\n    def testfoo(self):\n        self.assertEqual(1, 1)\n        self.assertEqual(\"a\", \"b\")\n\n\nif __name__ == \"__main__\":\n    #import sys;sys.argv = ['', 'Test.testfoo']\n    unittest.main()","sub_path":"src/module_study/Unittest.py","file_name":"Unittest.py","file_ext":"py","file_size_in_byte":283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"494437530","text":"import os\nfrom treedict import TreeDict\n\nfiledir = os.path.join(os.path.dirname(__file__))\npaths = TreeDict()\n\n'''\n# Server Paths (but these don't work on Acropolis)\npaths.abc = os.path.join(os.environ['klmshare'], 'Data_Central', 'Abecedarian', 'data', 'ABC-CARE', 'extensions', 'cba-iv', 'append-abccare_iv.dta')\npaths.nlsy = os.path.join(os.environ['klmshare'], 'Data_Central', 'data-repos', 'nlsy', 'extensions', 'abc-match-nlsy', 'nlsy-abc-match.dta')\npaths.cnlsy = os.path.join(os.environ['klmshare'], 'Data_Central', 'data-repos', 'nlsy', 'extensions', 'abc-match-cnlsy', 'cnlsy-abc-match.dta')\npaths.psid = os.path.join(os.environ['klmshare'], 'Data_Central', 'data-repos', 'psid', 'extensions', 'abc-match')\n'''\n\npaths.abc = os.path.join(filedir, '..', '..', '..', '..', 'data', 'abccare', 'extensions', 'cba-iv', 'append-abccare_iv.dta')\npaths.cnlsy = os.path.join(filedir, '..', '..', '..', '..', 'data', 'nlsy', 'extensions', 'abc-match-cnlsy', 'cnlsy-abc-match.dta')\npaths.psid = os.path.join(filedir, '..', '..', '..', '..', 'data', 'psid', 'extensions', 'abc-match', 'psid-abc-match.dta')\npaths.nlsy = os.path.join(filedir, '..', '..', '..', '..', 'data', 'nlsy', 'extensions', 'abc-match-nlsy', 'nlsy-abc-match.dta')\n\npaths.data = os.path.join(filedir, 'hdf5')\npaths.psid_bsid = os.path.join(filedir, 'psid_sampling', 'psid_bsid.dta')\npaths.nlsy_bsid = os.path.join(filedir, 'nlsy_sampling', 'samples_nlsy.csv')\npaths.cnlsy_bsid = os.path.join(filedir, 'nlsy_sampling', 'samples_cnlsy.csv')\n\npaths.rslts = os.path.join(filedir, 'rslt', 'projections')\n","sub_path":"CBA-Bonn-HO_2019-04-28a_jlg/scripts/abccare/cba/income/paths.py","file_name":"paths.py","file_ext":"py","file_size_in_byte":1567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"591738457","text":"
#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport turtle\nfrom alphabet import alphabet\n\ncursor = turtle.Turtle()\nwindow = turtle.Screen()\nwindow.bgcolor(\"#000000\")\ncursor.pensize(3)\n\n\ndef displayMessage(message, fontSize, color, x, y):\n    cursor.color(color)\n    message = message.upper()\n\n    for character in message:\n        if character in alphabet:\n            letter = alphabet[character]\n            cursor.penup()\n            for dot in letter:\n                cursor.goto(x + dot[0] * fontSize, y + dot[1] * fontSize)\n                cursor.pendown()\n\n            x += fontSize\n\n        if character == \" \":\n            x += fontSize\n        x += characterSpacing\n\n\nfontSize = 27\ncharacterSpacing = 7\nfontColor = \"#4688F1\"\n\nmessage = \"testing is power\"\ndisplayMessage(message, fontSize, fontColor, -270, 0)\n","sub_path":"Python. Быстрый старт/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"126586603","text":"from turtle import *\r\nfrom pylab import *\r\n\r\nbgcolor('black')\r\ncolor('cyan') # Sets the colour\r\nshape('turtle') # Gives the object a turtle shape\r\nspeed(0) # Changes the speed of the object\r\nN = 1000 # Number of times the pointer moves\r\navstand = 10 # How far the pointer moves each time\r\n\r\nfor i in range(N):\r\n    vinkel = randint(0,360) # Random angle each time\r\n    right(vinkel) # Turns by the random angle\r\n    forward(avstand) # Moves forward","sub_path":"nettverksdag2019/brownske.py","file_name":"brownske.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"379335181","text":"def run(x):\n    nums = []\n    for i in range(1, x):\n        if i % 2 == 0:\n            nums.append(-i)\n        else:\n            nums.append(i)\n    total = sum(nums)\n    print(total)\n\n\nrun(100)","sub_path":"PythonProjects/各种练习/1+2-3+4..100的和.py","file_name":"1+2-3+4..100的和.py","file_ext":"py","file_size_in_byte":217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"217359900","text":"research_stopwords = [\"paper\",\"research\",\"researcher\",\"researchers\",\"result\",\"results\",\"method\",\"methods\",\"methodology\",\"purpose\",\n                      \"suggest\",\"suggests\",\"suggested\",\"indicate\",\"show\",\"author\",\"authors\",\"authored\",\"discuss\",\"discusses\",\"discussed\",\n                      \"finding\", \"findings\",\"detail\",\"example\",\"examine\",\"examined\",\"examining\",\"significant\",\n                      \"concluded\",\"rigorous\",\"observed\",\"observation\",\"observations\",\"purpose\",\"variable\",\"variables\",\"evidence\",\n                      \"propose\",\"purposes\",\"proposed\",\"suggested\",\"question\",\"questions\",\"questionnaire\",\n                      \"questionnaires\",\"survey\",\"survey\",\"article\",\"practice\",\"output\",\n                      \"hypothesis\",\"hypotheses\",\"address\",\"addresses\",\"university\",\"reveal\",\"revealed\",\"discussion\",\n                      \"publish\",\"published\",\"objective\",\"objectives\",\"conclusion\",\"conclusions\",\"background\",\"study\",\"studies\",\"studied\",\n                      \"studies\",\"based\",\"presented\",\"including\",\"provide\",\"provides\",\"provided\",\"support\",\n                      \"improve\",\"improved\",\"discusses\",\"collected\",\"identify\",\"identified\",\"related\",\"conducted\",\n                      \"dataset\",\"problem\",\"implement\",\"collect\",\"experiment\",\"procedure\",\"show\",\n                      \"analysis\",\"literature\",\"overview\",\"present\",\"introduction\",\"introduce\",\"introducing\",\n                      
\"illustrate\",\"scope\",\"tradeoff\",\"trade-off\",\"scientist\",\"abstract\",\"summary\",\"bias\",\"imply\",\"implied\",\n \"implication\",\"design\",\"approach\",\"compare\",\"comparison\",\"report\",\"document\",\"input\",\n \"participant\",\"participants\",\"factor\",\"factors\"]\n\ngeneral_stopwords = [\"wide\",\"widely\",\"can\",\"use\",\"user\",\"used\",\"etc\",\"percent\",\"percentage\",\"include\",\n \"maximum\",\"minimum\",\"NA\"]\n\ncountries_stopwords = [\"china\",\"united states\",\"american\",\"australia\",\"european\",\"canada\",\"sweden\",\"netherlands\",\n \"germany\"]\n \ncontinents_stopwords = [\"europe\",\"european\",\"asia\",\"asian\",\"africa\",\"african\"]\n \nnumbers_stopwords = [\"one\",\"two\",\"three\",\"four\",\"five\",\"six\",\"seven\",\"eight\",\"nine\",\"ten\",\n \"hundred\",\"thousand\",\"million\",\"ii\",\"iii\"]\n\nlogical_stopwords = [\"false\",\"true\"]\n \nmonths_stopwords = [\"january\",\"february\",\"march\",\"april\",\"june\",\"july\",\"august\",\"september\",\n \"october\",\"november\",\"december\"]\n \ntime_stopwords = [\"year\",\"years\",\"day\",\"days\",\"week\",\"weeks\",\"today\",\"month\",\"months\",\"decade\",\"decades\"]\n \nordinal_stopwords = [\"first\",\"primary\",\"second\",\"secondary\",\"third\",\"fourth\",\n \"fifth\",\"sixth\",\"seventh\",\"eighth\",\"ninth\",\"tenth\",\n \"firstly\",\"secondly\",\"thirdly\",\"fourthly\",\"fifthly\"]\n \njudgement_stopword = [\"good\",\"better\",\"best\",\"superior\"]\n \ndirections_stopwords = [\"north\",\"south\",\"east\",\"west\",\"northern\",\"eastern\",\"western\",\"southern\"]\n\nuniversity_stopwords =[\"dr\",\"dr.\",\"professor\",\"doctor\",\"postdoctoral\"]\nNSF_stopwords =[\"project\",\"projects\",\"nsf\"]","sub_path":"other.py","file_name":"other.py","file_ext":"py","file_size_in_byte":3435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"344134203","text":"from collections import namedtuple\n\nfrom dagster import check\nfrom dagster.core.definitions.pipeline import PipelineDefinition\nfrom dagster.core.types.dagster_type import DagsterType, DagsterTypeKind\nfrom dagster.serdes import whitelist_for_serdes\nfrom dagster.utils.backcompat import canonicalize_backcompat_args\n\n\ndef build_dagster_type_namespace_snapshot(pipeline_def):\n check.inst_param(pipeline_def, 'pipeline_def', PipelineDefinition)\n return DagsterTypeNamespaceSnapshot(\n {dt.key: build_dagster_type_snap(dt) for dt in pipeline_def.all_dagster_types()}\n )\n\n\ndef build_dagster_type_snap(dagster_type):\n check.inst_param(dagster_type, 'dagster_type', DagsterType)\n return DagsterTypeSnap(\n kind=dagster_type.kind,\n key=dagster_type.key,\n name=dagster_type.name,\n display_name=dagster_type.display_name,\n description=dagster_type.description,\n is_builtin=dagster_type.is_builtin,\n type_param_keys=dagster_type.type_param_keys,\n loader_schema_key=dagster_type.loader_schema_key,\n materializer_schema_key=dagster_type.materializer_schema_key,\n )\n\n\n@whitelist_for_serdes\nclass DagsterTypeNamespaceSnapshot(\n namedtuple('_DagsterTypeNamespaceSnapshot', 'all_dagster_type_snaps_by_key')\n):\n def __new__(cls, all_dagster_type_snaps_by_key):\n return super(DagsterTypeNamespaceSnapshot, cls).__new__(\n cls,\n all_dagster_type_snaps_by_key=check.dict_param(\n all_dagster_type_snaps_by_key,\n 'all_dagster_type_snaps_by_key',\n key_type=str,\n value_type=DagsterTypeSnap,\n ),\n )\n\n def get_dagster_type_snap(self, key):\n check.str_param(key, 'key')\n return 
self.all_dagster_type_snaps_by_key[key]\n\n\n@whitelist_for_serdes\nclass DagsterTypeSnap(\n    namedtuple(\n        '_DagsterTypeSnap',\n        'kind key name description display_name is_builtin type_param_keys '\n        'loader_schema_key materializer_schema_key ',\n    )\n):\n    def __new__(\n        cls,\n        kind,\n        key,\n        name,\n        description,\n        display_name,\n        is_builtin,\n        type_param_keys,\n        loader_schema_key=None,\n        materializer_schema_key=None,\n        input_hydration_schema_key=None,\n        output_materialization_schema_key=None,\n    ):\n        return super(DagsterTypeSnap, cls).__new__(\n            cls,\n            kind=check.inst_param(kind, 'kind', DagsterTypeKind),\n            key=check.str_param(key, 'key'),\n            name=check.opt_str_param(name, 'name'),\n            display_name=check.str_param(display_name, 'display_name'),\n            description=check.opt_str_param(description, 'description'),\n            is_builtin=check.bool_param(is_builtin, 'is_builtin'),\n            type_param_keys=check.list_param(type_param_keys, 'type_param_keys', of_type=str),\n            loader_schema_key=canonicalize_backcompat_args(\n                check.opt_str_param(loader_schema_key, 'loader_schema_key'),\n                'loader_schema_key',\n                check.opt_str_param(input_hydration_schema_key, 'input_hydration_schema_key'),\n                'input_hydration_schema_key',\n                '0.10.0',\n            ),\n            materializer_schema_key=canonicalize_backcompat_args(\n                check.opt_str_param(materializer_schema_key, 'materializer_schema_key'),\n                'materializer_schema_key',\n                check.opt_str_param(\n                    output_materialization_schema_key, 'output_materialization_schema_key'\n                ),\n                'output_materialization_schema_key',\n                '0.10.0',\n            ),\n        )\n","sub_path":"python_modules/dagster/dagster/core/snap/dagster_types.py","file_name":"dagster_types.py","file_ext":"py","file_size_in_byte":3705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"28369687","text":"import urllib.parse\nimport urllib.request\nimport random,time\n\nclass TiebaSpyder():\n\n    def __init__(self):\n        header_list = [{\"User-Agent\":\"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:6.0) Gecko/20100101 Firefox/6.0\"},\n                       {\"User-Agent\":\"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50\"},\n                       {\"User-Agent\":\"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/14.0.835.163 Safari/535.1\"}]\n        # pick a random user-agent\n        headers = random.choice(header_list)\n        self.headers = headers\n        baseurl = \"https://tieba.baidu.com/f?\"\n        self.baseurl = baseurl\n\n    # fetch a page\n    def readPage(self,url):\n        req = urllib.request.Request(url,headers=self.headers)\n        res = urllib.request.urlopen(req)\n        html = res.read().decode(\"utf-8\")\n        return html\n\n    # write the page to a file\n    def writePage(self,filename,html):\n        with open(filename,\"w\",encoding=\"utf-8\") as f:\n            f.write(html)\n\n    # main routine\n    def work(self):\n        name = input(\"Enter the Tieba forum name: \")\n        begin = int(input(\"Enter the start page: \"))\n        end = int(input(\"Enter the end page: \"))\n        # url-encode the forum name\n        kw = {\"kw\":name}\n        kw = urllib.parse.urlencode(kw)\n\n        # build the url, send the request, get the response\n        for i in range(begin,end+1):\n            # build the url\n            pn = (i-1) * 50\n            url = self.baseurl+kw +\"&pn=\" + str(pn)\n            time.sleep(2)\n            # send the request\n            html = self.readPage(url)\n            filename = name+\"_page_\"+str(i)+\".html\"\n            print(\"Fetching page \"+str(i))\n            self.writePage(filename, html)\n            print(\"Page \"+str(i)+\" fetched successfully\")\n            print(\"*\"*30)\n\n\nif __name__ == \"__main__\":\n    T = TiebaSpyder()\n    T.work()
\n\n\n","sub_path":"贴吧爬取类.py","file_name":"贴吧爬取类.py","file_ext":"py","file_size_in_byte":2055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"206014595","text":"import bpy\r\nfrom ... preferences import get_preferences\r\nfrom ...ui_framework.operator_ui import Master\r\n\r\n\r\nclass HOPS_OT_MOD_Skin(bpy.types.Operator):\r\n bl_idname = \"hops.mod_skin\"\r\n bl_label = \"Add skin Modifier\"\r\n bl_options = {'REGISTER', 'UNDO'}\r\n bl_description = \"\"\"LMB - Add skin Modifier\r\nLMB + CTRL - Add new skin Modifier\r\n\r\n\"\"\"\r\n called_ui = False\r\n\r\n def __init__(self):\r\n\r\n HOPS_OT_MOD_Skin.called_ui = False\r\n\r\n @classmethod\r\n def poll(cls, context):\r\n return any(o.type == 'MESH' for o in context.selected_objects)\r\n\r\n def invoke(self, context, event):\r\n for object in [o for o in context.selected_objects if o.type == 'MESH']:\r\n if event.ctrl:\r\n self.add_skin_modifier(object)\r\n else:\r\n if not self.skin_modifiers(object):\r\n self.add_skin_modifier(object)\r\n\r\n # Operator UI\r\n if not HOPS_OT_MOD_Skin.called_ui:\r\n HOPS_OT_MOD_Skin.called_ui = True\r\n\r\n ui = Master()\r\n draw_data = [\r\n [\"SKIN\"],\r\n [\"Skin Modifier added\"]]\r\n ui.receive_draw_data(draw_data=draw_data)\r\n ui.draw(draw_bg=get_preferences().ui.Hops_operator_draw_bg, draw_border=get_preferences().ui.Hops_operator_draw_border)\r\n\r\n return {\"FINISHED\"}\r\n\r\n @staticmethod\r\n def skin_modifiers(object):\r\n return [modifier for modifier in object.modifiers if modifier.type == \"SKIN\"]\r\n\r\n def add_skin_modifier(self, object):\r\n skin_mod = object.modifiers.new(name=\"skin\", type=\"SKIN\")\r\n skin_mod.branch_smoothing = 0\r\n skin_mod.use_smooth_shade = True\r\n skin_mod.use_x_symmetry = False\r\n skin_mod.use_y_symmetry = False\r\n skin_mod.use_z_symmetry = False\r\n","sub_path":"operators/modifiers/skin.py","file_name":"skin.py","file_ext":"py","file_size_in_byte":1810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"272239540","text":"import tensorflow as tf\n\ndef get_mnist_classification_variables():\n # here we begin creation of a new model, involving cnn\n # weights are initialized with a small amount of noise to break symmetry, and prevent 0 gradients\n def weight_variable(shape):\n initial = tf.truncated_normal(shape, stddev=0.1)\n return tf.Variable(initial)\n # since neurons are ReLU, initialize them with a slight positive bias to avoid \"dead neurons\"\n def bias_variable(shape):\n initial = tf.constant(0.1, shape=shape)\n return tf.Variable(initial)\n\n def conv2d(x, W):\n return tf.nn.conv2d(x, W, strides=[1,1,1,1], padding=\"SAME\")\n\n def max_pool_2x2(x):\n return tf.nn.max_pool(x, ksize=[1,2,2,1], strides=[1,2,2,1], padding=\"SAME\")\n\n # x is the images, while y is the result\n x = tf.placeholder(tf.float32, shape=[None, 28,28,1])\n y = tf.placeholder(tf.float32, shape=[None, 10])\n\n # implementation of the first layer\n # compute 32 features for each 5x5 patch\n W_conv1 = weight_variable([5,5,1,32]) # (patch_size, patch_size, # input channels, # output channels)\n b_conv1 = bias_variable([32])\n # reshape x to a 4d tensor, with 2nd and 3rd dimensions being width and height, and 4th dimension being number of colour channels\n x_image = tf.reshape(x, [-1,28,28,1])\n\n # convolve x_image with W, then add b, apply the ReLU function, then max pool\n h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)\n h_pool1 = max_pool_2x2(h_conv1) # 
this reduces image size to 14x14\n\n    # implementation of the second layer\n    # 64 features for each 5x5\n    W_conv2 = weight_variable([5,5,32,64])\n    b_conv2 = bias_variable([64])\n\n    h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)\n    h_pool2 = max_pool_2x2(h_conv2) # 7x7\n\n    # implementation of fully-connected layer with 1024 neurons - this processes based on the entire image\n    W_fc1 = weight_variable([7*7 * 64, 1024]) # 64 layers of 7x7 image\n    b_fc1 = bias_variable([1024])\n\n    h_pool2_flat = tf.reshape(h_pool2, [-1,7*7*64]) # flatten the image into a single line\n    h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)\n\n    # dropout - reduces overfitting\n    # dropout randomly selects units in a neural network, and temporarily removes all incoming and outgoing connections\n    keep_prob = tf.placeholder(tf.float32)\n    h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)\n\n    # layer for softmax\n    W_fc2 = weight_variable([1024, 10])\n    b_fc2 = bias_variable([10])\n\n    y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2\n\n    # differences from beginner\n    # 1. replace gradient descent with ADAM\n    # 2. include keep_prob in the parameters of feed_dict to control dropout rate\n    # 3. add logging to every 100th iteration in the process\n    # 4. use tf.Session instead of tf.InteractiveSession - separates the process of creating the graph from evaluating the graph\n\n    cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=y_conv))\n    train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)\n\n    correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y,1))\n    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\n    return {\n        \"x\": x,\n        \"y\": y,\n        \"keep_prob\": keep_prob,\n        \"y_conv\": y_conv,\n        \"train_step\": train_step,\n        \"accuracy\": accuracy\n    }\n","sub_path":"remake/utils/mnist_classification_without_null_variables.py","file_name":"mnist_classification_without_null_variables.py","file_ext":"py","file_size_in_byte":3410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"238820654","text":"#lecture 36\n#while loops\nx = 0\n#while x is less than 5\nwhile x < 5:\n    print(f\"the current value of x is : {x}\")\n    x = x + 1\n\n#breaks continues and pass\n\nx = [1,2,3]\n# pass is used by programmers as a placeholder\n# they know they want a loop there, but are not ready\n# to write that code just yet.\nfor i in x:\n    pass\n\nmystring = \"sammy\"\nfor letter in mystring:\n    if letter == 'a':\n        # if the letter is a it will skip a\n        # and continue going through the loop\n        continue\n    print(letter)\n\n\nmystring = \"sammy\"\nfor letter in mystring:\n    if letter == 'a':\n        # if the letter is a it will completely stop the loop\n        break\n    print(letter)\n\nx = 0\n\nwhile x < 5:\n    if x == 2:\n        break\n    print(x)\n    x = x + 1\n","sub_path":"learning/lecture36.py","file_name":"lecture36.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"79476233","text":"\"\"\"\n[Known problems with the current model]\n1 During data preprocessing, data augmentation is applied to the 6 short segments, but each segment has a different length,\n  so the overlapping middle part differs in length; in other words, the augmentation factor is not the same\n2 In run.py, in the original code, the horizontal axis of the plots is actually iteration, not epoch\n\n\"\"\"\n#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport os\nimport sys\n# import time\nimport numpy as np\nfrom scipy.io import loadmat\nimport collections\nimport json\nimport shutil\nimport itertools\nimport tensorflow as tf\nfrom sklearn import metrics\nimport matplotlib.pyplot as plt\n\nfrom CNN_model import CNN  # 
import the CNN class\nfrom Config import Config  # import the Config class from the configuration file\n\n# an alternative is: with tf.device('/gpu:0')\nos.environ['CUDA_VISIBLE_DEVICES'] = Config.env  # choose which GPU to use, env = '0'\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # verbosity of TensorFlow's own log output: \"0\" INFO, \"1\" WARNING, \"2\" ERROR, \"3\" FATAL\n\n# train_tensorboard_dir = 'logdir\\\\train' # TensorBoard save path; the visualization shows curves plotted during training, the network structure, etc.\n# epoch_valid_tensorboard_dir = 'logdir\\\\epoch_valid' # validation per epoch\n# iter_train_tensorboard_dir = 'logdir\\\\iter_train' # training per iteration\n\n# file naming differs on Linux\n# epoch_valid_tensorboard_dir = 'logdir/epoch_valid' # validation per epoch\n# iter_train_tensorboard_dir = 'logdir/iter_train' # training per iteration\ntensorboard_dir = 'logdir'\nepoch_valid_tensorboard_dir = 'logdir/epoch_valid'  # validation per epoch\niter_train_tensorboard_dir = 'logdir/iter_train'  # training per iteration\n\nsave_dir = 'checkpoints'  # checkpoint path, model snapshots taken during training\nsave_path = os.path.join(save_dir, 'best_validation')  # save path for the best validation result\nsave_result_dir = 'CNN_Model_results'  # results folder\n\n\ndef normalize(array):\n    \"\"\"\n    Normalize every dimension over all samples, i.e. the normalization is applied once per feature dimension = 2048 (every point is one dimension).\n    After normalization the data follows a standard normal distribution, with mean 0 and standard deviation 1.\n    The z-score method suits cases where the maximum and minimum of attribute A are unknown, or where outliers fall beyond the value range.\n    It requires the raw data to be approximately Gaussian; otherwise the normalization works poorly.\n    \"\"\"\n    # plt.figure(1)\n    # plt.subplot(2,1,1)\n    # sample_index = 1 # index of the sample to display\n    # plt.plot(array[sample_index,:])\n    print(\"array.shape before normalize:\", array.shape)  # (14400, 2048)\n    # print(\"array.before normalize\", array[1:5,:])\n    column_mean = np.mean(array, axis=0)  # mean of each column, i.e. of every dimension over all samples\n    sigma = np.std(array, axis=0)\n    print(\"column_mean.shape\", column_mean.shape)  # (2048,) computed per dimension, 2048 dimensions in total\n    print(\"sigma.shape\", sigma.shape)  # (2048,)\n\n    norm_array = ((array - column_mean) / sigma)\n    # print(\"array.shape after normalize:\" , norm_array.shape)  # (14400, 2048)\n    # plt.subplot(2,1,2)\n    # plt.plot(norm_array[sample_index,:])\n    # print(\"array.after normalize\", norm_array[1:5,:])\n    # plt.show()\n    return norm_array\n\n\n# plot and save\ndef plot_save(array, fig_path=None, xlabel='epochs', ylabel='loss value', dpi=400):\n    '''\n    Plot and Save Figure\n    '''\n    # Plot Array\n    fig, ax = plt.subplots()\n    ax.plot(array)\n    ax.set_xlabel(xlabel)\n    ax.set_ylabel(ylabel)\n    plt.tight_layout()\n    plt.savefig(os.path.join(fig_path, (ylabel + '.png')), dpi=dpi)\n    # try:\n    # plt.savefig(os.path.join(fig_path, (ylabel + '.svg')))\n    # plt.savefig(os.path.join(fig_path, (ylabel + '.pfg')))\n    # except: pass\n    plt.close()\n\n\ndef feed_data(x_batch, y_batch, dropout_keep_prob_fc, is_training):\n    \"\"\"\n    Store the feed data as a dict.\n    :param x_batch: model input\n    :param y_batch: ground-truth labels of the model\n    :param dropout_keep_prob_fc: neuron keep probability\n    :param is_training: whether we are training or testing; True while training, False while testing\n    :return: feed_dict\n    \"\"\"\n    feed_dict = {model.input_x: x_batch,\n                 model.real_label: y_batch,\n                 model.dropout_keep_prob_fc: dropout_keep_prob_fc,\n                 model.is_training: is_training}\n    return feed_dict\n\n\ndef batch_iter(data, label, batch_size):  # similar to the next_batch built into the mnist dataset: fetch the next batch\n    '''\n    generate batch data and return an iterator.\n    -------------------------------------------------------------------------------\n    '''\n    data_len = len(label)\n    num_batch = int((data_len - 1) / batch_size) + 1\n\n    for i in range(num_batch):\n        start_id = i * batch_size\n        end_id = min((i + 1) * batch_size, data_len)\n        # len(data)\n        yield data[start_id:end_id], label[start_id:end_id]\n        # yield is a return-like keyword: on each iteration it returns the value after yield.\n        # key point: the next iteration resumes from the code right after the yield of the previous one.\n\n\ndef evaluate(sess, merged, x_valid, y_valid):\n    \"\"\"\n    
After training, evaluate the validation data (or the test set): its average accuracy and average loss over all batches\n    :param sess:\n    :param x_valid: x_valid of the validation set (None, 2048)\n    :param y_valid: y_valid of the validation set (None, 3)\n\n    :return: the post-softmax probability distributions of all batches, collected (8640, 3)\n    \"\"\"\n    data_len = len(y_valid)  # batch_size = 1, data_len = num_batch\n    total_loss = 0.0\n    total_acc = 0.0\n    valid_y_pred_results = []\n    batch_valid = batch_iter(x_valid, y_valid,\n                             1)  # batch_size = 1 while validating & testing, num_batch = data_len = len(y_valid)\n    for x_batch, y_batch in batch_valid:\n        feed_dict = feed_data(x_batch, y_batch, 1.0,\n                              False)  # EVAL: keep_prob=1.0, validating & testing are not during training.\n        summery, valid_loss, valid_acc, valid_y_pred_batch = sess.run([merged, model.loss, model.acc, model.pred_label],\n                                                                      feed_dict=feed_dict)\n        total_loss += valid_loss\n        total_acc += valid_acc\n        valid_y_pred_results.append(valid_y_pred_batch.tolist()[0])  # tolist()[0] turns the (8640, 1, 3) array into a (8640, 3) list\n\n    # average accuracy and average loss of the validation set over all batches\n    valid_avg_loss = float(total_loss / data_len)\n    valid_avg_acc = float(total_acc / data_len)\n    # print(\"valid_y_pred_batch:\",valid_y_pred_batch)\n    # probability distribution after softmax [[0.85549366 0.1202668  0.02423953]]\n    # print(\"valid_y_pred_batch.shape:\",valid_y_pred_batch.shape)  # (1, 3)\n    print(\" valid_y_pred_results.shape:\", np.array(valid_y_pred_results).shape)  # (8640, 3)\n\n    return summery, valid_avg_loss, valid_avg_acc, valid_y_pred_results\n\n\n'''Configure TensorBoard. When retraining, delete the tensorboard folder first, otherwise the graphs are overwritten.'''\n\n\ndef run_main(x_train_raw, y_train_raw, train_ratio):\n    \"\"\"\n    Main entry point.\n\n    Model training function; after training for a while the model is validated, and if there is no improvement\n    within a certain time, training is stopped early.\n    On every call the data set is shuffled first and then split into training and validation sets,\n    with the training set taking the fraction train_ratio.\n    Training/validation split ratio 3:1; overall, training : validation : test = 6:2:2 = 3 : 1 : 1\n    :param x_train_raw:\n    :param y_train_raw:\n    :param train_ratio: 0.75, training/validation split ratio 3:1\n    :return:\n    \"\"\"\n    print(\"x_train_raw.shape\", x_train_raw.shape)  # (34560, 2048)\n    print(\"y_train_raw.shape\", y_train_raw.shape)  # (34560, 3)\n    split_train = int(len(y_train_raw) * train_ratio)\n    x_train = x_train_raw[: split_train, :]  # slice (rows: 0 ~ split_train, columns: all)\n    x_valid = x_train_raw[split_train:, :]  # slice (rows: split_train ~ last row, columns: all)\n    y_train = y_train_raw[: split_train, :]\n    y_valid = y_train_raw[split_train:, :]\n    print(\"x_train.shape\", x_train.shape)  # (25920, 2048)\n    print(\"y_train.shape\", y_train.shape)  # (25920, 3)\n    print(\"x_valid.shape\", x_valid.shape)  # (8640, 2048)\n    print(\"y_valid.shape\", y_valid.shape)  # (8640, 3)\n\n    print('''=========================Configuring TensorBoard and Saver...=============================''')\n    # =============================================================================================\n\n    # configure the Saver\n    saver = tf.train.Saver()\n    # create the session\n    # GPU Configuration\n    gpu_config = tf.ConfigProto(log_device_placement=False, allow_soft_placement=True)\n    # with log_device_placement = True you can see which device (which CPU or GPU) each operation and Tensor is assigned to; it is printed to the terminal.\n    # to avoid errors from unplaceable operations, allow_soft_placement=True lets tf automatically pick an existing, available device.\n\n    gpu_config.gpu_options.allow_growth = True  # let TensorFlow allocate GPU memory dynamically at runtime, as much as needed\n    # global variable initialization\n    init = tf.global_variables_initializer()\n    with tf.Session(config=gpu_config) as sess:\n        sess.run(init)\n        valid_writer.add_graph(sess.graph)\n        train_writer.add_graph(sess.graph)\n        print('===================================Training and evaluating...================================')\n        best_acc_val = 0.0  # best validation accuracy\n        last_improved = 0  # records the last epoch with an improvement\n        require_improvement = Config.require_improvement  # stop training early after more than require_improvement rounds without improvement\n\n        train_loss_list = []  # loss value list while 
training\n        valid_loss_list = []  # loss value list while validating\n        train_acc_list = []  # acc value list while training\n        valid_acc_list = []  # acc value list while validating\n\n        train_iteration_loss_list = []\n        valid_iteration_loss_list = []\n        train_iteration_acc_list = []\n        valid_iteration_acc_list = []\n        y_pred_list = []\n\n        flag = False\n        for epoch in range(Config.num_epochs):  # num_epochs = 300\n            # print(\"The length of x_train is :\", len(x_train))  # 25920\n            num_batch = int(len(x_train) / (Config.batch_size))  # num_batch = num_iterations\n            # print(\"The number of batchs per epoch:\", num_batch)  # 810\n            # print('Epoch:', epoch + 1)\n            # batch_size = 32\n            batch_train = batch_iter(x_train, y_train, Config.batch_size)  # fetch the \" next_batch \"\n            # loss_train_epoch = []\n            # acc_train_epoch = []\n            num_iteration = 0  # counts the iterations per epoch = number of batches (num_batch)\n            avg_train_loss = 0\n            avg_train_acc = 0\n            ''' =================================== Training ================================================='''\n            for x_batch, y_batch in batch_train:\n                num_iteration = num_iteration + 1\n\n                feed_dict = feed_data(x_batch, y_batch, Config.dropout_keep_prob_fc, True)  # Training\n                # signature: feed_data(x_batch, y_batch, dropout_keep_prob_fc, is_training)\n                train_summery, _optim, loss_train, acc_train, y_pred_per_batch = sess.run([merged,\n                                                                                           model.optim,\n                                                                                           model.loss,\n                                                                                           model.acc,\n                                                                                           model.pred_label],\n                                                                                          feed_dict=feed_dict)\n                train_iteration_loss_list.append(float(loss_train))  # append loss\n                train_iteration_acc_list.append(float(acc_train))  # append acc\n                y_pred_list.append(y_pred_per_batch)\n\n                train_writer.add_summary(train_summery, ((epoch * num_batch) + num_iteration))\n\n                avg_train_loss = avg_train_loss + loss_train\n                avg_train_acc = avg_train_acc + acc_train\n                print(\"Epoch: %03d/%03d batch: %03d/%03d train_loss: %.9f train_acc: %.9f\" %\n                      (epoch, Config.num_epochs, num_iteration, num_batch, loss_train, acc_train))\n\n            print(\"The number of num_iteration is:\", num_iteration)\n            avg_train_loss = avg_train_loss / num_batch  # average loss of the training set per epoch, or divide by num_iteration\n            avg_train_acc = avg_train_acc / num_batch  # average accuracy of the training set per epoch\n\n            # written to tensorboard once per epoch\n            # train_writer.add_summary(train_summery, epoch)  # save the last summary, write it to disk\n\n            train_loss_list.append(float(avg_train_loss))  # append loss\n            train_acc_list.append(float(avg_train_acc))  # append acc\n\n            if epoch % Config.display_epoch == 0:  # print the training results to the screen every display_epoch epochs\n                print(\n                    \" The average training loss and accuracy:================================================================= \")\n                print(\"Epoch: %03d/%03d train_loss: %.9f train_acc: %.9f\" % (\n                    epoch, Config.num_epochs, avg_train_loss, avg_train_acc))\n\n            ''' =================================== Valid ================================================='''\n            if epoch % Config.valid_epoch == 0:  # every valid_epoch epochs, evaluate on the validation set and print the result once\n\n                valid_summery, avg_valid_loss, avg_valid_acc, y_pred_valid = evaluate(sess, merged, x_valid,\n                                                                                      y_valid)  # evaluate the training with the validation set\n                valid_loss_list.append(float(avg_valid_loss))  # append loss\n                valid_acc_list.append(float(avg_valid_acc))  # append acc\n                # written to tensorboard once per epoch\n                valid_writer.add_summary(valid_summery, epoch)  # save the last summary, write it to disk\n\n                print(\"Epoch: %03d/%03d valid_loss: %.9f valid_acc: %.9f\" % (\n                    epoch, Config.num_epochs, avg_valid_loss, avg_valid_acc))\n                if epoch % Config.save_epoch == 0:\n                    if avg_valid_acc > best_acc_val:\n                        # save the best result\n                        best_acc_val = avg_valid_acc\n                        last_improved = epoch\n                        print('model save in path: ', save_path)\n                        saver.save(sess=sess, save_path=save_path)\n                        print('=====================
 Valid best Save success!================================')\n\n            if epoch - last_improved > require_improvement:\n                # validation accuracy has not improved for a long time, stop training early\n                print(\"===================No optimization for many epochs, auto-stopping...=============\")\n                flag = True\n                break  # break out of the loop\n\n            if flag:  # same as above\n                break\n\n        # an OrderedDict keeps the entries sorted by insertion order\n        train_and_valid_result = collections.OrderedDict()\n        # train_result ={}\n        train_and_valid_result['train_loss'] = train_loss_list\n        print(train_and_valid_result['train_loss'])\n        print('acc_value_train type and length is:', type(train_acc_list), ' ', len(train_acc_list))\n        train_and_valid_result['train_acc'] = train_acc_list\n        train_and_valid_result['NA'] = '---------------------------------------------'\n        print('loss_value_valid type and length is:', type(valid_loss_list), ' ', len(valid_loss_list))\n        train_and_valid_result['valid_loss'] = valid_loss_list\n        print('acc_value_valid type and length is:', type(valid_acc_list), ' ', len(valid_acc_list))\n        train_and_valid_result['valid_acc'] = valid_acc_list\n\n        # save the results to a json file\n        json_file = os.path.join(save_result_dir, 'train_and_valid_result.json')\n\n        with open(json_file, 'w', encoding='utf-8') as json_file:\n            json.dump(train_and_valid_result, json_file, indent=4)\n\n        # plot and save\n        # plot_save(train_loss_list, fig_path=save_result_dir, xlabel='epoches', ylabel='train loss value')\n        # plot_save(train_acc_list, fig_path=save_result_dir, xlabel='epoches', ylabel='train acc value')\n        plot_save(valid_loss_list, fig_path=save_result_dir, xlabel='epoches', ylabel='valid loss value')\n        plot_save(valid_acc_list, fig_path=save_result_dir, xlabel='epoches', ylabel='valid acc value')\n\n        plot_save(train_iteration_loss_list, fig_path=save_result_dir, xlabel='iterations', ylabel='train loss value')\n        plot_save(train_iteration_acc_list, fig_path=save_result_dir, xlabel='iterations', ylabel='train acc value')\n        # plot_save(valid_iteration_loss_list, fig_path=save_result_dir, xlabel='iterations', ylabel='valid loss value')\n        # plot_save(valid_iteration_acc_list, fig_path=save_result_dir, xlabel='iterations', ylabel='valid acc value')\n        # # try:\n        # except: pass\n\n\ndef test(x_test, y_test):\n    loss_value_test = []  # loss value list while testing\n    acc_value_test = []  # acc value list while testing\n    # time_set_test = []  # average time while testing\n    msg_list = []  # testing results\n    gpu_config = tf.ConfigProto(log_device_placement=False, allow_soft_placement=True)\n    gpu_config.gpu_options.allow_growth = True\n\n    with tf.Session(config=gpu_config) as sess_test:\n        tf.global_variables_initializer().run()\n        saver = tf.train.Saver()\n        print('loading model from: ', save_path)\n        saver.restore(sess=sess_test, save_path=save_path)  # load the saved model\n        print('loaded!')\n\n        print('Testing...')\n        test_summery, test_loss, test_acc, test_y_pred = evaluate(sess_test, merged, x_test, y_test)\n\n        test_result = collections.OrderedDict()  # OrderedDict is a dict subclass that remembers the order in which entries were added.\n        test_result['test_acc'] = test_acc\n        test_result['test_loss'] = test_loss  # was test_acc, which wrongly stored the accuracy under the loss key\n        test_result['test_y_pred'] = test_y_pred\n        test_result['y_test'] = y_test.tolist()\n\n        test_result['learning_rate'] = Config.learning_rate\n        test_result['dropout_keep_prob_fc'] = Config.dropout_keep_prob_fc\n\n        test_path = 'test_result' + '.json'\n        json_file = os.path.join(save_result_dir, test_path)\n        with open(json_file, 'w', encoding='utf-8') as json_file:\n            json.dump(test_result, json_file, indent=4)\n    return test_acc, test_loss\n\n\nclass js_methods(object):\n    # nested class: through __dict__ the object can afterwards be serialized to json\n    def __init__(self, num, lr, dropout_fc, acc, loss):\n        
self.num = num\n        self.lr = lr\n        self.dropout_fc = dropout_fc\n        self.acc = acc\n        self.loss = loss\n\n    def __repr__(self):\n        return repr((self.num, self.lr, self.dropout_fc, self.acc, self.loss))\n\n\n'''=====================================================================================================================\n                                     =======     main function       =======\n======================================================================================================================='''\n\n# if __name__ == '__main__' means: when the .py file is run directly, the block below it is executed;\n# when the .py file is imported as a module, the block below it is not executed.\nif __name__ == '__main__':\n    # json_config = os.path.join(save_result_dir, 'config.json')  # save_result = 'results'\n    #\n    # print(\"===============================Loading data...===============================================\")\n    # # load the data, take 80% of the raw data as the training set and 20% as the test set\n    # dataset = loadmat('preprocessed_data.mat')\n    # data = dataset['data']\n    # label = dataset['label']\n    # print(\"data.shape: \", data.shape)\n    # print(\"label.shape \", label.shape)\n    json_config = os.path.join(save_result_dir, 'config.json')  # save_result = 'results'\n\n    print(\"===============================Loading data...===============================================\")\n    # load the data, take 80% of the raw data as the training set and 20% as the test set\n\n    dataset_path = os.path.join(Config.PATH, 'micphone_dataset.mat')\n    dataset = loadmat(dataset_path)\n    normal_data = dataset['normal_data']\n    normal_label = dataset['normal_label']\n    Inner_data = dataset['Inner_Raceway_data']\n    Inner_label = dataset['Inner_Raceway_label']\n    Outer_data = dataset['Outer_Raceway_data']\n    Outer_label = dataset['Outer_Raceway_label']\n    print(normal_data.shape,normal_label.shape,Inner_data.shape,Inner_label.shape,Outer_data.shape,Outer_label.shape)\n    # (14400, 2048) (14400, 3) (14400, 2048) (14400, 3) (14400, 2048) (14400, 3)\n    data = np.concatenate((normal_data, Inner_data, Outer_data), axis=0)\n    label = np.concatenate((normal_label, Inner_label, Outer_label), axis=0)\n    print(\"data.shape\", data.shape, 'label.shape', label.shape)\n\n    num_sample = len(data)\n    num_train = int(num_sample * Config.train_ratio)\n    num_test = num_sample - num_train\n    index_permutation = np.arange(num_sample)\n    np.random.shuffle(index_permutation)  # shuffle the data\n    train_data = data[index_permutation][: num_train]  # training set, a random 80%\n    test_data = data[index_permutation][num_train:]  # test set, a random 20%\n    # preprocess the two sets separately so mean, variance etc. are not shared, keeping the test set independent\n    # train_data = train_data\n    # test_data = test_data\n    train_label = label[index_permutation][: num_train]  # slicing includes the start index but excludes the end index\n    test_label = label[index_permutation][num_train:]  # slicing includes the start index but excludes the end index\n    print(train_data.shape, train_label.shape, test_data.shape, test_label.shape)\n    # (34560, 2048) (34560, 3) (8640, 2048) (8640, 3)\n\n    results = []\n    # itertools.product(list1, list2) takes each element of list1 together with each element of list2, builds tuples, and returns all those tuples.\n    # within every repeat cycle, loop over the combinations of the different learning rates and neuron keep probabilities (dropout_keep_prob_fc_list), keeping 50% or 100%\n    # (i,j,k)=(0,0.005,0.5),(0,0.005,1),(0,0.001,0.5),(0,0.005.1),(0,0.001,0.5),(0,0.001,1)... all combinations\n    for (i, j, k, bat) in itertools.product(range(Config.repeat),  # repeat = 5  # the outer loop above the epochs\n                                            Config.learning_rate_list,\n                                            Config.dropout_keep_prob_fc_list,\n                                            Config.batch_size_list):\n\n        print('Current working space is: ', os.getcwd())  # returns the current working directory.\n        print('Configuring and Saving CNN model...')\n        # save_path = os.path.join(save_dir, 'best_validation')  # save path for the best validation result\n        # check whether the paths exist; create any that do not\n\n        for path in [epoch_valid_tensorboard_dir, iter_train_tensorboard_dir, save_dir, 
save_result_dir]:\n            if not os.path.exists(path):\n                os.makedirs(path)\n\n        Config.learning_rate = j  # override the default learning_rate in Config\n        Config.dropout_keep_prob_fc = k  # override the default neuron keep probability in Config\n        Config.batch_size = bat\n\n        config = Config()\n        model = CNN(config)  # instantiate the CNN class, self.acc -> model.acc\n\n        print(\"===============================CNN Model is Finished=======================\")\n        print(\"========================= run_main_function is training =======================\")\n        # ================================================================================================\n        # save the results locally\n        '''Note that tf.summary.merge_all() has to be written after tf.summary.scalar(),\n        tf.summary.histogram() and similar calls,\n        otherwise a \"Fetch argument None has invalid type\" error is raised.\n        '''\n        merged = tf.summary.merge_all()  # saves all summaries to disk so tensorboard can display them.\n        train_writer = tf.summary.FileWriter(iter_train_tensorboard_dir)  # define a target file for writing summaries; the dir is the output path\n        valid_writer = tf.summary.FileWriter(epoch_valid_tensorboard_dir)\n\n        # train on the training set, evaluate on the validation set\n        run_main(train_data, train_label, train_ratio=config.RATIO_train)  # within the training data, split again into training and validation sets\n        print(\"========================= Testing... =======================\")\n        # evaluate on the test set\n        test_acc, test_loss = test(test_data, test_label)\n        print(\"test_acc: \", test_acc, \"test_loss: \", test_loss)\n\n        results.append(js_methods(i, j, k, test_acc, test_loss))\n        print(\"results.append is finished\")\n        shutil.copy('Config.py', save_result_dir)  # copy the configuration file into the local results directory\n        shutil.copy('CNN_model.py', save_result_dir)  # copy the CNN model file into the local results directory\n        shutil.copy('run.py', save_result_dir)  # copy the run script run.py into the local results directory\n        print(\"shutil.copy is finished\")\n\n        # # the lines below fail here!!! they do not fail when run on the c302 lab server; trail(i) = repeat = 5 # commonly called an epoch\n        # epoch_valid_tensorboard_dir = 'logdir\\\\epoch_valid' # validation per epoch\n        # iter_train_tensorboard_dir = 'logdir\\\\iter_train' # training per iteration\n\n        new_fold = 'test_acc_' + str(round(test_acc, 4)) + '_' + 'test_loss' + str(round(test_loss, 4)) \\\n                   + '_' + 'trail_' + str(i) + '_fc_dropout_' + str(k) + '_lr_' + str(j) + '_batch_size_' + str(bat)\n\n        if not os.path.exists(new_fold):\n            os.makedirs(new_fold)\n\n        shutil.move(tensorboard_dir, new_fold)\n        # shutil.move(iter_train_tensorboard_dir, new_fold)\n        # shutil.move(epoch_valid_tensorboard_dir, new_fold)\n        shutil.move(save_dir, new_fold)\n        shutil.move(save_result_dir, new_fold)\n        # move everything in the save_result_dir folder into the folder named \"trail 0 fc_drop_out 0.5 lr 0.005\"\n\n        print(\" shutil.move is finished\")\n\n    with open('results.json', 'w') as f:\n        f.write(json.dumps(results, default=lambda o: o.__dict__, indent=4))\n\nprint(\" ***************************************All Finished*********************************************\")\n\n\"\"\"\n[On the use of training, validation, and test sets]\n1 Training set: fits the data samples, performs the learning, updates the parameters\n2 Validation set: used for tuning hyperparameters, used many times, run once every few epochs. !!!Must exist!!!\n    A sample set held out from the model training process, used to tune hyperparameters and to evaluate the model preliminarily.\n    The validation set can be used during training; usually it is run once after every few epochs to check the effect.\n    (But validating too frequently slows training down.)\n    Advantages:\n    1) Problems with the model or the parameters are spotted in time, e.g. the validation loss diverges, strange values appear (infinity),\n     or accuracy stops growing or grows very slowly; training can then be stopped at once to re-tune the parameters or adjust the model,\n     without waiting for training to finish. In short: real-time monitoring!\n    2) It also verifies the generalization ability of the model: if the results on the validation set are much worse than on the training set,\n     consider whether the model is overfitting.\n     Once the classification accuracy on validation_data saturates, stop training. This strategy is called early stopping.\n    3) Different models can be compared through the validation set. In ordinary neural networks we use the validation set to find\n     the optimal network depth (number of hidden layers), to decide the stopping point of backpropagation,\n     or to choose the number of hidden neurons.\n    4) Cross Validation subdivides the training data set itself into different validation data sets.\n    Disadvantages:\n    1) A model that approaches the validation set through round after round of manual tuning and training may fit only the part of the\n     non-training data the validation set represents, so the final model may still generalize poorly.\n3 Test set: !!!Optional; the validation set can take its place!!!\n    After all training, validation and model adjustment is finished, run the whole test set once to see the model's generalization ability.\n    It must not serve as the basis for tuning parameters, selecting features or other algorithm-related choices.\n4 Relationship between validation set and test set:\n    When the validation set generalizes well enough (generally speaking, when it is large enough to cover most of the non-training data,\n    it also generalizes well enough), the test set becomes unnecessary.\n    If the validation set is representative enough, the test set can be omitted, but the validation set is mandatory.\n\nPS: 1) 
test_data is the final inspection step before the model ships;\n        test_data exists to guard against overfitting. If hyperparameters are set based on evaluation results on test_data,\n        the network may in the end overfit test_data.\n        That is, we may merely have found hyperparameters that suit the specific characteristics of test_data,\n        and the network's performance will not generalize to other data sets.\n    2) Ordinary parameters are updated through the network, tuned automatically (trained on the training set); hyperparameters are \"updated\" by hand,\n        tuned manually (so the validation set is, in a sense, also being trained on),\n        which is why the test set deserves to exist!\n\"\"\"\n\n\n\n\n","sub_path":"后期/训练模型/模型训练文件/MT3_micphone/训练文件代码/test_acc_0.9142_test_loss0.1945_trail_0_fc_dropout_0.5_lr_0.01_batch_size_2048/CNN_Model_results/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":29905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"355770671","text":"from threading import Thread\nfrom logging import Formatter\nimport logging\nfrom logging.handlers import RotatingFileHandler\nimport os\nfrom flask import Flask\n\napp = Flask(__name__)\n\n\ndef init():\n    log_formatter = Formatter('%(asctime)s %(levelname)s: %(message)s ')\n\n    file_handler = RotatingFileHandler(os.path.dirname(os.path.abspath(__file__)) + '/../log/app.log')\n    file_handler.setFormatter(log_formatter)\n    file_handler.setLevel(logging.INFO)\n\n    stream_handler = logging.StreamHandler()\n    stream_handler.setFormatter(log_formatter)\n    stream_handler.setLevel(logging.DEBUG)\n\n    app.logger.addHandler(file_handler)\n    app.logger.addHandler(stream_handler)\n    app.logger.setLevel(logging.DEBUG)\n\n    logging.getLogger('werkzeug').setLevel(logging.ERROR)\n\n    app.logger.info('tch-1 started')\n\n\ninit()\n\n\n@app.route('/t1')\ndef t1():\n    a = 2 / 0\n    return str(a)\n\n\ndef _t2():\n    return 3 / 0\n\n\n@app.route('/t2')\ndef t2():\n    thread = Thread(target=_t2)\n    thread.start()\n    return 'ok'\n","sub_path":"tch1/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"643453620","text":"\r\n#author1:\r\n#author2:\r\nimport time\r\n\r\nfrom grid import *\r\nfrom visualizer import *\r\nimport threading\r\nimport operator\r\nimport math\r\nimport cozmo\r\n\r\nfrom cozmo.util import degrees, distance_mm, speed_mmps\r\nfrom cozmo.objects import LightCube1Id, LightCube2Id, LightCube3Id\r\n\r\ndef astar(grid, heuristic):\r\n    \"\"\"Perform the A* search algorithm on a defined grid\r\n\r\n    Arguments:\r\n        grid -- CozGrid instance to perform search on\r\n        heuristic -- supplied heuristic function\r\n    \"\"\"\r\n\r\n    start = grid.getStart()\r\n    goal = grid.getGoals()[0]\r\n\r\n    closed = []\r\n    open = [start]\r\n\r\n    reverseLookup = {}\r\n    gScore = {start:0}\r\n    fScore = {start:heuristic(start, goal)}\r\n\r\n    while open:\r\n        for entry in sorted(fScore.items(), key=operator.itemgetter(1)):\r\n            if entry[0] in open:\r\n                current = entry[0]\r\n                break\r\n\r\n        if current == goal:\r\n            grid.setPath(getPath(reverseLookup, goal))\r\n            break\r\n\r\n        open.remove(current)\r\n        closed.append(current)\r\n        grid.addVisited(current)\r\n\r\n        for neighbor in grid.getNeighbors(current):\r\n            neighbor = neighbor[0]\r\n            if neighbor in closed:\r\n                continue\r\n\r\n            if neighbor not in open:\r\n                open.append(neighbor)\r\n\r\n            tentativeG = gScore.get(current, math.inf) + euclideanDistance(current, neighbor)\r\n            if tentativeG >= gScore.get(neighbor, math.inf):\r\n                continue\r\n\r\n            reverseLookup[neighbor] = current\r\n            gScore[neighbor] = tentativeG\r\n            fScore[neighbor] = gScore[neighbor] + heuristic(neighbor, goal)\r\n\r\n    pass # Your code here\r\n\r\ndef getPath(reverseLookup, goal):\r\n    path = [goal]\r\n    current = goal\r\n    while current in reverseLookup:\r\n        current = reverseLookup[current]\r\n        
path.append(current)\r\n path.reverse()\r\n return path\r\n\r\ndef heuristic(current, goal):\r\n \"\"\"Heuristic function for A* algorithm\r\n\r\n Arguments:\r\n current -- current cell\r\n goal -- desired goal cell\r\n \"\"\"\r\n return euclideanDistance(current, goal) # Simple euclidean distance heuristic\r\n\r\ndef euclideanDistance(start, end):\r\n return math.sqrt((end[0]-start[0])**2 + (end[1]-start[1])**2)\r\n\r\ndef cozmoBehavior(robot: cozmo.robot.Robot):\r\n \"\"\"Cozmo search behavior. See assignment description for details\r\n\r\n Has global access to grid, a CozGrid instance created by the main thread, and\r\n stopevent, a threading.Event instance used to signal when the main thread has stopped.\r\n You can use stopevent.is_set() to check its status or stopevent.wait() to wait for the\r\n main thread to finish.\r\n\r\n Arguments:\r\n robot -- cozmo.robot.Robot instance, supplied by cozmo.run_program\r\n \"\"\"\r\n \r\n global grid, stopevent\r\n\r\n # Reset the lift and head\r\n robot.move_lift(-3)\r\n robot.set_head_angle(degrees(0)).wait_for_completed()\r\n robot.world.connect_to_cubes()\r\n robot.world.auto_disconnect_from_cubes_at_end()\r\n time.sleep(3) # Not sure why this helps but it does; the cube detection API is garbage\r\n\r\n # Look around and try to find a cube\r\n look_around = robot.start_behavior(cozmo.behavior.BehaviorTypes.LookAroundInPlace)\r\n robot.world.wait_for_observed_light_cube()\r\n look_around.stop()\r\n robot.turn_in_place(degrees(-robot.pose.rotation.angle_z.degrees)).wait_for_completed()\r\n\r\n origin = grid.getStart()\r\n scale = grid.scale\r\n\r\n curCell = origin\r\n curOrientation = 0\r\n goal_observed = None\r\n obstacle1_observed = None\r\n obstacle2_observed = None\r\n\r\n obstacles = {}\r\n while not stopevent.is_set():\r\n # Recalibrate where the goal is, if possible\r\n goal = robot.world.get_light_cube(LightCube1Id) # paperclip\r\n if goal.last_observed_time != goal_observed:\r\n goal_observed = goal.last_observed_time\r\n grid.clearGoals()\r\n goal_offset = offsetFromCubeAngle(goal.pose.rotation.angle_z.degrees)\r\n goal_grid = objectToGridCoords(goal, scale, origin)\r\n obstacles['goal'] = padObject(goal_grid, grid)\r\n grid.clearObstacles()\r\n grid.addObstacles([item for sublist in obstacles.values() for item in sublist])\r\n goal_grid = (goal_grid[0]+goal_offset[0], goal_grid[1]+goal_offset[1])\r\n grid.addGoal(goal_grid)\r\n\r\n # Recalibrate where the obstacles are, if possible\r\n obstacle1 = robot.world.get_light_cube(LightCube2Id) # heart\r\n if (obstacle1 and obstacle1.last_observed_time != obstacle1_observed):\r\n obstacle1_observed = obstacle1.last_observed_time\r\n obstacles['obstacle1'] = padObject(objectToGridCoords(obstacle1, scale, origin), grid)\r\n grid.clearObstacles()\r\n grid.addObstacles([item for sublist in obstacles.values() for item in sublist])\r\n\r\n obstacle2 = robot.world.get_light_cube(LightCube3Id) # weird\r\n if (obstacle2 and obstacle2.last_observed_time != obstacle2_observed):\r\n obstacle2_observed = obstacle2.last_observed_time\r\n obstacles['obstacle2'] = padObject(objectToGridCoords(obstacle2, scale, origin), grid)\r\n grid.clearObstacles()\r\n grid.addObstacles([item for sublist in obstacles.values() for item in sublist])\r\n\r\n # Recalculate the path to take\r\n grid.clearPath()\r\n grid.setStart(curCell)\r\n astar(grid, heuristic)\r\n\r\n # Make the first move along the calculated path\r\n if (len(grid.getPath())>2):\r\n toCell = grid.getPath()[1]\r\n curOrientation = move(robot, curCell, 
curOrientation, toCell, grid)\r\n #curCell = objectToGridCoords(robot, scale, origin)\r\n curCell = toCell\r\n\r\ndef move(robot, curCell, curOrientation, toCell, grid):\r\n offset = (toCell[0]-curCell[0], toCell[1]-curCell[1])\r\n angle = angleFromOffset(offset)\r\n turn = angle-curOrientation\r\n if turn>180:\r\n turn = turn-360\r\n elif turn<-180:\r\n turn = turn+360\r\n\r\n robot.turn_in_place(degrees(turn)).wait_for_completed()\r\n mm = euclideanDistance((0,0), offset) * grid.scale\r\n robot.drive_straight(distance_mm(mm), speed_mmps(mm)).wait_for_completed()\r\n return angle\r\n\r\ndef angleFromOffset(offset):\r\n return {\r\n (1,0): 0,\r\n (1,1): 45,\r\n (0,1): 90,\r\n (-1,1): 135,\r\n (-1,0): 180,\r\n (-1,-1): 225,\r\n (0,-1): 270,\r\n (1,-1): 315\r\n }[offset]\r\n\r\ndef offsetFromCubeAngle(angle):\r\n scalingFactor = 3\r\n if angle>=-22.5 and angle<22.5:\r\n return (-1*scalingFactor, 0*scalingFactor)\r\n elif angle>=22.5 and angle<67.5:\r\n return (-1*scalingFactor, -1*scalingFactor)\r\n elif angle>=67.5 and angle<112.5:\r\n return (0*scalingFactor, -1*scalingFactor)\r\n elif angle>=112.5 and angle<157.5:\r\n return (1*scalingFactor, -1*scalingFactor)\r\n\r\n elif angle<-22.5 and angle>=-67.5:\r\n return (-1*scalingFactor, 1*scalingFactor)\r\n elif angle<-67.5 and angle>=-112.5:\r\n return (0*scalingFactor, 1*scalingFactor)\r\n elif angle<-112.5 and angle>=-157.5:\r\n return (1*scalingFactor, 1*scalingFactor)\r\n else:\r\n return (1*scalingFactor, 0*scalingFactor)\r\n\r\ndef padObject(objCoords, grid):\r\n obstacles = []\r\n for i in range(-2, 3, 1):\r\n for j in range(-2, 3, 1):\r\n cell = (objCoords[0] + i, objCoords[1] + j)\r\n if grid.coordInBounds(cell) and cell not in obstacles:\r\n obstacles.append(cell)\r\n return obstacles\r\n\r\ndef objectToGridCoords(lightCube, scale, origin):\r\n x = lightCube.pose.position.x // scale + origin[0]\r\n y = lightCube.pose.position.y // scale + origin[1]\r\n return (x, y)\r\n\r\n######################## DO NOT MODIFY CODE BELOW THIS LINE ####################################\r\n\r\n\r\nclass RobotThread(threading.Thread):\r\n \"\"\"Thread to run cozmo code separate from main thread\r\n \"\"\"\r\n \r\n def __init__(self):\r\n threading.Thread.__init__(self, daemon=True)\r\n\r\n def run(self):\r\n cozmo.run_program(cozmoBehavior)\r\n\r\n\r\n# If run as executable, start RobotThread and launch visualizer with empty grid file\r\nif __name__ == \"__main__\":\r\n global grid, stopevent\r\n stopevent = threading.Event()\r\n grid = CozGrid(\"emptygrid.json\")\r\n visualizer = Visualizer(grid)\r\n updater = UpdateThread(visualizer)\r\n updater.start()\r\n robot = RobotThread()\r\n robot.start()\r\n visualizer.start()\r\n stopevent.set()\r\n\r\n","sub_path":"lab10/planning.py","file_name":"planning.py","file_ext":"py","file_size_in_byte":8563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"443634408","text":"from os import environ\nfrom emailverifier import Client\nfrom emailverifier.exceptions import ApiBaseException\n\nfrom flask import current_app, _app_ctx_stack\n\nCONFIG_KEY = \"EMAIL_VERIFIER_KEY\"\n\n\nclass EmailVerifier(object):\n def __init__(self, app=None):\n self.app = app\n if app is not None:\n self.init_app(app)\n\n def init_app(self, app):\n api_key = app.config.get(CONFIG_KEY) or environ.get(CONFIG_KEY)\n if not api_key:\n raise Exception(\"\"\"No API key was supplied for performing email verification. 
\n Please set a value for EMAIL_VERIFIER_KEY.\"\"\")\n\n self.client = Client(api_key)\n\n def verify(self, email, options=None):\n try:\n data = self.client.get(email, options)\n except ApiBaseException:\n data = None\n\n return data\n","sub_path":"flask_email_verifier.py","file_name":"flask_email_verifier.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"484282614","text":"#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n\n\nfrom data import *\ndata_gen_args = dict(rotation_range=0.1,\n width_shift_range=0.01,\n height_shift_range=0.01,\n shear_range=0.01,\n zoom_range=0.01,\n horizontal_flip=False,\n fill_mode='nearest')\nnum_batch = 1\nmyGenerator = trainGenerator(9,'data/qrqm/test','test_src','test_src',data_gen_args,target_size=(512,512),save_to_dir = \"data/qrqm/test/test_src\")\n# myGenerator = trainGenerator(1,'data/membrane/train','image','label',data_gen_args,save_to_dir = \"data/membrane/train/aug\")\nfor i,batch in enumerate(myGenerator):\n if (i > num_batch):\n break","sub_path":"dataPrepare.py","file_name":"dataPrepare.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"107003500","text":"from flask import (render_template,redirect,url_for,request,flash,abort)\nfrom . import main\nfrom flask_bootstrap import Bootstrap\nfrom flask_login import login_required,current_user\nfrom ..models import User,Blog,Comment\nfrom .forms import UpdateProfile,BlogForm,CommentForm\nfrom .. import db,photos\nfrom ..requests import get_random_quote\nimport datetime\n\n# Views\n@main.route('/')\ndef index():\n blogs = Blog.query.all()\n quote = get_random_quote()\n '''\n View root page function that returns the index page and its data\n '''\n title = 'Natalie - Blog '\n\n return render_template('index.html', title = title, blogs = blogs,quote = quote)\n\n@main.route('/user/')\ndef profile(uname):\n user = User.query.filter_by(username = uname).first()\n\n if user is None:\n abort(404)\n\n return render_template(\"profile/profile.html\", user = user)\n\n@main.route('/user//update',methods = ['GET','POST'])\n@login_required\ndef update_profile(uname):\n user = User.query.filter_by(username = uname).first()\n if user is None:\n abort(404)\n\n form = UpdateProfile()\n\n if form.validate_on_submit():\n user.bio = form.bio.data\n\n db.session.add(user)\n db.session.commit()\n\n return redirect(url_for('.profile',uname=user.username))\n \n return render_template('profile/update.html',form =form)\n\n@main.route('/user//update/pic',methods= ['POST'])\n@login_required\ndef update_pic(uname):\n user = User.query.filter_by(username = uname).first()\n if 'photo' in request.files:\n filename = photos.save(request.files['photo'])\n path = f'photos/{filename}'\n user.profile_pic_path = path \n db.session.commit()\n return redirect(url_for('main.profile',uname=uname))\n\n@main.route('/blog/new', methods = ['GET','POST'])\n@login_required\ndef new_blog():\n blog_form = BlogForm()\n if blog_form.validate_on_submit():\n title = blog_form.title.data\n blog = blog_form.text.data\n\n # Updated blog instance\n new_blog = Blog(blog_title=title,blog_content=blog,username=current_user.username,likes=0,dislikes=0)\n\n # Save blog method\n new_blog.save_blog()\n return redirect(url_for('main.index'))\n\n title = 'New blog'\n return render_template('new_blog.html',title = title,blog_form=blog_form )\n\n@main.route('/blog/', methods = 
['GET','POST'])\ndef blog(id):\n blog = Blog.get_blog(id)\n posted_date = blog.posted.strftime('%b %d, %Y')\n\n if request.args.get(\"like\"):\n blog.likes = blog.likes + 1\n\n db.session.add(blog)\n db.session.commit()\n\n return redirect(\"/blog/{blog_id}\".format(blog_id=blog.id))\n\n elif request.args.get(\"dislike\"):\n blog.dislikes = blog.dislikes + 1\n\n db.session.add(blog)\n db.session.commit()\n\n return redirect(\"/blog/{blog_id}\".format(blog_id=blog.id))\n \n comment_form = CommentForm()\n if comment_form.validate_on_submit():\n comment = comment_form.text.data\n\n new_comment = Comment(comment = comment,user = current_user,blog_id = blog)\n\n new_comment.save_comment()\n\n\n comments = Comment.get_comments(blog)\n \n return render_template('blog.html', blog = blog, date = posted_date, comment_form = comment_form, comments = comments)\n\n@main.route('/user//blogs')\ndef user_blogs(uname):\n user = User.query.filter_by(username=uname).first()\n blogs = Blog.query.filter_by(user_id = user.id).all()\n blogs_count = Blog.count_blogs(uname)\n user_joined = user.date_joined.strftime('%b,%d,%y')\n \n return render_template(\"profile/blogs.html\",user = user, blogs = blogs, blogs_count= blogs_count,date= user_joined)\n\n@main.route(\"/blog//update\",methods = ['GET','POST'])\n@login_required\ndef update_blog(id):\n blog = Blog.query.get_or_404(id)\n if blog.username != current_user.username:\n abort(403)\n blog_form = BlogForm()\n if blog_form.validate_on_submit():\n blog.blog_title = blog_form.title.data\n blog.blog_content = blog_form.text.data\n db.session.commit()\n flash('Your blog has been updated!', 'success')\n return redirect(url_for('main.blog', id=blog.id))\n elif request.method == 'GET':\n blog_form.title.data = blog.blog_title\n blog_form.text.data = blog.blog_content\n \n return render_template('new_blog.html',title = 'Update Blog',blog_form=blog_form )\n\n@main.route(\"/blog//delete\", methods=['POST'])\n@login_required\ndef delete_blog(id):\n blog = Blog.query.get_or_404(id)\n if blog.username != current_user.username:\n abort(403)\n db.session.delete(blog)\n db.session.commit()\n flash('Your post has been deleted!', 'success')\n return redirect(url_for('main.index'))","sub_path":"app/main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"427222206","text":"# knock56.py\n# coding = utf-8\n\nimport sys\nimport xml.etree.ElementTree as ET\n\n # PERSON\ntree = ET.parse(\"nlp.txt.xml\")\nroot = tree.getroot()\n\nsentenceNodes = root.findall(\".//sentences/sentence\")\nsentList =[]\n\nprint (len(sentenceNodes))\nprint (\"==========================\")\n\nfor i, sent in enumerate(sentenceNodes):\n\twords = []\n\n\ttokens = sent.findall(\".//token\")\n\n\tfor token in tokens:\n\t\t# ner = token.find(\"NER\").text\n\t\t# if(ner == \"PERSON\"):\n\t\t# wordNodes = token.find(\"word\").text\n\t\twords.append(token.find(\"word\").text)\n\tsentList.append(words)\nlineNo =0 \nfor sent in sentList:\n\tprint(lineNo, \" \".join(sent))\n\tlineNo +=1\n\n\nprint (\"==========================\")\n\nreferList = []\nreferNodes = root.findall(\".//coreference\")\n\n# \n# \t18\n# \t23\n# \t25\n# \t24\n# \tlanguage processing\n# \n\nfor ref in referNodes:\n\tmentList = ref.findall(\".//mention\")\n\trepresentative = mentList[0].find(\"text\").text\n\trepresentList = representative.split()\n\trepresentList.insert(0, \"(\")\n\trepresentList.append(\"|\")\n\tfor ment in 
mentList[1:]:\n\t\tsenId = int(ment.find(\"sentence\").text) - 1\n\t\tstart = int(ment.find(\"start\").text) -1\n\t\tend = int(ment.find(\"end\").text)-1\n\n\t\tsentence = sentList[senId]\n\n\t\t# sentence.insert(start, \"\")\n\t\tsentence.insert(end, \")\")\n\t\t# http://www.karak.jp/blog/python-list.html\n\t\tsentence[start:start] = representList\n\n\nlineNo =0 \nfor sent in sentList:\n\tprint(lineNo, \" \".join(sent))\n\tlineNo +=1\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"aron/chapter06/knock56.py","file_name":"knock56.py","file_ext":"py","file_size_in_byte":1525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"162537079","text":"#!/usr/bin/env python\n# -*- coding: cp1252 -*-\n# -*- coding: 850 -*-\n# -*- coding: utf-8 -*-\n# JarvisIA is an idea of Daniel Dieser @initiasur, @NiperiaLab, an independent robotics and AI researcher. JarvisIA is the first voice assistant in Spanish and was created to encourage the use and development of Artificial Intelligence technologies among Latinos.\n# The collaborators on this project are a cool team who were programming and hacking students of Daniel Dieser in the city of Puerto Madryn, Argentina. They are: Dante Vargas, Cristian Aparicio, Mauricio Vega, Pichu @MgMoy1, and Matias Gimenez.\nimport pyttsx\nimport pygame\nimport os\nimport requests\n\nengine = pyttsx.init()\nengine.setProperty('rate', 180) \nengine.setProperty('voice', \"spanish\")\n\ndef habla(texto):\n    engine.say(texto)\n    engine.runAndWait()\napi_address = 'http://api.openweathermap.org/data/2.5/weather?q='\ncity = \"puerto madryn\"\n# append the requested city to the query\nurl = api_address + city\n# complete the url so that the \"appid\" ends up at the end\nresto = '&lang=es&units=metric&APPID=28b7bbe6a9320e655c0f599979011c1b'\nurl = url + resto\n# look up the weather description in the json data\njson_data = requests.get(url).json()\ndescription = json_data['weather'][0]['description']\n\n# look up the temperature in the json data\njson_data = requests.get(url).json()\ntemp = json_data['main']['temp']\n\n\nos.system('google_speech -l es \"La Temperatura en Puerto Madryn es de grados:%.2f\" -e speed 1 pitch -600 ' % (temp))\nos.system('python ./twitter.py \"temperatura en madryn es de grados:%.2f\"' % (temp))\n\nos.system('google_speech -l es \"El clima:{} \" -e speed 1 pitch -600 '.format(description))\nos.system('python ./twitter.py \"El clima:{}\"'.format(description))\nif temp > 18 and description == \"cielo claro\":\n    os.system('google_speech -l es \"Hoy es un día maravilloso para alguna actividad recreativa al aire libre, como paseo por la costa o caminata\" -e speed 1 pitch -600')\n    os.system('python ./twitter.py \"Hoy es un día maravilloso para alguna actividad recreativa al aire libre, como paseo por la costa o caminata :-)\" ')\n\nelif description in (\"lluvia ligera\", \"lluvia\", \"llovizna ligera\"):  # membership test across all rain descriptions\n    os.system('google_speech -l es \"Si sales lleva paraguas, el pronóstico indica lluvia\" -e speed 1 pitch -600')\n\n    os.system('python ./twitter.py \"Si sales lleva paraguas, el pronostico indica lluvia\"')\n\nelif temp < 15:\n    os.system('python ./twitter.py \" Si sales lleva abrigo la temperatura esta en descenso\"')\n\n","sub_path":"clima1.py","file_name":"clima1.py","file_ext":"py","file_size_in_byte":2487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"178205272","text":"from collections import OrderedDict\nimport xml.etree.ElementTree as ET\nimport os\n\n# 
ideally all loader files would be in one folder, and all loader files in another folder\ninboundOrderPath = 'C:/Users/deifen/PycharmProjects/OrderCorrelation/order-time-analysis/Data/IncomingOrders'\ninboundOrdersSet = {}\ninboundOrderFileCount = 0\ninboundOrdersFiltered = 0\ninboundOrdersTotal = 0\nallocationCount = 0\nfileCount = 0\nPENDING_STATUS_FILTER = \"PENDING\"\n\nprint(\"Building Inbound Orders set...\")\n\n# loop through each file in the specified path\nfor filename in os.listdir(inboundOrderPath):\n fileCount += 1\n inboundOrderFileCount += 1\n tree = ET.parse(os.path.join(inboundOrderPath, filename))\n root = tree.getroot()\n# strip time elements from filename, and convert to seconds elapsed since midnight\n timestamp = filename[-13:filename.find(\".xml\")]\n hours = timestamp[0:2]\n minutes = timestamp[2:4]\n seconds = timestamp[4:6]\n subseconds = timestamp[6:]\n timeInSeconds = float(str(int(hours) * 60 * 60 + int(minutes) * 60 + int(seconds)) + \".\" + subseconds)\n# collect all the attributes common to the whole file\n for child in root:\n if child.tag == \"portId\":\n portId = child.text\n if child.tag == \"typ\":\n type = child.text\n# find all Order elements in the xml file, and loop through to extract the attributes of each order\n orders = tree.findall(\".//Order\")\n for o in orders:\n order = OrderedDict()\n inboundOrdersTotal += 1\n for child in o:\n if child.tag == \"allocId\":\n allocId = child.text\n if child.tag == \"status\":\n status = child.text\n# if the order status is PENDING, then build the order and add it to the order set\n if status == PENDING_STATUS_FILTER:\n allocationCount += 1\n order[\"allocId\"] = allocId\n order[\"typ\"] = type\n order[\"status\"] = status\n order[\"portId\"] = portId\n order[\"timeInSeconds\"] = timeInSeconds\n inboundOrdersSet[allocId] = order\n# keep track of how many orders were filtered with status <> PENDING\n else:\n inboundOrdersFiltered += 1\n\nprint(\"Inbound Orders set complete\")\nprint(\"Saving to disk...\")\n\ncolumnHeaders = \"AllocID, MessageType, Status, PortID, SecondsFromMidnight\"\ninboundOrders = [columnHeaders]\n\n# loop through order set and prepare for output to file by packaging as a comma delimited string\n\nfor o in inboundOrdersSet.keys():\n tString = \"\"\n k = 1\n for i in inboundOrdersSet[o].keys():\n# lets not add a comma after the last element in the set......\n if k == len(inboundOrdersSet[o]):\n tString += (str(inboundOrdersSet[o][i]))\n else:\n tString += (str(inboundOrdersSet[o][i])) + \", \"\n k += 1\n inboundOrders.append(tString)\n\nf2 = open('inboundOrders.txt', 'w')\n\nfor line in inboundOrders:\n f2.write(\"%s\\n\" % line)\n\nf2.close()\n\n# print run stats\nprint(\"Save complete\")\nprint(\"Found \" + str(allocationCount) + \" allocations in \" + str(fileCount) + \" files\")\nprint(\"Filtered \" + str(inboundOrdersFiltered) + \" orders out of a total of \" + str(inboundOrdersTotal))","sub_path":"IncomingOrderCorrelation.py","file_name":"IncomingOrderCorrelation.py","file_ext":"py","file_size_in_byte":3194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"488566172","text":"import sys, functools\n\n\ndef rule(rule_func):\n \"\"\"Wraps functools.partial\"\"\"\n def wrapper(*wargs, **wkargs):\n return functools.partial(rule_func, *wargs, **wkargs)\n return wrapper\n\n\n@rule\ndef divisable(value, divisor, word):\n if not value % divisor:\n return word\n return \"\"\n\n\ndef fizzbuzz(length):\n \"\"\"Plays FizzBuzz\"\"\"\n rules = 
[\n divisable(divisor=3, word=\"Fizz\"),\n divisable(divisor=5, word=\"Buzz\")\n ]\n\n for i in range(1, int(length)+1):\n # Apply each rule to i\n words = [rule(i) for rule in rules]\n if all([not x for x in words]): # True if every word is blank\n print(i)\n else:\n print(\"\".join(words))\n\n\nif __name__ == \"__main__\":\n try:\n arg_length = sys.argv[1]\n except IndexError:\n sys.exit(\"I need a length!\")\n\n fizzbuzz(arg_length)","sub_path":"fizzbuzz.py","file_name":"fizzbuzz.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"92549002","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: D:\\Programmieren\\dataScryer\\datascryer\\helper\\time_converter.py\n# Compiled at: 2016-07-05 08:30:27\n\n\nclass Units:\n ms = 1\n s = ms * 1000\n m = s * 60\n h = m * 60\n d = h * 24\n\n @classmethod\n def fromstring(cls, string):\n return getattr(cls, string.lower(), None)\n\n\ndef string_to_ms(string):\n unit = string[-1:]\n value = int(string[:-1])\n return int(value * Units.fromstring(unit))","sub_path":"pycfiles/DataScryer-0.0.4-py2.py3-none-any/time_converter.py","file_name":"time_converter.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"370161920","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Sep 10 21:36:59 2017\n\n@author: Cyril\n\"\"\"\n# doc https://pyowm.readthedocs.io/en/latest/\n# doc https://github.com/csparpa/pyowm/blob/master/pyowm/docs/usage-examples.md\nimport pyowm\n\nowm = pyowm.OWM('f20e1afb114d39a7c88d271d70dedaef') # You MUST provide a valid API key\n\n# Have a pro subscription? Then use:\n# owm = pyowm.OWM(API_key='your-API-key', subscription_type='pro')\n\n# Will it be sunny tomorrow at this time in Milan (Italy) ?\nforecast = owm.daily_forecast(\"Milan,it\")\ntomorrow = pyowm.timeutils.tomorrow()\nforecast.will_be_sunny_at(tomorrow) # Always True in Italy, right? 
;-)\n\n# Search for current weather in London (UK)\nobservation = owm.weather_at_place('London,uk')\nw = observation.get_weather()\nprint(w) # \n\n# Weather details\nw.get_wind() # {'speed': 4.6, 'deg': 330}\nw.get_humidity() # 87\nw.get_temperature('celsius') # {'temp_max': 10.5, 'temp': 9.7, 'temp_min': 9.0}\n\n# Search current weather observations in the surroundings of\n# lat=22.57W, lon=43.12S (Rio de Janeiro, BR)\nobservation_list = owm.weather_around_coords(-22.57, -43.12)\nprint(observation_list)\nobservation_list = owm.three_hours_forecast_at_coords(-22.57, -43.12)\ntime1 = \"2017-09-21 11:00:00+00\"# ``YYYY-MM-DD HH:MM:SS+00``\ntime2 = \"2017-09-21 18:00:00+00\"# ``YYYY-MM-DD HH:MM:SS+00``\nw1= observation_list.get_weather_at(time1)\nw2= observation_list.get_weather_at(time2)\n\nprint(w1.get_temperature()) \nprint(w2.get_temperature()) \n\n","sub_path":"Central/owm/my_owm.py","file_name":"my_owm.py","file_ext":"py","file_size_in_byte":1603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"633241842","text":"#!/usr/bin/env python\n# encoding: utf-8\n\n\"\"\"\n__author__: Wuxiaoshen\n__software__: PyCharm\n__project__:Learn_Scrapy\n__file__: test_xpath\n__time__: 2017/1/7 19:46\n\"\"\"\nfrom pprint import pprint\n\nimport aiohttp\nimport asyncio\nfrom lxml import etree\nurl = \"http://www.cninfo.com.cn/cninfo-new/memo-2?queryDate=2017-01-06&queryType=queryType1\"\n\nasync def fetch(url):\n async with aiohttp.ClientSession() as session:\n async with session.get(url) as html:\n response = await html.text(encoding=\"utf-8\")\n # print(response.status)\n return response\n\nasync def parse():\n response = await fetch(url)\n selector = etree.HTML(response)\n content = selector.xpath('//div[starts-with(@class,\"list\")]')\n items = []\n for one in content:\n info_items = one.xpath('ul/li[starts-with(@class,\"ta\")]/text()')\n if len(info_items) < 6:\n continue\n item = {\n \"code\": info_items[0],\n \"name\": info_items[1],\n \"endtime\": info_items[2],\n \"starttime\": info_items[3]if info_items[3] != \" \" else None,\n \"timestamp\": info_items[4]if info_items[4] != \" \" else None,\n \"reason\": info_items[5]\n }\n #pprint(item)\n items.append(item)\n return items\nloop = asyncio.get_event_loop()\na = loop.run_until_complete(parse())\npprint(a)\n\nif __name__ == '__main__':\n url = \"http://www.cninfo.com.cn/cninfo-new/memo-2?queryDate=2017-01-06&queryType=queryType1\"","sub_path":"Learn_Scrapy/daily/test_xpath.py","file_name":"test_xpath.py","file_ext":"py","file_size_in_byte":1492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"150852956","text":"import os.path\nimport sys\n\nsys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'external'))\n\nimport arcpy\nimport logging\nfrom utils.arcgis_logging import setup_logging\n\nfrom utils.addresulttodisplay import add_result_to_display\nfrom collections import OrderedDict\nfrom gistools.utils.collection import MemCollection\nfrom gistools.tools.clean import connect_lines\n\nlogging.basicConfig(level=logging.INFO)\nsetup_logging(arcpy)\nlog = logging.getLogger(__file__)\nlog.setLevel(logging.INFO)\n\n# Read the parameter values\n# 0: Lijnenbestand\n# 1: Split de lijnen op connecties\n# 2: Buffer (around vertex in m)\n# 3: Doelbestand\n\ninput_line_fl = arcpy.GetParameterAsText(0)\nsplit_on_connections = arcpy.GetParameter(1)\nbuffer_value = arcpy.GetParameter(2)\noutput_file = arcpy.GetParameterAsText(3)\n\n# 
Read the parameter values\n# input_line_fl = './testdata/input/Testdata_watergangen.shp'\n# split_on_connections = True\n# buffer_value = 2\n# output_file = './testdata/output/1_a2.shp'\n\n# voorbereiden data typen en inlezen data\nlog.info('Bezig met voorbereiden van de data...')\n\nline_col = MemCollection(geometry_type='MultiLinestring')\nrecords = []\nrows = arcpy.SearchCursor(input_line_fl)\nfields = arcpy.ListFields(input_line_fl)\npoint = arcpy.Point()\n\n# vullen collection\nfor row in rows:\n geom = row.getValue('SHAPE')\n properties = OrderedDict()\n for field in fields:\n if field.name.lower() != 'shape':\n properties[field.name] = row.getValue(field.name)\n\n records.append({'geometry': {'type': 'MultiLineString',\n 'coordinates': [[(point.X, point.Y) for\n point in line] for line in geom]},\n 'properties': properties})\n\nline_col.writerecords(records)\n\n# aanroepen tool\nlog.info('Bezig met uitvoeren van cleanen van lijnen')\n\narcpy.AddMessage('Bezig met uitvoeren van cleanen van lijnen')\n\nnew_lines = connect_lines(line_col, buffer_value,\n split_line_at_connection=split_on_connections)\n\n# wegschrijven tool resultaat\n\nlog.info('Bezig met het genereren van het doelbestand...')\n\nspatial_reference = arcpy.Describe(input_line_fl).spatialReference\n\noutput_name = os.path.basename(output_file).split('.')[0]\noutput_dir = os.path.dirname(output_file)\n\noutput_fl = arcpy.CreateFeatureclass_management(output_dir, output_name, 'POLYLINE',\n spatial_reference=spatial_reference)\n\n# copy fields from input\nfor field in fields:\n if field.editable and field.type.lower() not in ['geometry']:\n arcpy.AddField_management(output_fl, field.name, field.type, field.precision, field.scale,\n field.length, field.aliasName, field.isNullable, field.required, field.domain)\n\n# add additional fields with output of tool\narcpy.AddField_management(output_fl, 'part', 'integer', field_is_nullable=True)\n\ndataset = arcpy.InsertCursor(output_fl)\n\nfor l in new_lines:\n row = dataset.newRow()\n\n mline = arcpy.Array()\n for sub_line in l['geometry']['coordinates']:\n array = arcpy.Array()\n for p in sub_line:\n point.X = p[0]\n point.Y = p[1]\n array.add(point)\n\n mline.add(array)\n\n row.Shape = mline\n\n for field in fields:\n if field.editable and field.type.lower() not in ['geometry']:\n log.debug(\"field: %s, type: %s, editable: %s, value: %s\",\n field.name,\n field.type,\n field.editable,\n l['properties'].get(field.name, None))\n row.setValue(field.name, l['properties'].get(field.name, field.defaultValue))\n\n row.setValue('part', l['properties'].get('part', None))\n\n dataset.insertRow(row)\n\narcpy.DeleteField_management(output_file, [\"Id\"])\n\nadd_result_to_display(output_fl, output_name)\n\nlog.info('Gereed')\n\n","sub_path":"1_a2_clean_lines.py","file_name":"1_a2_clean_lines.py","file_ext":"py","file_size_in_byte":3908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"86311625","text":"import z3\nfrom acsploit.options import Options\nfrom .z3_common import get_collisions\n\n\noptions = Options()\noptions.add_option('n_collisions', 10, 'Number of colliding strings to create')\noptions.add_option('length', 10, 'Length of strings to create')\noptions.add_option('hash_table_size', 100, 'Size of target hash table')\noptions.add_option('width', 16, 'Bit-width of Fletcher checksum', [16, 32, 64])\noptions.add_option('target_type', 'preimage', 'Whether the target is an image (hash output) or preimage (hash input)',\n ['image', 
'preimage'])\noptions.add_option('target', 'hello', 'Image or preimage of desired hash value')\n\nDESCRIPTION = 'Produces hash collisions for the fletcher checksum function.' \\\n              '\\n\\n ' \\\n              'This exploit works by using z3 to \"solve\" for hash collisions. An implementation of the Fletcher ' \\\n              'checksum for z3 is used to generate a constraint system that z3 solves to find colliding hash inputs.'\n\nNO_INPUT = True\n\n\ndef run(output):\n    ret = get_collisions(z3fletcher, options['target'], options['target_type'], options['length'], options['n_collisions'],\n                         options['hash_table_size'], (2**(options['width']//2))-1)  # integer division keeps the modulus an int in Python 3; eg, fletcher16 => 255\n    output.output(ret)\n\n\ndef z3fletcher(bytes, hash_table_size, modulus):  # computes the z3 form of the fletcher checksum\n    v1 = 0\n    v2 = 0\n    for byte in bytes:\n        v1 = (v1 + z3.ZeroExt(56, byte)) % modulus\n        v2 = (v2 + v1) % modulus\n    return (v2 * (modulus+1) + v1) % hash_table_size\n","sub_path":"acsploit/exploits/hashes/collisions/fletcher.py","file_name":"fletcher.py","file_ext":"py","file_size_in_byte":1555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"135473018","text":"'''\n423. Reconstruct Original Digits from English\n\nGiven a non-empty string containing an out-of-order English representation of digits 0-9, output the digits in ascending order.\n\nNote:\nInput contains only lowercase English letters.\nInput is guaranteed to be valid and can be transformed to its original digits. That means invalid inputs such as \"abc\" or \"zerone\" are not permitted.\nInput length is less than 50,000.\nExample 1:\nInput: \"owoztneoer\"\n\nOutput: \"012\"\nExample 2:\nInput: \"fviefuro\"\n\nOutput: \"45\"\n'''\nimport collections\nclass Solution(object):\n    def originalDigits(self, s):\n        \"\"\"\n        :type s: str\n        :rtype: str\n        \"\"\"\n# store digits in dict\n        #memo = collections.defaultdict(int)\n        #for c in s:\n        #    memo[c] += 1\n        #for i, v in memo.items():\n        #    if v == 'w'\n# equals to:\n        memo = collections.Counter(s)\n        res = []\n        # tokens are ordered so that each token's last letter is unique to its digit at that point\n        for x in '0eroz 6six 7evens 5fiev 8eihtg 4ourf 3treeh 2tow 1neo 9nnei'.split():\n            res.append(x[0] * memo[x[-1]])\n            for c in x:\n                memo[c] -= memo[x[-1]]\n        return ''.join(sorted(res))\n\nif __name__ == \"__main__\":\n    s = \"owoztneoer\"\n    res = Solution().originalDigits(s)\n    print(res)\n","sub_path":"423_originalDigits.py","file_name":"423_originalDigits.py","file_ext":"py","file_size_in_byte":1227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"479568968","text":"# ----------\n# User Instructions:\n#\n# Write a function optimum_policy that returns\n# a grid which shows the optimum policy for robot\n# motion. This means there should be an optimum\n# direction associated with each navigable cell from\n# which the goal can be reached.\n#\n# Unnavigable cells as well as cells from which\n# the goal cannot be reached should have a string\n# containing a single space (' '), as shown in the\n# previous video. 
The goal cell should have '*'.\n# ----------\n\ngrid = [[0, 1, 0, 0, 0, 0],\n        [0, 1, 1, 0, 1, 0],\n        [0, 0, 0, 0, 1, 0],\n        [0, 1, 1, 1, 1, 0],\n        [0, 1, 0, 1, 1, 0]]\ngoal = [len(grid)-1, len(grid[0])-1]\ncost = 1 # the cost associated with moving from a cell to an adjacent one\n\ndelta = [[-1, 0], # go up\n         [0, -1], # go left\n         [1, 0], # go down\n         [0, 1]] # go right\n\ndelta_name = ['^', '<', 'v', '>']\ndelta_name_inv = ['v', '>', '^', '<']\nbig_value = 99\n\n\ndef optimum_policy(grid, goal, cost):\n    # ----------------------------------------\n    # insert code below\n    # ----------------------------------------\n\n    # make sure your function returns a grid of values as\n    # demonstrated in the previous video.\n    occupied = [x[:] for x in grid]\n    scores = [[big_value for i in x] for x in grid]\n    moves = [[' ' for i in x] for x in grid]\n    moves[goal[0]][goal[1]] = '*'\n    to_check = [goal]\n    scores[goal[0]][goal[1]] = 0\n    while True:\n        if not to_check:\n            break\n        current_cell = to_check.pop(0)\n        directions = [[x + y for x, y in zip(current_cell, delta_i)]\n                      for delta_i in delta]\n        directions[:] = [x if (x[0] >= 0 and x[0] < len(grid)) and\n                         (x[1] >= 0 and x[1] < len(grid[0])) and\n                         occupied[x[0]][x[1]] == 0 else -1 for x in directions]\n        for idx, d in enumerate(directions):\n            if d != -1:\n                scores[d[0]][d[1]] = scores[current_cell[0]][current_cell[1]] + cost\n                moves[d[0]][d[1]] = delta_name_inv[idx]\n                to_check.append(d)\n                occupied[d[0]][d[1]] = 2\n        occupied[current_cell[0]][current_cell[1]] = 1\n\n    return moves\n\n\nres = optimum_policy(grid, goal, cost)\nfor j in res:\n    print(j)\n","sub_path":"l4_motion_planning/q6_optimum_policy.py","file_name":"q6_optimum_policy.py","file_ext":"py","file_size_in_byte":2266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"330692063","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Nov 04 16:28:38 2016\r\n\r\n@author: pastahl\r\n\"\"\"\r\nimport pandas as pd  # needed for pd.read_csv below\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\n\r\nstudent_dataset = pd.read_csv(r'C:\\\\Data\\\\Projektit\\\\DAT210x\\\\Datasets\\\\students.data', index_col=0)\r\nmy_dataframe = student_dataset[['G3', 'G2','G1']]\r\n\r\nplt.imshow(my_dataframe.corr(), cmap=plt.cm.Blues, interpolation='nearest')\r\nplt.colorbar()\r\ntick_marks = [i for i in range(len(my_dataframe.columns))]\r\nplt.xticks(tick_marks, my_dataframe.columns, rotation='vertical')\r\nplt.yticks(tick_marks, my_dataframe.columns)\r\nplt.show() ","sub_path":"CorrelationMatrix.py","file_name":"CorrelationMatrix.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"230431682","text":"''' dAmnViper.parse module\r\n    Copyright (c) 2011, Henry \"photofroggy\" Rapley.\r\n    Released under the ISC License.\r\n    \r\n    This module provides objects for parsing data going to and from the dAmn\r\n    server. Tablumps and packets are handled by classes here, and a class is\r\n    provided to generate output messages for stdout based on packets received\r\n    from dAmn.\r\n'''\r\n\r\nimport re\r\nfrom collections import OrderedDict\r\n\r\nclass Packet(object):\r\n    \"\"\" Use this class to parse dAmn packets.\r\n    \r\n    This object processes given strings as dAmn packets, and stores\r\n    information from the string in different object attributes. 
This\r\n    makes it easier to work with packets in other parts of the API.\r\n    \"\"\"\r\n\r\n    def __init__(self, data=None, sep='='):\r\n        self.cmd, self.param, self.args, self.body, self.raw = None, None, {}, None, data\r\n        \r\n        if not data:\r\n            return\r\n        \r\n        data = data.partition('\\n\\n')\r\n        self.body = data[2] or None\r\n        data = data[0].partition('\\n')\r\n        \r\n        if not data[0]:\r\n            return\r\n        \r\n        if sep not in data[0]:\r\n            head = data[0].partition(' ')\r\n            self.cmd = head[0] or None\r\n            self.param = head[2] or None\r\n            data = data[2].partition('\\n')\r\n        \r\n        while data[0]:\r\n            arg = data[0].partition(sep)\r\n            data = data[2].partition('\\n')\r\n            if not arg[1] or not arg[2]:\r\n                continue\r\n            self.args[arg[0]] = arg[2]\r\n        \r\n        # And that's the end of that chapter.\r\n    \r\n    def compile(self, sep='='):\r\n        \"\"\" Return a plain text packet based on the packet's values. \"\"\"\r\n        if self.cmd is None:\r\n            raise ValueError('Empty packet')\r\n        \r\n        args = '\\n'.join([\r\n            sep.join([key, value]) for key, value in self.args.iteritems() if value\r\n        ]) or None\r\n        \r\n        return '{0}{1}\\n{2}{3}'.format(\r\n            self.cmd,\r\n            '' if self.param is None else ' {0}'.format(self.param),\r\n            '' if args is None else '{0}\\n'.format(args),\r\n            '' if self.body is None else '\\n{0}'.format(self.body)\r\n        )\r\n\r\nclass PacketEvent(object):\r\n    \"\"\" Packet event.\r\n    \r\n    This object is a data structure which stores packet data under specific\r\n    keys according to the mapping rules defined in the Protocol Parser.\r\n    \"\"\"\r\n    \r\n    def __init__(self, event, args=None):\r\n        self.name = event\r\n        self.arguments = OrderedDict() if args is None else OrderedDict(args)\r\n    \r\n    def __call__(self, argument, value=None):\r\n        \"\"\" Shortcut for `.arg` and `.set_arg`. \"\"\"\r\n        \r\n        if value is None:\r\n            return self.arguments[argument]\r\n        \r\n        self.arguments[argument] = value\r\n        return value\r\n\r\n\r\nclass Tablumps(object):\r\n    \"\"\" dAmn tablumps parser.\r\n    \r\n    dAmn sends certain information formatted in a specific manner.\r\n    Links, images, thumbs, and other forms of data are formatted\r\n    in strings where the different attributes of these values are\r\n    separated by tab characters (``\\\\t``), and usually begin with an\r\n    ampersand.\r\n    \r\n    We refer to these items as \"tablumps\" because of the tab\r\n    characters being used as delimiters. The job of this class is to\r\n    
The job of this class is to\r\n replace tablumps with readable strings, or to extract the data\r\n given in the tablumps.\r\n \"\"\"\r\n \r\n expressions = None\r\n replace = None\r\n titles = None\r\n subs = None\r\n \r\n def __init__(self):\r\n \"\"\"Populate the expressions and replaces used when parsing tablumps.\"\"\"\r\n if self.expressions is not None:\r\n return\r\n # Regular expression objects used to find any complicated tablumps.\r\n self.expressions = [\r\n re.compile(\"&avatar\\t([a-zA-Z0-9-]+)\\t([0-9]+)\\t\"),\r\n re.compile(\"&dev\\t(.)\\t([a-zA-Z0-9-]+)\\t\"),\r\n re.compile(\"&emote\\t([^\\t]+)\\t([0-9]+)\\t([0-9]+)\\t(.*?)\\t([a-z0-9./=-_]+)\\t\"),\r\n re.compile(\"&a\\t([^\\t]+)\\t([^\\t]*)\\t\"),\r\n re.compile(\"&link\\t([^\\t]+)\\t&\\t\"),\r\n re.compile(\"&link\\t([^\\t]+)\\t([^\\t]+)\\t&\\t\"),\r\n re.compile(\"&acro\\t([^\\t]+)\\t(.*)&\\/acro\\t\"),\r\n re.compile(\"&abbr\\t([^\\t]+)\\t(.*)&\\/abbr\\t\"),\r\n re.compile(\"&thumb\\t(?P[0-9]+)\\t([^\\t]+)\\t([^\\t]+)\\t([^\\t]+)\\t([^\\t]+)\\t([^\\t]+)\\t([^\\t]+)\\t\"),\r\n re.compile(\"&img\\t([^\\t]+)\\t([^\\t]*)\\t([^\\t]*)\\t\"),\r\n re.compile(\"&iframe\\t([^\\t]+)\\t([0-9%]*)\\t([0-9%]*)\\t&\\/iframe\\t\"),\r\n ]\r\n self.titles = ('avatar', 'dev', 'emote', 'a', 'link', 'link', 'acronym', 'abbr', 'thumb', 'img', 'iframe')\r\n # Regular expression objects used to find and replace complicated tablumps.\r\n self.subs = [\r\n (re.compile(\"&avatar\\t([a-zA-Z0-9-]+)\\t([0-9]+)\\t\"), \":icon\\\\1:\"),\r\n (re.compile(\"&dev\\t(.)\\t([a-zA-Z0-9-]+)\\t\"), \":dev\\\\2:\"),\r\n (re.compile(\"&emote\\t([^\\t]+)\\t([0-9]+)\\t([0-9]+)\\t(.*?)\\t([a-z0-9./=-_]+)\\t\"), \"\\\\1\"),\r\n (re.compile(\"&a\\t([^\\t]+)\\t([^\\t]*)\\t\"), \"\"),\r\n (re.compile(\"&link\\t([^\\t]+)\\t&\\t\"), \"\\\\1\"),\r\n (re.compile(\"&link\\t([^\\t]+)\\t([^\\t]+)\\t&\\t\"), \"\\\\1 (\\\\2)\"),\r\n (re.compile(\"&acro\\t([^\\t]+)\\t\"), \"\"),\r\n (re.compile(\"&abbr\\t([^\\t]+)\\t\"), \"\"),\r\n (re.compile(\"&thumb\\t([0-9]+)\\t([^\\t]+)\\t([^\\t]+)\\t([^\\t]+)\\t([^\\t]+)\\t([^\\t]+)\\t([^\\t]+)\\t\"), \":thumb\\\\1:\"),\r\n (re.compile(\"&img\\t([^\\t]+)\\t([^\\t]*)\\t([^\\t]*)\\t\"), \"\\\"\\\\2\\\"\"),\r\n (re.compile(\"&iframe\\t([^\\t]+)\\t([0-9%]*)\\t([0-9%]*)\\t&\\/iframe\\t\"), \"